static int __init processor_probe(struct parisc_device *dev)

in arch/parisc/kernel/processor.c [79:219]


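/*
 * Probe callback for CPU modules found in the firmware inventory.
 * Fills in the per-CPU cpuinfo_parisc slot for the new CPU and, on
 * SMP, marks secondary CPUs present and brings them up via add_cpu().
 * Returns 0 on success; a nonzero return leaves the device unclaimed,
 * so the extra CPU is simply ignored.
 */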
static int __init processor_probe(struct parisc_device *dev)
{
	unsigned long txn_addr;
	unsigned long cpuid;
	struct cpuinfo_parisc *p;
	struct pdc_pat_cpu_num cpu_info = { };

#ifdef CONFIG_SMP
	if (num_online_cpus() >= nr_cpu_ids) {
		printk(KERN_INFO "num_online_cpus() >= nr_cpu_ids\n");
		return 1;
	}
#else
	if (boot_cpu_data.cpu_count > 0) {
		printk(KERN_INFO "CONFIG_SMP=n  ignoring additional CPUs\n");
		return 1;
	}
#endif

	/* Take the logical CPU ID from the global counter (bumped
	 * below); it may get overwritten by the PAT code.
	 */
	cpuid = boot_cpu_data.cpu_count;
	txn_addr = dev->hpa.start;	/* for legacy PDC */
	cpu_info.cpu_num = cpu_info.cpu_loc = cpuid;

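	/* On 64-bit PAT machines, firmware can report the module's
	 * address block and the physical CPU number; these replace the
	 * legacy-PDC defaults set above.
	 */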
#ifdef CONFIG_64BIT
	if (is_pdc_pat()) {
		ulong status;
		unsigned long bytecnt;
		pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell;

		pa_pdc_cell = kmalloc(sizeof(*pa_pdc_cell), GFP_KERNEL);
		if (!pa_pdc_cell)
			panic("couldn't allocate memory for PDC_PAT_CELL!");

		status = pdc_pat_cell_module(&bytecnt, dev->pcell_loc,
			dev->mod_index, PA_VIEW, pa_pdc_cell);

		BUG_ON(PDC_OK != status);

		/* verify it's the same as what do_pat_inventory() found */
		BUG_ON(dev->mod_info != pa_pdc_cell->mod_info);
		BUG_ON(dev->pmod_loc != pa_pdc_cell->mod_location);

		txn_addr = pa_pdc_cell->mod[0];   /* id_eid for IO sapic */

		kfree(pa_pdc_cell);

		/* get the cpu number */
		status = pdc_pat_cpu_get_number(&cpu_info, dev->hpa.start);
		BUG_ON(PDC_OK != status);

		pr_info("Logical CPU #%lu is physical cpu #%lu at location "
			"0x%lx with hpa %pa\n",
			cpuid, cpu_info.cpu_num, cpu_info.cpu_loc,
			&dev->hpa.start);

#undef USE_PAT_CPUID
#ifdef USE_PAT_CPUID
/* We need contiguous numbers for cpuid. Firmware's notion
 * of cpuid is for physical CPUs and we just don't care yet.
 * We'll care when we need to query PAT PDC about a CPU *after*
 * boot time (i.e., shut down a CPU from an OS perspective).
 */
		if (cpu_info.cpu_num >= NR_CPUS) {
			printk(KERN_WARNING "IGNORING CPU at %pa,"
				" cpu_slot_id > NR_CPUS"
				" (%ld > %d)\n",
				&dev->hpa.start, cpu_info.cpu_num, NR_CPUS);
			/* Ignore CPU since it will only crash */
			boot_cpu_data.cpu_count--;
			return 1;
		} else {
			cpuid = cpu_info.cpu_num;
		}
#endif
	}
#endif

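	/* cpu_data is the per-CPU struct cpuinfo_parisc instance
	 * (DEFINE_PER_CPU elsewhere in this file); cpuid selects the
	 * logical slot.
	 */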
	p = &per_cpu(cpu_data, cpuid);
	boot_cpu_data.cpu_count++;

	/* initialize counters - CPU 0 gets it_value set in time_init() */
	if (cpuid)
		memset(p, 0, sizeof(struct cpuinfo_parisc));

	p->dev = dev;		/* Save IODC data in case we need it */
	p->hpa = dev->hpa.start;	/* save CPU hpa */
	p->cpuid = cpuid;	/* save CPU id */
	p->txn_addr = txn_addr;	/* save CPU IRQ address */
	p->cpu_num = cpu_info.cpu_num;
	p->cpu_loc = cpu_info.cpu_loc;

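	/* Record this CPU's placement in the scheduler topology. */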
	store_cpu_topology(cpuid);

#ifdef CONFIG_SMP
	/*
	** FIXME: review if any other initialization is clobbered
	**	  for boot_cpu by the above memset().
	*/
	init_percpu_prof(cpuid);
#endif

	/*
	** CONFIG_SMP: init_smp_config() will attempt to get CPUs into
	** OS control. RENDEZVOUS is the default state - see memset above.
	**	p->state = STATE_RENDEZVOUS;
	*/

#if 0
	/* CPU 0 IRQ table is statically allocated/initialized */
	if (cpuid) {
		struct irqaction *actions;

		/*
		** itimer and ipi IRQ handlers are statically initialized in
		** arch/parisc/kernel/irq.c, i.e. they don't need to be
		** registered here.
		*/
		actions = kmalloc(sizeof(struct irqaction) * MAX_CPU_IRQ, GFP_ATOMIC);
		if (!actions) {
			/* no table of its own; share with the monarch */
			actions = cpu_irq_actions[0];
		}

		cpu_irq_actions[cpuid] = actions;
	}
#endif

	/* 
	 * Bring this CPU up now! (ignore bootstrap cpuid == 0)
	 */
#ifdef CONFIG_SMP
	if (cpuid) {
		set_cpu_present(cpuid, true);
		add_cpu(cpuid);
	}
#endif

	return 0;
}
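
For context, the probe is wired into the parisc driver core along these lines. This is a simplified sketch, not a verbatim excerpt of the file: the id_table contents and the processor_init() body are reduced to the essentials.

static const struct parisc_device_id processor_tbl[] __initconst = {
	{ HPHW_NPROC, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, SVERSION_ANY_ID },
	{ 0, }	/* terminator */
};

static struct parisc_driver cpu_driver __refdata = {
	.name		= "CPU",
	.id_table	= processor_tbl,
	.probe		= processor_probe,	/* called per matching CPU module */
};

void __init processor_init(void)
{
	register_parisc_driver(&cpu_driver);
}

With this wiring, the driver core walks the firmware inventory and calls processor_probe() once for every matching CPU module, so boot_cpu_data.cpu_count ends up equal to the number of probed CPUs.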