platform_chrp.c

/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2008 Marcel Moolenaar
 * Copyright (c) 2009 Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/endian.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/hid.h>
#include <machine/platformvar.h>
#include <machine/rtas.h>
#include <machine/smp.h>
#include <machine/spr.h>
#include <machine/trap.h>

#include <dev/ofw/openfirm.h>
#include <machine/ofw_machdep.h>

#include "platform_if.h"

#ifdef SMP
extern void *ap_pcpu;
#endif
#ifdef __powerpc64__
static uint8_t splpar_vpa[MAXCPU][640] __aligned(128); /* XXX: dpcpu */
#endif

static vm_offset_t realmaxaddr = VM_MAX_ADDRESS;

static int chrp_probe(platform_t);
static int chrp_attach(platform_t);
void chrp_mem_regions(platform_t, struct mem_region *phys, int *physsz,
    struct mem_region *avail, int *availsz);
static vm_offset_t chrp_real_maxaddr(platform_t);
static u_long chrp_timebase_freq(platform_t, struct cpuref *cpuref);
static int chrp_smp_first_cpu(platform_t, struct cpuref *cpuref);
static int chrp_smp_next_cpu(platform_t, struct cpuref *cpuref);
static int chrp_smp_get_bsp(platform_t, struct cpuref *cpuref);
static void chrp_smp_ap_init(platform_t);
static int chrp_cpuref_init(void);
#ifdef SMP
static int chrp_smp_start_cpu(platform_t, struct pcpu *cpu);
static void chrp_smp_probe_threads(platform_t plat);
static struct cpu_group *chrp_smp_topo(platform_t plat);
#endif
static void chrp_reset(platform_t);
#ifdef __powerpc64__
#include "phyp-hvcall.h"
static void phyp_cpu_idle(sbintime_t sbt);
#endif

static struct cpuref platform_cpuref[MAXCPU];
static int platform_cpuref_cnt;
static int platform_cpuref_valid;
static platform_method_t chrp_methods[] = {
	PLATFORMMETHOD(platform_probe, chrp_probe),
	PLATFORMMETHOD(platform_attach, chrp_attach),
	PLATFORMMETHOD(platform_mem_regions, chrp_mem_regions),
	PLATFORMMETHOD(platform_real_maxaddr, chrp_real_maxaddr),
	PLATFORMMETHOD(platform_timebase_freq, chrp_timebase_freq),

	PLATFORMMETHOD(platform_smp_ap_init, chrp_smp_ap_init),
	PLATFORMMETHOD(platform_smp_first_cpu, chrp_smp_first_cpu),
	PLATFORMMETHOD(platform_smp_next_cpu, chrp_smp_next_cpu),
	PLATFORMMETHOD(platform_smp_get_bsp, chrp_smp_get_bsp),
#ifdef SMP
	PLATFORMMETHOD(platform_smp_start_cpu, chrp_smp_start_cpu),
	PLATFORMMETHOD(platform_smp_probe_threads, chrp_smp_probe_threads),
	PLATFORMMETHOD(platform_smp_topo, chrp_smp_topo),
#endif

	PLATFORMMETHOD(platform_reset, chrp_reset),

	{ 0, 0 }
};

static platform_def_t chrp_platform = {
	"chrp",
	chrp_methods,
	0
};

PLATFORM_DEF(chrp_platform);
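
/*
 * Probe: any machine whose Open Firmware device tree exposes a /memory (or
 * /memory@0) node is accepted, at generic priority so that a more specific
 * platform module can still take precedence.
 */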
static int
chrp_probe(platform_t plat)
{
	if (OF_finddevice("/memory") != -1 || OF_finddevice("/memory@0") != -1)
		return (BUS_PROBE_GENERIC);

	return (ENXIO);
}
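
/*
 * Attach.  When running as a 64-bit guest without hypervisor privilege
 * (PSL_HV clear), set up for PAPR/pHyp operation: limit the real-mode
 * address range, install the pHyp MMU and idle hook, initialize the per-CPU
 * Virtual Processor Areas, and (on little-endian kernels) ask the hypervisor
 * to set the LPAR ILE mode.  Finally enumerate CPUs and, unless disabled by
 * the debug.quiesce_ofw tunable, quiesce Open Firmware.
 */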
static int
chrp_attach(platform_t plat)
{
	int quiesce;
#ifdef __powerpc64__
	int i;
#if BYTE_ORDER == LITTLE_ENDIAN
	int result;
#endif

	/* XXX: check for /rtas/ibm,hypertas-functions? */
	if (!(mfmsr() & PSL_HV)) {
		struct mem_region *phys, *avail;
		int nphys, navail;
		vm_offset_t off;

		mem_regions(&phys, &nphys, &avail, &navail);

		realmaxaddr = 0;
		for (i = 0; i < nphys; i++) {
			off = phys[i].mr_start + phys[i].mr_size;
			realmaxaddr = MAX(off, realmaxaddr);
		}

		pmap_mmu_install("mmu_phyp", BUS_PROBE_SPECIFIC);
		cpu_idle_hook = phyp_cpu_idle;

		/* Set up important VPA fields */
		for (i = 0; i < MAXCPU; i++) {
			/* First two: VPA size */
			splpar_vpa[i][4] =
			    (uint8_t)((sizeof(splpar_vpa[i]) >> 8) & 0xff);
			splpar_vpa[i][5] =
			    (uint8_t)(sizeof(splpar_vpa[i]) & 0xff);
			splpar_vpa[i][0xba] = 1;	/* Maintain FPRs */
			splpar_vpa[i][0xbb] = 1;	/* Maintain PMCs */
			splpar_vpa[i][0xfc] = 0xff;	/* Maintain full SLB */
			splpar_vpa[i][0xfd] = 0xff;
			splpar_vpa[i][0xff] = 1;	/* Maintain Altivec */
		}
		mb();

		/* Set up hypervisor CPU stuff */
		chrp_smp_ap_init(plat);

#if BYTE_ORDER == LITTLE_ENDIAN
		/*
		 * Ask the hypervisor to update the LPAR ILE bit.
		 *
		 * This involves all processors reentering the hypervisor
		 * so the change appears simultaneously in all processors.
		 * This can take a long time.
		 */
		for (;;) {
			result = phyp_hcall(H_SET_MODE, 1UL,
			    H_SET_MODE_RSRC_ILE, 0, 0);
			if (result == H_SUCCESS)
				break;
			DELAY(1000);
		}
#endif
	}
#endif
	chrp_cpuref_init();

	/* Some systems (e.g. QEMU) need Open Firmware to stand down */
	quiesce = 1;
	TUNABLE_INT_FETCH("debug.quiesce_ofw", &quiesce);
	if (quiesce)
		ofw_quiesce();

	return (0);
}
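
/*
 * Parse the PAPR /ibm,dynamic-reconfiguration-memory node, if present, and
 * append every assigned, non-reserved LMB to both the physical and the
 * available region lists.  Returns 1 if the node was parsed, 0 otherwise.
 */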
static int
parse_drconf_memory(struct mem_region *ofmem, int *msz,
    struct mem_region *ofavail, int *asz)
{
	phandle_t phandle;
	vm_offset_t base;
	int i, idx, len, lasz, lmsz, res;
	uint32_t flags, lmb_size[2];
	uint32_t *dmem;

	lmsz = *msz;
	lasz = *asz;

	phandle = OF_finddevice("/ibm,dynamic-reconfiguration-memory");
	if (phandle == -1)
		/* No drconf node, return. */
		return (0);

	res = OF_getencprop(phandle, "ibm,lmb-size", lmb_size,
	    sizeof(lmb_size));
	if (res == -1)
		return (0);
	printf("Logical Memory Block size: %d MB\n", lmb_size[1] >> 20);

	/* Parse the /ibm,dynamic-memory.
	   The first position gives the # of entries. The next two words
	   reflect the address of the memory block. The next four words are
	   the DRC index, reserved, list index and flags.
	   (see PAPR C.6.6.2 ibm,dynamic-reconfiguration-memory)

	    #el  Addr   DRC-idx  res   list-idx  flags
	   -------------------------------------------------
	   | 4 |   8   |   4    |  4  |    4    |   4   |....
	   -------------------------------------------------
	*/

	len = OF_getproplen(phandle, "ibm,dynamic-memory");
	if (len > 0) {
		/* We have to use a variable length array on the stack
		   since we have very limited stack space.
		*/
		cell_t arr[len/sizeof(cell_t)];

		res = OF_getencprop(phandle, "ibm,dynamic-memory", arr,
		    sizeof(arr));
		if (res == -1)
			return (0);

		/* Number of elements */
		idx = arr[0];

		/* First address, in arr[1], arr[2] */
		dmem = &arr[1];

		for (i = 0; i < idx; i++) {
			base = ((uint64_t)dmem[0] << 32) + dmem[1];
			dmem += 4;
			flags = dmem[1];
			/* Use region only if available and not reserved. */
			if ((flags & 0x8) && !(flags & 0x80)) {
				ofmem[lmsz].mr_start = base;
				ofmem[lmsz].mr_size = (vm_size_t)lmb_size[1];
				ofavail[lasz].mr_start = base;
				ofavail[lasz].mr_size = (vm_size_t)lmb_size[1];
				lmsz++;
				lasz++;
			}
			dmem += 2;
		}
	}

	*msz = lmsz;
	*asz = lasz;

	return (1);
}
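
/*
 * Gather the physical and available memory regions from the standard Open
 * Firmware /memory nodes plus any dynamically reconfigurable LMBs, then trim
 * "available" entries that extend past the end of real memory.
 */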
void
chrp_mem_regions(platform_t plat, struct mem_region *phys, int *physsz,
    struct mem_region *avail, int *availsz)
{
	vm_offset_t maxphysaddr;
	int i;

	ofw_mem_regions(phys, physsz, avail, availsz);
	parse_drconf_memory(phys, physsz, avail, availsz);

	/*
	 * On some firmwares (SLOF), some memory may be marked available that
	 * doesn't actually exist. This manifests as an extension of the last
	 * available segment past the end of physical memory, so truncate that
	 * one.
	 */
	maxphysaddr = 0;
	for (i = 0; i < *physsz; i++)
		if (phys[i].mr_start + phys[i].mr_size > maxphysaddr)
			maxphysaddr = phys[i].mr_start + phys[i].mr_size;

	for (i = 0; i < *availsz; i++)
		if (avail[i].mr_start + avail[i].mr_size > maxphysaddr)
			avail[i].mr_size = maxphysaddr - avail[i].mr_start;
}
static vm_offset_t
chrp_real_maxaddr(platform_t plat)
{
	return (realmaxaddr);
}
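
/*
 * Read the timebase frequency from the first "cpu" node under /cpus; the
 * cpuref argument is ignored, so all CPUs are assumed to tick at the same
 * timebase rate.
 */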
static u_long
chrp_timebase_freq(platform_t plat, struct cpuref *cpuref)
{
	phandle_t cpus, cpunode;
	int32_t ticks = -1;
	int res;
	char buf[8];

	cpus = OF_finddevice("/cpus");
	if (cpus == -1)
		panic("CPU tree not found on Open Firmware\n");

	for (cpunode = OF_child(cpus); cpunode != 0; cpunode = OF_peer(cpunode)) {
		res = OF_getprop(cpunode, "device_type", buf, sizeof(buf));
		if (res > 0 && strcmp(buf, "cpu") == 0)
			break;
	}
	if (cpunode <= 0)
		panic("CPU node not found on Open Firmware\n");

	OF_getencprop(cpunode, "timebase-frequency", &ticks, sizeof(ticks));

	if (ticks <= 0)
		panic("Unable to determine timebase frequency!");

	return (ticks);
}
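
/*
 * Iterators over platform_cpuref[]: first/next hand CPUs out in cpuid order;
 * get_bsp returns entry 0, which chrp_cpuref_init arranges to be the boot
 * CPU.
 */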
static int
chrp_smp_first_cpu(platform_t plat, struct cpuref *cpuref)
{
	if (platform_cpuref_valid == 0)
		return (EINVAL);

	cpuref->cr_cpuid = 0;
	cpuref->cr_hwref = platform_cpuref[0].cr_hwref;

	return (0);
}

static int
chrp_smp_next_cpu(platform_t plat, struct cpuref *cpuref)
{
	int id;

	if (platform_cpuref_valid == 0)
		return (EINVAL);

	id = cpuref->cr_cpuid + 1;
	if (id >= platform_cpuref_cnt)
		return (ENOENT);

	cpuref->cr_cpuid = platform_cpuref[id].cr_cpuid;
	cpuref->cr_hwref = platform_cpuref[id].cr_hwref;

	return (0);
}

static int
chrp_smp_get_bsp(platform_t plat, struct cpuref *cpuref)
{
	cpuref->cr_cpuid = platform_cpuref[0].cr_cpuid;
	cpuref->cr_hwref = platform_cpuref[0].cr_hwref;
	return (0);
}
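
/* Fetch the single-cell "reg" property (the hardware CPU id) of a CPU node. */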
static void
get_cpu_reg(phandle_t cpu, cell_t *reg)
{
	int res;

	res = OF_getproplen(cpu, "reg");
	if (res != sizeof(cell_t))
		panic("Unexpected length for CPU property reg on Open Firmware\n");
	OF_getencprop(cpu, "reg", reg, res);
}
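
/*
 * Build platform_cpuref[] from the Open Firmware /cpus node.  Every thread
 * listed in a cpu node's "ibm,ppc-interrupt-server#s" property becomes one
 * entry, and the entries are rotated so the boot CPU (located through
 * /chosen/cpu or /chosen/fdtbootcpu) is assigned cpuid 0.
 */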
static int
chrp_cpuref_init(void)
{
	phandle_t cpu, dev, chosen, pbsp;
	ihandle_t ibsp;
	char buf[32];
	int a, bsp, res, res2, tmp_cpuref_cnt;
	static struct cpuref tmp_cpuref[MAXCPU];
	cell_t interrupt_servers[32], addr_cells, size_cells, reg, bsp_reg;

	if (platform_cpuref_valid)
		return (0);

	dev = OF_peer(0);
	dev = OF_child(dev);
	while (dev != 0) {
		res = OF_getprop(dev, "name", buf, sizeof(buf));
		if (res > 0 && strcmp(buf, "cpus") == 0)
			break;
		dev = OF_peer(dev);
	}

	/* Make sure that cpus reg property have 1 address cell and 0 size cells */
	res = OF_getproplen(dev, "#address-cells");
	res2 = OF_getproplen(dev, "#size-cells");
	if (res != res2 || res != sizeof(cell_t))
		panic("CPU properties #address-cells and #size-cells not found on Open Firmware\n");
	OF_getencprop(dev, "#address-cells", &addr_cells, sizeof(addr_cells));
	OF_getencprop(dev, "#size-cells", &size_cells, sizeof(size_cells));
	if (addr_cells != 1 || size_cells != 0)
		panic("Unexpected values for CPU properties #address-cells and #size-cells on Open Firmware\n");

	/* Look for boot CPU in /chosen/cpu and /chosen/fdtbootcpu */
	chosen = OF_finddevice("/chosen");
	if (chosen == -1)
		panic("Device /chosen not found on Open Firmware\n");

	bsp_reg = -1;

	/* /chosen/cpu */
	if (OF_getproplen(chosen, "cpu") == sizeof(ihandle_t)) {
		OF_getprop(chosen, "cpu", &ibsp, sizeof(ibsp));
		pbsp = OF_instance_to_package(be32toh(ibsp));
		if (pbsp != -1)
			get_cpu_reg(pbsp, &bsp_reg);
	}

	/* /chosen/fdtbootcpu */
	if (bsp_reg == -1) {
		if (OF_getproplen(chosen, "fdtbootcpu") == sizeof(cell_t))
			OF_getprop(chosen, "fdtbootcpu", &bsp_reg, sizeof(bsp_reg));
	}

	if (bsp_reg == -1)
		panic("Boot CPU not found on Open Firmware\n");

	bsp = -1;
	tmp_cpuref_cnt = 0;
	for (cpu = OF_child(dev); cpu != 0; cpu = OF_peer(cpu)) {
		res = OF_getprop(cpu, "device_type", buf, sizeof(buf));
		if (res > 0 && strcmp(buf, "cpu") == 0) {
			res = OF_getproplen(cpu, "ibm,ppc-interrupt-server#s");
			if (res > 0) {
				OF_getencprop(cpu, "ibm,ppc-interrupt-server#s",
				    interrupt_servers, res);

				get_cpu_reg(cpu, &reg);
				if (reg == bsp_reg)
					bsp = tmp_cpuref_cnt;

				for (a = 0; a < res/sizeof(cell_t); a++) {
					tmp_cpuref[tmp_cpuref_cnt].cr_hwref = interrupt_servers[a];
					tmp_cpuref[tmp_cpuref_cnt].cr_cpuid = tmp_cpuref_cnt;
					tmp_cpuref_cnt++;
				}
			}
		}
	}

	if (bsp == -1)
		panic("Boot CPU not found\n");

	/* Map IDs, so BSP has CPUID 0 regardless of hwref */
	for (a = bsp; a < tmp_cpuref_cnt; a++) {
		platform_cpuref[platform_cpuref_cnt].cr_hwref = tmp_cpuref[a].cr_hwref;
		platform_cpuref[platform_cpuref_cnt].cr_cpuid = platform_cpuref_cnt;
		platform_cpuref_cnt++;
	}
	for (a = 0; a < bsp; a++) {
		platform_cpuref[platform_cpuref_cnt].cr_hwref = tmp_cpuref[a].cr_hwref;
		platform_cpuref[platform_cpuref_cnt].cr_cpuid = platform_cpuref_cnt;
		platform_cpuref_cnt++;
	}

	platform_cpuref_valid = 1;

	return (0);
}

#ifdef SMP
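/*
 * Start an application processor through the RTAS "start-cpu" method,
 * pointing it at the reset vector with its struct pcpu as the argument, then
 * wait roughly a second for it to mark itself awake.
 */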
static int
chrp_smp_start_cpu(platform_t plat, struct pcpu *pc)
{
	cell_t start_cpu;
	int result, err, timeout;

	if (!rtas_exists()) {
		printf("RTAS uninitialized: unable to start AP %d\n",
		    pc->pc_cpuid);
		return (ENXIO);
	}

	start_cpu = rtas_token_lookup("start-cpu");
	if (start_cpu == -1) {
		printf("RTAS unknown method: unable to start AP %d\n",
		    pc->pc_cpuid);
		return (ENXIO);
	}

	ap_pcpu = pc;
	powerpc_sync();

	result = rtas_call_method(start_cpu, 3, 1, pc->pc_hwref, EXC_RST, pc,
	    &err);
	if (result < 0 || err != 0) {
		printf("RTAS error (%d/%d): unable to start AP %d\n",
		    result, err, pc->pc_cpuid);
		return (ENXIO);
	}

	timeout = 10000;
	while (!pc->pc_awake && timeout--)
		DELAY(100);

	return ((pc->pc_awake) ? 0 : EBUSY);
}
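
/*
 * Derive mp_ncores and smp_threads_per_core from the per-CPU data: CPUs with
 * consecutive IDs that report the same pc_hwref are counted as threads of a
 * single core.
 */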
static void
chrp_smp_probe_threads(platform_t plat)
{
	struct pcpu *pc, *last_pc;
	int i, ncores;

	ncores = 0;
	last_pc = NULL;
	for (i = 0; i <= mp_maxid; i++) {
		pc = pcpu_find(i);
		if (pc == NULL)
			continue;
		if (last_pc == NULL || pc->pc_hwref != last_pc->pc_hwref)
			ncores++;
		last_pc = pc;
	}

	mp_ncores = ncores;
	if (mp_ncpus % ncores == 0)
		smp_threads_per_core = mp_ncpus / ncores;
}
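
/*
 * Report the scheduling topology: flat when the configuration is irregular
 * or non-threaded, otherwise a single level of SMT groups sharing an L1
 * cache.
 */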
static struct cpu_group *
chrp_smp_topo(platform_t plat)
{
	if (mp_ncpus % mp_ncores != 0) {
		printf("WARNING: Irregular SMP topology. Performance may be "
		    "suboptimal (%d CPUS, %d cores)\n", mp_ncpus, mp_ncores);
		return (smp_topo_none());
	}

	/* Don't do anything fancier for non-threaded SMP */
	if (mp_ncpus == mp_ncores)
		return (smp_topo_none());

	return (smp_topo_1level(CG_SHARE_L1, smp_threads_per_core,
	    CG_FLAG_SMT));
}
#endif

static void
chrp_reset(platform_t platform)
{
	OF_reboot();
}

#ifdef __powerpc64__
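/*
 * Idle hook for pHyp guests: with external interrupts masked, cede the
 * virtual processor to the hypervisor unless a thread is already runnable.
 */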
static void
phyp_cpu_idle(sbintime_t sbt)
{
	register_t msr;

	msr = mfmsr();

	mtmsr(msr & ~PSL_EE);
	if (sched_runnable()) {
		mtmsr(msr);
		return;
	}

	phyp_hcall(H_CEDE); /* Re-enables interrupts internally */
	mtmsr(msr);
}
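
/*
 * Per-CPU hypervisor setup on pHyp: register this CPU's Virtual Processor
 * Area and set the initial interrupt priority via H_CPPR.
 */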
static void
chrp_smp_ap_init(platform_t platform)
{
	if (!(mfmsr() & PSL_HV)) {
		/* Register VPA */
		phyp_hcall(H_REGISTER_VPA, 1UL, PCPU_GET(hwref),
		    splpar_vpa[PCPU_GET(hwref)]);

		/* Set interrupt priority */
		phyp_hcall(H_CPPR, 0xff);
	}
}
#else
static void
chrp_smp_ap_init(platform_t platform)
{
}
#endif