vpe-mt.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
 * Copyright (C) 2013 Imagination Technologies Ltd.
 */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/mips_mt.h>
#include <asm/vpe.h>

static int major;

/* The number of TCs and VPEs physically available on the core */
static int hw_tcs, hw_vpes;
/* We are prepared, so configure and start the VPE... */
int vpe_run(struct vpe *v)
{
	unsigned long flags, val, dmt_flag;
	struct vpe_notifications *notifier;
	unsigned int vpeflags;
	struct tc *t;

	/* check we are the Master VPE */
	local_irq_save(flags);
	val = read_c0_vpeconf0();
	if (!(val & VPECONF0_MVP)) {
		pr_warn("VPE loader: only Master VPEs are able to config MT\n");
		local_irq_restore(flags);

		return -1;
	}

	dmt_flag = dmt();
	vpeflags = dvpe();

	if (list_empty(&v->tc)) {
		evpe(vpeflags);
		emt(dmt_flag);
		local_irq_restore(flags);

		pr_warn("VPE loader: No TCs associated with VPE %d\n",
			v->minor);

		return -ENOEXEC;
	}

	t = list_first_entry(&v->tc, struct tc, tc);

	/* Put MVPEs into 'configuration state' */
	set_c0_mvpcontrol(MVPCONTROL_VPC);

	settc(t->index);

	/* should check it is halted, and not activated */
	if ((read_tc_c0_tcstatus() & TCSTATUS_A) ||
	    !(read_tc_c0_tchalt() & TCHALT_H)) {
		evpe(vpeflags);
		emt(dmt_flag);
		local_irq_restore(flags);

		pr_warn("VPE loader: TC %d is already active!\n",
			t->index);

		return -ENOEXEC;
	}

	/*
	 * Write the address we want it to start running from in the TCPC
	 * register.
	 */
	write_tc_c0_tcrestart((unsigned long)v->__start);
	write_tc_c0_tccontext((unsigned long)0);

	/*
	 * Mark the TC as activated, not interrupt exempt and not dynamically
	 * allocatable
	 */
	val = read_tc_c0_tcstatus();
	val = (val & ~(TCSTATUS_DA | TCSTATUS_IXMT)) | TCSTATUS_A;
	write_tc_c0_tcstatus(val);

	write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H);

	/*
	 * The sde-kit passes 'memsize' to __start in $a3, so set something
	 * here... Or set $a3 to zero and define DFLT_STACK_SIZE and
	 * DFLT_HEAP_SIZE when you compile your program
	 */
	mttgpr(6, v->ntcs);
	mttgpr(7, physical_memsize);

	/* set up VPE1 */
	/*
	 * bind the TC to VPE 1 as late as possible so we only have the final
	 * VPE registers to set up, and so an EJTAG probe can trigger on it
	 */
	write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | 1);

	write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~(VPECONF0_VPA));

	back_to_back_c0_hazard();

	/* Set up the XTC bit in vpeconf0 to point at our tc */
	write_vpe_c0_vpeconf0((read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC))
			      | (t->index << VPECONF0_XTC_SHIFT));

	back_to_back_c0_hazard();

	/* enable this VPE */
	write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);

	/* clear out any leftovers from a previous program */
	write_vpe_c0_status(0);
	write_vpe_c0_cause(0);

	/* take system out of configuration state */
	clear_c0_mvpcontrol(MVPCONTROL_VPC);

	/*
	 * SMVP kernels manage VPE enable independently, but uniprocessor
	 * kernels need to turn it on, even if that wasn't the pre-dvpe() state.
	 */
#ifdef CONFIG_SMP
	evpe(vpeflags);
#else
	evpe(EVPE_ENABLE);
#endif
	emt(dmt_flag);
	local_irq_restore(flags);

	list_for_each_entry(notifier, &v->notify, list)
		notifier->start(VPE_MODULE_MINOR);

	return 0;
}
void cleanup_tc(struct tc *tc)
{
	unsigned long flags;
	unsigned int mtflags, vpflags;
	int tmp;

	local_irq_save(flags);
	mtflags = dmt();
	vpflags = dvpe();
	/* Put MVPEs into 'configuration state' */
	set_c0_mvpcontrol(MVPCONTROL_VPC);

	settc(tc->index);
	tmp = read_tc_c0_tcstatus();

	/* mark not allocated and not dynamically allocatable */
	tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
	tmp |= TCSTATUS_IXMT;	/* interrupt exempt */
	write_tc_c0_tcstatus(tmp);

	write_tc_c0_tchalt(TCHALT_H);
	mips_ihb();

	clear_c0_mvpcontrol(MVPCONTROL_VPC);
	evpe(vpflags);
	emt(mtflags);
	local_irq_restore(flags);
}
/* module wrapper entry points */

/* give me a vpe */
void *vpe_alloc(void)
{
	int i;
	struct vpe *v;

	/* find a vpe */
	for (i = 1; i < MAX_VPES; i++) {
		v = get_vpe(i);
		if (v != NULL) {
			v->state = VPE_STATE_INUSE;
			return v;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(vpe_alloc);

/* start running from here */
int vpe_start(void *vpe, unsigned long start)
{
	struct vpe *v = vpe;

	v->__start = start;
	return vpe_run(v);
}
EXPORT_SYMBOL(vpe_start);

/* halt it for now */
int vpe_stop(void *vpe)
{
	struct vpe *v = vpe;
	struct tc *t;
	unsigned int evpe_flags;

	evpe_flags = dvpe();

	t = list_entry(v->tc.next, struct tc, tc);
	if (t != NULL) {
		settc(t->index);
		write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
	}

	evpe(evpe_flags);

	return 0;
}
EXPORT_SYMBOL(vpe_stop);

/* I'm done with it, thank you */
int vpe_free(void *vpe)
{
	struct vpe *v = vpe;
	struct tc *t;
	unsigned int evpe_flags;

	t = list_entry(v->tc.next, struct tc, tc);
	if (t == NULL)
		return -ENOEXEC;

	evpe_flags = dvpe();

	/* Put MVPEs into 'configuration state' */
	set_c0_mvpcontrol(MVPCONTROL_VPC);

	settc(t->index);
	write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);

	/* halt the TC */
	write_tc_c0_tchalt(TCHALT_H);
	mips_ihb();

	/* mark the TC unallocated */
	write_tc_c0_tcstatus(read_tc_c0_tcstatus() & ~TCSTATUS_A);

	v->state = VPE_STATE_UNUSED;

	clear_c0_mvpcontrol(MVPCONTROL_VPC);
	evpe(evpe_flags);

	return 0;
}
EXPORT_SYMBOL(vpe_free);
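
/*
 * Hypothetical usage sketch (not part of this file): one way a client could
 * drive the four exported calls above. The names example_launch and
 * example_teardown and the entry_point argument are illustrative only; a
 * real caller obtains the entry point from the program image it has loaded.
 */
#if 0
static void *example_vpe;

static int example_launch(unsigned long entry_point)
{
	example_vpe = vpe_alloc();		/* claim a free VPE slot */
	if (example_vpe == NULL)
		return -ENODEV;

	/* record the entry point and hand control to vpe_run() */
	return vpe_start(example_vpe, entry_point);
}

static void example_teardown(void)
{
	if (example_vpe != NULL) {
		vpe_stop(example_vpe);		/* clear VPECONF0_VPA */
		vpe_free(example_vpe);		/* halt the TC, mark the VPE unused */
		example_vpe = NULL;
	}
}
#endif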
static ssize_t store_kill(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t len)
{
	struct vpe *vpe = get_vpe(aprp_cpu_index());
	struct vpe_notifications *notifier;

	list_for_each_entry(notifier, &vpe->notify, list)
		notifier->stop(aprp_cpu_index());

	release_progmem(vpe->load_addr);
	cleanup_tc(get_tc(aprp_cpu_index()));
	vpe_stop(vpe);
	vpe_free(vpe);

	return len;
}
static DEVICE_ATTR(kill, S_IWUSR, NULL, store_kill);

static ssize_t ntcs_show(struct device *cd, struct device_attribute *attr,
			 char *buf)
{
	struct vpe *vpe = get_vpe(aprp_cpu_index());

	return sprintf(buf, "%d\n", vpe->ntcs);
}

static ssize_t ntcs_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t len)
{
	struct vpe *vpe = get_vpe(aprp_cpu_index());
	unsigned long new;
	int ret;

	ret = kstrtoul(buf, 0, &new);
	if (ret < 0)
		return ret;

	if (new == 0 || new > (hw_tcs - aprp_cpu_index()))
		return -EINVAL;

	vpe->ntcs = new;

	return len;
}
static DEVICE_ATTR_RW(ntcs);

static struct attribute *vpe_attrs[] = {
	&dev_attr_kill.attr,
	&dev_attr_ntcs.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vpe);
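
/*
 * Note (added for illustration): once the "vpe" class and the "vpe1" device
 * below are registered, the two attributes above are expected to appear as
 * /sys/class/vpe/vpe1/kill and /sys/class/vpe/vpe1/ntcs. Any write to "kill"
 * stops the AP program and releases its TC; "ntcs" reads or sets the number
 * of TCs handed to the AP program (valid range 1 .. hw_tcs - aprp_cpu_index()).
 */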
static void vpe_device_release(struct device *cd)
{
	kfree(cd);
}

static struct class vpe_class = {
	.name = "vpe",
	.owner = THIS_MODULE,
	.dev_release = vpe_device_release,
	.dev_groups = vpe_groups,
};

static struct device vpe_device;
int __init vpe_module_init(void)
{
	unsigned int mtflags, vpflags;
	unsigned long flags, val;
	struct vpe *v = NULL;
	struct tc *t;
	int tc, err;

	if (!cpu_has_mipsmt) {
		pr_warn("VPE loader: not a MIPS MT capable processor\n");
		return -ENODEV;
	}

	if (vpelimit == 0) {
		pr_warn("No VPEs reserved for AP/SP, not initializing VPE loader\n"
			"Pass maxvpes=<n> argument as kernel argument\n");

		return -ENODEV;
	}

	if (aprp_cpu_index() == 0) {
		pr_warn("No TCs reserved for AP/SP, not initializing VPE loader\n"
			"Pass maxtcs=<n> argument as kernel argument\n");

		return -ENODEV;
	}

	major = register_chrdev(0, VPE_MODULE_NAME, &vpe_fops);
	if (major < 0) {
		pr_warn("VPE loader: unable to register character device\n");
		return major;
	}

	err = class_register(&vpe_class);
	if (err) {
		pr_err("vpe_class registration failed\n");
		goto out_chrdev;
	}

	device_initialize(&vpe_device);
	vpe_device.class = &vpe_class;
	vpe_device.parent = NULL;
	dev_set_name(&vpe_device, "vpe1");
	vpe_device.devt = MKDEV(major, VPE_MODULE_MINOR);
	err = device_add(&vpe_device);
	if (err) {
		pr_err("Adding vpe_device failed\n");
		goto out_class;
	}

	local_irq_save(flags);
	mtflags = dmt();
	vpflags = dvpe();

	/* Put MVPEs into 'configuration state' */
	set_c0_mvpcontrol(MVPCONTROL_VPC);

	val = read_c0_mvpconf0();
	hw_tcs = (val & MVPCONF0_PTC) + 1;
	hw_vpes = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;

	for (tc = aprp_cpu_index(); tc < hw_tcs; tc++) {
		/*
		 * Must re-enable multithreading temporarily, or we might
		 * hang if we reschedule, send IPIs or similar here.
		 */
		clear_c0_mvpcontrol(MVPCONTROL_VPC);
		evpe(vpflags);
		emt(mtflags);
		local_irq_restore(flags);

		t = alloc_tc(tc);
		if (!t) {
			err = -ENOMEM;
			goto out_dev;
		}

		local_irq_save(flags);
		mtflags = dmt();
		vpflags = dvpe();
		set_c0_mvpcontrol(MVPCONTROL_VPC);

		/* VPEs */
		if (tc < hw_tcs) {
			settc(tc);

			v = alloc_vpe(tc);
			if (v == NULL) {
				pr_warn("VPE: unable to allocate VPE\n");
				goto out_reenable;
			}

			v->ntcs = hw_tcs - aprp_cpu_index();

			/* add the tc to the list of this vpe's tcs */
			list_add(&t->tc, &v->tc);

			/* deactivate all but vpe0 */
			if (tc >= aprp_cpu_index()) {
				unsigned long tmp = read_vpe_c0_vpeconf0();

				tmp &= ~VPECONF0_VPA;

				/* master VPE */
				tmp |= VPECONF0_MVP;
				write_vpe_c0_vpeconf0(tmp);
			}

			/* disable multi-threading with TCs */
			write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() &
						~VPECONTROL_TE);

			if (tc >= vpelimit) {
				/*
				 * Set config to be the same as vpe0,
				 * particularly kseg0 coherency alg
				 */
				write_vpe_c0_config(read_c0_config());
			}
		}

		/* TCs */
		t->pvpe = v;	/* set the parent vpe */

		if (tc >= aprp_cpu_index()) {
			unsigned long tmp;

			settc(tc);

			/*
			 * A TC that is bound to any other VPE gets bound to
			 * VPE0; ideally I'd like to make it homeless, but it
			 * doesn't appear to let me bind a TC to a non-existent
			 * VPE, which is perfectly reasonable.
			 *
			 * The (un)bound state is visible to an EJTAG probe so
			 * may notify GDB...
			 */
			tmp = read_tc_c0_tcbind();
			if (tmp & TCBIND_CURVPE) {
				/* tc is bound to a VPE above vpe0 */
				write_tc_c0_tcbind(tmp & ~TCBIND_CURVPE);

				t->pvpe = get_vpe(0);	/* set the parent vpe */
			}

			/* halt the TC */
			write_tc_c0_tchalt(TCHALT_H);
			mips_ihb();

			tmp = read_tc_c0_tcstatus();

			/* mark not activated and not dynamically allocatable */
			tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
			tmp |= TCSTATUS_IXMT;	/* interrupt exempt */
			write_tc_c0_tcstatus(tmp);
		}
	}

out_reenable:
	/* release config state */
	clear_c0_mvpcontrol(MVPCONTROL_VPC);

	evpe(vpflags);
	emt(mtflags);
	local_irq_restore(flags);

	return 0;

out_dev:
	device_del(&vpe_device);

out_class:
	class_unregister(&vpe_class);

out_chrdev:
	unregister_chrdev(major, VPE_MODULE_NAME);

	return err;
}
void __exit vpe_module_exit(void)
{
	struct vpe *v, *n;

	device_del(&vpe_device);
	class_unregister(&vpe_class);
	unregister_chrdev(major, VPE_MODULE_NAME);

	/* No locking needed here */
	list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) {
		if (v->state != VPE_STATE_UNUSED)
			release_vpe(v);
	}
}
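
/*
 * Illustrative wiring (an assumption, not taken from this file): a front end
 * would typically call the two entry points above from its own module
 * init/exit hooks, roughly:
 *
 *	static int __init vpe_apsp_init(void)  { return vpe_module_init(); }
 *	static void __exit vpe_apsp_exit(void) { vpe_module_exit(); }
 *	module_init(vpe_apsp_init);
 *	module_exit(vpe_apsp_exit);
 *
 * The hook names here are hypothetical; only vpe_module_init() and
 * vpe_module_exit() come from this file.
 */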