/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/smp.h>
#include <linux/cdev.h>
#include <linux/compat.h>
#include <asm/hardwall.h>
#include <asm/traps.h>
#include <asm/siginfo.h>
#include <asm/irq_regs.h>

#include <arch/interrupts.h>
#include <arch/spr_def.h>

/*
 * Implement a per-cpu "hardwall" resource class such as UDN or IPI.
 * We use "hardwall" nomenclature throughout for historical reasons.
 * The lock here controls access to the list data structure as well as
 * to the items on the list.
 */
struct hardwall_type {
	int index;
	int is_xdn;
	int is_idn;
	int disabled;
	const char *name;
	struct list_head list;
	spinlock_t lock;
	struct proc_dir_entry *proc_dir;
};

enum hardwall_index {
	HARDWALL_UDN = 0,
#ifndef __tilepro__
	HARDWALL_IDN = 1,
	HARDWALL_IPI = 2,
#endif
	_HARDWALL_TYPES
};

static struct hardwall_type hardwall_types[] = {
	{  /* user-space access to UDN */
		0,
		1,
		0,
		0,
		"udn",
		LIST_HEAD_INIT(hardwall_types[HARDWALL_UDN].list),
		__SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_UDN].lock),
		NULL
	},
#ifndef __tilepro__
	{  /* user-space access to IDN */
		1,
		1,
		1,
		1,  /* disabled pending hypervisor support */
		"idn",
		LIST_HEAD_INIT(hardwall_types[HARDWALL_IDN].list),
		__SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_IDN].lock),
		NULL
	},
	{  /* access to user-space IPI */
		2,
		0,
		0,
		0,
		"ipi",
		LIST_HEAD_INIT(hardwall_types[HARDWALL_IPI].list),
		__SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_IPI].lock),
		NULL
	},
#endif
};

/*
 * This data structure tracks the cpu data, etc., associated
 * one-to-one with a "struct file *" from opening a hardwall device file.
 * Note that the file's private data points back to this structure.
 */
struct hardwall_info {
	struct list_head list;        /* for hardwall_types.list */
	struct list_head task_head;   /* head of tasks in this hardwall */
	struct hardwall_type *type;   /* type of this resource */
	struct cpumask cpumask;       /* cpus reserved */
	int id;                       /* integer id for this hardwall */
	int teardown_in_progress;     /* are we tearing this one down? */

	/* Remaining fields only valid for user-network resources. */
	int ulhc_x;                   /* upper left hand corner x coord */
	int ulhc_y;                   /* upper left hand corner y coord */
	int width;                    /* rectangle width */
	int height;                   /* rectangle height */
#if CHIP_HAS_REV1_XDN()
	atomic_t xdn_pending_count;   /* cores in phase 1 of drain */
#endif
};

/* /proc/tile/hardwall */
static struct proc_dir_entry *hardwall_proc_dir;

/* Functions to manage files in /proc/tile/hardwall. */
static void hardwall_add_proc(struct hardwall_info *);
static void hardwall_remove_proc(struct hardwall_info *);

/* Allow disabling UDN access. */
static int __init noudn(char *str)
{
	pr_info("User-space UDN access is disabled\n");
	hardwall_types[HARDWALL_UDN].disabled = 1;
	return 0;
}
early_param("noudn", noudn);

#ifndef __tilepro__
/* Allow disabling IDN access. */
static int __init noidn(char *str)
{
	pr_info("User-space IDN access is disabled\n");
	hardwall_types[HARDWALL_IDN].disabled = 1;
	return 0;
}
early_param("noidn", noidn);

/* Allow disabling IPI access. */
static int __init noipi(char *str)
{
	pr_info("User-space IPI access is disabled\n");
	hardwall_types[HARDWALL_IPI].disabled = 1;
	return 0;
}
early_param("noipi", noipi);
#endif

/*
 * Low-level primitives for UDN/IDN
 */

#ifdef __tilepro__
#define mtspr_XDN(hwt, name, val) \
	do { (void)(hwt); __insn_mtspr(SPR_UDN_##name, (val)); } while (0)
#define mtspr_MPL_XDN(hwt, name, val) \
	do { (void)(hwt); __insn_mtspr(SPR_MPL_UDN_##name, (val)); } while (0)
#define mfspr_XDN(hwt, name) \
	((void)(hwt), __insn_mfspr(SPR_UDN_##name))
#else
#define mtspr_XDN(hwt, name, val) \
	do { \
		if ((hwt)->is_idn) \
			__insn_mtspr(SPR_IDN_##name, (val)); \
		else \
			__insn_mtspr(SPR_UDN_##name, (val)); \
	} while (0)
#define mtspr_MPL_XDN(hwt, name, val) \
	do { \
		if ((hwt)->is_idn) \
			__insn_mtspr(SPR_MPL_IDN_##name, (val)); \
		else \
			__insn_mtspr(SPR_MPL_UDN_##name, (val)); \
	} while (0)
#define mfspr_XDN(hwt, name) \
	((hwt)->is_idn ? __insn_mfspr(SPR_IDN_##name) : __insn_mfspr(SPR_UDN_##name))
#endif

/* Set a CPU bit if the CPU is online. */
#define cpu_online_set(cpu, dst) do { \
	if (cpu_online(cpu)) \
		cpumask_set_cpu(cpu, dst); \
} while (0)

/* Does the given rectangle contain the given x,y coordinate? */
static int contains(struct hardwall_info *r, int x, int y)
{
	return (x >= r->ulhc_x && x < r->ulhc_x + r->width) &&
		(y >= r->ulhc_y && y < r->ulhc_y + r->height);
}

/* Compute the rectangle parameters and validate the cpumask. */
static int check_rectangle(struct hardwall_info *r, struct cpumask *mask)
{
	int x, y, cpu, ulhc, lrhc;

	/* The first cpu is the ULHC, the last the LRHC. */
	ulhc = find_first_bit(cpumask_bits(mask), nr_cpumask_bits);
	lrhc = find_last_bit(cpumask_bits(mask), nr_cpumask_bits);

	/* Compute the rectangle attributes from the cpus. */
	r->ulhc_x = cpu_x(ulhc);
	r->ulhc_y = cpu_y(ulhc);
	r->width = cpu_x(lrhc) - r->ulhc_x + 1;
	r->height = cpu_y(lrhc) - r->ulhc_y + 1;

	/* Width and height must be positive */
	if (r->width <= 0 || r->height <= 0)
		return -EINVAL;

	/* Confirm that the cpumask is exactly the rectangle. */
	for (y = 0, cpu = 0; y < smp_height; ++y)
		for (x = 0; x < smp_width; ++x, ++cpu)
			if (cpumask_test_cpu(cpu, mask) != contains(r, x, y))
				return -EINVAL;

	/*
	 * Note that offline cpus can't be drained when this user network
	 * rectangle eventually closes.  We used to detect this
	 * situation and print a warning, but it annoyed users and
	 * they ignored it anyway, so now we just return without a
	 * warning.
	 */
	return 0;
}

/*
 * Hardware management of hardwall setup, teardown, trapping,
 * and enabling/disabling PL0 access to the networks.
 */

/* Bit field values to mask together for writes to SPR_XDN_DIRECTION_PROTECT */
enum direction_protect {
	N_PROTECT = (1 << 0),
	E_PROTECT = (1 << 1),
	S_PROTECT = (1 << 2),
	W_PROTECT = (1 << 3),
	C_PROTECT = (1 << 4),
};

static inline int xdn_which_interrupt(struct hardwall_type *hwt)
{
#ifndef __tilepro__
	if (hwt->is_idn)
		return INT_IDN_FIREWALL;
#endif
	return INT_UDN_FIREWALL;
}

static void enable_firewall_interrupts(struct hardwall_type *hwt)
{
	arch_local_irq_unmask_now(xdn_which_interrupt(hwt));
}

static void disable_firewall_interrupts(struct hardwall_type *hwt)
{
	arch_local_irq_mask_now(xdn_which_interrupt(hwt));
}

/* Set up hardwall on this cpu based on the passed hardwall_info. */
static void hardwall_setup_func(void *info)
{
	struct hardwall_info *r = info;
	struct hardwall_type *hwt = r->type;

	int cpu = smp_processor_id();  /* on_each_cpu disables preemption */
	int x = cpu_x(cpu);
	int y = cpu_y(cpu);
	int bits = 0;
	if (x == r->ulhc_x)
		bits |= W_PROTECT;
	if (x == r->ulhc_x + r->width - 1)
		bits |= E_PROTECT;
	if (y == r->ulhc_y)
		bits |= N_PROTECT;
	if (y == r->ulhc_y + r->height - 1)
		bits |= S_PROTECT;
	BUG_ON(bits == 0);
	mtspr_XDN(hwt, DIRECTION_PROTECT, bits);
	enable_firewall_interrupts(hwt);
}

/* Set up all cpus on edge of rectangle to enable/disable hardwall SPRs. */
static void hardwall_protect_rectangle(struct hardwall_info *r)
{
	int x, y, cpu, delta;
	struct cpumask rect_cpus;

	cpumask_clear(&rect_cpus);

	/* First include the top and bottom edges */
	cpu = r->ulhc_y * smp_width + r->ulhc_x;
	delta = (r->height - 1) * smp_width;
	for (x = 0; x < r->width; ++x, ++cpu) {
		cpu_online_set(cpu, &rect_cpus);
		cpu_online_set(cpu + delta, &rect_cpus);
	}

	/* Then the left and right edges */
	cpu -= r->width;
	delta = r->width - 1;
	for (y = 0; y < r->height; ++y, cpu += smp_width) {
		cpu_online_set(cpu, &rect_cpus);
		cpu_online_set(cpu + delta, &rect_cpus);
	}

	/* Then tell all the cpus to set up their protection SPR */
	on_each_cpu_mask(&rect_cpus, hardwall_setup_func, r, 1);
}

/* Entered from INT_xDN_FIREWALL interrupt vector with irqs disabled. */
void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
{
	struct hardwall_info *rect;
	struct hardwall_type *hwt;
	struct task_struct *p;
	struct siginfo info;
	int cpu = smp_processor_id();
	int found_processes;
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();

	/* Figure out which network trapped. */
	switch (fault_num) {
#ifndef __tilepro__
	case INT_IDN_FIREWALL:
		hwt = &hardwall_types[HARDWALL_IDN];
		break;
#endif
	case INT_UDN_FIREWALL:
		hwt = &hardwall_types[HARDWALL_UDN];
		break;
	default:
		BUG();
	}
	BUG_ON(hwt->disabled);

	/* This tile trapped a network access; find the rectangle. */
	spin_lock(&hwt->lock);
	list_for_each_entry(rect, &hwt->list, list) {
		if (cpumask_test_cpu(cpu, &rect->cpumask))
			break;
	}

	/*
	 * It shouldn't be possible not to find this cpu on the
	 * rectangle list, since only cpus in rectangles get hardwalled.
	 * The hardwall is only removed after the user network is drained.
	 */
	BUG_ON(&rect->list == &hwt->list);

	/*
	 * If we already started teardown on this hardwall, don't worry;
	 * the abort signal has been sent and we are just waiting for things
	 * to quiesce.
	 */
	if (rect->teardown_in_progress) {
		pr_notice("cpu %d: detected %s hardwall violation %#lx while teardown already in progress\n",
			  cpu, hwt->name,
			  (long)mfspr_XDN(hwt, DIRECTION_PROTECT));
		goto done;
	}

	/*
	 * Kill off any process that is activated in this rectangle.
	 * We bypass security to deliver the signal, since it must be
	 * one of the activated processes that generated the user network
	 * message that caused this trap, and all the activated
	 * processes shared a single open file so are pretty tightly
	 * bound together from a security point of view to begin with.
	 */
	rect->teardown_in_progress = 1;
	wmb(); /* Ensure visibility of rectangle before notifying processes. */
	pr_notice("cpu %d: detected %s hardwall violation %#lx...\n",
		  cpu, hwt->name, (long)mfspr_XDN(hwt, DIRECTION_PROTECT));
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_HARDWALL;
	found_processes = 0;
	list_for_each_entry(p, &rect->task_head,
			    thread.hardwall[hwt->index].list) {
		BUG_ON(p->thread.hardwall[hwt->index].info != rect);
		if (!(p->flags & PF_EXITING)) {
			found_processes = 1;
			pr_notice("hardwall: killing %d\n", p->pid);
			do_send_sig_info(info.si_signo, &info, p, false);
		}
	}
	if (!found_processes)
		pr_notice("hardwall: no associated processes!\n");

 done:
	spin_unlock(&hwt->lock);

	/*
	 * We have to disable firewall interrupts now, or else when we
	 * return from this handler, we will simply re-interrupt back to
	 * it.  However, we can't clear the protection bits, since we
	 * haven't yet drained the network, and that would allow packets
	 * to cross out of the hardwall region.
	 */
	disable_firewall_interrupts(hwt);

	irq_exit();
	set_irq_regs(old_regs);
}

/* Allow access from user space to the user network. */
void grant_hardwall_mpls(struct hardwall_type *hwt)
{
#ifndef __tilepro__
	if (!hwt->is_xdn) {
		__insn_mtspr(SPR_MPL_IPI_0_SET_0, 1);
		return;
	}
#endif
	mtspr_MPL_XDN(hwt, ACCESS_SET_0, 1);
	mtspr_MPL_XDN(hwt, AVAIL_SET_0, 1);
	mtspr_MPL_XDN(hwt, COMPLETE_SET_0, 1);
	mtspr_MPL_XDN(hwt, TIMER_SET_0, 1);
#if !CHIP_HAS_REV1_XDN()
	mtspr_MPL_XDN(hwt, REFILL_SET_0, 1);
	mtspr_MPL_XDN(hwt, CA_SET_0, 1);
#endif
}

/* Deny access from user space to the user network. */
void restrict_hardwall_mpls(struct hardwall_type *hwt)
{
#ifndef __tilepro__
	if (!hwt->is_xdn) {
		__insn_mtspr(SPR_MPL_IPI_0_SET_1, 1);
		return;
	}
#endif
	mtspr_MPL_XDN(hwt, ACCESS_SET_1, 1);
	mtspr_MPL_XDN(hwt, AVAIL_SET_1, 1);
	mtspr_MPL_XDN(hwt, COMPLETE_SET_1, 1);
	mtspr_MPL_XDN(hwt, TIMER_SET_1, 1);
#if !CHIP_HAS_REV1_XDN()
	mtspr_MPL_XDN(hwt, REFILL_SET_1, 1);
	mtspr_MPL_XDN(hwt, CA_SET_1, 1);
#endif
}

/* Restrict or deny as necessary for the task we're switching to. */
void hardwall_switch_tasks(struct task_struct *prev,
			   struct task_struct *next)
{
	int i;
	for (i = 0; i < HARDWALL_TYPES; ++i) {
		if (prev->thread.hardwall[i].info != NULL) {
			if (next->thread.hardwall[i].info == NULL)
				restrict_hardwall_mpls(&hardwall_types[i]);
		} else if (next->thread.hardwall[i].info != NULL) {
			grant_hardwall_mpls(&hardwall_types[i]);
		}
	}
}

/* Does this task have the right to IPI the given cpu? */
int hardwall_ipi_valid(int cpu)
{
#ifdef __tilegx__
	struct hardwall_info *info =
		current->thread.hardwall[HARDWALL_IPI].info;
	return info && cpumask_test_cpu(cpu, &info->cpumask);
#else
	return 0;
#endif
}

/*
 * Code to create, activate, deactivate, and destroy hardwall resources.
 */

/* Create a hardwall for the given resource */
static struct hardwall_info *hardwall_create(struct hardwall_type *hwt,
					     size_t size,
					     const unsigned char __user *bits)
{
	struct hardwall_info *iter, *info;
	struct cpumask mask;
	unsigned long flags;
	int rc;

	/* Reject crazy sizes out of hand, a la sys_mbind(). */
	if (size > PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	/* Copy whatever fits into a cpumask. */
	if (copy_from_user(&mask, bits, min(sizeof(struct cpumask), size)))
		return ERR_PTR(-EFAULT);

	/*
	 * If the size was short, clear the rest of the mask;
	 * otherwise validate that the rest of the user mask was zero
	 * (we don't try hard to be efficient when validating huge masks).
	 */
	if (size < sizeof(struct cpumask)) {
		memset((char *)&mask + size, 0, sizeof(struct cpumask) - size);
	} else if (size > sizeof(struct cpumask)) {
		size_t i;
		for (i = sizeof(struct cpumask); i < size; ++i) {
			char c;
			if (get_user(c, &bits[i]))
				return ERR_PTR(-EFAULT);
			if (c)
				return ERR_PTR(-EINVAL);
		}
	}

	/* Allocate a new hardwall_info optimistically. */
	info = kmalloc(sizeof(struct hardwall_info),
			GFP_KERNEL | __GFP_ZERO);
	if (info == NULL)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&info->task_head);
	info->type = hwt;

	/* Compute the rectangle size and validate that it's plausible. */
	cpumask_copy(&info->cpumask, &mask);
	info->id = find_first_bit(cpumask_bits(&mask), nr_cpumask_bits);
	if (hwt->is_xdn) {
		rc = check_rectangle(info, &mask);
		if (rc != 0) {
			kfree(info);
			return ERR_PTR(rc);
		}
	}

	/*
	 * Eliminate cpus that are not part of this Linux client.
	 * Note that this allows for configurations that we might not want to
	 * support, such as one client on every even cpu, another client on
	 * every odd cpu.
	 */
	cpumask_and(&info->cpumask, &info->cpumask, cpu_online_mask);

	/* Confirm it doesn't overlap and add it to the list. */
	spin_lock_irqsave(&hwt->lock, flags);
	list_for_each_entry(iter, &hwt->list, list) {
		if (cpumask_intersects(&iter->cpumask, &info->cpumask)) {
			spin_unlock_irqrestore(&hwt->lock, flags);
			kfree(info);
			return ERR_PTR(-EBUSY);
		}
	}
	list_add_tail(&info->list, &hwt->list);
	spin_unlock_irqrestore(&hwt->lock, flags);

	/* Set up appropriate hardwalling on all affected cpus. */
	if (hwt->is_xdn)
		hardwall_protect_rectangle(info);

	/* Create a /proc/tile/hardwall entry. */
	hardwall_add_proc(info);
	return info;
}

/* Activate a given hardwall on this cpu for this process. */
static int hardwall_activate(struct hardwall_info *info)
{
	int cpu;
	unsigned long flags;
	struct task_struct *p = current;
	struct thread_struct *ts = &p->thread;
	struct hardwall_type *hwt;

	/* Require a hardwall. */
	if (info == NULL)
		return -ENODATA;

	/* Not allowed to activate a hardwall that is being torn down. */
	if (info->teardown_in_progress)
		return -EINVAL;

	/*
	 * Get our affinity; if we're not bound to this tile uniquely,
	 * we can't access the network registers.
	 */
	if (cpumask_weight(&p->cpus_allowed) != 1)
		return -EPERM;

	/* Make sure we are bound to a cpu assigned to this resource. */
	cpu = smp_processor_id();
	BUG_ON(cpumask_first(&p->cpus_allowed) != cpu);
	if (!cpumask_test_cpu(cpu, &info->cpumask))
		return -EINVAL;

	/* If we are already bound to this hardwall, it's a no-op. */
	hwt = info->type;
	if (ts->hardwall[hwt->index].info) {
		BUG_ON(ts->hardwall[hwt->index].info != info);
		return 0;
	}

	/* Success!  This process gets to use the resource on this cpu. */
	ts->hardwall[hwt->index].info = info;
	spin_lock_irqsave(&hwt->lock, flags);
	list_add(&ts->hardwall[hwt->index].list, &info->task_head);
	spin_unlock_irqrestore(&hwt->lock, flags);
	grant_hardwall_mpls(hwt);
	printk(KERN_DEBUG "Pid %d (%s) activated for %s hardwall: cpu %d\n",
	       p->pid, p->comm, hwt->name, cpu);
	return 0;
}

/*
 * Deactivate a task's hardwall.  Must hold lock for hardwall_type.
 * This method may be called from exit_thread(), so we don't want to
 * rely on too many fields of struct task_struct still being valid.
 * We assume the cpus_allowed, pid, and comm fields are still valid.
 */
static void _hardwall_deactivate(struct hardwall_type *hwt,
				 struct task_struct *task)
{
	struct thread_struct *ts = &task->thread;

	if (cpumask_weight(&task->cpus_allowed) != 1) {
		pr_err("pid %d (%s) releasing %s hardwall with an affinity mask containing %d cpus!\n",
		       task->pid, task->comm, hwt->name,
		       cpumask_weight(&task->cpus_allowed));
		BUG();
	}

	BUG_ON(ts->hardwall[hwt->index].info == NULL);
	ts->hardwall[hwt->index].info = NULL;
	list_del(&ts->hardwall[hwt->index].list);
	if (task == current)
		restrict_hardwall_mpls(hwt);
}

/* Deactivate a task's hardwall. */
static int hardwall_deactivate(struct hardwall_type *hwt,
			       struct task_struct *task)
{
	unsigned long flags;
	int activated;

	spin_lock_irqsave(&hwt->lock, flags);
	activated = (task->thread.hardwall[hwt->index].info != NULL);
	if (activated)
		_hardwall_deactivate(hwt, task);
	spin_unlock_irqrestore(&hwt->lock, flags);

	if (!activated)
		return -EINVAL;

	printk(KERN_DEBUG "Pid %d (%s) deactivated for %s hardwall: cpu %d\n",
	       task->pid, task->comm, hwt->name, raw_smp_processor_id());
	return 0;
}

void hardwall_deactivate_all(struct task_struct *task)
{
	int i;
	for (i = 0; i < HARDWALL_TYPES; ++i)
		if (task->thread.hardwall[i].info)
			hardwall_deactivate(&hardwall_types[i], task);
}

/* Stop the switch before draining the network. */
static void stop_xdn_switch(void *arg)
{
#if !CHIP_HAS_REV1_XDN()
	/* Freeze the switch and the demux. */
	__insn_mtspr(SPR_UDN_SP_FREEZE,
		     SPR_UDN_SP_FREEZE__SP_FRZ_MASK |
		     SPR_UDN_SP_FREEZE__DEMUX_FRZ_MASK |
		     SPR_UDN_SP_FREEZE__NON_DEST_EXT_MASK);
#else
	/*
	 * Drop all packets bound for the core or off the edge.
	 * We rely on the normal hardwall protection setup code
	 * to have set the low four bits to trigger firewall interrupts,
	 * and shift those bits up to trigger "drop on send" semantics,
	 * plus adding "drop on send to core" for all switches.
	 * In practice it seems the switches latch the DIRECTION_PROTECT
	 * SPR so they won't start dropping if they're already
	 * delivering the last message to the core, but it doesn't
	 * hurt to enable it here.
	 */
	struct hardwall_type *hwt = arg;
	unsigned long protect = mfspr_XDN(hwt, DIRECTION_PROTECT);
	mtspr_XDN(hwt, DIRECTION_PROTECT, (protect | C_PROTECT) << 5);
#endif
}

static void empty_xdn_demuxes(struct hardwall_type *hwt)
{
#ifndef __tilepro__
	if (hwt->is_idn) {
		while (__insn_mfspr(SPR_IDN_DATA_AVAIL) & (1 << 0))
			(void) __tile_idn0_receive();
		while (__insn_mfspr(SPR_IDN_DATA_AVAIL) & (1 << 1))
			(void) __tile_idn1_receive();
		return;
	}
#endif
	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 0))
		(void) __tile_udn0_receive();
	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 1))
		(void) __tile_udn1_receive();
	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 2))
		(void) __tile_udn2_receive();
	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 3))
		(void) __tile_udn3_receive();
}

/* Drain all the state from a stopped switch. */
static void drain_xdn_switch(void *arg)
{
	struct hardwall_info *info = arg;
	struct hardwall_type *hwt = info->type;

#if CHIP_HAS_REV1_XDN()
	/*
	 * The switches have been configured to drop any messages
	 * destined for cores (or off the edge of the rectangle).
	 * But the current message may continue to be delivered,
	 * so we wait until all the cores have finished any pending
	 * messages before we stop draining.
	 */
	int pending = mfspr_XDN(hwt, PENDING);
	while (pending--) {
		empty_xdn_demuxes(hwt);
		if (hwt->is_idn)
			__tile_idn_send(0);
		else
			__tile_udn_send(0);
	}
	atomic_dec(&info->xdn_pending_count);
	while (atomic_read(&info->xdn_pending_count))
		empty_xdn_demuxes(hwt);
#else
	int i;
	int from_tile_words, ca_count;

	/* Empty out the 5 switch point fifos. */
	for (i = 0; i < 5; i++) {
		int words, j;
		__insn_mtspr(SPR_UDN_SP_FIFO_SEL, i);
		words = __insn_mfspr(SPR_UDN_SP_STATE) & 0xF;
		for (j = 0; j < words; j++)
			(void) __insn_mfspr(SPR_UDN_SP_FIFO_DATA);
		BUG_ON((__insn_mfspr(SPR_UDN_SP_STATE) & 0xF) != 0);
	}

	/* Dump out the 3 word fifo at top. */
	from_tile_words = (__insn_mfspr(SPR_UDN_DEMUX_STATUS) >> 10) & 0x3;
	for (i = 0; i < from_tile_words; i++)
		(void) __insn_mfspr(SPR_UDN_DEMUX_WRITE_FIFO);

	/* Empty out demuxes. */
	empty_xdn_demuxes(hwt);

	/* Empty out catch all. */
	ca_count = __insn_mfspr(SPR_UDN_DEMUX_CA_COUNT);
	for (i = 0; i < ca_count; i++)
		(void) __insn_mfspr(SPR_UDN_CA_DATA);
	BUG_ON(__insn_mfspr(SPR_UDN_DEMUX_CA_COUNT) != 0);

	/* Clear demux logic. */
	__insn_mtspr(SPR_UDN_DEMUX_CTL, 1);

	/*
	 * Write switch state; experimentation indicates that 0xc3000
	 * is an idle switch point.
	 */
	for (i = 0; i < 5; i++) {
		__insn_mtspr(SPR_UDN_SP_FIFO_SEL, i);
		__insn_mtspr(SPR_UDN_SP_STATE, 0xc3000);
	}
#endif
}

/* Reset random XDN state registers at boot up and during hardwall teardown. */
static void reset_xdn_network_state(struct hardwall_type *hwt)
{
	if (hwt->disabled)
		return;

	/* Clear out other random registers so we have a clean slate. */
	mtspr_XDN(hwt, DIRECTION_PROTECT, 0);
	mtspr_XDN(hwt, AVAIL_EN, 0);
	mtspr_XDN(hwt, DEADLOCK_TIMEOUT, 0);

#if !CHIP_HAS_REV1_XDN()
	/* Reset UDN coordinates to their standard value */
	{
		unsigned int cpu = smp_processor_id();
		unsigned int x = cpu_x(cpu);
		unsigned int y = cpu_y(cpu);
		__insn_mtspr(SPR_UDN_TILE_COORD, (x << 18) | (y << 7));
	}

	/* Set demux tags to predefined values and enable them. */
	__insn_mtspr(SPR_UDN_TAG_VALID, 0xf);
	__insn_mtspr(SPR_UDN_TAG_0, (1 << 0));
	__insn_mtspr(SPR_UDN_TAG_1, (1 << 1));
	__insn_mtspr(SPR_UDN_TAG_2, (1 << 2));
	__insn_mtspr(SPR_UDN_TAG_3, (1 << 3));

	/* Set other rev0 random registers to a clean state. */
	__insn_mtspr(SPR_UDN_REFILL_EN, 0);
	__insn_mtspr(SPR_UDN_DEMUX_QUEUE_SEL, 0);
	__insn_mtspr(SPR_UDN_SP_FIFO_SEL, 0);

	/* Start the switch and demux. */
	__insn_mtspr(SPR_UDN_SP_FREEZE, 0);
#endif
}

void reset_network_state(void)
{
	reset_xdn_network_state(&hardwall_types[HARDWALL_UDN]);
#ifndef __tilepro__
	reset_xdn_network_state(&hardwall_types[HARDWALL_IDN]);
#endif
}

/* Restart an XDN switch after draining. */
static void restart_xdn_switch(void *arg)
{
	struct hardwall_type *hwt = arg;

#if CHIP_HAS_REV1_XDN()
	/* One last drain step to avoid races with injection and draining. */
	empty_xdn_demuxes(hwt);
#endif

	reset_xdn_network_state(hwt);

	/* Disable firewall interrupts. */
	disable_firewall_interrupts(hwt);
}

/* Last reference to a hardwall is gone, so clear the network. */
static void hardwall_destroy(struct hardwall_info *info)
{
	struct task_struct *task;
	struct hardwall_type *hwt;
	unsigned long flags;

	/* Make sure this file actually represents a hardwall. */
	if (info == NULL)
		return;

	/*
	 * Deactivate any remaining tasks.  It's possible to race with
	 * some other thread that is exiting and hasn't yet called
	 * deactivate (when freeing its thread_info), so we carefully
	 * deactivate any remaining tasks before freeing the
	 * hardwall_info object itself.
	 */
	hwt = info->type;
	info->teardown_in_progress = 1;
	spin_lock_irqsave(&hwt->lock, flags);
	list_for_each_entry(task, &info->task_head,
			    thread.hardwall[hwt->index].list)
		_hardwall_deactivate(hwt, task);
	spin_unlock_irqrestore(&hwt->lock, flags);

	if (hwt->is_xdn) {
		/* Configure the switches for draining the user network. */
		printk(KERN_DEBUG
		       "Clearing %s hardwall rectangle %dx%d %d,%d\n",
		       hwt->name, info->width, info->height,
		       info->ulhc_x, info->ulhc_y);
		on_each_cpu_mask(&info->cpumask, stop_xdn_switch, hwt, 1);

		/* Drain the network. */
#if CHIP_HAS_REV1_XDN()
		atomic_set(&info->xdn_pending_count,
			   cpumask_weight(&info->cpumask));
		on_each_cpu_mask(&info->cpumask, drain_xdn_switch, info, 0);
#else
		on_each_cpu_mask(&info->cpumask, drain_xdn_switch, info, 1);
#endif

		/* Restart switch and disable firewall. */
		on_each_cpu_mask(&info->cpumask, restart_xdn_switch, hwt, 1);
	}

	/* Remove the /proc/tile/hardwall entry. */
	hardwall_remove_proc(info);

	/* Now free the hardwall from the list. */
	spin_lock_irqsave(&hwt->lock, flags);
	BUG_ON(!list_empty(&info->task_head));
	list_del(&info->list);
	spin_unlock_irqrestore(&hwt->lock, flags);
	kfree(info);
}

static int hardwall_proc_show(struct seq_file *sf, void *v)
{
	struct hardwall_info *info = sf->private;

	seq_printf(sf, "%*pbl\n", cpumask_pr_args(&info->cpumask));
	return 0;
}

static int hardwall_proc_open(struct inode *inode,
			      struct file *file)
{
	return single_open(file, hardwall_proc_show, PDE_DATA(inode));
}

static const struct file_operations hardwall_proc_fops = {
	.open = hardwall_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static void hardwall_add_proc(struct hardwall_info *info)
{
	char buf[64];
	snprintf(buf, sizeof(buf), "%d", info->id);
	proc_create_data(buf, 0444, info->type->proc_dir,
			 &hardwall_proc_fops, info);
}

static void hardwall_remove_proc(struct hardwall_info *info)
{
	char buf[64];
	snprintf(buf, sizeof(buf), "%d", info->id);
	remove_proc_entry(buf, info->type->proc_dir);
}

int proc_pid_hardwall(struct seq_file *m, struct pid_namespace *ns,
		      struct pid *pid, struct task_struct *task)
{
	int i;
	int n = 0;

	for (i = 0; i < HARDWALL_TYPES; ++i) {
		struct hardwall_info *info = task->thread.hardwall[i].info;
		if (info)
			seq_printf(m, "%s: %d\n", info->type->name, info->id);
	}
	return n;
}

void proc_tile_hardwall_init(struct proc_dir_entry *root)
{
	int i;
	for (i = 0; i < HARDWALL_TYPES; ++i) {
		struct hardwall_type *hwt = &hardwall_types[i];
		if (hwt->disabled)
			continue;
		if (hardwall_proc_dir == NULL)
			hardwall_proc_dir = proc_mkdir("hardwall", root);
		hwt->proc_dir = proc_mkdir(hwt->name, hardwall_proc_dir);
	}
}

/*
 * Character device support via ioctl/close.
 */

static long hardwall_ioctl(struct file *file, unsigned int a, unsigned long b)
{
	struct hardwall_info *info = file->private_data;
	int minor = iminor(file->f_mapping->host);
	struct hardwall_type* hwt;

	if (_IOC_TYPE(a) != HARDWALL_IOCTL_BASE)
		return -EINVAL;

	BUILD_BUG_ON(HARDWALL_TYPES != _HARDWALL_TYPES);
	BUILD_BUG_ON(HARDWALL_TYPES !=
		     sizeof(hardwall_types)/sizeof(hardwall_types[0]));

	if (minor < 0 || minor >= HARDWALL_TYPES)
		return -EINVAL;
	hwt = &hardwall_types[minor];
	WARN_ON(info && hwt != info->type);

	switch (_IOC_NR(a)) {
	case _HARDWALL_CREATE:
		if (hwt->disabled)
			return -ENOSYS;
		if (info != NULL)
			return -EALREADY;
		info = hardwall_create(hwt, _IOC_SIZE(a),
				       (const unsigned char __user *)b);
		if (IS_ERR(info))
			return PTR_ERR(info);
		file->private_data = info;
		return 0;

	case _HARDWALL_ACTIVATE:
		return hardwall_activate(info);

	case _HARDWALL_DEACTIVATE:
		if (current->thread.hardwall[hwt->index].info != info)
			return -EINVAL;
		return hardwall_deactivate(hwt, current);

	case _HARDWALL_GET_ID:
		return info ? info->id : -EINVAL;

	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_COMPAT
static long hardwall_compat_ioctl(struct file *file,
				  unsigned int a, unsigned long b)
{
	/* Sign-extend the argument so it can be used as a pointer. */
	return hardwall_ioctl(file, a, (unsigned long)compat_ptr(b));
}
#endif

/* The user process closed the file; revoke access to user networks. */
static int hardwall_flush(struct file *file, fl_owner_t owner)
{
	struct hardwall_info *info = file->private_data;
	struct task_struct *task, *tmp;
	unsigned long flags;

	if (info) {
		/*
		 * NOTE: if multiple threads are activated on this hardwall
		 * file, the other threads will continue having access to the
		 * user network until they are context-switched out and back
		 * in again.
		 *
		 * NOTE: A NULL files pointer means the task is being torn
		 * down, so in that case we also deactivate it.
		 */
		struct hardwall_type *hwt = info->type;
		spin_lock_irqsave(&hwt->lock, flags);
		list_for_each_entry_safe(task, tmp, &info->task_head,
					 thread.hardwall[hwt->index].list) {
			if (task->files == owner || task->files == NULL)
				_hardwall_deactivate(hwt, task);
		}
		spin_unlock_irqrestore(&hwt->lock, flags);
	}

	return 0;
}

/* This hardwall is gone, so destroy it. */
static int hardwall_release(struct inode *inode, struct file *file)
{
	hardwall_destroy(file->private_data);
	return 0;
}

static const struct file_operations dev_hardwall_fops = {
	.open = nonseekable_open,
	.unlocked_ioctl = hardwall_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = hardwall_compat_ioctl,
#endif
	.flush = hardwall_flush,
	.release = hardwall_release,
};

static struct cdev hardwall_dev;

static int __init dev_hardwall_init(void)
{
	int rc;
	dev_t dev;

	rc = alloc_chrdev_region(&dev, 0, HARDWALL_TYPES, "hardwall");
	if (rc < 0)
		return rc;
	cdev_init(&hardwall_dev, &dev_hardwall_fops);
	rc = cdev_add(&hardwall_dev, dev, HARDWALL_TYPES);
	if (rc < 0)
		return rc;

	return 0;
}
late_initcall(dev_hardwall_init);
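
/*
 * Illustrative user-space sketch (not part of the original hardwall.c):
 * one plausible way to drive the ioctl interface that hardwall_ioctl()
 * implements.  The device node path and the use of <asm/hardwall.h> from
 * user space are assumptions; only the request layout (type ==
 * HARDWALL_IOCTL_BASE, nr == _HARDWALL_CREATE/_HARDWALL_ACTIVATE, cpumask
 * size carried in _IOC_SIZE) and the "pin to exactly one reserved cpu
 * before activating" requirement are taken from the handlers above.
 */
#if 0	/* example only; never built as part of the kernel */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>
#include <asm/hardwall.h>	/* HARDWALL_IOCTL_BASE, _HARDWALL_* numbers */

static int hardwall_reserve_and_activate(const char *dev_path,
					 const cpu_set_t *cpus, int my_cpu)
{
	cpu_set_t one;
	int fd = open(dev_path, O_RDWR);	/* e.g. the UDN hardwall device */
	if (fd < 0)
		return -1;

	/* Reserve the cpus; for UDN/IDN the mask must form a full rectangle. */
	if (ioctl(fd, _IOC(_IOC_READ, HARDWALL_IOCTL_BASE, _HARDWALL_CREATE,
			   sizeof(*cpus)), cpus) < 0)
		return -1;

	/* hardwall_activate() requires affinity to exactly one reserved cpu. */
	CPU_ZERO(&one);
	CPU_SET(my_cpu, &one);
	if (sched_setaffinity(0, sizeof(one), &one) < 0)
		return -1;

	/* Grant this task user-level access to the network on this cpu. */
	if (ioctl(fd, _IO(HARDWALL_IOCTL_BASE, _HARDWALL_ACTIVATE)) < 0)
		return -1;

	return fd;	/* closing the fd later drains and tears down the wall */
}
#endif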