/******************************************************************************
 * xen_intr.c
 *
 * Xen event and interrupt services for x86 HVM guests.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005, Intel Corporation <xiaofeng.ling@intel.com>
 * Copyright (c) 2012, Spectra Logic Corporation
 *
 * This file may be distributed separately from the Linux kernel, or
 * incorporated into other software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/interrupt.h>
#include <sys/pcpu.h>
#include <sys/smp.h>
#include <sys/refcount.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/intr_machdep.h>
#include <x86/apicvar.h>
#include <x86/apicreg.h>
#include <machine/smp.h>
#include <machine/stdarg.h>

#include <machine/xen/synch_bitops.h>
#include <machine/xen/xen-os.h>

#include <xen/xen-os.h>
#include <xen/hvm.h>
#include <xen/hypervisor.h>
#include <xen/xen_intr.h>
#include <xen/evtchn/evtchnvar.h>

#include <dev/xen/xenpci/xenpcivar.h>
#include <dev/pci/pcivar.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

static MALLOC_DEFINE(M_XENINTR, "xen_intr", "Xen Interrupt Services");

static u_int first_evtchn_irq;

/**
 * Per-cpu event channel processing state.
 */
struct xen_intr_pcpu_data {
    /**
     * The last event channel bitmap section (level one bit) processed.
     * This is used to ensure we scan all ports before
     * servicing an already serviced port again.
     */
    u_int last_processed_l1i;

    /**
     * The last event channel processed within the event channel
     * bitmap being scanned.
     */
    u_int last_processed_l2i;

    /** Pointer to this CPU's interrupt statistic counter. */
    u_long *evtchn_intrcnt;

    /**
     * A bitmap of ports that can be serviced from this CPU.
     * A set bit means interrupt handling is enabled.
     */
    u_long evtchn_enabled[sizeof(u_long) * 8];
};

/*
 * Start the scan at port 0 by initializing the last scanned
 * location as the highest numbered event channel port.
 */
DPCPU_DEFINE_STATIC(struct xen_intr_pcpu_data, xen_intr_pcpu) = {
    .last_processed_l1i = LONG_BIT - 1,
    .last_processed_l2i = LONG_BIT - 1
};

DPCPU_DECLARE(struct vcpu_info *, vcpu_info);

#define XEN_EEXIST          17 /* Xen "already exists" error */
#define XEN_ALLOCATE_VECTOR 0  /* Allocate a vector for this event channel */
#define XEN_INVALID_EVTCHN  0  /* Invalid event channel */

#define is_valid_evtchn(x)  ((x) != XEN_INVALID_EVTCHN)

struct xenisrc {
    struct intsrc    xi_intsrc;
    enum evtchn_type xi_type;
    int              xi_cpu;       /* VCPU for delivery. */
    int              xi_vector;    /* Global isrc vector number. */
    evtchn_port_t    xi_port;
    int              xi_pirq;
    int              xi_virq;
    void             *xi_cookie;
    u_int            xi_close:1;   /* close on unbind? */
    u_int            xi_activehi:1;
    u_int            xi_edgetrigger:1;
    u_int            xi_masked:1;
    volatile u_int   xi_refcount;
};

static void xen_intr_suspend(struct pic *);
static void xen_intr_resume(struct pic *, bool suspend_cancelled);
static void xen_intr_enable_source(struct intsrc *isrc);
static void xen_intr_disable_source(struct intsrc *isrc, int eoi);
static void xen_intr_eoi_source(struct intsrc *isrc);
static void xen_intr_enable_intr(struct intsrc *isrc);
static void xen_intr_disable_intr(struct intsrc *isrc);
static int  xen_intr_vector(struct intsrc *isrc);
static int  xen_intr_source_pending(struct intsrc *isrc);
static int  xen_intr_config_intr(struct intsrc *isrc,
                enum intr_trigger trig, enum intr_polarity pol);
static int  xen_intr_assign_cpu(struct intsrc *isrc, u_int apic_id);

static void xen_intr_pirq_enable_source(struct intsrc *isrc);
static void xen_intr_pirq_disable_source(struct intsrc *isrc, int eoi);
static void xen_intr_pirq_eoi_source(struct intsrc *isrc);
static void xen_intr_pirq_enable_intr(struct intsrc *isrc);
static void xen_intr_pirq_disable_intr(struct intsrc *isrc);
static int  xen_intr_pirq_config_intr(struct intsrc *isrc,
                enum intr_trigger trig, enum intr_polarity pol);

/**
 * PIC interface for all event channel port types except physical IRQs.
 */
struct pic xen_intr_pic = {
    .pic_enable_source  = xen_intr_enable_source,
    .pic_disable_source = xen_intr_disable_source,
    .pic_eoi_source     = xen_intr_eoi_source,
    .pic_enable_intr    = xen_intr_enable_intr,
    .pic_disable_intr   = xen_intr_disable_intr,
    .pic_vector         = xen_intr_vector,
    .pic_source_pending = xen_intr_source_pending,
    .pic_suspend        = xen_intr_suspend,
    .pic_resume         = xen_intr_resume,
    .pic_config_intr    = xen_intr_config_intr,
    .pic_assign_cpu     = xen_intr_assign_cpu
};

/**
 * PIC interface for all event channels representing
 * physical interrupt sources.
 */
struct pic xen_intr_pirq_pic = {
#ifdef __amd64__
    .pic_register_sources = xenpv_register_pirqs,
#endif
    .pic_enable_source  = xen_intr_pirq_enable_source,
    .pic_disable_source = xen_intr_pirq_disable_source,
    .pic_eoi_source     = xen_intr_pirq_eoi_source,
    .pic_enable_intr    = xen_intr_pirq_enable_intr,
    .pic_disable_intr   = xen_intr_pirq_disable_intr,
    .pic_vector         = xen_intr_vector,
    .pic_source_pending = xen_intr_source_pending,
    .pic_config_intr    = xen_intr_pirq_config_intr,
    .pic_assign_cpu     = xen_intr_assign_cpu
};

static struct mtx      xen_intr_isrc_lock;
static u_int           xen_intr_auto_vector_count;
static struct xenisrc *xen_intr_port_to_isrc[NR_EVENT_CHANNELS];
static u_long         *xen_intr_pirq_eoi_map;
static boolean_t       xen_intr_pirq_eoi_map_enabled;

/*------------------------- Private Functions --------------------------------*/

/**
 * Disable signal delivery for an event channel port on the
 * specified CPU.
 *
 * \param cpu   The CPU on which to mask delivery.
 * \param port  The event channel port to mask.
 *
 * This API is used to manage the port<=>CPU binding of event
 * channel handlers.
 *
 * \note  This operation does not preclude reception of an event
 *        for this event channel on another CPU.  To mask the
 *        event channel globally, use evtchn_mask().
 */
static inline void
evtchn_cpu_mask_port(u_int cpu, evtchn_port_t port)
{
    struct xen_intr_pcpu_data *pcpu;

    pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
    xen_clear_bit(port, pcpu->evtchn_enabled);
}

/**
 * Enable signal delivery for an event channel port on the
 * specified CPU.
 *
 * \param cpu   The CPU on which to unmask delivery.
 * \param port  The event channel port to unmask.
 *
 * This API is used to manage the port<=>CPU binding of event
 * channel handlers.
 *
 * \note  This operation does not guarantee that event delivery
 *        is enabled for this event channel port.  The port must
 *        also be globally enabled.  See evtchn_unmask().
 */
static inline void
evtchn_cpu_unmask_port(u_int cpu, evtchn_port_t port)
{
    struct xen_intr_pcpu_data *pcpu;

    pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
    xen_set_bit(port, pcpu->evtchn_enabled);
}

/**
 * Allocate and register a per-cpu Xen upcall interrupt counter.
 *
 * \param cpu  The cpu for which to register this interrupt count.
 */
static void
xen_intr_intrcnt_add(u_int cpu)
{
    char buf[MAXCOMLEN + 1];
    struct xen_intr_pcpu_data *pcpu;

    pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
    if (pcpu->evtchn_intrcnt != NULL)
        return;

    snprintf(buf, sizeof(buf), "cpu%d:xen", cpu);
    intrcnt_add(buf, &pcpu->evtchn_intrcnt);
}

/**
 * Search for an already allocated but currently unused Xen interrupt
 * source object.
 *
 * \param type  Restrict the search to interrupt sources of the given
 *              type.
 *
 * \return  A pointer to a free Xen interrupt source object or NULL.
 */
static struct xenisrc *
xen_intr_find_unused_isrc(enum evtchn_type type)
{
    int isrc_idx;

    KASSERT(mtx_owned(&xen_intr_isrc_lock), ("Evtchn isrc lock not held"));

    for (isrc_idx = 0; isrc_idx < xen_intr_auto_vector_count; isrc_idx++) {
        struct xenisrc *isrc;
        u_int vector;

        vector = first_evtchn_irq + isrc_idx;
        isrc = (struct xenisrc *)intr_lookup_source(vector);
        if (isrc != NULL
         && isrc->xi_type == EVTCHN_TYPE_UNBOUND) {
            KASSERT(isrc->xi_intsrc.is_handlers == 0,
                ("Free evtchn still has handlers"));
            isrc->xi_type = type;
            return (isrc);
        }
    }
    return (NULL);
}

/**
 * Allocate a Xen interrupt source object.
 *
 * \param type    The type of interrupt source to create.
 * \param vector  The global interrupt vector to use for EVTCHN_TYPE_PIRQ
 *                sources; ignored (auto-allocated) for all other types.
 *
 * \return  A pointer to a newly allocated Xen interrupt source
 *          object or NULL.
 */
static struct xenisrc *
xen_intr_alloc_isrc(enum evtchn_type type, int vector)
{
    static int warned;
    struct xenisrc *isrc;

    KASSERT(mtx_owned(&xen_intr_isrc_lock), ("Evtchn alloc lock not held"));

    if (xen_intr_auto_vector_count > NR_EVENT_CHANNELS) {
        if (!warned) {
            warned = 1;
            printf("xen_intr_alloc: Event channels exhausted.\n");
        }
        return (NULL);
    }

    if (type != EVTCHN_TYPE_PIRQ) {
        vector = first_evtchn_irq + xen_intr_auto_vector_count;
        xen_intr_auto_vector_count++;
    }

    KASSERT((intr_lookup_source(vector) == NULL),
        ("Trying to use an already allocated vector"));

    mtx_unlock(&xen_intr_isrc_lock);
    isrc = malloc(sizeof(*isrc), M_XENINTR, M_WAITOK | M_ZERO);
    isrc->xi_intsrc.is_pic =
        (type == EVTCHN_TYPE_PIRQ) ? &xen_intr_pirq_pic : &xen_intr_pic;
    isrc->xi_vector = vector;
    isrc->xi_type = type;
    intr_register_source(&isrc->xi_intsrc);
    mtx_lock(&xen_intr_isrc_lock);

    return (isrc);
}

/**
 * Attempt to free an active Xen interrupt source object.
 *
 * \param isrc  The interrupt source object to release.
 *
 * \returns  EBUSY if the source is still in use, otherwise 0.
 */
static int
xen_intr_release_isrc(struct xenisrc *isrc)
{

    mtx_lock(&xen_intr_isrc_lock);
    KASSERT(isrc->xi_intsrc.is_handlers == 0,
        ("Release called, but xenisrc still in use"));
    evtchn_mask_port(isrc->xi_port);
    evtchn_clear_port(isrc->xi_port);

    /* Rebind port to CPU 0. */
    evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
    evtchn_cpu_unmask_port(0, isrc->xi_port);

    if (isrc->xi_close != 0 && is_valid_evtchn(isrc->xi_port)) {
        struct evtchn_close close = { .port = isrc->xi_port };

        if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
            panic("EVTCHNOP_close failed");
    }

    xen_intr_port_to_isrc[isrc->xi_port] = NULL;
    isrc->xi_cpu = 0;
    isrc->xi_type = EVTCHN_TYPE_UNBOUND;
    isrc->xi_port = 0;
    isrc->xi_cookie = NULL;
    mtx_unlock(&xen_intr_isrc_lock);
    return (0);
}

/**
 * Associate an interrupt handler with an already allocated local Xen
 * event channel port.
 *
 * \param isrcp         The returned Xen interrupt object associated with
 *                      the specified local port.
 * \param local_port    The event channel to bind.
 * \param type          The event channel type of local_port.
 * \param intr_owner    The device making this bind request.
 * \param filter        An interrupt filter handler.  Specify NULL
 *                      to always dispatch to the ithread handler.
 * \param handler       An interrupt ithread handler.  Optional (can
 *                      specify NULL) if all necessary event actions
 *                      are performed by filter.
 * \param arg           Argument to present to both filter and handler.
 * \param flags         Interrupt handler flags.  See sys/bus.h.
 * \param port_handlep  Pointer to an opaque handle used to manage this
 *                      registration.
 *
 * \returns  0 on success, otherwise an errno.
 */
static int
xen_intr_bind_isrc(struct xenisrc **isrcp, evtchn_port_t local_port,
    enum evtchn_type type, const char *intr_owner, driver_filter_t filter,
    driver_intr_t handler, void *arg, enum intr_type flags,
    xen_intr_handle_t *port_handlep)
{
    struct xenisrc *isrc;
    int error;

    *isrcp = NULL;
    if (port_handlep == NULL) {
        printf("%s: xen_intr_bind_isrc: Bad event handle\n",
            intr_owner);
        return (EINVAL);
    }

    mtx_lock(&xen_intr_isrc_lock);
    isrc = xen_intr_find_unused_isrc(type);
    if (isrc == NULL) {
        isrc = xen_intr_alloc_isrc(type, XEN_ALLOCATE_VECTOR);
        if (isrc == NULL) {
            mtx_unlock(&xen_intr_isrc_lock);
            return (ENOSPC);
        }
    }
    isrc->xi_port = local_port;
    xen_intr_port_to_isrc[local_port] = isrc;
    refcount_init(&isrc->xi_refcount, 1);
    mtx_unlock(&xen_intr_isrc_lock);

    /* Assign the opaque handle (a pointer to the isrc's vector number). */
    *port_handlep = &isrc->xi_vector;

#ifdef SMP
    if (type == EVTCHN_TYPE_PORT) {
        /*
         * By default all interrupts are assigned to vCPU#0
         * unless specified otherwise, so shuffle them to balance
         * the interrupt load.
         */
        xen_intr_assign_cpu(&isrc->xi_intsrc, intr_next_cpu(0));
    }
#endif

    if (filter == NULL && handler == NULL) {
        /*
         * No filter/handler provided, leave the event channel
         * masked and without a valid handler, the caller is
         * in charge of setting that up.
         */
        *isrcp = isrc;
        return (0);
    }

    error = xen_intr_add_handler(intr_owner, filter, handler, arg, flags,
        *port_handlep);
    if (error != 0) {
        xen_intr_release_isrc(isrc);
        return (error);
    }
    *isrcp = isrc;
    return (0);
}

/**
 * Lookup a Xen interrupt source object given an interrupt binding handle.
 *
 * \param handle  A handle initialized by a previous call to
 *                xen_intr_bind_isrc().
 *
 * \returns  A pointer to the Xen interrupt source object associated
 *           with the given interrupt handle.  NULL if no association
 *           currently exists.
 */
static struct xenisrc *
xen_intr_isrc(xen_intr_handle_t handle)
{
    int vector;

    if (handle == NULL)
        return (NULL);

    vector = *(int *)handle;
    KASSERT(vector >= first_evtchn_irq &&
        vector < (first_evtchn_irq + xen_intr_auto_vector_count),
        ("Xen interrupt vector is out of range"));

    return ((struct xenisrc *)intr_lookup_source(vector));
}

/**
 * Determine the event channel ports at the given section of the
 * event port bitmap which have pending events for the given cpu.
 *
 * \param pcpu  The Xen interrupt pcpu data for the cpu being queried.
 * \param sh    The Xen shared info area.
 * \param idx   The index of the section of the event channel bitmap to
 *              inspect.
 *
 * \returns  A u_long with bits set for every event channel with pending
 *           events.
 */
static inline u_long
xen_intr_active_ports(struct xen_intr_pcpu_data *pcpu, shared_info_t *sh,
    u_int idx)
{

    CTASSERT(sizeof(sh->evtchn_mask[0]) == sizeof(sh->evtchn_pending[0]));
    CTASSERT(sizeof(sh->evtchn_mask[0]) == sizeof(pcpu->evtchn_enabled[0]));
    CTASSERT(sizeof(sh->evtchn_mask) == sizeof(sh->evtchn_pending));
    CTASSERT(sizeof(sh->evtchn_mask) == sizeof(pcpu->evtchn_enabled));
    return (sh->evtchn_pending[idx]
        & ~sh->evtchn_mask[idx]
        & pcpu->evtchn_enabled[idx]);
}

/**
 * Interrupt handler for processing all Xen event channel events.
 *
 * \param trap_frame  The trap frame context for the current interrupt.
 */
void
xen_intr_handle_upcall(struct trapframe *trap_frame)
{
    u_int l1i, l2i, port, cpu;
    u_long masked_l1, masked_l2;
    struct xenisrc *isrc;
    shared_info_t *s;
    vcpu_info_t *v;
    struct xen_intr_pcpu_data *pc;
    u_long l1, l2;

    /*
     * Disable preemption in order to always check and fire events
     * on the right vCPU
     */
    critical_enter();

    cpu = PCPU_GET(cpuid);
    pc  = DPCPU_PTR(xen_intr_pcpu);
    s   = HYPERVISOR_shared_info;
    v   = DPCPU_GET(vcpu_info);

    if (xen_hvm_domain() && !xen_vector_callback_enabled) {
        KASSERT((cpu == 0), ("Fired PCI event callback on wrong CPU"));
    }

    v->evtchn_upcall_pending = 0;

#if 0
#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
    /* Clear master flag /before/ clearing selector flag. */
    wmb();
#endif
#endif

    l1 = atomic_readandclear_long(&v->evtchn_pending_sel);

    l1i = pc->last_processed_l1i;
    l2i = pc->last_processed_l2i;
    (*pc->evtchn_intrcnt)++;
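
    /*
     * l1 is a two-level selector: each set bit identifies a word of
     * the shared-info pending bitmap (l2) holding at least one
     * pending port.  Scanning resumes just past the last processed
     * l1/l2 positions so a busy low-numbered port cannot starve
     * higher-numbered ones.
     */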
    while (l1 != 0) {
        l1i = (l1i + 1) % LONG_BIT;
        masked_l1 = l1 & ((~0UL) << l1i);

        if (masked_l1 == 0) {
            /*
             * if we masked out all events, wrap around
             * to the beginning.
             */
            l1i = LONG_BIT - 1;
            l2i = LONG_BIT - 1;
            continue;
        }
        l1i = ffsl(masked_l1) - 1;

        do {
            l2 = xen_intr_active_ports(pc, s, l1i);

            l2i = (l2i + 1) % LONG_BIT;
            masked_l2 = l2 & ((~0UL) << l2i);

            if (masked_l2 == 0) {
                /* if we masked out all events, move on */
                l2i = LONG_BIT - 1;
                break;
            }
            l2i = ffsl(masked_l2) - 1;

            /* process port */
            port = (l1i * LONG_BIT) + l2i;
            synch_clear_bit(port, &s->evtchn_pending[0]);

            isrc = xen_intr_port_to_isrc[port];
            if (__predict_false(isrc == NULL))
                continue;

            /* Make sure we are firing on the right vCPU */
            KASSERT((isrc->xi_cpu == PCPU_GET(cpuid)),
                ("Received unexpected event on vCPU#%d, event bound to vCPU#%d",
                PCPU_GET(cpuid), isrc->xi_cpu));

            intr_execute_handlers(&isrc->xi_intsrc, trap_frame);

            /*
             * If this is the final port processed,
             * we'll pick up here+1 next time.
             */
            pc->last_processed_l1i = l1i;
            pc->last_processed_l2i = l2i;

        } while (l2i != LONG_BIT - 1);

        l2 = xen_intr_active_ports(pc, s, l1i);
        if (l2 == 0) {
            /*
             * We handled all ports, so we can clear the
             * selector bit.
             */
            l1 &= ~(1UL << l1i);
        }
    }

    if (xen_evtchn_needs_ack)
        lapic_eoi();

    critical_exit();
}
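
/**
 * One-time initialization of the event channel subsystem: set up the
 * isrc lock and per-CPU enable masks, mask all event channels, attempt
 * to register the PIRQ EOI map, and register the Xen PIC(s).
 */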
static int
xen_intr_init(void *dummy __unused)
{
    shared_info_t *s = HYPERVISOR_shared_info;
    struct xen_intr_pcpu_data *pcpu;
    struct physdev_pirq_eoi_gmfn eoi_gmfn;
    int i, rc;

    if (!xen_domain())
        return (0);

    mtx_init(&xen_intr_isrc_lock, "xen-irq-lock", NULL, MTX_DEF);

    /*
     * Set the per-cpu mask of CPU#0 to enable all, since by default all
     * event channels are bound to CPU#0.
     */
    CPU_FOREACH(i) {
        pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
        memset(pcpu->evtchn_enabled, i == 0 ? ~0 : 0,
            sizeof(pcpu->evtchn_enabled));
    }

    for (i = 0; i < nitems(s->evtchn_mask); i++)
        atomic_store_rel_long(&s->evtchn_mask[i], ~0);

    /* Try to register PIRQ EOI map */
    xen_intr_pirq_eoi_map = malloc(PAGE_SIZE, M_XENINTR, M_WAITOK | M_ZERO);
    eoi_gmfn.gmfn = atop(vtophys(xen_intr_pirq_eoi_map));
    rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
    if (rc != 0 && bootverbose)
        printf("Xen interrupts: unable to register PIRQ EOI map\n");
    else
        xen_intr_pirq_eoi_map_enabled = true;

    intr_register_pic(&xen_intr_pic);
    if (xen_pv_domain() && xen_initial_domain())
        intr_register_pic(&xen_intr_pirq_pic);

    if (bootverbose)
        printf("Xen interrupt system initialized\n");

    return (0);
}
SYSINIT(xen_intr_init, SI_SUB_INTR, SI_ORDER_SECOND, xen_intr_init, NULL);
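
/**
 * Register a Xen upcall interrupt counter for every CPU in the system.
 */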
static void
xen_intrcnt_init(void *dummy __unused)
{
    unsigned int i;

    if (!xen_domain())
        return;

    /*
     * Register interrupt count manually as we aren't guaranteed to see a
     * call to xen_intr_assign_cpu() before our first interrupt.
     */
    CPU_FOREACH(i)
        xen_intr_intrcnt_add(i);
}
SYSINIT(xen_intrcnt_init, SI_SUB_INTR, SI_ORDER_MIDDLE, xen_intrcnt_init, NULL);
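
/**
 * Reserve a contiguous range of NR_EVENT_CHANNELS interrupt vectors for
 * event channels, located directly above the dynamically sized I/O IRQ
 * range (num_io_irqs).
 */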
void
xen_intr_alloc_irqs(void)
{

    if (num_io_irqs > UINT_MAX - NR_EVENT_CHANNELS)
        panic("IRQ allocation overflow (num_msi_irqs too high?)");
    first_evtchn_irq = num_io_irqs;
    num_io_irqs += NR_EVENT_CHANNELS;
}

/*--------------------------- Common PIC Functions ---------------------------*/
/**
 * Prepare this PIC for system suspension.
 */
static void
xen_intr_suspend(struct pic *unused)
{
}
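
/**
 * Rebind an IPI event channel to its pre-suspend vCPU after resume.
 */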
static void
xen_rebind_ipi(struct xenisrc *isrc)
{
#ifdef SMP
    int cpu = isrc->xi_cpu;
    int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
    int error;
    struct evtchn_bind_ipi bind_ipi = { .vcpu = vcpu_id };

    error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
        &bind_ipi);
    if (error != 0)
        panic("unable to rebind xen IPI: %d", error);

    isrc->xi_port = bind_ipi.port;
    isrc->xi_cpu = 0;
    xen_intr_port_to_isrc[bind_ipi.port] = isrc;

    error = xen_intr_assign_cpu(&isrc->xi_intsrc,
        cpu_apic_ids[cpu]);
    if (error)
        panic("unable to bind xen IPI to CPU#%d: %d",
            cpu, error);

    evtchn_unmask_port(bind_ipi.port);
#else
    panic("Resume IPI event channel on UP");
#endif
}
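
/**
 * Rebind a VIRQ event channel to its pre-suspend vCPU after resume.
 */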
static void
xen_rebind_virq(struct xenisrc *isrc)
{
    int cpu = isrc->xi_cpu;
    int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
    int error;
    struct evtchn_bind_virq bind_virq = { .virq = isrc->xi_virq,
                                          .vcpu = vcpu_id };

    error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
        &bind_virq);
    if (error != 0)
        panic("unable to rebind xen VIRQ#%d: %d", isrc->xi_virq, error);

    isrc->xi_port = bind_virq.port;
    isrc->xi_cpu = 0;
    xen_intr_port_to_isrc[bind_virq.port] = isrc;

#ifdef SMP
    error = xen_intr_assign_cpu(&isrc->xi_intsrc,
        cpu_apic_ids[cpu]);
    if (error)
        panic("unable to bind xen VIRQ#%d to CPU#%d: %d",
            isrc->xi_virq, cpu, error);
#endif

    evtchn_unmask_port(bind_virq.port);
}

/**
 * Return this PIC to service after being suspended.
 */
static void
xen_intr_resume(struct pic *unused, bool suspend_cancelled)
{
    shared_info_t *s = HYPERVISOR_shared_info;
    struct xenisrc *isrc;
    u_int isrc_idx;
    int i;

    if (suspend_cancelled)
        return;

    /* Reset the per-CPU masks */
    CPU_FOREACH(i) {
        struct xen_intr_pcpu_data *pcpu;

        pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
        memset(pcpu->evtchn_enabled, i == 0 ? ~0 : 0,
            sizeof(pcpu->evtchn_enabled));
    }

    /* Mask all event channels. */
    for (i = 0; i < nitems(s->evtchn_mask); i++)
        atomic_store_rel_long(&s->evtchn_mask[i], ~0);

    /* Remove port -> isrc mappings */
    memset(xen_intr_port_to_isrc, 0, sizeof(xen_intr_port_to_isrc));

    /* Free unused isrcs and rebind VIRQs and IPIs */
    for (isrc_idx = 0; isrc_idx < xen_intr_auto_vector_count; isrc_idx++) {
        u_int vector;

        vector = first_evtchn_irq + isrc_idx;
        isrc = (struct xenisrc *)intr_lookup_source(vector);
        if (isrc != NULL) {
            isrc->xi_port = 0;
            switch (isrc->xi_type) {
            case EVTCHN_TYPE_IPI:
                xen_rebind_ipi(isrc);
                break;
            case EVTCHN_TYPE_VIRQ:
                xen_rebind_virq(isrc);
                break;
            default:
                break;
            }
        }
    }
}

/**
 * Disable a Xen interrupt source.
 *
 * \param isrc  The interrupt source to disable.
 */
static void
xen_intr_disable_intr(struct intsrc *base_isrc)
{
    struct xenisrc *isrc = (struct xenisrc *)base_isrc;

    evtchn_mask_port(isrc->xi_port);
}

/**
 * Determine the global interrupt vector number for
 * a Xen interrupt source.
 *
 * \param isrc  The interrupt source to query.
 *
 * \return  The vector number corresponding to the given interrupt source.
 */
static int
xen_intr_vector(struct intsrc *base_isrc)
{
    struct xenisrc *isrc = (struct xenisrc *)base_isrc;

    return (isrc->xi_vector);
}

/**
 * Determine whether or not interrupt events are pending on the
 * given interrupt source.
 *
 * \param isrc  The interrupt source to query.
 *
 * \returns  0 if no events are pending, otherwise non-zero.
 */
static int
xen_intr_source_pending(struct intsrc *isrc)
{
    /*
     * Event channels are edge triggered and never masked.
     * There can be no pending events.
     */
    return (0);
}

/**
 * Perform configuration of an interrupt source.
 *
 * \param isrc  The interrupt source to configure.
 * \param trig  Edge or level.
 * \param pol   Active high or low.
 *
 * \returns  0 on success, otherwise an errno.
 */
static int
xen_intr_config_intr(struct intsrc *isrc, enum intr_trigger trig,
    enum intr_polarity pol)
{
    /* Configuration is only possible via the evtchn apis. */
    return (ENODEV);
}

/**
 * Configure CPU affinity for interrupt source event delivery.
 *
 * \param isrc     The interrupt source to configure.
 * \param apic_id  The apic id of the CPU for handling future events.
 *
 * \returns  0 if successful, otherwise an errno.
 */
static int
xen_intr_assign_cpu(struct intsrc *base_isrc, u_int apic_id)
{
#ifdef SMP
    struct evtchn_bind_vcpu bind_vcpu;
    struct xenisrc *isrc;
    u_int to_cpu, vcpu_id;
    int error, masked;

    if (xen_vector_callback_enabled == 0)
        return (EOPNOTSUPP);

    to_cpu = apic_cpuid(apic_id);
    vcpu_id = pcpu_find(to_cpu)->pc_vcpu_id;

    mtx_lock(&xen_intr_isrc_lock);
    isrc = (struct xenisrc *)base_isrc;
    if (!is_valid_evtchn(isrc->xi_port)) {
        mtx_unlock(&xen_intr_isrc_lock);
        return (EINVAL);
    }

    /*
     * Mask the event channel while binding it to prevent interrupt
     * delivery with an inconsistent state in isrc->xi_cpu.
     */
    masked = evtchn_test_and_set_mask(isrc->xi_port);
    if ((isrc->xi_type == EVTCHN_TYPE_VIRQ) ||
        (isrc->xi_type == EVTCHN_TYPE_IPI)) {
        /*
         * Virtual IRQs are associated with a cpu by
         * the Hypervisor at evtchn_bind_virq time, so
         * all we need to do is update the per-CPU masks.
         */
        evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
        isrc->xi_cpu = to_cpu;
        evtchn_cpu_unmask_port(isrc->xi_cpu, isrc->xi_port);
        goto out;
    }

    bind_vcpu.port = isrc->xi_port;
    bind_vcpu.vcpu = vcpu_id;

    error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu);
    if (isrc->xi_cpu != to_cpu) {
        if (error == 0) {
            /* Commit to new binding by removing the old one. */
            evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
            isrc->xi_cpu = to_cpu;
            evtchn_cpu_unmask_port(isrc->xi_cpu, isrc->xi_port);
        }
    }

out:
    if (masked == 0)
        evtchn_unmask_port(isrc->xi_port);
    mtx_unlock(&xen_intr_isrc_lock);
    return (0);
#else
    return (EOPNOTSUPP);
#endif
}

/*------------------- Virtual Interrupt Source PIC Functions -----------------*/
/*
 * Mask a level triggered interrupt source.
 *
 * \param isrc  The interrupt source to mask (if necessary).
 * \param eoi   If non-zero, perform any necessary end-of-interrupt
 *              acknowledgements.
 */
static void
xen_intr_disable_source(struct intsrc *base_isrc, int eoi)
{
    struct xenisrc *isrc;

    isrc = (struct xenisrc *)base_isrc;

    /*
     * NB: checking if the event channel is already masked is
     * needed because the event channel user-space device
     * masks event channels on its filter as part of its
     * normal operation, and those shouldn't be automatically
     * unmasked by the generic interrupt code.  The event channel
     * device will unmask them when needed.
     */
    isrc->xi_masked = !!evtchn_test_and_set_mask(isrc->xi_port);
}

/*
 * Unmask a level triggered interrupt source.
 *
 * \param isrc  The interrupt source to unmask (if necessary).
 */
static void
xen_intr_enable_source(struct intsrc *base_isrc)
{
    struct xenisrc *isrc;

    isrc = (struct xenisrc *)base_isrc;

    if (isrc->xi_masked == 0)
        evtchn_unmask_port(isrc->xi_port);
}

/*
 * Perform any necessary end-of-interrupt acknowledgements.
 *
 * \param isrc  The interrupt source to EOI.
 */
static void
xen_intr_eoi_source(struct intsrc *base_isrc)
{
}

/*
 * Enable and unmask the interrupt source.
 *
 * \param isrc  The interrupt source to enable.
 */
static void
xen_intr_enable_intr(struct intsrc *base_isrc)
{
    struct xenisrc *isrc = (struct xenisrc *)base_isrc;

    evtchn_unmask_port(isrc->xi_port);
}

/*------------------ Physical Interrupt Source PIC Functions -----------------*/
/*
 * Mask a level triggered interrupt source.
 *
 * \param isrc  The interrupt source to mask (if necessary).
 * \param eoi   If non-zero, perform any necessary end-of-interrupt
 *              acknowledgements.
 */
static void
xen_intr_pirq_disable_source(struct intsrc *base_isrc, int eoi)
{
    struct xenisrc *isrc;

    isrc = (struct xenisrc *)base_isrc;

    if (isrc->xi_edgetrigger == 0)
        evtchn_mask_port(isrc->xi_port);
    if (eoi == PIC_EOI)
        xen_intr_pirq_eoi_source(base_isrc);
}

/*
 * Unmask a level triggered interrupt source.
 *
 * \param isrc  The interrupt source to unmask (if necessary).
 */
static void
xen_intr_pirq_enable_source(struct intsrc *base_isrc)
{
    struct xenisrc *isrc;

    isrc = (struct xenisrc *)base_isrc;

    if (isrc->xi_edgetrigger == 0)
        evtchn_unmask_port(isrc->xi_port);
}

/*
 * Perform any necessary end-of-interrupt acknowledgements.
 *
 * \param isrc  The interrupt source to EOI.
 */
static void
xen_intr_pirq_eoi_source(struct intsrc *base_isrc)
{
    struct xenisrc *isrc;
    int error;

    isrc = (struct xenisrc *)base_isrc;

    if (xen_test_bit(isrc->xi_pirq, xen_intr_pirq_eoi_map)) {
        struct physdev_eoi eoi = { .irq = isrc->xi_pirq };

        error = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
        if (error != 0)
            panic("Unable to EOI PIRQ#%d: %d\n",
                isrc->xi_pirq, error);
    }
}

/*
 * Enable and unmask the interrupt source.
 *
 * \param isrc  The interrupt source to enable.
 */
static void
xen_intr_pirq_enable_intr(struct intsrc *base_isrc)
{
    struct xenisrc *isrc;
    struct evtchn_bind_pirq bind_pirq;
    struct physdev_irq_status_query irq_status;
    int error;

    isrc = (struct xenisrc *)base_isrc;

    if (!xen_intr_pirq_eoi_map_enabled) {
        irq_status.irq = isrc->xi_pirq;
        error = HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query,
            &irq_status);
        if (error)
            panic("unable to get status of IRQ#%d", isrc->xi_pirq);

        if (irq_status.flags & XENIRQSTAT_needs_eoi) {
            /*
             * Since the dynamic PIRQ EOI map is not available
             * mark the PIRQ as needing EOI unconditionally.
             */
            xen_set_bit(isrc->xi_pirq, xen_intr_pirq_eoi_map);
        }
    }

    bind_pirq.pirq = isrc->xi_pirq;
    bind_pirq.flags = isrc->xi_edgetrigger ? 0 : BIND_PIRQ__WILL_SHARE;
    error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
    if (error)
        panic("unable to bind IRQ#%d", isrc->xi_pirq);

    isrc->xi_port = bind_pirq.port;

    mtx_lock(&xen_intr_isrc_lock);
    KASSERT((xen_intr_port_to_isrc[bind_pirq.port] == NULL),
        ("trying to override an already setup event channel port"));
    xen_intr_port_to_isrc[bind_pirq.port] = isrc;
    mtx_unlock(&xen_intr_isrc_lock);

    evtchn_unmask_port(isrc->xi_port);
}

/*
 * Disable an interrupt source.
 *
 * \param isrc  The interrupt source to disable.
 */
static void
xen_intr_pirq_disable_intr(struct intsrc *base_isrc)
{
    struct xenisrc *isrc;
    struct evtchn_close close;
    int error;

    isrc = (struct xenisrc *)base_isrc;

    evtchn_mask_port(isrc->xi_port);

    close.port = isrc->xi_port;
    error = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
    if (error)
        panic("unable to close event channel %d IRQ#%d",
            isrc->xi_port, isrc->xi_pirq);

    mtx_lock(&xen_intr_isrc_lock);
    xen_intr_port_to_isrc[isrc->xi_port] = NULL;
    mtx_unlock(&xen_intr_isrc_lock);

    isrc->xi_port = 0;
}

/**
 * Perform configuration of an interrupt source.
 *
 * \param isrc  The interrupt source to configure.
 * \param trig  Edge or level.
 * \param pol   Active high or low.
 *
 * \returns  0 on success, otherwise an errno.
 */
static int
xen_intr_pirq_config_intr(struct intsrc *base_isrc, enum intr_trigger trig,
    enum intr_polarity pol)
{
    struct xenisrc *isrc = (struct xenisrc *)base_isrc;
    struct physdev_setup_gsi setup_gsi;
    int error;

    KASSERT(!(trig == INTR_TRIGGER_CONFORM || pol == INTR_POLARITY_CONFORM),
        ("%s: Conforming trigger or polarity\n", __func__));

    setup_gsi.gsi = isrc->xi_pirq;
    setup_gsi.triggering = trig == INTR_TRIGGER_EDGE ? 0 : 1;
    setup_gsi.polarity = pol == INTR_POLARITY_HIGH ? 0 : 1;

    error = HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, &setup_gsi);
    if (error == -XEN_EEXIST) {
        if ((isrc->xi_edgetrigger && (trig != INTR_TRIGGER_EDGE)) ||
            (isrc->xi_activehi && (pol != INTR_POLARITY_HIGH)))
            panic("unable to reconfigure interrupt IRQ#%d",
                isrc->xi_pirq);
        error = 0;
    }
    if (error)
        panic("unable to configure IRQ#%d\n", isrc->xi_pirq);

    isrc->xi_activehi = pol == INTR_POLARITY_HIGH ? 1 : 0;
    isrc->xi_edgetrigger = trig == INTR_TRIGGER_EDGE ? 1 : 0;

    return (0);
}

/*--------------------------- Public Functions -------------------------------*/
/*------- API comments for these methods can be found in xen/xenintr.h -------*/
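
/*
 * Illustrative sketch of typical driver usage; the device, port, and
 * callback names below are hypothetical and not defined in this file:
 *
 *    xen_intr_handle_t handle;
 *    int error;
 *
 *    error = xen_intr_bind_local_port(dev, port, my_filter, NULL,
 *        softc, INTR_TYPE_BIO | INTR_MPSAFE, &handle);
 *    if (error == 0)
 *        error = xen_intr_describe(handle, "myxendev%d", unit);
 *    ...
 *    xen_intr_unbind(&handle);
 */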
int
xen_intr_bind_local_port(device_t dev, evtchn_port_t local_port,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
{
    struct xenisrc *isrc;
    int error;

    error = xen_intr_bind_isrc(&isrc, local_port, EVTCHN_TYPE_PORT,
        device_get_nameunit(dev), filter, handler, arg, flags,
        port_handlep);
    if (error != 0)
        return (error);

    /*
     * The Event Channel API didn't open this port, so it is not
     * responsible for closing it automatically on unbind.
     */
    isrc->xi_close = 0;
    return (0);
}

int
xen_intr_alloc_and_bind_local_port(device_t dev, u_int remote_domain,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
{
    struct xenisrc *isrc;
    struct evtchn_alloc_unbound alloc_unbound;
    int error;

    alloc_unbound.dom = DOMID_SELF;
    alloc_unbound.remote_dom = remote_domain;
    error = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
        &alloc_unbound);
    if (error != 0) {
        /*
         * XXX Trap Hypercall error code Linuxisms in
         *     the HYPERCALL layer.
         */
        return (-error);
    }

    error = xen_intr_bind_isrc(&isrc, alloc_unbound.port, EVTCHN_TYPE_PORT,
        device_get_nameunit(dev), filter, handler, arg, flags,
        port_handlep);
    if (error != 0) {
        evtchn_close_t close = { .port = alloc_unbound.port };

        if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
            panic("EVTCHNOP_close failed");
        return (error);
    }

    isrc->xi_close = 1;
    return (0);
}

int
xen_intr_bind_remote_port(device_t dev, u_int remote_domain,
    u_int remote_port, driver_filter_t filter, driver_intr_t handler,
    void *arg, enum intr_type flags, xen_intr_handle_t *port_handlep)
{
    struct xenisrc *isrc;
    struct evtchn_bind_interdomain bind_interdomain;
    int error;

    bind_interdomain.remote_dom = remote_domain;
    bind_interdomain.remote_port = remote_port;
    error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
        &bind_interdomain);
    if (error != 0) {
        /*
         * XXX Trap Hypercall error code Linuxisms in
         *     the HYPERCALL layer.
         */
        return (-error);
    }

    error = xen_intr_bind_isrc(&isrc, bind_interdomain.local_port,
        EVTCHN_TYPE_PORT, device_get_nameunit(dev), filter, handler, arg,
        flags, port_handlep);
    if (error) {
        evtchn_close_t close = { .port = bind_interdomain.local_port };

        if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
            panic("EVTCHNOP_close failed");
        return (error);
    }

    /*
     * The Event Channel API opened this port, so it is
     * responsible for closing it automatically on unbind.
     */
    isrc->xi_close = 1;
    return (0);
}

int
xen_intr_bind_virq(device_t dev, u_int virq, u_int cpu,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
{
    int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
    struct xenisrc *isrc;
    struct evtchn_bind_virq bind_virq = { .virq = virq, .vcpu = vcpu_id };
    int error;

    isrc = NULL;
    error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind_virq);
    if (error != 0) {
        /*
         * XXX Trap Hypercall error code Linuxisms in
         *     the HYPERCALL layer.
         */
        return (-error);
    }

    error = xen_intr_bind_isrc(&isrc, bind_virq.port, EVTCHN_TYPE_VIRQ,
        device_get_nameunit(dev), filter, handler, arg, flags,
        port_handlep);

#ifdef SMP
    if (error == 0)
        error = intr_event_bind(isrc->xi_intsrc.is_event, cpu);
#endif

    if (error != 0) {
        evtchn_close_t close = { .port = bind_virq.port };

        xen_intr_unbind(port_handlep);
        if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
            panic("EVTCHNOP_close failed");
        return (error);
    }

#ifdef SMP
    if (isrc->xi_cpu != cpu) {
        /*
         * Too early in the boot process for the generic interrupt
         * code to perform the binding.  Update our event channel
         * masks manually so events can't fire on the wrong cpu
         * during AP startup.
         */
        xen_intr_assign_cpu(&isrc->xi_intsrc, cpu_apic_ids[cpu]);
    }
#endif

    /*
     * The Event Channel API opened this port, so it is
     * responsible for closing it automatically on unbind.
     */
    isrc->xi_close = 1;
    isrc->xi_virq = virq;

    return (0);
}

int
xen_intr_alloc_and_bind_ipi(u_int cpu, driver_filter_t filter,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
{
#ifdef SMP
    int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
    struct xenisrc *isrc;
    struct evtchn_bind_ipi bind_ipi = { .vcpu = vcpu_id };
    /* Same size as the one used by intr_handler->ih_name. */
    char name[MAXCOMLEN + 1];
    int error;

    isrc = NULL;
    error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi);
    if (error != 0) {
        /*
         * XXX Trap Hypercall error code Linuxisms in
         *     the HYPERCALL layer.
         */
        return (-error);
    }

    snprintf(name, sizeof(name), "cpu%u", cpu);

    error = xen_intr_bind_isrc(&isrc, bind_ipi.port, EVTCHN_TYPE_IPI,
        name, filter, NULL, NULL, flags, port_handlep);
    if (error != 0) {
        evtchn_close_t close = { .port = bind_ipi.port };

        xen_intr_unbind(port_handlep);
        if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
            panic("EVTCHNOP_close failed");
        return (error);
    }

    if (isrc->xi_cpu != cpu) {
        /*
         * Too early in the boot process for the generic interrupt
         * code to perform the binding.  Update our event channel
         * masks manually so events can't fire on the wrong cpu
         * during AP startup.
         */
        xen_intr_assign_cpu(&isrc->xi_intsrc, cpu_apic_ids[cpu]);
    }

    /*
     * The Event Channel API opened this port, so it is
     * responsible for closing it automatically on unbind.
     */
    isrc->xi_close = 1;
    return (0);
#else
    return (EOPNOTSUPP);
#endif
}

int
xen_register_pirq(int vector, enum intr_trigger trig, enum intr_polarity pol)
{
    struct physdev_map_pirq map_pirq;
    struct xenisrc *isrc;
    int error;

    if (vector == 0)
        return (EINVAL);

    if (bootverbose)
        printf("xen: register IRQ#%d\n", vector);

    map_pirq.domid = DOMID_SELF;
    map_pirq.type = MAP_PIRQ_TYPE_GSI;
    map_pirq.index = vector;
    map_pirq.pirq = vector;

    error = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_pirq);
    if (error) {
        printf("xen: unable to map IRQ#%d\n", vector);
        return (error);
    }

    mtx_lock(&xen_intr_isrc_lock);
    isrc = xen_intr_alloc_isrc(EVTCHN_TYPE_PIRQ, vector);
    mtx_unlock(&xen_intr_isrc_lock);
    KASSERT((isrc != NULL), ("xen: unable to allocate isrc for interrupt"));
    isrc->xi_pirq = vector;
    isrc->xi_activehi = pol == INTR_POLARITY_HIGH ? 1 : 0;
    isrc->xi_edgetrigger = trig == INTR_TRIGGER_EDGE ? 1 : 0;

    return (0);
}

int
xen_register_msi(device_t dev, int vector, int count)
{
    struct physdev_map_pirq msi_irq;
    struct xenisrc *isrc;
    int ret;

    memset(&msi_irq, 0, sizeof(msi_irq));
    msi_irq.domid = DOMID_SELF;
    msi_irq.type = count == 1 ?
        MAP_PIRQ_TYPE_MSI_SEG : MAP_PIRQ_TYPE_MULTI_MSI;
    msi_irq.index = -1;
    msi_irq.pirq = -1;
    msi_irq.bus = pci_get_bus(dev) | (pci_get_domain(dev) << 16);
    msi_irq.devfn = (pci_get_slot(dev) << 3) | pci_get_function(dev);
    msi_irq.entry_nr = count;

    ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &msi_irq);
    if (ret != 0)
        return (ret);
    if (count != msi_irq.entry_nr) {
        panic("unable to setup all requested MSI vectors "
            "(expected %d got %d)", count, msi_irq.entry_nr);
    }

    mtx_lock(&xen_intr_isrc_lock);
    for (int i = 0; i < count; i++) {
        isrc = xen_intr_alloc_isrc(EVTCHN_TYPE_PIRQ, vector + i);
        KASSERT(isrc != NULL,
            ("xen: unable to allocate isrc for interrupt"));
        isrc->xi_pirq = msi_irq.pirq + i;
        /* MSI interrupts are always edge triggered */
        isrc->xi_edgetrigger = 1;
    }
    mtx_unlock(&xen_intr_isrc_lock);

    return (0);
}

int
xen_release_msi(int vector)
{
    struct physdev_unmap_pirq unmap;
    struct xenisrc *isrc;
    int ret;

    isrc = (struct xenisrc *)intr_lookup_source(vector);
    if (isrc == NULL)
        return (ENXIO);

    unmap.pirq = isrc->xi_pirq;
    ret = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap);
    if (ret != 0)
        return (ret);

    xen_intr_release_isrc(isrc);

    return (0);
}

int
xen_intr_describe(xen_intr_handle_t port_handle, const char *fmt, ...)
{
    char descr[MAXCOMLEN + 1];
    struct xenisrc *isrc;
    va_list ap;

    isrc = xen_intr_isrc(port_handle);
    if (isrc == NULL)
        return (EINVAL);

    va_start(ap, fmt);
    vsnprintf(descr, sizeof(descr), fmt, ap);
    va_end(ap);
    return (intr_describe(isrc->xi_vector, isrc->xi_cookie, descr));
}

void
xen_intr_unbind(xen_intr_handle_t *port_handlep)
{
    struct xenisrc *isrc;

    KASSERT(port_handlep != NULL,
        ("NULL xen_intr_handle_t passed to xen_intr_unbind"));

    isrc = xen_intr_isrc(*port_handlep);
    *port_handlep = NULL;
    if (isrc == NULL)
        return;

    mtx_lock(&xen_intr_isrc_lock);
    if (refcount_release(&isrc->xi_refcount) == 0) {
        mtx_unlock(&xen_intr_isrc_lock);
        return;
    }
    mtx_unlock(&xen_intr_isrc_lock);

    if (isrc->xi_cookie != NULL)
        intr_remove_handler(isrc->xi_cookie);
    xen_intr_release_isrc(isrc);
}

void
xen_intr_signal(xen_intr_handle_t handle)
{
    struct xenisrc *isrc;

    isrc = xen_intr_isrc(handle);
    if (isrc != NULL) {
        KASSERT(isrc->xi_type == EVTCHN_TYPE_PORT ||
            isrc->xi_type == EVTCHN_TYPE_IPI,
            ("evtchn_signal on something other than a local port"));
        struct evtchn_send send = { .port = isrc->xi_port };
        (void)HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
    }
}

evtchn_port_t
xen_intr_port(xen_intr_handle_t handle)
{
    struct xenisrc *isrc;

    isrc = xen_intr_isrc(handle);
    if (isrc == NULL)
        return (0);

    return (isrc->xi_port);
}

int
xen_intr_add_handler(const char *name, driver_filter_t filter,
    driver_intr_t handler, void *arg, enum intr_type flags,
    xen_intr_handle_t handle)
{
    struct xenisrc *isrc;
    int error;

    isrc = xen_intr_isrc(handle);
    if (isrc == NULL || isrc->xi_cookie != NULL)
        return (EINVAL);

    error = intr_add_handler(name, isrc->xi_vector, filter, handler, arg,
        flags | INTR_EXCL, &isrc->xi_cookie, 0);
    if (error != 0) {
        printf(
            "%s: xen_intr_add_handler: intr_add_handler failed: %d\n",
            name, error);
    }

    return (error);
}

int
xen_intr_get_evtchn_from_port(evtchn_port_t port, xen_intr_handle_t *handlep)
{

    if (!is_valid_evtchn(port) || port >= NR_EVENT_CHANNELS)
        return (EINVAL);
    if (handlep == NULL)
        return (EINVAL);

    mtx_lock(&xen_intr_isrc_lock);
    if (xen_intr_port_to_isrc[port] == NULL) {
        mtx_unlock(&xen_intr_isrc_lock);
        return (EINVAL);
    }
    refcount_acquire(&xen_intr_port_to_isrc[port]->xi_refcount);
    mtx_unlock(&xen_intr_isrc_lock);

    /* Assign the opaque handle (a pointer to the isrc's vector number). */
    *handlep = &xen_intr_port_to_isrc[port]->xi_vector;

    return (0);
}

#ifdef DDB
static const char *
xen_intr_print_type(enum evtchn_type type)
{
    static const char *evtchn_type_to_string[EVTCHN_TYPE_COUNT] = {
        [EVTCHN_TYPE_UNBOUND] = "UNBOUND",
        [EVTCHN_TYPE_PIRQ]    = "PIRQ",
        [EVTCHN_TYPE_VIRQ]    = "VIRQ",
        [EVTCHN_TYPE_IPI]     = "IPI",
        [EVTCHN_TYPE_PORT]    = "PORT",
    };

    if (type >= EVTCHN_TYPE_COUNT)
        return ("UNKNOWN");

    return (evtchn_type_to_string[type]);
}

static void
xen_intr_dump_port(struct xenisrc *isrc)
{
    struct xen_intr_pcpu_data *pcpu;
    shared_info_t *s = HYPERVISOR_shared_info;
    int i;

    db_printf("Port %d Type: %s\n",
        isrc->xi_port, xen_intr_print_type(isrc->xi_type));
    if (isrc->xi_type == EVTCHN_TYPE_PIRQ) {
        db_printf("\tPirq: %d ActiveHi: %d EdgeTrigger: %d "
            "NeedsEOI: %d\n",
            isrc->xi_pirq, isrc->xi_activehi, isrc->xi_edgetrigger,
            !!xen_test_bit(isrc->xi_pirq, xen_intr_pirq_eoi_map));
    }
    if (isrc->xi_type == EVTCHN_TYPE_VIRQ)
        db_printf("\tVirq: %d\n", isrc->xi_virq);

    db_printf("\tMasked: %d Pending: %d\n",
        !!xen_test_bit(isrc->xi_port, &s->evtchn_mask[0]),
        !!xen_test_bit(isrc->xi_port, &s->evtchn_pending[0]));

    db_printf("\tPer-CPU Masks: ");
    CPU_FOREACH(i) {
        pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
        db_printf("cpu#%d: %d ", i,
            !!xen_test_bit(isrc->xi_port, pcpu->evtchn_enabled));
    }
    db_printf("\n");
}
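
/* Invoked from the DDB prompt as "show xen_evtchn". */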
DB_SHOW_COMMAND(xen_evtchn, db_show_xen_evtchn)
{
    int i;

    if (!xen_domain()) {
        db_printf("Only available on Xen guests\n");
        return;
    }

    for (i = 0; i < NR_EVENT_CHANNELS; i++) {
        struct xenisrc *isrc;

        isrc = xen_intr_port_to_isrc[i];
        if (isrc == NULL)
            continue;

        xen_intr_dump_port(isrc);
    }
}
#endif /* DDB */