irqdomain.c 48 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764
  1. // SPDX-License-Identifier: GPL-2.0
  2. #define pr_fmt(fmt) "irq: " fmt
  3. #include <linux/acpi.h>
  4. #include <linux/debugfs.h>
  5. #include <linux/hardirq.h>
  6. #include <linux/interrupt.h>
  7. #include <linux/irq.h>
  8. #include <linux/irqdesc.h>
  9. #include <linux/irqdomain.h>
  10. #include <linux/module.h>
  11. #include <linux/mutex.h>
  12. #include <linux/of.h>
  13. #include <linux/of_address.h>
  14. #include <linux/of_irq.h>
  15. #include <linux/topology.h>
  16. #include <linux/seq_file.h>
  17. #include <linux/slab.h>
  18. #include <linux/smp.h>
  19. #include <linux/fs.h>
/* All registered irq domains, protected by irq_domain_mutex. */
static LIST_HEAD(irq_domain_list);
static DEFINE_MUTEX(irq_domain_mutex);

/* Fallback domain used when NULL is passed; set via irq_set_default_host(). */
static struct irq_domain *irq_default_domain;

static void irq_domain_check_hierarchy(struct irq_domain *domain);

/*
 * Wrapper embedding a fwnode_handle so that irqchips with no OF/ACPI
 * firmware node can still be identified by one; allocated by
 * __irq_domain_alloc_fwnode() and released by irq_domain_free_fwnode().
 */
struct irqchip_fwid {
	struct fwnode_handle fwnode;
	unsigned int type;	/* IRQCHIP_FWNODE_* (see linux/irqdomain.h) */
	char *name;		/* identifying name, kfree()d with the fwid */
	void *data;		/* opaque caller-provided data */
};

#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
static void debugfs_add_domain_dir(struct irq_domain *d);
static void debugfs_remove_domain_dir(struct irq_domain *d);
#else
/* debugfs disabled: no-op stubs so callers need no #ifdefs. */
static inline void debugfs_add_domain_dir(struct irq_domain *d) { }
static inline void debugfs_remove_domain_dir(struct irq_domain *d) { }
#endif

/*
 * Empty ops table whose address presumably serves as the identity marker
 * checked by is_fwnode_irqchip() — definition not visible here.
 */
const struct fwnode_operations irqchip_fwnode_ops;
EXPORT_SYMBOL_GPL(irqchip_fwnode_ops);
  39. /**
  40. * irq_domain_alloc_fwnode - Allocate a fwnode_handle suitable for
  41. * identifying an irq domain
  42. * @type: Type of irqchip_fwnode. See linux/irqdomain.h
  43. * @name: Optional user provided domain name
  44. * @id: Optional user provided id if name != NULL
  45. * @data: Optional user-provided data
  46. *
  47. * Allocate a struct irqchip_fwid, and return a poiner to the embedded
  48. * fwnode_handle (or NULL on failure).
  49. *
  50. * Note: The types IRQCHIP_FWNODE_NAMED and IRQCHIP_FWNODE_NAMED_ID are
  51. * solely to transport name information to irqdomain creation code. The
  52. * node is not stored. For other types the pointer is kept in the irq
  53. * domain struct.
  54. */
  55. struct fwnode_handle *__irq_domain_alloc_fwnode(unsigned int type, int id,
  56. const char *name, void *data)
  57. {
  58. struct irqchip_fwid *fwid;
  59. char *n;
  60. fwid = kzalloc(sizeof(*fwid), GFP_KERNEL);
  61. switch (type) {
  62. case IRQCHIP_FWNODE_NAMED:
  63. n = kasprintf(GFP_KERNEL, "%s", name);
  64. break;
  65. case IRQCHIP_FWNODE_NAMED_ID:
  66. n = kasprintf(GFP_KERNEL, "%s-%d", name, id);
  67. break;
  68. default:
  69. n = kasprintf(GFP_KERNEL, "irqchip@%p", data);
  70. break;
  71. }
  72. if (!fwid || !n) {
  73. kfree(fwid);
  74. kfree(n);
  75. return NULL;
  76. }
  77. fwid->type = type;
  78. fwid->name = n;
  79. fwid->data = data;
  80. fwid->fwnode.ops = &irqchip_fwnode_ops;
  81. return &fwid->fwnode;
  82. }
  83. EXPORT_SYMBOL_GPL(__irq_domain_alloc_fwnode);
  84. /**
  85. * irq_domain_free_fwnode - Free a non-OF-backed fwnode_handle
  86. *
  87. * Free a fwnode_handle allocated with irq_domain_alloc_fwnode.
  88. */
  89. void irq_domain_free_fwnode(struct fwnode_handle *fwnode)
  90. {
  91. struct irqchip_fwid *fwid;
  92. if (WARN_ON(!is_fwnode_irqchip(fwnode)))
  93. return;
  94. fwid = container_of(fwnode, struct irqchip_fwid, fwnode);
  95. kfree(fwid->name);
  96. kfree(fwid);
  97. }
  98. EXPORT_SYMBOL_GPL(irq_domain_free_fwnode);
/**
 * __irq_domain_add() - Allocate a new irq_domain data structure
 * @fwnode: firmware node for the interrupt controller
 * @size: Size of linear map; 0 for radix mapping only
 * @hwirq_max: Maximum number of interrupts supported by controller
 * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no
 *              direct mapping
 * @ops: domain callbacks
 * @host_data: Controller private data pointer
 *
 * Allocates and initializes an irq_domain structure.
 * Returns pointer to IRQ domain, or NULL on failure.
 */
struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
				    irq_hw_number_t hwirq_max, int direct_max,
				    const struct irq_domain_ops *ops,
				    void *host_data)
{
	struct device_node *of_node = to_of_node(fwnode);
	struct irqchip_fwid *fwid;
	struct irq_domain *domain;

	/* Counter used to name domains whose fwnode yields no usable name. */
	static atomic_t unknown_domains;

	/* The linear revmap is allocated as a tail array of the domain. */
	domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size),
			      GFP_KERNEL, of_node_to_nid(of_node));
	if (WARN_ON(!domain))
		return NULL;

	if (fwnode && is_fwnode_irqchip(fwnode)) {
		fwid = container_of(fwnode, struct irqchip_fwid, fwnode);

		switch (fwid->type) {
		case IRQCHIP_FWNODE_NAMED:
		case IRQCHIP_FWNODE_NAMED_ID:
			/*
			 * Named fwids only transport the name; the domain
			 * must keep its own copy (see fwnode alloc notes).
			 */
			domain->fwnode = fwnode;
			domain->name = kstrdup(fwid->name, GFP_KERNEL);
			if (!domain->name) {
				kfree(domain);
				return NULL;
			}
			domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
			break;
		default:
			/* Other fwid types: share the fwid-owned name. */
			domain->fwnode = fwnode;
			domain->name = fwid->name;
			break;
		}
#ifdef CONFIG_ACPI
	} else if (is_acpi_device_node(fwnode)) {
		struct acpi_buffer buf = {
			.length = ACPI_ALLOCATE_BUFFER,
		};
		acpi_handle handle;

		/* Name the domain after the device's full ACPI path. */
		handle = acpi_device_handle(to_acpi_device_node(fwnode));
		if (acpi_get_name(handle, ACPI_FULL_PATHNAME, &buf) == AE_OK) {
			domain->name = buf.pointer;
			domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
		}

		domain->fwnode = fwnode;
#endif
	} else if (of_node) {
		char *name;

		/*
		 * DT paths contain '/', which debugfs is legitimately
		 * unhappy about. Replace them with ':', which does
		 * the trick and is not as offensive as '\'...
		 */
		name = kasprintf(GFP_KERNEL, "%pOF", of_node);
		if (!name) {
			kfree(domain);
			return NULL;
		}

		strreplace(name, '/', ':');

		domain->name = name;
		domain->fwnode = fwnode;
		domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
	}

	if (!domain->name) {
		/* Last resort: synthesize a unique "unknown-N" name. */
		if (fwnode)
			pr_err("Invalid fwnode type for irqdomain\n");
		domain->name = kasprintf(GFP_KERNEL, "unknown-%d",
					 atomic_inc_return(&unknown_domains));
		if (!domain->name) {
			kfree(domain);
			return NULL;
		}
		domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
	}

	/* Hold a reference on the OF node for the domain's lifetime. */
	of_node_get(of_node);

	/* Fill structure */
	INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
	mutex_init(&domain->revmap_tree_mutex);
	domain->ops = ops;
	domain->host_data = host_data;
	domain->hwirq_max = hwirq_max;
	domain->revmap_size = size;
	domain->revmap_direct_max_irq = direct_max;

	irq_domain_check_hierarchy(domain);

	mutex_lock(&irq_domain_mutex);
	debugfs_add_domain_dir(domain);
	list_add(&domain->link, &irq_domain_list);
	mutex_unlock(&irq_domain_mutex);

	pr_debug("Added domain %s\n", domain->name);
	return domain;
}
EXPORT_SYMBOL_GPL(__irq_domain_add);
/**
 * irq_domain_remove() - Remove an irq domain.
 * @domain: domain to remove
 *
 * This routine is used to remove an irq domain. The caller must ensure
 * that all mappings within the domain have been disposed of prior to
 * use, depending on the revmap type.
 */
void irq_domain_remove(struct irq_domain *domain)
{
	mutex_lock(&irq_domain_mutex);
	debugfs_remove_domain_dir(domain);

	/* A non-empty radix tree means the caller leaked mappings. */
	WARN_ON(!radix_tree_empty(&domain->revmap_tree));

	list_del(&domain->link);

	/*
	 * If the going away domain is the default one, reset it.
	 */
	if (unlikely(irq_default_domain == domain))
		irq_set_default_host(NULL);

	mutex_unlock(&irq_domain_mutex);

	pr_debug("Removed domain %s\n", domain->name);

	/* Drop the OF node reference taken by __irq_domain_add(). */
	of_node_put(irq_domain_get_of_node(domain));

	/* The name is only freed here when the domain owns it. */
	if (domain->flags & IRQ_DOMAIN_NAME_ALLOCATED)
		kfree(domain->name);
	kfree(domain);
}
EXPORT_SYMBOL_GPL(irq_domain_remove);
/**
 * irq_domain_update_bus_token - Update the domain's bus token and rename it
 * @domain: domain to update
 * @bus_token: new bus token to set
 *
 * Renames the domain to "<name>-<bus_token>" and re-creates its debugfs
 * directory under the new name. A no-op when the token is unchanged; if
 * the new name cannot be allocated, the token is still updated but the
 * old name (and debugfs dir) is kept.
 */
void irq_domain_update_bus_token(struct irq_domain *domain,
				 enum irq_domain_bus_token bus_token)
{
	char *name;

	if (domain->bus_token == bus_token)
		return;

	mutex_lock(&irq_domain_mutex);

	domain->bus_token = bus_token;

	name = kasprintf(GFP_KERNEL, "%s-%d", domain->name, bus_token);
	if (!name) {
		mutex_unlock(&irq_domain_mutex);
		return;
	}

	debugfs_remove_domain_dir(domain);

	/* Free the old name only if the domain owned it. */
	if (domain->flags & IRQ_DOMAIN_NAME_ALLOCATED)
		kfree(domain->name);
	else
		domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;

	domain->name = name;
	debugfs_add_domain_dir(domain);

	mutex_unlock(&irq_domain_mutex);
}
/**
 * irq_domain_add_simple() - Register an irq_domain and optionally map a range of irqs
 * @of_node: pointer to interrupt controller's device tree node.
 * @size: total number of irqs in mapping
 * @first_irq: first number of irq block assigned to the domain,
 *	pass zero to assign irqs on-the-fly. If first_irq is non-zero, then
 *	pre-map all of the irqs in the domain to virqs starting at first_irq.
 * @ops: domain callbacks
 * @host_data: Controller private data pointer
 *
 * Allocates an irq_domain, and optionally if first_irq is positive then also
 * allocate irq_descs and map all of the hwirqs to virqs starting at first_irq.
 *
 * This is intended to implement the expected behaviour for most
 * interrupt controllers. If device tree is used, then first_irq will be 0 and
 * irqs get mapped dynamically on the fly. However, if the controller requires
 * static virq assignments (non-DT boot) then it will set that up correctly.
 */
struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
					 unsigned int size,
					 unsigned int first_irq,
					 const struct irq_domain_ops *ops,
					 void *host_data)
{
	struct irq_domain *domain;

	domain = __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data);
	if (!domain)
		return NULL;

	if (first_irq > 0) {
		if (IS_ENABLED(CONFIG_SPARSE_IRQ)) {
			/* attempt to allocate irq_descs */
			int rc = irq_alloc_descs(first_irq, first_irq, size,
						 of_node_to_nid(of_node));
			/* Failure is tolerated: descs may be pre-allocated. */
			if (rc < 0)
				pr_info("Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
					first_irq);
		}
		/* Static scheme: hwirqs 0..size-1 -> virqs first_irq.. */
		irq_domain_associate_many(domain, first_irq, 0, size);
	}

	return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_add_simple);
  293. /**
  294. * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
  295. * @of_node: pointer to interrupt controller's device tree node.
  296. * @size: total number of irqs in legacy mapping
  297. * @first_irq: first number of irq block assigned to the domain
  298. * @first_hwirq: first hwirq number to use for the translation. Should normally
  299. * be '0', but a positive integer can be used if the effective
  300. * hwirqs numbering does not begin at zero.
  301. * @ops: map/unmap domain callbacks
  302. * @host_data: Controller private data pointer
  303. *
  304. * Note: the map() callback will be called before this function returns
  305. * for all legacy interrupts except 0 (which is always the invalid irq for
  306. * a legacy controller).
  307. */
  308. struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
  309. unsigned int size,
  310. unsigned int first_irq,
  311. irq_hw_number_t first_hwirq,
  312. const struct irq_domain_ops *ops,
  313. void *host_data)
  314. {
  315. struct irq_domain *domain;
  316. domain = __irq_domain_add(of_node_to_fwnode(of_node), first_hwirq + size,
  317. first_hwirq + size, 0, ops, host_data);
  318. if (domain)
  319. irq_domain_associate_many(domain, first_irq, first_hwirq, size);
  320. return domain;
  321. }
  322. EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
/**
 * irq_find_matching_fwspec() - Locates a domain for a given fwspec
 * @fwspec: FW specifier for an interrupt
 * @bus_token: domain-specific data
 *
 * Returns the first registered domain that matches, or NULL.
 */
struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
					    enum irq_domain_bus_token bus_token)
{
	struct irq_domain *h, *found = NULL;
	struct fwnode_handle *fwnode = fwspec->fwnode;
	int rc;

	/* We might want to match the legacy controller last since
	 * it might potentially be set to match all interrupts in
	 * the absence of a device node. This isn't a problem so far
	 * yet though...
	 *
	 * bus_token == DOMAIN_BUS_ANY matches any domain, any other
	 * values must generate an exact match for the domain to be
	 * selected.
	 */
	mutex_lock(&irq_domain_mutex);
	list_for_each_entry(h, &irq_domain_list, link) {
		/*
		 * Prefer the fwspec-aware ->select() (only usable with
		 * parameter cells), then the legacy OF ->match(); otherwise
		 * fall back to comparing fwnode and bus token directly.
		 */
		if (h->ops->select && fwspec->param_count)
			rc = h->ops->select(h, fwspec, bus_token);
		else if (h->ops->match)
			rc = h->ops->match(h, to_of_node(fwnode), bus_token);
		else
			rc = ((fwnode != NULL) && (h->fwnode == fwnode) &&
			      ((bus_token == DOMAIN_BUS_ANY) ||
			       (h->bus_token == bus_token)));

		if (rc) {
			found = h;
			break;
		}
	}
	mutex_unlock(&irq_domain_mutex);
	return found;
}
EXPORT_SYMBOL_GPL(irq_find_matching_fwspec);
  362. /**
  363. * irq_domain_check_msi_remap - Check whether all MSI irq domains implement
  364. * IRQ remapping
  365. *
  366. * Return: false if any MSI irq domain does not support IRQ remapping,
  367. * true otherwise (including if there is no MSI irq domain)
  368. */
  369. bool irq_domain_check_msi_remap(void)
  370. {
  371. struct irq_domain *h;
  372. bool ret = true;
  373. mutex_lock(&irq_domain_mutex);
  374. list_for_each_entry(h, &irq_domain_list, link) {
  375. if (irq_domain_is_msi(h) &&
  376. !irq_domain_hierarchical_is_msi_remap(h)) {
  377. ret = false;
  378. break;
  379. }
  380. }
  381. mutex_unlock(&irq_domain_mutex);
  382. return ret;
  383. }
  384. EXPORT_SYMBOL_GPL(irq_domain_check_msi_remap);
/**
 * irq_set_default_host() - Set a "default" irq domain
 * @domain: default domain pointer, may be NULL to clear the default
 *
 * For convenience, it's possible to set a "default" domain that will be used
 * whenever NULL is passed to irq_create_mapping(). It makes life easier for
 * platforms that want to manipulate a few hard coded interrupt numbers that
 * aren't properly represented in the device-tree.
 */
void irq_set_default_host(struct irq_domain *domain)
{
	pr_debug("Default domain set to @0x%p\n", domain);

	irq_default_domain = domain;
}
EXPORT_SYMBOL_GPL(irq_set_default_host);
  400. static void irq_domain_clear_mapping(struct irq_domain *domain,
  401. irq_hw_number_t hwirq)
  402. {
  403. if (hwirq < domain->revmap_size) {
  404. domain->linear_revmap[hwirq] = 0;
  405. } else {
  406. mutex_lock(&domain->revmap_tree_mutex);
  407. radix_tree_delete(&domain->revmap_tree, hwirq);
  408. mutex_unlock(&domain->revmap_tree_mutex);
  409. }
  410. }
  411. static void irq_domain_set_mapping(struct irq_domain *domain,
  412. irq_hw_number_t hwirq,
  413. struct irq_data *irq_data)
  414. {
  415. if (hwirq < domain->revmap_size) {
  416. domain->linear_revmap[hwirq] = irq_data->irq;
  417. } else {
  418. mutex_lock(&domain->revmap_tree_mutex);
  419. radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
  420. mutex_unlock(&domain->revmap_tree_mutex);
  421. }
  422. }
/**
 * irq_domain_disassociate - Break the association between a virq and a hwirq
 * @domain: domain that owns the mapping
 * @irq: linux irq number to tear down
 *
 * Reverses irq_domain_associate(): blocks further requests, removes the
 * chip/handler, notifies the controller via ->unmap() and clears the
 * reverse mapping. The statement order below is deliberate.
 */
void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
{
	struct irq_data *irq_data = irq_get_irq_data(irq);
	irq_hw_number_t hwirq;

	if (WARN(!irq_data || irq_data->domain != domain,
		 "virq%i doesn't exist; cannot disassociate\n", irq))
		return;

	/* Saved now: irq_data->hwirq is cleared before the revmap update. */
	hwirq = irq_data->hwirq;

	/* Block new request_irq() users before dismantling the mapping. */
	irq_set_status_flags(irq, IRQ_NOREQUEST);

	/* remove chip and handler */
	irq_set_chip_and_handler(irq, NULL, NULL);

	/* Make sure it's completed */
	synchronize_irq(irq);

	/* Tell the PIC about it */
	if (domain->ops->unmap)
		domain->ops->unmap(domain, irq);

	/* Full barrier: order the teardown above against the clears below. */
	smp_mb();

	irq_data->domain = NULL;
	irq_data->hwirq = 0;

	domain->mapcount--;

	/* Clear reverse map for this hwirq */
	irq_domain_clear_mapping(domain, hwirq);
}
/**
 * irq_domain_associate - Bind a linux irq to a hwirq within a domain
 * @domain: domain owning the mapping
 * @virq: pre-allocated linux irq number
 * @hwirq: hardware irq number in the domain's space
 *
 * Records the association, invokes the domain's ->map() callback and
 * installs the reverse mapping. Returns 0 on success or a negative errno
 * (invalid arguments, already associated, or ->map() failure).
 */
int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
			 irq_hw_number_t hwirq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	int ret;

	if (WARN(hwirq >= domain->hwirq_max,
		 "error: hwirq 0x%x is too large for %s\n", (int)hwirq, domain->name))
		return -EINVAL;
	if (WARN(!irq_data, "error: virq%i is not allocated", virq))
		return -EINVAL;
	if (WARN(irq_data->domain, "error: virq%i is already associated", virq))
		return -EINVAL;

	mutex_lock(&irq_domain_mutex);
	/* Set before ->map() so the callback sees a fully-formed irq_data. */
	irq_data->hwirq = hwirq;
	irq_data->domain = domain;
	if (domain->ops->map) {
		ret = domain->ops->map(domain, virq, hwirq);
		if (ret != 0) {
			/*
			 * If map() returns -EPERM, this interrupt is protected
			 * by the firmware or some other service and shall not
			 * be mapped. Don't bother telling the user about it.
			 */
			if (ret != -EPERM) {
				pr_info("%s didn't like hwirq-0x%lx to VIRQ%i mapping (rc=%d)\n",
					domain->name, hwirq, virq, ret);
			}
			/* Roll back the partial association. */
			irq_data->domain = NULL;
			irq_data->hwirq = 0;
			mutex_unlock(&irq_domain_mutex);
			return ret;
		}

		/* If not already assigned, give the domain the chip's name */
		if (!domain->name && irq_data->chip)
			domain->name = irq_data->chip->name;
	}

	domain->mapcount++;
	irq_domain_set_mapping(domain, hwirq, irq_data);
	mutex_unlock(&irq_domain_mutex);

	/* Mapping is live: allow request_irq() on it. */
	irq_clear_status_flags(virq, IRQ_NOREQUEST);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_associate);
  489. void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
  490. irq_hw_number_t hwirq_base, int count)
  491. {
  492. struct device_node *of_node;
  493. int i;
  494. of_node = irq_domain_get_of_node(domain);
  495. pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
  496. of_node_full_name(of_node), irq_base, (int)hwirq_base, count);
  497. for (i = 0; i < count; i++) {
  498. irq_domain_associate(domain, irq_base + i, hwirq_base + i);
  499. }
  500. }
  501. EXPORT_SYMBOL_GPL(irq_domain_associate_many);
  502. /**
  503. * irq_create_direct_mapping() - Allocate an irq for direct mapping
  504. * @domain: domain to allocate the irq for or NULL for default domain
  505. *
  506. * This routine is used for irq controllers which can choose the hardware
  507. * interrupt numbers they generate. In such a case it's simplest to use
  508. * the linux irq as the hardware interrupt number. It still uses the linear
  509. * or radix tree to store the mapping, but the irq controller can optimize
  510. * the revmap path by using the hwirq directly.
  511. */
  512. unsigned int irq_create_direct_mapping(struct irq_domain *domain)
  513. {
  514. struct device_node *of_node;
  515. unsigned int virq;
  516. if (domain == NULL)
  517. domain = irq_default_domain;
  518. of_node = irq_domain_get_of_node(domain);
  519. virq = irq_alloc_desc_from(1, of_node_to_nid(of_node));
  520. if (!virq) {
  521. pr_debug("create_direct virq allocation failed\n");
  522. return 0;
  523. }
  524. if (virq >= domain->revmap_direct_max_irq) {
  525. pr_err("ERROR: no free irqs available below %i maximum\n",
  526. domain->revmap_direct_max_irq);
  527. irq_free_desc(virq);
  528. return 0;
  529. }
  530. pr_debug("create_direct obtained virq %d\n", virq);
  531. if (irq_domain_associate(domain, virq, virq)) {
  532. irq_free_desc(virq);
  533. return 0;
  534. }
  535. return virq;
  536. }
  537. EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
/**
 * irq_create_mapping() - Map a hardware interrupt into linux irq space
 * @domain: domain owning this hardware interrupt or NULL for default domain
 * @hwirq: hardware irq number in that domain space
 *
 * Only one mapping per hardware interrupt is permitted. Returns a linux
 * irq number, or 0 on failure.
 * If the sense/trigger is to be specified, set_irq_type() should be called
 * on the number returned from that call.
 */
unsigned int irq_create_mapping(struct irq_domain *domain,
				irq_hw_number_t hwirq)
{
	struct device_node *of_node;
	int virq;

	pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);

	/* Look for default domain if necessary */
	if (domain == NULL)
		domain = irq_default_domain;
	if (domain == NULL) {
		WARN(1, "%s(, %lx) called with NULL domain\n", __func__, hwirq);
		return 0;
	}
	pr_debug("-> using domain @%p\n", domain);

	of_node = irq_domain_get_of_node(domain);

	/* Check if mapping already exists */
	virq = irq_find_mapping(domain, hwirq);
	if (virq) {
		pr_debug("-> existing mapping on virq %d\n", virq);
		return virq;
	}

	/* Allocate a virtual interrupt number */
	virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node), NULL);
	if (virq <= 0) {
		pr_debug("-> virq allocation failed\n");
		return 0;
	}

	if (irq_domain_associate(domain, virq, hwirq)) {
		/* Association failed: release the freshly allocated desc. */
		irq_free_desc(virq);
		return 0;
	}

	pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
		 hwirq, of_node_full_name(of_node), virq);

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);
  584. /**
  585. * irq_create_strict_mappings() - Map a range of hw irqs to fixed linux irqs
  586. * @domain: domain owning the interrupt range
  587. * @irq_base: beginning of linux IRQ range
  588. * @hwirq_base: beginning of hardware IRQ range
  589. * @count: Number of interrupts to map
  590. *
  591. * This routine is used for allocating and mapping a range of hardware
  592. * irqs to linux irqs where the linux irq numbers are at pre-defined
  593. * locations. For use by controllers that already have static mappings
  594. * to insert in to the domain.
  595. *
  596. * Non-linear users can use irq_create_identity_mapping() for IRQ-at-a-time
  597. * domain insertion.
  598. *
  599. * 0 is returned upon success, while any failure to establish a static
  600. * mapping is treated as an error.
  601. */
  602. int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
  603. irq_hw_number_t hwirq_base, int count)
  604. {
  605. struct device_node *of_node;
  606. int ret;
  607. of_node = irq_domain_get_of_node(domain);
  608. ret = irq_alloc_descs(irq_base, irq_base, count,
  609. of_node_to_nid(of_node));
  610. if (unlikely(ret < 0))
  611. return ret;
  612. irq_domain_associate_many(domain, irq_base, hwirq_base, count);
  613. return 0;
  614. }
  615. EXPORT_SYMBOL_GPL(irq_create_strict_mappings);
/*
 * Translate a firmware interrupt specifier into a (hwirq, type) pair via
 * the domain's ->translate() (hierarchy) or legacy OF ->xlate() callback.
 *
 * With neither callback present, the first parameter cell is taken as the
 * hwirq and *type is left untouched — callers pre-initialise it (see
 * irq_create_fwspec_mapping()).
 */
static int irq_domain_translate(struct irq_domain *d,
				struct irq_fwspec *fwspec,
				irq_hw_number_t *hwirq, unsigned int *type)
{
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	if (d->ops->translate)
		return d->ops->translate(d, fwspec, hwirq, type);
#endif
	if (d->ops->xlate)
		return d->ops->xlate(d, to_of_node(fwspec->fwnode),
				     fwspec->param, fwspec->param_count,
				     hwirq, type);

	/* If domain has no translation, then we assume interrupt line */
	*hwirq = fwspec->param[0];
	return 0;
}
  632. static void of_phandle_args_to_fwspec(struct of_phandle_args *irq_data,
  633. struct irq_fwspec *fwspec)
  634. {
  635. int i;
  636. fwspec->fwnode = irq_data->np ? &irq_data->np->fwnode : NULL;
  637. fwspec->param_count = irq_data->args_count;
  638. for (i = 0; i < irq_data->args_count; i++)
  639. fwspec->param[i] = irq_data->args[i];
  640. }
/**
 * irq_create_fwspec_mapping - Map an interrupt described by a fwspec
 * @fwspec: firmware interrupt specifier (fwnode + parameter cells)
 *
 * Locates the matching domain (or the default one when the fwspec has no
 * fwnode), translates the specifier to a (hwirq, type) pair and creates —
 * or reuses — the linux irq mapping. Returns the linux irq number, or 0
 * on failure (no domain, translation error, trigger-type conflict).
 */
unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
{
	struct irq_domain *domain;
	struct irq_data *irq_data;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	int virq;

	if (fwspec->fwnode) {
		/* Prefer a wired domain, fall back to any bus token. */
		domain = irq_find_matching_fwspec(fwspec, DOMAIN_BUS_WIRED);
		if (!domain)
			domain = irq_find_matching_fwspec(fwspec, DOMAIN_BUS_ANY);
	} else {
		domain = irq_default_domain;
	}

	if (!domain) {
		pr_warn("no irq domain found for %s !\n",
			of_node_full_name(to_of_node(fwspec->fwnode)));
		return 0;
	}

	if (irq_domain_translate(domain, fwspec, &hwirq, &type))
		return 0;

	/*
	 * WARN if the irqchip returns a type with bits
	 * outside the sense mask set and clear these bits.
	 */
	if (WARN_ON(type & ~IRQ_TYPE_SENSE_MASK))
		type &= IRQ_TYPE_SENSE_MASK;

	/*
	 * If we've already configured this interrupt,
	 * don't do it again, or hell will break loose.
	 */
	virq = irq_find_mapping(domain, hwirq);
	if (virq) {
		/*
		 * If the trigger type is not specified or matches the
		 * current trigger type then we are done so return the
		 * interrupt number.
		 */
		if (type == IRQ_TYPE_NONE || type == irq_get_trigger_type(virq))
			return virq;

		/*
		 * If the trigger type has not been set yet, then set
		 * it now and return the interrupt number.
		 */
		if (irq_get_trigger_type(virq) == IRQ_TYPE_NONE) {
			irq_data = irq_get_irq_data(virq);
			if (!irq_data)
				return 0;

			irqd_set_trigger_type(irq_data, type);
			return virq;
		}

		pr_warn("type mismatch, failed to map hwirq-%lu for %s!\n",
			hwirq, of_node_full_name(to_of_node(fwspec->fwnode)));
		return 0;
	}

	if (irq_domain_is_hierarchy(domain)) {
		/* Hierarchical domains allocate through the full stack. */
		virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, fwspec);
		if (virq <= 0)
			return 0;
	} else {
		/* Create mapping */
		virq = irq_create_mapping(domain, hwirq);
		if (!virq)
			return virq;
	}

	irq_data = irq_get_irq_data(virq);
	if (!irq_data) {
		/* Undo whichever allocation path was taken above. */
		if (irq_domain_is_hierarchy(domain))
			irq_domain_free_irqs(virq, 1);
		else
			irq_dispose_mapping(virq);
		return 0;
	}

	/* Store trigger type */
	irqd_set_trigger_type(irq_data, type);

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_fwspec_mapping);
  719. unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data)
  720. {
  721. struct irq_fwspec fwspec;
  722. of_phandle_args_to_fwspec(irq_data, &fwspec);
  723. return irq_create_fwspec_mapping(&fwspec);
  724. }
  725. EXPORT_SYMBOL_GPL(irq_create_of_mapping);
  726. /**
  727. * irq_dispose_mapping() - Unmap an interrupt
  728. * @virq: linux irq number of the interrupt to unmap
  729. */
  730. void irq_dispose_mapping(unsigned int virq)
  731. {
  732. struct irq_data *irq_data = irq_get_irq_data(virq);
  733. struct irq_domain *domain;
  734. if (!virq || !irq_data)
  735. return;
  736. domain = irq_data->domain;
  737. if (WARN_ON(domain == NULL))
  738. return;
  739. if (irq_domain_is_hierarchy(domain)) {
  740. irq_domain_free_irqs(virq, 1);
  741. } else {
  742. irq_domain_disassociate(domain, virq);
  743. irq_free_desc(virq);
  744. }
  745. }
  746. EXPORT_SYMBOL_GPL(irq_dispose_mapping);
/**
 * irq_find_mapping() - Find a linux irq from an hw irq number.
 * @domain: domain owning this hardware interrupt
 * @hwirq: hardware irq number in that domain space
 *
 * Returns the linux irq number mapped to @hwirq, or 0 if no mapping exists.
 */
unsigned int irq_find_mapping(struct irq_domain *domain,
			      irq_hw_number_t hwirq)
{
	struct irq_data *data;

	/* Look for default domain if necessary */
	if (domain == NULL)
		domain = irq_default_domain;
	if (domain == NULL)
		return 0;

	if (hwirq < domain->revmap_direct_max_irq) {
		/* Direct-mapped range: linux irq == hwirq when the data matches */
		data = irq_domain_get_irq_data(domain, hwirq);
		if (data && data->hwirq == hwirq)
			return hwirq;
	}

	/* Check if the hwirq is in the linear revmap. */
	if (hwirq < domain->revmap_size)
		return domain->linear_revmap[hwirq];

	/* hwirqs beyond the linear map live in the RCU-protected radix tree */
	rcu_read_lock();
	data = radix_tree_lookup(&domain->revmap_tree, hwirq);
	rcu_read_unlock();
	return data ? data->irq : 0;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);
  775. /**
  776. * irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings
  777. *
  778. * Device Tree IRQ specifier translation function which works with one cell
  779. * bindings where the cell value maps directly to the hwirq number.
  780. */
  781. int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
  782. const u32 *intspec, unsigned int intsize,
  783. unsigned long *out_hwirq, unsigned int *out_type)
  784. {
  785. if (WARN_ON(intsize < 1))
  786. return -EINVAL;
  787. *out_hwirq = intspec[0];
  788. *out_type = IRQ_TYPE_NONE;
  789. return 0;
  790. }
  791. EXPORT_SYMBOL_GPL(irq_domain_xlate_onecell);
  792. /**
  793. * irq_domain_xlate_twocell() - Generic xlate for direct two cell bindings
  794. *
  795. * Device Tree IRQ specifier translation function which works with two cell
  796. * bindings where the cell values map directly to the hwirq number
  797. * and linux irq flags.
  798. */
  799. int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
  800. const u32 *intspec, unsigned int intsize,
  801. irq_hw_number_t *out_hwirq, unsigned int *out_type)
  802. {
  803. if (WARN_ON(intsize < 2))
  804. return -EINVAL;
  805. *out_hwirq = intspec[0];
  806. *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
  807. return 0;
  808. }
  809. EXPORT_SYMBOL_GPL(irq_domain_xlate_twocell);
  810. /**
  811. * irq_domain_xlate_onetwocell() - Generic xlate for one or two cell bindings
  812. *
  813. * Device Tree IRQ specifier translation function which works with either one
  814. * or two cell bindings where the cell values map directly to the hwirq number
  815. * and linux irq flags.
  816. *
  817. * Note: don't use this function unless your interrupt controller explicitly
  818. * supports both one and two cell bindings. For the majority of controllers
  819. * the _onecell() or _twocell() variants above should be used.
  820. */
  821. int irq_domain_xlate_onetwocell(struct irq_domain *d,
  822. struct device_node *ctrlr,
  823. const u32 *intspec, unsigned int intsize,
  824. unsigned long *out_hwirq, unsigned int *out_type)
  825. {
  826. if (WARN_ON(intsize < 1))
  827. return -EINVAL;
  828. *out_hwirq = intspec[0];
  829. if (intsize > 1)
  830. *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
  831. else
  832. *out_type = IRQ_TYPE_NONE;
  833. return 0;
  834. }
  835. EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell);
/*
 * Fallback domain ops providing only specifier translation, accepting
 * both one- and two-cell DT bindings via irq_domain_xlate_onetwocell().
 */
const struct irq_domain_ops irq_domain_simple_ops = {
	.xlate = irq_domain_xlate_onetwocell,
};
EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
/*
 * Allocate @cnt irq descriptors. If @virq >= 0 the caller wants that exact
 * linux irq number; otherwise the search is seeded at @hwirq so that, when
 * possible, the linux irq number equals the hardware irq number.
 */
int irq_domain_alloc_descs(int virq, unsigned int cnt, irq_hw_number_t hwirq,
			   int node, const struct cpumask *affinity)
{
	unsigned int hint;

	if (virq >= 0) {
		/* Caller requested a specific linux irq number */
		virq = __irq_alloc_descs(virq, virq, cnt, node, THIS_MODULE,
					 affinity);
	} else {
		hint = hwirq % nr_irqs;
		if (hint == 0)
			hint++;	/* don't start the search at irq 0 */
		virq = __irq_alloc_descs(-1, hint, cnt, node, THIS_MODULE,
					 affinity);
		/* Nothing free at/above the hint: retry from the bottom */
		if (virq <= 0 && hint > 1) {
			virq = __irq_alloc_descs(-1, 1, cnt, node, THIS_MODULE,
						 affinity);
		}
	}

	return virq;
}
  860. #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
  861. /**
  862. * irq_domain_create_hierarchy - Add a irqdomain into the hierarchy
  863. * @parent: Parent irq domain to associate with the new domain
  864. * @flags: Irq domain flags associated to the domain
  865. * @size: Size of the domain. See below
  866. * @fwnode: Optional fwnode of the interrupt controller
  867. * @ops: Pointer to the interrupt domain callbacks
  868. * @host_data: Controller private data pointer
  869. *
  870. * If @size is 0 a tree domain is created, otherwise a linear domain.
  871. *
  872. * If successful the parent is associated to the new domain and the
  873. * domain flags are set.
  874. * Returns pointer to IRQ domain, or NULL on failure.
  875. */
  876. struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
  877. unsigned int flags,
  878. unsigned int size,
  879. struct fwnode_handle *fwnode,
  880. const struct irq_domain_ops *ops,
  881. void *host_data)
  882. {
  883. struct irq_domain *domain;
  884. if (size)
  885. domain = irq_domain_create_linear(fwnode, size, ops, host_data);
  886. else
  887. domain = irq_domain_create_tree(fwnode, ops, host_data);
  888. if (domain) {
  889. domain->parent = parent;
  890. domain->flags |= flags;
  891. }
  892. return domain;
  893. }
  894. EXPORT_SYMBOL_GPL(irq_domain_create_hierarchy);
/*
 * Publish a freshly allocated irq in the reverse map of every domain in
 * its hierarchy and make it requestable.
 */
static void irq_domain_insert_irq(int virq)
{
	struct irq_data *data;

	for (data = irq_get_irq_data(virq); data; data = data->parent_data) {
		struct irq_domain *domain = data->domain;

		domain->mapcount++;
		irq_domain_set_mapping(domain, data->hwirq, data);

		/* If not already assigned, give the domain the chip's name */
		if (!domain->name && data->chip)
			domain->name = data->chip->name;
	}

	/* Everything is visible in the revmaps; allow request_irq() now */
	irq_clear_status_flags(virq, IRQ_NOREQUEST);
}
/*
 * Remove an irq from the reverse maps of all domains in its hierarchy
 * prior to freeing it.
 */
static void irq_domain_remove_irq(int virq)
{
	struct irq_data *data;

	/* Prevent new requests and detach the chip/flow handler first */
	irq_set_status_flags(virq, IRQ_NOREQUEST);
	irq_set_chip_and_handler(virq, NULL, NULL);

	/* Wait for in-flight handlers to complete before unmapping */
	synchronize_irq(virq);
	smp_mb();

	for (data = irq_get_irq_data(virq); data; data = data->parent_data) {
		struct irq_domain *domain = data->domain;
		irq_hw_number_t hwirq = data->hwirq;

		domain->mapcount--;
		irq_domain_clear_mapping(domain, hwirq);
	}
}
  922. static struct irq_data *irq_domain_insert_irq_data(struct irq_domain *domain,
  923. struct irq_data *child)
  924. {
  925. struct irq_data *irq_data;
  926. irq_data = kzalloc_node(sizeof(*irq_data), GFP_KERNEL,
  927. irq_data_get_node(child));
  928. if (irq_data) {
  929. child->parent_data = irq_data;
  930. irq_data->irq = child->irq;
  931. irq_data->common = child->common;
  932. irq_data->domain = domain;
  933. }
  934. return irq_data;
  935. }
/*
 * Free the allocated parent irq_data chains of @nr_irqs consecutive irqs
 * starting at @virq. The outermost irq_data of each irq is embedded in
 * struct irq_desc and is only unlinked, never kfree()d here.
 */
static void irq_domain_free_irq_data(unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *irq_data, *tmp;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_get_irq_data(virq + i);
		tmp = irq_data->parent_data;
		/* Detach the embedded head from the chain before freeing */
		irq_data->parent_data = NULL;
		irq_data->domain = NULL;

		/* Walk and free every dynamically allocated parent entry */
		while (tmp) {
			irq_data = tmp;
			tmp = tmp->parent_data;
			kfree(irq_data);
		}
	}
}
/*
 * Build the per-domain irq_data chain for @nr_irqs consecutive irqs
 * starting at @virq, one entry per domain in @domain's parent hierarchy.
 * Returns 0 on success or -ENOMEM, in which case all chains built so far
 * (including the partial one) are freed again.
 */
static int irq_domain_alloc_irq_data(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *irq_data;
	struct irq_domain *parent;
	int i;

	/* The outermost irq_data is embedded in struct irq_desc */
	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_get_irq_data(virq + i);
		irq_data->domain = domain;

		/* One allocated irq_data per ancestor domain, linked upwards */
		for (parent = domain->parent; parent; parent = parent->parent) {
			irq_data = irq_domain_insert_irq_data(parent, irq_data);
			if (!irq_data) {
				/* i + 1 also covers the partially built chain */
				irq_domain_free_irq_data(virq, i + 1);
				return -ENOMEM;
			}
		}
	}

	return 0;
}
  972. /**
  973. * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain
  974. * @domain: domain to match
  975. * @virq: IRQ number to get irq_data
  976. */
  977. struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
  978. unsigned int virq)
  979. {
  980. struct irq_data *irq_data;
  981. for (irq_data = irq_get_irq_data(virq); irq_data;
  982. irq_data = irq_data->parent_data)
  983. if (irq_data->domain == domain)
  984. return irq_data;
  985. return NULL;
  986. }
  987. EXPORT_SYMBOL_GPL(irq_domain_get_irq_data);
  988. /**
  989. * irq_domain_set_hwirq_and_chip - Set hwirq and irqchip of @virq at @domain
  990. * @domain: Interrupt domain to match
  991. * @virq: IRQ number
  992. * @hwirq: The hwirq number
  993. * @chip: The associated interrupt chip
  994. * @chip_data: The associated chip data
  995. */
  996. int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, unsigned int virq,
  997. irq_hw_number_t hwirq, struct irq_chip *chip,
  998. void *chip_data)
  999. {
  1000. struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq);
  1001. if (!irq_data)
  1002. return -ENOENT;
  1003. irq_data->hwirq = hwirq;
  1004. irq_data->chip = chip ? chip : &no_irq_chip;
  1005. irq_data->chip_data = chip_data;
  1006. return 0;
  1007. }
  1008. EXPORT_SYMBOL_GPL(irq_domain_set_hwirq_and_chip);
/**
 * irq_domain_set_info - Set the complete data for a @virq in @domain
 * @domain:	Interrupt domain to match
 * @virq:	IRQ number
 * @hwirq:	The hardware interrupt number
 * @chip:	The associated interrupt chip
 * @chip_data:	The associated interrupt chip data
 * @handler:	The interrupt flow handler
 * @handler_data:	The interrupt flow handler data
 * @handler_name:	The interrupt handler name
 */
void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
			 irq_hw_number_t hwirq, struct irq_chip *chip,
			 void *chip_data, irq_flow_handler_t handler,
			 void *handler_data, const char *handler_name)
{
	irq_domain_set_hwirq_and_chip(domain, virq, hwirq, chip, chip_data);
	/* 0: no IRQ_* status flags are modified by the handler update */
	__irq_set_handler(virq, handler, 0, handler_name);
	irq_set_handler_data(virq, handler_data);
}
EXPORT_SYMBOL(irq_domain_set_info);
/**
 * irq_domain_reset_irq_data - Clear hwirq, chip and chip_data in @irq_data
 * @irq_data: The pointer to irq_data
 */
void irq_domain_reset_irq_data(struct irq_data *irq_data)
{
	irq_data->hwirq = 0;
	/* Reset to the dummy chip, not NULL, so chip callbacks stay safe */
	irq_data->chip = &no_irq_chip;
	irq_data->chip_data = NULL;
}
EXPORT_SYMBOL_GPL(irq_domain_reset_irq_data);
  1041. /**
  1042. * irq_domain_free_irqs_common - Clear irq_data and free the parent
  1043. * @domain: Interrupt domain to match
  1044. * @virq: IRQ number to start with
  1045. * @nr_irqs: The number of irqs to free
  1046. */
  1047. void irq_domain_free_irqs_common(struct irq_domain *domain, unsigned int virq,
  1048. unsigned int nr_irqs)
  1049. {
  1050. struct irq_data *irq_data;
  1051. int i;
  1052. for (i = 0; i < nr_irqs; i++) {
  1053. irq_data = irq_domain_get_irq_data(domain, virq + i);
  1054. if (irq_data)
  1055. irq_domain_reset_irq_data(irq_data);
  1056. }
  1057. irq_domain_free_irqs_parent(domain, virq, nr_irqs);
  1058. }
  1059. EXPORT_SYMBOL_GPL(irq_domain_free_irqs_common);
  1060. /**
  1061. * irq_domain_free_irqs_top - Clear handler and handler data, clear irqdata and free parent
  1062. * @domain: Interrupt domain to match
  1063. * @virq: IRQ number to start with
  1064. * @nr_irqs: The number of irqs to free
  1065. */
  1066. void irq_domain_free_irqs_top(struct irq_domain *domain, unsigned int virq,
  1067. unsigned int nr_irqs)
  1068. {
  1069. int i;
  1070. for (i = 0; i < nr_irqs; i++) {
  1071. irq_set_handler_data(virq + i, NULL);
  1072. irq_set_handler(virq + i, NULL);
  1073. }
  1074. irq_domain_free_irqs_common(domain, virq, nr_irqs);
  1075. }
/*
 * Invoke @domain's ->free() callback for @nr_irqs irqs starting at
 * @irq_base. Unlike ->alloc() (see irq_domain_check_hierarchy()), the
 * ->free() callback is optional.
 */
static void irq_domain_free_irqs_hierarchy(struct irq_domain *domain,
					   unsigned int irq_base,
					   unsigned int nr_irqs)
{
	if (domain->ops->free)
		domain->ops->free(domain, irq_base, nr_irqs);
}
  1083. int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
  1084. unsigned int irq_base,
  1085. unsigned int nr_irqs, void *arg)
  1086. {
  1087. return domain->ops->alloc(domain, irq_base, nr_irqs, arg);
  1088. }
/**
 * __irq_domain_alloc_irqs - Allocate IRQs from domain
 * @domain:	domain to allocate from
 * @irq_base:	allocate specified IRQ number if irq_base >= 0
 * @nr_irqs:	number of IRQs to allocate
 * @node:	NUMA node id for memory allocation
 * @arg:	domain specific argument
 * @realloc:	IRQ descriptors have already been allocated if true
 * @affinity:	Optional irq affinity mask for multiqueue devices
 *
 * Allocate IRQ numbers and initialized all data structures to support
 * hierarchy IRQ domains.
 * Parameter @realloc is mainly to support legacy IRQs.
 * Returns error code or allocated IRQ number
 *
 * The whole process to setup an IRQ has been split into two steps.
 * The first step, __irq_domain_alloc_irqs(), is to allocate IRQ
 * descriptor and required hardware resources. The second step,
 * irq_domain_activate_irq(), is to program hardwares with preallocated
 * resources. In this way, it's easier to rollback when failing to
 * allocate resources.
 */
int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
			    unsigned int nr_irqs, int node, void *arg,
			    bool realloc, const struct cpumask *affinity)
{
	int i, ret, virq;

	/* Fall back to the default domain when none is specified */
	if (domain == NULL) {
		domain = irq_default_domain;
		if (WARN(!domain, "domain is NULL; cannot allocate IRQ\n"))
			return -EINVAL;
	}

	if (!domain->ops->alloc) {
		pr_debug("domain->ops->alloc() is NULL\n");
		return -ENOSYS;
	}

	if (realloc && irq_base >= 0) {
		/* Legacy support: the descriptors already exist, reuse them */
		virq = irq_base;
	} else {
		virq = irq_domain_alloc_descs(irq_base, nr_irqs, 0, node,
					      affinity);
		if (virq < 0) {
			pr_debug("cannot allocate IRQ(base %d, count %d)\n",
				 irq_base, nr_irqs);
			return virq;
		}
	}

	/* Build the per-domain irq_data chains for the new irqs */
	if (irq_domain_alloc_irq_data(domain, virq, nr_irqs)) {
		pr_debug("cannot allocate memory for IRQ%d\n", virq);
		ret = -ENOMEM;
		goto out_free_desc;
	}

	/* Allocate hardware resources and publish the irqs under the mutex */
	mutex_lock(&irq_domain_mutex);
	ret = irq_domain_alloc_irqs_hierarchy(domain, virq, nr_irqs, arg);
	if (ret < 0) {
		mutex_unlock(&irq_domain_mutex);
		goto out_free_irq_data;
	}
	for (i = 0; i < nr_irqs; i++)
		irq_domain_insert_irq(virq + i);
	mutex_unlock(&irq_domain_mutex);

	return virq;

out_free_irq_data:
	irq_domain_free_irq_data(virq, nr_irqs);
out_free_desc:
	irq_free_descs(virq, nr_irqs);
	return ret;
}
/* The irq_data was moved, fix the revmap to refer to the new location */
static void irq_domain_fix_revmap(struct irq_data *d)
{
	void __rcu **slot;

	/*
	 * The linear revmap stores linux irq numbers, not irq_data
	 * pointers (see irq_find_mapping()), so a moved irq_data does
	 * not invalidate it.
	 */
	if (d->hwirq < d->domain->revmap_size)
		return; /* Not using radix tree. */

	/* Fix up the revmap. */
	mutex_lock(&d->domain->revmap_tree_mutex);
	slot = radix_tree_lookup_slot(&d->domain->revmap_tree, d->hwirq);
	if (slot)
		radix_tree_replace_slot(&d->domain->revmap_tree, slot, d);
	mutex_unlock(&d->domain->revmap_tree_mutex);
}
/**
 * irq_domain_push_irq() - Push a domain in to the top of a hierarchy.
 * @domain:	Domain to push.
 * @virq:	Irq to push the domain in to.
 * @arg:	Passed to the irq_domain_ops alloc() function.
 *
 * For an already existing irqdomain hierarchy, as might be obtained
 * via a call to pci_enable_msix(), add an additional domain to the
 * head of the processing chain.  Must be called before request_irq()
 * has been called.
 */
int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg)
{
	struct irq_data *child_irq_data;
	struct irq_data *root_irq_data = irq_get_irq_data(virq);
	struct irq_desc *desc;
	int rv = 0;

	/*
	 * Check that no action has been set, which indicates the virq
	 * is in a state where this function doesn't have to deal with
	 * races between interrupt handling and maintaining the
	 * hierarchy.  This will catch gross misuse.  Attempting to
	 * make the check race free would require holding locks across
	 * calls to struct irq_domain_ops->alloc(), which could lead
	 * to deadlock, so we just do a simple check before starting.
	 */
	desc = irq_to_desc(virq);
	if (!desc)
		return -EINVAL;
	if (WARN_ON(desc->action))
		return -EBUSY;

	if (domain == NULL)
		return -EINVAL;

	if (WARN_ON(!irq_domain_is_hierarchy(domain)))
		return -EINVAL;

	if (!root_irq_data)
		return -EINVAL;

	/* The pushed domain must stack directly on the current top domain */
	if (domain->parent != root_irq_data->domain)
		return -EINVAL;

	child_irq_data = kzalloc_node(sizeof(*child_irq_data), GFP_KERNEL,
				      irq_data_get_node(root_irq_data));
	if (!child_irq_data)
		return -ENOMEM;

	mutex_lock(&irq_domain_mutex);

	/* Copy the original irq_data. */
	*child_irq_data = *root_irq_data;

	/*
	 * Overwrite the root_irq_data, which is embedded in struct
	 * irq_desc, with values for this domain.
	 */
	root_irq_data->parent_data = child_irq_data;
	root_irq_data->domain = domain;
	root_irq_data->mask = 0;
	root_irq_data->hwirq = 0;
	root_irq_data->chip = NULL;
	root_irq_data->chip_data = NULL;

	/* May (probably does) set hwirq, chip, etc. */
	rv = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
	if (rv) {
		/* Restore the original irq_data. */
		*root_irq_data = *child_irq_data;
		kfree(child_irq_data);
		goto error;
	}

	/* The old top-level irq_data moved to child_irq_data: fix the revmap */
	irq_domain_fix_revmap(child_irq_data);
	irq_domain_set_mapping(domain, root_irq_data->hwirq, root_irq_data);

error:
	mutex_unlock(&irq_domain_mutex);

	return rv;
}
EXPORT_SYMBOL_GPL(irq_domain_push_irq);
/**
 * irq_domain_pop_irq() - Remove a domain from the top of a hierarchy.
 * @domain:	Domain to remove.
 * @virq:	Irq to remove the domain from.
 *
 * Undo the effects of a call to irq_domain_push_irq().  Must be
 * called either before request_irq() or after free_irq().
 */
int irq_domain_pop_irq(struct irq_domain *domain, int virq)
{
	struct irq_data *root_irq_data = irq_get_irq_data(virq);
	struct irq_data *child_irq_data;
	struct irq_data *tmp_irq_data;
	struct irq_desc *desc;

	/*
	 * Check that no action is set, which indicates the virq is in
	 * a state where this function doesn't have to deal with races
	 * between interrupt handling and maintaining the hierarchy.
	 * This will catch gross misuse.  Attempting to make the check
	 * race free would require holding locks across calls to
	 * struct irq_domain_ops->free(), which could lead to
	 * deadlock, so we just do a simple check before starting.
	 */
	desc = irq_to_desc(virq);
	if (!desc)
		return -EINVAL;
	if (WARN_ON(desc->action))
		return -EBUSY;

	if (domain == NULL)
		return -EINVAL;

	if (!root_irq_data)
		return -EINVAL;

	tmp_irq_data = irq_domain_get_irq_data(domain, virq);

	/* We can only "pop" if this domain is at the top of the list */
	if (WARN_ON(root_irq_data != tmp_irq_data))
		return -EINVAL;

	if (WARN_ON(root_irq_data->domain != domain))
		return -EINVAL;

	child_irq_data = root_irq_data->parent_data;
	if (WARN_ON(!child_irq_data))
		return -EINVAL;

	mutex_lock(&irq_domain_mutex);

	/* Unlink before freeing so the ->free() callback sees a sane chain */
	root_irq_data->parent_data = NULL;

	irq_domain_clear_mapping(domain, root_irq_data->hwirq);
	irq_domain_free_irqs_hierarchy(domain, virq, 1);

	/* Restore the original irq_data. */
	*root_irq_data = *child_irq_data;

	/* The irq_data moved back into irq_desc: fix the radix-tree revmap */
	irq_domain_fix_revmap(root_irq_data);

	mutex_unlock(&irq_domain_mutex);

	kfree(child_irq_data);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_pop_irq);
/**
 * irq_domain_free_irqs - Free IRQ number and associated data structures
 * @virq:	base IRQ number
 * @nr_irqs:	number of IRQs to free
 */
void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *data = irq_get_irq_data(virq);
	int i;

	if (WARN(!data || !data->domain || !data->domain->ops->free,
		 "NULL pointer, cannot free irq\n"))
		return;

	/* Unpublish and shut down the irqs before releasing hw resources */
	mutex_lock(&irq_domain_mutex);
	for (i = 0; i < nr_irqs; i++)
		irq_domain_remove_irq(virq + i);
	irq_domain_free_irqs_hierarchy(data->domain, virq, nr_irqs);
	mutex_unlock(&irq_domain_mutex);

	/* Finally release the irq_data chains and the descriptors */
	irq_domain_free_irq_data(virq, nr_irqs);
	irq_free_descs(virq, nr_irqs);
}
  1314. /**
  1315. * irq_domain_alloc_irqs_parent - Allocate interrupts from parent domain
  1316. * @irq_base: Base IRQ number
  1317. * @nr_irqs: Number of IRQs to allocate
  1318. * @arg: Allocation data (arch/domain specific)
  1319. *
  1320. * Check whether the domain has been setup recursive. If not allocate
  1321. * through the parent domain.
  1322. */
  1323. int irq_domain_alloc_irqs_parent(struct irq_domain *domain,
  1324. unsigned int irq_base, unsigned int nr_irqs,
  1325. void *arg)
  1326. {
  1327. if (!domain->parent)
  1328. return -ENOSYS;
  1329. return irq_domain_alloc_irqs_hierarchy(domain->parent, irq_base,
  1330. nr_irqs, arg);
  1331. }
  1332. EXPORT_SYMBOL_GPL(irq_domain_alloc_irqs_parent);
  1333. /**
  1334. * irq_domain_free_irqs_parent - Free interrupts from parent domain
  1335. * @irq_base: Base IRQ number
  1336. * @nr_irqs: Number of IRQs to free
  1337. *
  1338. * Check whether the domain has been setup recursive. If not free
  1339. * through the parent domain.
  1340. */
  1341. void irq_domain_free_irqs_parent(struct irq_domain *domain,
  1342. unsigned int irq_base, unsigned int nr_irqs)
  1343. {
  1344. if (!domain->parent)
  1345. return;
  1346. irq_domain_free_irqs_hierarchy(domain->parent, irq_base, nr_irqs);
  1347. }
  1348. EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
  1349. static void __irq_domain_deactivate_irq(struct irq_data *irq_data)
  1350. {
  1351. if (irq_data && irq_data->domain) {
  1352. struct irq_domain *domain = irq_data->domain;
  1353. if (domain->ops->deactivate)
  1354. domain->ops->deactivate(domain, irq_data);
  1355. if (irq_data->parent_data)
  1356. __irq_domain_deactivate_irq(irq_data->parent_data);
  1357. }
  1358. }
/*
 * Activate an irq hierarchy from the innermost (parent-most) domain
 * outwards. On failure of a domain's ->activate(), the already-activated
 * parents are deactivated again, so either the whole chain is active or
 * none of it is.
 */
static int __irq_domain_activate_irq(struct irq_data *irqd, bool reserve)
{
	int ret = 0;

	if (irqd && irqd->domain) {
		struct irq_domain *domain = irqd->domain;

		/* Activate the parent chain before this level */
		if (irqd->parent_data)
			ret = __irq_domain_activate_irq(irqd->parent_data,
							reserve);
		if (!ret && domain->ops->activate) {
			ret = domain->ops->activate(domain, irqd, reserve);
			/* Rollback in case of error */
			if (ret && irqd->parent_data)
				__irq_domain_deactivate_irq(irqd->parent_data);
		}
	}

	return ret;
}
/**
 * irq_domain_activate_irq - Call domain_ops->activate recursively to activate
 *			     interrupt
 * @irq_data:	Outermost irq_data associated with interrupt
 * @reserve:	If set only reserve an interrupt vector instead of assigning one
 *
 * This is the second step to call domain_ops->activate to program interrupt
 * controllers, so the interrupt could actually get delivered.
 *
 * Returns 0 on success or the error code from a failing ->activate().
 * Already-activated interrupts are not activated twice.
 */
int irq_domain_activate_irq(struct irq_data *irq_data, bool reserve)
{
	int ret = 0;

	if (!irqd_is_activated(irq_data))
		ret = __irq_domain_activate_irq(irq_data, reserve);
	if (!ret)
		irqd_set_activated(irq_data);
	return ret;
}
  1394. /**
  1395. * irq_domain_deactivate_irq - Call domain_ops->deactivate recursively to
  1396. * deactivate interrupt
  1397. * @irq_data: outermost irq_data associated with interrupt
  1398. *
  1399. * It calls domain_ops->deactivate to program interrupt controllers to disable
  1400. * interrupt delivery.
  1401. */
  1402. void irq_domain_deactivate_irq(struct irq_data *irq_data)
  1403. {
  1404. if (irqd_is_activated(irq_data)) {
  1405. __irq_domain_deactivate_irq(irq_data);
  1406. irqd_clr_activated(irq_data);
  1407. }
  1408. }
/*
 * Flag @domain as hierarchical when it provides an ->alloc() callback.
 */
static void irq_domain_check_hierarchy(struct irq_domain *domain)
{
	/* Hierarchy irq_domains must implement callback alloc() */
	if (domain->ops->alloc)
		domain->flags |= IRQ_DOMAIN_FLAG_HIERARCHY;
}
  1415. /**
  1416. * irq_domain_hierarchical_is_msi_remap - Check if the domain or any
  1417. * parent has MSI remapping support
  1418. * @domain: domain pointer
  1419. */
  1420. bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain)
  1421. {
  1422. for (; domain; domain = domain->parent) {
  1423. if (irq_domain_is_msi_remap(domain))
  1424. return true;
  1425. }
  1426. return false;
  1427. }
  1428. #else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
  1429. /**
  1430. * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain
  1431. * @domain: domain to match
  1432. * @virq: IRQ number to get irq_data
  1433. */
  1434. struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
  1435. unsigned int virq)
  1436. {
  1437. struct irq_data *irq_data = irq_get_irq_data(virq);
  1438. return (irq_data && irq_data->domain == domain) ? irq_data : NULL;
  1439. }
  1440. EXPORT_SYMBOL_GPL(irq_domain_get_irq_data);
/**
 * irq_domain_set_info - Set the complete data for a @virq in @domain
 * @domain:	Interrupt domain to match
 * @virq:	IRQ number
 * @hwirq:	The hardware interrupt number
 * @chip:	The associated interrupt chip
 * @chip_data:	The associated interrupt chip data
 * @handler:	The interrupt flow handler
 * @handler_data:	The interrupt flow handler data
 * @handler_name:	The interrupt handler name
 *
 * Non-hierarchy variant: @domain and @hwirq are accepted for interface
 * compatibility but are not used; only the chip, handler and their data
 * are installed on @virq.
 */
void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
			 irq_hw_number_t hwirq, struct irq_chip *chip,
			 void *chip_data, irq_flow_handler_t handler,
			 void *handler_data, const char *handler_name)
{
	irq_set_chip_and_handler_name(virq, chip, handler, handler_name);
	irq_set_chip_data(virq, chip_data);
	irq_set_handler_data(virq, handler_data);
}
/* Without CONFIG_IRQ_DOMAIN_HIERARCHY no domain can be hierarchical. */
static void irq_domain_check_hierarchy(struct irq_domain *domain)
{
}
  1464. #endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
  1465. #ifdef CONFIG_GENERIC_IRQ_DEBUGFS
  1466. static struct dentry *domain_dir;
  1467. static void
  1468. irq_domain_debug_show_one(struct seq_file *m, struct irq_domain *d, int ind)
  1469. {
  1470. seq_printf(m, "%*sname: %s\n", ind, "", d->name);
  1471. seq_printf(m, "%*ssize: %u\n", ind + 1, "",
  1472. d->revmap_size + d->revmap_direct_max_irq);
  1473. seq_printf(m, "%*smapped: %u\n", ind + 1, "", d->mapcount);
  1474. seq_printf(m, "%*sflags: 0x%08x\n", ind +1 , "", d->flags);
  1475. if (d->ops && d->ops->debug_show)
  1476. d->ops->debug_show(m, d, NULL, ind + 1);
  1477. #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
  1478. if (!d->parent)
  1479. return;
  1480. seq_printf(m, "%*sparent: %s\n", ind + 1, "", d->parent->name);
  1481. irq_domain_debug_show_one(m, d->parent, ind + 4);
  1482. #endif
  1483. }
  1484. static int irq_domain_debug_show(struct seq_file *m, void *p)
  1485. {
  1486. struct irq_domain *d = m->private;
  1487. /* Default domain? Might be NULL */
  1488. if (!d) {
  1489. if (!irq_default_domain)
  1490. return 0;
  1491. d = irq_default_domain;
  1492. }
  1493. irq_domain_debug_show_one(m, d, 0);
  1494. return 0;
  1495. }
  1496. DEFINE_SHOW_ATTRIBUTE(irq_domain_debug);
/*
 * Create the debugfs file for @d, once, and only for named domains after
 * the "domains" directory has been created.
 */
static void debugfs_add_domain_dir(struct irq_domain *d)
{
	if (!d->name || !domain_dir || d->debugfs_file)
		return;
	d->debugfs_file = debugfs_create_file(d->name, 0444, domain_dir, d,
					      &irq_domain_debug_fops);
}
/* Remove @d's debugfs file; debugfs_remove() tolerates a NULL dentry. */
static void debugfs_remove_domain_dir(struct irq_domain *d)
{
	debugfs_remove(d->debugfs_file);
}
/*
 * Create the debugfs "domains" directory under @root and populate it
 * with a "default" entry plus one file per already-registered domain.
 */
void __init irq_domain_debugfs_init(struct dentry *root)
{
	struct irq_domain *d;

	domain_dir = debugfs_create_dir("domains", root);
	if (!domain_dir)
		return;

	debugfs_create_file("default", 0444, domain_dir, NULL,
			    &irq_domain_debug_fops);
	/* Hold the mutex so domain (un)registration can't race the walk */
	mutex_lock(&irq_domain_mutex);
	list_for_each_entry(d, &irq_domain_list, link)
		debugfs_add_domain_dir(d);
	mutex_unlock(&irq_domain_mutex);
}
  1521. #endif