/* amdgpu_irq.c */
  1. /*
  2. * Copyright 2008 Advanced Micro Devices, Inc.
  3. * Copyright 2008 Red Hat Inc.
  4. * Copyright 2009 Jerome Glisse.
  5. *
  6. * Permission is hereby granted, free of charge, to any person obtaining a
  7. * copy of this software and associated documentation files (the "Software"),
  8. * to deal in the Software without restriction, including without limitation
  9. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  10. * and/or sell copies of the Software, and to permit persons to whom the
  11. * Software is furnished to do so, subject to the following conditions:
  12. *
  13. * The above copyright notice and this permission notice shall be included in
  14. * all copies or substantial portions of the Software.
  15. *
  16. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  19. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  22. * OTHER DEALINGS IN THE SOFTWARE.
  23. *
  24. * Authors: Dave Airlie
  25. * Alex Deucher
  26. * Jerome Glisse
  27. */
  28. /**
  29. * DOC: Interrupt Handling
  30. *
  31. * Interrupts generated within GPU hardware raise interrupt requests that are
  32. * passed to amdgpu IRQ handler which is responsible for detecting source and
  33. * type of the interrupt and dispatching matching handlers. If handling an
  34. * interrupt requires calling kernel functions that may sleep processing is
  35. * dispatched to work handlers.
  36. *
  37. * If MSI functionality is not disabled by module parameter then MSI
  38. * support will be enabled.
  39. *
  40. * For GPU interrupt sources that may be driven by another driver, IRQ domain
  41. * support is used (with mapping between virtual and hardware IRQs).
  42. */
  43. #include <linux/irq.h>
  44. #include <drm/drmP.h>
  45. #include <drm/drm_crtc_helper.h>
  46. #include <drm/amdgpu_drm.h>
  47. #include "amdgpu.h"
  48. #include "amdgpu_ih.h"
  49. #include "atom.h"
  50. #include "amdgpu_connectors.h"
  51. #include "amdgpu_trace.h"
  52. #include <linux/pm_runtime.h>
  53. #ifdef CONFIG_DRM_AMD_DC
  54. #include "amdgpu_dm_irq.h"
  55. #endif
  56. #define AMDGPU_WAIT_IDLE_TIMEOUT 200
  57. /**
  58. * amdgpu_hotplug_work_func - work handler for display hotplug event
  59. *
  60. * @work: work struct pointer
  61. *
  62. * This is the hotplug event work handler (all ASICs).
  63. * The work gets scheduled from the IRQ handler if there
  64. * was a hotplug interrupt. It walks through the connector table
  65. * and calls hotplug handler for each connector. After this, it sends
  66. * a DRM hotplug event to alert userspace.
  67. *
  68. * This design approach is required in order to defer hotplug event handling
  69. * from the IRQ handler to a work handler because hotplug handler has to use
  70. * mutexes which cannot be locked in an IRQ handler (since &mutex_lock may
  71. * sleep).
  72. */
  73. static void amdgpu_hotplug_work_func(struct work_struct *work)
  74. {
  75. struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
  76. hotplug_work);
  77. struct drm_device *dev = adev->ddev;
  78. struct drm_mode_config *mode_config = &dev->mode_config;
  79. struct drm_connector *connector;
  80. mutex_lock(&mode_config->mutex);
  81. list_for_each_entry(connector, &mode_config->connector_list, head)
  82. amdgpu_connector_hotplug(connector);
  83. mutex_unlock(&mode_config->mutex);
  84. /* Just fire off a uevent and let userspace tell us what to do */
  85. drm_helper_hpd_irq_event(dev);
  86. }
  87. /**
  88. * amdgpu_irq_reset_work_func - execute GPU reset
  89. *
  90. * @work: work struct pointer
  91. *
  92. * Execute scheduled GPU reset (Cayman+).
  93. * This function is called when the IRQ handler thinks we need a GPU reset.
  94. */
  95. static void amdgpu_irq_reset_work_func(struct work_struct *work)
  96. {
  97. struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
  98. reset_work);
  99. if (!amdgpu_sriov_vf(adev))
  100. amdgpu_device_gpu_recover(adev, NULL, false);
  101. }
  102. /**
  103. * amdgpu_irq_disable_all - disable *all* interrupts
  104. *
  105. * @adev: amdgpu device pointer
  106. *
  107. * Disable all types of interrupts from all sources.
  108. */
  109. void amdgpu_irq_disable_all(struct amdgpu_device *adev)
  110. {
  111. unsigned long irqflags;
  112. unsigned i, j, k;
  113. int r;
  114. spin_lock_irqsave(&adev->irq.lock, irqflags);
  115. for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
  116. if (!adev->irq.client[i].sources)
  117. continue;
  118. for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
  119. struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
  120. if (!src || !src->funcs->set || !src->num_types)
  121. continue;
  122. for (k = 0; k < src->num_types; ++k) {
  123. atomic_set(&src->enabled_types[k], 0);
  124. r = src->funcs->set(adev, src, k,
  125. AMDGPU_IRQ_STATE_DISABLE);
  126. if (r)
  127. DRM_ERROR("error disabling interrupt (%d)\n",
  128. r);
  129. }
  130. }
  131. }
  132. spin_unlock_irqrestore(&adev->irq.lock, irqflags);
  133. }
  134. /**
  135. * amdgpu_irq_handler - IRQ handler
  136. *
  137. * @irq: IRQ number (unused)
  138. * @arg: pointer to DRM device
  139. *
  140. * IRQ handler for amdgpu driver (all ASICs).
  141. *
  142. * Returns:
  143. * result of handling the IRQ, as defined by &irqreturn_t
  144. */
  145. irqreturn_t amdgpu_irq_handler(int irq, void *arg)
  146. {
  147. struct drm_device *dev = (struct drm_device *) arg;
  148. struct amdgpu_device *adev = dev->dev_private;
  149. irqreturn_t ret;
  150. ret = amdgpu_ih_process(adev);
  151. if (ret == IRQ_HANDLED)
  152. pm_runtime_mark_last_busy(dev->dev);
  153. return ret;
  154. }
  155. /**
  156. * amdgpu_msi_ok - check whether MSI functionality is enabled
  157. *
  158. * @adev: amdgpu device pointer (unused)
  159. *
  160. * Checks whether MSI functionality has been disabled via module parameter
  161. * (all ASICs).
  162. *
  163. * Returns:
  164. * *true* if MSIs are allowed to be enabled or *false* otherwise
  165. */
  166. static bool amdgpu_msi_ok(struct amdgpu_device *adev)
  167. {
  168. if (amdgpu_msi == 1)
  169. return true;
  170. else if (amdgpu_msi == 0)
  171. return false;
  172. return true;
  173. }
/**
 * amdgpu_irq_init - initialize interrupt handling
 *
 * @adev: amdgpu device pointer
 *
 * Sets up work functions for hotplug and reset interrupts, enables MSI
 * functionality, initializes vblank, hotplug and reset interrupt handling.
 *
 * Returns:
 * 0 on success or error code on failure
 */
int amdgpu_irq_init(struct amdgpu_device *adev)
{
	int r = 0;

	spin_lock_init(&adev->irq.lock);

	/* Enable MSI if not disabled by module parameter */
	adev->irq.msi_enabled = false;

	if (amdgpu_msi_ok(adev)) {
		/* MSI failure is non-fatal: fall back to legacy interrupts */
		int ret = pci_enable_msi(adev->pdev);
		if (!ret) {
			adev->irq.msi_enabled = true;
			dev_dbg(adev->dev, "amdgpu: using MSI.\n");
		}
	}

	/* Vblank and hotplug setup is handled by DC when DC is in use */
	if (!amdgpu_device_has_dc_support(adev)) {
		if (!adev->enable_virtual_display)
			/* Disable vblank IRQs aggressively for power-saving */
			/* XXX: can this be enabled for DC? */
			adev->ddev->vblank_disable_immediate = true;

		r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc);
		if (r)
			return r;

		/* Pre-DCE11 */
		INIT_WORK(&adev->hotplug_work,
			  amdgpu_hotplug_work_func);
	}

	INIT_WORK(&adev->reset_work, amdgpu_irq_reset_work_func);

	/* Set installed before drm_irq_install; cleared again on failure */
	adev->irq.installed = true;
	r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq);
	if (r) {
		adev->irq.installed = false;
		/* hotplug work only exists when DC is not in use (see above) */
		if (!amdgpu_device_has_dc_support(adev))
			flush_work(&adev->hotplug_work);
		cancel_work_sync(&adev->reset_work);
		return r;
	}
	/* 24-bit vblank counter width — NOTE(review): presumably matches the
	 * hardware frame counter register width; confirm per-ASIC. */
	adev->ddev->max_vblank_count = 0x00ffffff;

	DRM_DEBUG("amdgpu: irq initialized.\n");
	return 0;
}
/**
 * amdgpu_irq_fini - shut down interrupt handling
 *
 * @adev: amdgpu device pointer
 *
 * Tears down work functions for hotplug and reset interrupts, disables MSI
 * functionality, shuts down vblank, hotplug and reset interrupt handling,
 * turns off interrupts from all sources (all ASICs).
 */
void amdgpu_irq_fini(struct amdgpu_device *adev)
{
	unsigned i, j;

	if (adev->irq.installed) {
		drm_irq_uninstall(adev->ddev);
		adev->irq.installed = false;
		if (adev->irq.msi_enabled)
			pci_disable_msi(adev->pdev);
		/* hotplug work is only initialized when DC is not in use */
		if (!amdgpu_device_has_dc_support(adev))
			flush_work(&adev->hotplug_work);
		cancel_work_sync(&adev->reset_work);
	}

	/* Free per-source bookkeeping for all registered clients/sources */
	for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
		if (!adev->irq.client[i].sources)
			continue;

		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

			if (!src)
				continue;

			kfree(src->enabled_types);
			src->enabled_types = NULL;
			/* NOTE(review): src itself is freed only when src->data
			 * is set — presumably only those sources are
			 * heap-allocated while the rest are embedded in
			 * IP-block structures; confirm against the
			 * registration sites before changing this. */
			if (src->data) {
				kfree(src->data);
				kfree(src);
				adev->irq.client[i].sources[j] = NULL;
			}
		}
		kfree(adev->irq.client[i].sources);
		adev->irq.client[i].sources = NULL;
	}
}
  264. /**
  265. * amdgpu_irq_add_id - register IRQ source
  266. *
  267. * @adev: amdgpu device pointer
  268. * @client_id: client id
  269. * @src_id: source id
  270. * @source: IRQ source pointer
  271. *
  272. * Registers IRQ source on a client.
  273. *
  274. * Returns:
  275. * 0 on success or error code otherwise
  276. */
  277. int amdgpu_irq_add_id(struct amdgpu_device *adev,
  278. unsigned client_id, unsigned src_id,
  279. struct amdgpu_irq_src *source)
  280. {
  281. if (client_id >= AMDGPU_IH_CLIENTID_MAX)
  282. return -EINVAL;
  283. if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
  284. return -EINVAL;
  285. if (!source->funcs)
  286. return -EINVAL;
  287. if (!adev->irq.client[client_id].sources) {
  288. adev->irq.client[client_id].sources =
  289. kcalloc(AMDGPU_MAX_IRQ_SRC_ID,
  290. sizeof(struct amdgpu_irq_src *),
  291. GFP_KERNEL);
  292. if (!adev->irq.client[client_id].sources)
  293. return -ENOMEM;
  294. }
  295. if (adev->irq.client[client_id].sources[src_id] != NULL)
  296. return -EINVAL;
  297. if (source->num_types && !source->enabled_types) {
  298. atomic_t *types;
  299. types = kcalloc(source->num_types, sizeof(atomic_t),
  300. GFP_KERNEL);
  301. if (!types)
  302. return -ENOMEM;
  303. source->enabled_types = types;
  304. }
  305. adev->irq.client[client_id].sources[src_id] = source;
  306. return 0;
  307. }
  308. /**
  309. * amdgpu_irq_dispatch - dispatch IRQ to IP blocks
  310. *
  311. * @adev: amdgpu device pointer
  312. * @entry: interrupt vector pointer
  313. *
  314. * Dispatches IRQ to IP blocks.
  315. */
  316. void amdgpu_irq_dispatch(struct amdgpu_device *adev,
  317. struct amdgpu_iv_entry *entry)
  318. {
  319. unsigned client_id = entry->client_id;
  320. unsigned src_id = entry->src_id;
  321. struct amdgpu_irq_src *src;
  322. int r;
  323. trace_amdgpu_iv(entry);
  324. if (client_id >= AMDGPU_IH_CLIENTID_MAX) {
  325. DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);
  326. return;
  327. }
  328. if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
  329. DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
  330. return;
  331. }
  332. if (adev->irq.virq[src_id]) {
  333. generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id));
  334. } else {
  335. if (!adev->irq.client[client_id].sources) {
  336. DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
  337. client_id, src_id);
  338. return;
  339. }
  340. src = adev->irq.client[client_id].sources[src_id];
  341. if (!src) {
  342. DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
  343. return;
  344. }
  345. r = src->funcs->process(adev, src, entry);
  346. if (r)
  347. DRM_ERROR("error processing interrupt (%d)\n", r);
  348. }
  349. }
  350. /**
  351. * amdgpu_irq_update - update hardware interrupt state
  352. *
  353. * @adev: amdgpu device pointer
  354. * @src: interrupt source pointer
  355. * @type: type of interrupt
  356. *
  357. * Updates interrupt state for the specific source (all ASICs).
  358. */
  359. int amdgpu_irq_update(struct amdgpu_device *adev,
  360. struct amdgpu_irq_src *src, unsigned type)
  361. {
  362. unsigned long irqflags;
  363. enum amdgpu_interrupt_state state;
  364. int r;
  365. spin_lock_irqsave(&adev->irq.lock, irqflags);
  366. /* We need to determine after taking the lock, otherwise
  367. we might disable just enabled interrupts again */
  368. if (amdgpu_irq_enabled(adev, src, type))
  369. state = AMDGPU_IRQ_STATE_ENABLE;
  370. else
  371. state = AMDGPU_IRQ_STATE_DISABLE;
  372. r = src->funcs->set(adev, src, type, state);
  373. spin_unlock_irqrestore(&adev->irq.lock, irqflags);
  374. return r;
  375. }
  376. /**
  377. * amdgpu_irq_gpu_reset_resume_helper - update interrupt states on all sources
  378. *
  379. * @adev: amdgpu device pointer
  380. *
  381. * Updates state of all types of interrupts on all sources on resume after
  382. * reset.
  383. */
  384. void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
  385. {
  386. int i, j, k;
  387. for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
  388. if (!adev->irq.client[i].sources)
  389. continue;
  390. for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
  391. struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
  392. if (!src)
  393. continue;
  394. for (k = 0; k < src->num_types; k++)
  395. amdgpu_irq_update(adev, src, k);
  396. }
  397. }
  398. }
  399. /**
  400. * amdgpu_irq_get - enable interrupt
  401. *
  402. * @adev: amdgpu device pointer
  403. * @src: interrupt source pointer
  404. * @type: type of interrupt
  405. *
  406. * Enables specified type of interrupt on the specified source (all ASICs).
  407. *
  408. * Returns:
  409. * 0 on success or error code otherwise
  410. */
  411. int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
  412. unsigned type)
  413. {
  414. if (!adev->ddev->irq_enabled)
  415. return -ENOENT;
  416. if (type >= src->num_types)
  417. return -EINVAL;
  418. if (!src->enabled_types || !src->funcs->set)
  419. return -EINVAL;
  420. if (atomic_inc_return(&src->enabled_types[type]) == 1)
  421. return amdgpu_irq_update(adev, src, type);
  422. return 0;
  423. }
  424. /**
  425. * amdgpu_irq_put - disable interrupt
  426. *
  427. * @adev: amdgpu device pointer
  428. * @src: interrupt source pointer
  429. * @type: type of interrupt
  430. *
  431. * Enables specified type of interrupt on the specified source (all ASICs).
  432. *
  433. * Returns:
  434. * 0 on success or error code otherwise
  435. */
  436. int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
  437. unsigned type)
  438. {
  439. if (!adev->ddev->irq_enabled)
  440. return -ENOENT;
  441. if (type >= src->num_types)
  442. return -EINVAL;
  443. if (!src->enabled_types || !src->funcs->set)
  444. return -EINVAL;
  445. if (atomic_dec_and_test(&src->enabled_types[type]))
  446. return amdgpu_irq_update(adev, src, type);
  447. return 0;
  448. }
  449. /**
  450. * amdgpu_irq_enabled - check whether interrupt is enabled or not
  451. *
  452. * @adev: amdgpu device pointer
  453. * @src: interrupt source pointer
  454. * @type: type of interrupt
  455. *
  456. * Checks whether the given type of interrupt is enabled on the given source.
  457. *
  458. * Returns:
  459. * *true* if interrupt is enabled, *false* if interrupt is disabled or on
  460. * invalid parameters
  461. */
  462. bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
  463. unsigned type)
  464. {
  465. if (!adev->ddev->irq_enabled)
  466. return false;
  467. if (type >= src->num_types)
  468. return false;
  469. if (!src->enabled_types || !src->funcs->set)
  470. return false;
  471. return !!atomic_read(&src->enabled_types[type]);
  472. }
/* XXX: Generic IRQ handling */

/* Empty mask callback for the amdgpu irq_chip — not yet implemented. */
static void amdgpu_irq_mask(struct irq_data *irqd)
{
	/* XXX */
}

/* Empty unmask callback for the amdgpu irq_chip — not yet implemented. */
static void amdgpu_irq_unmask(struct irq_data *irqd)
{
	/* XXX */
}

/* amdgpu hardware interrupt chip descriptor */
static struct irq_chip amdgpu_irq_chip = {
	.name = "amdgpu-ih",
	.irq_mask = amdgpu_irq_mask,
	.irq_unmask = amdgpu_irq_unmask,
};
  488. /**
  489. * amdgpu_irqdomain_map - create mapping between virtual and hardware IRQ numbers
  490. *
  491. * @d: amdgpu IRQ domain pointer (unused)
  492. * @irq: virtual IRQ number
  493. * @hwirq: hardware irq number
  494. *
  495. * Current implementation assigns simple interrupt handler to the given virtual
  496. * IRQ.
  497. *
  498. * Returns:
  499. * 0 on success or error code otherwise
  500. */
  501. static int amdgpu_irqdomain_map(struct irq_domain *d,
  502. unsigned int irq, irq_hw_number_t hwirq)
  503. {
  504. if (hwirq >= AMDGPU_MAX_IRQ_SRC_ID)
  505. return -EPERM;
  506. irq_set_chip_and_handler(irq,
  507. &amdgpu_irq_chip, handle_simple_irq);
  508. return 0;
  509. }
/* Implementation of methods for amdgpu IRQ domain: only .map is needed,
 * the default dispose is sufficient. */
static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
	.map = amdgpu_irqdomain_map,
};
  514. /**
  515. * amdgpu_irq_add_domain - create a linear IRQ domain
  516. *
  517. * @adev: amdgpu device pointer
  518. *
  519. * Creates an IRQ domain for GPU interrupt sources
  520. * that may be driven by another driver (e.g., ACP).
  521. *
  522. * Returns:
  523. * 0 on success or error code otherwise
  524. */
  525. int amdgpu_irq_add_domain(struct amdgpu_device *adev)
  526. {
  527. adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
  528. &amdgpu_hw_irqdomain_ops, adev);
  529. if (!adev->irq.domain) {
  530. DRM_ERROR("GPU irq add domain failed\n");
  531. return -ENODEV;
  532. }
  533. return 0;
  534. }
  535. /**
  536. * amdgpu_irq_remove_domain - remove the IRQ domain
  537. *
  538. * @adev: amdgpu device pointer
  539. *
  540. * Removes the IRQ domain for GPU interrupt sources
  541. * that may be driven by another driver (e.g., ACP).
  542. */
  543. void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
  544. {
  545. if (adev->irq.domain) {
  546. irq_domain_remove(adev->irq.domain);
  547. adev->irq.domain = NULL;
  548. }
  549. }
  550. /**
  551. * amdgpu_irq_create_mapping - create mapping between domain Linux IRQs
  552. *
  553. * @adev: amdgpu device pointer
  554. * @src_id: IH source id
  555. *
  556. * Creates mapping between a domain IRQ (GPU IH src id) and a Linux IRQ
  557. * Use this for components that generate a GPU interrupt, but are driven
  558. * by a different driver (e.g., ACP).
  559. *
  560. * Returns:
  561. * Linux IRQ
  562. */
  563. unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id)
  564. {
  565. adev->irq.virq[src_id] = irq_create_mapping(adev->irq.domain, src_id);
  566. return adev->irq.virq[src_id];
  567. }