msi_ia64.c 4.6 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
 * MSI hooks for IA64, modeled on the standard x86 APIC MSI code.
  4. */
  5. #include <linux/pci.h>
  6. #include <linux/irq.h>
  7. #include <linux/msi.h>
  8. #include <linux/dmar.h>
  9. #include <asm/smp.h>
  10. #include <asm/msidef.h>
  11. static struct irq_chip ia64_msi_chip;
  12. #ifdef CONFIG_SMP
  13. static int ia64_set_msi_irq_affinity(struct irq_data *idata,
  14. const cpumask_t *cpu_mask, bool force)
  15. {
  16. struct msi_msg msg;
  17. u32 addr, data;
  18. int cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
  19. unsigned int irq = idata->irq;
  20. if (irq_prepare_move(irq, cpu))
  21. return -1;
  22. __get_cached_msi_msg(irq_data_get_msi_desc(idata), &msg);
  23. addr = msg.address_lo;
  24. addr &= MSI_ADDR_DEST_ID_MASK;
  25. addr |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));
  26. msg.address_lo = addr;
  27. data = msg.data;
  28. data &= MSI_DATA_VECTOR_MASK;
  29. data |= MSI_DATA_VECTOR(irq_to_vector(irq));
  30. msg.data = data;
  31. pci_write_msi_msg(irq, &msg);
  32. cpumask_copy(irq_data_get_affinity_mask(idata), cpumask_of(cpu));
  33. return 0;
  34. }
  35. #endif /* CONFIG_SMP */
  36. int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
  37. {
  38. struct msi_msg msg;
  39. unsigned long dest_phys_id;
  40. int irq, vector;
  41. irq = create_irq();
  42. if (irq < 0)
  43. return irq;
  44. irq_set_msi_desc(irq, desc);
  45. dest_phys_id = cpu_physical_id(cpumask_any_and(&(irq_to_domain(irq)),
  46. cpu_online_mask));
  47. vector = irq_to_vector(irq);
  48. msg.address_hi = 0;
  49. msg.address_lo =
  50. MSI_ADDR_HEADER |
  51. MSI_ADDR_DEST_MODE_PHYS |
  52. MSI_ADDR_REDIRECTION_CPU |
  53. MSI_ADDR_DEST_ID_CPU(dest_phys_id);
  54. msg.data =
  55. MSI_DATA_TRIGGER_EDGE |
  56. MSI_DATA_LEVEL_ASSERT |
  57. MSI_DATA_DELIVERY_FIXED |
  58. MSI_DATA_VECTOR(vector);
  59. pci_write_msi_msg(irq, &msg);
  60. irq_set_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq);
  61. return 0;
  62. }
/*
 * ia64_teardown_msi_irq - release the irq backing a PCI MSI interrupt.
 * @irq: irq number previously returned by ia64_setup_msi_irq().
 *
 * Thin wrapper around destroy_irq(); the msi_desc association is
 * presumably dropped by the generic MSI core around this call — not
 * visible from this file.
 */
void ia64_teardown_msi_irq(unsigned int irq)
{
	destroy_irq(irq);
}
/*
 * ->irq_ack hook shared by the IA64 MSI irq_chips.
 *
 * Completes any in-flight vector migration for this irq, performs a
 * deferred irq move if one is pending, then issues the IA64 EOI.
 * NOTE(review): ia64_eoi() is deliberately last — presumably so the
 * migration bookkeeping settles before end-of-interrupt; confirm
 * against the IA64 interrupt-delivery documentation before reordering.
 */
static void ia64_ack_msi_irq(struct irq_data *data)
{
	irq_complete_move(data->irq);
	irq_move_irq(data);
	ia64_eoi();
}
  73. static int ia64_msi_retrigger_irq(struct irq_data *data)
  74. {
  75. unsigned int vector = irq_to_vector(data->irq);
  76. ia64_resend_irq(vector);
  77. return 1;
  78. }
/*
 * Generic irq_chip used for PCI MSI on most IA64 platforms.
 * Mask/unmask go through the generic PCI MSI helpers; ack and
 * retrigger are the IA64-specific hooks defined above.
 */
static struct irq_chip ia64_msi_chip = {
	.name			= "PCI-MSI",
	.irq_mask		= pci_msi_mask_irq,
	.irq_unmask		= pci_msi_unmask_irq,
	.irq_ack		= ia64_ack_msi_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity	= ia64_set_msi_irq_affinity,
#endif
	.irq_retrigger		= ia64_msi_retrigger_irq,
};
  92. int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
  93. {
  94. if (platform_setup_msi_irq)
  95. return platform_setup_msi_irq(pdev, desc);
  96. return ia64_setup_msi_irq(pdev, desc);
  97. }
  98. void arch_teardown_msi_irq(unsigned int irq)
  99. {
  100. if (platform_teardown_msi_irq)
  101. return platform_teardown_msi_irq(irq);
  102. return ia64_teardown_msi_irq(irq);
  103. }
  104. #ifdef CONFIG_INTEL_IOMMU
  105. #ifdef CONFIG_SMP
/*
 * ->irq_set_affinity hook for the DMAR fault-reporting MSI.
 *
 * Retargets the DMAR MSI register contents at the first online CPU in
 * @mask, keeping the vector from the per-irq irq_cfg entry.
 * Returns 0 on success, -1 if the vector migration cannot be prepared.
 */
static int dmar_msi_set_affinity(struct irq_data *data,
				 const struct cpumask *mask, bool force)
{
	unsigned int irq = data->irq;
	struct irq_cfg *cfg = irq_cfg + irq;	/* global per-irq config array */
	struct msi_msg msg;
	int cpu = cpumask_first_and(mask, cpu_online_mask);

	if (irq_prepare_move(irq, cpu))
		return -1;

	dmar_msi_read(irq, &msg);

	/*
	 * NOTE(review): the field is cleared with the complemented mask
	 * (~MSI_DATA_VECTOR_MASK / ~MSI_ADDR_DEST_ID_MASK), whereas
	 * ia64_set_msi_irq_affinity() above ANDs with the uncomplemented
	 * masks. The two cannot both be right unless msidef.h defines
	 * these masks inverted — confirm against <asm/msidef.h>.
	 */
	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));

	dmar_msi_write(irq, &msg);
	cpumask_copy(irq_data_get_affinity_mask(data), mask);

	return 0;
}
  124. #endif /* CONFIG_SMP */
/*
 * irq_chip for the DMAR (Intel IOMMU) fault-reporting MSI.
 * Mask/unmask use the dmar-specific register accessors; ack and
 * retrigger reuse the generic IA64 MSI hooks above.
 */
static struct irq_chip dmar_msi_type = {
	.name			= "DMAR_MSI",
	.irq_unmask		= dmar_msi_unmask,
	.irq_mask		= dmar_msi_mask,
	.irq_ack		= ia64_ack_msi_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity	= dmar_msi_set_affinity,
#endif
	.irq_retrigger		= ia64_msi_retrigger_irq,
};
  135. static void
  136. msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
  137. {
  138. struct irq_cfg *cfg = irq_cfg + irq;
  139. unsigned dest;
  140. dest = cpu_physical_id(cpumask_first_and(&(irq_to_domain(irq)),
  141. cpu_online_mask));
  142. msg->address_hi = 0;
  143. msg->address_lo =
  144. MSI_ADDR_HEADER |
  145. MSI_ADDR_DEST_MODE_PHYS |
  146. MSI_ADDR_REDIRECTION_CPU |
  147. MSI_ADDR_DEST_ID_CPU(dest);
  148. msg->data =
  149. MSI_DATA_TRIGGER_EDGE |
  150. MSI_DATA_LEVEL_ASSERT |
  151. MSI_DATA_DELIVERY_FIXED |
  152. MSI_DATA_VECTOR(cfg->vector);
  153. }
  154. int dmar_alloc_hwirq(int id, int node, void *arg)
  155. {
  156. int irq;
  157. struct msi_msg msg;
  158. irq = create_irq();
  159. if (irq > 0) {
  160. irq_set_handler_data(irq, arg);
  161. irq_set_chip_and_handler_name(irq, &dmar_msi_type,
  162. handle_edge_irq, "edge");
  163. msi_compose_msg(NULL, irq, &msg);
  164. dmar_msi_write(irq, &msg);
  165. }
  166. return irq;
  167. }
/*
 * dmar_free_hwirq - undo dmar_alloc_hwirq().
 * @irq: irq number returned by dmar_alloc_hwirq().
 *
 * Clears the handler data before destroying the irq so no stale
 * pointer survives in the descriptor.
 */
void dmar_free_hwirq(int irq)
{
	irq_set_handler_data(irq, NULL);
	destroy_irq(irq);
}
  173. #endif /* CONFIG_INTEL_IOMMU */