ehv_pic.c

/*
 * Driver for ePAPR Embedded Hypervisor PIC
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Author: Ashish Kalra <ashish.kalra@freescale.com>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2.  This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/ehv_pic.h>
#include <asm/fsl_hcalls.h>

static struct ehv_pic *global_ehv_pic;
static DEFINE_SPINLOCK(ehv_pic_lock);

static u32 hwirq_intspec[NR_EHV_PIC_INTS];
static u32 __iomem *mpic_percpu_base_vaddr;

#define IRQ_TYPE_MPIC_DIRECT	4
#define MPIC_EOI		0x00B0

/*
 * Linux descriptor level callbacks
 */

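/*
 * Masking, unmasking and end-of-interrupt are forwarded to the hypervisor
 * through the ePAPR ev_int_* hypercalls; the guest never touches the
 * physical PIC registers directly for these operations.
 */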
void ehv_pic_unmask_irq(struct irq_data *d)
{
	unsigned int src = virq_to_hw(d->irq);

	ev_int_set_mask(src, 0);
}

void ehv_pic_mask_irq(struct irq_data *d)
{
	unsigned int src = virq_to_hw(d->irq);

	ev_int_set_mask(src, 1);
}

void ehv_pic_end_irq(struct irq_data *d)
{
	unsigned int src = virq_to_hw(d->irq);

	ev_int_eoi(src);
}

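/*
 * Direct end-of-interrupt: for interrupts flagged IRQ_TYPE_MPIC_DIRECT the
 * EOI is written straight to the per-cpu MPIC register window mapped from
 * the "fsl,hv-mpic-per-cpu" node, bypassing the ev_int_eoi() hypercall.
 */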
void ehv_pic_direct_end_irq(struct irq_data *d)
{
	out_be32(mpic_percpu_base_vaddr + MPIC_EOI / 4, 0);
}

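/*
 * Change the destination CPU of an interrupt: read back the current
 * configuration from the hypervisor and rewrite it with the CPU chosen
 * from the requested affinity mask, keeping the existing priority and
 * sense/polarity settings.
 */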
int ehv_pic_set_affinity(struct irq_data *d, const struct cpumask *dest,
			 bool force)
{
	unsigned int src = virq_to_hw(d->irq);
	unsigned int config, prio, cpu_dest;
	int cpuid = irq_choose_cpu(dest);
	unsigned long flags;

	spin_lock_irqsave(&ehv_pic_lock, flags);
	ev_int_get_config(src, &config, &prio, &cpu_dest);
	ev_int_set_config(src, config, prio, cpuid);
	spin_unlock_irqrestore(&ehv_pic_lock, flags);

	return IRQ_SET_MASK_OK;
}

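/*
 * Translate a Linux IRQ trigger type (IRQ_TYPE_*) into the hypervisor's
 * sense/polarity encoding.  Unknown types default to active-low level.
 */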
static unsigned int ehv_pic_type_to_vecpri(unsigned int type)
{
	/* Now convert sense value */
	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_RISING:
		return EHV_PIC_INFO(VECPRI_SENSE_EDGE) |
		       EHV_PIC_INFO(VECPRI_POLARITY_POSITIVE);

	case IRQ_TYPE_EDGE_FALLING:
	case IRQ_TYPE_EDGE_BOTH:
		return EHV_PIC_INFO(VECPRI_SENSE_EDGE) |
		       EHV_PIC_INFO(VECPRI_POLARITY_NEGATIVE);

	case IRQ_TYPE_LEVEL_HIGH:
		return EHV_PIC_INFO(VECPRI_SENSE_LEVEL) |
		       EHV_PIC_INFO(VECPRI_POLARITY_POSITIVE);

	case IRQ_TYPE_LEVEL_LOW:
	default:
		return EHV_PIC_INFO(VECPRI_SENSE_LEVEL) |
		       EHV_PIC_INFO(VECPRI_POLARITY_NEGATIVE);
	}
}

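/*
 * Set the trigger type of an interrupt: the new sense/polarity bits are
 * pushed to the hypervisor with ev_int_set_config() while the priority is
 * left at a static default.
 */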
int ehv_pic_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
	unsigned int src = virq_to_hw(d->irq);
	unsigned int vecpri, vold, vnew, prio, cpu_dest;
	unsigned long flags;

	if (flow_type == IRQ_TYPE_NONE)
		flow_type = IRQ_TYPE_LEVEL_LOW;

	irqd_set_trigger_type(d, flow_type);

	vecpri = ehv_pic_type_to_vecpri(flow_type);

	spin_lock_irqsave(&ehv_pic_lock, flags);

	ev_int_get_config(src, &vold, &prio, &cpu_dest);

	vnew = vold & ~(EHV_PIC_INFO(VECPRI_POLARITY_MASK) |
			EHV_PIC_INFO(VECPRI_SENSE_MASK));
	vnew |= vecpri;

	/*
	 * TODO: Add a platform-specific interface call to set individual
	 * interrupt priorities.  The platform currently uses a
	 * static/default priority for all interrupts.
	 */
	prio = 8;

	ev_int_set_config(src, vecpri, prio, cpu_dest);

	spin_unlock_irqrestore(&ehv_pic_lock, flags);

	return IRQ_SET_MASK_OK_NOCOPY;
}

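/*
 * Two flavours of irq_chip: the default one signals EOI through the
 * hypervisor, while the "direct EOI" variant writes to the per-cpu MPIC
 * registers (used when the interrupt specifier requests MPIC direct mode).
 */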
static struct irq_chip ehv_pic_irq_chip = {
	.irq_mask	= ehv_pic_mask_irq,
	.irq_unmask	= ehv_pic_unmask_irq,
	.irq_eoi	= ehv_pic_end_irq,
	.irq_set_type	= ehv_pic_set_irq_type,
};

static struct irq_chip ehv_pic_direct_eoi_irq_chip = {
	.irq_mask	= ehv_pic_mask_irq,
	.irq_unmask	= ehv_pic_unmask_irq,
	.irq_eoi	= ehv_pic_direct_end_irq,
	.irq_set_type	= ehv_pic_set_irq_type,
};

/* Return an interrupt vector or 0 if no interrupt is pending. */
unsigned int ehv_pic_get_irq(void)
{
	int irq;

	BUG_ON(global_ehv_pic == NULL);

	if (global_ehv_pic->coreint_flag)
		irq = mfspr(SPRN_EPR);	/* if core int mode */
	else
		ev_int_iack(0, &irq);	/* legacy mode */

	if (irq == 0xFFFF)	/* 0xFFFF --> no irq is pending */
		return 0;

	/*
	 * This will also set up revmap[] in the slow path the first time;
	 * subsequent calls always take the fast path by indexing revmap.
	 */
	return irq_linear_revmap(global_ehv_pic->irqhost, irq);
}

static int ehv_pic_host_match(struct irq_domain *h, struct device_node *node,
			      enum irq_domain_bus_token bus_token)
{
	/* Exact match, unless ehv_pic node is NULL */
	struct device_node *of_node = irq_domain_get_of_node(h);

	return of_node == NULL || of_node == node;
}

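/*
 * Wire up a virq: pick the direct-EOI chip when the per-cpu MPIC window is
 * mapped and the saved interrupt specifier requested IRQ_TYPE_MPIC_DIRECT,
 * otherwise fall back to the default hypervisor-EOI chip.
 */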
static int ehv_pic_host_map(struct irq_domain *h, unsigned int virq,
			    irq_hw_number_t hw)
{
	struct ehv_pic *ehv_pic = h->host_data;
	struct irq_chip *chip;

	/* Default chip */
	chip = &ehv_pic->hc_irq;

	if (mpic_percpu_base_vaddr)
		if (hwirq_intspec[hw] & IRQ_TYPE_MPIC_DIRECT)
			chip = &ehv_pic_direct_eoi_irq_chip;

	irq_set_chip_data(virq, chip);
	/*
	 * Use handle_fasteoi_irq as the irq handler: it only calls the eoi
	 * callback, which suits the MPIC controller, since it sets ISR/IPR
	 * automatically and clears the highest-priority active interrupt in
	 * ISR/IPR when a specific EOI is issued.
	 */
	irq_set_chip_and_handler(virq, chip, handle_fasteoi_irq);

	/* Set default irq type */
	irq_set_irq_type(virq, IRQ_TYPE_NONE);

	return 0;
}

static int ehv_pic_host_xlate(struct irq_domain *h, struct device_node *ct,
			      const u32 *intspec, unsigned int intsize,
			      irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
	/*
	 * Interrupt sense values coming from guest device tree interrupt
	 * specifiers can have four possible sense/level encodings, which
	 * need to be translated from the firmware type to the Linux type.
	 */
	static unsigned char map_of_senses_to_linux_irqtype[4] = {
		IRQ_TYPE_EDGE_FALLING,
		IRQ_TYPE_EDGE_RISING,
		IRQ_TYPE_LEVEL_LOW,
		IRQ_TYPE_LEVEL_HIGH,
	};

	*out_hwirq = intspec[0];
	if (intsize > 1) {
		hwirq_intspec[intspec[0]] = intspec[1];
		*out_flags = map_of_senses_to_linux_irqtype[intspec[1] &
							    ~IRQ_TYPE_MPIC_DIRECT];
	} else {
		*out_flags = IRQ_TYPE_NONE;
	}

	return 0;
}

static const struct irq_domain_ops ehv_pic_host_ops = {
	.match = ehv_pic_host_match,
	.map = ehv_pic_host_map,
	.xlate = ehv_pic_host_xlate,
};

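/*
 * Probe the "epapr,hv-pic" node, allocate the controller state, register a
 * linear irq_domain for it and make it the default host.  The optional
 * "has-external-proxy" property selects core interrupt (EPR) delivery, and
 * an optional "fsl,hv-mpic-per-cpu" node enables the direct-EOI path.
 */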
void __init ehv_pic_init(void)
{
	struct device_node *np, *np2;
	struct ehv_pic *ehv_pic;
	int coreint_flag = 1;

	np = of_find_compatible_node(NULL, NULL, "epapr,hv-pic");
	if (!np) {
		pr_err("ehv_pic_init: could not find epapr,hv-pic node\n");
		return;
	}

	if (!of_find_property(np, "has-external-proxy", NULL))
		coreint_flag = 0;

	ehv_pic = kzalloc(sizeof(struct ehv_pic), GFP_KERNEL);
	if (!ehv_pic) {
		of_node_put(np);
		return;
	}

	ehv_pic->irqhost = irq_domain_add_linear(np, NR_EHV_PIC_INTS,
						 &ehv_pic_host_ops, ehv_pic);
	if (!ehv_pic->irqhost) {
		of_node_put(np);
		kfree(ehv_pic);
		return;
	}

	np2 = of_find_compatible_node(NULL, NULL, "fsl,hv-mpic-per-cpu");
	if (np2) {
		mpic_percpu_base_vaddr = of_iomap(np2, 0);
		if (!mpic_percpu_base_vaddr)
			pr_err("ehv_pic_init: of_iomap failed\n");

		of_node_put(np2);
	}

	ehv_pic->hc_irq = ehv_pic_irq_chip;
	ehv_pic->hc_irq.irq_set_affinity = ehv_pic_set_affinity;
	ehv_pic->coreint_flag = coreint_flag;

	global_ehv_pic = ehv_pic;
	irq_set_default_host(global_ehv_pic->irqhost);
}
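
/*
 * Illustrative usage (not part of this driver): a Freescale ePAPR guest
 * platform would typically call ehv_pic_init() from its init_IRQ hook and
 * fetch pending interrupts through ehv_pic_get_irq().  A minimal sketch,
 * with placeholder machine and function names:
 *
 *	static void __init hv_guest_init_irq(void)
 *	{
 *		ehv_pic_init();
 *	}
 *
 *	define_machine(hv_guest) {
 *		.name		= "ePAPR HV guest",
 *		.init_IRQ	= hv_guest_init_irq,
 *		.get_irq	= ehv_pic_get_irq,
 *	};
 *
 * See the real platform code (e.g. the corenet generic platform) for the
 * actual wiring.
 */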