/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2016 Cavium, Inc.
 */

#include <linux/of_address.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/bitops.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/of.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-ciu2-defs.h>
#include <asm/octeon/cvmx-ciu3-defs.h>

static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror);
static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror);
static DEFINE_PER_CPU(raw_spinlock_t, octeon_irq_ciu_spinlock);
static DEFINE_PER_CPU(unsigned int, octeon_irq_ciu3_idt_ip2);
static DEFINE_PER_CPU(unsigned int, octeon_irq_ciu3_idt_ip3);
static DEFINE_PER_CPU(struct octeon_ciu3_info *, octeon_ciu3_info);

#define CIU3_MBOX_PER_CORE 10
/*
 * The 8 most significant bits of the intsn identify the interrupt major block.
 * Each major block might use its own interrupt domain. Thus 256 domains are
 * needed.
 */
#define MAX_CIU3_DOMAINS 256

typedef irq_hw_number_t (*octeon_ciu3_intsn2hw_t)(struct irq_domain *, unsigned int);

/* Information for each ciu3 in the system */
struct octeon_ciu3_info {
	u64 ciu3_addr;
	int node;
	struct irq_domain *domain[MAX_CIU3_DOMAINS];
	octeon_ciu3_intsn2hw_t intsn2hw[MAX_CIU3_DOMAINS];
};

/* Each ciu3 in the system uses its own data (one ciu3 per node) */
static struct octeon_ciu3_info *octeon_ciu3_info_per_node[4];

struct octeon_irq_ciu_domain_data {
	int num_sum; /* number of sum registers (2 or 3). */
};

/* Register offsets from ciu3_addr */
#define CIU3_CONST 0x220
#define CIU3_IDT_CTL(_idt) ((_idt) * 8 + 0x110000)
#define CIU3_IDT_PP(_idt, _idx) ((_idt) * 32 + (_idx) * 8 + 0x120000)
#define CIU3_IDT_IO(_idt) ((_idt) * 8 + 0x130000)
#define CIU3_DEST_PP_INT(_pp_ip) ((_pp_ip) * 8 + 0x200000)
#define CIU3_DEST_IO_INT(_io) ((_io) * 8 + 0x210000)
#define CIU3_ISC_CTL(_intsn) ((_intsn) * 8 + 0x80000000)
#define CIU3_ISC_W1C(_intsn) ((_intsn) * 8 + 0x90000000)
#define CIU3_ISC_W1S(_intsn) ((_intsn) * 8 + 0xa0000000)
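
/*
 * Editor's gloss (not in the original source): the W1C/W1S suffixes
 * follow the usual write-1-to-clear / write-1-to-set convention, so the
 * ISC (interrupt source control) block exposes three views of each
 * intsn. As a worked example, CIU3_ISC_CTL(0x41) = 0x41 * 8 + 0x80000000
 * = 0x80000208, i.e. one 64-bit register per source per view.
 */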
static __read_mostly int octeon_irq_ciu_to_irq[8][64];

struct octeon_ciu_chip_data {
	union {
		struct { /* only used for ciu3 */
			u64 ciu3_addr;
			unsigned int intsn;
		};
		struct { /* only used for ciu/ciu2 */
			u8 line;
			u8 bit;
		};
	};
	int gpio_line;
	int current_cpu; /* Next CPU expected to take this irq */
	int ciu_node;    /* NUMA node number of the CIU */
};

struct octeon_core_chip_data {
	struct mutex core_irq_mutex;
	bool current_en;
	bool desired_en;
	u8 bit;
};

#define MIPS_CORE_IRQ_LINES 8

static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES];

static int octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line,
				      struct irq_chip *chip,
				      irq_flow_handler_t handler)
{
	struct octeon_ciu_chip_data *cd;

	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd)
		return -ENOMEM;

	irq_set_chip_and_handler(irq, chip, handler);
	cd->line = line;
	cd->bit = bit;
	cd->gpio_line = gpio_line;

	irq_set_chip_data(irq, cd);
	octeon_irq_ciu_to_irq[line][bit] = irq;
	return 0;
}
static void octeon_irq_free_cd(struct irq_domain *d, unsigned int irq)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);

	irq_set_chip_data(irq, NULL);
	kfree(cd);
}

static int octeon_irq_force_ciu_mapping(struct irq_domain *domain,
					int irq, int line, int bit)
{
	return irq_domain_associate(domain, irq, line << 6 | bit);
}

static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
	return cpu_logical_map(cpu);
#else
	return cvmx_get_core_num();
#endif
}

static int octeon_cpu_for_coreid(int coreid)
{
#ifdef CONFIG_SMP
	return cpu_number_map(coreid);
#else
	return smp_processor_id();
#endif
}
static void octeon_irq_core_ack(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	unsigned int bit = cd->bit;

	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << bit);
	/* The two user interrupts must be cleared manually. */
	if (bit < 2)
		clear_c0_cause(0x100 << bit);
}

static void octeon_irq_core_eoi(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	set_c0_status(0x100 << cd->bit);
}

static void octeon_irq_core_set_enable_local(void *arg)
{
	struct irq_data *data = arg;
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	unsigned int mask = 0x100 << cd->bit;

	/*
	 * Interrupts are already disabled, so these are atomic.
	 */
	if (cd->desired_en)
		set_c0_status(mask);
	else
		clear_c0_status(mask);
}

static void octeon_irq_core_disable(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	cd->desired_en = false;
}

static void octeon_irq_core_enable(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	cd->desired_en = true;
}
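
/*
 * Editor's note: enable/disable above only record the desired state.
 * The irq_bus_lock/irq_bus_sync_unlock pair below then pushes that
 * state to every core in one on_each_cpu() broadcast, so the per-core
 * Status register updates happen outside the raw irq_desc lock.
 */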
static void octeon_irq_core_bus_lock(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	mutex_lock(&cd->core_irq_mutex);
}

static void octeon_irq_core_bus_sync_unlock(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	if (cd->desired_en != cd->current_en) {
		on_each_cpu(octeon_irq_core_set_enable_local, data, 1);
		cd->current_en = cd->desired_en;
	}

	mutex_unlock(&cd->core_irq_mutex);
}

static struct irq_chip octeon_irq_chip_core = {
	.name = "Core",
	.irq_enable = octeon_irq_core_enable,
	.irq_disable = octeon_irq_core_disable,
	.irq_ack = octeon_irq_core_ack,
	.irq_eoi = octeon_irq_core_eoi,
	.irq_bus_lock = octeon_irq_core_bus_lock,
	.irq_bus_sync_unlock = octeon_irq_core_bus_sync_unlock,
	.irq_cpu_online = octeon_irq_core_eoi,
	.irq_cpu_offline = octeon_irq_core_ack,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static void __init octeon_irq_init_core(void)
{
	int i;
	int irq;
	struct octeon_core_chip_data *cd;

	for (i = 0; i < MIPS_CORE_IRQ_LINES; i++) {
		cd = &octeon_irq_core_chip_data[i];
		cd->current_en = false;
		cd->desired_en = false;
		cd->bit = i;
		mutex_init(&cd->core_irq_mutex);
		irq = OCTEON_IRQ_SW0 + i;
		irq_set_chip_data(irq, cd);
		irq_set_chip_and_handler(irq, &octeon_irq_chip_core,
					 handle_percpu_irq);
	}
}
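
/*
 * Editor's note: with a multi-CPU affinity mask, each (un)mask
 * round-robins the interrupt to the next online CPU in the mask;
 * cd->current_cpu remembers where the rotation left off. Resetting
 * cpu to -1 makes cpumask_next() wrap back to the first CPU in the
 * mask on the following iteration.
 */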
static int next_cpu_for_irq(struct irq_data *data)
{
#ifdef CONFIG_SMP
	int cpu;
	struct cpumask *mask = irq_data_get_affinity_mask(data);
	int weight = cpumask_weight(mask);
	struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);

	if (weight > 1) {
		cpu = cd->current_cpu;
		for (;;) {
			cpu = cpumask_next(cpu, mask);
			if (cpu >= nr_cpu_ids) {
				cpu = -1;
				continue;
			} else if (cpumask_test_cpu(cpu, cpu_online_mask)) {
				break;
			}
		}
	} else if (weight == 1) {
		cpu = cpumask_first(mask);
	} else {
		cpu = smp_processor_id();
	}
	cd->current_cpu = cpu;
	return cpu;
#else
	return smp_processor_id();
#endif
}
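
/*
 * Editor's note: the per-CPU octeon_irq_ciu0/1_en_mirror words cache
 * the contents of each core's EN0/EN1 registers. On chips without the
 * W1S/W1C register variants, an enable is a read-modify-write of the
 * mirror under the per-CPU raw spinlock followed by a full rewrite of
 * the CSR; the wmb() orders the mirror update ahead of the hardware
 * enable so the dispatch handlers see a consistent enabled set.
 */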
static void octeon_irq_ciu_enable(struct irq_data *data)
{
	int cpu = next_cpu_for_irq(data);
	int coreid = octeon_coreid_for_cpu(cpu);
	unsigned long *pen;
	unsigned long flags;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);

	cd = irq_data_get_irq_chip_data(data);

	raw_spin_lock_irqsave(lock, flags);
	if (cd->line == 0) {
		pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
	} else {
		pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
	}
	raw_spin_unlock_irqrestore(lock, flags);
}

static void octeon_irq_ciu_enable_local(struct irq_data *data)
{
	unsigned long *pen;
	unsigned long flags;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);

	cd = irq_data_get_irq_chip_data(data);

	raw_spin_lock_irqsave(lock, flags);
	if (cd->line == 0) {
		pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
	} else {
		pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
	}
	raw_spin_unlock_irqrestore(lock, flags);
}

static void octeon_irq_ciu_disable_local(struct irq_data *data)
{
	unsigned long *pen;
	unsigned long flags;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);

	cd = irq_data_get_irq_chip_data(data);

	raw_spin_lock_irqsave(lock, flags);
	if (cd->line == 0) {
		pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
		__clear_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
	} else {
		pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
		__clear_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
	}
	raw_spin_unlock_irqrestore(lock, flags);
}
static void octeon_irq_ciu_disable_all(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int cpu;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock;

	cd = irq_data_get_irq_chip_data(data);

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);

		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
		if (cd->line == 0)
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		else
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

		raw_spin_lock_irqsave(lock, flags);
		__clear_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		if (cd->line == 0)
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		raw_spin_unlock_irqrestore(lock, flags);
	}
}

static void octeon_irq_ciu_enable_all(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int cpu;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock;

	cd = irq_data_get_irq_chip_data(data);

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);

		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
		if (cd->line == 0)
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		else
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

		raw_spin_lock_irqsave(lock, flags);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		if (cd->line == 0)
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		raw_spin_unlock_irqrestore(lock, flags);
	}
}
/*
 * Enable the irq on the next core in the affinity set for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu_enable_v2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	/*
	 * Called under the desc lock, so these should never get out
	 * of sync.
	 */
	if (cd->line == 0) {
		int index = octeon_coreid_for_cpu(cpu) * 2;

		set_bit(cd->bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
	} else {
		int index = octeon_coreid_for_cpu(cpu) * 2 + 1;

		set_bit(cd->bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
	}
}

/*
 * Enable the irq in the sum2 registers.
 */
static void octeon_irq_ciu_enable_sum2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	int index = octeon_coreid_for_cpu(cpu);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask);
}

/*
 * Disable the irq in the sum2 registers.
 */
static void octeon_irq_ciu_disable_local_sum2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	int index = octeon_coreid_for_cpu(cpu);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask);
}

static void octeon_irq_ciu_ack_sum2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	int index = octeon_coreid_for_cpu(cpu);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	cvmx_write_csr(CVMX_CIU_SUM2_PPX_IP4(index), mask);
}

static void octeon_irq_ciu_disable_all_sum2(struct irq_data *data)
{
	int cpu;
	struct octeon_ciu_chip_data *cd;
	u64 mask;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);

		cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(coreid), mask);
	}
}
/*
 * Enable the irq on the current CPU for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu_enable_local_v2(struct irq_data *data)
{
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		int index = cvmx_get_core_num() * 2;

		set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
	} else {
		int index = cvmx_get_core_num() * 2 + 1;

		set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
	}
}

static void octeon_irq_ciu_disable_local_v2(struct irq_data *data)
{
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		int index = cvmx_get_core_num() * 2;

		clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
	} else {
		int index = cvmx_get_core_num() * 2 + 1;

		clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
	}
}

/*
 * Write to the W1C bit in CVMX_CIU_INTX_SUM0 to clear the irq.
 */
static void octeon_irq_ciu_ack(struct irq_data *data)
{
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		int index = cvmx_get_core_num() * 2;

		cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
	} else {
		cvmx_write_csr(CVMX_CIU_INT_SUM1, mask);
	}
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu_disable_all_v2(struct irq_data *data)
{
	int cpu;
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2;

			clear_bit(cd->bit,
				  &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
		}
	} else {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;

			clear_bit(cd->bit,
				  &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
		}
	}
}

/*
 * Enable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu_enable_all_v2(struct irq_data *data)
{
	int cpu;
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2;

			set_bit(cd->bit,
				&per_cpu(octeon_irq_ciu0_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
		}
	} else {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;

			set_bit(cd->bit,
				&per_cpu(octeon_irq_ciu1_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
		}
	}
}

static int octeon_irq_ciu_set_type(struct irq_data *data, unsigned int t)
{
	irqd_set_trigger_type(data, t);

	if (t & IRQ_TYPE_EDGE_BOTH)
		irq_set_handler_locked(data, handle_edge_irq);
	else
		irq_set_handler_locked(data, handle_level_irq);

	return IRQ_SET_MASK_OK;
}
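
/*
 * Editor's note on the GPIO trigger encoding below: int_type selects
 * edge (1) vs. level (0) detection, and rx_xor inverts the input so
 * that falling-edge/level-low triggers become the hardware's
 * rising/high sense; fil_cnt/fil_sel configure the input glitch
 * filter mentioned in the comment.
 */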
static void octeon_irq_gpio_setup(struct irq_data *data)
{
	union cvmx_gpio_bit_cfgx cfg;
	struct octeon_ciu_chip_data *cd;
	u32 t = irqd_get_trigger_type(data);

	cd = irq_data_get_irq_chip_data(data);

	cfg.u64 = 0;
	cfg.s.int_en = 1;
	cfg.s.int_type = (t & IRQ_TYPE_EDGE_BOTH) != 0;
	cfg.s.rx_xor = (t & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) != 0;

	/* 140 ns glitch filter */
	cfg.s.fil_cnt = 7;
	cfg.s.fil_sel = 3;

	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), cfg.u64);
}

static void octeon_irq_ciu_enable_gpio_v2(struct irq_data *data)
{
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu_enable_v2(data);
}

static void octeon_irq_ciu_enable_gpio(struct irq_data *data)
{
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu_enable(data);
}

static int octeon_irq_ciu_gpio_set_type(struct irq_data *data, unsigned int t)
{
	irqd_set_trigger_type(data, t);
	octeon_irq_gpio_setup(data);

	if (t & IRQ_TYPE_EDGE_BOTH)
		irq_set_handler_locked(data, handle_edge_irq);
	else
		irq_set_handler_locked(data, handle_level_irq);

	return IRQ_SET_MASK_OK;
}

static void octeon_irq_ciu_disable_gpio_v2(struct irq_data *data)
{
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);

	octeon_irq_ciu_disable_all_v2(data);
}

static void octeon_irq_ciu_disable_gpio(struct irq_data *data)
{
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);

	octeon_irq_ciu_disable_all(data);
}

static void octeon_irq_ciu_gpio_ack(struct irq_data *data)
{
	struct octeon_ciu_chip_data *cd;
	u64 mask;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->gpio_line);

	cvmx_write_csr(CVMX_GPIO_INT_CLR, mask);
}
#ifdef CONFIG_SMP

static void octeon_irq_cpu_offline_ciu(struct irq_data *data)
{
	int cpu = smp_processor_id();
	cpumask_t new_affinity;
	struct cpumask *mask = irq_data_get_affinity_mask(data);

	if (!cpumask_test_cpu(cpu, mask))
		return;

	if (cpumask_weight(mask) > 1) {
		/*
		 * It has multi-CPU affinity; just remove this CPU
		 * from the affinity set.
		 */
		cpumask_copy(&new_affinity, mask);
		cpumask_clear_cpu(cpu, &new_affinity);
	} else {
		/* Otherwise, put it on the lowest numbered online CPU. */
		cpumask_clear(&new_affinity);
		cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
	}
	irq_set_affinity_locked(data, &new_affinity, false);
}

static int octeon_irq_ciu_set_affinity(struct irq_data *data,
				       const struct cpumask *dest, bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	unsigned long flags;
	struct octeon_ciu_chip_data *cd;
	unsigned long *pen;
	raw_spinlock_t *lock;

	cd = irq_data_get_irq_chip_data(data);

	/*
	 * For non-v2 CIU, we will allow only single CPU affinity.
	 * This removes the need to do locking in the .ack/.eoi
	 * functions.
	 */
	if (cpumask_weight(dest) != 1)
		return -EINVAL;

	if (!enable_one)
		return 0;

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);

		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
		raw_spin_lock_irqsave(lock, flags);

		if (cd->line == 0)
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		else
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = false;
			__set_bit(cd->bit, pen);
		} else {
			__clear_bit(cd->bit, pen);
		}
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();

		if (cd->line == 0)
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		raw_spin_unlock_irqrestore(lock, flags);
	}
	return 0;
}
/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data,
					  const struct cpumask *dest,
					  bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	if (!enable_one)
		return 0;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << cd->bit;

	if (cd->line == 0) {
		for_each_online_cpu(cpu) {
			unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
			int index = octeon_coreid_for_cpu(cpu) * 2;

			if (cpumask_test_cpu(cpu, dest) && enable_one) {
				enable_one = false;
				set_bit(cd->bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
			} else {
				clear_bit(cd->bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
			}
		}
	} else {
		for_each_online_cpu(cpu) {
			unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;

			if (cpumask_test_cpu(cpu, dest) && enable_one) {
				enable_one = false;
				set_bit(cd->bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
			} else {
				clear_bit(cd->bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
			}
		}
	}
	return 0;
}

static int octeon_irq_ciu_set_affinity_sum2(struct irq_data *data,
					    const struct cpumask *dest,
					    bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	if (!enable_one)
		return 0;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << cd->bit;

	for_each_online_cpu(cpu) {
		int index = octeon_coreid_for_cpu(cpu);

		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = false;
			cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask);
		} else {
			cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask);
		}
	}
	return 0;
}
#endif
static unsigned int edge_startup(struct irq_data *data)
{
	/* ack any pending edge-irq at startup, so there is
	 * an _edge_ to fire on when the event reappears.
	 */
	data->chip->irq_ack(data);
	data->chip->irq_enable(data);
	return 0;
}

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu_v2 = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_v2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu_v2_edge = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_ack = octeon_irq_ciu_ack,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_v2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu_sum2 = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable_sum2,
	.irq_disable = octeon_irq_ciu_disable_all_sum2,
	.irq_mask = octeon_irq_ciu_disable_local_sum2,
	.irq_unmask = octeon_irq_ciu_enable_sum2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu_sum2_edge = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable_sum2,
	.irq_disable = octeon_irq_ciu_disable_all_sum2,
	.irq_ack = octeon_irq_ciu_ack_sum2,
	.irq_mask = octeon_irq_ciu_disable_local_sum2,
	.irq_unmask = octeon_irq_ciu_enable_sum2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu_edge = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_ack = octeon_irq_ciu_ack,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};
/* The mbox versions don't do any affinity or round-robin. */
static struct irq_chip octeon_irq_chip_ciu_mbox_v2 = {
	.name = "CIU-M",
	.irq_enable = octeon_irq_ciu_enable_all_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_ack = octeon_irq_ciu_disable_local_v2,
	.irq_eoi = octeon_irq_ciu_enable_local_v2,
	.irq_cpu_online = octeon_irq_ciu_enable_local_v2,
	.irq_cpu_offline = octeon_irq_ciu_disable_local_v2,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static struct irq_chip octeon_irq_chip_ciu_mbox = {
	.name = "CIU-M",
	.irq_enable = octeon_irq_ciu_enable_all,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_ack = octeon_irq_ciu_disable_local,
	.irq_eoi = octeon_irq_ciu_enable_local,
	.irq_cpu_online = octeon_irq_ciu_enable_local,
	.irq_cpu_offline = octeon_irq_ciu_disable_local,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static struct irq_chip octeon_irq_chip_ciu_gpio_v2 = {
	.name = "CIU-GPIO",
	.irq_enable = octeon_irq_ciu_enable_gpio_v2,
	.irq_disable = octeon_irq_ciu_disable_gpio_v2,
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_v2,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
	.flags = IRQCHIP_SET_TYPE_MASKED,
};

static struct irq_chip octeon_irq_chip_ciu_gpio = {
	.name = "CIU-GPIO",
	.irq_enable = octeon_irq_ciu_enable_gpio,
	.irq_disable = octeon_irq_ciu_disable_gpio,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable,
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
	.flags = IRQCHIP_SET_TYPE_MASKED,
};
/*
 * Watchdog interrupts are special. They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu_wd_enable(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int coreid = data->irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */
	int cpu = octeon_cpu_for_coreid(coreid);
	raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);

	raw_spin_lock_irqsave(lock, flags);
	pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
	__set_bit(coreid, pen);
	/*
	 * Must be visible to octeon_irq_ip{2,3}_ciu() before enabling
	 * the irq.
	 */
	wmb();
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
	raw_spin_unlock_irqrestore(lock, flags);
}

/*
 * Watchdog interrupts are special. They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu1_wd_enable_v2(struct irq_data *data)
{
	int coreid = data->irq - OCTEON_IRQ_WDOG0;
	int cpu = octeon_cpu_for_coreid(coreid);

	set_bit(coreid, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(coreid * 2 + 1), 1ull << coreid);
}

static struct irq_chip octeon_irq_chip_ciu_wd_v2 = {
	.name = "CIU-W",
	.irq_enable = octeon_irq_ciu1_wd_enable_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_local_v2,
};

static struct irq_chip octeon_irq_chip_ciu_wd = {
	.name = "CIU-W",
	.irq_enable = octeon_irq_ciu_wd_enable,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable_local,
};
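
/*
 * Editor's note: sources that latch an edge in the SUM register need
 * the *_edge chip variants (with an ack callback and handle_edge_irq);
 * octeon_irq_ciu_map() further down consults this table to pick the
 * right one.
 */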
static bool octeon_irq_ciu_is_edge(unsigned int line, unsigned int bit)
{
	bool edge = false;

	if (line == 0)
		switch (bit) {
		case 48 ... 49: /* GMX DRP */
		case 50:        /* IPD_DRP */
		case 52 ... 55: /* Timers */
		case 58:        /* MPI */
			edge = true;
			break;
		default:
			break;
		}
	else /* line == 1 */
		switch (bit) {
		case 47: /* PTP */
			edge = true;
			break;
		default:
			break;
		}
	return edge;
}

struct octeon_irq_gpio_domain_data {
	unsigned int base_hwirq;
};
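
/*
 * Illustrative device tree fragment (assumed, not from this file) for
 * the two-cell GPIO specifier decoded below, for a node using this
 * controller as interrupt-parent:
 *
 *	interrupts = <4 8>;	// pin 4, trigger 8 = IRQ_TYPE_LEVEL_LOW
 */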
static int octeon_irq_gpio_xlat(struct irq_domain *d,
				struct device_node *node,
				const u32 *intspec,
				unsigned int intsize,
				unsigned long *out_hwirq,
				unsigned int *out_type)
{
	unsigned int type;
	unsigned int pin;
	unsigned int trigger;

	if (irq_domain_get_of_node(d) != node)
		return -EINVAL;

	if (intsize < 2)
		return -EINVAL;

	pin = intspec[0];
	if (pin >= 16)
		return -EINVAL;

	trigger = intspec[1];

	switch (trigger) {
	case 1:
		type = IRQ_TYPE_EDGE_RISING;
		break;
	case 2:
		type = IRQ_TYPE_EDGE_FALLING;
		break;
	case 4:
		type = IRQ_TYPE_LEVEL_HIGH;
		break;
	case 8:
		type = IRQ_TYPE_LEVEL_LOW;
		break;
	default:
		pr_err("Error: (%s) Invalid irq trigger specification: %x\n",
		       node->name,
		       trigger);
		type = IRQ_TYPE_LEVEL_LOW;
		break;
	}
	*out_type = type;
	*out_hwirq = pin;

	return 0;
}
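
/*
 * Editor's note: the CIU hwirq number packs the sum line and bit as
 * (ciu << 6) | bit. For example, an illustrative specifier of <1 16>
 * (assumed, not from this file) would name bit 16 of CIU sum line 1,
 * i.e. hwirq 80.
 */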
static int octeon_irq_ciu_xlat(struct irq_domain *d,
			       struct device_node *node,
			       const u32 *intspec,
			       unsigned int intsize,
			       unsigned long *out_hwirq,
			       unsigned int *out_type)
{
	unsigned int ciu, bit;
	struct octeon_irq_ciu_domain_data *dd = d->host_data;

	ciu = intspec[0];
	bit = intspec[1];

	if (ciu >= dd->num_sum || bit > 63)
		return -EINVAL;

	*out_hwirq = (ciu << 6) | bit;
	*out_type = 0;

	return 0;
}

static struct irq_chip *octeon_irq_ciu_chip;
static struct irq_chip *octeon_irq_ciu_chip_edge;
static struct irq_chip *octeon_irq_gpio_chip;

static int octeon_irq_ciu_map(struct irq_domain *d,
			      unsigned int virq, irq_hw_number_t hw)
{
	int rv;
	unsigned int line = hw >> 6;
	unsigned int bit = hw & 63;
	struct octeon_irq_ciu_domain_data *dd = d->host_data;

	if (line >= dd->num_sum || octeon_irq_ciu_to_irq[line][bit] != 0)
		return -EINVAL;

	if (line == 2) {
		if (octeon_irq_ciu_is_edge(line, bit))
			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
				&octeon_irq_chip_ciu_sum2_edge,
				handle_edge_irq);
		else
			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
				&octeon_irq_chip_ciu_sum2,
				handle_level_irq);
	} else {
		if (octeon_irq_ciu_is_edge(line, bit))
			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
				octeon_irq_ciu_chip_edge,
				handle_edge_irq);
		else
			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
				octeon_irq_ciu_chip,
				handle_level_irq);
	}
	return rv;
}

static int octeon_irq_gpio_map(struct irq_domain *d,
			       unsigned int virq, irq_hw_number_t hw)
{
	struct octeon_irq_gpio_domain_data *gpiod = d->host_data;
	unsigned int line, bit;
	int r;

	line = (hw + gpiod->base_hwirq) >> 6;
	bit = (hw + gpiod->base_hwirq) & 63;
	if (line >= ARRAY_SIZE(octeon_irq_ciu_to_irq) ||
	    octeon_irq_ciu_to_irq[line][bit] != 0)
		return -EINVAL;

	/*
	 * Default to handle_level_irq. If the DT contains a different
	 * trigger type, it will call the irq_set_type callback and
	 * the handler gets updated.
	 */
	r = octeon_irq_set_ciu_mapping(virq, line, bit, hw,
				       octeon_irq_gpio_chip, handle_level_irq);
	return r;
}

static struct irq_domain_ops octeon_irq_domain_ciu_ops = {
	.map = octeon_irq_ciu_map,
	.unmap = octeon_irq_free_cd,
	.xlate = octeon_irq_ciu_xlat,
};

static struct irq_domain_ops octeon_irq_domain_gpio_ops = {
	.map = octeon_irq_gpio_map,
	.unmap = octeon_irq_free_cd,
	.xlate = octeon_irq_gpio_xlat,
};
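
/*
 * Editor's note: the three handlers below are the IP2/IP3/IP4 entry
 * points. Each reads its SUM register, masks it with the enabled set,
 * and dispatches the highest-numbered pending bit via the
 * octeon_irq_ciu_to_irq[] reverse map built during mapping.
 */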
static void octeon_irq_ip2_ciu(void)
{
	const unsigned long core_id = cvmx_get_core_num();
	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2));

	ciu_sum &= __this_cpu_read(octeon_irq_ciu0_en_mirror);
	if (likely(ciu_sum)) {
		int bit = fls64(ciu_sum) - 1;
		int irq = octeon_irq_ciu_to_irq[0][bit];

		if (likely(irq))
			do_IRQ(irq);
		else
			spurious_interrupt();
	} else {
		spurious_interrupt();
	}
}

static void octeon_irq_ip3_ciu(void)
{
	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1);

	ciu_sum &= __this_cpu_read(octeon_irq_ciu1_en_mirror);
	if (likely(ciu_sum)) {
		int bit = fls64(ciu_sum) - 1;
		int irq = octeon_irq_ciu_to_irq[1][bit];

		if (likely(irq))
			do_IRQ(irq);
		else
			spurious_interrupt();
	} else {
		spurious_interrupt();
	}
}

static void octeon_irq_ip4_ciu(void)
{
	int coreid = cvmx_get_core_num();
	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_SUM2_PPX_IP4(coreid));
	u64 ciu_en = cvmx_read_csr(CVMX_CIU_EN2_PPX_IP4(coreid));

	ciu_sum &= ciu_en;
	if (likely(ciu_sum)) {
		int bit = fls64(ciu_sum) - 1;
		int irq = octeon_irq_ciu_to_irq[2][bit];

		if (likely(irq))
			do_IRQ(irq);
		else
			spurious_interrupt();
	} else {
		spurious_interrupt();
	}
}
static bool octeon_irq_use_ip4;

static void octeon_irq_local_enable_ip4(void *arg)
{
	set_c0_status(STATUSF_IP4);
}

static void octeon_irq_ip4_mask(void)
{
	clear_c0_status(STATUSF_IP4);
	spurious_interrupt();
}

static void (*octeon_irq_ip2)(void);
static void (*octeon_irq_ip3)(void);
static void (*octeon_irq_ip4)(void);

void (*octeon_irq_setup_secondary)(void);

void octeon_irq_set_ip4_handler(octeon_irq_ip4_handler_t h)
{
	octeon_irq_ip4 = h;
	octeon_irq_use_ip4 = true;
	on_each_cpu(octeon_irq_local_enable_ip4, NULL, 1);
}

static void octeon_irq_percpu_enable(void)
{
	irq_cpu_online();
}

static void octeon_irq_init_ciu_percpu(void)
{
	int coreid = cvmx_get_core_num();

	__this_cpu_write(octeon_irq_ciu0_en_mirror, 0);
	__this_cpu_write(octeon_irq_ciu1_en_mirror, 0);
	wmb();
	raw_spin_lock_init(this_cpu_ptr(&octeon_irq_ciu_spinlock));
	/*
	 * Disable All CIU Interrupts. The ones we need will be
	 * enabled later. Read the SUM register so we know the write
	 * completed.
	 */
	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0);
	cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2)));
}
static void octeon_irq_init_ciu2_percpu(void)
{
	u64 regx, ipx;
	int coreid = cvmx_get_core_num();
	u64 base = CVMX_CIU2_EN_PPX_IP2_WRKQ(coreid);

	/*
	 * Disable All CIU2 Interrupts. The ones we need will be
	 * enabled later. Read the SUM register so we know the write
	 * completed.
	 *
	 * There are 9 registers and 3 IPX levels with strides 0x1000
	 * and 0x200 respectively. Use loops to clear them.
	 */
	for (regx = 0; regx <= 0x8000; regx += 0x1000) {
		for (ipx = 0; ipx <= 0x400; ipx += 0x200)
			cvmx_write_csr(base + regx + ipx, 0);
	}

	cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(coreid));
}

static void octeon_irq_setup_secondary_ciu(void)
{
	octeon_irq_init_ciu_percpu();
	octeon_irq_percpu_enable();

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	if (octeon_irq_use_ip4)
		set_c0_status(STATUSF_IP4);
	else
		clear_c0_status(STATUSF_IP4);
}

static void octeon_irq_setup_secondary_ciu2(void)
{
	octeon_irq_init_ciu2_percpu();
	octeon_irq_percpu_enable();

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	if (octeon_irq_use_ip4)
		set_c0_status(STATUSF_IP4);
	else
		clear_c0_status(STATUSF_IP4);
}
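
/*
 * Editor's note: octeon_irq_init_ciu() wires everything together for
 * CIU chips. It selects the W1S/W1C ("v2") chip variants where the
 * hardware has them, registers the irq domain, and force-maps the
 * fixed legacy OCTEON_IRQ_* numbers onto their CIU line/bit positions.
 */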

static int __init octeon_irq_init_ciu(
	struct device_node *ciu_node, struct device_node *parent)
{
	unsigned int i, r;
	struct irq_chip *chip;
	struct irq_chip *chip_edge;
	struct irq_chip *chip_mbox;
	struct irq_chip *chip_wd;
	struct irq_domain *ciu_domain = NULL;
	struct octeon_irq_ciu_domain_data *dd;

	dd = kzalloc(sizeof(*dd), GFP_KERNEL);
	if (!dd)
		return -ENOMEM;

	octeon_irq_init_ciu_percpu();
	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu;

	octeon_irq_ip2 = octeon_irq_ip2_ciu;
	octeon_irq_ip3 = octeon_irq_ip3_ciu;
	if ((OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3())
		&& !OCTEON_IS_MODEL(OCTEON_CN63XX)) {
		octeon_irq_ip4 = octeon_irq_ip4_ciu;
		dd->num_sum = 3;
		octeon_irq_use_ip4 = true;
	} else {
		octeon_irq_ip4 = octeon_irq_ip4_mask;
		dd->num_sum = 2;
		octeon_irq_use_ip4 = false;
	}
	if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
	    OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) {
		chip = &octeon_irq_chip_ciu_v2;
		chip_edge = &octeon_irq_chip_ciu_v2_edge;
		chip_mbox = &octeon_irq_chip_ciu_mbox_v2;
		chip_wd = &octeon_irq_chip_ciu_wd_v2;
		octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio_v2;
	} else {
		chip = &octeon_irq_chip_ciu;
		chip_edge = &octeon_irq_chip_ciu_edge;
		chip_mbox = &octeon_irq_chip_ciu_mbox;
		chip_wd = &octeon_irq_chip_ciu_wd;
		octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio;
	}
	octeon_irq_ciu_chip = chip;
	octeon_irq_ciu_chip_edge = chip_edge;

	/* Mips internal */
	octeon_irq_init_core();

	ciu_domain = irq_domain_add_tree(
		ciu_node, &octeon_irq_domain_ciu_ops, dd);
	irq_set_default_host(ciu_domain);

	/* CIU_0 */
	for (i = 0; i < 16; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0);
		if (r)
			goto err;
	}

	r = octeon_irq_set_ciu_mapping(
		OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq);
	if (r)
		goto err;
	r = octeon_irq_set_ciu_mapping(
		OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq);
	if (r)
		goto err;

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36);
		if (r)
			goto err;
	}
	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40);
		if (r)
			goto err;
	}

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45);
	if (r)
		goto err;

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46);
	if (r)
		goto err;

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52);
		if (r)
			goto err;
	}

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59);
	if (r)
		goto err;

	/* CIU_1 */
	for (i = 0; i < 16; i++) {
		r = octeon_irq_set_ciu_mapping(
			i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd,
			handle_level_irq);
		if (r)
			goto err;
	}

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	if (octeon_irq_use_ip4)
		set_c0_status(STATUSF_IP4);
	else
		clear_c0_status(STATUSF_IP4);

	return 0;
err:
	return r;
}
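
/*
 * The GPIO block's "interrupts" property gives the CIU position of
 * GPIO line 0: with one interrupt cell it is the base hwirq directly,
 * and with two cells it is packed as (line << 6) | bit, the same
 * encoding the CIU domains use for their hwirqs.
 */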

static int __init octeon_irq_init_gpio(
	struct device_node *gpio_node, struct device_node *parent)
{
	struct octeon_irq_gpio_domain_data *gpiod;
	u32 interrupt_cells;
	unsigned int base_hwirq;
	int r;

	r = of_property_read_u32(parent, "#interrupt-cells", &interrupt_cells);
	if (r)
		return r;

	if (interrupt_cells == 1) {
		u32 v;

		r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v);
		if (r) {
			pr_warn("No \"interrupts\" property.\n");
			return r;
		}
		base_hwirq = v;
	} else if (interrupt_cells == 2) {
		u32 v0, v1;

		r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v0);
		if (r) {
			pr_warn("No \"interrupts\" property.\n");
			return r;
		}
		r = of_property_read_u32_index(gpio_node, "interrupts", 1, &v1);
		if (r) {
			pr_warn("No \"interrupts\" property.\n");
			return r;
		}
		base_hwirq = (v0 << 6) | v1;
	} else {
		pr_warn("Bad \"#interrupt-cells\" property: %u\n",
			interrupt_cells);
		return -EINVAL;
	}

	gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL);
	if (gpiod) {
		/* gpio domain host_data is the base hwirq number. */
		gpiod->base_hwirq = base_hwirq;
		irq_domain_add_linear(
			gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod);
	} else {
		pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
		return -ENOMEM;
	}

	/*
	 * Clear the OF_POPULATED flag that was set by of_irq_init()
	 * so that all GPIO devices will be probed.
	 */
	of_node_clear_flag(gpio_node, OF_POPULATED);

	return 0;
}

/*
 * Watchdog interrupts are special.  They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu2_wd_enable(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = data->irq - OCTEON_IRQ_WDOG0;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
		(0x1000ull * cd->line);
	cvmx_write_csr(en_addr, mask);
}
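
/*
 * The CIU2 enable registers have separate write-one-to-set (W1S) and
 * write-one-to-clear (W1C) aliases, so the helpers below can flip a
 * single enable bit with one CSR write, with no read-modify-write and
 * no locking.  Each line's registers are 0x1000 bytes apart, hence
 * the 0x1000ull * cd->line offset.
 */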

static void octeon_irq_ciu2_enable(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int cpu = next_cpu_for_irq(data);
	int coreid = octeon_coreid_for_cpu(cpu);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
		(0x1000ull * cd->line);
	cvmx_write_csr(en_addr, mask);
}

static void octeon_irq_ciu2_enable_local(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
		(0x1000ull * cd->line);
	cvmx_write_csr(en_addr, mask);
}

static void octeon_irq_ciu2_disable_local(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) +
		(0x1000ull * cd->line);
	cvmx_write_csr(en_addr, mask);
}

static void octeon_irq_ciu2_ack(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd->line);
	cvmx_write_csr(en_addr, mask);
}

static void octeon_irq_ciu2_disable_all(struct irq_data *data)
{
	int cpu;
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	for_each_online_cpu(cpu) {
		u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(
			octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd->line);
		cvmx_write_csr(en_addr, mask);
	}
}

static void octeon_irq_ciu2_mbox_enable_all(struct irq_data *data)
{
	int cpu;
	u64 mask;

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);

	for_each_online_cpu(cpu) {
		u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(
			octeon_coreid_for_cpu(cpu));
		cvmx_write_csr(en_addr, mask);
	}
}

static void octeon_irq_ciu2_mbox_disable_all(struct irq_data *data)
{
	int cpu;
	u64 mask;

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);

	for_each_online_cpu(cpu) {
		u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(
			octeon_coreid_for_cpu(cpu));
		cvmx_write_csr(en_addr, mask);
	}
}

static void octeon_irq_ciu2_mbox_enable_local(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
	en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(coreid);
	cvmx_write_csr(en_addr, mask);
}

static void octeon_irq_ciu2_mbox_disable_local(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
	en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(coreid);
	cvmx_write_csr(en_addr, mask);
}

#ifdef CONFIG_SMP
static int octeon_irq_ciu2_set_affinity(struct irq_data *data,
					const struct cpumask *dest, bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	if (!enable_one)
		return 0;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << cd->bit;

	for_each_online_cpu(cpu) {
		u64 en_addr;

		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = false;
			en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(
				octeon_coreid_for_cpu(cpu)) +
				(0x1000ull * cd->line);
		} else {
			en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(
				octeon_coreid_for_cpu(cpu)) +
				(0x1000ull * cd->line);
		}
		cvmx_write_csr(en_addr, mask);
	}
	return 0;
}
#endif

static void octeon_irq_ciu2_enable_gpio(struct irq_data *data)
{
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu2_enable(data);
}

static void octeon_irq_ciu2_disable_gpio(struct irq_data *data)
{
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);

	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);
	octeon_irq_ciu2_disable_all(data);
}

static struct irq_chip octeon_irq_chip_ciu2 = {
	.name = "CIU2-E",
	.irq_enable = octeon_irq_ciu2_enable,
	.irq_disable = octeon_irq_ciu2_disable_all,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu2_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu2_edge = {
	.name = "CIU2-E",
	.irq_enable = octeon_irq_ciu2_enable,
	.irq_disable = octeon_irq_ciu2_disable_all,
	.irq_ack = octeon_irq_ciu2_ack,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu2_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu2_mbox = {
	.name = "CIU2-M",
	.irq_enable = octeon_irq_ciu2_mbox_enable_all,
	.irq_disable = octeon_irq_ciu2_mbox_disable_all,
	.irq_ack = octeon_irq_ciu2_mbox_disable_local,
	.irq_eoi = octeon_irq_ciu2_mbox_enable_local,
	.irq_cpu_online = octeon_irq_ciu2_mbox_enable_local,
	.irq_cpu_offline = octeon_irq_ciu2_mbox_disable_local,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static struct irq_chip octeon_irq_chip_ciu2_wd = {
	.name = "CIU2-W",
	.irq_enable = octeon_irq_ciu2_wd_enable,
	.irq_disable = octeon_irq_ciu2_disable_all,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable_local,
};

static struct irq_chip octeon_irq_chip_ciu2_gpio = {
	.name = "CIU-GPIO",
	.irq_enable = octeon_irq_ciu2_enable_gpio,
	.irq_disable = octeon_irq_ciu2_disable_gpio,
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu2_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
	.flags = IRQCHIP_SET_TYPE_MASKED,
};
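
/* Device tree cells are (ciu, bit); pack them into a single hwirq. */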

static int octeon_irq_ciu2_xlat(struct irq_domain *d,
				struct device_node *node,
				const u32 *intspec,
				unsigned int intsize,
				unsigned long *out_hwirq,
				unsigned int *out_type)
{
	unsigned int ciu, bit;

	ciu = intspec[0];
	bit = intspec[1];

	*out_hwirq = (ciu << 6) | bit;
	*out_type = 0;

	return 0;
}

static bool octeon_irq_ciu2_is_edge(unsigned int line, unsigned int bit)
{
	bool edge = false;

	if (line == 3) /* MIO */
		switch (bit) {
		case 2:	 /* IPD_DRP */
		case 8 ... 11: /* Timers */
		case 48: /* PTP */
			edge = true;
			break;
		default:
			break;
		}
	else if (line == 6) /* PKT */
		switch (bit) {
		case 52 ... 53: /* ILK_DRP */
		case 8 ... 12:	/* GMX_DRP */
			edge = true;
			break;
		default:
			break;
		}
	return edge;
}

static int octeon_irq_ciu2_map(struct irq_domain *d,
			       unsigned int virq, irq_hw_number_t hw)
{
	unsigned int line = hw >> 6;
	unsigned int bit = hw & 63;

	/*
	 * Don't map the irq if it is reserved for GPIO.
	 * (Line 7 holds the GPIO lines.)
	 */
	if (line == 7)
		return 0;

	if (line > 7 || octeon_irq_ciu_to_irq[line][bit] != 0)
		return -EINVAL;

	if (octeon_irq_ciu2_is_edge(line, bit))
		octeon_irq_set_ciu_mapping(virq, line, bit, 0,
					   &octeon_irq_chip_ciu2_edge,
					   handle_edge_irq);
	else
		octeon_irq_set_ciu_mapping(virq, line, bit, 0,
					   &octeon_irq_chip_ciu2,
					   handle_level_irq);

	return 0;
}

static struct irq_domain_ops octeon_irq_domain_ciu2_ops = {
	.map = octeon_irq_ciu2_map,
	.unmap = octeon_irq_free_cd,
	.xlate = octeon_irq_ciu2_xlat,
};
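
/*
 * IP2 dispatch is a two-level lookup: the SUM register reports which
 * lines are pending, and the selected line's SRC register reports the
 * pending bit within that line.
 */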

static void octeon_irq_ciu2(void)
{
	int line;
	int bit;
	int irq;
	u64 src_reg, src, sum;
	const unsigned long core_id = cvmx_get_core_num();

	sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(core_id)) & 0xfful;

	if (unlikely(!sum))
		goto spurious;

	line = fls64(sum) - 1;
	src_reg = CVMX_CIU2_SRC_PPX_IP2_WRKQ(core_id) + (0x1000 * line);
	src = cvmx_read_csr(src_reg);

	if (unlikely(!src))
		goto spurious;

	bit = fls64(src) - 1;
	irq = octeon_irq_ciu_to_irq[line][bit];
	if (unlikely(!irq))
		goto spurious;

	do_IRQ(irq);
	goto out;

spurious:
	spurious_interrupt();
out:
	/*
	 * CN68XX pass 1.x has an erratum where accessing the ACK
	 * registers can stop interrupts from propagating.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
	else
		cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP2(core_id));
	return;
}

static void octeon_irq_ciu2_mbox(void)
{
	int line;
	const unsigned long core_id = cvmx_get_core_num();
	u64 sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP3(core_id)) >> 60;

	if (unlikely(!sum))
		goto spurious;

	line = fls64(sum) - 1;

	do_IRQ(OCTEON_IRQ_MBOX0 + line);
	goto out;

spurious:
	spurious_interrupt();
out:
	/*
	 * CN68XX pass 1.x has an erratum where accessing the ACK
	 * registers can stop interrupts from propagating.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
	else
		cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP3(core_id));
	return;
}

static int __init octeon_irq_init_ciu2(
	struct device_node *ciu_node, struct device_node *parent)
{
	unsigned int i, r;
	struct irq_domain *ciu_domain = NULL;

	octeon_irq_init_ciu2_percpu();
	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu2;

	octeon_irq_gpio_chip = &octeon_irq_chip_ciu2_gpio;
	octeon_irq_ip2 = octeon_irq_ciu2;
	octeon_irq_ip3 = octeon_irq_ciu2_mbox;
	octeon_irq_ip4 = octeon_irq_ip4_mask;

	/* Mips internal */
	octeon_irq_init_core();

	ciu_domain = irq_domain_add_tree(
		ciu_node, &octeon_irq_domain_ciu2_ops, NULL);
	irq_set_default_host(ciu_domain);

	/* CIU2 */
	for (i = 0; i < 64; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i);
		if (r)
			goto err;
	}

	for (i = 0; i < 32; i++) {
		r = octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0,
			&octeon_irq_chip_ciu2_wd, handle_level_irq);
		if (r)
			goto err;
	}

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8);
		if (r)
			goto err;
	}

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i);
		if (r)
			goto err;
	}

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8);
		if (r)
			goto err;
	}

	irq_set_chip_and_handler(OCTEON_IRQ_MBOX0,
				 &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
	irq_set_chip_and_handler(OCTEON_IRQ_MBOX1,
				 &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
	irq_set_chip_and_handler(OCTEON_IRQ_MBOX2,
				 &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
	irq_set_chip_and_handler(OCTEON_IRQ_MBOX3,
				 &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	clear_c0_status(STATUSF_IP4);

	return 0;
err:
	return r;
}
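
/*
 * CIB blocks are simple secondary interrupt controllers described by
 * a RAW (status) register and an EN (enable) register.  Unlike the
 * CIU2 there are no W1S/W1C aliases, so EN updates below are done as
 * locked read-modify-write sequences.
 */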

struct octeon_irq_cib_host_data {
	raw_spinlock_t lock;
	u64 raw_reg;
	u64 en_reg;
	int max_bits;
};

struct octeon_irq_cib_chip_data {
	struct octeon_irq_cib_host_data *host_data;
	int bit;
};

static void octeon_irq_cib_enable(struct irq_data *data)
{
	unsigned long flags;
	u64 en;
	struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data);
	struct octeon_irq_cib_host_data *host_data = cd->host_data;

	raw_spin_lock_irqsave(&host_data->lock, flags);
	en = cvmx_read_csr(host_data->en_reg);
	en |= 1ull << cd->bit;
	cvmx_write_csr(host_data->en_reg, en);
	raw_spin_unlock_irqrestore(&host_data->lock, flags);
}

static void octeon_irq_cib_disable(struct irq_data *data)
{
	unsigned long flags;
	u64 en;
	struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data);
	struct octeon_irq_cib_host_data *host_data = cd->host_data;

	raw_spin_lock_irqsave(&host_data->lock, flags);
	en = cvmx_read_csr(host_data->en_reg);
	en &= ~(1ull << cd->bit);
	cvmx_write_csr(host_data->en_reg, en);
	raw_spin_unlock_irqrestore(&host_data->lock, flags);
}

static int octeon_irq_cib_set_type(struct irq_data *data, unsigned int t)
{
	irqd_set_trigger_type(data, t);
	return IRQ_SET_MASK_OK;
}

static struct irq_chip octeon_irq_chip_cib = {
	.name = "CIB",
	.irq_enable = octeon_irq_cib_enable,
	.irq_disable = octeon_irq_cib_disable,
	.irq_mask = octeon_irq_cib_disable,
	.irq_unmask = octeon_irq_cib_enable,
	.irq_set_type = octeon_irq_cib_set_type,
};

static int octeon_irq_cib_xlat(struct irq_domain *d,
			       struct device_node *node,
			       const u32 *intspec,
			       unsigned int intsize,
			       unsigned long *out_hwirq,
			       unsigned int *out_type)
{
	unsigned int type = 0;

	if (intsize == 2)
		type = intspec[1];

	switch (type) {
	case 0: /* unofficial value, but we might as well let it work. */
	case 4: /* official value for level triggering. */
		*out_type = IRQ_TYPE_LEVEL_HIGH;
		break;
	case 1: /* official value for edge triggering. */
		*out_type = IRQ_TYPE_EDGE_RISING;
		break;
	default: /* Nothing else is acceptable. */
		return -EINVAL;
	}

	*out_hwirq = intspec[0];

	return 0;
}

static int octeon_irq_cib_map(struct irq_domain *d,
			      unsigned int virq, irq_hw_number_t hw)
{
	struct octeon_irq_cib_host_data *host_data = d->host_data;
	struct octeon_irq_cib_chip_data *cd;

	if (hw >= host_data->max_bits) {
		pr_err("ERROR: %s mapping %u is too big!\n",
		       irq_domain_get_of_node(d)->name, (unsigned)hw);
		return -EINVAL;
	}

	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd)
		return -ENOMEM;

	cd->host_data = host_data;
	cd->bit = hw;

	irq_set_chip_and_handler(virq, &octeon_irq_chip_cib,
				 handle_simple_irq);
	irq_set_chip_data(virq, cd);
	return 0;
}

static struct irq_domain_ops octeon_irq_domain_cib_ops = {
	.map = octeon_irq_cib_map,
	.unmap = octeon_irq_free_cd,
	.xlate = octeon_irq_cib_xlat,
};

/* Chain to real handler. */
static irqreturn_t octeon_irq_cib_handler(int my_irq, void *data)
{
	u64 en;
	u64 raw;
	u64 bits;
	int i;
	int irq;
	struct irq_domain *cib_domain = data;
	struct octeon_irq_cib_host_data *host_data = cib_domain->host_data;

	en = cvmx_read_csr(host_data->en_reg);
	raw = cvmx_read_csr(host_data->raw_reg);

	bits = en & raw;

	for (i = 0; i < host_data->max_bits; i++) {
		if ((bits & 1ull << i) == 0)
			continue;
		irq = irq_find_mapping(cib_domain, i);
		if (!irq) {
			unsigned long flags;

			pr_err("ERROR: CIB bit %d@%llx IRQ unhandled, disabling\n",
			       i, host_data->raw_reg);
			raw_spin_lock_irqsave(&host_data->lock, flags);
			en = cvmx_read_csr(host_data->en_reg);
			en &= ~(1ull << i);
			cvmx_write_csr(host_data->en_reg, en);
			cvmx_write_csr(host_data->raw_reg, 1ull << i);
			raw_spin_unlock_irqrestore(&host_data->lock, flags);
		} else {
			struct irq_desc *desc = irq_to_desc(irq);
			struct irq_data *irq_data = irq_desc_get_irq_data(desc);

			/* If edge, acknowledge the bit we will be sending. */
			if (irqd_get_trigger_type(irq_data) &
			    IRQ_TYPE_EDGE_BOTH)
				cvmx_write_csr(host_data->raw_reg, 1ull << i);
			generic_handle_irq_desc(desc);
		}
	}

	return IRQ_HANDLED;
}

static int __init octeon_irq_init_cib(struct device_node *ciu_node,
				      struct device_node *parent)
{
	const __be32 *addr;
	u32 val;
	struct octeon_irq_cib_host_data *host_data;
	int parent_irq;
	int r;
	struct irq_domain *cib_domain;

	parent_irq = irq_of_parse_and_map(ciu_node, 0);
	if (!parent_irq) {
		pr_err("ERROR: Couldn't acquire parent_irq for %s\n",
		       ciu_node->name);
		return -EINVAL;
	}

	host_data = kzalloc(sizeof(*host_data), GFP_KERNEL);
	if (!host_data)
		return -ENOMEM;
	raw_spin_lock_init(&host_data->lock);

	addr = of_get_address(ciu_node, 0, NULL, NULL);
	if (!addr) {
		pr_err("ERROR: Couldn't acquire reg(0) %s\n", ciu_node->name);
		kfree(host_data);
		return -EINVAL;
	}
	host_data->raw_reg = (u64)phys_to_virt(
		of_translate_address(ciu_node, addr));

	addr = of_get_address(ciu_node, 1, NULL, NULL);
	if (!addr) {
		pr_err("ERROR: Couldn't acquire reg(1) %s\n", ciu_node->name);
		kfree(host_data);
		return -EINVAL;
	}
	host_data->en_reg = (u64)phys_to_virt(
		of_translate_address(ciu_node, addr));

	r = of_property_read_u32(ciu_node, "cavium,max-bits", &val);
	if (r) {
		pr_err("ERROR: Couldn't read cavium,max-bits from %s\n",
		       ciu_node->name);
		kfree(host_data);
		return r;
	}
	host_data->max_bits = val;

	cib_domain = irq_domain_add_linear(ciu_node, host_data->max_bits,
					   &octeon_irq_domain_cib_ops,
					   host_data);
	if (!cib_domain) {
		pr_err("ERROR: Couldn't irq_domain_add_linear()\n");
		kfree(host_data);
		return -ENOMEM;
	}

	cvmx_write_csr(host_data->en_reg, 0); /* disable all IRQs */
	cvmx_write_csr(host_data->raw_reg, ~0); /* ack any outstanding */

	r = request_irq(parent_irq, octeon_irq_cib_handler,
			IRQF_NO_THREAD, "cib", cib_domain);
	if (r) {
		pr_err("request_irq cib failed %d\n", r);
		return r;
	}
	pr_info("CIB interrupt controller probed: %llx %d\n",
		host_data->raw_reg, host_data->max_bits);
	return 0;
}
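
/*
 * CIU3 interrupt source numbers (intsn) are 20 bits wide; bits 12..19
 * select the major block.  Major block 0x04 is the software (mbox)
 * range, which is handled separately from the generic xlat below.
 */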

int octeon_irq_ciu3_xlat(struct irq_domain *d,
			 struct device_node *node,
			 const u32 *intspec,
			 unsigned int intsize,
			 unsigned long *out_hwirq,
			 unsigned int *out_type)
{
	struct octeon_ciu3_info *ciu3_info = d->host_data;
	unsigned int hwirq, type, intsn_major;
	union cvmx_ciu3_iscx_ctl isc;

	if (intsize < 2)
		return -EINVAL;
	hwirq = intspec[0];
	type = intspec[1];

	if (hwirq >= (1 << 20))
		return -EINVAL;

	intsn_major = hwirq >> 12;
	switch (intsn_major) {
	case 0x04: /* Software handled separately. */
		return -EINVAL;
	default:
		break;
	}

	isc.u64 = cvmx_read_csr(ciu3_info->ciu3_addr + CIU3_ISC_CTL(hwirq));
	if (!isc.s.imp)
		return -EINVAL;

	switch (type) {
	case 4: /* official value for level triggering. */
		*out_type = IRQ_TYPE_LEVEL_HIGH;
		break;
	case 0: /* unofficial value, but we might as well let it work. */
	case 1: /* official value for edge triggering. */
		*out_type = IRQ_TYPE_EDGE_RISING;
		break;
	default: /* Nothing else is acceptable. */
		return -EINVAL;
	}

	*out_hwirq = hwirq;

	return 0;
}
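
/*
 * Each intsn has an ISC (interrupt source control) CTL register with
 * W1C/W1S aliases.  The helpers below first clear the enable via W1C,
 * then program CTL with the enable bit and the target CPU's IDT; the
 * final read back makes sure the CSR write has posted.
 */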

void octeon_irq_ciu3_enable(struct irq_data *data)
{
	int cpu;
	union cvmx_ciu3_iscx_ctl isc_ctl;
	union cvmx_ciu3_iscx_w1c isc_w1c;
	u64 isc_ctl_addr;
	struct octeon_ciu_chip_data *cd;

	cpu = next_cpu_for_irq(data);

	cd = irq_data_get_irq_chip_data(data);

	isc_w1c.u64 = 0;
	isc_w1c.s.en = 1;
	cvmx_write_csr(cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn), isc_w1c.u64);

	isc_ctl_addr = cd->ciu3_addr + CIU3_ISC_CTL(cd->intsn);
	isc_ctl.u64 = 0;
	isc_ctl.s.en = 1;
	isc_ctl.s.idt = per_cpu(octeon_irq_ciu3_idt_ip2, cpu);
	cvmx_write_csr(isc_ctl_addr, isc_ctl.u64);
	cvmx_read_csr(isc_ctl_addr);
}

void octeon_irq_ciu3_disable(struct irq_data *data)
{
	u64 isc_ctl_addr;
	union cvmx_ciu3_iscx_w1c isc_w1c;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);

	isc_w1c.u64 = 0;
	isc_w1c.s.en = 1;

	isc_ctl_addr = cd->ciu3_addr + CIU3_ISC_CTL(cd->intsn);
	cvmx_write_csr(cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn), isc_w1c.u64);
	cvmx_write_csr(isc_ctl_addr, 0);
	cvmx_read_csr(isc_ctl_addr);
}

void octeon_irq_ciu3_ack(struct irq_data *data)
{
	u64 isc_w1c_addr;
	union cvmx_ciu3_iscx_w1c isc_w1c;
	struct octeon_ciu_chip_data *cd;
	u32 trigger_type = irqd_get_trigger_type(data);

	/*
	 * We use a single irq_chip, so we have to do nothing to ack a
	 * level interrupt.
	 */
	if (!(trigger_type & IRQ_TYPE_EDGE_BOTH))
		return;

	cd = irq_data_get_irq_chip_data(data);

	isc_w1c.u64 = 0;
	isc_w1c.s.raw = 1;

	isc_w1c_addr = cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn);
	cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
	cvmx_read_csr(isc_w1c_addr);
}

void octeon_irq_ciu3_mask(struct irq_data *data)
{
	union cvmx_ciu3_iscx_w1c isc_w1c;
	u64 isc_w1c_addr;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);

	isc_w1c.u64 = 0;
	isc_w1c.s.en = 1;

	isc_w1c_addr = cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn);
	cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
	cvmx_read_csr(isc_w1c_addr);
}

void octeon_irq_ciu3_mask_ack(struct irq_data *data)
{
	union cvmx_ciu3_iscx_w1c isc_w1c;
	u64 isc_w1c_addr;
	struct octeon_ciu_chip_data *cd;
	u32 trigger_type = irqd_get_trigger_type(data);

	cd = irq_data_get_irq_chip_data(data);

	isc_w1c.u64 = 0;
	isc_w1c.s.en = 1;

	/*
	 * We use a single irq_chip, so only ack an edge (!level)
	 * interrupt.
	 */
	if (trigger_type & IRQ_TYPE_EDGE_BOTH)
		isc_w1c.s.raw = 1;

	isc_w1c_addr = cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn);
	cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
	cvmx_read_csr(isc_w1c_addr);
}

#ifdef CONFIG_SMP
int octeon_irq_ciu3_set_affinity(struct irq_data *data,
				 const struct cpumask *dest, bool force)
{
	union cvmx_ciu3_iscx_ctl isc_ctl;
	union cvmx_ciu3_iscx_w1c isc_w1c;
	u64 isc_ctl_addr;
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);

	if (!cpumask_subset(dest, cpumask_of_node(cd->ciu_node)))
		return -EINVAL;

	if (!enable_one)
		return IRQ_SET_MASK_OK;

	cpu = cpumask_first(dest);
	if (cpu >= nr_cpu_ids)
		cpu = smp_processor_id();
	cd->current_cpu = cpu;

	isc_w1c.u64 = 0;
	isc_w1c.s.en = 1;
	cvmx_write_csr(cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn), isc_w1c.u64);

	isc_ctl_addr = cd->ciu3_addr + CIU3_ISC_CTL(cd->intsn);
	isc_ctl.u64 = 0;
	isc_ctl.s.en = 1;
	isc_ctl.s.idt = per_cpu(octeon_irq_ciu3_idt_ip2, cpu);
	cvmx_write_csr(isc_ctl_addr, isc_ctl.u64);
	cvmx_read_csr(isc_ctl_addr);

	return IRQ_SET_MASK_OK;
}
#endif

static struct irq_chip octeon_irq_chip_ciu3 = {
	.name = "CIU3",
	.irq_startup = edge_startup,
	.irq_enable = octeon_irq_ciu3_enable,
	.irq_disable = octeon_irq_ciu3_disable,
	.irq_ack = octeon_irq_ciu3_ack,
	.irq_mask = octeon_irq_ciu3_mask,
	.irq_mask_ack = octeon_irq_ciu3_mask_ack,
	.irq_unmask = octeon_irq_ciu3_enable,
	.irq_set_type = octeon_irq_ciu_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu3_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

int octeon_irq_ciu3_mapx(struct irq_domain *d, unsigned int virq,
			 irq_hw_number_t hw, struct irq_chip *chip)
{
	struct octeon_ciu3_info *ciu3_info = d->host_data;
	struct octeon_ciu_chip_data *cd = kzalloc_node(sizeof(*cd), GFP_KERNEL,
						       ciu3_info->node);
	if (!cd)
		return -ENOMEM;
	cd->intsn = hw;
	cd->current_cpu = -1;
	cd->ciu3_addr = ciu3_info->ciu3_addr;
	cd->ciu_node = ciu3_info->node;
	irq_set_chip_and_handler(virq, chip, handle_edge_irq);
	irq_set_chip_data(virq, cd);

	return 0;
}

static int octeon_irq_ciu3_map(struct irq_domain *d,
			       unsigned int virq, irq_hw_number_t hw)
{
	return octeon_irq_ciu3_mapx(d, virq, hw, &octeon_irq_chip_ciu3);
}

static struct irq_domain_ops octeon_dflt_domain_ciu3_ops = {
	.map = octeon_irq_ciu3_map,
	.unmap = octeon_irq_free_cd,
	.xlate = octeon_irq_ciu3_xlat,
};

static void octeon_irq_ciu3_ip2(void)
{
	union cvmx_ciu3_destx_pp_int dest_pp_int;
	struct octeon_ciu3_info *ciu3_info;
	u64 ciu3_addr;

	ciu3_info = __this_cpu_read(octeon_ciu3_info);
	ciu3_addr = ciu3_info->ciu3_addr;

	dest_pp_int.u64 = cvmx_read_csr(
		ciu3_addr + CIU3_DEST_PP_INT(3 * cvmx_get_local_core_num()));

	if (likely(dest_pp_int.s.intr)) {
		irq_hw_number_t intsn = dest_pp_int.s.intsn;
		irq_hw_number_t hw;
		struct irq_domain *domain;
		/* Get the domain to use from the major block */
		int block = intsn >> 12;
		int ret;

		domain = ciu3_info->domain[block];
		if (ciu3_info->intsn2hw[block])
			hw = ciu3_info->intsn2hw[block](domain, intsn);
		else
			hw = intsn;

		ret = handle_domain_irq(domain, hw, NULL);
		if (ret < 0) {
			union cvmx_ciu3_iscx_w1c isc_w1c;
			u64 isc_w1c_addr = ciu3_addr + CIU3_ISC_W1C(intsn);

			isc_w1c.u64 = 0;
			isc_w1c.s.en = 1;
			cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
			cvmx_read_csr(isc_w1c_addr);
			spurious_interrupt();
		}
	} else {
		spurious_interrupt();
	}
}

/*
 * There are 10 mboxes per core, starting from zero; the base mbox
 * intsn for a core is 0x04000 + CIU3_MBOX_PER_CORE * core.
 */
static unsigned int octeon_irq_ciu3_base_mbox_intsn(int core)
{
	/* SW (mbox) interrupts are major block 0x04 (bits 12..19). */
	return 0x04000 + CIU3_MBOX_PER_CORE * core;
}

static unsigned int octeon_irq_ciu3_mbox_intsn_for_core(int core,
							unsigned int mbox)
{
	return octeon_irq_ciu3_base_mbox_intsn(core) + mbox;
}

static unsigned int octeon_irq_ciu3_mbox_intsn_for_cpu(int cpu,
						       unsigned int mbox)
{
	int local_core = octeon_coreid_for_cpu(cpu) & 0x3f;

	return octeon_irq_ciu3_mbox_intsn_for_core(local_core, mbox);
}
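
/*
 * Worked example of the encoding above: with CIU3_MBOX_PER_CORE == 10,
 * mbox 3 on local core 2 is intsn 0x04000 + 10 * 2 + 3 = 0x04017.
 */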

static void octeon_irq_ciu3_mbox(void)
{
	union cvmx_ciu3_destx_pp_int dest_pp_int;
	struct octeon_ciu3_info *ciu3_info;
	u64 ciu3_addr;
	int core = cvmx_get_local_core_num();

	ciu3_info = __this_cpu_read(octeon_ciu3_info);
	ciu3_addr = ciu3_info->ciu3_addr;

	dest_pp_int.u64 = cvmx_read_csr(
		ciu3_addr + CIU3_DEST_PP_INT(1 + 3 * core));

	if (likely(dest_pp_int.s.intr)) {
		irq_hw_number_t intsn = dest_pp_int.s.intsn;
		int mbox = intsn - octeon_irq_ciu3_base_mbox_intsn(core);

		if (likely(mbox >= 0 && mbox < CIU3_MBOX_PER_CORE)) {
			do_IRQ(mbox + OCTEON_IRQ_MBOX0);
		} else {
			union cvmx_ciu3_iscx_w1c isc_w1c;
			u64 isc_w1c_addr = ciu3_addr + CIU3_ISC_W1C(intsn);

			isc_w1c.u64 = 0;
			isc_w1c.s.en = 1;
			cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
			cvmx_read_csr(isc_w1c_addr);
			spurious_interrupt();
		}
	} else {
		spurious_interrupt();
	}
}

void octeon_ciu3_mbox_send(int cpu, unsigned int mbox)
{
	struct octeon_ciu3_info *ciu3_info;
	unsigned int intsn;
	union cvmx_ciu3_iscx_w1s isc_w1s;
	u64 isc_w1s_addr;

	if (WARN_ON_ONCE(mbox >= CIU3_MBOX_PER_CORE))
		return;

	intsn = octeon_irq_ciu3_mbox_intsn_for_cpu(cpu, mbox);
	ciu3_info = per_cpu(octeon_ciu3_info, cpu);
	isc_w1s_addr = ciu3_info->ciu3_addr + CIU3_ISC_W1S(intsn);

	isc_w1s.u64 = 0;
	isc_w1s.s.raw = 1;

	cvmx_write_csr(isc_w1s_addr, isc_w1s.u64);
	cvmx_read_csr(isc_w1s_addr);
}

static void octeon_irq_ciu3_mbox_set_enable(struct irq_data *data,
					    int cpu, bool en)
{
	struct octeon_ciu3_info *ciu3_info;
	unsigned int intsn;
	u64 isc_ctl_addr, isc_w1c_addr;
	union cvmx_ciu3_iscx_ctl isc_ctl;
	unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;

	intsn = octeon_irq_ciu3_mbox_intsn_for_cpu(cpu, mbox);
	ciu3_info = per_cpu(octeon_ciu3_info, cpu);
	isc_w1c_addr = ciu3_info->ciu3_addr + CIU3_ISC_W1C(intsn);
	isc_ctl_addr = ciu3_info->ciu3_addr + CIU3_ISC_CTL(intsn);

	isc_ctl.u64 = 0;
	isc_ctl.s.en = 1;

	cvmx_write_csr(isc_w1c_addr, isc_ctl.u64);
	cvmx_write_csr(isc_ctl_addr, 0);
	if (en) {
		unsigned int idt = per_cpu(octeon_irq_ciu3_idt_ip3, cpu);

		isc_ctl.u64 = 0;
		isc_ctl.s.en = 1;
		isc_ctl.s.idt = idt;
		cvmx_write_csr(isc_ctl_addr, isc_ctl.u64);
	}
	cvmx_read_csr(isc_ctl_addr);
}

static void octeon_irq_ciu3_mbox_enable(struct irq_data *data)
{
	int cpu;
	unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;

	WARN_ON(mbox >= CIU3_MBOX_PER_CORE);

	for_each_online_cpu(cpu)
		octeon_irq_ciu3_mbox_set_enable(data, cpu, true);
}

static void octeon_irq_ciu3_mbox_disable(struct irq_data *data)
{
	int cpu;
	unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;

	WARN_ON(mbox >= CIU3_MBOX_PER_CORE);

	for_each_online_cpu(cpu)
		octeon_irq_ciu3_mbox_set_enable(data, cpu, false);
}

static void octeon_irq_ciu3_mbox_ack(struct irq_data *data)
{
	struct octeon_ciu3_info *ciu3_info;
	unsigned int intsn;
	u64 isc_w1c_addr;
	union cvmx_ciu3_iscx_w1c isc_w1c;
	unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;

	intsn = octeon_irq_ciu3_mbox_intsn_for_core(
		cvmx_get_local_core_num(), mbox);

	isc_w1c.u64 = 0;
	isc_w1c.s.raw = 1;

	ciu3_info = __this_cpu_read(octeon_ciu3_info);
	isc_w1c_addr = ciu3_info->ciu3_addr + CIU3_ISC_W1C(intsn);
	cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
	cvmx_read_csr(isc_w1c_addr);
}

static void octeon_irq_ciu3_mbox_cpu_online(struct irq_data *data)
{
	octeon_irq_ciu3_mbox_set_enable(data, smp_processor_id(), true);
}

static void octeon_irq_ciu3_mbox_cpu_offline(struct irq_data *data)
{
	octeon_irq_ciu3_mbox_set_enable(data, smp_processor_id(), false);
}
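
/*
 * Program this core's interrupt delivery tables (IDTs).  The IDT_CTL
 * value appears to select the destination interrupt line (0, 1 and 2
 * below for IP2, IP3 and IP4), IDT_PP is the mask of destination
 * cores, and IDT_IO is left at zero.
 */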

static int octeon_irq_ciu3_alloc_resources(struct octeon_ciu3_info *ciu3_info)
{
	u64 b = ciu3_info->ciu3_addr;
	int idt_ip2, idt_ip3, idt_ip4;
	int unused_idt2;
	int core = cvmx_get_local_core_num();
	int i;

	__this_cpu_write(octeon_ciu3_info, ciu3_info);

	/*
	 * There are four IDTs per core, starting from 1 because zero
	 * is reserved.  The base IDT for a core is 4 * core + 1.
	 */
	idt_ip2 = core * 4 + 1;
	idt_ip3 = core * 4 + 2;
	idt_ip4 = core * 4 + 3;
	unused_idt2 = core * 4 + 4;
	__this_cpu_write(octeon_irq_ciu3_idt_ip2, idt_ip2);
	__this_cpu_write(octeon_irq_ciu3_idt_ip3, idt_ip3);

	/* ip2 interrupts for this CPU */
	cvmx_write_csr(b + CIU3_IDT_CTL(idt_ip2), 0);
	cvmx_write_csr(b + CIU3_IDT_PP(idt_ip2, 0), 1ull << core);
	cvmx_write_csr(b + CIU3_IDT_IO(idt_ip2), 0);

	/* ip3 interrupts for this CPU */
	cvmx_write_csr(b + CIU3_IDT_CTL(idt_ip3), 1);
	cvmx_write_csr(b + CIU3_IDT_PP(idt_ip3, 0), 1ull << core);
	cvmx_write_csr(b + CIU3_IDT_IO(idt_ip3), 0);

	/* ip4 interrupts for this CPU */
	cvmx_write_csr(b + CIU3_IDT_CTL(idt_ip4), 2);
	cvmx_write_csr(b + CIU3_IDT_PP(idt_ip4, 0), 0);
	cvmx_write_csr(b + CIU3_IDT_IO(idt_ip4), 0);

	cvmx_write_csr(b + CIU3_IDT_CTL(unused_idt2), 0);
	cvmx_write_csr(b + CIU3_IDT_PP(unused_idt2, 0), 0);
	cvmx_write_csr(b + CIU3_IDT_IO(unused_idt2), 0);

	for (i = 0; i < CIU3_MBOX_PER_CORE; i++) {
		unsigned int intsn = octeon_irq_ciu3_mbox_intsn_for_core(core, i);

		/* Clear and disable the mbox interrupt. */
		cvmx_write_csr(b + CIU3_ISC_W1C(intsn), 2);
		cvmx_write_csr(b + CIU3_ISC_CTL(intsn), 0);
	}

	return 0;
}

static void octeon_irq_setup_secondary_ciu3(void)
{
	struct octeon_ciu3_info *ciu3_info;

	ciu3_info = octeon_ciu3_info_per_node[cvmx_get_node_num()];
	octeon_irq_ciu3_alloc_resources(ciu3_info);
	irq_cpu_online();

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	if (octeon_irq_use_ip4)
		set_c0_status(STATUSF_IP4);
	else
		clear_c0_status(STATUSF_IP4);
}

static struct irq_chip octeon_irq_chip_ciu3_mbox = {
	.name = "CIU3-M",
	.irq_enable = octeon_irq_ciu3_mbox_enable,
	.irq_disable = octeon_irq_ciu3_mbox_disable,
	.irq_ack = octeon_irq_ciu3_mbox_ack,
	.irq_cpu_online = octeon_irq_ciu3_mbox_cpu_online,
	.irq_cpu_offline = octeon_irq_ciu3_mbox_cpu_offline,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static int __init octeon_irq_init_ciu3(struct device_node *ciu_node,
				       struct device_node *parent)
{
	int i;
	int node;
	struct irq_domain *domain;
	struct octeon_ciu3_info *ciu3_info;
	const __be32 *zero_addr;
	u64 base_addr;
	union cvmx_ciu3_const consts;

	node = 0; /* of_node_to_nid(ciu_node); */
	ciu3_info = kzalloc_node(sizeof(*ciu3_info), GFP_KERNEL, node);

	if (!ciu3_info)
		return -ENOMEM;

	zero_addr = of_get_address(ciu_node, 0, NULL, NULL);
	if (WARN_ON(!zero_addr))
		return -EINVAL;

	base_addr = of_translate_address(ciu_node, zero_addr);
	base_addr = (u64)phys_to_virt(base_addr);

	ciu3_info->ciu3_addr = base_addr;
	ciu3_info->node = node;

	consts.u64 = cvmx_read_csr(base_addr + CIU3_CONST);

	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu3;

	octeon_irq_ip2 = octeon_irq_ciu3_ip2;
	octeon_irq_ip3 = octeon_irq_ciu3_mbox;
	octeon_irq_ip4 = octeon_irq_ip4_mask;

	if (node == cvmx_get_node_num()) {
		/* Mips internal */
		octeon_irq_init_core();

		/* Only do per CPU things if it is the CIU of the boot node. */
		i = irq_alloc_descs_from(OCTEON_IRQ_MBOX0, 8, node);
		WARN_ON(i < 0);

		for (i = 0; i < 8; i++)
			irq_set_chip_and_handler(i + OCTEON_IRQ_MBOX0,
						 &octeon_irq_chip_ciu3_mbox,
						 handle_percpu_irq);
	}

	/*
	 * Initialize all domains to use the default domain.  Specific major
	 * blocks will overwrite the default domain as needed.
	 */
	domain = irq_domain_add_tree(ciu_node, &octeon_dflt_domain_ciu3_ops,
				     ciu3_info);
	for (i = 0; i < MAX_CIU3_DOMAINS; i++)
		ciu3_info->domain[i] = domain;

	octeon_ciu3_info_per_node[node] = ciu3_info;

	if (node == cvmx_get_node_num()) {
		/* Only do per CPU things if it is the CIU of the boot node. */
		octeon_irq_ciu3_alloc_resources(ciu3_info);
		if (node == 0)
			irq_set_default_host(domain);

		octeon_irq_use_ip4 = false;

		/* Enable the CIU lines */
		set_c0_status(STATUSF_IP2 | STATUSF_IP3);
		clear_c0_status(STATUSF_IP4);
	}

	return 0;
}

static struct of_device_id ciu_types[] __initdata = {
	{.compatible = "cavium,octeon-3860-ciu", .data = octeon_irq_init_ciu},
	{.compatible = "cavium,octeon-3860-gpio", .data = octeon_irq_init_gpio},
	{.compatible = "cavium,octeon-6880-ciu2", .data = octeon_irq_init_ciu2},
	{.compatible = "cavium,octeon-7890-ciu3", .data = octeon_irq_init_ciu3},
	{.compatible = "cavium,octeon-7130-cib", .data = octeon_irq_init_cib},
	{}
};

void __init arch_init_irq(void)
{
#ifdef CONFIG_SMP
	/* Set the default affinity to the boot cpu. */
	cpumask_clear(irq_default_affinity);
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif
	of_irq_init(ciu_types);
}
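
/*
 * Top-level dispatch loop: service IP2, IP3 and IP4 through the
 * per-controller handlers; any other pending line (IP0..IP7 occupy
 * cause bits 8..15) is forwarded as a plain MIPS core IRQ via fls(),
 * and we loop until nothing is pending.
 */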

asmlinkage void plat_irq_dispatch(void)
{
	unsigned long cop0_cause;
	unsigned long cop0_status;

	while (1) {
		cop0_cause = read_c0_cause();
		cop0_status = read_c0_status();
		cop0_cause &= cop0_status;
		cop0_cause &= ST0_IM;

		if (cop0_cause & STATUSF_IP2)
			octeon_irq_ip2();
		else if (cop0_cause & STATUSF_IP3)
			octeon_irq_ip3();
		else if (cop0_cause & STATUSF_IP4)
			octeon_irq_ip4();
		else if (cop0_cause)
			do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
		else
			break;
	}
}

#ifdef CONFIG_HOTPLUG_CPU
void octeon_fixup_irqs(void)
{
	irq_cpu_offline();
}
#endif /* CONFIG_HOTPLUG_CPU */