/* arch/mips/bcm63xx/irq.c */
  1. /*
  2. * This file is subject to the terms and conditions of the GNU General Public
  3. * License. See the file "COPYING" in the main directory of this archive
  4. * for more details.
  5. *
  6. * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
  7. * Copyright (C) 2008 Nicolas Schichan <nschichan@freebox.fr>
  8. */
  9. #include <linux/kernel.h>
  10. #include <linux/init.h>
  11. #include <linux/interrupt.h>
  12. #include <linux/module.h>
  13. #include <linux/irq.h>
  14. #include <linux/spinlock.h>
  15. #include <asm/irq_cpu.h>
  16. #include <asm/mipsregs.h>
  17. #include <bcm63xx_cpu.h>
  18. #include <bcm63xx_regs.h>
  19. #include <bcm63xx_io.h>
  20. #include <bcm63xx_irq.h>
static DEFINE_SPINLOCK(ipic_lock);	/* serializes internal irq mask/stat register access */
static DEFINE_SPINLOCK(epic_lock);	/* serializes external irq config register access */

/* per-CPU base addresses of the internal irq status/mask registers */
static u32 irq_stat_addr[2];
static u32 irq_mask_addr[2];
/* width-specific dispatcher, selected in bcm63xx_init_irq() (32- or 64-bit variant) */
static void (*dispatch_internal)(int cpu);
/* non-zero when external irqs are routed through the internal controller */
static int is_ext_irq_cascaded;
static unsigned int ext_irq_count;
/* internal-irq bit range occupied by cascaded external irqs */
static unsigned int ext_irq_start, ext_irq_end;
/* PERF external irq config register offsets (reg2 only used when >4 ext irqs) */
static unsigned int ext_irq_cfg_reg1, ext_irq_cfg_reg2;
/* width-specific mask/unmask helpers, selected in bcm63xx_init_irq() */
static void (*internal_irq_mask)(struct irq_data *d);
static void (*internal_irq_unmask)(struct irq_data *d, const struct cpumask *m);
  32. static inline u32 get_ext_irq_perf_reg(int irq)
  33. {
  34. if (irq < 4)
  35. return ext_irq_cfg_reg1;
  36. return ext_irq_cfg_reg2;
  37. }
  38. static inline void handle_internal(int intbit)
  39. {
  40. if (is_ext_irq_cascaded &&
  41. intbit >= ext_irq_start && intbit <= ext_irq_end)
  42. do_IRQ(intbit - ext_irq_start + IRQ_EXTERNAL_BASE);
  43. else
  44. do_IRQ(intbit + IRQ_INTERNAL_BASE);
  45. }
/*
 * Decide whether an irq should be enabled on @cpu: the cpu must be
 * online, and on SMP it must also be part of the requested affinity —
 * either the explicit mask @m, or the irq's previously set affinity
 * when @m is NULL.
 */
static inline int enable_irq_for_cpu(int cpu, struct irq_data *d,
				     const struct cpumask *m)
{
	bool enable = cpu_online(cpu);

#ifdef CONFIG_SMP
	if (m)
		enable &= cpumask_test_cpu(cpu, m);
	else if (irqd_affinity_was_set(d))
		enable &= cpumask_test_cpu(cpu, d->affinity);
#endif
	return enable;
}
  58. /*
  59. * dispatch internal devices IRQ (uart, enet, watchdog, ...). do not
  60. * prioritize any interrupt relatively to another. the static counter
  61. * will resume the loop where it ended the last time we left this
  62. * function.
  63. */
/*
 * Build the width-specific (32- or 64-bit register file) dispatch,
 * mask and unmask helpers for the internal interrupt controller.
 * The static per-cpu index i[] resumes the scan where the previous
 * call stopped, so no interrupt source is starved (see comment above).
 */
#define BUILD_IPIC_INTERNAL(width) \
void __dispatch_internal_##width(int cpu) \
{ \
	u32 pending[width / 32]; \
	unsigned int src, tgt; \
	bool irqs_pending = false; \
	static unsigned int i[2]; \
	unsigned int *next = &i[cpu]; \
	unsigned long flags; \
 \
	/* read registers in reverse order */ \
	spin_lock_irqsave(&ipic_lock, flags); \
	for (src = 0, tgt = (width / 32); src < (width / 32); src++) { \
		u32 val; \
 \
		/* only bits both pending and unmasked are serviced */ \
		val = bcm_readl(irq_stat_addr[cpu] + src * sizeof(u32)); \
		val &= bcm_readl(irq_mask_addr[cpu] + src * sizeof(u32)); \
		pending[--tgt] = val; \
 \
		if (val) \
			irqs_pending = true; \
	} \
	spin_unlock_irqrestore(&ipic_lock, flags); \
 \
	if (!irqs_pending) \
		return; \
 \
	/* round-robin scan, handle exactly one pending source per call */ \
	while (1) { \
		unsigned int to_call = *next; \
 \
		*next = (*next + 1) & (width - 1); \
		if (pending[to_call / 32] & (1 << (to_call & 0x1f))) { \
			handle_internal(to_call); \
			break; \
		} \
	} \
} \
 \
static void __internal_irq_mask_##width(struct irq_data *d) \
{ \
	u32 val; \
	unsigned irq = d->irq - IRQ_INTERNAL_BASE; \
	unsigned reg = (irq / 32) ^ (width/32 - 1); \
	unsigned bit = irq & 0x1f; \
	unsigned long flags; \
	int cpu; \
 \
	/* clear the mask bit on every cpu that has a register file */ \
	spin_lock_irqsave(&ipic_lock, flags); \
	for_each_present_cpu(cpu) { \
		if (!irq_mask_addr[cpu]) \
			break; \
 \
		val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32));\
		val &= ~(1 << bit); \
		bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32));\
	} \
	spin_unlock_irqrestore(&ipic_lock, flags); \
} \
 \
static void __internal_irq_unmask_##width(struct irq_data *d, \
					  const struct cpumask *m) \
{ \
	u32 val; \
	unsigned irq = d->irq - IRQ_INTERNAL_BASE; \
	unsigned reg = (irq / 32) ^ (width/32 - 1); \
	unsigned bit = irq & 0x1f; \
	unsigned long flags; \
	int cpu; \
 \
	/* set the bit only on cpus allowed by affinity/mask @m */ \
	spin_lock_irqsave(&ipic_lock, flags); \
	for_each_present_cpu(cpu) { \
		if (!irq_mask_addr[cpu]) \
			break; \
 \
		val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32));\
		if (enable_irq_for_cpu(cpu, d, m)) \
			val |= (1 << bit); \
		else \
			val &= ~(1 << bit); \
		bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32));\
	} \
	spin_unlock_irqrestore(&ipic_lock, flags); \
}

/* instantiate 32-bit and 64-bit variants of the helpers above */
BUILD_IPIC_INTERNAL(32);
BUILD_IPIC_INTERNAL(64);
/*
 * Top-level CPU interrupt dispatch: service every cause bit that is
 * both pending and unmasked, looping until nothing is left.  IP2 (and
 * IP3 when external irqs are cascaded) carry the internal controller
 * lines of cpu 0 (and cpu 1); otherwise IP3-IP6 map directly to the
 * four external irq lines.
 */
asmlinkage void plat_irq_dispatch(void)
{
	u32 cause;

	do {
		cause = read_c0_cause() & read_c0_status() & ST0_IM;

		if (!cause)
			break;

		if (cause & CAUSEF_IP7)
			do_IRQ(7);	/* CPU timer */
		if (cause & CAUSEF_IP0)
			do_IRQ(0);	/* software interrupt 0 */
		if (cause & CAUSEF_IP1)
			do_IRQ(1);	/* software interrupt 1 */
		if (cause & CAUSEF_IP2)
			dispatch_internal(0);
		if (is_ext_irq_cascaded) {
			if (cause & CAUSEF_IP3)
				dispatch_internal(1);
		} else {
			if (cause & CAUSEF_IP3)
				do_IRQ(IRQ_EXT_0);
			if (cause & CAUSEF_IP4)
				do_IRQ(IRQ_EXT_1);
			if (cause & CAUSEF_IP5)
				do_IRQ(IRQ_EXT_2);
			if (cause & CAUSEF_IP6)
				do_IRQ(IRQ_EXT_3);
		}
	} while (1);
}
  179. /*
  180. * internal IRQs operations: only mask/unmask on PERF irq mask
  181. * register.
  182. */
/* irq_chip callback: mask an internal irq via the PERF irq mask register */
static void bcm63xx_internal_irq_mask(struct irq_data *d)
{
	internal_irq_mask(d);
}
/* irq_chip callback: unmask an internal irq (NULL = use current affinity) */
static void bcm63xx_internal_irq_unmask(struct irq_data *d)
{
	internal_irq_unmask(d, NULL);
}
  191. /*
  192. * external IRQs operations: mask/unmask and clear on PERF external
  193. * irq control register.
  194. */
  195. static void bcm63xx_external_irq_mask(struct irq_data *d)
  196. {
  197. unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
  198. u32 reg, regaddr;
  199. unsigned long flags;
  200. regaddr = get_ext_irq_perf_reg(irq);
  201. spin_lock_irqsave(&epic_lock, flags);
  202. reg = bcm_perf_readl(regaddr);
  203. if (BCMCPU_IS_6348())
  204. reg &= ~EXTIRQ_CFG_MASK_6348(irq % 4);
  205. else
  206. reg &= ~EXTIRQ_CFG_MASK(irq % 4);
  207. bcm_perf_writel(reg, regaddr);
  208. spin_unlock_irqrestore(&epic_lock, flags);
  209. if (is_ext_irq_cascaded)
  210. internal_irq_mask(irq_get_irq_data(irq + ext_irq_start));
  211. }
  212. static void bcm63xx_external_irq_unmask(struct irq_data *d)
  213. {
  214. unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
  215. u32 reg, regaddr;
  216. unsigned long flags;
  217. regaddr = get_ext_irq_perf_reg(irq);
  218. spin_lock_irqsave(&epic_lock, flags);
  219. reg = bcm_perf_readl(regaddr);
  220. if (BCMCPU_IS_6348())
  221. reg |= EXTIRQ_CFG_MASK_6348(irq % 4);
  222. else
  223. reg |= EXTIRQ_CFG_MASK(irq % 4);
  224. bcm_perf_writel(reg, regaddr);
  225. spin_unlock_irqrestore(&epic_lock, flags);
  226. if (is_ext_irq_cascaded)
  227. internal_irq_unmask(irq_get_irq_data(irq + ext_irq_start),
  228. NULL);
  229. }
  230. static void bcm63xx_external_irq_clear(struct irq_data *d)
  231. {
  232. unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
  233. u32 reg, regaddr;
  234. unsigned long flags;
  235. regaddr = get_ext_irq_perf_reg(irq);
  236. spin_lock_irqsave(&epic_lock, flags);
  237. reg = bcm_perf_readl(regaddr);
  238. if (BCMCPU_IS_6348())
  239. reg |= EXTIRQ_CFG_CLEAR_6348(irq % 4);
  240. else
  241. reg |= EXTIRQ_CFG_CLEAR(irq % 4);
  242. bcm_perf_writel(reg, regaddr);
  243. spin_unlock_irqrestore(&epic_lock, flags);
  244. }
/*
 * Configure the trigger type (level/edge, polarity) of an external
 * irq in the PERF config register and install the matching flow
 * handler.  IRQ_TYPE_NONE defaults to low-level triggering.
 */
static int bcm63xx_external_irq_set_type(struct irq_data *d,
					 unsigned int flow_type)
{
	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
	u32 reg, regaddr;
	int levelsense, sense, bothedge;
	unsigned long flags;

	flow_type &= IRQ_TYPE_SENSE_MASK;

	if (flow_type == IRQ_TYPE_NONE)
		flow_type = IRQ_TYPE_LEVEL_LOW;

	/* translate the generic type into the three hardware config bits */
	levelsense = sense = bothedge = 0;
	switch (flow_type) {
	case IRQ_TYPE_EDGE_BOTH:
		bothedge = 1;
		break;

	case IRQ_TYPE_EDGE_RISING:
		sense = 1;
		break;

	case IRQ_TYPE_EDGE_FALLING:
		break;

	case IRQ_TYPE_LEVEL_HIGH:
		levelsense = 1;
		sense = 1;
		break;

	case IRQ_TYPE_LEVEL_LOW:
		levelsense = 1;
		break;

	default:
		printk(KERN_ERR "bogus flow type combination given !\n");
		return -EINVAL;
	}

	regaddr = get_ext_irq_perf_reg(irq);
	spin_lock_irqsave(&epic_lock, flags);
	reg = bcm_perf_readl(regaddr);
	irq %= 4;	/* each config register covers up to 4 irqs */

	switch (bcm63xx_get_cpu_id()) {
	case BCM6348_CPU_ID:
		/* the 6348 has its own bit layout in this register */
		if (levelsense)
			reg |= EXTIRQ_CFG_LEVELSENSE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_LEVELSENSE_6348(irq);
		if (sense)
			reg |= EXTIRQ_CFG_SENSE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_SENSE_6348(irq);
		if (bothedge)
			reg |= EXTIRQ_CFG_BOTHEDGE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_BOTHEDGE_6348(irq);
		break;

	case BCM3368_CPU_ID:
	case BCM6328_CPU_ID:
	case BCM6338_CPU_ID:
	case BCM6345_CPU_ID:
	case BCM6358_CPU_ID:
	case BCM6362_CPU_ID:
	case BCM6368_CPU_ID:
		/* common layout shared by all other supported SoCs */
		if (levelsense)
			reg |= EXTIRQ_CFG_LEVELSENSE(irq);
		else
			reg &= ~EXTIRQ_CFG_LEVELSENSE(irq);
		if (sense)
			reg |= EXTIRQ_CFG_SENSE(irq);
		else
			reg &= ~EXTIRQ_CFG_SENSE(irq);
		if (bothedge)
			reg |= EXTIRQ_CFG_BOTHEDGE(irq);
		else
			reg &= ~EXTIRQ_CFG_BOTHEDGE(irq);
		break;

	default:
		BUG();
	}

	bcm_perf_writel(reg, regaddr);
	spin_unlock_irqrestore(&epic_lock, flags);

	irqd_set_trigger_type(d, flow_type);
	/* level types need the level flow handler, edge types the edge one */
	if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
		__irq_set_handler_locked(d->irq, handle_level_irq);
	else
		__irq_set_handler_locked(d->irq, handle_edge_irq);

	return IRQ_SET_MASK_OK_NOCOPY;
}
#ifdef CONFIG_SMP
/*
 * irq_set_affinity callback for cascaded internal irqs: reprogram the
 * per-cpu mask registers through internal_irq_unmask() with the new
 * destination mask, but only while the irq is enabled.
 */
static int bcm63xx_internal_set_affinity(struct irq_data *data,
					 const struct cpumask *dest,
					 bool force)
{
	if (!irqd_irq_disabled(data))
		internal_irq_unmask(data, dest);

	return 0;
}
#endif
/* irq_chip for the internal (PERF) interrupt controller */
static struct irq_chip bcm63xx_internal_irq_chip = {
	.name		= "bcm63xx_ipic",
	.irq_mask	= bcm63xx_internal_irq_mask,
	.irq_unmask	= bcm63xx_internal_irq_unmask,
};

/* irq_chip for the external interrupt lines */
static struct irq_chip bcm63xx_external_irq_chip = {
	.name		= "bcm63xx_epic",
	.irq_ack	= bcm63xx_external_irq_clear,

	.irq_mask	= bcm63xx_external_irq_mask,
	.irq_unmask	= bcm63xx_external_irq_unmask,

	.irq_set_type	= bcm63xx_external_irq_set_type,
};

/*
 * Cascade placeholders: the real demultiplexing is done in
 * plat_irq_dispatch(), so the CPU irq lines only need no_action.
 */
static struct irqaction cpu_ip2_cascade_action = {
	.handler	= no_action,
	.name		= "cascade_ip2",
	.flags		= IRQF_NO_THREAD,
};

#ifdef CONFIG_SMP
static struct irqaction cpu_ip3_cascade_action = {
	.handler	= no_action,
	.name		= "cascade_ip3",
	.flags		= IRQF_NO_THREAD,
};
#endif

static struct irqaction cpu_ext_cascade_action = {
	.handler	= no_action,
	.name		= "cascade_extirq",
	.flags		= IRQF_NO_THREAD,
};
/*
 * Detect the SoC and fill in the per-chip register offsets, irq
 * register width, external irq layout and the matching 32/64-bit
 * helper functions.  Chips with a single 32-bit register file zero
 * out the cpu 1 addresses so the helpers stop after cpu 0.
 */
static void bcm63xx_init_irq(void)
{
	int irq_bits;

	irq_stat_addr[0] = bcm63xx_regset_address(RSET_PERF);
	irq_mask_addr[0] = bcm63xx_regset_address(RSET_PERF);
	irq_stat_addr[1] = bcm63xx_regset_address(RSET_PERF);
	irq_mask_addr[1] = bcm63xx_regset_address(RSET_PERF);

	switch (bcm63xx_get_cpu_id()) {
	case BCM3368_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_3368_REG;
		irq_mask_addr[0] += PERF_IRQMASK_3368_REG;
		irq_stat_addr[1] = 0;	/* single-cpu register file */
		irq_mask_addr[1] = 0;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_3368;
		break;
	case BCM6328_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6328_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6328_REG(0);
		irq_stat_addr[1] += PERF_IRQSTAT_6328_REG(1);
		irq_mask_addr[1] += PERF_IRQMASK_6328_REG(1);
		irq_bits = 64;
		ext_irq_count = 4;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6328_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6328_EXT_IRQ3 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6328;
		break;
	case BCM6338_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6338_REG;
		irq_mask_addr[0] += PERF_IRQMASK_6338_REG;
		irq_stat_addr[1] = 0;
		irq_mask_addr[1] = 0;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6338;
		break;
	case BCM6345_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6345_REG;
		irq_mask_addr[0] += PERF_IRQMASK_6345_REG;
		irq_stat_addr[1] = 0;
		irq_mask_addr[1] = 0;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6345;
		break;
	case BCM6348_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6348_REG;
		irq_mask_addr[0] += PERF_IRQMASK_6348_REG;
		irq_stat_addr[1] = 0;
		irq_mask_addr[1] = 0;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6348;
		break;
	case BCM6358_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6358_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6358_REG(0);
		irq_stat_addr[1] += PERF_IRQSTAT_6358_REG(1);
		irq_mask_addr[1] += PERF_IRQMASK_6358_REG(1);
		irq_bits = 32;
		ext_irq_count = 4;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6358_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6358;
		break;
	case BCM6362_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6362_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6362_REG(0);
		irq_stat_addr[1] += PERF_IRQSTAT_6362_REG(1);
		irq_mask_addr[1] += PERF_IRQMASK_6362_REG(1);
		irq_bits = 64;
		ext_irq_count = 4;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6362;
		break;
	case BCM6368_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6368_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6368_REG(0);
		irq_stat_addr[1] += PERF_IRQSTAT_6368_REG(1);
		irq_mask_addr[1] += PERF_IRQMASK_6368_REG(1);
		irq_bits = 64;
		ext_irq_count = 6;	/* only SoC with 6 external irqs */
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6368_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6368_EXT_IRQ5 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6368;
		ext_irq_cfg_reg2 = PERF_EXTIRQ_CFG_REG2_6368;
		break;
	default:
		BUG();
	}

	/* pick the helper variants matching the register width */
	if (irq_bits == 32) {
		dispatch_internal = __dispatch_internal_32;
		internal_irq_mask = __internal_irq_mask_32;
		internal_irq_unmask = __internal_irq_unmask_32;
	} else {
		dispatch_internal = __dispatch_internal_64;
		internal_irq_mask = __internal_irq_mask_64;
		internal_irq_unmask = __internal_irq_unmask_64;
	}
}
/*
 * Arch entry point: detect the SoC, register both irq chips for their
 * irq ranges and wire the MIPS CPU cascade interrupts.
 */
void __init arch_init_irq(void)
{
	int i;

	bcm63xx_init_irq();
	mips_cpu_irq_init();
	for (i = IRQ_INTERNAL_BASE; i < NR_IRQS; ++i)
		irq_set_chip_and_handler(i, &bcm63xx_internal_irq_chip,
					 handle_level_irq);

	for (i = IRQ_EXTERNAL_BASE; i < IRQ_EXTERNAL_BASE + ext_irq_count; ++i)
		irq_set_chip_and_handler(i, &bcm63xx_external_irq_chip,
					 handle_edge_irq);

	if (!is_ext_irq_cascaded) {
		/* external irqs arrive directly on CPU lines IP3.. */
		for (i = 3; i < 3 + ext_irq_count; ++i)
			setup_irq(MIPS_CPU_IRQ_BASE + i, &cpu_ext_cascade_action);
	}

	setup_irq(MIPS_CPU_IRQ_BASE + 2, &cpu_ip2_cascade_action);
#ifdef CONFIG_SMP
	if (is_ext_irq_cascaded) {
		setup_irq(MIPS_CPU_IRQ_BASE + 3, &cpu_ip3_cascade_action);
		bcm63xx_internal_irq_chip.irq_set_affinity =
			bcm63xx_internal_set_affinity;

		/* default all irqs to the boot cpu */
		cpumask_clear(irq_default_affinity);
		cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
	}
#endif
}