/*
 * Intel IXP4xx Queue Manager driver for Linux
 *
 * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <mach/qmgr.h>

struct qmgr_regs __iomem *qmgr_regs;
static struct resource *mem_res;
static spinlock_t qmgr_lock;
static u32 used_sram_bitmap[4]; /* 128 16-dword pages */
static void (*irq_handlers[QUEUES])(void *pdev);
static void *irq_pdevs[QUEUES];

#if DEBUG_QMGR
char qmgr_queue_descs[QUEUES][32];
#endif

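/*
 * Select the IRQ source for a queue and install its handler. Only queues
 * 0-31 have a programmable source (4 bits per queue, 8 queues per irqsrc
 * register); queues 32-63 are hard-wired to "not nearly empty".
 */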
void qmgr_set_irq(unsigned int queue, int src,
		  void (*handler)(void *pdev), void *pdev)
{
	unsigned long flags;

	spin_lock_irqsave(&qmgr_lock, flags);
	if (queue < HALF_QUEUES) {
		u32 __iomem *reg;
		int bit;
		BUG_ON(src > QUEUE_IRQ_SRC_NOT_FULL);
		reg = &qmgr_regs->irqsrc[queue >> 3]; /* 8 queues per u32 */
		bit = (queue % 8) * 4; /* 3 bits + 1 reserved bit per queue */
		__raw_writel((__raw_readl(reg) & ~(7 << bit)) | (src << bit),
			     reg);
	} else
		/* IRQ source for queues 32-63 is fixed */
		BUG_ON(src != QUEUE_IRQ_SRC_NOT_NEARLY_EMPTY);

	irq_handlers[queue] = handler;
	irq_pdevs[queue] = pdev;
	spin_unlock_irqrestore(&qmgr_lock, flags);
}

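/*
 * IXP42x rev. A0 workaround: irqstat cannot be trusted there (the ACK
 * write may clear arbitrary bits), so these handlers recompute which
 * enabled queues actually satisfy their IRQ condition from the irqen,
 * irqsrc and status registers.
 */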
static irqreturn_t qmgr_irq1_a0(int irq, void *pdev)
{
	int i, ret = 0;
	u32 en_bitmap, src, stat;

	/* ACK - it may clear any bits so don't rely on it */
	__raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[0]);

	en_bitmap = __raw_readl(&qmgr_regs->irqen[0]);
	while (en_bitmap) {
		i = __fls(en_bitmap); /* number of the last "low" queue */
		en_bitmap &= ~BIT(i);
		src = __raw_readl(&qmgr_regs->irqsrc[i >> 3]);
		stat = __raw_readl(&qmgr_regs->stat1[i >> 3]);
		/* extract the 4-bit source and status nibbles of queue i */
		src >>= (i % 8) * 4;
		src &= 7;
		stat >>= (i % 8) * 4;
		if (src & 4) /* the IRQ condition is inverted */
			stat = ~stat;
		if (stat & BIT(src & 3)) {
			irq_handlers[i](irq_pdevs[i]);
			ret = IRQ_HANDLED;
		}
	}
	return ret;
}

static irqreturn_t qmgr_irq2_a0(int irq, void *pdev)
{
	int i, ret = 0;
	u32 req_bitmap;

	/* ACK - it may clear any bits so don't rely on it */
	__raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[1]);

	req_bitmap = __raw_readl(&qmgr_regs->irqen[1]) &
		     __raw_readl(&qmgr_regs->statne_h);
	while (req_bitmap) {
		i = __fls(req_bitmap); /* number of the last "high" queue */
		req_bitmap &= ~BIT(i);
		irq_handlers[HALF_QUEUES + i](irq_pdevs[HALF_QUEUES + i]);
		ret = IRQ_HANDLED;
	}
	return ret;
}

static irqreturn_t qmgr_irq(int irq, void *pdev)
{
	int i, half = (irq == IRQ_IXP4XX_QM1 ? 0 : 1);
	u32 req_bitmap = __raw_readl(&qmgr_regs->irqstat[half]);

	if (!req_bitmap)
		return 0;
	__raw_writel(req_bitmap, &qmgr_regs->irqstat[half]); /* ACK */

	while (req_bitmap) {
		i = __fls(req_bitmap); /* number of the last queue */
		req_bitmap &= ~BIT(i);
		i += half * HALF_QUEUES;
		irq_handlers[i](irq_pdevs[i]);
	}
	return IRQ_HANDLED;
}

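/*
 * The 64 queues are split into two banks of 32: irqen[0]/irqstat[0] serve
 * queues 0-31 (IRQ_IXP4XX_QM1), irqen[1]/irqstat[1] serve queues 32-63
 * (IRQ_IXP4XX_QM2).
 */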
void qmgr_enable_irq(unsigned int queue)
{
	unsigned long flags;
	int half = queue / 32;
	u32 mask = 1 << (queue & (HALF_QUEUES - 1));

	spin_lock_irqsave(&qmgr_lock, flags);
	__raw_writel(__raw_readl(&qmgr_regs->irqen[half]) | mask,
		     &qmgr_regs->irqen[half]);
	spin_unlock_irqrestore(&qmgr_lock, flags);
}

void qmgr_disable_irq(unsigned int queue)
{
	unsigned long flags;
	int half = queue / 32;
	u32 mask = 1 << (queue & (HALF_QUEUES - 1));

	spin_lock_irqsave(&qmgr_lock, flags);
	__raw_writel(__raw_readl(&qmgr_regs->irqen[half]) & ~mask,
		     &qmgr_regs->irqen[half]);
	__raw_writel(mask, &qmgr_regs->irqstat[half]); /* clear */
	spin_unlock_irqrestore(&qmgr_lock, flags);
}

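/*
 * Shift the 128-bit SRAM allocation mask - four u32 words, one bit per
 * 16-dword page - left by one page position.
 */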
static inline void shift_mask(u32 *mask)
{
	mask[3] = mask[3] << 1 | mask[2] >> 31;
	mask[2] = mask[2] << 1 | mask[1] >> 31;
	mask[1] = mask[1] << 1 | mask[0] >> 31;
	mask[0] <<= 1;
}

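/*
 * Reserve SRAM for a queue and write its configuration word. "len" is the
 * queue length in dwords (16, 32, 64 or 128); the watermarks must fit in
 * 3 bits each. A first-fit scan of used_sram_bitmap locates free pages.
 */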
#if DEBUG_QMGR
int qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
		       unsigned int nearly_empty_watermark,
		       unsigned int nearly_full_watermark,
		       const char *desc_format, const char *name)
#else
int __qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
			 unsigned int nearly_empty_watermark,
			 unsigned int nearly_full_watermark)
#endif
{
	u32 cfg, addr = 0, mask[4]; /* in 16-dwords */
	int err;

	BUG_ON(queue >= QUEUES);

	if ((nearly_empty_watermark | nearly_full_watermark) & ~7)
		return -EINVAL;

	switch (len) {
	case 16:
		cfg = 0 << 24;
		mask[0] = 0x1;
		break;
	case 32:
		cfg = 1 << 24;
		mask[0] = 0x3;
		break;
	case 64:
		cfg = 2 << 24;
		mask[0] = 0xF;
		break;
	case 128:
		cfg = 3 << 24;
		mask[0] = 0xFF;
		break;
	default:
		return -EINVAL;
	}

	cfg |= nearly_empty_watermark << 26;
	cfg |= nearly_full_watermark << 29;
	len /= 16; /* in 16-dwords: 1, 2, 4 or 8 */
	mask[1] = mask[2] = mask[3] = 0;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	spin_lock_irq(&qmgr_lock);
	if (__raw_readl(&qmgr_regs->sram[queue])) {
		err = -EBUSY;
		goto err;
	}

	while (1) {
		if (!(used_sram_bitmap[0] & mask[0]) &&
		    !(used_sram_bitmap[1] & mask[1]) &&
		    !(used_sram_bitmap[2] & mask[2]) &&
		    !(used_sram_bitmap[3] & mask[3]))
			break; /* found free space */

		addr++;
		shift_mask(mask);
		if (addr + len > ARRAY_SIZE(qmgr_regs->sram)) {
			printk(KERN_ERR "qmgr: no free SRAM space for"
			       " queue %i\n", queue);
			err = -ENOMEM;
			goto err;
		}
	}

	used_sram_bitmap[0] |= mask[0];
	used_sram_bitmap[1] |= mask[1];
	used_sram_bitmap[2] |= mask[2];
	used_sram_bitmap[3] |= mask[3];
	__raw_writel(cfg | (addr << 14), &qmgr_regs->sram[queue]);
#if DEBUG_QMGR
	snprintf(qmgr_queue_descs[queue], sizeof(qmgr_queue_descs[0]),
		 desc_format, name);
	printk(KERN_DEBUG "qmgr: requested queue %s(%i) addr = 0x%02X\n",
	       qmgr_queue_descs[queue], queue, addr);
#endif
	spin_unlock_irq(&qmgr_lock);
	return 0;

err:
	spin_unlock_irq(&qmgr_lock);
	module_put(THIS_MODULE);
	return err;
}

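/*
 * Typical call sequence (a sketch only; "my_handler" and "my_dev" are
 * hypothetical, and without DEBUG_QMGR the last two arguments of
 * qmgr_request_queue() are dropped):
 *
 *	qmgr_request_queue(queue, 64, 0, 0, "%s:RX", dev->name);
 *	qmgr_set_irq(queue, QUEUE_IRQ_SRC_NOT_EMPTY, my_handler, my_dev);
 *	qmgr_enable_irq(queue);
 *	...
 *	qmgr_disable_irq(queue);
 *	qmgr_release_queue(queue);
 */
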
void qmgr_release_queue(unsigned int queue)
{
	u32 cfg, addr, mask[4];

	BUG_ON(queue >= QUEUES); /* not in valid range */

	spin_lock_irq(&qmgr_lock);
	cfg = __raw_readl(&qmgr_regs->sram[queue]);
	addr = (cfg >> 14) & 0xFF;

	BUG_ON(!addr); /* not requested */

	switch ((cfg >> 24) & 3) {
	case 0: mask[0] = 0x1; break;
	case 1: mask[0] = 0x3; break;
	case 2: mask[0] = 0xF; break;
	case 3: mask[0] = 0xFF; break;
	}

	mask[1] = mask[2] = mask[3] = 0;

	while (addr--)
		shift_mask(mask);

#if DEBUG_QMGR
	printk(KERN_DEBUG "qmgr: releasing queue %s(%i)\n",
	       qmgr_queue_descs[queue], queue);
	qmgr_queue_descs[queue][0] = '\x0';
#endif

	while ((addr = qmgr_get_entry(queue)))
		printk(KERN_ERR "qmgr: released queue %i not empty: 0x%08X\n",
		       queue, addr);

	__raw_writel(0, &qmgr_regs->sram[queue]);

	used_sram_bitmap[0] &= ~mask[0];
	used_sram_bitmap[1] &= ~mask[1];
	used_sram_bitmap[2] &= ~mask[2];
	used_sram_bitmap[3] &= ~mask[3];
	irq_handlers[queue] = NULL; /* catch IRQ bugs */
	spin_unlock_irq(&qmgr_lock);

	module_put(THIS_MODULE);
}

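/*
 * Claim and map the Queue Manager MMIO region, put every register into a
 * known state, and hook up the two queue IRQs; early IXP42x rev. A0 parts
 * get the workaround handlers registered instead of qmgr_irq().
 */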
static int qmgr_init(void)
{
	int i, err;
	irq_handler_t handler1, handler2;

	mem_res = request_mem_region(IXP4XX_QMGR_BASE_PHYS,
				     IXP4XX_QMGR_REGION_SIZE,
				     "IXP4xx Queue Manager");
	if (mem_res == NULL)
		return -EBUSY;

	qmgr_regs = ioremap(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
	if (qmgr_regs == NULL) {
		err = -ENOMEM;
		goto error_map;
	}

	/* reset qmgr registers */
	for (i = 0; i < 4; i++) {
		__raw_writel(0x33333333, &qmgr_regs->stat1[i]);
		__raw_writel(0, &qmgr_regs->irqsrc[i]);
	}
	for (i = 0; i < 2; i++) {
		__raw_writel(0, &qmgr_regs->stat2[i]);
		__raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[i]); /* clear */
		__raw_writel(0, &qmgr_regs->irqen[i]);
	}
	__raw_writel(0xFFFFFFFF, &qmgr_regs->statne_h);
	__raw_writel(0, &qmgr_regs->statf_h);
	for (i = 0; i < QUEUES; i++)
		__raw_writel(0, &qmgr_regs->sram[i]);

	if (cpu_is_ixp42x_rev_a0()) {
		handler1 = qmgr_irq1_a0;
		handler2 = qmgr_irq2_a0;
	} else
		handler1 = handler2 = qmgr_irq;

	err = request_irq(IRQ_IXP4XX_QM1, handler1, 0, "IXP4xx Queue Manager",
			  NULL);
	if (err) {
		printk(KERN_ERR "qmgr: failed to request IRQ%i (%i)\n",
		       IRQ_IXP4XX_QM1, err);
		goto error_irq;
	}

	err = request_irq(IRQ_IXP4XX_QM2, handler2, 0, "IXP4xx Queue Manager",
			  NULL);
	if (err) {
		printk(KERN_ERR "qmgr: failed to request IRQ%i (%i)\n",
		       IRQ_IXP4XX_QM2, err);
		goto error_irq2;
	}

	used_sram_bitmap[0] = 0xF; /* 4 first pages reserved for config */
	spin_lock_init(&qmgr_lock);

	printk(KERN_INFO "IXP4xx Queue Manager initialized.\n");
	return 0;

error_irq2:
	free_irq(IRQ_IXP4XX_QM1, NULL);
error_irq:
	iounmap(qmgr_regs);
error_map:
	release_mem_region(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
	return err;
}

static void qmgr_remove(void)
{
	free_irq(IRQ_IXP4XX_QM1, NULL);
	free_irq(IRQ_IXP4XX_QM2, NULL);
	synchronize_irq(IRQ_IXP4XX_QM1);
	synchronize_irq(IRQ_IXP4XX_QM2);
	iounmap(qmgr_regs);
	release_mem_region(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
}

module_init(qmgr_init);
module_exit(qmgr_remove);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Krzysztof Halasa");

EXPORT_SYMBOL(qmgr_regs);
EXPORT_SYMBOL(qmgr_set_irq);
EXPORT_SYMBOL(qmgr_enable_irq);
EXPORT_SYMBOL(qmgr_disable_irq);
#if DEBUG_QMGR
EXPORT_SYMBOL(qmgr_queue_descs);
EXPORT_SYMBOL(qmgr_request_queue);
#else
EXPORT_SYMBOL(__qmgr_request_queue);
#endif
EXPORT_SYMBOL(qmgr_release_queue);