ipz_pt_fn.c

/*
 * IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 * internal queue handling
 *
 * Authors: Waleri Fomin <fomin@de.ibm.com>
 *          Reinhard Ernst <rernst@de.ibm.com>
 *          Christoph Raisch <raisch@de.ibm.com>
 *
 * Copyright (c) 2005 IBM Corporation
 *
 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
 * BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/slab.h>

#include "ehca_tools.h"
#include "ipz_pt_fn.h"
#include "ehca_classes.h"

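/*
 * Number of 4K eHCA hardware pages that fit into one kernel page
 * (e.g. 16 with a 64K kernel PAGE_SIZE, 1 with 4K kernel pages).
 */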
#define PAGES_PER_KPAGE (PAGE_SIZE >> EHCA_PAGESHIFT)

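/* slab cache for struct ipz_small_queue_page, the bookkeeping behind small queues */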
struct kmem_cache *small_qp_cache;

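/*
 * Return the current queue page and advance the iterator by one full
 * queue page; returns NULL at the end of the queue or if the iterator
 * is not page aligned.
 */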
void *ipz_qpageit_get_inc(struct ipz_queue *queue)
{
	void *ret = ipz_qeit_get(queue);

	queue->current_q_offset += queue->pagesize;
	if (queue->current_q_offset > queue->queue_length) {
		queue->current_q_offset -= queue->pagesize;
		ret = NULL;
	}
	if (((u64)ret) % queue->pagesize) {
		ehca_gen_err("ERROR!! not at PAGE-Boundary");
		return NULL;
	}
	return ret;
}

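/*
 * Return the current event queue entry and advance by one entry,
 * wrapping to the start of the queue (and flipping the toggle bit)
 * after the last entry.
 */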
void *ipz_qeit_eq_get_inc(struct ipz_queue *queue)
{
	void *ret = ipz_qeit_get(queue);
	u64 last_entry_in_q = queue->queue_length - queue->qe_size;

	queue->current_q_offset += queue->qe_size;
	if (queue->current_q_offset > last_entry_in_q) {
		queue->current_q_offset = 0;
		queue->toggle_state = (~queue->toggle_state) & 1;
	}
	return ret;
}

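/*
 * Translate an absolute (physical) address into an offset within the
 * queue by scanning the page list; returns -EINVAL if the address
 * does not belong to this queue.
 */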
int ipz_queue_abs_to_offset(struct ipz_queue *queue, u64 addr, u64 *q_offset)
{
	int i;

	for (i = 0; i < queue->queue_length / queue->pagesize; i++) {
		u64 page = __pa(queue->queue_pages[i]);
		if (addr >= page && addr < page + queue->pagesize) {
			*q_offset = addr - page + i * queue->pagesize;
			return 0;
		}
	}
	return -EINVAL;
}

#if PAGE_SHIFT < EHCA_PAGESHIFT
#error Kernel pages must be at least as large as eHCA pages (4K)!
#endif

/*
 * allocate pages for queue:
 * outer loop allocates whole kernel pages (page aligned) and
 * inner loop divides a kernel page into smaller hca queue pages
 */
static int alloc_queue_pages(struct ipz_queue *queue, const u32 nr_of_pages)
{
	int k, f = 0;
	u8 *kpage;

	while (f < nr_of_pages) {
		kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
		if (!kpage)
			goto out;

		for (k = 0; k < PAGES_PER_KPAGE && f < nr_of_pages; k++) {
			queue->queue_pages[f] = (struct ipz_page *)kpage;
			kpage += EHCA_PAGESIZE;
			f++;
		}
	}
	return 1;

out:
	/* on failure, free what was allocated so far; only every
	 * PAGES_PER_KPAGE-th entry starts a fresh kernel page */
	for (f = 0; f < nr_of_pages && queue->queue_pages[f];
	     f += PAGES_PER_KPAGE)
		free_page((unsigned long)(queue->queue_pages)[f]);
	return 0;
}

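/*
 * Carve a sub-page ("small") queue out of a shared kernel page: take a
 * page with a free chunk of the requested size from the pd's free list
 * (allocating a fresh page if none is available), claim one chunk in
 * its bitmap and point queue_pages[0] at that chunk.
 */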
static int alloc_small_queue_page(struct ipz_queue *queue, struct ehca_pd *pd)
{
	/* order 0 = 512 byte, order 1 = 1K, order 2 = 2K queue pages */
	int order = ilog2(queue->pagesize) - 9;
	struct ipz_small_queue_page *page;
	unsigned long bit;

	mutex_lock(&pd->lock);

	if (!list_empty(&pd->free[order]))
		page = list_entry(pd->free[order].next,
				  struct ipz_small_queue_page, list);
	else {
		page = kmem_cache_zalloc(small_qp_cache, GFP_KERNEL);
		if (!page)
			goto out;

		page->page = get_zeroed_page(GFP_KERNEL);
		if (!page->page) {
			kmem_cache_free(small_qp_cache, page);
			goto out;
		}

		list_add(&page->list, &pd->free[order]);
	}

	bit = find_first_zero_bit(page->bitmap, IPZ_SPAGE_PER_KPAGE >> order);
	__set_bit(bit, page->bitmap);
	page->fill++;

	if (page->fill == IPZ_SPAGE_PER_KPAGE >> order)
		list_move(&page->list, &pd->full[order]);

	mutex_unlock(&pd->lock);

	queue->queue_pages[0] = (void *)(page->page | (bit << (order + 9)));
	queue->small_page = page;
	queue->offset = bit << (order + 9);
	return 1;

out:
	ehca_err(pd->ib_pd.device, "failed to allocate small queue page");
	mutex_unlock(&pd->lock);
	return 0;
}

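/*
 * Return the chunk owned by this queue to its kernel page; the kernel
 * page itself is freed once its last chunk has been released.
 */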
static void free_small_queue_page(struct ipz_queue *queue, struct ehca_pd *pd)
{
	int order = ilog2(queue->pagesize) - 9;
	struct ipz_small_queue_page *page = queue->small_page;
	unsigned long bit;
	int free_page = 0;

	bit = ((unsigned long)queue->queue_pages[0] & ~PAGE_MASK)
		>> (order + 9);

	mutex_lock(&pd->lock);

	__clear_bit(bit, page->bitmap);
	page->fill--;

	if (page->fill == 0) {
		list_del(&page->list);
		free_page = 1;
	}

	if (page->fill == (IPZ_SPAGE_PER_KPAGE >> order) - 1)
		/* the page was full until we freed the chunk */
		list_move_tail(&page->list, &pd->free[order]);

	mutex_unlock(&pd->lock);

	if (free_page) {
		free_page(page->page);
		kmem_cache_free(small_qp_cache, page);
	}
}

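/*
 * Set up an ipz_queue: initialize the bookkeeping fields, allocate the
 * page-pointer array (falling back to vzalloc when the kzalloc attempt
 * fails) and allocate the queue pages themselves.
 * Returns 1 on success, 0 on failure.
 */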
int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
		   const u32 nr_of_pages, const u32 pagesize,
		   const u32 qe_size, const u32 nr_of_sg,
		   int is_small)
{
	if (pagesize > PAGE_SIZE) {
		ehca_gen_err("FATAL ERROR: pagesize=%x "
			     "is greater than kernel page size", pagesize);
		return 0;
	}

	/* init queue fields */
	queue->queue_length = nr_of_pages * pagesize;
	queue->pagesize = pagesize;
	queue->qe_size = qe_size;
	queue->act_nr_of_sg = nr_of_sg;
	queue->current_q_offset = 0;
	queue->toggle_state = 1;
	queue->small_page = NULL;

	/* allocate queue page pointers */
	queue->queue_pages = kzalloc(nr_of_pages * sizeof(void *),
				     GFP_KERNEL | __GFP_NOWARN);
	if (!queue->queue_pages) {
		queue->queue_pages = vzalloc(nr_of_pages * sizeof(void *));
		if (!queue->queue_pages) {
			ehca_gen_err("Couldn't allocate queue page list");
			return 0;
		}
	}

	/* allocate actual queue pages */
	if (is_small) {
		if (!alloc_small_queue_page(queue, pd))
			goto ipz_queue_ctor_exit0;
	} else
		if (!alloc_queue_pages(queue, nr_of_pages))
			goto ipz_queue_ctor_exit0;

	return 1;

ipz_queue_ctor_exit0:
	ehca_gen_err("Couldn't alloc pages queue=%p "
		     "nr_of_pages=%x", queue, nr_of_pages);
	kvfree(queue->queue_pages);
	return 0;
}

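/*
 * Tear down a queue built by ipz_queue_ctor(): free the queue pages
 * (only every PAGES_PER_KPAGE-th entry starts a kernel page) and the
 * page-pointer array.
 */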
int ipz_queue_dtor(struct ehca_pd *pd, struct ipz_queue *queue)
{
	int i, nr_pages;

	if (!queue || !queue->queue_pages) {
		ehca_gen_dbg("queue or queue_pages is NULL");
		return 0;
	}

	if (queue->small_page)
		free_small_queue_page(queue, pd);
	else {
		nr_pages = queue->queue_length / queue->pagesize;
		for (i = 0; i < nr_pages; i += PAGES_PER_KPAGE)
			free_page((unsigned long)queue->queue_pages[i]);
	}

	kvfree(queue->queue_pages);
	return 1;
}

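/*
 * Create the slab cache backing small queue allocations; presumably
 * called once while the driver is loaded.
 */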
int ehca_init_small_qp_cache(void)
{
	small_qp_cache = kmem_cache_create("ehca_cache_small_qp",
					   sizeof(struct ipz_small_queue_page),
					   0, SLAB_HWCACHE_ALIGN, NULL);
	if (!small_qp_cache)
		return -ENOMEM;
	return 0;
}

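/* counterpart to ehca_init_small_qp_cache() */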
void ehca_cleanup_small_qp_cache(void)
{
	kmem_cache_destroy(small_qp_cache);
}