/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include <rdma/ib_cache.h>

#include "core_priv.h"

struct ib_pkey_cache {
        int          table_len;
        u16          table[0];
};

struct ib_gid_cache {
        int          table_len;
        union ib_gid table[0];
};

struct ib_update_work {
        struct work_struct work;
        struct ib_device  *device;
        u8                 port_num;
};

int ib_get_cached_gid(struct ib_device *device,
                      u8                port_num,
                      int               index,
                      union ib_gid     *gid)
{
        struct ib_gid_cache *cache;
        unsigned long flags;
        int ret = 0;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.gid_cache[port_num - rdma_start_port(device)];

        if (index < 0 || index >= cache->table_len)
                ret = -EINVAL;
        else
                *gid = cache->table[index];

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_gid);
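
/*
 * Usage sketch (illustrative, not part of this file): a consumer such as
 * a ULP or the CM can read a port's GID table through the cache instead
 * of querying the hardware on every lookup, e.g.
 *
 *      union ib_gid gid;
 *      int ret;
 *
 *      ret = ib_get_cached_gid(device, port_num, 0, &gid);
 *      if (ret)
 *              return ret;
 *
 * "device" and "port_num" are assumed caller-side values; index 0 is
 * typically the default GID derived from the port GUID. Error handling
 * here is illustrative only.
 */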

int ib_find_cached_gid(struct ib_device *device,
                       const union ib_gid *gid,
                       u8                 *port_num,
                       u16                *index)
{
        struct ib_gid_cache *cache;
        unsigned long flags;
        int p, i;
        int ret = -ENOENT;

        *port_num = -1;
        if (index)
                *index = -1;

        read_lock_irqsave(&device->cache.lock, flags);

        for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) {
                cache = device->cache.gid_cache[p];
                for (i = 0; i < cache->table_len; ++i) {
                        if (!memcmp(gid, &cache->table[i], sizeof *gid)) {
                                *port_num = p + rdma_start_port(device);
                                if (index)
                                        *index = i;
                                ret = 0;
                                goto found;
                        }
                }
        }
found:
        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_find_cached_gid);

int ib_get_cached_pkey(struct ib_device *device,
                       u8                port_num,
                       int               index,
                       u16              *pkey)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int ret = 0;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

        if (index < 0 || index >= cache->table_len)
                ret = -EINVAL;
        else
                *pkey = cache->table[index];

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);

int ib_find_cached_pkey(struct ib_device *device,
                        u8                port_num,
                        u16               pkey,
                        u16              *index)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int i;
        int ret = -ENOENT;
        int partial_ix = -1;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

        *index = -1;

        for (i = 0; i < cache->table_len; ++i)
                if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
                        if (cache->table[i] & 0x8000) {
                                *index = i;
                                ret = 0;
                                break;
                        } else
                                partial_ix = i;
                }

        if (ret && partial_ix >= 0) {
                *index = partial_ix;
                ret = 0;
        }

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);
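
/*
 * Usage sketch (illustrative, not part of this file): the low 15 bits of
 * a P_Key identify the partition and bit 15 marks full membership, which
 * is why the search above prefers an entry with 0x8000 set and only falls
 * back to a partial-membership match. A caller filling in QP attributes
 * might do something like:
 *
 *      u16 pkey_index;
 *
 *      if (!ib_find_cached_pkey(device, port_num, pkey, &pkey_index))
 *              qp_attr->pkey_index = pkey_index;
 *
 * "device", "port_num", "pkey" and "qp_attr" are assumed caller-side
 * values used only for illustration.
 */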

int ib_find_exact_cached_pkey(struct ib_device *device,
                              u8                port_num,
                              u16               pkey,
                              u16              *index)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int i;
        int ret = -ENOENT;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

        *index = -1;

        for (i = 0; i < cache->table_len; ++i)
                if (cache->table[i] == pkey) {
                        *index = i;
                        ret = 0;
                        break;
                }

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);

int ib_get_cached_lmc(struct ib_device *device,
                      u8                port_num,
                      u8               *lmc)
{
        unsigned long flags;
        int ret = 0;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);
        *lmc = device->cache.lmc_cache[port_num - rdma_start_port(device)];
        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);

static void ib_cache_update(struct ib_device *device,
                            u8                port)
{
        struct ib_port_attr  *tprops = NULL;
        struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache;
        struct ib_gid_cache  *gid_cache = NULL, *old_gid_cache;
        int i;
        int ret;

        tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
        if (!tprops)
                return;

        ret = ib_query_port(device, port, tprops);
        if (ret) {
                printk(KERN_WARNING "ib_query_port failed (%d) for %s\n",
                       ret, device->name);
                goto err;
        }

        pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
                             sizeof *pkey_cache->table, GFP_KERNEL);
        if (!pkey_cache)
                goto err;

        pkey_cache->table_len = tprops->pkey_tbl_len;

        gid_cache = kmalloc(sizeof *gid_cache + tprops->gid_tbl_len *
                            sizeof *gid_cache->table, GFP_KERNEL);
        if (!gid_cache)
                goto err;

        gid_cache->table_len = tprops->gid_tbl_len;

        for (i = 0; i < pkey_cache->table_len; ++i) {
                ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
                if (ret) {
                        printk(KERN_WARNING "ib_query_pkey failed (%d) for %s (index %d)\n",
                               ret, device->name, i);
                        goto err;
                }
        }

        for (i = 0; i < gid_cache->table_len; ++i) {
                ret = ib_query_gid(device, port, i, gid_cache->table + i);
                if (ret) {
                        printk(KERN_WARNING "ib_query_gid failed (%d) for %s (index %d)\n",
                               ret, device->name, i);
                        goto err;
                }
        }

        write_lock_irq(&device->cache.lock);

        old_pkey_cache = device->cache.pkey_cache[port - rdma_start_port(device)];
        old_gid_cache  = device->cache.gid_cache [port - rdma_start_port(device)];

        device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache;
        device->cache.gid_cache [port - rdma_start_port(device)] = gid_cache;

        device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc;

        write_unlock_irq(&device->cache.lock);

        kfree(old_pkey_cache);
        kfree(old_gid_cache);
        kfree(tprops);
        return;

err:
        kfree(pkey_cache);
        kfree(gid_cache);
        kfree(tprops);
}

static void ib_cache_task(struct work_struct *_work)
{
        struct ib_update_work *work =
                container_of(_work, struct ib_update_work, work);

        ib_cache_update(work->device, work->port_num);
        kfree(work);
}

static void ib_cache_event(struct ib_event_handler *handler,
                           struct ib_event *event)
{
        struct ib_update_work *work;

        if (event->event == IB_EVENT_PORT_ERR    ||
            event->event == IB_EVENT_PORT_ACTIVE ||
            event->event == IB_EVENT_LID_CHANGE  ||
            event->event == IB_EVENT_PKEY_CHANGE ||
            event->event == IB_EVENT_SM_CHANGE   ||
            event->event == IB_EVENT_CLIENT_REREGISTER ||
            event->event == IB_EVENT_GID_CHANGE) {
                work = kmalloc(sizeof *work, GFP_ATOMIC);
                if (work) {
                        INIT_WORK(&work->work, ib_cache_task);
                        work->device   = event->device;
                        work->port_num = event->element.port_num;
                        queue_work(ib_wq, &work->work);
                }
        }
}

static void ib_cache_setup_one(struct ib_device *device)
{
        int p;

        rwlock_init(&device->cache.lock);

        device->cache.pkey_cache =
                kmalloc(sizeof *device->cache.pkey_cache *
                        (rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
        device->cache.gid_cache =
                kmalloc(sizeof *device->cache.gid_cache *
                        (rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
        device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
                                          (rdma_end_port(device) -
                                           rdma_start_port(device) + 1),
                                          GFP_KERNEL);

        if (!device->cache.pkey_cache || !device->cache.gid_cache ||
            !device->cache.lmc_cache) {
                printk(KERN_WARNING "Couldn't allocate cache "
                       "for %s\n", device->name);
                goto err;
        }

        for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) {
                device->cache.pkey_cache[p] = NULL;
                device->cache.gid_cache [p] = NULL;
                ib_cache_update(device, p + rdma_start_port(device));
        }

        INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
                              device, ib_cache_event);
        if (ib_register_event_handler(&device->cache.event_handler))
                goto err_cache;

        return;

err_cache:
        for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) {
                kfree(device->cache.pkey_cache[p]);
                kfree(device->cache.gid_cache[p]);
        }

err:
        kfree(device->cache.pkey_cache);
        kfree(device->cache.gid_cache);
        kfree(device->cache.lmc_cache);
}

static void ib_cache_cleanup_one(struct ib_device *device)
{
        int p;

        ib_unregister_event_handler(&device->cache.event_handler);
        flush_workqueue(ib_wq);

        for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) {
                kfree(device->cache.pkey_cache[p]);
                kfree(device->cache.gid_cache[p]);
        }

        kfree(device->cache.pkey_cache);
        kfree(device->cache.gid_cache);
        kfree(device->cache.lmc_cache);
}

static struct ib_client cache_client = {
        .name   = "cache",
        .add    = ib_cache_setup_one,
        .remove = ib_cache_cleanup_one
};

int __init ib_cache_setup(void)
{
        return ib_register_client(&cache_client);
}

void __exit ib_cache_cleanup(void)
{
        ib_unregister_client(&cache_client);
}
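
/*
 * Note (informational sketch, not from this file): ib_cache_setup() and
 * ib_cache_cleanup() are expected to be called once from the ib_core
 * module's init/exit path. Registering cache_client makes
 * ib_cache_setup_one()/ib_cache_cleanup_one() run for every IB device
 * as it is added or removed, e.g.
 *
 *      if (ib_cache_setup())
 *              pr_warn("Couldn't set up InfiniBand P_Key/GID cache\n");
 *
 * The caller and warning message here are assumptions for illustration.
 */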