qib_verbs_mcast.c

/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/rculist.h>

#include "qib.h"

/**
 * qib_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
 * @qp: the QP to link
 */
static struct qib_mcast_qp *qib_mcast_qp_alloc(struct qib_qp *qp)
{
	struct qib_mcast_qp *mqp;

	mqp = kmalloc(sizeof(*mqp), GFP_KERNEL);
	if (!mqp)
		goto bail;

	mqp->qp = qp;
	atomic_inc(&qp->refcount);

bail:
	return mqp;
}

static void qib_mcast_qp_free(struct qib_mcast_qp *mqp)
{
	struct qib_qp *qp = mqp->qp;

	/* Notify qib_destroy_qp() if it is waiting. */
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);

	kfree(mqp);
}

/**
 * qib_mcast_alloc - allocate the multicast GID structure
 * @mgid: the multicast GID
 *
 * A list of QPs will be attached to this structure.
 */
static struct qib_mcast *qib_mcast_alloc(union ib_gid *mgid)
{
	struct qib_mcast *mcast;

	mcast = kmalloc(sizeof(*mcast), GFP_KERNEL);
	if (!mcast)
		goto bail;

	mcast->mgid = *mgid;
	INIT_LIST_HEAD(&mcast->qp_list);
	init_waitqueue_head(&mcast->wait);
	atomic_set(&mcast->refcount, 0);
	mcast->n_attached = 0;

bail:
	return mcast;
}

/* Drop every QP still linked to this group, then free the group itself. */
static void qib_mcast_free(struct qib_mcast *mcast)
{
	struct qib_mcast_qp *p, *tmp;

	list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
		qib_mcast_qp_free(p);

	kfree(mcast);
}

/**
 * qib_mcast_find - search the global table for the given multicast GID
 * @ibp: the IB port structure
 * @mgid: the multicast GID to search for
 *
 * Returns NULL if not found.
 *
 * The caller is responsible for decrementing the reference count if found.
 */
struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid)
{
	struct rb_node *n;
	unsigned long flags;
	struct qib_mcast *mcast;

	spin_lock_irqsave(&ibp->lock, flags);
	n = ibp->mcast_tree.rb_node;
	while (n) {
		int ret;

		mcast = rb_entry(n, struct qib_mcast, rb_node);

		ret = memcmp(mgid->raw, mcast->mgid.raw,
			     sizeof(union ib_gid));
		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else {
			atomic_inc(&mcast->refcount);
			spin_unlock_irqrestore(&ibp->lock, flags);
			goto bail;
		}
	}
	spin_unlock_irqrestore(&ibp->lock, flags);

	mcast = NULL;

bail:
	return mcast;
}
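
/*
 * Usage sketch (illustrative only, not code from this file): the receive
 * path pairs qib_mcast_find() with a matching reference drop once it has
 * fanned the packet out to every attached QP.  The deliver() call and the
 * local variables are placeholders; the refcount/wake_up protocol is what
 * matters, since qib_multicast_detach() waits on it before freeing.
 *
 *	mcast = qib_mcast_find(ibp, &hdr->u.l.grh.dgid);
 *	if (mcast == NULL)
 *		goto drop;
 *	list_for_each_entry_rcu(p, &mcast->qp_list, list)
 *		deliver(p->qp, packet);
 *	if (atomic_dec_return(&mcast->refcount) <= 1)
 *		wake_up(&mcast->wait);
 */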

/**
 * qib_mcast_add - insert mcast GID into table and attach QP struct
 * @dev: the device to count the multicast group against
 * @ibp: the IB port the group lives on
 * @mcast: the new multicast GID structure to insert if needed
 * @mqp: the QP to attach
 *
 * Called with no locks held; takes ibp->lock and, nested inside it,
 * dev->n_mcast_grps_lock.
 *
 * Return zero if both were added.  Return EEXIST if the GID was already in
 * the table but the QP was added.  Return ESRCH if the QP was already
 * attached and neither structure was added.  Return ENOMEM if either the
 * per-group QP limit or the per-device group limit would be exceeded.
 */
static int qib_mcast_add(struct qib_ibdev *dev, struct qib_ibport *ibp,
			 struct qib_mcast *mcast, struct qib_mcast_qp *mqp)
{
	struct rb_node **n = &ibp->mcast_tree.rb_node;
	struct rb_node *pn = NULL;
	int ret;

	spin_lock_irq(&ibp->lock);

	while (*n) {
		struct qib_mcast *tmcast;
		struct qib_mcast_qp *p;

		pn = *n;
		tmcast = rb_entry(pn, struct qib_mcast, rb_node);

		ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw,
			     sizeof(union ib_gid));
		if (ret < 0) {
			n = &pn->rb_left;
			continue;
		}
		if (ret > 0) {
			n = &pn->rb_right;
			continue;
		}

		/* Search the QP list to see if this is already there. */
		list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
			if (p->qp == mqp->qp) {
				ret = ESRCH;
				goto bail;
			}
		}
		if (tmcast->n_attached == ib_qib_max_mcast_qp_attached) {
			ret = ENOMEM;
			goto bail;
		}

		tmcast->n_attached++;

		list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
		ret = EEXIST;
		goto bail;
	}

	spin_lock(&dev->n_mcast_grps_lock);
	if (dev->n_mcast_grps_allocated == ib_qib_max_mcast_grps) {
		spin_unlock(&dev->n_mcast_grps_lock);
		ret = ENOMEM;
		goto bail;
	}

	dev->n_mcast_grps_allocated++;
	spin_unlock(&dev->n_mcast_grps_lock);

	mcast->n_attached++;

	list_add_tail_rcu(&mqp->list, &mcast->qp_list);

	atomic_inc(&mcast->refcount);
	rb_link_node(&mcast->rb_node, pn, n);
	rb_insert_color(&mcast->rb_node, &ibp->mcast_tree);

	ret = 0;

bail:
	spin_unlock_irq(&ibp->lock);

	return ret;
}

int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct qib_qp *qp = to_iqp(ibqp);
	struct qib_ibdev *dev = to_idev(ibqp->device);
	struct qib_ibport *ibp;
	struct qib_mcast *mcast;
	struct qib_mcast_qp *mqp;
	int ret;

	if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
		ret = -EINVAL;
		goto bail;
	}

	/*
	 * Allocate data structures since it's better to do this outside of
	 * spin locks and it will most likely be needed.
	 */
	mcast = qib_mcast_alloc(gid);
	if (mcast == NULL) {
		ret = -ENOMEM;
		goto bail;
	}
	mqp = qib_mcast_qp_alloc(qp);
	if (mqp == NULL) {
		qib_mcast_free(mcast);
		ret = -ENOMEM;
		goto bail;
	}
	ibp = to_iport(ibqp->device, qp->port_num);
	switch (qib_mcast_add(dev, ibp, mcast, mqp)) {
	case ESRCH:
		/* Neither was used: OK to attach the same QP twice. */
		qib_mcast_qp_free(mqp);
		qib_mcast_free(mcast);
		break;

	case EEXIST:            /* The mcast wasn't used */
		qib_mcast_free(mcast);
		break;

	case ENOMEM:
		/* Exceeded the maximum number of mcast groups. */
		qib_mcast_qp_free(mqp);
		qib_mcast_free(mcast);
		ret = -ENOMEM;
		goto bail;

	default:
		break;
	}

	ret = 0;

bail:
	return ret;
}

int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct qib_qp *qp = to_iqp(ibqp);
	struct qib_ibdev *dev = to_idev(ibqp->device);
	struct qib_ibport *ibp = to_iport(ibqp->device, qp->port_num);
	struct qib_mcast *mcast = NULL;
	struct qib_mcast_qp *p, *tmp, *delp = NULL;
	struct rb_node *n;
	int last = 0;
	int ret;

	if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
		ret = -EINVAL;
		goto bail;
	}

	spin_lock_irq(&ibp->lock);

	/* Find the GID in the mcast table. */
	n = ibp->mcast_tree.rb_node;
	while (1) {
		if (n == NULL) {
			spin_unlock_irq(&ibp->lock);
			ret = -EINVAL;
			goto bail;
		}

		mcast = rb_entry(n, struct qib_mcast, rb_node);
		ret = memcmp(gid->raw, mcast->mgid.raw,
			     sizeof(union ib_gid));
		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			break;
	}

	/* Search the QP list. */
	list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
		if (p->qp != qp)
			continue;
		/*
		 * We found it, so remove it, but don't poison the forward
		 * link until we are sure there are no list walkers.
		 */
		list_del_rcu(&p->list);
		mcast->n_attached--;
		delp = p;

		/* If this was the last attached QP, remove the GID too. */
		if (list_empty(&mcast->qp_list)) {
			rb_erase(&mcast->rb_node, &ibp->mcast_tree);
			last = 1;
		}
		break;
	}

	spin_unlock_irq(&ibp->lock);

	/*
	 * The QP was never on this group's list.  Don't touch the stale
	 * loop cursor: after a full traversal, p points at the list head
	 * cast to an entry, not a real qib_mcast_qp, so it must not be
	 * freed.
	 */
	if (!delp) {
		ret = -EINVAL;
		goto bail;
	}

	/*
	 * Wait for any list walkers to finish before freeing the
	 * list element.
	 */
	wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
	qib_mcast_qp_free(delp);

	if (last) {
		atomic_dec(&mcast->refcount);
		wait_event(mcast->wait, !atomic_read(&mcast->refcount));
		qib_mcast_free(mcast);
		spin_lock_irq(&dev->n_mcast_grps_lock);
		dev->n_mcast_grps_allocated--;
		spin_unlock_irq(&dev->n_mcast_grps_lock);
	}

	ret = 0;

bail:
	return ret;
}
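
/*
 * For orientation (a sketch, not code from this file): these two entry
 * points are hooked into the struct ib_device when the driver registers
 * with the IB core, along the lines of what qib_register_ib_device() in
 * qib_verbs.c does:
 *
 *	ibdev->attach_mcast = qib_multicast_attach;
 *	ibdev->detach_mcast = qib_multicast_detach;
 */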

int qib_mcast_tree_empty(struct qib_ibport *ibp)
{
	return ibp->mcast_tree.rb_node == NULL;
}