usnic_ib_qp_grp.c

/*
 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include "usnic_log.h"
#include "usnic_vnic.h"
#include "usnic_fwd.h"
#include "usnic_uiom.h"
#include "usnic_debugfs.h"
#include "usnic_ib_qp_grp.h"
#include "usnic_ib_sysfs.h"
#include "usnic_transport.h"

#define DFLT_RQ_IDX	0

const char *usnic_ib_qp_grp_state_to_string(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:
		return "Rst";
	case IB_QPS_INIT:
		return "Init";
	case IB_QPS_RTR:
		return "RTR";
	case IB_QPS_RTS:
		return "RTS";
	case IB_QPS_SQD:
		return "SQD";
	case IB_QPS_SQE:
		return "SQE";
	case IB_QPS_ERR:
		return "ERR";
	default:
		return "UNKNOWN STATE";
	}
}

int usnic_ib_qp_grp_dump_hdr(char *buf, int buf_sz)
{
	return scnprintf(buf, buf_sz, "|QPN\t|State\t|PID\t|VF Idx\t|Fil ID");
}

int usnic_ib_qp_grp_dump_rows(void *obj, char *buf, int buf_sz)
{
	struct usnic_ib_qp_grp *qp_grp = obj;
	struct usnic_ib_qp_grp_flow *default_flow;

	if (obj) {
		default_flow = list_first_entry(&qp_grp->flows_lst,
					struct usnic_ib_qp_grp_flow, link);
		return scnprintf(buf, buf_sz, "|%d\t|%s\t|%d\t|%hu\t|%d",
					qp_grp->ibqp.qp_num,
					usnic_ib_qp_grp_state_to_string(
							qp_grp->state),
					qp_grp->owner_pid,
					usnic_vnic_get_index(qp_grp->vf->vnic),
					default_flow->flow->flow_id);
	} else {
		return scnprintf(buf, buf_sz, "|N/A\t|N/A\t|N/A\t|N/A\t|N/A");
	}
}

static struct usnic_vnic_res_chunk *
get_qp_res_chunk(struct usnic_ib_qp_grp *qp_grp)
{
	lockdep_assert_held(&qp_grp->lock);
	/*
	 * The QP res chunk, used to derive qp indices,
	 * is just the chunk of RQ indices.
	 */
	return usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
}

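/*
 * Enable each RQ backing this QP group on the forwarding device. If
 * any RQ fails to enable, the ones already enabled are disabled again
 * before the error is returned. Caller must hold qp_grp->lock.
 */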
static int enable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
{
	int status;
	int i, vnic_idx;
	struct usnic_vnic_res_chunk *res_chunk;
	struct usnic_vnic_res *res;

	lockdep_assert_held(&qp_grp->lock);

	vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);

	res_chunk = get_qp_res_chunk(qp_grp);
	if (IS_ERR_OR_NULL(res_chunk)) {
		usnic_err("Unable to get qp res with err %ld\n",
				PTR_ERR(res_chunk));
		return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
	}

	for (i = 0; i < res_chunk->cnt; i++) {
		res = res_chunk->res[i];
		status = usnic_fwd_enable_qp(qp_grp->ufdev, vnic_idx,
						res->vnic_idx);
		if (status) {
			usnic_err("Failed to enable qp %d of %s:%d with err %d\n",
					res->vnic_idx, qp_grp->ufdev->name,
					vnic_idx, status);
			goto out_err;
		}
	}

	return 0;

out_err:
	for (i--; i >= 0; i--) {
		res = res_chunk->res[i];
		usnic_fwd_disable_qp(qp_grp->ufdev, vnic_idx,
					res->vnic_idx);
	}

	return status;
}

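/*
 * Disable each RQ backing this QP group. Failures are logged but do
 * not stop the loop; the status of the last disable attempt is
 * returned. Caller must hold qp_grp->lock.
 */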
static int disable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
{
	int i, vnic_idx;
	struct usnic_vnic_res_chunk *res_chunk;
	struct usnic_vnic_res *res;
	int status = 0;

	lockdep_assert_held(&qp_grp->lock);

	vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);

	res_chunk = get_qp_res_chunk(qp_grp);
	if (IS_ERR_OR_NULL(res_chunk)) {
		usnic_err("Unable to get qp res with err %ld\n",
				PTR_ERR(res_chunk));
		return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
	}

	for (i = 0; i < res_chunk->cnt; i++) {
		res = res_chunk->res[i];
		status = usnic_fwd_disable_qp(qp_grp->ufdev, vnic_idx,
						res->vnic_idx);
		if (status) {
			usnic_err("Failed to disable rq %d of %s:%d with err %d\n",
					res->vnic_idx,
					qp_grp->ufdev->name,
					vnic_idx, status);
		}
	}

	return status;
}

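/*
 * Fill in a filter action that steers matching packets to this QP
 * group's default RQ.
 */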
static int init_filter_action(struct usnic_ib_qp_grp *qp_grp,
				struct usnic_filter_action *uaction)
{
	struct usnic_vnic_res_chunk *res_chunk;

	res_chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
	if (IS_ERR_OR_NULL(res_chunk)) {
		usnic_err("Unable to get %s with err %ld\n",
			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
			PTR_ERR(res_chunk));
		return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
	}

	uaction->vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
	uaction->action.type = FILTER_ACTION_RQ_STEERING;
	uaction->action.u.rq_idx = res_chunk->res[DFLT_RQ_IDX]->vnic_idx;

	return 0;
}

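/*
 * Reserve the requested ROCE port, build a usnic filter for it, and
 * allocate the forwarding flow plus its qp_flow handle. The port
 * reservation and the flow are unwound on failure.
 */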
static struct usnic_ib_qp_grp_flow*
create_roce_custom_flow(struct usnic_ib_qp_grp *qp_grp,
			struct usnic_transport_spec *trans_spec)
{
	uint16_t port_num;
	int err;
	struct filter filter;
	struct usnic_filter_action uaction;
	struct usnic_ib_qp_grp_flow *qp_flow;
	struct usnic_fwd_flow *flow;
	enum usnic_transport_type trans_type;

	trans_type = trans_spec->trans_type;
	port_num = trans_spec->usnic_roce.port_num;

	/* Reserve Port */
	port_num = usnic_transport_rsrv_port(trans_type, port_num);
	if (port_num == 0)
		return ERR_PTR(-EINVAL);

	/* Create Flow */
	usnic_fwd_init_usnic_filter(&filter, port_num);
	err = init_filter_action(qp_grp, &uaction);
	if (err)
		goto out_unreserve_port;

	flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
	if (IS_ERR_OR_NULL(flow)) {
		usnic_err("Unable to alloc flow with err %ld\n",
				PTR_ERR(flow));
		err = flow ? PTR_ERR(flow) : -EFAULT;
		goto out_unreserve_port;
	}

	/* Create Flow Handle */
	qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC);
	if (!qp_flow) {
		err = -ENOMEM;
		goto out_dealloc_flow;
	}
	qp_flow->flow = flow;
	qp_flow->trans_type = trans_type;
	qp_flow->usnic_roce.port_num = port_num;
	qp_flow->qp_grp = qp_grp;

	return qp_flow;

out_dealloc_flow:
	usnic_fwd_dealloc_flow(flow);
out_unreserve_port:
	usnic_transport_unrsrv_port(trans_type, port_num);
	return ERR_PTR(err);
}

static void release_roce_custom_flow(struct usnic_ib_qp_grp_flow *qp_flow)
{
	usnic_fwd_dealloc_flow(qp_flow->flow);
	usnic_transport_unrsrv_port(qp_flow->trans_type,
					qp_flow->usnic_roce.port_num);
	kfree(qp_flow);
}

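/*
 * Build a flow around a caller-supplied UDP socket: take a reference
 * on the socket, verify it really is UDP, derive the filter from its
 * bound address and port, and allocate the forwarding flow. The
 * socket reference is dropped on any failure.
 */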
static struct usnic_ib_qp_grp_flow*
create_udp_flow(struct usnic_ib_qp_grp *qp_grp,
		struct usnic_transport_spec *trans_spec)
{
	struct socket *sock;
	int sock_fd;
	int err;
	struct filter filter;
	struct usnic_filter_action uaction;
	struct usnic_ib_qp_grp_flow *qp_flow;
	struct usnic_fwd_flow *flow;
	enum usnic_transport_type trans_type;
	uint32_t addr;
	uint16_t port_num;
	int proto;

	trans_type = trans_spec->trans_type;
	sock_fd = trans_spec->udp.sock_fd;

	/* Get and check socket */
	sock = usnic_transport_get_socket(sock_fd);
	if (IS_ERR_OR_NULL(sock))
		return ERR_CAST(sock);

	err = usnic_transport_sock_get_addr(sock, &proto, &addr, &port_num);
	if (err)
		goto out_put_sock;

	if (proto != IPPROTO_UDP) {
		usnic_err("Protocol for fd %d is not UDP\n", sock_fd);
		err = -EPERM;
		goto out_put_sock;
	}

	/* Create flow */
	usnic_fwd_init_udp_filter(&filter, addr, port_num);
	err = init_filter_action(qp_grp, &uaction);
	if (err)
		goto out_put_sock;

	flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
	if (IS_ERR_OR_NULL(flow)) {
		usnic_err("Unable to alloc flow with err %ld\n",
				PTR_ERR(flow));
		err = flow ? PTR_ERR(flow) : -EFAULT;
		goto out_put_sock;
	}

	/* Create qp_flow */
	qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC);
	if (!qp_flow) {
		err = -ENOMEM;
		goto out_dealloc_flow;
	}
	qp_flow->flow = flow;
	qp_flow->trans_type = trans_type;
	qp_flow->udp.sock = sock;
	qp_flow->qp_grp = qp_grp;

	return qp_flow;

out_dealloc_flow:
	usnic_fwd_dealloc_flow(flow);
out_put_sock:
	usnic_transport_put_socket(sock);
	return ERR_PTR(err);
}

static void release_udp_flow(struct usnic_ib_qp_grp_flow *qp_flow)
{
	usnic_fwd_dealloc_flow(qp_flow->flow);
	usnic_transport_put_socket(qp_flow->udp.sock);
	kfree(qp_flow);
}

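/*
 * Transport dispatcher: create a flow for the given transport spec
 * and, on success, link it into the QP group's flow list and expose
 * it in debugfs.
 */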
static struct usnic_ib_qp_grp_flow*
create_and_add_flow(struct usnic_ib_qp_grp *qp_grp,
			struct usnic_transport_spec *trans_spec)
{
	struct usnic_ib_qp_grp_flow *qp_flow;
	enum usnic_transport_type trans_type;

	trans_type = trans_spec->trans_type;
	switch (trans_type) {
	case USNIC_TRANSPORT_ROCE_CUSTOM:
		qp_flow = create_roce_custom_flow(qp_grp, trans_spec);
		break;
	case USNIC_TRANSPORT_IPV4_UDP:
		qp_flow = create_udp_flow(qp_grp, trans_spec);
		break;
	default:
		usnic_err("Unsupported transport %u\n",
				trans_spec->trans_type);
		return ERR_PTR(-EINVAL);
	}

	if (!IS_ERR_OR_NULL(qp_flow)) {
		list_add_tail(&qp_flow->link, &qp_grp->flows_lst);
		usnic_debugfs_flow_add(qp_flow);
	}

	return qp_flow;
}

static void release_and_remove_flow(struct usnic_ib_qp_grp_flow *qp_flow)
{
	usnic_debugfs_flow_remove(qp_flow);
	list_del(&qp_flow->link);

	switch (qp_flow->trans_type) {
	case USNIC_TRANSPORT_ROCE_CUSTOM:
		release_roce_custom_flow(qp_flow);
		break;
	case USNIC_TRANSPORT_IPV4_UDP:
		release_udp_flow(qp_flow);
		break;
	default:
		WARN(1, "Unsupported transport %u\n",
				qp_flow->trans_type);
		break;
	}
}

static void release_and_remove_all_flows(struct usnic_ib_qp_grp *qp_grp)
{
	struct usnic_ib_qp_grp_flow *qp_flow, *tmp;

	list_for_each_entry_safe(qp_flow, tmp, &qp_grp->flows_lst, link)
		release_and_remove_flow(qp_flow);
}

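/*
 * QP group state machine. Validates the old-state/new-state pair,
 * adds or releases flows and enables or disables the underlying RQs
 * as required, and raises IB_EVENT_QP_FATAL when moving to the error
 * state. For transitions that install filters, data carries an
 * optional struct usnic_transport_spec.
 */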
int usnic_ib_qp_grp_modify(struct usnic_ib_qp_grp *qp_grp,
				enum ib_qp_state new_state,
				void *data)
{
	int status = 0;
	int vnic_idx;
	struct ib_event ib_event;
	enum ib_qp_state old_state;
	struct usnic_transport_spec *trans_spec;
	struct usnic_ib_qp_grp_flow *qp_flow;

	old_state = qp_grp->state;
	vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
	trans_spec = (struct usnic_transport_spec *) data;

	spin_lock(&qp_grp->lock);
	switch (new_state) {
	case IB_QPS_RESET:
		switch (old_state) {
		case IB_QPS_RESET:
			/* NO-OP */
			break;
		case IB_QPS_INIT:
			release_and_remove_all_flows(qp_grp);
			status = 0;
			break;
		case IB_QPS_RTR:
		case IB_QPS_RTS:
		case IB_QPS_ERR:
			status = disable_qp_grp(qp_grp);
			release_and_remove_all_flows(qp_grp);
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_INIT:
		switch (old_state) {
		case IB_QPS_RESET:
			if (trans_spec) {
				qp_flow = create_and_add_flow(qp_grp,
								trans_spec);
				if (IS_ERR_OR_NULL(qp_flow)) {
					status = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
					break;
				}
			} else {
				/*
				 * Optional to specify filters.
				 */
				status = 0;
			}
			break;
		case IB_QPS_INIT:
			if (trans_spec) {
				qp_flow = create_and_add_flow(qp_grp,
								trans_spec);
				if (IS_ERR_OR_NULL(qp_flow)) {
					status = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
					break;
				}
			} else {
				/*
				 * Doesn't make sense to go into INIT state
				 * from INIT state w/o adding filters.
				 */
				status = -EINVAL;
			}
			break;
		case IB_QPS_RTR:
			status = disable_qp_grp(qp_grp);
			break;
		case IB_QPS_RTS:
			status = disable_qp_grp(qp_grp);
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_RTR:
		switch (old_state) {
		case IB_QPS_INIT:
			status = enable_qp_grp(qp_grp);
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_RTS:
		switch (old_state) {
		case IB_QPS_RTR:
			/* NO-OP FOR NOW */
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_ERR:
		ib_event.device = &qp_grp->vf->pf->ib_dev;
		ib_event.element.qp = &qp_grp->ibqp;
		ib_event.event = IB_EVENT_QP_FATAL;

		switch (old_state) {
		case IB_QPS_RESET:
			qp_grp->ibqp.event_handler(&ib_event,
					qp_grp->ibqp.qp_context);
			break;
		case IB_QPS_INIT:
			release_and_remove_all_flows(qp_grp);
			qp_grp->ibqp.event_handler(&ib_event,
					qp_grp->ibqp.qp_context);
			break;
		case IB_QPS_RTR:
		case IB_QPS_RTS:
			status = disable_qp_grp(qp_grp);
			release_and_remove_all_flows(qp_grp);
			qp_grp->ibqp.event_handler(&ib_event,
					qp_grp->ibqp.qp_context);
			break;
		default:
			status = -EINVAL;
		}
		break;
	default:
		status = -EINVAL;
	}
	spin_unlock(&qp_grp->lock);

	if (!status) {
		qp_grp->state = new_state;
		usnic_info("Transitioned %u from %s to %s",
				qp_grp->grp_id,
				usnic_ib_qp_grp_state_to_string(old_state),
				usnic_ib_qp_grp_state_to_string(new_state));
	} else {
		usnic_err("Failed to transition %u from %s to %s",
				qp_grp->grp_id,
				usnic_ib_qp_grp_state_to_string(old_state),
				usnic_ib_qp_grp_state_to_string(new_state));
	}

	return status;
}

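/*
 * Allocate a NULL-terminated array of resource chunks, one per entry
 * of the EOL-terminated res_spec. Chunks already acquired are
 * returned to the vnic if a later allocation fails.
 */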
static struct usnic_vnic_res_chunk**
alloc_res_chunk_list(struct usnic_vnic *vnic,
			struct usnic_vnic_res_spec *res_spec, void *owner_obj)
{
	enum usnic_vnic_res_type res_type;
	struct usnic_vnic_res_chunk **res_chunk_list;
	int err, i, res_cnt, res_lst_sz;

	for (res_lst_sz = 0;
		res_spec->resources[res_lst_sz].type != USNIC_VNIC_RES_TYPE_EOL;
		res_lst_sz++) {
		/* Do Nothing */
	}

	res_chunk_list = kzalloc(sizeof(*res_chunk_list)*(res_lst_sz+1),
					GFP_ATOMIC);
	if (!res_chunk_list)
		return ERR_PTR(-ENOMEM);

	for (i = 0; res_spec->resources[i].type != USNIC_VNIC_RES_TYPE_EOL;
		i++) {
		res_type = res_spec->resources[i].type;
		res_cnt = res_spec->resources[i].cnt;

		res_chunk_list[i] = usnic_vnic_get_resources(vnic, res_type,
					res_cnt, owner_obj);
		if (IS_ERR_OR_NULL(res_chunk_list[i])) {
			err = res_chunk_list[i] ?
				PTR_ERR(res_chunk_list[i]) : -ENOMEM;
			usnic_err("Failed to get %s from %s with err %d\n",
				usnic_vnic_res_type_to_str(res_type),
				usnic_vnic_pci_name(vnic),
				err);
			goto out_free_res;
		}
	}

	return res_chunk_list;

out_free_res:
	for (i--; i >= 0; i--)
		usnic_vnic_put_resources(res_chunk_list[i]);
	kfree(res_chunk_list);
	return ERR_PTR(err);
}

static void free_qp_grp_res(struct usnic_vnic_res_chunk **res_chunk_list)
{
	int i;

	for (i = 0; res_chunk_list[i]; i++)
		usnic_vnic_put_resources(res_chunk_list[i]);
	kfree(res_chunk_list);
}

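/*
 * Bind a QP group to a VF. The first QP group on a VF attaches the
 * VF's PCI device to the PD's IOMMU domain; later groups only bump
 * the reference count. Every QP group on a VF must use the same PD.
 */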
static int qp_grp_and_vf_bind(struct usnic_ib_vf *vf,
				struct usnic_ib_pd *pd,
				struct usnic_ib_qp_grp *qp_grp)
{
	int err;
	struct pci_dev *pdev;

	lockdep_assert_held(&vf->lock);

	pdev = usnic_vnic_get_pdev(vf->vnic);
	if (vf->qp_grp_ref_cnt == 0) {
		err = usnic_uiom_attach_dev_to_pd(pd->umem_pd, &pdev->dev);
		if (err) {
			usnic_err("Failed to attach %s to domain\n",
					pci_name(pdev));
			return err;
		}
		vf->pd = pd;
	}
	vf->qp_grp_ref_cnt++;

	WARN_ON(vf->pd != pd);
	qp_grp->vf = vf;

	return 0;
}

static void qp_grp_and_vf_unbind(struct usnic_ib_qp_grp *qp_grp)
{
	struct pci_dev *pdev;
	struct usnic_ib_pd *pd;

	lockdep_assert_held(&qp_grp->vf->lock);

	pd = qp_grp->vf->pd;
	pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic);
	if (--qp_grp->vf->qp_grp_ref_cnt == 0) {
		qp_grp->vf->pd = NULL;
		usnic_uiom_detach_dev_from_pd(pd->umem_pd, &pdev->dev);
	}
	qp_grp->vf = NULL;
}

static void log_spec(struct usnic_vnic_res_spec *res_spec)
{
	char buf[512];

	usnic_vnic_spec_dump(buf, sizeof(buf), res_spec);
	usnic_dbg("%s\n", buf);
}

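/*
 * Derive the QP group id (and hence the QP number) from the group's
 * default flow: the reserved port for ROCE_CUSTOM, or the socket's
 * bound UDP port for IPV4_UDP.
 */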
static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow,
				uint32_t *id)
{
	enum usnic_transport_type trans_type = qp_flow->trans_type;
	int err;
	uint16_t port_num = 0;

	switch (trans_type) {
	case USNIC_TRANSPORT_ROCE_CUSTOM:
		*id = qp_flow->usnic_roce.port_num;
		break;
	case USNIC_TRANSPORT_IPV4_UDP:
		err = usnic_transport_sock_get_addr(qp_flow->udp.sock,
							NULL, NULL,
							&port_num);
		if (err)
			return err;
		/*
		 * Copy port_num to stack first and then to *id,
		 * so that the short to int cast works for little
		 * and big endian systems.
		 */
		*id = port_num;
		break;
	default:
		usnic_err("Unsupported transport %u\n", trans_type);
		return -EINVAL;
	}

	return 0;
}

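/*
 * Create a QP group: verify the resource spec meets the transport's
 * minimum, acquire vnic resources, bind the group to the VF and PD,
 * install the default flow, and publish the group id as the QP
 * number via sysfs. Caller must hold vf->lock.
 */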
struct usnic_ib_qp_grp *
usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
			struct usnic_ib_pd *pd,
			struct usnic_vnic_res_spec *res_spec,
			struct usnic_transport_spec *transport_spec)
{
	struct usnic_ib_qp_grp *qp_grp;
	int err;
	enum usnic_transport_type transport = transport_spec->trans_type;
	struct usnic_ib_qp_grp_flow *qp_flow;

	lockdep_assert_held(&vf->lock);

	err = usnic_vnic_res_spec_satisfied(&min_transport_spec[transport],
						res_spec);
	if (err) {
		usnic_err("Spec does not meet minimum req for transport %d\n",
				transport);
		log_spec(res_spec);
		return ERR_PTR(err);
	}

	qp_grp = kzalloc(sizeof(*qp_grp), GFP_ATOMIC);
	if (!qp_grp) {
		usnic_err("Unable to alloc qp_grp - Out of memory\n");
		return NULL;
	}

	qp_grp->res_chunk_list = alloc_res_chunk_list(vf->vnic, res_spec,
							qp_grp);
	if (IS_ERR_OR_NULL(qp_grp->res_chunk_list)) {
		err = qp_grp->res_chunk_list ?
				PTR_ERR(qp_grp->res_chunk_list) : -ENOMEM;
		usnic_err("Unable to alloc res for %d with err %d\n",
				qp_grp->grp_id, err);
		goto out_free_qp_grp;
	}

	err = qp_grp_and_vf_bind(vf, pd, qp_grp);
	if (err)
		goto out_free_res;

	INIT_LIST_HEAD(&qp_grp->flows_lst);
	spin_lock_init(&qp_grp->lock);
	qp_grp->ufdev = ufdev;
	qp_grp->state = IB_QPS_RESET;
	qp_grp->owner_pid = current->pid;

	qp_flow = create_and_add_flow(qp_grp, transport_spec);
	if (IS_ERR_OR_NULL(qp_flow)) {
		usnic_err("Unable to create and add flow with err %ld\n",
				PTR_ERR(qp_flow));
		err = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
		goto out_qp_grp_vf_unbind;
	}

	err = qp_grp_id_from_flow(qp_flow, &qp_grp->grp_id);
	if (err)
		goto out_release_flow;
	qp_grp->ibqp.qp_num = qp_grp->grp_id;

	usnic_ib_sysfs_qpn_add(qp_grp);

	return qp_grp;

out_release_flow:
	release_and_remove_flow(qp_flow);
out_qp_grp_vf_unbind:
	qp_grp_and_vf_unbind(qp_grp);
out_free_res:
	free_qp_grp_res(qp_grp->res_chunk_list);
out_free_qp_grp:
	kfree(qp_grp);

	return ERR_PTR(err);
}

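/*
 * Tear down a QP group created by usnic_ib_qp_grp_create(). The group
 * must already be in the RESET state; caller must hold vf->lock.
 */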
void usnic_ib_qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
{
	WARN_ON(qp_grp->state != IB_QPS_RESET);
	lockdep_assert_held(&qp_grp->vf->lock);

	release_and_remove_all_flows(qp_grp);
	usnic_ib_sysfs_qpn_remove(qp_grp);
	qp_grp_and_vf_unbind(qp_grp);
	free_qp_grp_res(qp_grp->res_chunk_list);
	kfree(qp_grp);
}

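/*
 * Return this QP group's resource chunk of the given type, or
 * ERR_PTR(-EINVAL) if the NULL-terminated chunk list has none.
 */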
struct usnic_vnic_res_chunk*
usnic_ib_qp_grp_get_chunk(struct usnic_ib_qp_grp *qp_grp,
				enum usnic_vnic_res_type res_type)
{
	int i;

	for (i = 0; qp_grp->res_chunk_list[i]; i++) {
		if (qp_grp->res_chunk_list[i]->type == res_type)
			return qp_grp->res_chunk_list[i];
	}

	return ERR_PTR(-EINVAL);
}