bnxt_ulp.c

/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2016-2018 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <asm/byteorder.h>
#include <linux/bitmap.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
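
/* Register an upper-layer protocol (ULP) driver with this netdev.  For the
 * RoCE ULP, a minimum number of statistics contexts is carved out of the
 * function's pool before the ops table and driver handle are installed.
 */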
static int bnxt_register_dev(struct bnxt_en_dev *edev, int ulp_id,
                             struct bnxt_ulp_ops *ulp_ops, void *handle)
{
        struct net_device *dev = edev->net;
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_ulp *ulp;

        ASSERT_RTNL();
        if (ulp_id >= BNXT_MAX_ULP)
                return -EINVAL;

        ulp = &edev->ulp_tbl[ulp_id];
        if (rcu_access_pointer(ulp->ulp_ops)) {
                netdev_err(bp->dev, "ulp id %d already registered\n", ulp_id);
                return -EBUSY;
        }
        if (ulp_id == BNXT_ROCE_ULP) {
                unsigned int max_stat_ctxs;

                max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
                if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS ||
                    bp->num_stat_ctxs == max_stat_ctxs)
                        return -ENOMEM;
                bnxt_set_max_func_stat_ctxs(bp, max_stat_ctxs -
                                            BNXT_MIN_ROCE_STAT_CTXS);
        }

        atomic_set(&ulp->ref_count, 0);
        ulp->handle = handle;
        rcu_assign_pointer(ulp->ulp_ops, ulp_ops);

        if (ulp_id == BNXT_ROCE_ULP) {
                if (test_bit(BNXT_STATE_OPEN, &bp->state))
                        bnxt_hwrm_vnic_cfg(bp, 0);
        }

        return 0;
}
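
/* Tear down a ULP registration: give back the reserved statistics context,
 * release any MSI-X vectors still held by the ULP, deregister its async
 * events, then wait (up to ~1 second) for outstanding references to drop.
 */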
static int bnxt_unregister_dev(struct bnxt_en_dev *edev, int ulp_id)
{
        struct net_device *dev = edev->net;
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_ulp *ulp;
        int i = 0;

        ASSERT_RTNL();
        if (ulp_id >= BNXT_MAX_ULP)
                return -EINVAL;

        ulp = &edev->ulp_tbl[ulp_id];
        if (!rcu_access_pointer(ulp->ulp_ops)) {
                netdev_err(bp->dev, "ulp id %d not registered\n", ulp_id);
                return -EINVAL;
        }
        if (ulp_id == BNXT_ROCE_ULP) {
                unsigned int max_stat_ctxs;

                max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
                bnxt_set_max_func_stat_ctxs(bp, max_stat_ctxs + 1);
                if (ulp->msix_requested)
                        edev->en_ops->bnxt_free_msix(edev, ulp_id);
        }
        if (ulp->max_async_event_id)
                bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);

        RCU_INIT_POINTER(ulp->ulp_ops, NULL);
        synchronize_rcu();
        ulp->max_async_event_id = 0;
        ulp->async_events_bmap = NULL;
        while (atomic_read(&ulp->ref_count) != 0 && i < 10) {
                msleep(100);
                i++;
        }
        return 0;
}
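
/* Fill the caller-provided MSI-X entry table from the vectors set aside for
 * the RoCE ULP, starting at its msix_base ring index, including the per-ring
 * doorbell offset.
 */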
static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
{
        struct bnxt_en_dev *edev = bp->edev;
        int num_msix, idx, i;

        num_msix = edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested;
        idx = edev->ulp_tbl[BNXT_ROCE_ULP].msix_base;
        for (i = 0; i < num_msix; i++) {
                ent[i].vector = bp->irq_tbl[idx + i].vector;
                ent[i].ring_idx = idx + i;
                ent[i].db_offset = (idx + i) * 0x80;
        }
}
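
/* Reserve MSI-X vectors for the RoCE ULP.  If the currently allocated IRQs
 * are not enough, the NIC is closed and reopened (or the rings re-reserved)
 * so the new total takes effect.  Returns the number of vectors actually
 * granted, which may be fewer than requested.
 */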
static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
                              struct bnxt_msix_entry *ent, int num_msix)
{
        struct net_device *dev = edev->net;
        struct bnxt *bp = netdev_priv(dev);
        int max_idx, max_cp_rings;
        int avail_msix, idx;
        int rc = 0;

        ASSERT_RTNL();
        if (ulp_id != BNXT_ROCE_ULP)
                return -EINVAL;

        if (!(bp->flags & BNXT_FLAG_USING_MSIX))
                return -ENODEV;

        if (edev->ulp_tbl[ulp_id].msix_requested)
                return -EAGAIN;

        max_cp_rings = bnxt_get_max_func_cp_rings(bp);
        avail_msix = bnxt_get_avail_msix(bp, num_msix);
        if (!avail_msix)
                return -ENOMEM;
        if (avail_msix > num_msix)
                avail_msix = num_msix;

        if (BNXT_NEW_RM(bp)) {
                idx = bp->cp_nr_rings;
        } else {
                max_idx = min_t(int, bp->total_irqs, max_cp_rings);
                idx = max_idx - avail_msix;
        }
        edev->ulp_tbl[ulp_id].msix_base = idx;
        edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
        if (bp->total_irqs < (idx + avail_msix)) {
                if (netif_running(dev)) {
                        bnxt_close_nic(bp, true, false);
                        rc = bnxt_open_nic(bp, true, false);
                } else {
                        rc = bnxt_reserve_rings(bp);
                }
        }
        if (rc) {
                edev->ulp_tbl[ulp_id].msix_requested = 0;
                return -EAGAIN;
        }

        if (BNXT_NEW_RM(bp)) {
                struct bnxt_hw_resc *hw_resc = &bp->hw_resc;

                avail_msix = hw_resc->resv_cp_rings - bp->cp_nr_rings;
                edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
        }
        bnxt_fill_msix_vecs(bp, ent);
        edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
        return avail_msix;
}
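
/* Release the MSI-X vectors previously granted to the RoCE ULP and restart
 * the NIC so the L2 driver can reclaim them.
 */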
static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
{
        struct net_device *dev = edev->net;
        struct bnxt *bp = netdev_priv(dev);

        ASSERT_RTNL();
        if (ulp_id != BNXT_ROCE_ULP)
                return -EINVAL;

        if (!(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
                return 0;

        edev->ulp_tbl[ulp_id].msix_requested = 0;
        edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
        if (netif_running(dev)) {
                bnxt_close_nic(bp, true, false);
                bnxt_open_nic(bp, true, false);
        }
        return 0;
}

int bnxt_get_ulp_msix_num(struct bnxt *bp)
{
        if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
                struct bnxt_en_dev *edev = bp->edev;

                return edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested;
        }
        return 0;
}

int bnxt_get_ulp_msix_base(struct bnxt *bp)
{
        if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
                struct bnxt_en_dev *edev = bp->edev;

                if (edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested)
                        return edev->ulp_tbl[BNXT_ROCE_ULP].msix_base;
        }
        return 0;
}
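
/* Send an HWRM firmware message on behalf of a ULP under the shared
 * hwrm_cmd_lock, then copy the response back into the caller's buffer,
 * truncated to resp_max_len if necessary.
 */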
static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id,
                         struct bnxt_fw_msg *fw_msg)
{
        struct net_device *dev = edev->net;
        struct bnxt *bp = netdev_priv(dev);
        struct input *req;
        int rc;

        mutex_lock(&bp->hwrm_cmd_lock);
        req = fw_msg->msg;
        req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
        rc = _hwrm_send_message(bp, fw_msg->msg, fw_msg->msg_len,
                                fw_msg->timeout);
        if (!rc) {
                struct output *resp = bp->hwrm_cmd_resp_addr;
                u32 len = le16_to_cpu(resp->resp_len);

                if (fw_msg->resp_max_len < len)
                        len = fw_msg->resp_max_len;

                memcpy(fw_msg->resp, resp, len);
        }
        mutex_unlock(&bp->hwrm_cmd_lock);
        return rc;
}

static void bnxt_ulp_get(struct bnxt_ulp *ulp)
{
        atomic_inc(&ulp->ref_count);
}

static void bnxt_ulp_put(struct bnxt_ulp *ulp)
{
        atomic_dec(&ulp->ref_count);
}
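
/* bnxt_ulp_stop()/bnxt_ulp_start() bracket periods when the device is being
 * reconfigured or reset: each registered ULP is told to quiesce beforehand
 * and to resume afterwards via its ulp_stop/ulp_start callbacks.
 */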
void bnxt_ulp_stop(struct bnxt *bp)
{
        struct bnxt_en_dev *edev = bp->edev;
        struct bnxt_ulp_ops *ops;
        int i;

        if (!edev)
                return;

        for (i = 0; i < BNXT_MAX_ULP; i++) {
                struct bnxt_ulp *ulp = &edev->ulp_tbl[i];

                ops = rtnl_dereference(ulp->ulp_ops);
                if (!ops || !ops->ulp_stop)
                        continue;
                ops->ulp_stop(ulp->handle);
        }
}

void bnxt_ulp_start(struct bnxt *bp)
{
        struct bnxt_en_dev *edev = bp->edev;
        struct bnxt_ulp_ops *ops;
        int i;

        if (!edev)
                return;

        for (i = 0; i < BNXT_MAX_ULP; i++) {
                struct bnxt_ulp *ulp = &edev->ulp_tbl[i];

                ops = rtnl_dereference(ulp->ulp_ops);
                if (!ops || !ops->ulp_start)
                        continue;
                ops->ulp_start(ulp->handle);
        }
}

void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs)
{
        struct bnxt_en_dev *edev = bp->edev;
        struct bnxt_ulp_ops *ops;
        int i;

        if (!edev)
                return;

        for (i = 0; i < BNXT_MAX_ULP; i++) {
                struct bnxt_ulp *ulp = &edev->ulp_tbl[i];

                rcu_read_lock();
                ops = rcu_dereference(ulp->ulp_ops);
                if (!ops || !ops->ulp_sriov_config) {
                        rcu_read_unlock();
                        continue;
                }
                bnxt_ulp_get(ulp);
                rcu_read_unlock();
                ops->ulp_sriov_config(ulp->handle, num_vfs);
                bnxt_ulp_put(ulp);
        }
}

void bnxt_ulp_shutdown(struct bnxt *bp)
{
        struct bnxt_en_dev *edev = bp->edev;
        struct bnxt_ulp_ops *ops;
        int i;

        if (!edev)
                return;

        for (i = 0; i < BNXT_MAX_ULP; i++) {
                struct bnxt_ulp *ulp = &edev->ulp_tbl[i];

                ops = rtnl_dereference(ulp->ulp_ops);
                if (!ops || !ops->ulp_shutdown)
                        continue;
                ops->ulp_shutdown(ulp->handle);
        }
}
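
/* bnxt_ulp_irq_stop()/bnxt_ulp_irq_restart() are used while the L2 driver
 * remaps its IRQs: the RoCE ULP is told to stop using its vectors first and
 * is later handed a freshly filled MSI-X table (or NULL on error).
 */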
void bnxt_ulp_irq_stop(struct bnxt *bp)
{
        struct bnxt_en_dev *edev = bp->edev;
        struct bnxt_ulp_ops *ops;

        if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
                return;

        if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
                struct bnxt_ulp *ulp = &edev->ulp_tbl[BNXT_ROCE_ULP];

                if (!ulp->msix_requested)
                        return;

                ops = rtnl_dereference(ulp->ulp_ops);
                if (!ops || !ops->ulp_irq_stop)
                        return;
                ops->ulp_irq_stop(ulp->handle);
        }
}

void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
{
        struct bnxt_en_dev *edev = bp->edev;
        struct bnxt_ulp_ops *ops;

        if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
                return;

        if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
                struct bnxt_ulp *ulp = &edev->ulp_tbl[BNXT_ROCE_ULP];
                struct bnxt_msix_entry *ent = NULL;

                if (!ulp->msix_requested)
                        return;

                ops = rtnl_dereference(ulp->ulp_ops);
                if (!ops || !ops->ulp_irq_restart)
                        return;

                if (!err) {
                        ent = kcalloc(ulp->msix_requested, sizeof(*ent),
                                      GFP_KERNEL);
                        if (!ent)
                                return;
                        bnxt_fill_msix_vecs(bp, ent);
                }
                ops->ulp_irq_restart(ulp->handle, ent);
                kfree(ent);
        }
}
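
/* Dispatch a firmware async event completion to every ULP whose registered
 * event bitmap includes this event ID.  Runs under RCU so a concurrent
 * unregister cannot free the ops out from under us.
 */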
void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl)
{
        u16 event_id = le16_to_cpu(cmpl->event_id);
        struct bnxt_en_dev *edev = bp->edev;
        struct bnxt_ulp_ops *ops;
        int i;

        if (!edev)
                return;

        rcu_read_lock();
        for (i = 0; i < BNXT_MAX_ULP; i++) {
                struct bnxt_ulp *ulp = &edev->ulp_tbl[i];

                ops = rcu_dereference(ulp->ulp_ops);
                if (!ops || !ops->ulp_async_notifier)
                        continue;
                if (!ulp->async_events_bmap ||
                    event_id > ulp->max_async_event_id)
                        continue;

                /* Read max_async_event_id first before testing the bitmap. */
                smp_rmb();
                if (test_bit(event_id, ulp->async_events_bmap))
                        ops->ulp_async_notifier(ulp->handle, cmpl);
        }
        rcu_read_unlock();
}
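
/* Record which async event IDs a ULP wants to receive and tell the firmware
 * to generate them.  The bitmap pointer is published before
 * max_async_event_id so that bnxt_ulp_async_events() never tests a stale
 * bitmap.
 */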
static int bnxt_register_async_events(struct bnxt_en_dev *edev, int ulp_id,
                                      unsigned long *events_bmap, u16 max_id)
{
        struct net_device *dev = edev->net;
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_ulp *ulp;

        if (ulp_id >= BNXT_MAX_ULP)
                return -EINVAL;

        ulp = &edev->ulp_tbl[ulp_id];
        ulp->async_events_bmap = events_bmap;
        /* Make sure bnxt_ulp_async_events() sees this order */
        smp_wmb();
        ulp->max_async_event_id = max_id;
        bnxt_hwrm_func_rgtr_async_events(bp, events_bmap, max_id + 1);
        return 0;
}

static const struct bnxt_en_ops bnxt_en_ops_tbl = {
        .bnxt_register_device   = bnxt_register_dev,
        .bnxt_unregister_device = bnxt_unregister_dev,
        .bnxt_request_msix      = bnxt_req_msix_vecs,
        .bnxt_free_msix         = bnxt_free_msix_vecs,
        .bnxt_send_fw_msg       = bnxt_send_msg,
        .bnxt_register_fw_async_events  = bnxt_register_async_events,
};
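
/* Entry point for ULP drivers: return (allocating on first use) the
 * bnxt_en_dev handle for this netdev, with the exported ops table and the
 * device's RoCE v1/v2 capability flags filled in.
 */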
struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev)
{
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_en_dev *edev;

        edev = bp->edev;
        if (!edev) {
                edev = kzalloc(sizeof(*edev), GFP_KERNEL);
                if (!edev)
                        return ERR_PTR(-ENOMEM);
                edev->en_ops = &bnxt_en_ops_tbl;
                if (bp->flags & BNXT_FLAG_ROCEV1_CAP)
                        edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP;
                if (bp->flags & BNXT_FLAG_ROCEV2_CAP)
                        edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP;
                edev->net = dev;
                edev->pdev = bp->pdev;
                bp->edev = edev;
        }
        return bp->edev;
}
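
/* Illustrative sketch only (not part of this file): roughly how an RDMA ULP
 * such as bnxt_re could attach to the L2 driver through the ops table above.
 * Every name that is not a bnxt_en/bnxt_ulp symbol defined or referenced in
 * this file (my_ulp_ops, my_attach, my_dev, my_* callbacks) is hypothetical.
 *
 *      static struct bnxt_ulp_ops my_ulp_ops = {
 *              .ulp_async_notifier = my_async_notifier,
 *              .ulp_stop           = my_stop,
 *              .ulp_start          = my_start,
 *              .ulp_sriov_config   = my_sriov_config,
 *              .ulp_shutdown       = my_shutdown,
 *              .ulp_irq_stop       = my_irq_stop,
 *              .ulp_irq_restart    = my_irq_restart,
 *      };
 *
 *      static int my_attach(struct net_device *netdev, void *my_dev)
 *      {
 *              struct bnxt_en_dev *edev = bnxt_ulp_probe(netdev);
 *
 *              if (IS_ERR(edev))
 *                      return PTR_ERR(edev);
 *              return edev->en_ops->bnxt_register_device(edev, BNXT_ROCE_ULP,
 *                                                        &my_ulp_ops, my_dev);
 *      }
 */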