nes_mgt.c 36 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158
  1. /*
  2. * Copyright (c) 2006 - 2011 Intel-NE, Inc. All rights reserved.
  3. *
  4. * This software is available to you under a choice of one of two
  5. * licenses. You may choose to be licensed under the terms of the GNU
  6. * General Public License (GPL) Version 2, available from the file
  7. * COPYING in the main directory of this source tree, or the
  8. * OpenIB.org BSD license below:
  9. *
  10. * Redistribution and use in source and binary forms, with or
  11. * without modification, are permitted provided that the following
  12. * conditions are met:
  13. *
  14. * - Redistributions of source code must retain the above
  15. * copyright notice, this list of conditions and the following
  16. * disclaimer.
  17. *
  18. * - Redistributions in binary form must reproduce the above
  19. * copyright notice, this list of conditions and the following
  20. * disclaimer in the documentation and/or other materials
  21. * provided with the distribution.
  22. *
  23. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30. * SOFTWARE.
  31. *
  32. */
  33. #include <linux/skbuff.h>
  34. #include <linux/etherdevice.h>
  35. #include <linux/kthread.h>
  36. #include <linux/ip.h>
  37. #include <linux/tcp.h>
  38. #include <net/tcp.h>
  39. #include "nes.h"
  40. #include "nes_mgt.h"
/* Lifetime counters for packed/unaligned (pau) QPs: incremented in
 * nes_mgt_ce_handler() on first contact and nes_destroy_pau_qp(). */
atomic_t pau_qps_created;
atomic_t pau_qps_destroyed;
/**
 * nes_replenish_mgt_rq - post fresh receive buffers to the mgt QP's RQ
 * @mgtvnic: management vnic whose receive queue needs new skbs
 *
 * Allocates and DMA-maps one skb per needed RQ WQE until rx_skbs_needed
 * reaches zero, ringing the hardware doorbell in batches.  If another
 * context is already replenishing, or an skb allocation fails while the
 * ring is fully depleted, a half-second retry timer is armed (at most
 * once) so nes_mgt_rq_wqes_timeout() can try again.
 */
static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
{
	unsigned long flags;
	dma_addr_t bus_address;
	struct sk_buff *skb;
	struct nes_hw_nic_rq_wqe *nic_rqe;
	struct nes_hw_mgt *nesmgt;
	struct nes_device *nesdev;
	struct nes_rskb_cb *cb;
	u32 rx_wqes_posted = 0;

	nesmgt = &mgtvnic->mgt;
	nesdev = mgtvnic->nesvnic->nesdev;
	spin_lock_irqsave(&nesmgt->rq_lock, flags);
	if (nesmgt->replenishing_rq != 0) {
		/* Another context is refilling; if the ring is completely
		 * empty and no retry timer is pending, arm one so the RQ
		 * cannot stall indefinitely. */
		if (((nesmgt->rq_size - 1) == atomic_read(&mgtvnic->rx_skbs_needed)) &&
		    (atomic_read(&mgtvnic->rx_skb_timer_running) == 0)) {
			atomic_set(&mgtvnic->rx_skb_timer_running, 1);
			spin_unlock_irqrestore(&nesmgt->rq_lock, flags);
			mgtvnic->rq_wqes_timer.expires = jiffies + (HZ / 2); /* 1/2 second */
			add_timer(&mgtvnic->rq_wqes_timer);
		} else {
			spin_unlock_irqrestore(&nesmgt->rq_lock, flags);
		}
		return;
	}
	nesmgt->replenishing_rq = 1;
	spin_unlock_irqrestore(&nesmgt->rq_lock, flags);
	do {
		skb = dev_alloc_skb(mgtvnic->nesvnic->max_frame_size);
		if (skb) {
			skb->dev = mgtvnic->nesvnic->netdev;

			/* Map the buffer for device writes and remember the
			 * mapping in the skb control block for later unmap. */
			bus_address = pci_map_single(nesdev->pcidev,
					skb->data, mgtvnic->nesvnic->max_frame_size, PCI_DMA_FROMDEVICE);
			cb = (struct nes_rskb_cb *)&skb->cb[0];
			cb->busaddr = bus_address;
			cb->maplen = mgtvnic->nesvnic->max_frame_size;

			/* Fill the next RQ WQE with the buffer's length/address */
			nic_rqe = &nesmgt->rq_vbase[mgtvnic->mgt.rq_head];
			nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_1_0_IDX] =
				cpu_to_le32(mgtvnic->nesvnic->max_frame_size);
			nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_3_2_IDX] = 0;
			nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX] =
				cpu_to_le32((u32)bus_address);
			nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX] =
				cpu_to_le32((u32)((u64)bus_address >> 32));
			nesmgt->rx_skb[nesmgt->rq_head] = skb;
			nesmgt->rq_head++;
			/* Mask-based wrap: assumes rq_size is a power of 2 - TODO confirm */
			nesmgt->rq_head &= nesmgt->rq_size - 1;
			atomic_dec(&mgtvnic->rx_skbs_needed);
			barrier();
			/* Doorbell count lives in the top byte, so ring every 255 WQEs */
			if (++rx_wqes_posted == 255) {
				nes_write32(nesdev->regs + NES_WQE_ALLOC, (rx_wqes_posted << 24) | nesmgt->qp_id);
				rx_wqes_posted = 0;
			}
		} else {
			/* Allocation failed: arm the retry timer only if the
			 * ring is fully depleted and no timer is running yet. */
			spin_lock_irqsave(&nesmgt->rq_lock, flags);
			if (((nesmgt->rq_size - 1) == atomic_read(&mgtvnic->rx_skbs_needed)) &&
			    (atomic_read(&mgtvnic->rx_skb_timer_running) == 0)) {
				atomic_set(&mgtvnic->rx_skb_timer_running, 1);
				spin_unlock_irqrestore(&nesmgt->rq_lock, flags);
				mgtvnic->rq_wqes_timer.expires = jiffies + (HZ / 2); /* 1/2 second */
				add_timer(&mgtvnic->rq_wqes_timer);
			} else {
				spin_unlock_irqrestore(&nesmgt->rq_lock, flags);
			}
			break;
		}
	} while (atomic_read(&mgtvnic->rx_skbs_needed));
	barrier();
	/* Flush any WQEs not yet announced to the hardware */
	if (rx_wqes_posted)
		nes_write32(nesdev->regs + NES_WQE_ALLOC, (rx_wqes_posted << 24) | nesmgt->qp_id);
	nesmgt->replenishing_rq = 0;
}
  115. /**
  116. * nes_mgt_rq_wqes_timeout
  117. */
  118. static void nes_mgt_rq_wqes_timeout(struct timer_list *t)
  119. {
  120. struct nes_vnic_mgt *mgtvnic = from_timer(mgtvnic, t,
  121. rq_wqes_timer);
  122. atomic_set(&mgtvnic->rx_skb_timer_running, 0);
  123. if (atomic_read(&mgtvnic->rx_skbs_needed))
  124. nes_replenish_mgt_rq(mgtvnic);
  125. }
  126. /**
  127. * nes_mgt_free_skb - unmap and free skb
  128. */
  129. static void nes_mgt_free_skb(struct nes_device *nesdev, struct sk_buff *skb, u32 dir)
  130. {
  131. struct nes_rskb_cb *cb;
  132. cb = (struct nes_rskb_cb *)&skb->cb[0];
  133. pci_unmap_single(nesdev->pcidev, cb->busaddr, cb->maplen, dir);
  134. cb->busaddr = 0;
  135. dev_kfree_skb_any(skb);
  136. }
  137. /**
  138. * nes_download_callback - handle download completions
  139. */
  140. static void nes_download_callback(struct nes_device *nesdev, struct nes_cqp_request *cqp_request)
  141. {
  142. struct pau_fpdu_info *fpdu_info = cqp_request->cqp_callback_pointer;
  143. struct nes_qp *nesqp = fpdu_info->nesqp;
  144. struct sk_buff *skb;
  145. int i;
  146. for (i = 0; i < fpdu_info->frag_cnt; i++) {
  147. skb = fpdu_info->frags[i].skb;
  148. if (fpdu_info->frags[i].cmplt) {
  149. nes_mgt_free_skb(nesdev, skb, PCI_DMA_TODEVICE);
  150. nes_rem_ref_cm_node(nesqp->cm_node);
  151. }
  152. }
  153. if (fpdu_info->hdr_vbase)
  154. pci_free_consistent(nesdev->pcidev, fpdu_info->hdr_len,
  155. fpdu_info->hdr_vbase, fpdu_info->hdr_pbase);
  156. kfree(fpdu_info);
  157. }
  158. /**
  159. * nes_get_seq - Get the seq, ack_seq and window from the packet
  160. */
  161. static u32 nes_get_seq(struct sk_buff *skb, u32 *ack, u16 *wnd, u32 *fin_rcvd, u32 *rst_rcvd)
  162. {
  163. struct nes_rskb_cb *cb = (struct nes_rskb_cb *)&skb->cb[0];
  164. struct iphdr *iph = (struct iphdr *)(cb->data_start + ETH_HLEN);
  165. struct tcphdr *tcph = (struct tcphdr *)(((char *)iph) + (4 * iph->ihl));
  166. *ack = be32_to_cpu(tcph->ack_seq);
  167. *wnd = be16_to_cpu(tcph->window);
  168. *fin_rcvd = tcph->fin;
  169. *rst_rcvd = tcph->rst;
  170. return be32_to_cpu(tcph->seq);
  171. }
/**
 * nes_get_next_skb - Get the next skb based on where current skb is in the queue
 * @nesdev: device used to unmap skbs consumed during the scan
 * @nesqp: QP owning the pau_list of queued segments
 * @skb: current skb to continue from, or NULL to start at the list head
 * @nextseq: TCP sequence number the caller expects next
 * @ack: out - ack number of the matched segment
 * @wnd: out - window of the matched segment
 * @fin_rcvd: out - FIN flag of the matched segment
 * @rst_rcvd: out - RST flag of the matched segment
 *
 * Walks nesqp->pau_list looking for the segment whose sequence number
 * equals @nextseq.  Segments with older sequence numbers are unlinked,
 * freed and their cm_node reference dropped.  Returns NULL when the
 * expected segment has not arrived (sequence gap) or the list runs out.
 */
static struct sk_buff *nes_get_next_skb(struct nes_device *nesdev, struct nes_qp *nesqp,
					struct sk_buff *skb, u32 nextseq, u32 *ack,
					u16 *wnd, u32 *fin_rcvd, u32 *rst_rcvd)
{
	u32 seq;
	bool processacks;
	struct sk_buff *old_skb;

	if (skb) {
		/* Continue processing fpdu */
		/* pau_list head doubles as the end-of-list sentinel */
		if (skb->next == (struct sk_buff *)&nesqp->pau_list)
			goto out;
		skb = skb->next;
		processacks = false;
	} else {
		/* Starting a new one */
		if (skb_queue_empty(&nesqp->pau_list))
			goto out;
		skb = skb_peek(&nesqp->pau_list);
		processacks = true;
	}

	while (1) {
		if (skb_queue_empty(&nesqp->pau_list))
			goto out;

		seq = nes_get_seq(skb, ack, wnd, fin_rcvd, rst_rcvd);
		if (seq == nextseq) {
			/* Zero-length segments only matter when starting a new
			 * fpdu (processacks); otherwise keep scanning. */
			if (skb->len || processacks)
				break;
		} else if (after(seq, nextseq)) {
			/* Gap in the sequence space - expected data not here yet */
			goto out;
		}

		/* Stale or consumed segment: unlink, free, drop its cm_node ref */
		old_skb = skb;
		skb = skb->next;
		skb_unlink(old_skb, &nesqp->pau_list);
		nes_mgt_free_skb(nesdev, old_skb, PCI_DMA_TODEVICE);
		nes_rem_ref_cm_node(nesqp->cm_node);
		if (skb == (struct sk_buff *)&nesqp->pau_list)
			goto out;	/* walked off the end of the list */
	}
	return skb;

out:
	return NULL;
}
/**
 * get_fpdu_info - Find the next complete fpdu and return its fragments.
 * @nesdev: owning device
 * @nesqp: QP whose pau_list is scanned
 * @pau_fpdu_info: out - newly allocated fpdu descriptor, or NULL when no
 *                 complete fpdu is available yet
 *
 * Reads the MPA length from the head segment, then gathers up to
 * MAX_FPDU_FRAGS fragments until the whole (4-byte padded) fpdu is
 * covered.  On success it builds a pau_fpdu_info whose Ethernet/IP/TCP
 * header (reused in place, or copied when the first fragment is only
 * partially consumed) is rewritten with the loopback source address and
 * fresh length/seq/ack/window fields, advances pau_rcv_nxt, and
 * trims/unlinks the consumed skbs for the next pass.
 *
 * Return: 0 on success or when no fpdu is ready; -ENOMEM on allocation
 * failure (any partially acquired resources are released).
 */
static int get_fpdu_info(struct nes_device *nesdev, struct nes_qp *nesqp,
			 struct pau_fpdu_info **pau_fpdu_info)
{
	struct sk_buff *skb;
	struct iphdr *iph;
	struct tcphdr *tcph;
	struct nes_rskb_cb *cb;
	struct pau_fpdu_info *fpdu_info = NULL;
	struct pau_fpdu_frag frags[MAX_FPDU_FRAGS];
	u32 fpdu_len = 0;
	u32 tmp_len;
	int frag_cnt = 0;
	u32 tot_len;
	u32 frag_tot;
	u32 ack;
	u32 fin_rcvd;
	u32 rst_rcvd;
	u16 wnd;
	int i;
	int rc = 0;

	*pau_fpdu_info = NULL;

	skb = nes_get_next_skb(nesdev, nesqp, NULL, nesqp->pau_rcv_nxt, &ack, &wnd, &fin_rcvd, &rst_rcvd);
	if (!skb)
		goto out;

	cb = (struct nes_rskb_cb *)&skb->cb[0];
	if (skb->len) {
		/* MPA length is the first 16 bits of the payload; add the
		 * framing overhead and round up to a 4-byte boundary. */
		fpdu_len = be16_to_cpu(*(__be16 *) skb->data) + MPA_FRAMING;
		fpdu_len = (fpdu_len + 3) & 0xfffffffc;
		tmp_len = fpdu_len;

		/* See if we have all of the fpdu */
		frag_tot = 0;
		memset(&frags, 0, sizeof frags);
		for (i = 0; i < MAX_FPDU_FRAGS; i++) {
			frags[i].physaddr = cb->busaddr;
			frags[i].physaddr += skb->data - cb->data_start;
			frags[i].frag_len = min(tmp_len, skb->len);
			frags[i].skb = skb;
			/* cmplt: this fpdu consumes the skb entirely */
			frags[i].cmplt = (skb->len == frags[i].frag_len);
			frag_tot += frags[i].frag_len;
			frag_cnt++;

			tmp_len -= frags[i].frag_len;
			if (tmp_len == 0)
				break;

			skb = nes_get_next_skb(nesdev, nesqp, skb,
					       nesqp->pau_rcv_nxt + frag_tot, &ack, &wnd, &fin_rcvd, &rst_rcvd);
			if (!skb)
				goto out;
			if (rst_rcvd) {
				/* rst received in the middle of fpdu */
				/* NOTE(review): fragments freed here do not drop
				 * a cm_node reference the way the download
				 * callback does - confirm the ref accounting. */
				for (; i >= 0; i--) {
					skb_unlink(frags[i].skb, &nesqp->pau_list);
					nes_mgt_free_skb(nesdev, frags[i].skb, PCI_DMA_TODEVICE);
				}
				/* Forward only the rst segment itself */
				cb = (struct nes_rskb_cb *)&skb->cb[0];
				frags[0].physaddr = cb->busaddr;
				frags[0].physaddr += skb->data - cb->data_start;
				frags[0].frag_len = skb->len;
				frags[0].skb = skb;
				frags[0].cmplt = true;
				frag_cnt = 1;
				break;
			}

			cb = (struct nes_rskb_cb *)&skb->cb[0];
		}
	} else {
		/* no data */
		frags[0].physaddr = cb->busaddr;
		frags[0].frag_len = 0;
		frags[0].skb = skb;
		frags[0].cmplt = true;
		frag_cnt = 1;
	}

	/* Found one */
	fpdu_info = kzalloc(sizeof(*fpdu_info), GFP_ATOMIC);
	if (!fpdu_info) {
		rc = -ENOMEM;
		goto out;
	}

	fpdu_info->cqp_request = nes_get_cqp_request(nesdev);
	if (fpdu_info->cqp_request == NULL) {
		nes_debug(NES_DBG_PAU, "Failed to get a cqp_request.\n");
		rc = -ENOMEM;
		goto out;
	}

	cb = (struct nes_rskb_cb *)&frags[0].skb->cb[0];
	iph = (struct iphdr *)(cb->data_start + ETH_HLEN);
	tcph = (struct tcphdr *)(((char *)iph) + (4 * iph->ihl));
	/* Header length = everything from frame start through TCP options */
	fpdu_info->hdr_len = (((unsigned char *)tcph) + 4 * (tcph->doff)) - cb->data_start;
	fpdu_info->data_len = fpdu_len;
	tot_len = fpdu_info->hdr_len + fpdu_len - ETH_HLEN;

	if (frags[0].cmplt) {
		/* First skb fully consumed - its headers can be used in place */
		fpdu_info->hdr_pbase = cb->busaddr;
		fpdu_info->hdr_vbase = NULL;
	} else {
		/* First skb keeps residual data, so copy the headers out */
		fpdu_info->hdr_vbase = pci_alloc_consistent(nesdev->pcidev,
							    fpdu_info->hdr_len, &fpdu_info->hdr_pbase);
		if (!fpdu_info->hdr_vbase) {
			nes_debug(NES_DBG_PAU, "Unable to allocate memory for pau first frag\n");
			rc = -ENOMEM;
			goto out;
		}

		/* Copy hdrs, adjusting len and seqnum */
		memcpy(fpdu_info->hdr_vbase, cb->data_start, fpdu_info->hdr_len);
		iph = (struct iphdr *)(fpdu_info->hdr_vbase + ETH_HLEN);
		tcph = (struct tcphdr *)(((char *)iph) + (4 * iph->ihl));
	}

	/* Rewrite for loopback (127.0.0.1) re-injection with fresh seq/ack/wnd */
	iph->tot_len = cpu_to_be16(tot_len);
	iph->saddr = cpu_to_be32(0x7f000001);

	tcph->seq = cpu_to_be32(nesqp->pau_rcv_nxt);
	tcph->ack_seq = cpu_to_be32(ack);
	tcph->window = cpu_to_be16(wnd);

	/* FIN consumes one sequence number as well */
	nesqp->pau_rcv_nxt += fpdu_len + fin_rcvd;

	memcpy(fpdu_info->frags, frags, sizeof(fpdu_info->frags));
	fpdu_info->frag_cnt = frag_cnt;
	fpdu_info->nesqp = nesqp;
	*pau_fpdu_info = fpdu_info;

	/* Update skb's for next pass */
	for (i = 0; i < frag_cnt; i++) {
		cb = (struct nes_rskb_cb *)&frags[i].skb->cb[0];
		skb_pull(frags[i].skb, frags[i].frag_len);
		if (frags[i].skb->len == 0) {
			/* Pull skb off the list - it will be freed in the callback */
			if (!skb_queue_empty(&nesqp->pau_list))
				skb_unlink(frags[i].skb, &nesqp->pau_list);
		} else {
			/* Last skb still has data so update the seq */
			iph = (struct iphdr *)(cb->data_start + ETH_HLEN);
			tcph = (struct tcphdr *)(((char *)iph) + (4 * iph->ihl));
			tcph->seq = cpu_to_be32(nesqp->pau_rcv_nxt);
		}
	}

out:
	if (rc) {
		/* Error path: release whatever was acquired */
		if (fpdu_info) {
			if (fpdu_info->cqp_request)
				nes_put_cqp_request(nesdev, fpdu_info->cqp_request);
			kfree(fpdu_info);
		}
	}

	return rc;
}
/**
 * forward_fpdus - send complete fpdus, one at a time
 * @nesvnic: vnic used for the downloads
 * @nesqp: QP whose queued fpdus are forwarded
 *
 * Loops pulling one complete fpdu at a time via get_fpdu_info() and
 * posting a DOWNLOAD_SEGMENT CQP request describing its header buffer
 * and up to four data fragments.  Note that pau_lock is held across the
 * whole build-and-post of each request.  Returns get_fpdu_info()'s
 * result when no more complete fpdus are available (0 or -ENOMEM).
 */
static int forward_fpdus(struct nes_vnic *nesvnic, struct nes_qp *nesqp)
{
	struct nes_device *nesdev = nesvnic->nesdev;
	struct pau_fpdu_info *fpdu_info;
	struct nes_hw_cqp_wqe *cqp_wqe;
	struct nes_cqp_request *cqp_request;
	unsigned long flags;
	u64 u64tmp;
	u32 u32tmp;
	int rc;

	while (1) {
		spin_lock_irqsave(&nesqp->pau_lock, flags);
		rc = get_fpdu_info(nesdev, nesqp, &fpdu_info);
		if (rc || (fpdu_info == NULL)) {
			spin_unlock_irqrestore(&nesqp->pau_lock, flags);
			return rc;
		}

		cqp_request = fpdu_info->cqp_request;
		cqp_wqe = &cqp_request->cqp_wqe;
		nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_DL_OPCODE_IDX,
				    NES_CQP_DOWNLOAD_SEGMENT |
				    (((u32)nesvnic->logical_port) << NES_CQP_OP_LOGICAL_PORT_SHIFT));

		/* High half: header length; low half: header + data total */
		u32tmp = fpdu_info->hdr_len << 16;
		u32tmp |= fpdu_info->hdr_len + (u32)fpdu_info->data_len;
		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_DL_LENGTH_0_TOTAL_IDX,
				    u32tmp);

		/* Fragment lengths packed two per 32-bit word */
		u32tmp = (fpdu_info->frags[1].frag_len << 16) | fpdu_info->frags[0].frag_len;
		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_LENGTH_2_1_IDX,
				    u32tmp);

		u32tmp = (fpdu_info->frags[3].frag_len << 16) | fpdu_info->frags[2].frag_len;
		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_LENGTH_4_3_IDX,
				    u32tmp);

		/* Frag 0 is the (possibly copied) header buffer; frags 1-4
		 * carry the data fragment bus addresses. */
		u64tmp = (u64)fpdu_info->hdr_pbase;
		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX,
				    lower_32_bits(u64tmp));
		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_HIGH_IDX,
				    upper_32_bits(u64tmp));

		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_LOW_IDX,
				    lower_32_bits(fpdu_info->frags[0].physaddr));
		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_HIGH_IDX,
				    upper_32_bits(fpdu_info->frags[0].physaddr));

		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG2_LOW_IDX,
				    lower_32_bits(fpdu_info->frags[1].physaddr));
		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG2_HIGH_IDX,
				    upper_32_bits(fpdu_info->frags[1].physaddr));

		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG3_LOW_IDX,
				    lower_32_bits(fpdu_info->frags[2].physaddr));
		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG3_HIGH_IDX,
				    upper_32_bits(fpdu_info->frags[2].physaddr));

		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG4_LOW_IDX,
				    lower_32_bits(fpdu_info->frags[3].physaddr));
		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG4_HIGH_IDX,
				    upper_32_bits(fpdu_info->frags[3].physaddr));

		/* fpdu_info and its skbs are released in nes_download_callback() */
		cqp_request->cqp_callback_pointer = fpdu_info;
		cqp_request->callback = 1;
		cqp_request->cqp_callback = nes_download_callback;

		atomic_set(&cqp_request->refcount, 1);
		nes_post_cqp_request(nesdev, cqp_request);
		spin_unlock_irqrestore(&nesqp->pau_lock, flags);
	}

	return 0;
}
  427. static void process_fpdus(struct nes_vnic *nesvnic, struct nes_qp *nesqp)
  428. {
  429. int again = 1;
  430. unsigned long flags;
  431. do {
  432. /* Ignore rc - if it failed, tcp retries will cause it to try again */
  433. forward_fpdus(nesvnic, nesqp);
  434. spin_lock_irqsave(&nesqp->pau_lock, flags);
  435. if (nesqp->pau_pending) {
  436. nesqp->pau_pending = 0;
  437. } else {
  438. nesqp->pau_busy = 0;
  439. again = 0;
  440. }
  441. spin_unlock_irqrestore(&nesqp->pau_lock, flags);
  442. } while (again);
  443. }
/**
 * queue_fpdus - Handle fpdu's that hw passed up to sw
 * @skb: received frame; skb->data currently points at the IP header
 * @nesvnic: vnic the frame arrived on
 * @nesqp: QP the frame belongs to
 *
 * Trims the skb to the IP total length, strips the IP/TCP headers,
 * validates the sequence number against the receive window (dropping
 * out-of-window segments), and inserts the skb into nesqp->pau_list in
 * sequence order.  If the QP is PAU_READY and not already being
 * serviced, kicks off fpdu processing.
 */
static void queue_fpdus(struct sk_buff *skb, struct nes_vnic *nesvnic, struct nes_qp *nesqp)
{
	struct sk_buff *tmpskb;
	struct nes_rskb_cb *cb;
	struct iphdr *iph;
	struct tcphdr *tcph;
	unsigned char *tcph_end;
	u32 rcv_nxt;
	u32 rcv_wnd;
	u32 seqnum;
	u32 len;
	bool process_it = false;
	unsigned long flags;

	/* Move data ptr to after tcp header */
	iph = (struct iphdr *)skb->data;
	tcph = (struct tcphdr *)(((char *)iph) + (4 * iph->ihl));
	seqnum = be32_to_cpu(tcph->seq);
	tcph_end = (((char *)tcph) + (4 * tcph->doff));

	len = be16_to_cpu(iph->tot_len);
	if (skb->len > len)
		skb_trim(skb, len);	/* drop padding beyond the IP datagram */

	skb_pull(skb, tcph_end - skb->data);

	/* Initialize tracking values */
	cb = (struct nes_rskb_cb *)&skb->cb[0];
	cb->seqnum = seqnum;

	/* Make sure data is in the receive window */
	rcv_nxt = nesqp->pau_rcv_nxt;
	rcv_wnd = le32_to_cpu(nesqp->nesqp_context->rcv_wnd);
	if (!between(seqnum, rcv_nxt, (rcv_nxt + rcv_wnd))) {
		/* Out of window: free the skb and drop its cm_node reference */
		nes_mgt_free_skb(nesvnic->nesdev, skb, PCI_DMA_TODEVICE);
		nes_rem_ref_cm_node(nesqp->cm_node);
		return;
	}

	spin_lock_irqsave(&nesqp->pau_lock, flags);

	/* If processing is already running, just flag more work pending */
	if (nesqp->pau_busy)
		nesqp->pau_pending = 1;
	else
		nesqp->pau_busy = 1;

	/* Queue skb by sequence number */
	if (skb_queue_len(&nesqp->pau_list) == 0) {
		skb_queue_head(&nesqp->pau_list, skb);
	} else {
		/* Find the first queued segment with a larger seq and insert
		 * before it (insert-before-head == append at tail). */
		tmpskb = nesqp->pau_list.next;
		while (tmpskb != (struct sk_buff *)&nesqp->pau_list) {
			cb = (struct nes_rskb_cb *)&tmpskb->cb[0];
			if (before(seqnum, cb->seqnum))
				break;
			tmpskb = tmpskb->next;
		}
		skb_insert(tmpskb, skb, &nesqp->pau_list);
	}
	if (nesqp->pau_state == PAU_READY)
		process_it = true;
	spin_unlock_irqrestore(&nesqp->pau_lock, flags);

	if (process_it)
		process_fpdus(nesvnic, nesqp);

	return;
}
/**
 * mgt_thread - Handle mgt skbs in a safe context
 * @context: the struct nes_vnic this thread services
 *
 * Kernel thread body: sleeps until skbs appear on mgt_skb_list, then
 * for each one records the original frame start, DMA-maps it for the
 * download engine, and queues its fpdus.  On shutdown it drains any
 * remaining entries, dropping the per-skb cm_node references.
 */
static int mgt_thread(void *context)
{
	struct nes_vnic *nesvnic = context;
	struct sk_buff *skb;
	struct nes_rskb_cb *cb;

	while (!kthread_should_stop()) {
		wait_event_interruptible(nesvnic->mgt_wait_queue,
					 skb_queue_len(&nesvnic->mgt_skb_list) || kthread_should_stop());
		while ((skb_queue_len(&nesvnic->mgt_skb_list)) && !kthread_should_stop()) {
			skb = skb_dequeue(&nesvnic->mgt_skb_list);
			cb = (struct nes_rskb_cb *)&skb->cb[0];
			/* Back up to the Ethernet header before mapping */
			cb->data_start = skb->data - ETH_HLEN;
			cb->busaddr = pci_map_single(nesvnic->nesdev->pcidev, cb->data_start,
						     nesvnic->max_frame_size, PCI_DMA_TODEVICE);
			queue_fpdus(skb, nesvnic, cb->nesqp);
		}
	}

	/* Closing down so delete any entries on the queue */
	while (skb_queue_len(&nesvnic->mgt_skb_list)) {
		skb = skb_dequeue(&nesvnic->mgt_skb_list);
		cb = (struct nes_rskb_cb *)&skb->cb[0];
		nes_rem_ref_cm_node(cb->nesqp->cm_node);
		dev_kfree_skb_any(skb);
	}
	return 0;
}
  534. /**
  535. * nes_queue_skbs - Queue skb so it can be handled in a thread context
  536. */
  537. void nes_queue_mgt_skbs(struct sk_buff *skb, struct nes_vnic *nesvnic, struct nes_qp *nesqp)
  538. {
  539. struct nes_rskb_cb *cb;
  540. cb = (struct nes_rskb_cb *)&skb->cb[0];
  541. cb->nesqp = nesqp;
  542. skb_queue_tail(&nesvnic->mgt_skb_list, skb);
  543. wake_up_interruptible(&nesvnic->mgt_wait_queue);
  544. }
  545. void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
  546. {
  547. struct sk_buff *skb;
  548. unsigned long flags;
  549. atomic_inc(&pau_qps_destroyed);
  550. /* Free packets that have not yet been forwarded */
  551. /* Lock is acquired by skb_dequeue when removing the skb */
  552. spin_lock_irqsave(&nesqp->pau_lock, flags);
  553. while (skb_queue_len(&nesqp->pau_list)) {
  554. skb = skb_dequeue(&nesqp->pau_list);
  555. nes_mgt_free_skb(nesdev, skb, PCI_DMA_TODEVICE);
  556. nes_rem_ref_cm_node(nesqp->cm_node);
  557. }
  558. spin_unlock_irqrestore(&nesqp->pau_lock, flags);
  559. }
/**
 * nes_chg_qh_handler - CQP completion handler for quad-hash changes
 * @nesdev: owning device
 * @cqp_request: completed request; callback pointer carries the pau_qh_chg
 *
 * Two-step state machine keyed off nesqp->pau_state: after the old quad
 * hash entry is deleted (PAU_DEL_QH) a new loopback (127.0.0.1) entry
 * is built and posted; once that add completes (PAU_ADD_LB_QH) the QP
 * becomes PAU_READY and any fpdus queued meanwhile are processed.
 */
static void nes_chg_qh_handler(struct nes_device *nesdev, struct nes_cqp_request *cqp_request)
{
	struct pau_qh_chg *qh_chg = cqp_request->cqp_callback_pointer;
	struct nes_cqp_request *new_request;
	struct nes_hw_cqp_wqe *cqp_wqe;
	struct nes_adapter *nesadapter;
	struct nes_qp *nesqp;
	struct nes_v4_quad nes_quad;
	u32 crc_value;
	u64 u64temp;

	nesadapter = nesdev->nesadapter;
	nesqp = qh_chg->nesqp;

	/* Should we handle the bad completion */
	if (cqp_request->major_code)
		WARN(1, PFX "Invalid cqp_request major_code=0x%x\n",
		     cqp_request->major_code);

	switch (nesqp->pau_state) {
	case PAU_DEL_QH:
		/* Old hash code deleted, now set the new one */
		nesqp->pau_state = PAU_ADD_LB_QH;
		new_request = nes_get_cqp_request(nesdev);
		if (new_request == NULL) {
			/* NOTE(review): qh_chg is leaked and the QP never
			 * reaches PAU_READY on this path - confirm intended. */
			nes_debug(NES_DBG_PAU, "Failed to get a new_request.\n");
			WARN_ON(1);
			return;
		}

		/* Build the loopback quad (ports swapped, src = 127.0.0.1) */
		memset(&nes_quad, 0, sizeof(nes_quad));
		nes_quad.DstIpAdrIndex =
			cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24);
		nes_quad.SrcIpadr = cpu_to_be32(0x7f000001);
		nes_quad.TcpPorts[0] = swab16(nesqp->nesqp_context->tcpPorts[1]);
		nes_quad.TcpPorts[1] = swab16(nesqp->nesqp_context->tcpPorts[0]);

		/* Produce hash key */
		crc_value = get_crc_value(&nes_quad);
		nesqp->hte_index = cpu_to_be32(crc_value ^ 0xffffffff);
		nes_debug(NES_DBG_PAU, "new HTE Index = 0x%08X, CRC = 0x%08X\n",
			  nesqp->hte_index, nesqp->hte_index & nesadapter->hte_index_mask);

		nesqp->hte_index &= nesadapter->hte_index_mask;
		nesqp->nesqp_context->hte_index = cpu_to_le32(nesqp->hte_index);
		nesqp->nesqp_context->ip0 = cpu_to_le32(0x7f000001);
		nesqp->nesqp_context->rcv_nxt = cpu_to_le32(nesqp->pau_rcv_nxt);

		/* Post the quad-hash add; this handler runs again on completion */
		cqp_wqe = &new_request->cqp_wqe;
		nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
		set_wqe_32bit_value(cqp_wqe->wqe_words,
				    NES_CQP_WQE_OPCODE_IDX, NES_CQP_MANAGE_QUAD_HASH |
				    NES_CQP_QP_TYPE_IWARP | NES_CQP_QP_CONTEXT_VALID | NES_CQP_QP_IWARP_STATE_RTS);
		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id);
		u64temp = (u64)nesqp->nesqp_context_pbase;
		set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, u64temp);

		nes_debug(NES_DBG_PAU, "Waiting for CQP completion for adding the quad hash.\n");

		new_request->cqp_callback_pointer = qh_chg;
		new_request->callback = 1;
		new_request->cqp_callback = nes_chg_qh_handler;
		atomic_set(&new_request->refcount, 1);
		nes_post_cqp_request(nesdev, new_request);
		break;

	case PAU_ADD_LB_QH:
		/* Start processing the queued fpdu's */
		nesqp->pau_state = PAU_READY;
		process_fpdus(qh_chg->nesvnic, qh_chg->nesqp);
		kfree(qh_chg);
		break;
	}
}
/**
 * nes_change_quad_hash - begin re-pointing the QP's quad hash at loopback
 * @nesdev: owning device
 * @nesvnic: vnic the QP belongs to
 * @nesqp: QP being switched to pau mode
 *
 * Posts a CQP request that deletes the QP's current quad-hash entry and
 * sets pau_state to PAU_DEL_QH; nes_chg_qh_handler() continues the
 * sequence when the delete completes.
 *
 * Return: 0 on success, -ENOMEM if the request or bookkeeping
 * allocation fails (partially acquired resources are released).
 */
static int nes_change_quad_hash(struct nes_device *nesdev,
				struct nes_vnic *nesvnic, struct nes_qp *nesqp)
{
	struct nes_cqp_request *cqp_request = NULL;
	struct pau_qh_chg *qh_chg = NULL;
	u64 u64temp;
	struct nes_hw_cqp_wqe *cqp_wqe;
	int ret = 0;

	cqp_request = nes_get_cqp_request(nesdev);
	if (cqp_request == NULL) {
		nes_debug(NES_DBG_PAU, "Failed to get a cqp_request.\n");
		ret = -ENOMEM;
		goto chg_qh_err;
	}

	/* Bookkeeping passed through both CQP completions */
	qh_chg = kmalloc(sizeof *qh_chg, GFP_ATOMIC);
	if (!qh_chg) {
		ret = -ENOMEM;
		goto chg_qh_err;
	}
	qh_chg->nesdev = nesdev;
	qh_chg->nesvnic = nesvnic;
	qh_chg->nesqp = nesqp;
	nesqp->pau_state = PAU_DEL_QH;

	/* Build and post the quad-hash delete request */
	cqp_wqe = &cqp_request->cqp_wqe;
	nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
	set_wqe_32bit_value(cqp_wqe->wqe_words,
			    NES_CQP_WQE_OPCODE_IDX, NES_CQP_MANAGE_QUAD_HASH | NES_CQP_QP_DEL_HTE |
			    NES_CQP_QP_TYPE_IWARP | NES_CQP_QP_CONTEXT_VALID | NES_CQP_QP_IWARP_STATE_RTS);
	set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id);
	u64temp = (u64)nesqp->nesqp_context_pbase;
	set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, u64temp);

	nes_debug(NES_DBG_PAU, "Waiting for CQP completion for deleting the quad hash.\n");

	cqp_request->cqp_callback_pointer = qh_chg;
	cqp_request->callback = 1;
	cqp_request->cqp_callback = nes_chg_qh_handler;
	atomic_set(&cqp_request->refcount, 1);
	nes_post_cqp_request(nesdev, cqp_request);

	return ret;

chg_qh_err:
	kfree(qh_chg);
	if (cqp_request)
		nes_put_cqp_request(nesdev, cqp_request);
	return ret;
}
/**
 * nes_mgt_ce_handler
 * This management code deals with any packed and unaligned (pau) fpdu's
 * that the hardware cannot handle.
 * @nesdev: owning device
 * @cq: completed mgt CQ (embedded in a struct nes_vnic_mgt)
 *
 * Drains valid CQEs: looks up the QP for each, switches it into pau
 * mode on first contact (initializing its pau list/lock and starting
 * the quad-hash change), unmaps the received skb and hands it to the
 * mgt thread.  CQEs are acknowledged to the hardware in batches, and RQ
 * replenishment is triggered once half the ring has been consumed.
 */
static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
{
	struct nes_vnic_mgt *mgtvnic = container_of(cq, struct nes_vnic_mgt, mgt_cq);
	struct nes_adapter *nesadapter = nesdev->nesadapter;
	u32 head;
	u32 cq_size;
	u32 cqe_count = 0;
	u32 cqe_misc;
	u32 qp_id = 0;
	u32 skbs_needed;
	unsigned long context;
	struct nes_qp *nesqp;
	struct sk_buff *rx_skb;
	struct nes_rskb_cb *cb;

	head = cq->cq_head;
	cq_size = cq->cq_size;

	while (1) {
		cqe_misc = le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]);
		if (!(cqe_misc & NES_NIC_CQE_VALID))
			break;

		nesqp = NULL;
		if (cqe_misc & NES_NIC_CQE_ACCQP_VALID) {
			qp_id = le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_ACCQP_ID_IDX]);
			qp_id &= 0x001fffff;
			/* NOTE(review): only an upper-bound check here, yet
			 * qp_table is indexed with qp_id - NES_FIRST_QPN; this
			 * relies on the hardware never reporting a qp_id below
			 * NES_FIRST_QPN - confirm. */
			if (qp_id < nesadapter->max_qp) {
				context = (unsigned long)nesadapter->qp_table[qp_id - NES_FIRST_QPN];
				nesqp = (struct nes_qp *)context;
			}
		}

		if (nesqp) {
			if (nesqp->pau_mode == false) {
				nesqp->pau_mode = true; /* First time for this qp */
				nesqp->pau_rcv_nxt = le32_to_cpu(
					cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
				skb_queue_head_init(&nesqp->pau_list);
				spin_lock_init(&nesqp->pau_lock);
				atomic_inc(&pau_qps_created);
				nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
			}

			/* Detach the received buffer from the RQ ring */
			rx_skb = mgtvnic->mgt.rx_skb[mgtvnic->mgt.rq_tail];
			rx_skb->len = 0;
			/* Low 16 bits of the misc word carry the frame length */
			skb_put(rx_skb, cqe_misc & 0x0000ffff);
			rx_skb->protocol = eth_type_trans(rx_skb, mgtvnic->nesvnic->netdev);
			cb = (struct nes_rskb_cb *)&rx_skb->cb[0];
			pci_unmap_single(nesdev->pcidev, cb->busaddr, cb->maplen, PCI_DMA_FROMDEVICE);
			cb->busaddr = 0;
			mgtvnic->mgt.rq_tail++;
			mgtvnic->mgt.rq_tail &= mgtvnic->mgt.rq_size - 1;

			/* Reference is dropped when the skb is freed downstream */
			nes_add_ref_cm_node(nesqp->cm_node);
			nes_queue_mgt_skbs(rx_skb, mgtvnic->nesvnic, nesqp);
		} else {
			printk(KERN_ERR PFX "Invalid QP %d for packed/unaligned handling\n", qp_id);
		}

		cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX] = 0;
		cqe_count++;
		if (++head >= cq_size)
			head = 0;

		if (cqe_count == 255) {
			/* Replenish mgt CQ */
			nes_write32(nesdev->regs + NES_CQE_ALLOC, cq->cq_number | (cqe_count << 16));
			nesdev->currcq_count += cqe_count;
			cqe_count = 0;
		}

		skbs_needed = atomic_inc_return(&mgtvnic->rx_skbs_needed);
		if (skbs_needed > (mgtvnic->mgt.rq_size >> 1))
			nes_replenish_mgt_rq(mgtvnic);
	}

	cq->cq_head = head;
	nes_write32(nesdev->regs + NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT |
		    cq->cq_number | (cqe_count << 16));
	nes_read32(nesdev->regs + NES_CQE_ALLOC);
	nesdev->currcq_count += cqe_count;
}
/**
 * nes_init_mgt_qp - create the management QPs used for packed/unaligned
 * FPDU handling, including their CQs and receive buffer rings
 */
  752. int nes_init_mgt_qp(struct nes_device *nesdev, struct net_device *netdev, struct nes_vnic *nesvnic)
  753. {
  754. struct nes_vnic_mgt *mgtvnic;
  755. u32 counter;
  756. void *vmem;
  757. dma_addr_t pmem;
  758. struct nes_hw_cqp_wqe *cqp_wqe;
  759. u32 cqp_head;
  760. unsigned long flags;
  761. struct nes_hw_nic_qp_context *mgt_context;
  762. u64 u64temp;
  763. struct nes_hw_nic_rq_wqe *mgt_rqe;
  764. struct sk_buff *skb;
  765. u32 wqe_count;
  766. struct nes_rskb_cb *cb;
  767. u32 mgt_mem_size;
  768. void *mgt_vbase;
  769. dma_addr_t mgt_pbase;
  770. int i;
  771. int ret;
  772. /* Allocate space the all mgt QPs once */
  773. mgtvnic = kcalloc(NES_MGT_QP_COUNT, sizeof(struct nes_vnic_mgt),
  774. GFP_KERNEL);
  775. if (!mgtvnic)
  776. return -ENOMEM;
  777. /* Allocate fragment, RQ, and CQ; Reuse CEQ based on the PCI function */
  778. /* We are not sending from this NIC so sq is not allocated */
  779. mgt_mem_size = 256 +
  780. (NES_MGT_WQ_COUNT * sizeof(struct nes_hw_nic_rq_wqe)) +
  781. (NES_MGT_WQ_COUNT * sizeof(struct nes_hw_nic_cqe)) +
  782. sizeof(struct nes_hw_nic_qp_context);
  783. mgt_mem_size = (mgt_mem_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
  784. mgt_vbase = pci_alloc_consistent(nesdev->pcidev, NES_MGT_QP_COUNT * mgt_mem_size, &mgt_pbase);
  785. if (!mgt_vbase) {
  786. kfree(mgtvnic);
  787. nes_debug(NES_DBG_INIT, "Unable to allocate memory for mgt host descriptor rings\n");
  788. return -ENOMEM;
  789. }
  790. nesvnic->mgt_mem_size = NES_MGT_QP_COUNT * mgt_mem_size;
  791. nesvnic->mgt_vbase = mgt_vbase;
  792. nesvnic->mgt_pbase = mgt_pbase;
  793. skb_queue_head_init(&nesvnic->mgt_skb_list);
  794. init_waitqueue_head(&nesvnic->mgt_wait_queue);
  795. nesvnic->mgt_thread = kthread_run(mgt_thread, nesvnic, "nes_mgt_thread");
  796. for (i = 0; i < NES_MGT_QP_COUNT; i++) {
  797. mgtvnic->nesvnic = nesvnic;
  798. mgtvnic->mgt.qp_id = nesdev->mac_index + NES_MGT_QP_OFFSET + i;
  799. memset(mgt_vbase, 0, mgt_mem_size);
  800. nes_debug(NES_DBG_INIT, "Allocated mgt QP structures at %p (phys = %016lX), size = %u.\n",
  801. mgt_vbase, (unsigned long)mgt_pbase, mgt_mem_size);
  802. vmem = (void *)(((unsigned long)mgt_vbase + (256 - 1)) &
  803. ~(unsigned long)(256 - 1));
  804. pmem = (dma_addr_t)(((unsigned long long)mgt_pbase + (256 - 1)) &
  805. ~(unsigned long long)(256 - 1));
  806. spin_lock_init(&mgtvnic->mgt.rq_lock);
  807. /* setup the RQ */
  808. mgtvnic->mgt.rq_vbase = vmem;
  809. mgtvnic->mgt.rq_pbase = pmem;
  810. mgtvnic->mgt.rq_head = 0;
  811. mgtvnic->mgt.rq_tail = 0;
  812. mgtvnic->mgt.rq_size = NES_MGT_WQ_COUNT;
  813. /* setup the CQ */
  814. vmem += (NES_MGT_WQ_COUNT * sizeof(struct nes_hw_nic_rq_wqe));
  815. pmem += (NES_MGT_WQ_COUNT * sizeof(struct nes_hw_nic_rq_wqe));
  816. mgtvnic->mgt_cq.cq_number = mgtvnic->mgt.qp_id;
  817. mgtvnic->mgt_cq.cq_vbase = vmem;
  818. mgtvnic->mgt_cq.cq_pbase = pmem;
  819. mgtvnic->mgt_cq.cq_head = 0;
  820. mgtvnic->mgt_cq.cq_size = NES_MGT_WQ_COUNT;
  821. mgtvnic->mgt_cq.ce_handler = nes_mgt_ce_handler;
  822. /* Send CreateCQ request to CQP */
  823. spin_lock_irqsave(&nesdev->cqp.lock, flags);
  824. cqp_head = nesdev->cqp.sq_head;
  825. cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
  826. nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
  827. cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(
  828. NES_CQP_CREATE_CQ | NES_CQP_CQ_CEQ_VALID |
  829. ((u32)mgtvnic->mgt_cq.cq_size << 16));
  830. cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(
  831. mgtvnic->mgt_cq.cq_number | ((u32)nesdev->ceq_index << 16));
  832. u64temp = (u64)mgtvnic->mgt_cq.cq_pbase;
  833. set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_PBL_LOW_IDX, u64temp);
  834. cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] = 0;
  835. u64temp = (unsigned long)&mgtvnic->mgt_cq;
  836. cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_LOW_IDX] = cpu_to_le32((u32)(u64temp >> 1));
  837. cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] =
  838. cpu_to_le32(((u32)((u64temp) >> 33)) & 0x7FFFFFFF);
  839. cqp_wqe->wqe_words[NES_CQP_CQ_WQE_DOORBELL_INDEX_HIGH_IDX] = 0;
  840. if (++cqp_head >= nesdev->cqp.sq_size)
  841. cqp_head = 0;
  842. cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
  843. nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
  844. /* Send CreateQP request to CQP */
  845. mgt_context = (void *)(&mgtvnic->mgt_cq.cq_vbase[mgtvnic->mgt_cq.cq_size]);
  846. mgt_context->context_words[NES_NIC_CTX_MISC_IDX] =
  847. cpu_to_le32((u32)NES_MGT_CTX_SIZE |
  848. ((u32)PCI_FUNC(nesdev->pcidev->devfn) << 12));
  849. nes_debug(NES_DBG_INIT, "RX_WINDOW_BUFFER_PAGE_TABLE_SIZE = 0x%08X, RX_WINDOW_BUFFER_SIZE = 0x%08X\n",
  850. nes_read_indexed(nesdev, NES_IDX_RX_WINDOW_BUFFER_PAGE_TABLE_SIZE),
  851. nes_read_indexed(nesdev, NES_IDX_RX_WINDOW_BUFFER_SIZE));
  852. if (nes_read_indexed(nesdev, NES_IDX_RX_WINDOW_BUFFER_SIZE) != 0)
  853. mgt_context->context_words[NES_NIC_CTX_MISC_IDX] |= cpu_to_le32(NES_NIC_BACK_STORE);
  854. u64temp = (u64)mgtvnic->mgt.rq_pbase;
  855. mgt_context->context_words[NES_NIC_CTX_SQ_LOW_IDX] = cpu_to_le32((u32)u64temp);
  856. mgt_context->context_words[NES_NIC_CTX_SQ_HIGH_IDX] = cpu_to_le32((u32)(u64temp >> 32));
  857. u64temp = (u64)mgtvnic->mgt.rq_pbase;
  858. mgt_context->context_words[NES_NIC_CTX_RQ_LOW_IDX] = cpu_to_le32((u32)u64temp);
  859. mgt_context->context_words[NES_NIC_CTX_RQ_HIGH_IDX] = cpu_to_le32((u32)(u64temp >> 32));
  860. cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_CREATE_QP |
  861. NES_CQP_QP_TYPE_NIC);
  862. cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(mgtvnic->mgt.qp_id);
  863. u64temp = (u64)mgtvnic->mgt_cq.cq_pbase +
  864. (mgtvnic->mgt_cq.cq_size * sizeof(struct nes_hw_nic_cqe));
  865. set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, u64temp);
  866. if (++cqp_head >= nesdev->cqp.sq_size)
  867. cqp_head = 0;
  868. nesdev->cqp.sq_head = cqp_head;
  869. barrier();
  870. /* Ring doorbell (2 WQEs) */
  871. nes_write32(nesdev->regs + NES_WQE_ALLOC, 0x02800000 | nesdev->cqp.qp_id);
  872. spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
  873. nes_debug(NES_DBG_INIT, "Waiting for create MGT QP%u to complete.\n",
  874. mgtvnic->mgt.qp_id);
  875. ret = wait_event_timeout(nesdev->cqp.waitq, (nesdev->cqp.sq_tail == cqp_head),
  876. NES_EVENT_TIMEOUT);
  877. nes_debug(NES_DBG_INIT, "Create MGT QP%u completed, wait_event_timeout ret = %u.\n",
  878. mgtvnic->mgt.qp_id, ret);
  879. if (!ret) {
  880. nes_debug(NES_DBG_INIT, "MGT QP%u create timeout expired\n", mgtvnic->mgt.qp_id);
  881. if (i == 0) {
  882. pci_free_consistent(nesdev->pcidev, nesvnic->mgt_mem_size, nesvnic->mgt_vbase,
  883. nesvnic->mgt_pbase);
  884. kfree(mgtvnic);
  885. } else {
  886. nes_destroy_mgt(nesvnic);
  887. }
  888. return -EIO;
  889. }
  890. /* Populate the RQ */
  891. for (counter = 0; counter < (NES_MGT_WQ_COUNT - 1); counter++) {
  892. skb = dev_alloc_skb(nesvnic->max_frame_size);
  893. if (!skb) {
  894. nes_debug(NES_DBG_INIT, "%s: out of memory for receive skb\n", netdev->name);
  895. return -ENOMEM;
  896. }
  897. skb->dev = netdev;
  898. pmem = pci_map_single(nesdev->pcidev, skb->data,
  899. nesvnic->max_frame_size, PCI_DMA_FROMDEVICE);
  900. cb = (struct nes_rskb_cb *)&skb->cb[0];
  901. cb->busaddr = pmem;
  902. cb->maplen = nesvnic->max_frame_size;
  903. mgt_rqe = &mgtvnic->mgt.rq_vbase[counter];
  904. mgt_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_1_0_IDX] = cpu_to_le32((u32)nesvnic->max_frame_size);
  905. mgt_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_3_2_IDX] = 0;
  906. mgt_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX] = cpu_to_le32((u32)pmem);
  907. mgt_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX] = cpu_to_le32((u32)((u64)pmem >> 32));
  908. mgtvnic->mgt.rx_skb[counter] = skb;
  909. }
  910. timer_setup(&mgtvnic->rq_wqes_timer, nes_mgt_rq_wqes_timeout,
  911. 0);
  912. wqe_count = NES_MGT_WQ_COUNT - 1;
  913. mgtvnic->mgt.rq_head = wqe_count;
  914. barrier();
  915. do {
  916. counter = min(wqe_count, ((u32)255));
  917. wqe_count -= counter;
  918. nes_write32(nesdev->regs + NES_WQE_ALLOC, (counter << 24) | mgtvnic->mgt.qp_id);
  919. } while (wqe_count);
  920. nes_write32(nesdev->regs + NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT |
  921. mgtvnic->mgt_cq.cq_number);
  922. nes_read32(nesdev->regs + NES_CQE_ALLOC);
  923. mgt_vbase += mgt_mem_size;
  924. mgt_pbase += mgt_mem_size;
  925. nesvnic->mgtvnic[i] = mgtvnic++;
  926. }
  927. return 0;
  928. }
/**
 * nes_destroy_mgt - tear down the management (pau) QPs for a vnic
 * @nesvnic: vnic whose management resources are being released
 *
 * Stops the mgt thread, frees any receive skbs still posted on each RQ,
 * issues Destroy-QP and Destroy-CQ CQP requests for every mgtvnic, then
 * releases the shared DMA region and the mgtvnic array.
 */
void nes_destroy_mgt(struct nes_vnic *nesvnic)
{
	struct nes_device *nesdev = nesvnic->nesdev;
	struct nes_vnic_mgt *mgtvnic;
	struct nes_vnic_mgt *first_mgtvnic;
	unsigned long flags;
	struct nes_hw_cqp_wqe *cqp_wqe;
	u32 cqp_head;
	struct sk_buff *rx_skb;
	int i;
	int ret;

	kthread_stop(nesvnic->mgt_thread);

	/* Free remaining NIC receive buffers */
	/* mgtvnic[0] is the base of the kcalloc'd array; keep it for kfree */
	first_mgtvnic = nesvnic->mgtvnic[0];
	for (i = 0; i < NES_MGT_QP_COUNT; i++) {
		mgtvnic = nesvnic->mgtvnic[i];
		if (mgtvnic == NULL)
			continue;	/* slot never initialized */

		/* Release every skb still posted between tail and head */
		while (mgtvnic->mgt.rq_head != mgtvnic->mgt.rq_tail) {
			rx_skb = mgtvnic->mgt.rx_skb[mgtvnic->mgt.rq_tail];
			nes_mgt_free_skb(nesdev, rx_skb, PCI_DMA_FROMDEVICE);
			mgtvnic->mgt.rq_tail++;
			mgtvnic->mgt.rq_tail &= (mgtvnic->mgt.rq_size - 1);
		}

		spin_lock_irqsave(&nesdev->cqp.lock, flags);

		/* Destroy NIC QP */
		cqp_head = nesdev->cqp.sq_head;
		cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
		nes_fill_init_cqp_wqe(cqp_wqe, nesdev);

		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
				    (NES_CQP_DESTROY_QP | NES_CQP_QP_TYPE_NIC));
		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX,
				    mgtvnic->mgt.qp_id);

		if (++cqp_head >= nesdev->cqp.sq_size)
			cqp_head = 0;

		cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];

		/* Destroy NIC CQ */
		nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
				    (NES_CQP_DESTROY_CQ | ((u32)mgtvnic->mgt_cq.cq_size << 16)));
		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX,
				    (mgtvnic->mgt_cq.cq_number | ((u32)nesdev->ceq_index << 16)));

		if (++cqp_head >= nesdev->cqp.sq_size)
			cqp_head = 0;

		nesdev->cqp.sq_head = cqp_head;
		barrier();

		/* Ring doorbell (2 WQEs) */
		nes_write32(nesdev->regs + NES_WQE_ALLOC, 0x02800000 | nesdev->cqp.qp_id);

		spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
		nes_debug(NES_DBG_SHUTDOWN, "Waiting for CQP, cqp_head=%u, cqp.sq_head=%u,"
			  " cqp.sq_tail=%u, cqp.sq_size=%u\n",
			  cqp_head, nesdev->cqp.sq_head,
			  nesdev->cqp.sq_tail, nesdev->cqp.sq_size);

		ret = wait_event_timeout(nesdev->cqp.waitq, (nesdev->cqp.sq_tail == cqp_head),
					 NES_EVENT_TIMEOUT);

		nes_debug(NES_DBG_SHUTDOWN, "Destroy MGT QP returned, wait_event_timeout ret = %u, cqp_head=%u,"
			  " cqp.sq_head=%u, cqp.sq_tail=%u\n",
			  ret, cqp_head, nesdev->cqp.sq_head, nesdev->cqp.sq_tail);
		if (!ret)
			/* Timed out; proceed with teardown regardless */
			nes_debug(NES_DBG_SHUTDOWN, "MGT QP%u destroy timeout expired\n",
				  mgtvnic->mgt.qp_id);

		nesvnic->mgtvnic[i] = NULL;
	}

	if (nesvnic->mgt_vbase) {
		pci_free_consistent(nesdev->pcidev, nesvnic->mgt_mem_size, nesvnic->mgt_vbase,
				    nesvnic->mgt_pbase);
		nesvnic->mgt_vbase = NULL;
		nesvnic->mgt_pbase = 0;
	}

	kfree(first_mgtvnic);
}