/* netxen_nic_ctx.c */
/*
 * Copyright (C) 2003 - 2009 NetXen, Inc.
 * Copyright (C) 2009 - QLogic Corporation.
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called "COPYING".
 *
 */
#include "netxen_nic_hw.h"
#include "netxen_nic.h"

/* Version field packed into the CDRP command signature (see
 * NX_CDRP_SIGNATURE_MAKE in netxen_issue_cmd()). */
#define NXHAL_VERSION	1
  26. static u32
  27. netxen_poll_rsp(struct netxen_adapter *adapter)
  28. {
  29. u32 rsp = NX_CDRP_RSP_OK;
  30. int timeout = 0;
  31. do {
  32. /* give atleast 1ms for firmware to respond */
  33. msleep(1);
  34. if (++timeout > NX_OS_CRB_RETRY_COUNT)
  35. return NX_CDRP_RSP_TIMEOUT;
  36. rsp = NXRD32(adapter, NX_CDRP_CRB_OFFSET);
  37. } while (!NX_CDRP_IS_RSP(rsp));
  38. return rsp;
  39. }
/*
 * Issue one CDRP (card/driver request protocol) command to firmware and
 * collect its response.
 *
 * Sequence: acquire the API semaphore, write the signature and three
 * request arguments into their CRB offsets, then write the opcode into
 * NX_CDRP_CRB_OFFSET (which kicks off processing), and poll for the
 * response via netxen_poll_rsp().
 *
 * Response arguments are read back only for the cmd->rsp fields the
 * caller pre-set to non-zero (e.g. netxen_get_minidump_template_size()
 * memsets cmd.rsp to 1 to request all of them).
 *
 * Returns NX_RCODE_SUCCESS, NX_RCODE_TIMEOUT, or the failure code that
 * firmware left in NX_ARG1_CRB_OFFSET.
 */
static u32
netxen_issue_cmd(struct netxen_adapter *adapter, struct netxen_cmd_args *cmd)
{
	u32 rsp;
	u32 signature = 0;
	u32 rcode = NX_RCODE_SUCCESS;

	signature = NX_CDRP_SIGNATURE_MAKE(adapter->ahw.pci_func,
						NXHAL_VERSION);
	/* Acquire semaphore before accessing CRB */
	if (netxen_api_lock(adapter))
		return NX_RCODE_TIMEOUT;

	NXWR32(adapter, NX_SIGN_CRB_OFFSET, signature);
	NXWR32(adapter, NX_ARG1_CRB_OFFSET, cmd->req.arg1);
	NXWR32(adapter, NX_ARG2_CRB_OFFSET, cmd->req.arg2);
	NXWR32(adapter, NX_ARG3_CRB_OFFSET, cmd->req.arg3);
	/* Writing the opcode register starts command processing. */
	NXWR32(adapter, NX_CDRP_CRB_OFFSET, NX_CDRP_FORM_CMD(cmd->req.cmd));

	rsp = netxen_poll_rsp(adapter);

	if (rsp == NX_CDRP_RSP_TIMEOUT) {
		printk(KERN_ERR "%s: card response timeout.\n",
				netxen_nic_driver_name);
		rcode = NX_RCODE_TIMEOUT;
	} else if (rsp == NX_CDRP_RSP_FAIL) {
		rcode = NXRD32(adapter, NX_ARG1_CRB_OFFSET);
		printk(KERN_ERR "%s: failed card response code:0x%x\n",
				netxen_nic_driver_name, rcode);
	} else if (rsp == NX_CDRP_RSP_OK) {
		cmd->rsp.cmd = NX_RCODE_SUCCESS;
		/* Non-zero rsp fields are readback requests (see above). */
		if (cmd->rsp.arg2)
			cmd->rsp.arg2 = NXRD32(adapter, NX_ARG2_CRB_OFFSET);
		if (cmd->rsp.arg3)
			cmd->rsp.arg3 = NXRD32(adapter, NX_ARG3_CRB_OFFSET);
	}
	/* arg1 readback happens regardless of the response status. */
	if (cmd->rsp.arg1)
		cmd->rsp.arg1 = NXRD32(adapter, NX_ARG1_CRB_OFFSET);

	/* Release semaphore */
	netxen_api_unlock(adapter);
	return rcode;
}
  78. static int
  79. netxen_get_minidump_template_size(struct netxen_adapter *adapter)
  80. {
  81. struct netxen_cmd_args cmd;
  82. memset(&cmd, 0, sizeof(cmd));
  83. cmd.req.cmd = NX_CDRP_CMD_TEMP_SIZE;
  84. memset(&cmd.rsp, 1, sizeof(struct _cdrp_cmd));
  85. netxen_issue_cmd(adapter, &cmd);
  86. if (cmd.rsp.cmd != NX_RCODE_SUCCESS) {
  87. dev_info(&adapter->pdev->dev,
  88. "Can't get template size %d\n", cmd.rsp.cmd);
  89. return -EIO;
  90. }
  91. adapter->mdump.md_template_size = cmd.rsp.arg2;
  92. adapter->mdump.md_template_ver = cmd.rsp.arg3;
  93. return 0;
  94. }
  95. static int
  96. netxen_get_minidump_template(struct netxen_adapter *adapter)
  97. {
  98. dma_addr_t md_template_addr;
  99. void *addr;
  100. u32 size;
  101. struct netxen_cmd_args cmd;
  102. size = adapter->mdump.md_template_size;
  103. if (size == 0) {
  104. dev_err(&adapter->pdev->dev, "Can not capture Minidump "
  105. "template. Invalid template size.\n");
  106. return NX_RCODE_INVALID_ARGS;
  107. }
  108. addr = pci_zalloc_consistent(adapter->pdev, size, &md_template_addr);
  109. if (!addr) {
  110. dev_err(&adapter->pdev->dev, "Unable to allocate dmable memory for template.\n");
  111. return -ENOMEM;
  112. }
  113. memset(&cmd, 0, sizeof(cmd));
  114. memset(&cmd.rsp, 1, sizeof(struct _cdrp_cmd));
  115. cmd.req.cmd = NX_CDRP_CMD_GET_TEMP_HDR;
  116. cmd.req.arg1 = LSD(md_template_addr);
  117. cmd.req.arg2 = MSD(md_template_addr);
  118. cmd.req.arg3 |= size;
  119. netxen_issue_cmd(adapter, &cmd);
  120. if ((cmd.rsp.cmd == NX_RCODE_SUCCESS) && (size == cmd.rsp.arg2)) {
  121. memcpy(adapter->mdump.md_template, addr, size);
  122. } else {
  123. dev_err(&adapter->pdev->dev, "Failed to get minidump template, err_code : %d, requested_size : %d, actual_size : %d\n",
  124. cmd.rsp.cmd, size, cmd.rsp.arg2);
  125. }
  126. pci_free_consistent(adapter->pdev, size, addr, md_template_addr);
  127. return 0;
  128. }
  129. static u32
  130. netxen_check_template_checksum(struct netxen_adapter *adapter)
  131. {
  132. u64 sum = 0 ;
  133. u32 *buff = adapter->mdump.md_template;
  134. int count = adapter->mdump.md_template_size/sizeof(uint32_t) ;
  135. while (count-- > 0)
  136. sum += *buff++ ;
  137. while (sum >> 32)
  138. sum = (sum & 0xFFFFFFFF) + (sum >> 32) ;
  139. return ~sum;
  140. }
/*
 * Discover, fetch and validate the firmware minidump template.
 *
 * On success the template is stored (converted from little-endian to
 * CPU order, in place) in adapter->mdump.md_template and
 * fw_supports_md is set.  Any failure frees the template buffer and
 * leaves minidump support disabled.
 */
int
netxen_setup_minidump(struct netxen_adapter *adapter)
{
	int err = 0, i;
	u32 *template, *tmp_buf;

	err = netxen_get_minidump_template_size(adapter);
	if (err) {
		adapter->mdump.fw_supports_md = 0;
		/*
		 * NOTE(review): netxen_get_minidump_template_size() only
		 * ever returns 0 or -EIO, so these NX_RCODE_* comparisons
		 * appear unreachable — confirm intent.
		 */
		if ((err == NX_RCODE_CMD_INVALID) ||
				(err == NX_RCODE_CMD_NOT_IMPL)) {
			dev_info(&adapter->pdev->dev,
				"Flashed firmware version does not support minidump, minimum version required is [ %u.%u.%u ]\n",
				NX_MD_SUPPORT_MAJOR, NX_MD_SUPPORT_MINOR,
				NX_MD_SUPPORT_SUBVERSION);
		}
		return err;
	}

	if (!adapter->mdump.md_template_size) {
		dev_err(&adapter->pdev->dev, "Error : Invalid template size "
		",should be non-zero.\n");
		return -EIO;
	}

	adapter->mdump.md_template =
		kmalloc(adapter->mdump.md_template_size, GFP_KERNEL);
	if (!adapter->mdump.md_template)
		return -ENOMEM;

	err = netxen_get_minidump_template(adapter);
	if (err) {
		if (err == NX_RCODE_CMD_NOT_IMPL)
			adapter->mdump.fw_supports_md = 0;
		goto free_template;
	}

	/* Checksum must be validated before byte-swapping the words. */
	if (netxen_check_template_checksum(adapter)) {
		dev_err(&adapter->pdev->dev, "Minidump template checksum Error\n");
		err = -EIO;
		goto free_template;
	}

	adapter->mdump.md_capture_mask = NX_DUMP_MASK_DEF;
	tmp_buf = (u32 *) adapter->mdump.md_template;
	template = (u32 *) adapter->mdump.md_template;
	/* Template arrives little-endian; convert each word in place. */
	for (i = 0; i < adapter->mdump.md_template_size/sizeof(u32); i++)
		*template++ = __le32_to_cpu(*tmp_buf++);
	adapter->mdump.md_capture_buff = NULL;
	adapter->mdump.fw_supports_md = 1;
	adapter->mdump.md_enabled = 0;

	return err;

free_template:
	kfree(adapter->mdump.md_template);
	adapter->mdump.md_template = NULL;
	return err;
}
  192. int
  193. nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu)
  194. {
  195. u32 rcode = NX_RCODE_SUCCESS;
  196. struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
  197. struct netxen_cmd_args cmd;
  198. memset(&cmd, 0, sizeof(cmd));
  199. cmd.req.cmd = NX_CDRP_CMD_SET_MTU;
  200. cmd.req.arg1 = recv_ctx->context_id;
  201. cmd.req.arg2 = mtu;
  202. cmd.req.arg3 = 0;
  203. if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE)
  204. rcode = netxen_issue_cmd(adapter, &cmd);
  205. if (rcode != NX_RCODE_SUCCESS)
  206. return -EIO;
  207. return 0;
  208. }
  209. int
  210. nx_fw_cmd_set_gbe_port(struct netxen_adapter *adapter,
  211. u32 speed, u32 duplex, u32 autoneg)
  212. {
  213. struct netxen_cmd_args cmd;
  214. memset(&cmd, 0, sizeof(cmd));
  215. cmd.req.cmd = NX_CDRP_CMD_CONFIG_GBE_PORT;
  216. cmd.req.arg1 = speed;
  217. cmd.req.arg2 = duplex;
  218. cmd.req.arg3 = autoneg;
  219. return netxen_issue_cmd(adapter, &cmd);
  220. }
/*
 * Create the firmware receive context (P3-class hardware path).
 *
 * Builds a host request (nx_hostrq_rx_ctx_t plus per-ring descriptors)
 * in DMA-coherent memory, issues NX_CDRP_CMD_CREATE_RX_CTX, then maps
 * the producer/consumer/interrupt CRB registers that firmware reports
 * back for every rds (receive descriptor) and sds (status descriptor)
 * ring.
 *
 * Returns 0 on success; -ENOMEM on allocation failure; otherwise the
 * firmware response code from netxen_issue_cmd().
 */
static int
nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
{
	void *addr;
	nx_hostrq_rx_ctx_t *prq;
	nx_cardrsp_rx_ctx_t *prsp;
	nx_hostrq_rds_ring_t *prq_rds;
	nx_hostrq_sds_ring_t *prq_sds;
	nx_cardrsp_rds_ring_t *prsp_rds;
	nx_cardrsp_sds_ring_t *prsp_sds;
	struct nx_host_rds_ring *rds_ring;
	struct nx_host_sds_ring *sds_ring;
	struct netxen_cmd_args cmd;

	dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
	u64 phys_addr;

	int i, nrds_rings, nsds_rings;
	size_t rq_size, rsp_size;
	u32 cap, reg, val;

	int err;

	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;

	nrds_rings = adapter->max_rds_rings;
	nsds_rings = adapter->max_sds_rings;

	rq_size =
		SIZEOF_HOSTRQ_RX(nx_hostrq_rx_ctx_t, nrds_rings, nsds_rings);
	rsp_size =
		SIZEOF_CARDRSP_RX(nx_cardrsp_rx_ctx_t, nrds_rings, nsds_rings);

	addr = pci_alloc_consistent(adapter->pdev,
				rq_size, &hostrq_phys_addr);
	if (addr == NULL)
		return -ENOMEM;
	prq = addr;

	addr = pci_alloc_consistent(adapter->pdev,
			rsp_size, &cardrsp_phys_addr);
	if (addr == NULL) {
		err = -ENOMEM;
		goto out_free_rq;
	}
	prsp = addr;

	/*
	 * NOTE(review): unlike the tx path, the request buffer is not
	 * zeroed; every field consumed by firmware appears to be set
	 * explicitly below — confirm no other fields are read.
	 */
	prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);

	/* Advertise host capabilities to firmware. */
	cap = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN);
	cap |= (NX_CAP0_JUMBO_CONTIGUOUS | NX_CAP0_LRO_CONTIGUOUS);

	if (adapter->flags & NETXEN_FW_MSS_CAP)
		cap |= NX_CAP0_HW_LRO_MSS;

	prq->capabilities[0] = cpu_to_le32(cap);
	prq->host_int_crb_mode =
		cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED);
	prq->host_rds_crb_mode =
		cpu_to_le32(NX_HOST_RDS_CRB_MODE_UNIQUE);

	prq->num_rds_rings = cpu_to_le16(nrds_rings);
	prq->num_sds_rings = cpu_to_le16(nsds_rings);
	prq->rds_ring_offset = cpu_to_le32(0);

	/* sds ring descriptors follow the rds ring descriptors. */
	val = le32_to_cpu(prq->rds_ring_offset) +
		(sizeof(nx_hostrq_rds_ring_t) * nrds_rings);
	prq->sds_ring_offset = cpu_to_le32(val);

	prq_rds = (nx_hostrq_rds_ring_t *)(prq->data +
			le32_to_cpu(prq->rds_ring_offset));

	for (i = 0; i < nrds_rings; i++) {
		rds_ring = &recv_ctx->rds_rings[i];

		prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
		prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
		prq_rds[i].ring_kind = cpu_to_le32(i);
		prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
	}

	prq_sds = (nx_hostrq_sds_ring_t *)(prq->data +
			le32_to_cpu(prq->sds_ring_offset));

	for (i = 0; i < nsds_rings; i++) {
		sds_ring = &recv_ctx->sds_rings[i];

		prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
		prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
		prq_sds[i].msi_index = cpu_to_le16(i);
	}

	phys_addr = hostrq_phys_addr;
	memset(&cmd, 0, sizeof(cmd));
	cmd.req.arg1 = (u32)(phys_addr >> 32);
	cmd.req.arg2 = (u32)(phys_addr & 0xffffffff);
	cmd.req.arg3 = rq_size;
	cmd.req.cmd = NX_CDRP_CMD_CREATE_RX_CTX;
	err = netxen_issue_cmd(adapter, &cmd);
	if (err) {
		printk(KERN_WARNING
			"Failed to create rx ctx in firmware%d\n", err);
		goto out_free_rsp;
	}

	/* Map the per-ring CRB registers firmware handed back.  The
	 * 0x200 adjustment converts the reported offset into the
	 * NETXEN_NIC_REG window. */
	prsp_rds = ((nx_cardrsp_rds_ring_t *)
			&prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);

	for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
		rds_ring = &recv_ctx->rds_rings[i];

		reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
		rds_ring->crb_rcv_producer = netxen_get_ioaddr(adapter,
				NETXEN_NIC_REG(reg - 0x200));
	}

	prsp_sds = ((nx_cardrsp_sds_ring_t *)
			&prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);

	for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
		sds_ring = &recv_ctx->sds_rings[i];

		reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
		sds_ring->crb_sts_consumer = netxen_get_ioaddr(adapter,
				NETXEN_NIC_REG(reg - 0x200));

		reg = le32_to_cpu(prsp_sds[i].interrupt_crb);
		sds_ring->crb_intr_mask = netxen_get_ioaddr(adapter,
				NETXEN_NIC_REG(reg - 0x200));
	}

	recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
	recv_ctx->context_id = le16_to_cpu(prsp->context_id);
	recv_ctx->virt_port = prsp->virt_port;

out_free_rsp:
	pci_free_consistent(adapter->pdev, rsp_size, prsp, cardrsp_phys_addr);
out_free_rq:
	pci_free_consistent(adapter->pdev, rq_size, prq, hostrq_phys_addr);
	return err;
}
  332. static void
  333. nx_fw_cmd_destroy_rx_ctx(struct netxen_adapter *adapter)
  334. {
  335. struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
  336. struct netxen_cmd_args cmd;
  337. memset(&cmd, 0, sizeof(cmd));
  338. cmd.req.arg1 = recv_ctx->context_id;
  339. cmd.req.arg2 = NX_DESTROY_CTX_RESET;
  340. cmd.req.arg3 = 0;
  341. cmd.req.cmd = NX_CDRP_CMD_DESTROY_RX_CTX;
  342. if (netxen_issue_cmd(adapter, &cmd)) {
  343. printk(KERN_WARNING
  344. "%s: Failed to destroy rx ctx in firmware\n",
  345. netxen_nic_driver_name);
  346. }
  347. }
/*
 * Create the firmware transmit context (P3-class hardware path).
 *
 * Builds a host request describing the single cmd-descriptor (tx) ring
 * in DMA-coherent memory, issues NX_CDRP_CMD_CREATE_TX_CTX, and on
 * success maps the tx producer CRB register and records the context id.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EIO when
 * firmware rejects the command.
 */
static int
nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
{
	nx_hostrq_tx_ctx_t	*prq;
	nx_hostrq_cds_ring_t	*prq_cds;
	nx_cardrsp_tx_ctx_t	*prsp;
	void	*rq_addr, *rsp_addr;
	size_t	rq_size, rsp_size;
	u32	temp;
	int	err = 0;
	u64	offset, phys_addr;
	dma_addr_t	rq_phys_addr, rsp_phys_addr;
	struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
	struct netxen_cmd_args cmd;

	rq_size = SIZEOF_HOSTRQ_TX(nx_hostrq_tx_ctx_t);
	rq_addr = pci_alloc_consistent(adapter->pdev,
		rq_size, &rq_phys_addr);
	if (!rq_addr)
		return -ENOMEM;

	rsp_size = SIZEOF_CARDRSP_TX(nx_cardrsp_tx_ctx_t);
	rsp_addr = pci_alloc_consistent(adapter->pdev,
		rsp_size, &rsp_phys_addr);
	if (!rsp_addr) {
		err = -ENOMEM;
		goto out_free_rq;
	}

	memset(rq_addr, 0, rq_size);
	prq = rq_addr;

	memset(rsp_addr, 0, rsp_size);
	prsp = rsp_addr;

	prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);

	temp = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN | NX_CAP0_LSO);
	prq->capabilities[0] = cpu_to_le32(temp);

	prq->host_int_crb_mode =
		cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED);

	prq->interrupt_ctl = 0;
	prq->msi_index = 0;

	prq->dummy_dma_addr = cpu_to_le64(adapter->dummy_dma.phys_addr);

	/* Firmware reports the tx consumer index into the word that
	 * follows the ring context in the recv_ctx DMA block (see
	 * netxen_alloc_hw_resources()). */
	offset = recv_ctx->phys_addr + sizeof(struct netxen_ring_ctx);
	prq->cmd_cons_dma_addr = cpu_to_le64(offset);

	prq_cds = &prq->cds_ring;

	prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
	prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);

	phys_addr = rq_phys_addr;
	memset(&cmd, 0, sizeof(cmd));
	cmd.req.arg1 = (u32)(phys_addr >> 32);
	cmd.req.arg2 = ((u32)phys_addr & 0xffffffff);
	cmd.req.arg3 = rq_size;
	cmd.req.cmd = NX_CDRP_CMD_CREATE_TX_CTX;
	err = netxen_issue_cmd(adapter, &cmd);

	if (err == NX_RCODE_SUCCESS) {
		/* 0x200 converts the reported CRB offset into the
		 * NETXEN_NIC_REG window (same as the rx path). */
		temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
		tx_ring->crb_cmd_producer = netxen_get_ioaddr(adapter,
				NETXEN_NIC_REG(temp - 0x200));
#if 0
		adapter->tx_state =
			le32_to_cpu(prsp->host_ctx_state);
#endif
		adapter->tx_context_id =
			le16_to_cpu(prsp->context_id);
	} else {
		printk(KERN_WARNING
			"Failed to create tx ctx in firmware%d\n", err);
		err = -EIO;
	}

	pci_free_consistent(adapter->pdev, rsp_size, rsp_addr, rsp_phys_addr);

out_free_rq:
	pci_free_consistent(adapter->pdev, rq_size, rq_addr, rq_phys_addr);

	return err;
}
  419. static void
  420. nx_fw_cmd_destroy_tx_ctx(struct netxen_adapter *adapter)
  421. {
  422. struct netxen_cmd_args cmd;
  423. memset(&cmd, 0, sizeof(cmd));
  424. cmd.req.arg1 = adapter->tx_context_id;
  425. cmd.req.arg2 = NX_DESTROY_CTX_RESET;
  426. cmd.req.arg3 = 0;
  427. cmd.req.cmd = NX_CDRP_CMD_DESTROY_TX_CTX;
  428. if (netxen_issue_cmd(adapter, &cmd)) {
  429. printk(KERN_WARNING
  430. "%s: Failed to destroy tx ctx in firmware\n",
  431. netxen_nic_driver_name);
  432. }
  433. }
  434. int
  435. nx_fw_cmd_query_phy(struct netxen_adapter *adapter, u32 reg, u32 *val)
  436. {
  437. u32 rcode;
  438. struct netxen_cmd_args cmd;
  439. memset(&cmd, 0, sizeof(cmd));
  440. cmd.req.arg1 = reg;
  441. cmd.req.arg2 = 0;
  442. cmd.req.arg3 = 0;
  443. cmd.req.cmd = NX_CDRP_CMD_READ_PHY;
  444. cmd.rsp.arg1 = 1;
  445. rcode = netxen_issue_cmd(adapter, &cmd);
  446. if (rcode != NX_RCODE_SUCCESS)
  447. return -EIO;
  448. if (val == NULL)
  449. return -EIO;
  450. *val = cmd.rsp.arg1;
  451. return 0;
  452. }
  453. int
  454. nx_fw_cmd_set_phy(struct netxen_adapter *adapter, u32 reg, u32 val)
  455. {
  456. u32 rcode;
  457. struct netxen_cmd_args cmd;
  458. memset(&cmd, 0, sizeof(cmd));
  459. cmd.req.arg1 = reg;
  460. cmd.req.arg2 = val;
  461. cmd.req.arg3 = 0;
  462. cmd.req.cmd = NX_CDRP_CMD_WRITE_PHY;
  463. rcode = netxen_issue_cmd(adapter, &cmd);
  464. if (rcode != NX_RCODE_SUCCESS)
  465. return -EIO;
  466. return 0;
  467. }
/*
 * Per-PCI-function CRB registers for the legacy (P2) context handshake.
 * Column layout per row: { addr_lo, signature, addr_hi } — note the
 * accessor macros below index columns 0/2/1 accordingly.
 */
static u64 ctx_addr_sig_regs[][3] = {
	{NETXEN_NIC_REG(0x188), NETXEN_NIC_REG(0x18c), NETXEN_NIC_REG(0x1c0)},
	{NETXEN_NIC_REG(0x190), NETXEN_NIC_REG(0x194), NETXEN_NIC_REG(0x1c4)},
	{NETXEN_NIC_REG(0x198), NETXEN_NIC_REG(0x19c), NETXEN_NIC_REG(0x1c8)},
	{NETXEN_NIC_REG(0x1a0), NETXEN_NIC_REG(0x1a4), NETXEN_NIC_REG(0x1cc)}
};

#define CRB_CTX_ADDR_REG_LO(FUNC_ID)	(ctx_addr_sig_regs[FUNC_ID][0])
#define CRB_CTX_ADDR_REG_HI(FUNC_ID)	(ctx_addr_sig_regs[FUNC_ID][2])
#define CRB_CTX_SIGNATURE_REG(FUNC_ID)	(ctx_addr_sig_regs[FUNC_ID][1])

/* Split a 64-bit DMA address into 32-bit halves for CRB writes. */
#define lower32(x)	((u32)((x) & 0xffffffff))
#define upper32(x)	((u32)(((u64)(x) >> 32) & 0xffffffff))
/*
 * Per-port CRB register maps used by the legacy (P2) receive path:
 * one instance per PCI function/port, each carrying the rcv-producer
 * registers (normal / jumbo / LRO rings), the status-ring consumer
 * registers, and the software interrupt-mask registers.
 *
 * NOTE(review): instances 2 and 3 repeat NETXEN_NIC_REG_2(0x03c) in
 * several slots — presumably a placeholder for rings those ports never
 * use; confirm before relying on multi-sds-ring support there.
 */
static struct netxen_recv_crb recv_crb_registers[] = {
	/* Instance 0 */
	{
		/* crb_rcv_producer: */
		{
			NETXEN_NIC_REG(0x100),
			/* Jumbo frames */
			NETXEN_NIC_REG(0x110),
			/* LRO */
			NETXEN_NIC_REG(0x120)
		},
		/* crb_sts_consumer: */
		{
			NETXEN_NIC_REG(0x138),
			NETXEN_NIC_REG_2(0x000),
			NETXEN_NIC_REG_2(0x004),
			NETXEN_NIC_REG_2(0x008),
		},
		/* sw_int_mask */
		{
			CRB_SW_INT_MASK_0,
			NETXEN_NIC_REG_2(0x044),
			NETXEN_NIC_REG_2(0x048),
			NETXEN_NIC_REG_2(0x04c),
		},
	},
	/* Instance 1 */
	{
		/* crb_rcv_producer: */
		{
			NETXEN_NIC_REG(0x144),
			/* Jumbo frames */
			NETXEN_NIC_REG(0x154),
			/* LRO */
			NETXEN_NIC_REG(0x164)
		},
		/* crb_sts_consumer: */
		{
			NETXEN_NIC_REG(0x17c),
			NETXEN_NIC_REG_2(0x020),
			NETXEN_NIC_REG_2(0x024),
			NETXEN_NIC_REG_2(0x028),
		},
		/* sw_int_mask */
		{
			CRB_SW_INT_MASK_1,
			NETXEN_NIC_REG_2(0x064),
			NETXEN_NIC_REG_2(0x068),
			NETXEN_NIC_REG_2(0x06c),
		},
	},
	/* Instance 2 */
	{
		/* crb_rcv_producer: */
		{
			NETXEN_NIC_REG(0x1d8),
			/* Jumbo frames */
			NETXEN_NIC_REG(0x1f8),
			/* LRO */
			NETXEN_NIC_REG(0x208)
		},
		/* crb_sts_consumer: */
		{
			NETXEN_NIC_REG(0x220),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
		},
		/* sw_int_mask */
		{
			CRB_SW_INT_MASK_2,
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
		},
	},
	/* Instance 3 */
	{
		/* crb_rcv_producer: */
		{
			NETXEN_NIC_REG(0x22c),
			/* Jumbo frames */
			NETXEN_NIC_REG(0x23c),
			/* LRO */
			NETXEN_NIC_REG(0x24c)
		},
		/* crb_sts_consumer: */
		{
			NETXEN_NIC_REG(0x264),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
		},
		/* sw_int_mask */
		{
			CRB_SW_INT_MASK_3,
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
		},
	},
};
/*
 * Initialize the legacy (P2) hardware context: fill the shared
 * netxen_ring_ctx block with the tx/rds/sds ring addresses and sizes,
 * then hand its DMA address to firmware through the per-port CRB
 * registers.  The signature register is written last — that write is
 * what tells firmware the context block is ready.
 *
 * Always returns 0.
 */
static int
netxen_init_old_ctx(struct netxen_adapter *adapter)
{
	struct netxen_recv_context *recv_ctx;
	struct nx_host_rds_ring *rds_ring;
	struct nx_host_sds_ring *sds_ring;
	struct nx_host_tx_ring *tx_ring;
	int ring;
	int port = adapter->portnum;
	struct netxen_ring_ctx *hwctx;
	u32 signature;

	tx_ring = adapter->tx_ring;
	recv_ctx = &adapter->recv_ctx;
	hwctx = recv_ctx->hwctx;

	hwctx->cmd_ring_addr = cpu_to_le64(tx_ring->phys_addr);
	hwctx->cmd_ring_size = cpu_to_le32(tx_ring->num_desc);

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];

		hwctx->rcv_rings[ring].addr =
			cpu_to_le64(rds_ring->phys_addr);
		hwctx->rcv_rings[ring].size =
			cpu_to_le32(rds_ring->num_desc);
	}

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		/* Ring 0 is duplicated into the legacy single-ring
		 * fields for older firmware. */
		if (ring == 0) {
			hwctx->sts_ring_addr = cpu_to_le64(sds_ring->phys_addr);
			hwctx->sts_ring_size = cpu_to_le32(sds_ring->num_desc);
		}
		hwctx->sts_rings[ring].addr = cpu_to_le64(sds_ring->phys_addr);
		hwctx->sts_rings[ring].size = cpu_to_le32(sds_ring->num_desc);
		hwctx->sts_rings[ring].msi_index = cpu_to_le16(ring);
	}
	hwctx->sts_ring_count = cpu_to_le32(adapter->max_sds_rings);

	/* V2 signature advertises multiple status rings. */
	signature = (adapter->max_sds_rings > 1) ?
		NETXEN_CTX_SIGNATURE_V2 : NETXEN_CTX_SIGNATURE;

	NXWR32(adapter, CRB_CTX_ADDR_REG_LO(port),
			lower32(recv_ctx->phys_addr));
	NXWR32(adapter, CRB_CTX_ADDR_REG_HI(port),
			upper32(recv_ctx->phys_addr));
	NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port),
			signature | port);
	return 0;
}
/*
 * Allocate all DMA-coherent hardware resources for the adapter: the
 * shared ring-context block (plus one trailing u32 used as the tx
 * hw_consumer word), the tx descriptor ring, and every rds/sds
 * descriptor ring.  Then create the firmware contexts — via CDRP
 * commands on P3 hardware, or the legacy CRB handshake on P2.
 *
 * On any failure every resource allocated so far is released through
 * netxen_free_hw_resources().  Returns 0 on success or a negative
 * errno.
 */
int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
{
	void *addr;
	int err = 0;
	int ring;
	struct netxen_recv_context *recv_ctx;
	struct nx_host_rds_ring *rds_ring;
	struct nx_host_sds_ring *sds_ring;
	struct nx_host_tx_ring *tx_ring;

	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	int port = adapter->portnum;

	recv_ctx = &adapter->recv_ctx;
	tx_ring = adapter->tx_ring;

	addr = pci_alloc_consistent(pdev,
			sizeof(struct netxen_ring_ctx) + sizeof(uint32_t),
			&recv_ctx->phys_addr);
	if (addr == NULL) {
		dev_err(&pdev->dev, "failed to allocate hw context\n");
		return -ENOMEM;
	}

	/* NOTE(review): only the ring-context struct is zeroed; the
	 * trailing hw_consumer u32 is left as allocated — confirm
	 * firmware writes it before it is first read. */
	memset(addr, 0, sizeof(struct netxen_ring_ctx));
	recv_ctx->hwctx = addr;
	recv_ctx->hwctx->ctx_id = cpu_to_le32(port);
	recv_ctx->hwctx->cmd_consumer_offset =
		cpu_to_le64(recv_ctx->phys_addr +
			sizeof(struct netxen_ring_ctx));
	tx_ring->hw_consumer =
		(__le32 *)(((char *)addr) + sizeof(struct netxen_ring_ctx));

	/* cmd desc ring */
	addr = pci_alloc_consistent(pdev, TX_DESC_RINGSIZE(tx_ring),
			&tx_ring->phys_addr);

	if (addr == NULL) {
		dev_err(&pdev->dev, "%s: failed to allocate tx desc ring\n",
				netdev->name);
		err = -ENOMEM;
		goto err_out_free;
	}

	tx_ring->desc_head = addr;

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];
		addr = pci_alloc_consistent(adapter->pdev,
				RCV_DESC_RINGSIZE(rds_ring),
				&rds_ring->phys_addr);
		if (addr == NULL) {
			dev_err(&pdev->dev,
				"%s: failed to allocate rds ring [%d]\n",
				netdev->name, ring);
			err = -ENOMEM;
			goto err_out_free;
		}
		rds_ring->desc_head = addr;

		/* On P2 the CRB registers are fixed per port; P3 gets
		 * them from firmware in nx_fw_cmd_create_rx_ctx(). */
		if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
			rds_ring->crb_rcv_producer =
				netxen_get_ioaddr(adapter,
			recv_crb_registers[port].crb_rcv_producer[ring]);
	}

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		addr = pci_alloc_consistent(adapter->pdev,
				STATUS_DESC_RINGSIZE(sds_ring),
				&sds_ring->phys_addr);
		if (addr == NULL) {
			dev_err(&pdev->dev,
				"%s: failed to allocate sds ring [%d]\n",
				netdev->name, ring);
			err = -ENOMEM;
			goto err_out_free;
		}
		sds_ring->desc_head = addr;

		if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
			sds_ring->crb_sts_consumer =
				netxen_get_ioaddr(adapter,
				recv_crb_registers[port].crb_sts_consumer[ring]);

			sds_ring->crb_intr_mask =
				netxen_get_ioaddr(adapter,
				recv_crb_registers[port].sw_int_mask[ring]);
		}
	}


	if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
		/* Contexts survive across resets; skip re-creation when
		 * firmware is already attached. */
		if (test_and_set_bit(__NX_FW_ATTACHED, &adapter->state))
			goto done;
		err = nx_fw_cmd_create_rx_ctx(adapter);
		if (err)
			goto err_out_free;
		err = nx_fw_cmd_create_tx_ctx(adapter);
		if (err)
			goto err_out_free;
	} else {
		err = netxen_init_old_ctx(adapter);
		if (err)
			goto err_out_free;
	}

done:
	return 0;

err_out_free:
	netxen_free_hw_resources(adapter);
	return err;
}
/*
 * Release everything netxen_alloc_hw_resources() set up: destroy the
 * firmware contexts (CDRP commands on P3, a D3-reset signature write on
 * P2), then free the shared ring-context block and all tx/rds/sds
 * descriptor rings.  Safe to call on a partially allocated adapter —
 * every pointer is NULL-checked and cleared after freeing.
 */
void netxen_free_hw_resources(struct netxen_adapter *adapter)
{
	struct netxen_recv_context *recv_ctx;
	struct nx_host_rds_ring *rds_ring;
	struct nx_host_sds_ring *sds_ring;
	struct nx_host_tx_ring *tx_ring;
	int ring;

	int port = adapter->portnum;

	if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
		/* Only tear down fw contexts we actually created. */
		if (!test_and_clear_bit(__NX_FW_ATTACHED, &adapter->state))
			goto done;

		nx_fw_cmd_destroy_rx_ctx(adapter);
		nx_fw_cmd_destroy_tx_ctx(adapter);
	} else {
		netxen_api_lock(adapter);
		NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port),
				NETXEN_CTX_D3_RESET | port);
		netxen_api_unlock(adapter);
	}

	/* Allow dma queues to drain after context reset */
	msleep(20);

done:
	recv_ctx = &adapter->recv_ctx;

	if (recv_ctx->hwctx != NULL) {
		pci_free_consistent(adapter->pdev,
				sizeof(struct netxen_ring_ctx) +
				sizeof(uint32_t),
				recv_ctx->hwctx,
				recv_ctx->phys_addr);
		recv_ctx->hwctx = NULL;
	}

	tx_ring = adapter->tx_ring;
	if (tx_ring->desc_head != NULL) {
		pci_free_consistent(adapter->pdev,
				TX_DESC_RINGSIZE(tx_ring),
				tx_ring->desc_head, tx_ring->phys_addr);
		tx_ring->desc_head = NULL;
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];

		if (rds_ring->desc_head != NULL) {
			pci_free_consistent(adapter->pdev,
					RCV_DESC_RINGSIZE(rds_ring),
					rds_ring->desc_head,
					rds_ring->phys_addr);
			rds_ring->desc_head = NULL;
		}
	}

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		if (sds_ring->desc_head != NULL) {
			pci_free_consistent(adapter->pdev,
				STATUS_DESC_RINGSIZE(sds_ring),
				sds_ring->desc_head,
				sds_ring->phys_addr);
			sds_ring->desc_head = NULL;
		}
	}
}