netxen_nic_ctx.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2003 - 2009 NetXen, Inc.
 * Copyright (C) 2009 - QLogic Corporation.
 * All rights reserved.
 */

#include "netxen_nic_hw.h"
#include "netxen_nic.h"

#define NXHAL_VERSION	1

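/*
 * Poll the CDRP response register until the firmware posts a response,
 * or return NX_CDRP_RSP_TIMEOUT after roughly NX_OS_CRB_RETRY_COUNT
 * milliseconds without one.
 */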
static u32
netxen_poll_rsp(struct netxen_adapter *adapter)
{
        u32 rsp = NX_CDRP_RSP_OK;
        int timeout = 0;

        do {
                /* give at least 1ms for firmware to respond */
                msleep(1);

                if (++timeout > NX_OS_CRB_RETRY_COUNT)
                        return NX_CDRP_RSP_TIMEOUT;

                rsp = NXRD32(adapter, NX_CDRP_CRB_OFFSET);
        } while (!NX_CDRP_IS_RSP(rsp));

        return rsp;
}

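/*
 * Issue one firmware command through the CDRP CRB registers: write the
 * signature and arguments, kick the command register, then poll for the
 * response. The api lock serializes CRB access across functions.
 * Response arguments are read back only for rsp fields the caller
 * pre-set to a non-zero value.
 */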
static u32
netxen_issue_cmd(struct netxen_adapter *adapter, struct netxen_cmd_args *cmd)
{
        u32 rsp;
        u32 signature = 0;
        u32 rcode = NX_RCODE_SUCCESS;

        signature = NX_CDRP_SIGNATURE_MAKE(adapter->ahw.pci_func,
                                           NXHAL_VERSION);

        /* Acquire semaphore before accessing CRB */
        if (netxen_api_lock(adapter))
                return NX_RCODE_TIMEOUT;

        NXWR32(adapter, NX_SIGN_CRB_OFFSET, signature);
        NXWR32(adapter, NX_ARG1_CRB_OFFSET, cmd->req.arg1);
        NXWR32(adapter, NX_ARG2_CRB_OFFSET, cmd->req.arg2);
        NXWR32(adapter, NX_ARG3_CRB_OFFSET, cmd->req.arg3);
        NXWR32(adapter, NX_CDRP_CRB_OFFSET, NX_CDRP_FORM_CMD(cmd->req.cmd));

        rsp = netxen_poll_rsp(adapter);

        if (rsp == NX_CDRP_RSP_TIMEOUT) {
                printk(KERN_ERR "%s: card response timeout.\n",
                       netxen_nic_driver_name);

                rcode = NX_RCODE_TIMEOUT;
        } else if (rsp == NX_CDRP_RSP_FAIL) {
                rcode = NXRD32(adapter, NX_ARG1_CRB_OFFSET);

                printk(KERN_ERR "%s: failed card response code:0x%x\n",
                       netxen_nic_driver_name, rcode);
        } else if (rsp == NX_CDRP_RSP_OK) {
                cmd->rsp.cmd = NX_RCODE_SUCCESS;
                if (cmd->rsp.arg2)
                        cmd->rsp.arg2 = NXRD32(adapter, NX_ARG2_CRB_OFFSET);
                if (cmd->rsp.arg3)
                        cmd->rsp.arg3 = NXRD32(adapter, NX_ARG3_CRB_OFFSET);
        }

        if (cmd->rsp.arg1)
                cmd->rsp.arg1 = NXRD32(adapter, NX_ARG1_CRB_OFFSET);

        /* Release semaphore */
        netxen_api_unlock(adapter);

        return rcode;
}

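/* Query the firmware for the minidump template size and version. */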
static int
netxen_get_minidump_template_size(struct netxen_adapter *adapter)
{
        struct netxen_cmd_args cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.req.cmd = NX_CDRP_CMD_TEMP_SIZE;
        memset(&cmd.rsp, 1, sizeof(struct _cdrp_cmd));
        netxen_issue_cmd(adapter, &cmd);
        if (cmd.rsp.cmd != NX_RCODE_SUCCESS) {
                dev_info(&adapter->pdev->dev,
                         "Can't get template size %d\n", cmd.rsp.cmd);
                return -EIO;
        }
        adapter->mdump.md_template_size = cmd.rsp.arg2;
        adapter->mdump.md_template_ver = cmd.rsp.arg3;
        return 0;
}

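/*
 * Fetch the minidump template from the firmware into a temporary DMA
 * buffer and copy it into adapter->mdump.md_template. Note that this
 * returns 0 even when the firmware reports an error; the checksum check
 * in netxen_setup_minidump() is what catches a bad template.
 */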
static int
netxen_get_minidump_template(struct netxen_adapter *adapter)
{
        dma_addr_t md_template_addr;
        void *addr;
        u32 size;
        struct netxen_cmd_args cmd;

        size = adapter->mdump.md_template_size;

        if (size == 0) {
                dev_err(&adapter->pdev->dev, "Can not capture Minidump "
                        "template. Invalid template size.\n");
                return NX_RCODE_INVALID_ARGS;
        }

        addr = pci_zalloc_consistent(adapter->pdev, size, &md_template_addr);
        if (!addr) {
                dev_err(&adapter->pdev->dev, "Unable to allocate dmable memory for template.\n");
                return -ENOMEM;
        }

        memset(&cmd, 0, sizeof(cmd));
        memset(&cmd.rsp, 1, sizeof(struct _cdrp_cmd));
        cmd.req.cmd = NX_CDRP_CMD_GET_TEMP_HDR;
        cmd.req.arg1 = LSD(md_template_addr);
        cmd.req.arg2 = MSD(md_template_addr);
        cmd.req.arg3 |= size;
        netxen_issue_cmd(adapter, &cmd);

        if ((cmd.rsp.cmd == NX_RCODE_SUCCESS) && (size == cmd.rsp.arg2)) {
                memcpy(adapter->mdump.md_template, addr, size);
        } else {
                dev_err(&adapter->pdev->dev, "Failed to get minidump template, err_code : %d, requested_size : %d, actual_size : %d\n",
                        cmd.rsp.cmd, size, cmd.rsp.arg2);
        }
        pci_free_consistent(adapter->pdev, size, addr, md_template_addr);
        return 0;
}

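/*
 * Sum the template as 32-bit words and fold the carries back in; the
 * caller treats a non-zero result as a checksum error.
 */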
static u32
netxen_check_template_checksum(struct netxen_adapter *adapter)
{
        u64 sum = 0;
        u32 *buff = adapter->mdump.md_template;
        int count = adapter->mdump.md_template_size / sizeof(uint32_t);

        while (count-- > 0)
                sum += *buff++;
        while (sum >> 32)
                sum = (sum & 0xFFFFFFFF) + (sum >> 32);

        return ~sum;
}

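/*
 * Set up minidump support: query the template size, allocate and fetch
 * the template, verify its checksum, and convert it from little-endian
 * to host byte order.
 */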
int
netxen_setup_minidump(struct netxen_adapter *adapter)
{
        int err = 0, i;
        u32 *template, *tmp_buf;

        err = netxen_get_minidump_template_size(adapter);
        if (err) {
                adapter->mdump.fw_supports_md = 0;
                if ((err == NX_RCODE_CMD_INVALID) ||
                    (err == NX_RCODE_CMD_NOT_IMPL)) {
                        dev_info(&adapter->pdev->dev,
                                 "Flashed firmware version does not support minidump, minimum version required is [ %u.%u.%u ]\n",
                                 NX_MD_SUPPORT_MAJOR, NX_MD_SUPPORT_MINOR,
                                 NX_MD_SUPPORT_SUBVERSION);
                }
                return err;
        }

        if (!adapter->mdump.md_template_size) {
                dev_err(&adapter->pdev->dev, "Error : Invalid template size "
                        ",should be non-zero.\n");
                return -EIO;
        }
        adapter->mdump.md_template =
                kmalloc(adapter->mdump.md_template_size, GFP_KERNEL);

        if (!adapter->mdump.md_template)
                return -ENOMEM;

        err = netxen_get_minidump_template(adapter);
        if (err) {
                if (err == NX_RCODE_CMD_NOT_IMPL)
                        adapter->mdump.fw_supports_md = 0;
                goto free_template;
        }

        if (netxen_check_template_checksum(adapter)) {
                dev_err(&adapter->pdev->dev, "Minidump template checksum Error\n");
                err = -EIO;
                goto free_template;
        }

        adapter->mdump.md_capture_mask = NX_DUMP_MASK_DEF;
        tmp_buf = (u32 *) adapter->mdump.md_template;
        template = (u32 *) adapter->mdump.md_template;
        for (i = 0; i < adapter->mdump.md_template_size / sizeof(u32); i++)
                *template++ = __le32_to_cpu(*tmp_buf++);
        adapter->mdump.md_capture_buff = NULL;
        adapter->mdump.fw_supports_md = 1;
        adapter->mdump.md_enabled = 0;

        return err;

free_template:
        kfree(adapter->mdump.md_template);
        adapter->mdump.md_template = NULL;
        return err;
}

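/*
 * Tell the firmware the new MTU for this receive context. The command
 * is only issued while the context is active; on an inactive context
 * the call succeeds without touching the hardware.
 */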
int
nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu)
{
        u32 rcode = NX_RCODE_SUCCESS;
        struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
        struct netxen_cmd_args cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.req.cmd = NX_CDRP_CMD_SET_MTU;
        cmd.req.arg1 = recv_ctx->context_id;
        cmd.req.arg2 = mtu;
        cmd.req.arg3 = 0;

        if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE)
                rcode = netxen_issue_cmd(adapter, &cmd);

        if (rcode != NX_RCODE_SUCCESS)
                return -EIO;

        return 0;
}

int
nx_fw_cmd_set_gbe_port(struct netxen_adapter *adapter,
                       u32 speed, u32 duplex, u32 autoneg)
{
        struct netxen_cmd_args cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.req.cmd = NX_CDRP_CMD_CONFIG_GBE_PORT;
        cmd.req.arg1 = speed;
        cmd.req.arg2 = duplex;
        cmd.req.arg3 = autoneg;
        return netxen_issue_cmd(adapter, &cmd);
}

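/*
 * Build a host request describing the receive (RDS) and status (SDS)
 * rings in DMA memory, hand it to the firmware with CREATE_RX_CTX, and
 * map the CRB registers the card returns for ring producers, consumers
 * and interrupt masks.
 */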
static int
nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
{
        void *addr;
        nx_hostrq_rx_ctx_t *prq;
        nx_cardrsp_rx_ctx_t *prsp;
        nx_hostrq_rds_ring_t *prq_rds;
        nx_hostrq_sds_ring_t *prq_sds;
        nx_cardrsp_rds_ring_t *prsp_rds;
        nx_cardrsp_sds_ring_t *prsp_sds;
        struct nx_host_rds_ring *rds_ring;
        struct nx_host_sds_ring *sds_ring;
        struct netxen_cmd_args cmd;

        dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
        u64 phys_addr;

        int i, nrds_rings, nsds_rings;
        size_t rq_size, rsp_size;
        u32 cap, reg, val;

        int err;

        struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;

        nrds_rings = adapter->max_rds_rings;
        nsds_rings = adapter->max_sds_rings;

        rq_size =
                SIZEOF_HOSTRQ_RX(nx_hostrq_rx_ctx_t, nrds_rings, nsds_rings);
        rsp_size =
                SIZEOF_CARDRSP_RX(nx_cardrsp_rx_ctx_t, nrds_rings, nsds_rings);

        addr = pci_alloc_consistent(adapter->pdev,
                        rq_size, &hostrq_phys_addr);
        if (addr == NULL)
                return -ENOMEM;
        prq = addr;

        addr = pci_alloc_consistent(adapter->pdev,
                        rsp_size, &cardrsp_phys_addr);
        if (addr == NULL) {
                err = -ENOMEM;
                goto out_free_rq;
        }
        prsp = addr;

        prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);

        cap = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN);
        cap |= (NX_CAP0_JUMBO_CONTIGUOUS | NX_CAP0_LRO_CONTIGUOUS);

        if (adapter->flags & NETXEN_FW_MSS_CAP)
                cap |= NX_CAP0_HW_LRO_MSS;

        prq->capabilities[0] = cpu_to_le32(cap);
        prq->host_int_crb_mode =
                cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED);
        prq->host_rds_crb_mode =
                cpu_to_le32(NX_HOST_RDS_CRB_MODE_UNIQUE);

        prq->num_rds_rings = cpu_to_le16(nrds_rings);
        prq->num_sds_rings = cpu_to_le16(nsds_rings);
        prq->rds_ring_offset = cpu_to_le32(0);

        val = le32_to_cpu(prq->rds_ring_offset) +
                (sizeof(nx_hostrq_rds_ring_t) * nrds_rings);
        prq->sds_ring_offset = cpu_to_le32(val);

        prq_rds = (nx_hostrq_rds_ring_t *)(prq->data +
                        le32_to_cpu(prq->rds_ring_offset));

        for (i = 0; i < nrds_rings; i++) {
                rds_ring = &recv_ctx->rds_rings[i];

                prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
                prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
                prq_rds[i].ring_kind = cpu_to_le32(i);
                prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
        }

        prq_sds = (nx_hostrq_sds_ring_t *)(prq->data +
                        le32_to_cpu(prq->sds_ring_offset));

        for (i = 0; i < nsds_rings; i++) {
                sds_ring = &recv_ctx->sds_rings[i];

                prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
                prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
                prq_sds[i].msi_index = cpu_to_le16(i);
        }

        phys_addr = hostrq_phys_addr;

        memset(&cmd, 0, sizeof(cmd));
        cmd.req.arg1 = (u32)(phys_addr >> 32);
        cmd.req.arg2 = (u32)(phys_addr & 0xffffffff);
        cmd.req.arg3 = rq_size;
        cmd.req.cmd = NX_CDRP_CMD_CREATE_RX_CTX;
        err = netxen_issue_cmd(adapter, &cmd);
        if (err) {
                printk(KERN_WARNING
                       "Failed to create rx ctx in firmware%d\n", err);
                goto out_free_rsp;
        }

        prsp_rds = ((nx_cardrsp_rds_ring_t *)
                        &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);

        for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
                rds_ring = &recv_ctx->rds_rings[i];

                reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
                rds_ring->crb_rcv_producer = netxen_get_ioaddr(adapter,
                                NETXEN_NIC_REG(reg - 0x200));
        }

        prsp_sds = ((nx_cardrsp_sds_ring_t *)
                        &prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);

        for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
                sds_ring = &recv_ctx->sds_rings[i];

                reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
                sds_ring->crb_sts_consumer = netxen_get_ioaddr(adapter,
                                NETXEN_NIC_REG(reg - 0x200));

                reg = le32_to_cpu(prsp_sds[i].interrupt_crb);
                sds_ring->crb_intr_mask = netxen_get_ioaddr(adapter,
                                NETXEN_NIC_REG(reg - 0x200));
        }

        recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
        recv_ctx->context_id = le16_to_cpu(prsp->context_id);
        recv_ctx->virt_port = prsp->virt_port;

out_free_rsp:
        pci_free_consistent(adapter->pdev, rsp_size, prsp, cardrsp_phys_addr);
out_free_rq:
        pci_free_consistent(adapter->pdev, rq_size, prq, hostrq_phys_addr);
        return err;
}

static void
nx_fw_cmd_destroy_rx_ctx(struct netxen_adapter *adapter)
{
        struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
        struct netxen_cmd_args cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.req.arg1 = recv_ctx->context_id;
        cmd.req.arg2 = NX_DESTROY_CTX_RESET;
        cmd.req.arg3 = 0;
        cmd.req.cmd = NX_CDRP_CMD_DESTROY_RX_CTX;

        if (netxen_issue_cmd(adapter, &cmd)) {
                printk(KERN_WARNING
                       "%s: Failed to destroy rx ctx in firmware\n",
                       netxen_nic_driver_name);
        }
}

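/*
 * Create the transmit context: describe the command descriptor (CDS)
 * ring to the firmware and, on success, record the returned producer
 * CRB register and context id.
 */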
static int
nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
{
        nx_hostrq_tx_ctx_t *prq;
        nx_hostrq_cds_ring_t *prq_cds;
        nx_cardrsp_tx_ctx_t *prsp;
        void *rq_addr, *rsp_addr;
        size_t rq_size, rsp_size;
        u32 temp;
        int err = 0;
        u64 offset, phys_addr;
        dma_addr_t rq_phys_addr, rsp_phys_addr;
        struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
        struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
        struct netxen_cmd_args cmd;

        rq_size = SIZEOF_HOSTRQ_TX(nx_hostrq_tx_ctx_t);
        rq_addr = pci_alloc_consistent(adapter->pdev,
                        rq_size, &rq_phys_addr);
        if (!rq_addr)
                return -ENOMEM;

        rsp_size = SIZEOF_CARDRSP_TX(nx_cardrsp_tx_ctx_t);
        rsp_addr = pci_alloc_consistent(adapter->pdev,
                        rsp_size, &rsp_phys_addr);
        if (!rsp_addr) {
                err = -ENOMEM;
                goto out_free_rq;
        }

        prq = rq_addr;
        prsp = rsp_addr;

        prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);

        temp = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN | NX_CAP0_LSO);
        prq->capabilities[0] = cpu_to_le32(temp);

        prq->host_int_crb_mode =
                cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED);

        prq->interrupt_ctl = 0;
        prq->msi_index = 0;

        prq->dummy_dma_addr = cpu_to_le64(adapter->dummy_dma.phys_addr);

        offset = recv_ctx->phys_addr + sizeof(struct netxen_ring_ctx);
        prq->cmd_cons_dma_addr = cpu_to_le64(offset);

        prq_cds = &prq->cds_ring;

        prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
        prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);

        phys_addr = rq_phys_addr;

        memset(&cmd, 0, sizeof(cmd));
        cmd.req.arg1 = (u32)(phys_addr >> 32);
        cmd.req.arg2 = ((u32)phys_addr & 0xffffffff);
        cmd.req.arg3 = rq_size;
        cmd.req.cmd = NX_CDRP_CMD_CREATE_TX_CTX;
        err = netxen_issue_cmd(adapter, &cmd);

        if (err == NX_RCODE_SUCCESS) {
                temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
                tx_ring->crb_cmd_producer = netxen_get_ioaddr(adapter,
                                NETXEN_NIC_REG(temp - 0x200));
#if 0
                adapter->tx_state =
                        le32_to_cpu(prsp->host_ctx_state);
#endif
                adapter->tx_context_id =
                        le16_to_cpu(prsp->context_id);
        } else {
                printk(KERN_WARNING
                       "Failed to create tx ctx in firmware%d\n", err);
                err = -EIO;
        }

        pci_free_consistent(adapter->pdev, rsp_size, rsp_addr, rsp_phys_addr);

out_free_rq:
        pci_free_consistent(adapter->pdev, rq_size, rq_addr, rq_phys_addr);

        return err;
}

static void
nx_fw_cmd_destroy_tx_ctx(struct netxen_adapter *adapter)
{
        struct netxen_cmd_args cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.req.arg1 = adapter->tx_context_id;
        cmd.req.arg2 = NX_DESTROY_CTX_RESET;
        cmd.req.arg3 = 0;
        cmd.req.cmd = NX_CDRP_CMD_DESTROY_TX_CTX;

        if (netxen_issue_cmd(adapter, &cmd)) {
                printk(KERN_WARNING
                       "%s: Failed to destroy tx ctx in firmware\n",
                       netxen_nic_driver_name);
        }
}

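/*
 * Read a PHY register through the firmware; the value is returned in
 * *val. nx_fw_cmd_set_phy() below is the corresponding write.
 */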
int
nx_fw_cmd_query_phy(struct netxen_adapter *adapter, u32 reg, u32 *val)
{
        u32 rcode;
        struct netxen_cmd_args cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.req.arg1 = reg;
        cmd.req.arg2 = 0;
        cmd.req.arg3 = 0;
        cmd.req.cmd = NX_CDRP_CMD_READ_PHY;
        cmd.rsp.arg1 = 1;
        rcode = netxen_issue_cmd(adapter, &cmd);
        if (rcode != NX_RCODE_SUCCESS)
                return -EIO;

        if (val == NULL)
                return -EIO;

        *val = cmd.rsp.arg1;
        return 0;
}

int
nx_fw_cmd_set_phy(struct netxen_adapter *adapter, u32 reg, u32 val)
{
        u32 rcode;
        struct netxen_cmd_args cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.req.arg1 = reg;
        cmd.req.arg2 = val;
        cmd.req.arg3 = 0;
        cmd.req.cmd = NX_CDRP_CMD_WRITE_PHY;
        rcode = netxen_issue_cmd(adapter, &cmd);
        if (rcode != NX_RCODE_SUCCESS)
                return -EIO;

        return 0;
}

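/*
 * CRB register tables used by the legacy (P2) context path below:
 * per-function context address/signature registers and the per-port
 * receive producer, status consumer and interrupt mask registers.
 */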
static u64 ctx_addr_sig_regs[][3] = {
        {NETXEN_NIC_REG(0x188), NETXEN_NIC_REG(0x18c), NETXEN_NIC_REG(0x1c0)},
        {NETXEN_NIC_REG(0x190), NETXEN_NIC_REG(0x194), NETXEN_NIC_REG(0x1c4)},
        {NETXEN_NIC_REG(0x198), NETXEN_NIC_REG(0x19c), NETXEN_NIC_REG(0x1c8)},
        {NETXEN_NIC_REG(0x1a0), NETXEN_NIC_REG(0x1a4), NETXEN_NIC_REG(0x1cc)}
};

#define CRB_CTX_ADDR_REG_LO(FUNC_ID)	(ctx_addr_sig_regs[FUNC_ID][0])
#define CRB_CTX_ADDR_REG_HI(FUNC_ID)	(ctx_addr_sig_regs[FUNC_ID][2])
#define CRB_CTX_SIGNATURE_REG(FUNC_ID)	(ctx_addr_sig_regs[FUNC_ID][1])

#define lower32(x)	((u32)((x) & 0xffffffff))
#define upper32(x)	((u32)(((u64)(x) >> 32) & 0xffffffff))

static struct netxen_recv_crb recv_crb_registers[] = {
        /* Instance 0 */
        {
                /* crb_rcv_producer: */
                {
                        NETXEN_NIC_REG(0x100),
                        /* Jumbo frames */
                        NETXEN_NIC_REG(0x110),
                        /* LRO */
                        NETXEN_NIC_REG(0x120)
                },
                /* crb_sts_consumer: */
                {
                        NETXEN_NIC_REG(0x138),
                        NETXEN_NIC_REG_2(0x000),
                        NETXEN_NIC_REG_2(0x004),
                        NETXEN_NIC_REG_2(0x008),
                },
                /* sw_int_mask */
                {
                        CRB_SW_INT_MASK_0,
                        NETXEN_NIC_REG_2(0x044),
                        NETXEN_NIC_REG_2(0x048),
                        NETXEN_NIC_REG_2(0x04c),
                },
        },
        /* Instance 1 */
        {
                /* crb_rcv_producer: */
                {
                        NETXEN_NIC_REG(0x144),
                        /* Jumbo frames */
                        NETXEN_NIC_REG(0x154),
                        /* LRO */
                        NETXEN_NIC_REG(0x164)
                },
                /* crb_sts_consumer: */
                {
                        NETXEN_NIC_REG(0x17c),
                        NETXEN_NIC_REG_2(0x020),
                        NETXEN_NIC_REG_2(0x024),
                        NETXEN_NIC_REG_2(0x028),
                },
                /* sw_int_mask */
                {
                        CRB_SW_INT_MASK_1,
                        NETXEN_NIC_REG_2(0x064),
                        NETXEN_NIC_REG_2(0x068),
                        NETXEN_NIC_REG_2(0x06c),
                },
        },
        /* Instance 2 */
        {
                /* crb_rcv_producer: */
                {
                        NETXEN_NIC_REG(0x1d8),
                        /* Jumbo frames */
                        NETXEN_NIC_REG(0x1f8),
                        /* LRO */
                        NETXEN_NIC_REG(0x208)
                },
                /* crb_sts_consumer: */
                {
                        NETXEN_NIC_REG(0x220),
                        NETXEN_NIC_REG_2(0x03c),
                        NETXEN_NIC_REG_2(0x03c),
                        NETXEN_NIC_REG_2(0x03c),
                },
                /* sw_int_mask */
                {
                        CRB_SW_INT_MASK_2,
                        NETXEN_NIC_REG_2(0x03c),
                        NETXEN_NIC_REG_2(0x03c),
                        NETXEN_NIC_REG_2(0x03c),
                },
        },
        /* Instance 3 */
        {
                /* crb_rcv_producer: */
                {
                        NETXEN_NIC_REG(0x22c),
                        /* Jumbo frames */
                        NETXEN_NIC_REG(0x23c),
                        /* LRO */
                        NETXEN_NIC_REG(0x24c)
                },
                /* crb_sts_consumer: */
                {
                        NETXEN_NIC_REG(0x264),
                        NETXEN_NIC_REG_2(0x03c),
                        NETXEN_NIC_REG_2(0x03c),
                        NETXEN_NIC_REG_2(0x03c),
                },
                /* sw_int_mask */
                {
                        CRB_SW_INT_MASK_3,
                        NETXEN_NIC_REG_2(0x03c),
                        NETXEN_NIC_REG_2(0x03c),
                        NETXEN_NIC_REG_2(0x03c),
                },
        },
};

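/*
 * Legacy context setup for P2 hardware: fill the host ring context
 * structure directly and publish its DMA address and signature through
 * the per-port CRB registers instead of issuing firmware commands.
 */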
static int
netxen_init_old_ctx(struct netxen_adapter *adapter)
{
        struct netxen_recv_context *recv_ctx;
        struct nx_host_rds_ring *rds_ring;
        struct nx_host_sds_ring *sds_ring;
        struct nx_host_tx_ring *tx_ring;
        int ring;
        int port = adapter->portnum;
        struct netxen_ring_ctx *hwctx;
        u32 signature;

        tx_ring = adapter->tx_ring;
        recv_ctx = &adapter->recv_ctx;
        hwctx = recv_ctx->hwctx;

        hwctx->cmd_ring_addr = cpu_to_le64(tx_ring->phys_addr);
        hwctx->cmd_ring_size = cpu_to_le32(tx_ring->num_desc);

        for (ring = 0; ring < adapter->max_rds_rings; ring++) {
                rds_ring = &recv_ctx->rds_rings[ring];

                hwctx->rcv_rings[ring].addr =
                        cpu_to_le64(rds_ring->phys_addr);
                hwctx->rcv_rings[ring].size =
                        cpu_to_le32(rds_ring->num_desc);
        }

        for (ring = 0; ring < adapter->max_sds_rings; ring++) {
                sds_ring = &recv_ctx->sds_rings[ring];

                if (ring == 0) {
                        hwctx->sts_ring_addr = cpu_to_le64(sds_ring->phys_addr);
                        hwctx->sts_ring_size = cpu_to_le32(sds_ring->num_desc);
                }
                hwctx->sts_rings[ring].addr = cpu_to_le64(sds_ring->phys_addr);
                hwctx->sts_rings[ring].size = cpu_to_le32(sds_ring->num_desc);
                hwctx->sts_rings[ring].msi_index = cpu_to_le16(ring);
        }
        hwctx->sts_ring_count = cpu_to_le32(adapter->max_sds_rings);

        signature = (adapter->max_sds_rings > 1) ?
                NETXEN_CTX_SIGNATURE_V2 : NETXEN_CTX_SIGNATURE;

        NXWR32(adapter, CRB_CTX_ADDR_REG_LO(port),
               lower32(recv_ctx->phys_addr));
        NXWR32(adapter, CRB_CTX_ADDR_REG_HI(port),
               upper32(recv_ctx->phys_addr));
        NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port),
               signature | port);
        return 0;
}

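/*
 * Allocate the DMA descriptor rings and hardware context for this
 * adapter, then either hand them to the firmware (P3) or program the
 * legacy CRB context registers (P2). On failure, everything allocated
 * so far is released via netxen_free_hw_resources().
 */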
int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
{
        void *addr;
        int err = 0;
        int ring;
        struct netxen_recv_context *recv_ctx;
        struct nx_host_rds_ring *rds_ring;
        struct nx_host_sds_ring *sds_ring;
        struct nx_host_tx_ring *tx_ring;

        struct pci_dev *pdev = adapter->pdev;
        struct net_device *netdev = adapter->netdev;
        int port = adapter->portnum;

        recv_ctx = &adapter->recv_ctx;
        tx_ring = adapter->tx_ring;

        addr = pci_alloc_consistent(pdev,
                        sizeof(struct netxen_ring_ctx) + sizeof(uint32_t),
                        &recv_ctx->phys_addr);
        if (addr == NULL) {
                dev_err(&pdev->dev, "failed to allocate hw context\n");
                return -ENOMEM;
        }

        recv_ctx->hwctx = addr;
        recv_ctx->hwctx->ctx_id = cpu_to_le32(port);
        recv_ctx->hwctx->cmd_consumer_offset =
                cpu_to_le64(recv_ctx->phys_addr +
                        sizeof(struct netxen_ring_ctx));
        tx_ring->hw_consumer =
                (__le32 *)(((char *)addr) + sizeof(struct netxen_ring_ctx));

        /* cmd desc ring */
        addr = pci_alloc_consistent(pdev, TX_DESC_RINGSIZE(tx_ring),
                        &tx_ring->phys_addr);

        if (addr == NULL) {
                dev_err(&pdev->dev, "%s: failed to allocate tx desc ring\n",
                        netdev->name);
                err = -ENOMEM;
                goto err_out_free;
        }

        tx_ring->desc_head = addr;

        for (ring = 0; ring < adapter->max_rds_rings; ring++) {
                rds_ring = &recv_ctx->rds_rings[ring];
                addr = pci_alloc_consistent(adapter->pdev,
                                RCV_DESC_RINGSIZE(rds_ring),
                                &rds_ring->phys_addr);
                if (addr == NULL) {
                        dev_err(&pdev->dev,
                                "%s: failed to allocate rds ring [%d]\n",
                                netdev->name, ring);
                        err = -ENOMEM;
                        goto err_out_free;
                }
                rds_ring->desc_head = addr;

                if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
                        rds_ring->crb_rcv_producer =
                                netxen_get_ioaddr(adapter,
                        recv_crb_registers[port].crb_rcv_producer[ring]);
        }

        for (ring = 0; ring < adapter->max_sds_rings; ring++) {
                sds_ring = &recv_ctx->sds_rings[ring];

                addr = pci_alloc_consistent(adapter->pdev,
                                STATUS_DESC_RINGSIZE(sds_ring),
                                &sds_ring->phys_addr);
                if (addr == NULL) {
                        dev_err(&pdev->dev,
                                "%s: failed to allocate sds ring [%d]\n",
                                netdev->name, ring);
                        err = -ENOMEM;
                        goto err_out_free;
                }
                sds_ring->desc_head = addr;

                if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
                        sds_ring->crb_sts_consumer =
                                netxen_get_ioaddr(adapter,
                                recv_crb_registers[port].crb_sts_consumer[ring]);

                        sds_ring->crb_intr_mask =
                                netxen_get_ioaddr(adapter,
                                recv_crb_registers[port].sw_int_mask[ring]);
                }
        }

        if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
                if (test_and_set_bit(__NX_FW_ATTACHED, &adapter->state))
                        goto done;
                err = nx_fw_cmd_create_rx_ctx(adapter);
                if (err)
                        goto err_out_free;
                err = nx_fw_cmd_create_tx_ctx(adapter);
                if (err)
                        goto err_out_free;
        } else {
                err = netxen_init_old_ctx(adapter);
                if (err)
                        goto err_out_free;
        }

done:
        return 0;

err_out_free:
        netxen_free_hw_resources(adapter);
        return err;
}

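/*
 * Tear down the firmware or legacy context and release the descriptor
 * ring and context DMA allocations made by netxen_alloc_hw_resources().
 */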
void netxen_free_hw_resources(struct netxen_adapter *adapter)
{
        struct netxen_recv_context *recv_ctx;
        struct nx_host_rds_ring *rds_ring;
        struct nx_host_sds_ring *sds_ring;
        struct nx_host_tx_ring *tx_ring;
        int ring;

        int port = adapter->portnum;

        if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
                if (!test_and_clear_bit(__NX_FW_ATTACHED, &adapter->state))
                        goto done;

                nx_fw_cmd_destroy_rx_ctx(adapter);
                nx_fw_cmd_destroy_tx_ctx(adapter);
        } else {
                netxen_api_lock(adapter);
                NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port),
                                NETXEN_CTX_D3_RESET | port);
                netxen_api_unlock(adapter);
        }

        /* Allow dma queues to drain after context reset */
        msleep(20);

done:
        recv_ctx = &adapter->recv_ctx;

        if (recv_ctx->hwctx != NULL) {
                pci_free_consistent(adapter->pdev,
                                sizeof(struct netxen_ring_ctx) +
                                sizeof(uint32_t),
                                recv_ctx->hwctx,
                                recv_ctx->phys_addr);
                recv_ctx->hwctx = NULL;
        }

        tx_ring = adapter->tx_ring;
        if (tx_ring->desc_head != NULL) {
                pci_free_consistent(adapter->pdev,
                                TX_DESC_RINGSIZE(tx_ring),
                                tx_ring->desc_head, tx_ring->phys_addr);
                tx_ring->desc_head = NULL;
        }

        for (ring = 0; ring < adapter->max_rds_rings; ring++) {
                rds_ring = &recv_ctx->rds_rings[ring];

                if (rds_ring->desc_head != NULL) {
                        pci_free_consistent(adapter->pdev,
                                        RCV_DESC_RINGSIZE(rds_ring),
                                        rds_ring->desc_head,
                                        rds_ring->phys_addr);
                        rds_ring->desc_head = NULL;
                }
        }

        for (ring = 0; ring < adapter->max_sds_rings; ring++) {
                sds_ring = &recv_ctx->sds_rings[ring];

                if (sds_ring->desc_head != NULL) {
                        pci_free_consistent(adapter->pdev,
                                        STATUS_DESC_RINGSIZE(sds_ring),
                                        sds_ring->desc_head,
                                        sds_ring->phys_addr);
                        sds_ring->desc_head = NULL;
                }
        }
}