// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

/* ethtool support for ixgbevf */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>

#include "ixgbevf.h"

#define IXGBE_ALL_RAR_ENTRIES 16

enum {NETDEV_STATS, IXGBEVF_STATS};
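
/* Each entry in the stat table records where a counter lives (a byte
 * offset into either struct net_device_stats or struct ixgbevf_adapter,
 * selected by .type) and how wide it is, so a single generic loop in
 * ixgbevf_get_ethtool_stats() can copy every counter out.
 */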
struct ixgbe_stats {
        char stat_string[ETH_GSTRING_LEN];
        int type;
        int sizeof_stat;
        int stat_offset;
};

#define IXGBEVF_STAT(_name, _stat) { \
        .stat_string = _name, \
        .type = IXGBEVF_STATS, \
        .sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, _stat), \
        .stat_offset = offsetof(struct ixgbevf_adapter, _stat) \
}

#define IXGBEVF_NETDEV_STAT(_net_stat) { \
        .stat_string = #_net_stat, \
        .type = NETDEV_STATS, \
        .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
        .stat_offset = offsetof(struct net_device_stats, _net_stat) \
}

static struct ixgbe_stats ixgbevf_gstrings_stats[] = {
        IXGBEVF_NETDEV_STAT(rx_packets),
        IXGBEVF_NETDEV_STAT(tx_packets),
        IXGBEVF_NETDEV_STAT(rx_bytes),
        IXGBEVF_NETDEV_STAT(tx_bytes),
        IXGBEVF_STAT("tx_busy", tx_busy),
        IXGBEVF_STAT("tx_restart_queue", restart_queue),
        IXGBEVF_STAT("tx_timeout_count", tx_timeout_count),
        IXGBEVF_NETDEV_STAT(multicast),
        IXGBEVF_STAT("rx_csum_offload_errors", hw_csum_rx_error),
        IXGBEVF_STAT("alloc_rx_page", alloc_rx_page),
        IXGBEVF_STAT("alloc_rx_page_failed", alloc_rx_page_failed),
        IXGBEVF_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
};
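
/* Each ring contributes sizeof(struct ixgbevf_stats) / sizeof(u64)
 * u64 counters (its packets and bytes totals) to the "ethtool -S" dump.
 * As an illustrative example: with 4 Tx, 0 XDP and 4 Rx queues, the
 * queue portion is 8 rings * 2 counters = 16 entries on top of the
 * global strings.
 */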
#define IXGBEVF_QUEUE_STATS_LEN ( \
        (((struct ixgbevf_adapter *)netdev_priv(netdev))->num_tx_queues + \
         ((struct ixgbevf_adapter *)netdev_priv(netdev))->num_xdp_queues + \
         ((struct ixgbevf_adapter *)netdev_priv(netdev))->num_rx_queues) * \
         (sizeof(struct ixgbevf_stats) / sizeof(u64)))
#define IXGBEVF_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbevf_gstrings_stats)

#define IXGBEVF_STATS_LEN (IXGBEVF_GLOBAL_STATS_LEN + IXGBEVF_QUEUE_STATS_LEN)

static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
        "Register test (offline)",
        "Link test (on/offline)"
};

#define IXGBEVF_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)

static const char ixgbevf_priv_flags_strings[][ETH_GSTRING_LEN] = {
#define IXGBEVF_PRIV_FLAGS_LEGACY_RX    BIT(0)
        "legacy-rx",
};

#define IXGBEVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbevf_priv_flags_strings)
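
/* The VF exposes no configurable PHY; link state and speed are whatever
 * the PF reports.  Userspace therefore sees autonegotiation disabled and
 * a fixed full-duplex speed, e.g. "Speed: 10000Mb/s" from
 * "ethtool <iface>" when the PF reports a 10G link (interface name
 * illustrative).
 */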
static int ixgbevf_get_link_ksettings(struct net_device *netdev,
                                      struct ethtool_link_ksettings *cmd)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        u32 link_speed = 0;
        bool link_up;

        ethtool_link_ksettings_zero_link_mode(cmd, supported);
        ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
        cmd->base.autoneg = AUTONEG_DISABLE;
        cmd->base.port = -1;

        hw->mac.get_link_status = 1;
        hw->mac.ops.check_link(hw, &link_speed, &link_up, false);

        if (link_up) {
                __u32 speed = SPEED_10000;

                switch (link_speed) {
                case IXGBE_LINK_SPEED_10GB_FULL:
                        speed = SPEED_10000;
                        break;
                case IXGBE_LINK_SPEED_1GB_FULL:
                        speed = SPEED_1000;
                        break;
                case IXGBE_LINK_SPEED_100_FULL:
                        speed = SPEED_100;
                        break;
                }

                cmd->base.speed = speed;
                cmd->base.duplex = DUPLEX_FULL;
        } else {
                cmd->base.speed = SPEED_UNKNOWN;
                cmd->base.duplex = DUPLEX_UNKNOWN;
        }

        return 0;
}

static u32 ixgbevf_get_msglevel(struct net_device *netdev)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);

        return adapter->msg_enable;
}

static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);

        adapter->msg_enable = data;
}

#define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_)

static int ixgbevf_get_regs_len(struct net_device *netdev)
{
#define IXGBE_REGS_LEN 45
        return IXGBE_REGS_LEN * sizeof(u32);
}
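
/* Register snapshot for "ethtool -d".  The buffer filled below must
 * contain exactly IXGBE_REGS_LEN (45) 32-bit words, so adding a register
 * here requires bumping that define as well.
 */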
static void ixgbevf_get_regs(struct net_device *netdev,
                             struct ethtool_regs *regs,
                             void *p)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        u32 *regs_buff = p;
        u32 regs_len = ixgbevf_get_regs_len(netdev);
        u8 i;

        memset(p, 0, regs_len);

        /* generate a number suitable for ethtool's register version */
        regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id;

        /* General Registers */
        regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL);
        regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_VFSTATUS);
        regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
        regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_VFRXMEMWRAP);
        regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFFRTIMER);

        /* Interrupt */
        /* don't read EICR because it can clear interrupt causes, instead
         * read EICS which is a shadow but doesn't clear EICR
         */
        regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
        regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
        regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
        regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_VTEIMC);
        regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_VTEIAC);
        regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_VTEIAM);
        regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_VTEITR(0));
        regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_VTIVAR(0));
        regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);

        /* Receive DMA */
        for (i = 0; i < 2; i++)
                regs_buff[14 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAL(i));
        for (i = 0; i < 2; i++)
                regs_buff[16 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAH(i));
        for (i = 0; i < 2; i++)
                regs_buff[18 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDLEN(i));
        for (i = 0; i < 2; i++)
                regs_buff[20 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDH(i));
        for (i = 0; i < 2; i++)
                regs_buff[22 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDT(i));
        for (i = 0; i < 2; i++)
                regs_buff[24 + i] = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
        for (i = 0; i < 2; i++)
                regs_buff[26 + i] = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));

        /* Receive */
        regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_VFPSRTYPE);

        /* Transmit */
        for (i = 0; i < 2; i++)
                regs_buff[29 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAL(i));
        for (i = 0; i < 2; i++)
                regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAH(i));
        for (i = 0; i < 2; i++)
                regs_buff[33 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDLEN(i));
        for (i = 0; i < 2; i++)
                regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDH(i));
        for (i = 0; i < 2; i++)
                regs_buff[37 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDT(i));
        for (i = 0; i < 2; i++)
                regs_buff[39 + i] = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
        for (i = 0; i < 2; i++)
                regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i));
        for (i = 0; i < 2; i++)
                regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i));
}

static void ixgbevf_get_drvinfo(struct net_device *netdev,
                                struct ethtool_drvinfo *drvinfo)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);

        strlcpy(drvinfo->driver, ixgbevf_driver_name, sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, ixgbevf_driver_version,
                sizeof(drvinfo->version));
        strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
                sizeof(drvinfo->bus_info));

        drvinfo->n_priv_flags = IXGBEVF_PRIV_FLAGS_STR_LEN;
}

static void ixgbevf_get_ringparam(struct net_device *netdev,
                                  struct ethtool_ringparam *ring)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);

        ring->rx_max_pending = IXGBEVF_MAX_RXD;
        ring->tx_max_pending = IXGBEVF_MAX_TXD;
        ring->rx_pending = adapter->rx_ring_count;
        ring->tx_pending = adapter->tx_ring_count;
}
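
/* Resize the descriptor rings, as driven by e.g.
 * "ethtool -G <iface> rx 512 tx 512" (name and counts illustrative).
 * Requested counts are clamped to the IXGBEVF_MIN_*D/IXGBEVF_MAX_*D
 * bounds and aligned to the required descriptor multiple rather than
 * rejected; XDP rings always share the Tx ring size.
 */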
static int ixgbevf_set_ringparam(struct net_device *netdev,
                                 struct ethtool_ringparam *ring)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
        u32 new_rx_count, new_tx_count;
        int i, j, err = 0;

        if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
                return -EINVAL;

        new_tx_count = max_t(u32, ring->tx_pending, IXGBEVF_MIN_TXD);
        new_tx_count = min_t(u32, new_tx_count, IXGBEVF_MAX_TXD);
        new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);

        new_rx_count = max_t(u32, ring->rx_pending, IXGBEVF_MIN_RXD);
        new_rx_count = min_t(u32, new_rx_count, IXGBEVF_MAX_RXD);
        new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);

        /* if nothing to do return success */
        if ((new_tx_count == adapter->tx_ring_count) &&
            (new_rx_count == adapter->rx_ring_count))
                return 0;

        while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
                usleep_range(1000, 2000);

        if (!netif_running(adapter->netdev)) {
                for (i = 0; i < adapter->num_tx_queues; i++)
                        adapter->tx_ring[i]->count = new_tx_count;
                for (i = 0; i < adapter->num_xdp_queues; i++)
                        adapter->xdp_ring[i]->count = new_tx_count;
                for (i = 0; i < adapter->num_rx_queues; i++)
                        adapter->rx_ring[i]->count = new_rx_count;
                adapter->tx_ring_count = new_tx_count;
                adapter->xdp_ring_count = new_tx_count;
                adapter->rx_ring_count = new_rx_count;
                goto clear_reset;
        }

        if (new_tx_count != adapter->tx_ring_count) {
                tx_ring = vmalloc(array_size(sizeof(*tx_ring),
                                             adapter->num_tx_queues +
                                             adapter->num_xdp_queues));
                if (!tx_ring) {
                        err = -ENOMEM;
                        goto clear_reset;
                }

                for (i = 0; i < adapter->num_tx_queues; i++) {
                        /* clone ring and setup updated count */
                        tx_ring[i] = *adapter->tx_ring[i];
                        tx_ring[i].count = new_tx_count;
                        err = ixgbevf_setup_tx_resources(&tx_ring[i]);
                        if (err) {
                                while (i) {
                                        i--;
                                        ixgbevf_free_tx_resources(&tx_ring[i]);
                                }

                                vfree(tx_ring);
                                tx_ring = NULL;

                                goto clear_reset;
                        }
                }

                for (j = 0; j < adapter->num_xdp_queues; i++, j++) {
                        /* clone ring and setup updated count */
                        tx_ring[i] = *adapter->xdp_ring[j];
                        tx_ring[i].count = new_tx_count;
                        err = ixgbevf_setup_tx_resources(&tx_ring[i]);
                        if (err) {
                                while (i) {
                                        i--;
                                        ixgbevf_free_tx_resources(&tx_ring[i]);
                                }

                                vfree(tx_ring);
                                tx_ring = NULL;

                                goto clear_reset;
                        }
                }
        }

        if (new_rx_count != adapter->rx_ring_count) {
                rx_ring = vmalloc(array_size(sizeof(*rx_ring),
                                             adapter->num_rx_queues));
                if (!rx_ring) {
                        err = -ENOMEM;
                        goto clear_reset;
                }

                for (i = 0; i < adapter->num_rx_queues; i++) {
                        /* clone ring and setup updated count */
                        rx_ring[i] = *adapter->rx_ring[i];

                        /* Clear copied XDP RX-queue info */
                        memset(&rx_ring[i].xdp_rxq, 0,
                               sizeof(rx_ring[i].xdp_rxq));

                        rx_ring[i].count = new_rx_count;
                        err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
                        if (err) {
                                while (i) {
                                        i--;
                                        ixgbevf_free_rx_resources(&rx_ring[i]);
                                }

                                vfree(rx_ring);
                                rx_ring = NULL;

                                goto clear_reset;
                        }
                }
        }

        /* bring interface down to prepare for update */
        ixgbevf_down(adapter);

        /* Tx */
        if (tx_ring) {
                for (i = 0; i < adapter->num_tx_queues; i++) {
                        ixgbevf_free_tx_resources(adapter->tx_ring[i]);
                        *adapter->tx_ring[i] = tx_ring[i];
                }
                adapter->tx_ring_count = new_tx_count;

                for (j = 0; j < adapter->num_xdp_queues; i++, j++) {
                        ixgbevf_free_tx_resources(adapter->xdp_ring[j]);
                        *adapter->xdp_ring[j] = tx_ring[i];
                }
                adapter->xdp_ring_count = new_tx_count;

                vfree(tx_ring);
                tx_ring = NULL;
        }

        /* Rx */
        if (rx_ring) {
                for (i = 0; i < adapter->num_rx_queues; i++) {
                        ixgbevf_free_rx_resources(adapter->rx_ring[i]);
                        *adapter->rx_ring[i] = rx_ring[i];
                }
                adapter->rx_ring_count = new_rx_count;

                vfree(rx_ring);
                rx_ring = NULL;
        }

        /* restore interface using new values */
        ixgbevf_up(adapter);

clear_reset:
        /* free Tx resources if Rx error is encountered */
        if (tx_ring) {
                for (i = 0;
                     i < adapter->num_tx_queues + adapter->num_xdp_queues; i++)
                        ixgbevf_free_tx_resources(&tx_ring[i]);
                vfree(tx_ring);
        }

        clear_bit(__IXGBEVF_RESETTING, &adapter->state);
        return err;
}
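
/* Counts for the ETH_SS_* string sets.  These must stay in lockstep
 * with ixgbevf_get_strings() and ixgbevf_get_ethtool_stats(), since
 * ethtool pairs each name with the value at the same index.
 */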
static int ixgbevf_get_sset_count(struct net_device *netdev, int stringset)
{
        switch (stringset) {
        case ETH_SS_TEST:
                return IXGBEVF_TEST_LEN;
        case ETH_SS_STATS:
                return IXGBEVF_STATS_LEN;
        case ETH_SS_PRIV_FLAGS:
                return IXGBEVF_PRIV_FLAGS_STR_LEN;
        default:
                return -EINVAL;
        }
}

static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
                                      struct ethtool_stats *stats, u64 *data)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct rtnl_link_stats64 temp;
        const struct rtnl_link_stats64 *net_stats;
        unsigned int start;
        struct ixgbevf_ring *ring;
        int i, j;
        char *p;

        ixgbevf_update_stats(adapter);
        net_stats = dev_get_stats(netdev, &temp);
        for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) {
                switch (ixgbevf_gstrings_stats[i].type) {
                case NETDEV_STATS:
                        p = (char *)net_stats +
                            ixgbevf_gstrings_stats[i].stat_offset;
                        break;
                case IXGBEVF_STATS:
                        p = (char *)adapter +
                            ixgbevf_gstrings_stats[i].stat_offset;
                        break;
                default:
                        data[i] = 0;
                        continue;
                }

                data[i] = (ixgbevf_gstrings_stats[i].sizeof_stat ==
                           sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
        }

        /* populate Tx queue data */
        for (j = 0; j < adapter->num_tx_queues; j++) {
                ring = adapter->tx_ring[j];
                if (!ring) {
                        data[i++] = 0;
                        data[i++] = 0;
                        continue;
                }

                do {
                        start = u64_stats_fetch_begin_irq(&ring->syncp);
                        data[i] = ring->stats.packets;
                        data[i + 1] = ring->stats.bytes;
                } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
                i += 2;
        }

        /* populate XDP queue data */
        for (j = 0; j < adapter->num_xdp_queues; j++) {
                ring = adapter->xdp_ring[j];
                if (!ring) {
                        data[i++] = 0;
                        data[i++] = 0;
                        continue;
                }

                do {
                        start = u64_stats_fetch_begin_irq(&ring->syncp);
                        data[i] = ring->stats.packets;
                        data[i + 1] = ring->stats.bytes;
                } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
                i += 2;
        }

        /* populate Rx queue data */
        for (j = 0; j < adapter->num_rx_queues; j++) {
                ring = adapter->rx_ring[j];
                if (!ring) {
                        data[i++] = 0;
                        data[i++] = 0;
                        continue;
                }

                do {
                        start = u64_stats_fetch_begin_irq(&ring->syncp);
                        data[i] = ring->stats.packets;
                        data[i + 1] = ring->stats.bytes;
                } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
                i += 2;
        }
}
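
/* String names are emitted in exactly the order the values are written
 * above: global stats first, then packets/bytes pairs for each Tx, XDP
 * and Rx ring.  (The retry loops above read each pair under the ring's
 * u64_stats seqcount so a 32-bit reader never sees a torn update.)
 */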
static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
                                u8 *data)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        char *p = (char *)data;
        int i;

        switch (stringset) {
        case ETH_SS_TEST:
                memcpy(data, *ixgbe_gstrings_test,
                       IXGBEVF_TEST_LEN * ETH_GSTRING_LEN);
                break;
        case ETH_SS_STATS:
                for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) {
                        memcpy(p, ixgbevf_gstrings_stats[i].stat_string,
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }

                for (i = 0; i < adapter->num_tx_queues; i++) {
                        sprintf(p, "tx_queue_%u_packets", i);
                        p += ETH_GSTRING_LEN;
                        sprintf(p, "tx_queue_%u_bytes", i);
                        p += ETH_GSTRING_LEN;
                }
                for (i = 0; i < adapter->num_xdp_queues; i++) {
                        sprintf(p, "xdp_queue_%u_packets", i);
                        p += ETH_GSTRING_LEN;
                        sprintf(p, "xdp_queue_%u_bytes", i);
                        p += ETH_GSTRING_LEN;
                }
                for (i = 0; i < adapter->num_rx_queues; i++) {
                        sprintf(p, "rx_queue_%u_packets", i);
                        p += ETH_GSTRING_LEN;
                        sprintf(p, "rx_queue_%u_bytes", i);
                        p += ETH_GSTRING_LEN;
                }
                break;
        case ETH_SS_PRIV_FLAGS:
                memcpy(data, ixgbevf_priv_flags_strings,
                       IXGBEVF_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
                break;
        }
}

static int ixgbevf_link_test(struct ixgbevf_adapter *adapter, u64 *data)
{
        struct ixgbe_hw *hw = &adapter->hw;
        bool link_up;
        u32 link_speed = 0;

        *data = 0;
        hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
        if (!link_up)
                *data = 1;

        return *data;
}

/* ethtool register test data */
struct ixgbevf_reg_test {
        u16 reg;
        u8 array_len;
        u8 test_type;
        u32 mask;
        u32 write;
};

/* In the hardware, registers are laid out either singly, in arrays
 * spaced 0x40 bytes apart, or in contiguous tables.  We assume
 * most tests take place on arrays or single registers (handled
 * as a single-element array) and special-case the tables.
 * Table tests are always pattern tests.
 *
 * We also make provision for some required setup steps by specifying
 * registers to be written without any read-back testing.
 */

#define PATTERN_TEST    1
#define SET_READ_TEST   2
#define WRITE_NO_TEST   3
#define TABLE32_TEST    4
#define TABLE64_TEST_LO 5
#define TABLE64_TEST_HI 6
/* default VF register test */
static const struct ixgbevf_reg_test reg_test_vf[] = {
        { IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
        { IXGBE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
        { IXGBE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
        { IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
        { IXGBE_VFRDT(0), 2, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
        { IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, 0 },
        { IXGBE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
        { IXGBE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
        { IXGBE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
        { .reg = 0 }
};

static const u32 register_test_patterns[] = {
        0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
};

static bool reg_pattern_test(struct ixgbevf_adapter *adapter, u64 *data,
                             int reg, u32 mask, u32 write)
{
        u32 pat, val, before;

        if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
                *data = 1;
                return true;
        }
        for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) {
                before = ixgbevf_read_reg(&adapter->hw, reg);
                ixgbe_write_reg(&adapter->hw, reg,
                                register_test_patterns[pat] & write);
                val = ixgbevf_read_reg(&adapter->hw, reg);
                if (val != (register_test_patterns[pat] & write & mask)) {
                        hw_dbg(&adapter->hw,
                               "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
                               reg, val,
                               register_test_patterns[pat] & write & mask);
                        *data = reg;
                        ixgbe_write_reg(&adapter->hw, reg, before);
                        return true;
                }
                ixgbe_write_reg(&adapter->hw, reg, before);
        }
        return false;
}

static bool reg_set_and_check(struct ixgbevf_adapter *adapter, u64 *data,
                              int reg, u32 mask, u32 write)
{
        u32 val, before;

        if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
                *data = 1;
                return true;
        }
        before = ixgbevf_read_reg(&adapter->hw, reg);
        ixgbe_write_reg(&adapter->hw, reg, write & mask);
        val = ixgbevf_read_reg(&adapter->hw, reg);
        if ((write & mask) != (val & mask)) {
                pr_err("set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
                       reg, (val & mask), write & mask);
                *data = reg;
                ixgbe_write_reg(&adapter->hw, reg, before);
                return true;
        }
        ixgbe_write_reg(&adapter->hw, reg, before);
        return false;
}

static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
{
        const struct ixgbevf_reg_test *test;
        u32 i;

        if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
                dev_err(&adapter->pdev->dev,
                        "Adapter removed - register test blocked\n");
                *data = 1;
                return 1;
        }
        test = reg_test_vf;

        /* Perform the register test, looping through the test table
         * until we either fail or reach the null entry.
         */
        while (test->reg) {
                for (i = 0; i < test->array_len; i++) {
                        bool b = false;

                        switch (test->test_type) {
                        case PATTERN_TEST:
                                b = reg_pattern_test(adapter, data,
                                                     test->reg + (i * 0x40),
                                                     test->mask,
                                                     test->write);
                                break;
                        case SET_READ_TEST:
                                b = reg_set_and_check(adapter, data,
                                                      test->reg + (i * 0x40),
                                                      test->mask,
                                                      test->write);
                                break;
                        case WRITE_NO_TEST:
                                ixgbe_write_reg(&adapter->hw,
                                                test->reg + (i * 0x40),
                                                test->write);
                                break;
                        case TABLE32_TEST:
                                b = reg_pattern_test(adapter, data,
                                                     test->reg + (i * 4),
                                                     test->mask,
                                                     test->write);
                                break;
                        case TABLE64_TEST_LO:
                                b = reg_pattern_test(adapter, data,
                                                     test->reg + (i * 8),
                                                     test->mask,
                                                     test->write);
                                break;
                        case TABLE64_TEST_HI:
                                b = reg_pattern_test(adapter, data,
                                                     test->reg + 4 + (i * 8),
                                                     test->mask,
                                                     test->write);
                                break;
                        }
                        if (b)
                                return 1;
                }
                test++;
        }
        *data = 0;
        return *data;
}
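
/* Entry point for "ethtool -t <iface> [offline]" (interface name
 * illustrative).  data[0] carries the register test result and data[1]
 * the link test result; a nonzero value marks a failure and sets
 * ETH_TEST_FL_FAILED.
 */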
static void ixgbevf_diag_test(struct net_device *netdev,
                              struct ethtool_test *eth_test, u64 *data)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        bool if_running = netif_running(netdev);

        if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
                dev_err(&adapter->pdev->dev,
                        "Adapter removed - test blocked\n");
                data[0] = 1;
                data[1] = 1;
                eth_test->flags |= ETH_TEST_FL_FAILED;
                return;
        }
        set_bit(__IXGBEVF_TESTING, &adapter->state);
        if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
                /* Offline tests */
                hw_dbg(&adapter->hw, "offline testing starting\n");

                /* Link test performed before hardware reset so autoneg doesn't
                 * interfere with test result
                 */
                if (ixgbevf_link_test(adapter, &data[1]))
                        eth_test->flags |= ETH_TEST_FL_FAILED;

                if (if_running)
                        /* indicate we're in test mode */
                        ixgbevf_close(netdev);
                else
                        ixgbevf_reset(adapter);

                hw_dbg(&adapter->hw, "register testing starting\n");
                if (ixgbevf_reg_test(adapter, &data[0]))
                        eth_test->flags |= ETH_TEST_FL_FAILED;

                ixgbevf_reset(adapter);

                clear_bit(__IXGBEVF_TESTING, &adapter->state);
                if (if_running)
                        ixgbevf_open(netdev);
        } else {
                hw_dbg(&adapter->hw, "online testing starting\n");

                /* Online tests */
                if (ixgbevf_link_test(adapter, &data[1]))
                        eth_test->flags |= ETH_TEST_FL_FAILED;

                /* Online tests aren't run; pass by default */
                data[0] = 0;

                clear_bit(__IXGBEVF_TESTING, &adapter->state);
        }
        msleep_interruptible(4 * 1000);
}

static int ixgbevf_nway_reset(struct net_device *netdev)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);

        if (netif_running(netdev))
                ixgbevf_reinit_locked(adapter);

        return 0;
}
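
/* Interrupt coalescing, read and set via "ethtool -c/-C", e.g.
 * "ethtool -C <iface> rx-usecs 50" (name and value illustrative).  The
 * stored *_itr_setting values are overloaded: 0 disables throttling,
 * 1 selects the driver's dynamic ITR, and larger values hold the EITR
 * register encoding (microseconds << 2), hence the shifts when
 * converting to and from rx/tx_coalesce_usecs.
 */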
static int ixgbevf_get_coalesce(struct net_device *netdev,
                                struct ethtool_coalesce *ec)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);

        /* only valid if in constant ITR mode */
        if (adapter->rx_itr_setting <= 1)
                ec->rx_coalesce_usecs = adapter->rx_itr_setting;
        else
                ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;

        /* if in mixed Tx/Rx queues per vector mode, report only Rx settings */
        if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
                return 0;

        /* only valid if in constant ITR mode */
        if (adapter->tx_itr_setting <= 1)
                ec->tx_coalesce_usecs = adapter->tx_itr_setting;
        else
                ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;

        return 0;
}

static int ixgbevf_set_coalesce(struct net_device *netdev,
                                struct ethtool_coalesce *ec)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbevf_q_vector *q_vector;
        int num_vectors, i;
        u16 tx_itr_param, rx_itr_param;

        /* don't accept Tx specific changes if we've got mixed RxTx vectors */
        if (adapter->q_vector[0]->tx.count &&
            adapter->q_vector[0]->rx.count && ec->tx_coalesce_usecs)
                return -EINVAL;

        if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
            (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
                return -EINVAL;

        if (ec->rx_coalesce_usecs > 1)
                adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
        else
                adapter->rx_itr_setting = ec->rx_coalesce_usecs;

        if (adapter->rx_itr_setting == 1)
                rx_itr_param = IXGBE_20K_ITR;
        else
                rx_itr_param = adapter->rx_itr_setting;

        if (ec->tx_coalesce_usecs > 1)
                adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
        else
                adapter->tx_itr_setting = ec->tx_coalesce_usecs;

        if (adapter->tx_itr_setting == 1)
                tx_itr_param = IXGBE_12K_ITR;
        else
                tx_itr_param = adapter->tx_itr_setting;

        num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        for (i = 0; i < num_vectors; i++) {
                q_vector = adapter->q_vector[i];
                if (q_vector->tx.count && !q_vector->rx.count)
                        /* Tx only */
                        q_vector->itr = tx_itr_param;
                else
                        /* Rx only or mixed */
                        q_vector->itr = rx_itr_param;
                ixgbevf_write_eitr(q_vector);
        }

        return 0;
}
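
/* Minimal Rx flow classification support: only ETHTOOL_GRXRINGS
 * (reporting the Rx queue count) is implemented; every other query
 * returns -EOPNOTSUPP.
 */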
static int ixgbevf_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
                             u32 *rules __always_unused)
{
        struct ixgbevf_adapter *adapter = netdev_priv(dev);

        switch (info->cmd) {
        case ETHTOOL_GRXRINGS:
                info->data = adapter->num_rx_queues;
                return 0;
        default:
                hw_dbg(&adapter->hw, "Command parameters not supported\n");
                return -EOPNOTSUPP;
        }
}

static u32 ixgbevf_get_rxfh_indir_size(struct net_device *netdev)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);

        if (adapter->hw.mac.type >= ixgbe_mac_X550_vf)
                return IXGBEVF_X550_VFRETA_SIZE;

        return IXGBEVF_82599_RETA_SIZE;
}

static u32 ixgbevf_get_rxfh_key_size(struct net_device *netdev)
{
        return IXGBEVF_RSS_HASH_KEY_SIZE;
}

static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
                            u8 *hfunc)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        int err = 0;

        if (hfunc)
                *hfunc = ETH_RSS_HASH_TOP;

        if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) {
                if (key)
                        memcpy(key, adapter->rss_key,
                               ixgbevf_get_rxfh_key_size(netdev));

                if (indir) {
                        int i;

                        for (i = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++)
                                indir[i] = adapter->rss_indir_tbl[i];
                }
        } else {
                /* If neither the indirection table nor the hash key was
                 * requested, just return success without taking any locks.
                 */
                if (!indir && !key)
                        return 0;

                spin_lock_bh(&adapter->mbx_lock);
                if (indir)
                        err = ixgbevf_get_reta_locked(&adapter->hw, indir,
                                                      adapter->num_rx_queues);

                if (!err && key)
                        err = ixgbevf_get_rss_key_locked(&adapter->hw, key);

                spin_unlock_bh(&adapter->mbx_lock);
        }

        return err;
}

static u32 ixgbevf_get_priv_flags(struct net_device *netdev)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        u32 priv_flags = 0;

        if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX)
                priv_flags |= IXGBEVF_PRIV_FLAGS_LEGACY_RX;

        return priv_flags;
}

static int ixgbevf_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        unsigned int flags = adapter->flags;

        flags &= ~IXGBEVF_FLAGS_LEGACY_RX;
        if (priv_flags & IXGBEVF_PRIV_FLAGS_LEGACY_RX)
                flags |= IXGBEVF_FLAGS_LEGACY_RX;

        if (flags != adapter->flags) {
                adapter->flags = flags;

                /* reset interface to repopulate queues */
                if (netif_running(netdev))
                        ixgbevf_reinit_locked(adapter);
        }

        return 0;
}
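
/* The "legacy-rx" private flag above is toggled from userspace with,
 * e.g., "ethtool --set-priv-flags <iface> legacy-rx on" (interface name
 * illustrative).  Note the IXGBEVF_PRIV_FLAGS_LEGACY_RX bit must match
 * the string's index in ixgbevf_priv_flags_strings[].
 */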
static const struct ethtool_ops ixgbevf_ethtool_ops = {
        .get_drvinfo            = ixgbevf_get_drvinfo,
        .get_regs_len           = ixgbevf_get_regs_len,
        .get_regs               = ixgbevf_get_regs,
        .nway_reset             = ixgbevf_nway_reset,
        .get_link               = ethtool_op_get_link,
        .get_ringparam          = ixgbevf_get_ringparam,
        .set_ringparam          = ixgbevf_set_ringparam,
        .get_msglevel           = ixgbevf_get_msglevel,
        .set_msglevel           = ixgbevf_set_msglevel,
        .self_test              = ixgbevf_diag_test,
        .get_sset_count         = ixgbevf_get_sset_count,
        .get_strings            = ixgbevf_get_strings,
        .get_ethtool_stats      = ixgbevf_get_ethtool_stats,
        .get_coalesce           = ixgbevf_get_coalesce,
        .set_coalesce           = ixgbevf_set_coalesce,
        .get_rxnfc              = ixgbevf_get_rxnfc,
        .get_rxfh_indir_size    = ixgbevf_get_rxfh_indir_size,
        .get_rxfh_key_size      = ixgbevf_get_rxfh_key_size,
        .get_rxfh               = ixgbevf_get_rxfh,
        .get_link_ksettings     = ixgbevf_get_link_ksettings,
        .get_priv_flags         = ixgbevf_get_priv_flags,
        .set_priv_flags         = ixgbevf_set_priv_flags,
};
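
/* Presumably called from the driver's netdev setup path so the ops are
 * in place before the interface is registered and becomes visible to
 * userspace.
 */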
void ixgbevf_set_ethtool_ops(struct net_device *netdev)
{
        netdev->ethtool_ops = &ixgbevf_ethtool_ops;
}