en_ethtool.c 60 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975197619771978197919801981198219831984198519861987198819891990199119921993199419951996199719981999200020012002200320042005200620072008200920102011201220132014201520162017201820192020202120222023202420252026202720282029203020312032203320342035203620372038203920402041204220432044204520462047204820492050205120522053205420552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083208420852086208720882089209020912092209320942095209620972098209921002101210221032104210521062107210821092110211121122113211421152116211721182119212021212122212321242125212621272128212921302131213221332134213521362137213821392140214121422143214421452146214721482149215021512152215321542155215621572158215921602161216221632164
  1. /*
  2. * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
  3. *
  4. * This software is available to you under a choice of one of two
  5. * licenses. You may choose to be licensed under the terms of the GNU
  6. * General Public License (GPL) Version 2, available from the file
  7. * COPYING in the main directory of this source tree, or the
  8. * OpenIB.org BSD license below:
  9. *
  10. * Redistribution and use in source and binary forms, with or
  11. * without modification, are permitted provided that the following
  12. * conditions are met:
  13. *
  14. * - Redistributions of source code must retain the above
  15. * copyright notice, this list of conditions and the following
  16. * disclaimer.
  17. *
  18. * - Redistributions in binary form must reproduce the above
  19. * copyright notice, this list of conditions and the following
  20. * disclaimer in the documentation and/or other materials
  21. * provided with the distribution.
  22. *
  23. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30. * SOFTWARE.
  31. *
  32. */
  33. #include <linux/kernel.h>
  34. #include <linux/ethtool.h>
  35. #include <linux/netdevice.h>
  36. #include <linux/mlx4/driver.h>
  37. #include <linux/mlx4/device.h>
  38. #include <linux/in.h>
  39. #include <net/ip.h>
  40. #include <linux/bitmap.h>
  41. #include "mlx4_en.h"
  42. #include "en_port.h"
  43. #define EN_ETHTOOL_QP_ATTACH (1ull << 63)
  44. #define EN_ETHTOOL_SHORT_MASK cpu_to_be16(0xffff)
  45. #define EN_ETHTOOL_WORD_MASK cpu_to_be32(0xffffffff)
  46. static int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
  47. {
  48. int i, t;
  49. int err = 0;
  50. for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) {
  51. for (i = 0; i < priv->tx_ring_num[t]; i++) {
  52. priv->tx_cq[t][i]->moder_cnt = priv->tx_frames;
  53. priv->tx_cq[t][i]->moder_time = priv->tx_usecs;
  54. if (priv->port_up) {
  55. err = mlx4_en_set_cq_moder(priv,
  56. priv->tx_cq[t][i]);
  57. if (err)
  58. return err;
  59. }
  60. }
  61. }
  62. if (priv->adaptive_rx_coal)
  63. return 0;
  64. for (i = 0; i < priv->rx_ring_num; i++) {
  65. priv->rx_cq[i]->moder_cnt = priv->rx_frames;
  66. priv->rx_cq[i]->moder_time = priv->rx_usecs;
  67. priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
  68. if (priv->port_up) {
  69. err = mlx4_en_set_cq_moder(priv, priv->rx_cq[i]);
  70. if (err)
  71. return err;
  72. }
  73. }
  74. return err;
  75. }
  76. static void
  77. mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
  78. {
  79. struct mlx4_en_priv *priv = netdev_priv(dev);
  80. struct mlx4_en_dev *mdev = priv->mdev;
  81. strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
  82. strlcpy(drvinfo->version, DRV_VERSION,
  83. sizeof(drvinfo->version));
  84. snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
  85. "%d.%d.%d",
  86. (u16) (mdev->dev->caps.fw_ver >> 32),
  87. (u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
  88. (u16) (mdev->dev->caps.fw_ver & 0xffff));
  89. strlcpy(drvinfo->bus_info, pci_name(mdev->dev->persist->pdev),
  90. sizeof(drvinfo->bus_info));
  91. }
  92. static const char mlx4_en_priv_flags[][ETH_GSTRING_LEN] = {
  93. "blueflame",
  94. "phv-bit"
  95. };
  96. static const char main_strings[][ETH_GSTRING_LEN] = {
  97. /* main statistics */
  98. "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
  99. "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
  100. "rx_length_errors", "rx_over_errors", "rx_crc_errors",
  101. "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
  102. "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
  103. "tx_heartbeat_errors", "tx_window_errors",
  104. /* port statistics */
  105. "tso_packets",
  106. "xmit_more",
  107. "queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_pages",
  108. "rx_csum_good", "rx_csum_none", "rx_csum_complete", "tx_chksum_offload",
  109. /* pf statistics */
  110. "pf_rx_packets",
  111. "pf_rx_bytes",
  112. "pf_tx_packets",
  113. "pf_tx_bytes",
  114. /* priority flow control statistics rx */
  115. "rx_pause_prio_0", "rx_pause_duration_prio_0",
  116. "rx_pause_transition_prio_0",
  117. "rx_pause_prio_1", "rx_pause_duration_prio_1",
  118. "rx_pause_transition_prio_1",
  119. "rx_pause_prio_2", "rx_pause_duration_prio_2",
  120. "rx_pause_transition_prio_2",
  121. "rx_pause_prio_3", "rx_pause_duration_prio_3",
  122. "rx_pause_transition_prio_3",
  123. "rx_pause_prio_4", "rx_pause_duration_prio_4",
  124. "rx_pause_transition_prio_4",
  125. "rx_pause_prio_5", "rx_pause_duration_prio_5",
  126. "rx_pause_transition_prio_5",
  127. "rx_pause_prio_6", "rx_pause_duration_prio_6",
  128. "rx_pause_transition_prio_6",
  129. "rx_pause_prio_7", "rx_pause_duration_prio_7",
  130. "rx_pause_transition_prio_7",
  131. /* flow control statistics rx */
  132. "rx_pause", "rx_pause_duration", "rx_pause_transition",
  133. /* priority flow control statistics tx */
  134. "tx_pause_prio_0", "tx_pause_duration_prio_0",
  135. "tx_pause_transition_prio_0",
  136. "tx_pause_prio_1", "tx_pause_duration_prio_1",
  137. "tx_pause_transition_prio_1",
  138. "tx_pause_prio_2", "tx_pause_duration_prio_2",
  139. "tx_pause_transition_prio_2",
  140. "tx_pause_prio_3", "tx_pause_duration_prio_3",
  141. "tx_pause_transition_prio_3",
  142. "tx_pause_prio_4", "tx_pause_duration_prio_4",
  143. "tx_pause_transition_prio_4",
  144. "tx_pause_prio_5", "tx_pause_duration_prio_5",
  145. "tx_pause_transition_prio_5",
  146. "tx_pause_prio_6", "tx_pause_duration_prio_6",
  147. "tx_pause_transition_prio_6",
  148. "tx_pause_prio_7", "tx_pause_duration_prio_7",
  149. "tx_pause_transition_prio_7",
  150. /* flow control statistics tx */
  151. "tx_pause", "tx_pause_duration", "tx_pause_transition",
  152. /* packet statistics */
  153. "rx_multicast_packets",
  154. "rx_broadcast_packets",
  155. "rx_jabbers",
  156. "rx_in_range_length_error",
  157. "rx_out_range_length_error",
  158. "tx_multicast_packets",
  159. "tx_broadcast_packets",
  160. "rx_prio_0_packets", "rx_prio_0_bytes",
  161. "rx_prio_1_packets", "rx_prio_1_bytes",
  162. "rx_prio_2_packets", "rx_prio_2_bytes",
  163. "rx_prio_3_packets", "rx_prio_3_bytes",
  164. "rx_prio_4_packets", "rx_prio_4_bytes",
  165. "rx_prio_5_packets", "rx_prio_5_bytes",
  166. "rx_prio_6_packets", "rx_prio_6_bytes",
  167. "rx_prio_7_packets", "rx_prio_7_bytes",
  168. "rx_novlan_packets", "rx_novlan_bytes",
  169. "tx_prio_0_packets", "tx_prio_0_bytes",
  170. "tx_prio_1_packets", "tx_prio_1_bytes",
  171. "tx_prio_2_packets", "tx_prio_2_bytes",
  172. "tx_prio_3_packets", "tx_prio_3_bytes",
  173. "tx_prio_4_packets", "tx_prio_4_bytes",
  174. "tx_prio_5_packets", "tx_prio_5_bytes",
  175. "tx_prio_6_packets", "tx_prio_6_bytes",
  176. "tx_prio_7_packets", "tx_prio_7_bytes",
  177. "tx_novlan_packets", "tx_novlan_bytes",
  178. /* xdp statistics */
  179. "rx_xdp_drop",
  180. "rx_xdp_tx",
  181. "rx_xdp_tx_full",
  182. /* phy statistics */
  183. "rx_packets_phy", "rx_bytes_phy",
  184. "tx_packets_phy", "tx_bytes_phy",
  185. };
  186. static const char mlx4_en_test_names[][ETH_GSTRING_LEN]= {
  187. "Interrupt Test",
  188. "Link Test",
  189. "Speed Test",
  190. "Register Test",
  191. "Loopback Test",
  192. };
  193. static u32 mlx4_en_get_msglevel(struct net_device *dev)
  194. {
  195. return ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable;
  196. }
  197. static void mlx4_en_set_msglevel(struct net_device *dev, u32 val)
  198. {
  199. ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable = val;
  200. }
  201. static void mlx4_en_get_wol(struct net_device *netdev,
  202. struct ethtool_wolinfo *wol)
  203. {
  204. struct mlx4_en_priv *priv = netdev_priv(netdev);
  205. struct mlx4_caps *caps = &priv->mdev->dev->caps;
  206. int err = 0;
  207. u64 config = 0;
  208. u64 mask;
  209. if ((priv->port < 1) || (priv->port > 2)) {
  210. en_err(priv, "Failed to get WoL information\n");
  211. return;
  212. }
  213. mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
  214. MLX4_DEV_CAP_FLAG_WOL_PORT2;
  215. if (!(caps->flags & mask)) {
  216. wol->supported = 0;
  217. wol->wolopts = 0;
  218. return;
  219. }
  220. if (caps->wol_port[priv->port])
  221. wol->supported = WAKE_MAGIC;
  222. else
  223. wol->supported = 0;
  224. err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
  225. if (err) {
  226. en_err(priv, "Failed to get WoL information\n");
  227. return;
  228. }
  229. if ((config & MLX4_EN_WOL_ENABLED) && (config & MLX4_EN_WOL_MAGIC))
  230. wol->wolopts = WAKE_MAGIC;
  231. else
  232. wol->wolopts = 0;
  233. }
  234. static int mlx4_en_set_wol(struct net_device *netdev,
  235. struct ethtool_wolinfo *wol)
  236. {
  237. struct mlx4_en_priv *priv = netdev_priv(netdev);
  238. u64 config = 0;
  239. int err = 0;
  240. u64 mask;
  241. if ((priv->port < 1) || (priv->port > 2))
  242. return -EOPNOTSUPP;
  243. mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
  244. MLX4_DEV_CAP_FLAG_WOL_PORT2;
  245. if (!(priv->mdev->dev->caps.flags & mask))
  246. return -EOPNOTSUPP;
  247. if (wol->supported & ~WAKE_MAGIC)
  248. return -EINVAL;
  249. err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
  250. if (err) {
  251. en_err(priv, "Failed to get WoL info, unable to modify\n");
  252. return err;
  253. }
  254. if (wol->wolopts & WAKE_MAGIC) {
  255. config |= MLX4_EN_WOL_DO_MODIFY | MLX4_EN_WOL_ENABLED |
  256. MLX4_EN_WOL_MAGIC;
  257. } else {
  258. config &= ~(MLX4_EN_WOL_ENABLED | MLX4_EN_WOL_MAGIC);
  259. config |= MLX4_EN_WOL_DO_MODIFY;
  260. }
  261. err = mlx4_wol_write(priv->mdev->dev, config, priv->port);
  262. if (err)
  263. en_err(priv, "Failed to set WoL information\n");
  264. return err;
  265. }
  266. struct bitmap_iterator {
  267. unsigned long *stats_bitmap;
  268. unsigned int count;
  269. unsigned int iterator;
  270. bool advance_array; /* if set, force no increments */
  271. };
  272. static inline void bitmap_iterator_init(struct bitmap_iterator *h,
  273. unsigned long *stats_bitmap,
  274. int count)
  275. {
  276. h->iterator = 0;
  277. h->advance_array = !bitmap_empty(stats_bitmap, count);
  278. h->count = h->advance_array ? bitmap_weight(stats_bitmap, count)
  279. : count;
  280. h->stats_bitmap = stats_bitmap;
  281. }
  282. static inline int bitmap_iterator_test(struct bitmap_iterator *h)
  283. {
  284. return !h->advance_array ? 1 : test_bit(h->iterator, h->stats_bitmap);
  285. }
  286. static inline int bitmap_iterator_inc(struct bitmap_iterator *h)
  287. {
  288. return h->iterator++;
  289. }
  290. static inline unsigned int
  291. bitmap_iterator_count(struct bitmap_iterator *h)
  292. {
  293. return h->count;
  294. }
  295. static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
  296. {
  297. struct mlx4_en_priv *priv = netdev_priv(dev);
  298. struct bitmap_iterator it;
  299. bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS);
  300. switch (sset) {
  301. case ETH_SS_STATS:
  302. return bitmap_iterator_count(&it) +
  303. (priv->tx_ring_num[TX] * 2) +
  304. (priv->rx_ring_num * (3 + NUM_XDP_STATS));
  305. case ETH_SS_TEST:
  306. return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
  307. & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
  308. case ETH_SS_PRIV_FLAGS:
  309. return ARRAY_SIZE(mlx4_en_priv_flags);
  310. default:
  311. return -EOPNOTSUPP;
  312. }
  313. }
  314. static void mlx4_en_get_ethtool_stats(struct net_device *dev,
  315. struct ethtool_stats *stats, uint64_t *data)
  316. {
  317. struct mlx4_en_priv *priv = netdev_priv(dev);
  318. int index = 0;
  319. int i;
  320. struct bitmap_iterator it;
  321. bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS);
  322. spin_lock_bh(&priv->stats_lock);
  323. mlx4_en_fold_software_stats(dev);
  324. for (i = 0; i < NUM_MAIN_STATS; i++, bitmap_iterator_inc(&it))
  325. if (bitmap_iterator_test(&it))
  326. data[index++] = ((unsigned long *)&dev->stats)[i];
  327. for (i = 0; i < NUM_PORT_STATS; i++, bitmap_iterator_inc(&it))
  328. if (bitmap_iterator_test(&it))
  329. data[index++] = ((unsigned long *)&priv->port_stats)[i];
  330. for (i = 0; i < NUM_PF_STATS; i++, bitmap_iterator_inc(&it))
  331. if (bitmap_iterator_test(&it))
  332. data[index++] =
  333. ((unsigned long *)&priv->pf_stats)[i];
  334. for (i = 0; i < NUM_FLOW_PRIORITY_STATS_RX;
  335. i++, bitmap_iterator_inc(&it))
  336. if (bitmap_iterator_test(&it))
  337. data[index++] =
  338. ((u64 *)&priv->rx_priority_flowstats)[i];
  339. for (i = 0; i < NUM_FLOW_STATS_RX; i++, bitmap_iterator_inc(&it))
  340. if (bitmap_iterator_test(&it))
  341. data[index++] = ((u64 *)&priv->rx_flowstats)[i];
  342. for (i = 0; i < NUM_FLOW_PRIORITY_STATS_TX;
  343. i++, bitmap_iterator_inc(&it))
  344. if (bitmap_iterator_test(&it))
  345. data[index++] =
  346. ((u64 *)&priv->tx_priority_flowstats)[i];
  347. for (i = 0; i < NUM_FLOW_STATS_TX; i++, bitmap_iterator_inc(&it))
  348. if (bitmap_iterator_test(&it))
  349. data[index++] = ((u64 *)&priv->tx_flowstats)[i];
  350. for (i = 0; i < NUM_PKT_STATS; i++, bitmap_iterator_inc(&it))
  351. if (bitmap_iterator_test(&it))
  352. data[index++] = ((unsigned long *)&priv->pkstats)[i];
  353. for (i = 0; i < NUM_XDP_STATS; i++, bitmap_iterator_inc(&it))
  354. if (bitmap_iterator_test(&it))
  355. data[index++] = ((unsigned long *)&priv->xdp_stats)[i];
  356. for (i = 0; i < NUM_PHY_STATS; i++, bitmap_iterator_inc(&it))
  357. if (bitmap_iterator_test(&it))
  358. data[index++] = ((unsigned long *)&priv->phy_stats)[i];
  359. for (i = 0; i < priv->tx_ring_num[TX]; i++) {
  360. data[index++] = priv->tx_ring[TX][i]->packets;
  361. data[index++] = priv->tx_ring[TX][i]->bytes;
  362. }
  363. for (i = 0; i < priv->rx_ring_num; i++) {
  364. data[index++] = priv->rx_ring[i]->packets;
  365. data[index++] = priv->rx_ring[i]->bytes;
  366. data[index++] = priv->rx_ring[i]->dropped;
  367. data[index++] = priv->rx_ring[i]->xdp_drop;
  368. data[index++] = priv->rx_ring[i]->xdp_tx;
  369. data[index++] = priv->rx_ring[i]->xdp_tx_full;
  370. }
  371. spin_unlock_bh(&priv->stats_lock);
  372. }
  373. static void mlx4_en_self_test(struct net_device *dev,
  374. struct ethtool_test *etest, u64 *buf)
  375. {
  376. mlx4_en_ex_selftest(dev, &etest->flags, buf);
  377. }
  378. static void mlx4_en_get_strings(struct net_device *dev,
  379. uint32_t stringset, uint8_t *data)
  380. {
  381. struct mlx4_en_priv *priv = netdev_priv(dev);
  382. int index = 0;
  383. int i, strings = 0;
  384. struct bitmap_iterator it;
  385. bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS);
  386. switch (stringset) {
  387. case ETH_SS_TEST:
  388. for (i = 0; i < MLX4_EN_NUM_SELF_TEST - 2; i++)
  389. strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
  390. if (priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK)
  391. for (; i < MLX4_EN_NUM_SELF_TEST; i++)
  392. strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
  393. break;
  394. case ETH_SS_STATS:
  395. /* Add main counters */
  396. for (i = 0; i < NUM_MAIN_STATS; i++, strings++,
  397. bitmap_iterator_inc(&it))
  398. if (bitmap_iterator_test(&it))
  399. strcpy(data + (index++) * ETH_GSTRING_LEN,
  400. main_strings[strings]);
  401. for (i = 0; i < NUM_PORT_STATS; i++, strings++,
  402. bitmap_iterator_inc(&it))
  403. if (bitmap_iterator_test(&it))
  404. strcpy(data + (index++) * ETH_GSTRING_LEN,
  405. main_strings[strings]);
  406. for (i = 0; i < NUM_PF_STATS; i++, strings++,
  407. bitmap_iterator_inc(&it))
  408. if (bitmap_iterator_test(&it))
  409. strcpy(data + (index++) * ETH_GSTRING_LEN,
  410. main_strings[strings]);
  411. for (i = 0; i < NUM_FLOW_STATS; i++, strings++,
  412. bitmap_iterator_inc(&it))
  413. if (bitmap_iterator_test(&it))
  414. strcpy(data + (index++) * ETH_GSTRING_LEN,
  415. main_strings[strings]);
  416. for (i = 0; i < NUM_PKT_STATS; i++, strings++,
  417. bitmap_iterator_inc(&it))
  418. if (bitmap_iterator_test(&it))
  419. strcpy(data + (index++) * ETH_GSTRING_LEN,
  420. main_strings[strings]);
  421. for (i = 0; i < NUM_XDP_STATS; i++, strings++,
  422. bitmap_iterator_inc(&it))
  423. if (bitmap_iterator_test(&it))
  424. strcpy(data + (index++) * ETH_GSTRING_LEN,
  425. main_strings[strings]);
  426. for (i = 0; i < NUM_PHY_STATS; i++, strings++,
  427. bitmap_iterator_inc(&it))
  428. if (bitmap_iterator_test(&it))
  429. strcpy(data + (index++) * ETH_GSTRING_LEN,
  430. main_strings[strings]);
  431. for (i = 0; i < priv->tx_ring_num[TX]; i++) {
  432. sprintf(data + (index++) * ETH_GSTRING_LEN,
  433. "tx%d_packets", i);
  434. sprintf(data + (index++) * ETH_GSTRING_LEN,
  435. "tx%d_bytes", i);
  436. }
  437. for (i = 0; i < priv->rx_ring_num; i++) {
  438. sprintf(data + (index++) * ETH_GSTRING_LEN,
  439. "rx%d_packets", i);
  440. sprintf(data + (index++) * ETH_GSTRING_LEN,
  441. "rx%d_bytes", i);
  442. sprintf(data + (index++) * ETH_GSTRING_LEN,
  443. "rx%d_dropped", i);
  444. sprintf(data + (index++) * ETH_GSTRING_LEN,
  445. "rx%d_xdp_drop", i);
  446. sprintf(data + (index++) * ETH_GSTRING_LEN,
  447. "rx%d_xdp_tx", i);
  448. sprintf(data + (index++) * ETH_GSTRING_LEN,
  449. "rx%d_xdp_tx_full", i);
  450. }
  451. break;
  452. case ETH_SS_PRIV_FLAGS:
  453. for (i = 0; i < ARRAY_SIZE(mlx4_en_priv_flags); i++)
  454. strcpy(data + i * ETH_GSTRING_LEN,
  455. mlx4_en_priv_flags[i]);
  456. break;
  457. }
  458. }
  459. static u32 mlx4_en_autoneg_get(struct net_device *dev)
  460. {
  461. struct mlx4_en_priv *priv = netdev_priv(dev);
  462. struct mlx4_en_dev *mdev = priv->mdev;
  463. u32 autoneg = AUTONEG_DISABLE;
  464. if ((mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP) &&
  465. (priv->port_state.flags & MLX4_EN_PORT_ANE))
  466. autoneg = AUTONEG_ENABLE;
  467. return autoneg;
  468. }
  469. static void ptys2ethtool_update_supported_port(unsigned long *mask,
  470. struct mlx4_ptys_reg *ptys_reg)
  471. {
  472. u32 eth_proto = be32_to_cpu(ptys_reg->eth_proto_cap);
  473. if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_T)
  474. | MLX4_PROT_MASK(MLX4_1000BASE_T)
  475. | MLX4_PROT_MASK(MLX4_100BASE_TX))) {
  476. __set_bit(ETHTOOL_LINK_MODE_TP_BIT, mask);
  477. } else if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR)
  478. | MLX4_PROT_MASK(MLX4_10GBASE_SR)
  479. | MLX4_PROT_MASK(MLX4_56GBASE_SR4)
  480. | MLX4_PROT_MASK(MLX4_40GBASE_CR4)
  481. | MLX4_PROT_MASK(MLX4_40GBASE_SR4)
  482. | MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII))) {
  483. __set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mask);
  484. } else if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4)
  485. | MLX4_PROT_MASK(MLX4_40GBASE_KR4)
  486. | MLX4_PROT_MASK(MLX4_20GBASE_KR2)
  487. | MLX4_PROT_MASK(MLX4_10GBASE_KR)
  488. | MLX4_PROT_MASK(MLX4_10GBASE_KX4)
  489. | MLX4_PROT_MASK(MLX4_1000BASE_KX))) {
  490. __set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mask);
  491. }
  492. }
  493. static u32 ptys_get_active_port(struct mlx4_ptys_reg *ptys_reg)
  494. {
  495. u32 eth_proto = be32_to_cpu(ptys_reg->eth_proto_oper);
  496. if (!eth_proto) /* link down */
  497. eth_proto = be32_to_cpu(ptys_reg->eth_proto_cap);
  498. if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_T)
  499. | MLX4_PROT_MASK(MLX4_1000BASE_T)
  500. | MLX4_PROT_MASK(MLX4_100BASE_TX))) {
  501. return PORT_TP;
  502. }
  503. if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_SR)
  504. | MLX4_PROT_MASK(MLX4_56GBASE_SR4)
  505. | MLX4_PROT_MASK(MLX4_40GBASE_SR4)
  506. | MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII))) {
  507. return PORT_FIBRE;
  508. }
  509. if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR)
  510. | MLX4_PROT_MASK(MLX4_56GBASE_CR4)
  511. | MLX4_PROT_MASK(MLX4_40GBASE_CR4))) {
  512. return PORT_DA;
  513. }
  514. if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4)
  515. | MLX4_PROT_MASK(MLX4_40GBASE_KR4)
  516. | MLX4_PROT_MASK(MLX4_20GBASE_KR2)
  517. | MLX4_PROT_MASK(MLX4_10GBASE_KR)
  518. | MLX4_PROT_MASK(MLX4_10GBASE_KX4)
  519. | MLX4_PROT_MASK(MLX4_1000BASE_KX))) {
  520. return PORT_NONE;
  521. }
  522. return PORT_OTHER;
  523. }
  524. #define MLX4_LINK_MODES_SZ \
  525. (FIELD_SIZEOF(struct mlx4_ptys_reg, eth_proto_cap) * 8)
  526. enum ethtool_report {
  527. SUPPORTED = 0,
  528. ADVERTISED = 1,
  529. };
  530. struct ptys2ethtool_config {
  531. __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
  532. __ETHTOOL_DECLARE_LINK_MODE_MASK(advertised);
  533. u32 speed;
  534. };
  535. static unsigned long *ptys2ethtool_link_mode(struct ptys2ethtool_config *cfg,
  536. enum ethtool_report report)
  537. {
  538. switch (report) {
  539. case SUPPORTED:
  540. return cfg->supported;
  541. case ADVERTISED:
  542. return cfg->advertised;
  543. }
  544. return NULL;
  545. }
  546. #define MLX4_BUILD_PTYS2ETHTOOL_CONFIG(reg_, speed_, ...) \
  547. ({ \
  548. struct ptys2ethtool_config *cfg; \
  549. const unsigned int modes[] = { __VA_ARGS__ }; \
  550. unsigned int i; \
  551. cfg = &ptys2ethtool_map[reg_]; \
  552. cfg->speed = speed_; \
  553. bitmap_zero(cfg->supported, \
  554. __ETHTOOL_LINK_MODE_MASK_NBITS); \
  555. bitmap_zero(cfg->advertised, \
  556. __ETHTOOL_LINK_MODE_MASK_NBITS); \
  557. for (i = 0 ; i < ARRAY_SIZE(modes) ; ++i) { \
  558. __set_bit(modes[i], cfg->supported); \
  559. __set_bit(modes[i], cfg->advertised); \
  560. } \
  561. })
  562. /* Translates mlx4 link mode to equivalent ethtool Link modes/speed */
  563. static struct ptys2ethtool_config ptys2ethtool_map[MLX4_LINK_MODES_SZ];
  564. void __init mlx4_en_init_ptys2ethtool_map(void)
  565. {
  566. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_100BASE_TX, SPEED_100,
  567. ETHTOOL_LINK_MODE_100baseT_Full_BIT);
  568. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_T, SPEED_1000,
  569. ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
  570. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_CX_SGMII, SPEED_1000,
  571. ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
  572. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_KX, SPEED_1000,
  573. ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
  574. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_T, SPEED_10000,
  575. ETHTOOL_LINK_MODE_10000baseT_Full_BIT);
  576. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_CX4, SPEED_10000,
  577. ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT);
  578. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_KX4, SPEED_10000,
  579. ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT);
  580. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_KR, SPEED_10000,
  581. ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
  582. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_CR, SPEED_10000,
  583. ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
  584. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_SR, SPEED_10000,
  585. ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
  586. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_20GBASE_KR2, SPEED_20000,
  587. ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT,
  588. ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT);
  589. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_40GBASE_CR4, SPEED_40000,
  590. ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT);
  591. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_40GBASE_KR4, SPEED_40000,
  592. ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT);
  593. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_40GBASE_SR4, SPEED_40000,
  594. ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT);
  595. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_56GBASE_KR4, SPEED_56000,
  596. ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT);
  597. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_56GBASE_CR4, SPEED_56000,
  598. ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT);
  599. MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_56GBASE_SR4, SPEED_56000,
  600. ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT);
  601. };
  602. static void ptys2ethtool_update_link_modes(unsigned long *link_modes,
  603. u32 eth_proto,
  604. enum ethtool_report report)
  605. {
  606. int i;
  607. for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
  608. if (eth_proto & MLX4_PROT_MASK(i))
  609. bitmap_or(link_modes, link_modes,
  610. ptys2ethtool_link_mode(&ptys2ethtool_map[i],
  611. report),
  612. __ETHTOOL_LINK_MODE_MASK_NBITS);
  613. }
  614. }
  615. static u32 ethtool2ptys_link_modes(const unsigned long *link_modes,
  616. enum ethtool_report report)
  617. {
  618. int i;
  619. u32 ptys_modes = 0;
  620. for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
  621. if (bitmap_intersects(
  622. ptys2ethtool_link_mode(&ptys2ethtool_map[i],
  623. report),
  624. link_modes,
  625. __ETHTOOL_LINK_MODE_MASK_NBITS))
  626. ptys_modes |= 1 << i;
  627. }
  628. return ptys_modes;
  629. }
  630. /* Convert actual speed (SPEED_XXX) to ptys link modes */
  631. static u32 speed2ptys_link_modes(u32 speed)
  632. {
  633. int i;
  634. u32 ptys_modes = 0;
  635. for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
  636. if (ptys2ethtool_map[i].speed == speed)
  637. ptys_modes |= 1 << i;
  638. }
  639. return ptys_modes;
  640. }
  641. static int
  642. ethtool_get_ptys_link_ksettings(struct net_device *dev,
  643. struct ethtool_link_ksettings *link_ksettings)
  644. {
  645. struct mlx4_en_priv *priv = netdev_priv(dev);
  646. struct mlx4_ptys_reg ptys_reg;
  647. u32 eth_proto;
  648. int ret;
  649. memset(&ptys_reg, 0, sizeof(ptys_reg));
  650. ptys_reg.local_port = priv->port;
  651. ptys_reg.proto_mask = MLX4_PTYS_EN;
  652. ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev,
  653. MLX4_ACCESS_REG_QUERY, &ptys_reg);
  654. if (ret) {
  655. en_warn(priv, "Failed to run mlx4_ACCESS_PTYS_REG status(%x)",
  656. ret);
  657. return ret;
  658. }
  659. en_dbg(DRV, priv, "ptys_reg.proto_mask %x\n",
  660. ptys_reg.proto_mask);
  661. en_dbg(DRV, priv, "ptys_reg.eth_proto_cap %x\n",
  662. be32_to_cpu(ptys_reg.eth_proto_cap));
  663. en_dbg(DRV, priv, "ptys_reg.eth_proto_admin %x\n",
  664. be32_to_cpu(ptys_reg.eth_proto_admin));
  665. en_dbg(DRV, priv, "ptys_reg.eth_proto_oper %x\n",
  666. be32_to_cpu(ptys_reg.eth_proto_oper));
  667. en_dbg(DRV, priv, "ptys_reg.eth_proto_lp_adv %x\n",
  668. be32_to_cpu(ptys_reg.eth_proto_lp_adv));
  669. /* reset supported/advertising masks */
  670. ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
  671. ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
  672. ptys2ethtool_update_supported_port(link_ksettings->link_modes.supported,
  673. &ptys_reg);
  674. eth_proto = be32_to_cpu(ptys_reg.eth_proto_cap);
  675. ptys2ethtool_update_link_modes(link_ksettings->link_modes.supported,
  676. eth_proto, SUPPORTED);
  677. eth_proto = be32_to_cpu(ptys_reg.eth_proto_admin);
  678. ptys2ethtool_update_link_modes(link_ksettings->link_modes.advertising,
  679. eth_proto, ADVERTISED);
  680. ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
  681. Pause);
  682. ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
  683. Asym_Pause);
  684. if (priv->prof->tx_pause)
  685. ethtool_link_ksettings_add_link_mode(link_ksettings,
  686. advertising, Pause);
  687. if (priv->prof->tx_pause ^ priv->prof->rx_pause)
  688. ethtool_link_ksettings_add_link_mode(link_ksettings,
  689. advertising, Asym_Pause);
  690. link_ksettings->base.port = ptys_get_active_port(&ptys_reg);
  691. if (mlx4_en_autoneg_get(dev)) {
  692. ethtool_link_ksettings_add_link_mode(link_ksettings,
  693. supported, Autoneg);
  694. ethtool_link_ksettings_add_link_mode(link_ksettings,
  695. advertising, Autoneg);
  696. }
  697. link_ksettings->base.autoneg
  698. = (priv->port_state.flags & MLX4_EN_PORT_ANC) ?
  699. AUTONEG_ENABLE : AUTONEG_DISABLE;
  700. eth_proto = be32_to_cpu(ptys_reg.eth_proto_lp_adv);
  701. ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising);
  702. ptys2ethtool_update_link_modes(
  703. link_ksettings->link_modes.lp_advertising,
  704. eth_proto, ADVERTISED);
  705. if (priv->port_state.flags & MLX4_EN_PORT_ANC)
  706. ethtool_link_ksettings_add_link_mode(link_ksettings,
  707. lp_advertising, Autoneg);
  708. link_ksettings->base.phy_address = 0;
  709. link_ksettings->base.mdio_support = 0;
  710. link_ksettings->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
  711. link_ksettings->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
  712. return ret;
  713. }
/* Fallback link settings used when PTYS is unavailable: report a fixed
 * 10GbaseT full-duplex link with autoneg disabled, and guess the port
 * type from the transceiver code cached in port_state.
 */
static void
ethtool_get_default_link_ksettings(
	struct net_device *dev, struct ethtool_link_ksettings *link_ksettings)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int trans_type;

	link_ksettings->base.autoneg = AUTONEG_DISABLE;

	ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
	ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
					     10000baseT_Full);

	ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
	ethtool_link_ksettings_add_link_mode(link_ksettings, advertising,
					     10000baseT_Full);

	trans_type = priv->port_state.transceiver;
	/* NOTE(review): codes 0x1..0xC are treated as fibre modules and
	 * 0x80 or 0 (unknown) as twisted pair — presumably firmware-defined
	 * values; confirm against the device PRM.
	 */
	if (trans_type > 0 && trans_type <= 0xC) {
		link_ksettings->base.port = PORT_FIBRE;
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     supported, FIBRE);
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     advertising, FIBRE);
	} else if (trans_type == 0x80 || trans_type == 0) {
		link_ksettings->base.port = PORT_TP;
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     supported, TP);
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     advertising, TP);
	} else {
		/* Unrecognized transceiver code. */
		link_ksettings->base.port = -1;
	}
}
/* ethtool get_link_ksettings entry point: refresh the cached port state,
 * then fill settings from PTYS when the device supports ETH protocol
 * control, falling back to fixed defaults otherwise.
 */
static int
mlx4_en_get_link_ksettings(struct net_device *dev,
			   struct ethtool_link_ksettings *link_ksettings)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int ret = -EINVAL;

	/* NOTE(review): any QUERY_PORT failure is reported as -ENOMEM
	 * regardless of the underlying cause — looks odd but kept as-is.
	 */
	if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
		return -ENOMEM;

	en_dbg(DRV, priv, "query port state.flags ANC(%x) ANE(%x)\n",
	       priv->port_state.flags & MLX4_EN_PORT_ANC,
	       priv->port_state.flags & MLX4_EN_PORT_ANE);

	if (priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL)
		ret = ethtool_get_ptys_link_ksettings(dev, link_ksettings);
	if (ret) /* ETH PROT CRTL is not supported or PTYS CMD failed */
		ethtool_get_default_link_ksettings(dev, link_ksettings);

	/* Speed/duplex reflect the live carrier state, not the config. */
	if (netif_carrier_ok(dev)) {
		link_ksettings->base.speed = priv->port_state.link_speed;
		link_ksettings->base.duplex = DUPLEX_FULL;
	} else {
		link_ksettings->base.speed = SPEED_UNKNOWN;
		link_ksettings->base.duplex = DUPLEX_UNKNOWN;
	}
	return 0;
}
  768. /* Calculate PTYS admin according ethtool speed (SPEED_XXX) */
  769. static __be32 speed_set_ptys_admin(struct mlx4_en_priv *priv, u32 speed,
  770. __be32 proto_cap)
  771. {
  772. __be32 proto_admin = 0;
  773. if (!speed) { /* Speed = 0 ==> Reset Link modes */
  774. proto_admin = proto_cap;
  775. en_info(priv, "Speed was set to 0, Reset advertised Link Modes to default (%x)\n",
  776. be32_to_cpu(proto_cap));
  777. } else {
  778. u32 ptys_link_modes = speed2ptys_link_modes(speed);
  779. proto_admin = cpu_to_be32(ptys_link_modes) & proto_cap;
  780. en_info(priv, "Setting Speed to %d\n", speed);
  781. }
  782. return proto_admin;
  783. }
  784. static int
  785. mlx4_en_set_link_ksettings(struct net_device *dev,
  786. const struct ethtool_link_ksettings *link_ksettings)
  787. {
  788. struct mlx4_en_priv *priv = netdev_priv(dev);
  789. struct mlx4_ptys_reg ptys_reg;
  790. __be32 proto_admin;
  791. u8 cur_autoneg;
  792. int ret;
  793. u32 ptys_adv = ethtool2ptys_link_modes(
  794. link_ksettings->link_modes.advertising, ADVERTISED);
  795. const int speed = link_ksettings->base.speed;
  796. en_dbg(DRV, priv,
  797. "Set Speed=%d adv={%*pbl} autoneg=%d duplex=%d\n",
  798. speed, __ETHTOOL_LINK_MODE_MASK_NBITS,
  799. link_ksettings->link_modes.advertising,
  800. link_ksettings->base.autoneg,
  801. link_ksettings->base.duplex);
  802. if (!(priv->mdev->dev->caps.flags2 &
  803. MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL) ||
  804. (link_ksettings->base.duplex == DUPLEX_HALF))
  805. return -EINVAL;
  806. memset(&ptys_reg, 0, sizeof(ptys_reg));
  807. ptys_reg.local_port = priv->port;
  808. ptys_reg.proto_mask = MLX4_PTYS_EN;
  809. ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev,
  810. MLX4_ACCESS_REG_QUERY, &ptys_reg);
  811. if (ret) {
  812. en_warn(priv, "Failed to QUERY mlx4_ACCESS_PTYS_REG status(%x)\n",
  813. ret);
  814. return 0;
  815. }
  816. cur_autoneg = ptys_reg.flags & MLX4_PTYS_AN_DISABLE_ADMIN ?
  817. AUTONEG_DISABLE : AUTONEG_ENABLE;
  818. if (link_ksettings->base.autoneg == AUTONEG_DISABLE) {
  819. proto_admin = speed_set_ptys_admin(priv, speed,
  820. ptys_reg.eth_proto_cap);
  821. if ((be32_to_cpu(proto_admin) &
  822. (MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII) |
  823. MLX4_PROT_MASK(MLX4_1000BASE_KX))) &&
  824. (ptys_reg.flags & MLX4_PTYS_AN_DISABLE_CAP))
  825. ptys_reg.flags |= MLX4_PTYS_AN_DISABLE_ADMIN;
  826. } else {
  827. proto_admin = cpu_to_be32(ptys_adv);
  828. ptys_reg.flags &= ~MLX4_PTYS_AN_DISABLE_ADMIN;
  829. }
  830. proto_admin &= ptys_reg.eth_proto_cap;
  831. if (!proto_admin) {
  832. en_warn(priv, "Not supported link mode(s) requested, check supported link modes.\n");
  833. return -EINVAL; /* nothing to change due to bad input */
  834. }
  835. if ((proto_admin == ptys_reg.eth_proto_admin) &&
  836. ((ptys_reg.flags & MLX4_PTYS_AN_DISABLE_CAP) &&
  837. (link_ksettings->base.autoneg == cur_autoneg)))
  838. return 0; /* Nothing to change */
  839. en_dbg(DRV, priv, "mlx4_ACCESS_PTYS_REG SET: ptys_reg.eth_proto_admin = 0x%x\n",
  840. be32_to_cpu(proto_admin));
  841. ptys_reg.eth_proto_admin = proto_admin;
  842. ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev, MLX4_ACCESS_REG_WRITE,
  843. &ptys_reg);
  844. if (ret) {
  845. en_warn(priv, "Failed to write mlx4_ACCESS_PTYS_REG eth_proto_admin(0x%x) status(0x%x)",
  846. be32_to_cpu(ptys_reg.eth_proto_admin), ret);
  847. return ret;
  848. }
  849. mutex_lock(&priv->mdev->state_lock);
  850. if (priv->port_up) {
  851. en_warn(priv, "Port link mode changed, restarting port...\n");
  852. mlx4_en_stop_port(dev, 1);
  853. if (mlx4_en_start_port(dev))
  854. en_err(priv, "Failed restarting port %d\n", priv->port);
  855. }
  856. mutex_unlock(&priv->mdev->state_lock);
  857. return 0;
  858. }
  859. static int mlx4_en_get_coalesce(struct net_device *dev,
  860. struct ethtool_coalesce *coal)
  861. {
  862. struct mlx4_en_priv *priv = netdev_priv(dev);
  863. coal->tx_coalesce_usecs = priv->tx_usecs;
  864. coal->tx_max_coalesced_frames = priv->tx_frames;
  865. coal->tx_max_coalesced_frames_irq = priv->tx_work_limit;
  866. coal->rx_coalesce_usecs = priv->rx_usecs;
  867. coal->rx_max_coalesced_frames = priv->rx_frames;
  868. coal->pkt_rate_low = priv->pkt_rate_low;
  869. coal->rx_coalesce_usecs_low = priv->rx_usecs_low;
  870. coal->pkt_rate_high = priv->pkt_rate_high;
  871. coal->rx_coalesce_usecs_high = priv->rx_usecs_high;
  872. coal->rate_sample_interval = priv->sample_interval;
  873. coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal;
  874. return 0;
  875. }
/* Validate and store new interrupt-moderation settings, then push them to
 * the hardware via mlx4_en_moderation_update().
 */
static int mlx4_en_set_coalesce(struct net_device *dev,
				struct ethtool_coalesce *coal)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	/* A zero per-IRQ TX budget would stall TX completion processing. */
	if (!coal->tx_max_coalesced_frames_irq)
		return -EINVAL;

	/* Range-check all time values against the supported maximum. */
	if (coal->tx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME ||
	    coal->rx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME ||
	    coal->rx_coalesce_usecs_low > MLX4_EN_MAX_COAL_TIME ||
	    coal->rx_coalesce_usecs_high > MLX4_EN_MAX_COAL_TIME) {
		netdev_info(dev, "%s: maximum coalesce time supported is %d usecs\n",
			    __func__, MLX4_EN_MAX_COAL_TIME);
		return -ERANGE;
	}
	if (coal->tx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS ||
	    coal->rx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS) {
		netdev_info(dev, "%s: maximum coalesced frames supported is %d\n",
			    __func__, MLX4_EN_MAX_COAL_PKTS);
		return -ERANGE;
	}

	/* MLX4_EN_AUTO_CONF selects the driver's default RX targets. */
	priv->rx_frames = (coal->rx_max_coalesced_frames ==
			   MLX4_EN_AUTO_CONF) ?
				MLX4_EN_RX_COAL_TARGET :
				coal->rx_max_coalesced_frames;
	priv->rx_usecs = (coal->rx_coalesce_usecs ==
			  MLX4_EN_AUTO_CONF) ?
				MLX4_EN_RX_COAL_TIME :
				coal->rx_coalesce_usecs;

	/* Setting TX coalescing parameters */
	if (coal->tx_coalesce_usecs != priv->tx_usecs ||
	    coal->tx_max_coalesced_frames != priv->tx_frames) {
		priv->tx_usecs = coal->tx_coalesce_usecs;
		priv->tx_frames = coal->tx_max_coalesced_frames;
	}

	/* Set adaptive coalescing params */
	priv->pkt_rate_low = coal->pkt_rate_low;
	priv->rx_usecs_low = coal->rx_coalesce_usecs_low;
	priv->pkt_rate_high = coal->pkt_rate_high;
	priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
	priv->sample_interval = coal->rate_sample_interval;
	priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
	priv->tx_work_limit = coal->tx_max_coalesced_frames_irq;

	return mlx4_en_moderation_update(priv);
}
/* Configure link-level flow control (global pause). Pause autonegotiation
 * is not supported; global pause and per-priority pause (PFC) are mutually
 * exclusive, so enabling either pause direction clears the PFC masks.
 */
static int mlx4_en_set_pauseparam(struct net_device *dev,
				  struct ethtool_pauseparam *pause)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	u8 tx_pause, tx_ppp, rx_pause, rx_ppp;
	int err;

	if (pause->autoneg)
		return -EINVAL;

	tx_pause = !!(pause->tx_pause);
	rx_pause = !!(pause->rx_pause);
	/* Any global pause disables PFC; otherwise keep the profile's PFC. */
	rx_ppp = (tx_pause || rx_pause) ? 0 : priv->prof->rx_ppp;
	tx_ppp = (tx_pause || rx_pause) ? 0 : priv->prof->tx_ppp;

	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    tx_pause, tx_ppp, rx_pause, rx_ppp);
	if (err) {
		en_err(priv, "Failed setting pause params, err = %d\n", err);
		return err;
	}

	mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
					rx_ppp, rx_pause, tx_ppp, tx_pause);

	/* Commit to the profile only after the firmware accepted them. */
	priv->prof->tx_pause = tx_pause;
	priv->prof->rx_pause = rx_pause;
	priv->prof->tx_ppp = tx_ppp;
	priv->prof->rx_ppp = rx_ppp;

	return err;
}
  948. static void mlx4_en_get_pauseparam(struct net_device *dev,
  949. struct ethtool_pauseparam *pause)
  950. {
  951. struct mlx4_en_priv *priv = netdev_priv(dev);
  952. pause->tx_pause = priv->prof->tx_pause;
  953. pause->rx_pause = priv->prof->rx_pause;
  954. }
/* Resize the RX/TX rings. New resources are allocated into a scratch priv
 * first and only swapped in while the port is stopped, so an allocation
 * failure leaves the old rings fully intact.
 */
static int mlx4_en_set_ringparam(struct net_device *dev,
				 struct ethtool_ringparam *param)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_port_profile new_prof;
	struct mlx4_en_priv *tmp;
	u32 rx_size, tx_size;
	int port_up = 0;
	int err = 0;

	/* Jumbo and mini RX rings are not supported. */
	if (param->rx_jumbo_pending || param->rx_mini_pending)
		return -EINVAL;

	if (param->rx_pending < MLX4_EN_MIN_RX_SIZE) {
		en_warn(priv, "%s: rx_pending (%d) < min (%d)\n",
			__func__, param->rx_pending,
			MLX4_EN_MIN_RX_SIZE);
		return -EINVAL;
	}
	if (param->tx_pending < MLX4_EN_MIN_TX_SIZE) {
		en_warn(priv, "%s: tx_pending (%d) < min (%lu)\n",
			__func__, param->tx_pending,
			MLX4_EN_MIN_TX_SIZE);
		return -EINVAL;
	}

	/* Ring sizes are rounded up to the next power of two. */
	rx_size = roundup_pow_of_two(param->rx_pending);
	tx_size = roundup_pow_of_two(param->tx_pending);

	/* Nothing to do when both sizes already match. */
	if (rx_size == (priv->port_up ? priv->rx_ring[0]->actual_size :
					priv->rx_ring[0]->size) &&
	    tx_size == priv->tx_ring[TX][0]->size)
		return 0;

	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	mutex_lock(&mdev->state_lock);
	memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
	new_prof.tx_ring_size = tx_size;
	new_prof.rx_ring_size = rx_size;
	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
	if (err)
		goto out;

	/* Swap the rings in while the port is down. */
	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev, 1);
	}

	mlx4_en_safe_replace_resources(priv, tmp);

	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err)
			en_err(priv, "Failed starting port\n");
	}

	/* NOTE(review): a start_port error is overwritten by this call's
	 * return value — kept as-is.
	 */
	err = mlx4_en_moderation_update(priv);
out:
	kfree(tmp);
	mutex_unlock(&mdev->state_lock);
	return err;
}
  1011. static void mlx4_en_get_ringparam(struct net_device *dev,
  1012. struct ethtool_ringparam *param)
  1013. {
  1014. struct mlx4_en_priv *priv = netdev_priv(dev);
  1015. memset(param, 0, sizeof(*param));
  1016. param->rx_max_pending = MLX4_EN_MAX_RX_SIZE;
  1017. param->tx_max_pending = MLX4_EN_MAX_TX_SIZE;
  1018. param->rx_pending = priv->port_up ?
  1019. priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size;
  1020. param->tx_pending = priv->tx_ring[TX][0]->size;
  1021. }
  1022. static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
  1023. {
  1024. struct mlx4_en_priv *priv = netdev_priv(dev);
  1025. return rounddown_pow_of_two(priv->rx_ring_num);
  1026. }
  1027. static u32 mlx4_en_get_rxfh_key_size(struct net_device *netdev)
  1028. {
  1029. return MLX4_EN_RSS_KEY_SIZE;
  1030. }
  1031. static int mlx4_en_check_rxfh_func(struct net_device *dev, u8 hfunc)
  1032. {
  1033. struct mlx4_en_priv *priv = netdev_priv(dev);
  1034. /* check if requested function is supported by the device */
  1035. if (hfunc == ETH_RSS_HASH_TOP) {
  1036. if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP))
  1037. return -EINVAL;
  1038. if (!(dev->features & NETIF_F_RXHASH))
  1039. en_warn(priv, "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n");
  1040. return 0;
  1041. } else if (hfunc == ETH_RSS_HASH_XOR) {
  1042. if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR))
  1043. return -EINVAL;
  1044. if (dev->features & NETIF_F_RXHASH)
  1045. en_warn(priv, "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n");
  1046. return 0;
  1047. }
  1048. return -EINVAL;
  1049. }
  1050. static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key,
  1051. u8 *hfunc)
  1052. {
  1053. struct mlx4_en_priv *priv = netdev_priv(dev);
  1054. u32 n = mlx4_en_get_rxfh_indir_size(dev);
  1055. u32 i, rss_rings;
  1056. int err = 0;
  1057. rss_rings = priv->prof->rss_rings ?: n;
  1058. rss_rings = rounddown_pow_of_two(rss_rings);
  1059. for (i = 0; i < n; i++) {
  1060. if (!ring_index)
  1061. break;
  1062. ring_index[i] = i % rss_rings;
  1063. }
  1064. if (key)
  1065. memcpy(key, priv->rss_key, MLX4_EN_RSS_KEY_SIZE);
  1066. if (hfunc)
  1067. *hfunc = priv->rss_hash_fn;
  1068. return err;
  1069. }
/* Apply a new RSS configuration. Only a strict round-robin indirection
 * table over a power-of-two number of rings is supported; changing any
 * setting requires a port restart.
 */
static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
			    const u8 *key, const u8 hfunc)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	u32 n = mlx4_en_get_rxfh_indir_size(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int port_up = 0;
	int err = 0;
	int i;
	int rss_rings = 0;

	/* Calculate RSS table size and make sure flows are spread evenly
	 * between rings
	 */
	for (i = 0; i < n; i++) {
		if (!ring_index)
			break;
		/* First wrap back to ring 0 determines the ring count. */
		if (i > 0 && !ring_index[i] && !rss_rings)
			rss_rings = i;
		/* Every entry must follow the i % rss_rings pattern. */
		if (ring_index[i] != (i % (rss_rings ?: n)))
			return -EINVAL;
	}

	if (!rss_rings)
		rss_rings = n;

	/* RSS table size must be an order of 2 */
	if (!is_power_of_2(rss_rings))
		return -EINVAL;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE) {
		err = mlx4_en_check_rxfh_func(dev, hfunc);
		if (err)
			return err;
	}

	/* The new settings take effect on the next port start. */
	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev, 1);
	}

	if (ring_index)
		priv->prof->rss_rings = rss_rings;
	if (key)
		memcpy(priv->rss_key, key, MLX4_EN_RSS_KEY_SIZE);
	if (hfunc !=  ETH_RSS_HASH_NO_CHANGE)
		priv->rss_hash_fn = hfunc;

	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err)
			en_err(priv, "Failed starting port\n");
	}

	mutex_unlock(&mdev->state_lock);
	return err;
}
/* True when @field is all-zeros or all-ones — the only mask values the
 * flow-steering match fields below accept (no partial masks).
 */
#define all_zeros_or_all_ones(field)		\
	((field) == 0 || (field) == (__force typeof(field))-1)
/* Validate an ethtool RX flow spec against what the device's flow steering
 * can express. Returns 0 when the rule is acceptable, -EINVAL otherwise.
 */
static int mlx4_en_validate_flow(struct net_device *dev,
				 struct ethtool_rxnfc *cmd)
{
	struct ethtool_usrip4_spec *l3_mask;
	struct ethtool_tcpip4_spec *l4_mask;
	struct ethhdr *eth_mask;

	if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
		return -EINVAL;

	if (cmd->fs.flow_type & FLOW_MAC_EXT) {
		/* dest mac mask must be ff:ff:ff:ff:ff:ff */
		if (!is_broadcast_ether_addr(cmd->fs.m_ext.h_dest))
			return -EINVAL;
	}

	switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		/* ToS matching is rejected (presumably unsupported by the
		 * steering hardware).
		 */
		if (cmd->fs.m_u.tcp_ip4_spec.tos)
			return -EINVAL;
		l4_mask = &cmd->fs.m_u.tcp_ip4_spec;
		/* don't allow mask which isn't all 0 or 1 */
		if (!all_zeros_or_all_ones(l4_mask->ip4src) ||
		    !all_zeros_or_all_ones(l4_mask->ip4dst) ||
		    !all_zeros_or_all_ones(l4_mask->psrc) ||
		    !all_zeros_or_all_ones(l4_mask->pdst))
			return -EINVAL;
		break;
	case IP_USER_FLOW:
		l3_mask = &cmd->fs.m_u.usr_ip4_spec;
		/* Must be IPv4, no L4/ToS/proto matching, and at least one
		 * of src/dst IP set, each with an exact-match mask.
		 */
		if (l3_mask->l4_4_bytes || l3_mask->tos || l3_mask->proto ||
		    cmd->fs.h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4 ||
		    (!l3_mask->ip4src && !l3_mask->ip4dst) ||
		    !all_zeros_or_all_ones(l3_mask->ip4src) ||
		    !all_zeros_or_all_ones(l3_mask->ip4dst))
			return -EINVAL;
		break;
	case ETHER_FLOW:
		eth_mask = &cmd->fs.m_u.ether_spec;
		/* source mac mask must not be set */
		if (!is_zero_ether_addr(eth_mask->h_source))
			return -EINVAL;

		/* dest mac mask must be ff:ff:ff:ff:ff:ff */
		if (!is_broadcast_ether_addr(eth_mask->h_dest))
			return -EINVAL;

		if (!all_zeros_or_all_ones(eth_mask->h_proto))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if ((cmd->fs.flow_type & FLOW_EXT)) {
		/* VLAN etype cannot be matched; the VID mask must be empty
		 * or exact, and an exact-match VID must be a valid one.
		 */
		if (cmd->fs.m_ext.vlan_etype ||
		    !((cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)) ==
		      0 ||
		      (cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)) ==
		      cpu_to_be16(VLAN_VID_MASK)))
			return -EINVAL;

		if (cmd->fs.m_ext.vlan_tci) {
			if (be16_to_cpu(cmd->fs.h_ext.vlan_tci) >= VLAN_N_VID)
				return -EINVAL;
		}
	}

	return 0;
}
  1185. static int mlx4_en_ethtool_add_mac_rule(struct ethtool_rxnfc *cmd,
  1186. struct list_head *rule_list_h,
  1187. struct mlx4_spec_list *spec_l2,
  1188. unsigned char *mac)
  1189. {
  1190. int err = 0;
  1191. __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
  1192. spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH;
  1193. memcpy(spec_l2->eth.dst_mac_msk, &mac_msk, ETH_ALEN);
  1194. memcpy(spec_l2->eth.dst_mac, mac, ETH_ALEN);
  1195. if ((cmd->fs.flow_type & FLOW_EXT) &&
  1196. (cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK))) {
  1197. spec_l2->eth.vlan_id = cmd->fs.h_ext.vlan_tci;
  1198. spec_l2->eth.vlan_id_msk = cpu_to_be16(VLAN_VID_MASK);
  1199. }
  1200. list_add_tail(&spec_l2->list, rule_list_h);
  1201. return err;
  1202. }
/* Build the L2 (MAC) part of an IPv4-based rule: multicast destinations
 * map to their multicast MAC; unicast rules use the user-supplied MAC
 * (FLOW_MAC_EXT) or the port's own address. Requires CONFIG_INET for
 * ip_eth_mc_map()/ipv4_is_multicast().
 */
static int mlx4_en_ethtool_add_mac_rule_by_ipv4(struct mlx4_en_priv *priv,
						struct ethtool_rxnfc *cmd,
						struct list_head *rule_list_h,
						struct mlx4_spec_list *spec_l2,
						__be32 ipv4_dst)
{
#ifdef CONFIG_INET
	unsigned char mac[ETH_ALEN];

	if (!ipv4_is_multicast(ipv4_dst)) {
		if (cmd->fs.flow_type & FLOW_MAC_EXT)
			memcpy(&mac, cmd->fs.h_ext.h_dest, ETH_ALEN);
		else
			memcpy(&mac, priv->dev->dev_addr, ETH_ALEN);
	} else {
		ip_eth_mc_map(ipv4_dst, mac);
	}

	/* On success spec_l2 is linked onto rule_list_h. */
	return mlx4_en_ethtool_add_mac_rule(cmd, rule_list_h, spec_l2, &mac[0]);
#else
	return -EINVAL;
#endif
}
/* Build the spec list for an IP_USER_FLOW rule: one L2 (MAC) spec and one
 * L3 (IPv4) spec, both appended to @list_h on success.
 */
static int add_ip_rule(struct mlx4_en_priv *priv,
		       struct ethtool_rxnfc *cmd,
		       struct list_head *list_h)
{
	int err;
	struct mlx4_spec_list *spec_l2 = NULL;
	struct mlx4_spec_list *spec_l3 = NULL;
	struct ethtool_usrip4_spec *l3_mask = &cmd->fs.m_u.usr_ip4_spec;

	spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL);
	spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
	if (!spec_l2 || !spec_l3) {
		err = -ENOMEM;
		goto free_spec;
	}

	/* On success spec_l2 is linked onto list_h; on failure it was not
	 * linked, so the free_spec path may kfree it safely.
	 */
	err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h, spec_l2,
						   cmd->fs.h_u.
						   usr_ip4_spec.ip4dst);
	if (err)
		goto free_spec;

	/* Exact-match src/dst IP wherever a (validated all-ones) mask is
	 * set.
	 */
	spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
	spec_l3->ipv4.src_ip = cmd->fs.h_u.usr_ip4_spec.ip4src;
	if (l3_mask->ip4src)
		spec_l3->ipv4.src_ip_msk = EN_ETHTOOL_WORD_MASK;
	spec_l3->ipv4.dst_ip = cmd->fs.h_u.usr_ip4_spec.ip4dst;
	if (l3_mask->ip4dst)
		spec_l3->ipv4.dst_ip_msk = EN_ETHTOOL_WORD_MASK;
	list_add_tail(&spec_l3->list, list_h);

	return 0;

free_spec:
	kfree(spec_l2);
	kfree(spec_l3);
	return err;
}
/* Build the spec list for a TCP_V4_FLOW/UDP_V4_FLOW rule: L2 (MAC), L3
 * (IPv4) and L4 (TCP/UDP port) specs, all appended to @list_h on success.
 */
static int add_tcp_udp_rule(struct mlx4_en_priv *priv,
			    struct ethtool_rxnfc *cmd,
			    struct list_head *list_h, int proto)
{
	int err;
	struct mlx4_spec_list *spec_l2 = NULL;
	struct mlx4_spec_list *spec_l3 = NULL;
	struct mlx4_spec_list *spec_l4 = NULL;
	/* tcp_ip4_spec and udp_ip4_spec share the same layout in the union,
	 * so the TCP view of the mask is valid for both protocols.
	 */
	struct ethtool_tcpip4_spec *l4_mask = &cmd->fs.m_u.tcp_ip4_spec;

	spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
	spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL);
	spec_l4 = kzalloc(sizeof(*spec_l4), GFP_KERNEL);
	if (!spec_l2 || !spec_l3 || !spec_l4) {
		err = -ENOMEM;
		goto free_spec;
	}

	spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
	if (proto == TCP_V4_FLOW) {
		/* On success spec_l2 is linked onto list_h by the helper. */
		err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h,
							   spec_l2,
							   cmd->fs.h_u.
							   tcp_ip4_spec.ip4dst);
		if (err)
			goto free_spec;
		spec_l4->id = MLX4_NET_TRANS_RULE_ID_TCP;
		spec_l3->ipv4.src_ip = cmd->fs.h_u.tcp_ip4_spec.ip4src;
		spec_l3->ipv4.dst_ip = cmd->fs.h_u.tcp_ip4_spec.ip4dst;
		spec_l4->tcp_udp.src_port = cmd->fs.h_u.tcp_ip4_spec.psrc;
		spec_l4->tcp_udp.dst_port = cmd->fs.h_u.tcp_ip4_spec.pdst;
	} else {
		err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h,
							   spec_l2,
							   cmd->fs.h_u.
							   udp_ip4_spec.ip4dst);
		if (err)
			goto free_spec;
		spec_l4->id = MLX4_NET_TRANS_RULE_ID_UDP;
		spec_l3->ipv4.src_ip = cmd->fs.h_u.udp_ip4_spec.ip4src;
		spec_l3->ipv4.dst_ip = cmd->fs.h_u.udp_ip4_spec.ip4dst;
		spec_l4->tcp_udp.src_port = cmd->fs.h_u.udp_ip4_spec.psrc;
		spec_l4->tcp_udp.dst_port = cmd->fs.h_u.udp_ip4_spec.pdst;
	}

	/* Masks were validated to be all-zeros or all-ones; a non-zero mask
	 * means exact match on that field.
	 */
	if (l4_mask->ip4src)
		spec_l3->ipv4.src_ip_msk = EN_ETHTOOL_WORD_MASK;
	if (l4_mask->ip4dst)
		spec_l3->ipv4.dst_ip_msk = EN_ETHTOOL_WORD_MASK;
	if (l4_mask->psrc)
		spec_l4->tcp_udp.src_port_msk = EN_ETHTOOL_SHORT_MASK;
	if (l4_mask->pdst)
		spec_l4->tcp_udp.dst_port_msk = EN_ETHTOOL_SHORT_MASK;

	list_add_tail(&spec_l3->list, list_h);
	list_add_tail(&spec_l4->list, list_h);

	return 0;

free_spec:
	kfree(spec_l2);
	kfree(spec_l3);
	kfree(spec_l4);
	return err;
}
/* Translate a validated ethtool flow spec into a list of mlx4 match specs
 * appended to @rule_list_h. Returns 0 on success or a negative errno.
 */
static int mlx4_en_ethtool_to_net_trans_rule(struct net_device *dev,
					     struct ethtool_rxnfc *cmd,
					     struct list_head *rule_list_h)
{
	int err;
	struct ethhdr *eth_spec;
	struct mlx4_spec_list *spec_l2;
	struct mlx4_en_priv *priv = netdev_priv(dev);

	/* Reject anything the steering rules below cannot express. */
	err = mlx4_en_validate_flow(dev, cmd);
	if (err)
		return err;

	switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case ETHER_FLOW:
		spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
		if (!spec_l2)
			return -ENOMEM;

		/* Always returns 0 and links spec_l2 onto rule_list_h. */
		eth_spec = &cmd->fs.h_u.ether_spec;
		mlx4_en_ethtool_add_mac_rule(cmd, rule_list_h, spec_l2,
					     &eth_spec->h_dest[0]);
		spec_l2->eth.ether_type = eth_spec->h_proto;
		if (eth_spec->h_proto)
			spec_l2->eth.ether_type_enable = 1;
		break;
	case IP_USER_FLOW:
		err = add_ip_rule(priv, cmd, rule_list_h);
		break;
	case TCP_V4_FLOW:
		err = add_tcp_udp_rule(priv, cmd, rule_list_h, TCP_V4_FLOW);
		break;
	case UDP_V4_FLOW:
		err = add_tcp_udp_rule(priv, cmd, rule_list_h, UDP_V4_FLOW);
		break;
	}

	return err;
}
/* Insert or replace the ethtool steering rule at cmd->fs.location: resolve
 * the destination QP from the ring cookie, build the match-spec list,
 * detach any previous rule at that slot, then attach the new one.
 */
static int mlx4_en_flow_replace(struct net_device *dev,
				struct ethtool_rxnfc *cmd)
{
	int err;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct ethtool_flow_id *loc_rule;
	struct mlx4_spec_list *spec, *tmp_spec;
	u32 qpn;
	u64 reg_id;

	struct mlx4_net_trans_rule rule = {
		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
		.exclusive = 0,
		.allow_loopback = 1,
		.promisc_mode = MLX4_FS_REGULAR,
	};

	rule.port = priv->port;
	/* Priority encodes the ethtool slot within the ethtool domain. */
	rule.priority = MLX4_DOMAIN_ETHTOOL | cmd->fs.location;
	INIT_LIST_HEAD(&rule.list);

	/* Allow direct QP attaches if the EN_ETHTOOL_QP_ATTACH flag is set */
	if (cmd->fs.ring_cookie == RX_CLS_FLOW_DISC)
		qpn = priv->drop_qp.qpn;
	else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) {
		qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1);
	} else {
		if (cmd->fs.ring_cookie >= priv->rx_ring_num) {
			en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n",
				cmd->fs.ring_cookie);
			return -EINVAL;
		}
		qpn = priv->rss_map.qps[cmd->fs.ring_cookie].qpn;
		if (!qpn) {
			en_warn(priv, "rxnfc: RX ring (%llu) is inactive\n",
				cmd->fs.ring_cookie);
			return -EINVAL;
		}
	}
	rule.qpn = qpn;

	/* Build the list of match specs from the ethtool flow spec. */
	err = mlx4_en_ethtool_to_net_trans_rule(dev, cmd, &rule.list);
	if (err)
		goto out_free_list;

	/* Replace semantics: detach any rule already at this location. */
	loc_rule = &priv->ethtool_rules[cmd->fs.location];
	if (loc_rule->id) {
		err = mlx4_flow_detach(priv->mdev->dev, loc_rule->id);
		if (err) {
			en_err(priv, "Fail to detach network rule at location %d. registration id = %llx\n",
			       cmd->fs.location, loc_rule->id);
			goto out_free_list;
		}
		loc_rule->id = 0;
		memset(&loc_rule->flow_spec, 0,
		       sizeof(struct ethtool_rx_flow_spec));
		list_del(&loc_rule->list);
	}
	err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id);
	if (err) {
		en_err(priv, "Fail to attach network rule at location %d\n",
		       cmd->fs.location);
		goto out_free_list;
	}

	/* Record the attached rule so it can be queried/detached later. */
	loc_rule->id = reg_id;
	memcpy(&loc_rule->flow_spec, &cmd->fs,
	       sizeof(struct ethtool_rx_flow_spec));
	list_add_tail(&loc_rule->list, &priv->ethtool_list);

out_free_list:
	/* The spec list was only needed for the attach call; free it. */
	list_for_each_entry_safe(spec, tmp_spec, &rule.list, list) {
		list_del(&spec->list);
		kfree(spec);
	}
	return err;
}
  1421. static int mlx4_en_flow_detach(struct net_device *dev,
  1422. struct ethtool_rxnfc *cmd)
  1423. {
  1424. int err = 0;
  1425. struct ethtool_flow_id *rule;
  1426. struct mlx4_en_priv *priv = netdev_priv(dev);
  1427. if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
  1428. return -EINVAL;
  1429. rule = &priv->ethtool_rules[cmd->fs.location];
  1430. if (!rule->id) {
  1431. err = -ENOENT;
  1432. goto out;
  1433. }
  1434. err = mlx4_flow_detach(priv->mdev->dev, rule->id);
  1435. if (err) {
  1436. en_err(priv, "Fail to detach network rule at location %d. registration id = 0x%llx\n",
  1437. cmd->fs.location, rule->id);
  1438. goto out;
  1439. }
  1440. rule->id = 0;
  1441. memset(&rule->flow_spec, 0, sizeof(struct ethtool_rx_flow_spec));
  1442. list_del(&rule->list);
  1443. out:
  1444. return err;
  1445. }
  1446. static int mlx4_en_get_flow(struct net_device *dev, struct ethtool_rxnfc *cmd,
  1447. int loc)
  1448. {
  1449. int err = 0;
  1450. struct ethtool_flow_id *rule;
  1451. struct mlx4_en_priv *priv = netdev_priv(dev);
  1452. if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES)
  1453. return -EINVAL;
  1454. rule = &priv->ethtool_rules[loc];
  1455. if (rule->id)
  1456. memcpy(&cmd->fs, &rule->flow_spec,
  1457. sizeof(struct ethtool_rx_flow_spec));
  1458. else
  1459. err = -ENOENT;
  1460. return err;
  1461. }
  1462. static int mlx4_en_get_num_flows(struct mlx4_en_priv *priv)
  1463. {
  1464. int i, res = 0;
  1465. for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
  1466. if (priv->ethtool_rules[i].id)
  1467. res++;
  1468. }
  1469. return res;
  1470. }
/* ethtool ->get_rxnfc: report RX ring count and installed flow rules.
 *
 * The classification-rule queries (GRXCLSRLCNT / GRXCLSRULE /
 * GRXCLSRLALL) are only honored in device-managed steering mode and
 * while the port is up; otherwise -EINVAL is returned up front.
 */
static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			     u32 *rule_locs)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;
	int i = 0, priority = 0;

	if ((cmd->cmd == ETHTOOL_GRXCLSRLCNT ||
	     cmd->cmd == ETHTOOL_GRXCLSRULE ||
	     cmd->cmd == ETHTOOL_GRXCLSRLALL) &&
	    (mdev->dev->caps.steering_mode !=
	     MLX4_STEERING_MODE_DEVICE_MANAGED || !priv->port_up))
		return -EINVAL;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = priv->rx_ring_num;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = mlx4_en_get_num_flows(priv);
		break;
	case ETHTOOL_GRXCLSRULE:
		err = mlx4_en_get_flow(dev, cmd, cmd->fs.location);
		break;
	case ETHTOOL_GRXCLSRLALL:
		cmd->data = MAX_NUM_OF_FS_RULES;
		/* Scan all slots, recording the indices of occupied ones.
		 * -ENOENT (empty slot) keeps the scan going; -EINVAL from
		 * an out-of-range index terminates it.  The scan also
		 * stops once cmd->rule_cnt locations have been filled.
		 */
		while ((!err || err == -ENOENT) && priority < cmd->rule_cnt) {
			err = mlx4_en_get_flow(dev, cmd, i);
			if (!err)
				rule_locs[priority++] = i;
			i++;
		}
		/* Per-slot probe errors are expected; the dump succeeds. */
		err = 0;
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
  1510. static int mlx4_en_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
  1511. {
  1512. int err = 0;
  1513. struct mlx4_en_priv *priv = netdev_priv(dev);
  1514. struct mlx4_en_dev *mdev = priv->mdev;
  1515. if (mdev->dev->caps.steering_mode !=
  1516. MLX4_STEERING_MODE_DEVICE_MANAGED || !priv->port_up)
  1517. return -EINVAL;
  1518. switch (cmd->cmd) {
  1519. case ETHTOOL_SRXCLSRLINS:
  1520. err = mlx4_en_flow_replace(dev, cmd);
  1521. break;
  1522. case ETHTOOL_SRXCLSRLDEL:
  1523. err = mlx4_en_flow_detach(dev, cmd);
  1524. break;
  1525. default:
  1526. en_warn(priv, "Unsupported ethtool command. (%d)\n", cmd->cmd);
  1527. return -EINVAL;
  1528. }
  1529. return err;
  1530. }
  1531. static int mlx4_en_get_max_num_rx_rings(struct net_device *dev)
  1532. {
  1533. return min_t(int, num_online_cpus(), MAX_RX_RINGS);
  1534. }
  1535. static void mlx4_en_get_channels(struct net_device *dev,
  1536. struct ethtool_channels *channel)
  1537. {
  1538. struct mlx4_en_priv *priv = netdev_priv(dev);
  1539. channel->max_rx = mlx4_en_get_max_num_rx_rings(dev);
  1540. channel->max_tx = priv->mdev->profile.max_num_tx_rings_p_up;
  1541. channel->rx_count = priv->rx_ring_num;
  1542. channel->tx_count = priv->tx_ring_num[TX] /
  1543. priv->prof->num_up;
  1544. }
/* ethtool ->set_channels: resize the RX/TX ring sets.
 *
 * The new ring resources are allocated first (staged in @tmp) so that
 * an allocation failure leaves the old configuration intact; only then
 * is the port stopped, the resources swapped in, and the port restarted
 * if it was previously up.
 */
static int mlx4_en_set_channels(struct net_device *dev,
				struct ethtool_channels *channel)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_port_profile new_prof;
	struct mlx4_en_priv *tmp;
	int total_tx_count;
	int port_up = 0;
	int xdp_count;
	int err = 0;
	u8 up;

	if (!channel->tx_count || !channel->rx_count)
		return -EINVAL;

	/* Scratch priv used by mlx4_en_try_alloc_resources() to stage
	 * the new rings before committing them.
	 */
	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	mutex_lock(&mdev->state_lock);
	/* When XDP is active, each RX ring needs a dedicated XDP TX ring. */
	xdp_count = priv->tx_ring_num[TX_XDP] ? channel->rx_count : 0;
	total_tx_count = channel->tx_count * priv->prof->num_up + xdp_count;
	if (total_tx_count > MAX_TX_RINGS) {
		err = -EINVAL;
		en_err(priv,
		       "Total number of TX and XDP rings (%d) exceeds the maximum supported (%d)\n",
		       total_tx_count, MAX_TX_RINGS);
		goto out;
	}

	memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
	new_prof.num_tx_rings_p_up = channel->tx_count;
	new_prof.tx_ring_num[TX] = channel->tx_count * priv->prof->num_up;
	new_prof.tx_ring_num[TX_XDP] = xdp_count;
	new_prof.rx_ring_num = channel->rx_count;

	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
	if (err)
		goto out;

	/* Port must be quiesced before the rings are swapped. */
	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev, 1);
	}

	mlx4_en_safe_replace_resources(priv, tmp);

	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);

	/* Re-apply the traffic-class / qdisc mapping for the new layout. */
	up = (priv->prof->num_up == MLX4_EN_NUM_UP_LOW) ?
				    0 : priv->prof->num_up;
	mlx4_en_setup_tc(dev, up);

	en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num[TX]);
	en_warn(priv, "Using %d RX rings\n", priv->rx_ring_num);

	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err)
			en_err(priv, "Failed starting port\n");
	}

	err = mlx4_en_moderation_update(priv);
out:
	mutex_unlock(&mdev->state_lock);
	kfree(tmp);
	return err;
}
  1602. static int mlx4_en_get_ts_info(struct net_device *dev,
  1603. struct ethtool_ts_info *info)
  1604. {
  1605. struct mlx4_en_priv *priv = netdev_priv(dev);
  1606. struct mlx4_en_dev *mdev = priv->mdev;
  1607. int ret;
  1608. ret = ethtool_op_get_ts_info(dev, info);
  1609. if (ret)
  1610. return ret;
  1611. if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
  1612. info->so_timestamping |=
  1613. SOF_TIMESTAMPING_TX_HARDWARE |
  1614. SOF_TIMESTAMPING_RX_HARDWARE |
  1615. SOF_TIMESTAMPING_RAW_HARDWARE;
  1616. info->tx_types =
  1617. (1 << HWTSTAMP_TX_OFF) |
  1618. (1 << HWTSTAMP_TX_ON);
  1619. info->rx_filters =
  1620. (1 << HWTSTAMP_FILTER_NONE) |
  1621. (1 << HWTSTAMP_FILTER_ALL);
  1622. if (mdev->ptp_clock)
  1623. info->phc_index = ptp_clock_index(mdev->ptp_clock);
  1624. }
  1625. return ret;
  1626. }
/* ethtool ->set_priv_flags: toggle the BlueFlame and PHV private flags.
 *
 * BlueFlame is only enabled when every TX ring of every ring type has a
 * BlueFlame register allocated; otherwise the request is rejected with
 * -EINVAL.  The PHV bit is programmed into firmware via set_phv_bit()
 * before the cached flag is updated, so a firmware failure leaves the
 * cached state unchanged.
 */
static int mlx4_en_set_priv_flags(struct net_device *dev, u32 flags)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	bool bf_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_BLUEFLAME);
	bool bf_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_BLUEFLAME);
	bool phv_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_PHV);
	bool phv_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_PHV);
	int i;
	int ret = 0;

	if (bf_enabled_new != bf_enabled_old) {
		int t;

		if (bf_enabled_new) {
			bool bf_supported = true;

			/* Every ring must have its BlueFlame register
			 * allocated for the feature to be usable at all.
			 */
			for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++)
				for (i = 0; i < priv->tx_ring_num[t]; i++)
					bf_supported &=
						priv->tx_ring[t][i]->bf_alloced;

			if (!bf_supported) {
				en_err(priv, "BlueFlame is not supported\n");
				return -EINVAL;
			}

			priv->pflags |= MLX4_EN_PRIV_FLAGS_BLUEFLAME;
		} else {
			priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME;
		}

		/* Propagate the new setting to every TX ring. */
		for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++)
			for (i = 0; i < priv->tx_ring_num[t]; i++)
				priv->tx_ring[t][i]->bf_enabled =
					bf_enabled_new;

		en_info(priv, "BlueFlame %s\n",
			bf_enabled_new ? "Enabled" : "Disabled");
	}

	if (phv_enabled_new != phv_enabled_old) {
		/* Program firmware first; only cache the flag on success. */
		ret = set_phv_bit(mdev->dev, priv->port, (int)phv_enabled_new);
		if (ret)
			return ret;
		else if (phv_enabled_new)
			priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
		else
			priv->pflags &= ~MLX4_EN_PRIV_FLAGS_PHV;
		en_info(priv, "PHV bit %s\n",
			phv_enabled_new ? "Enabled" : "Disabled");
	}
	return 0;
}
  1673. static u32 mlx4_en_get_priv_flags(struct net_device *dev)
  1674. {
  1675. struct mlx4_en_priv *priv = netdev_priv(dev);
  1676. return priv->pflags;
  1677. }
  1678. static int mlx4_en_get_tunable(struct net_device *dev,
  1679. const struct ethtool_tunable *tuna,
  1680. void *data)
  1681. {
  1682. const struct mlx4_en_priv *priv = netdev_priv(dev);
  1683. int ret = 0;
  1684. switch (tuna->id) {
  1685. case ETHTOOL_TX_COPYBREAK:
  1686. *(u32 *)data = priv->prof->inline_thold;
  1687. break;
  1688. default:
  1689. ret = -EINVAL;
  1690. break;
  1691. }
  1692. return ret;
  1693. }
  1694. static int mlx4_en_set_tunable(struct net_device *dev,
  1695. const struct ethtool_tunable *tuna,
  1696. const void *data)
  1697. {
  1698. struct mlx4_en_priv *priv = netdev_priv(dev);
  1699. int val, ret = 0;
  1700. switch (tuna->id) {
  1701. case ETHTOOL_TX_COPYBREAK:
  1702. val = *(u32 *)data;
  1703. if (val < MIN_PKT_LEN || val > MAX_INLINE)
  1704. ret = -EINVAL;
  1705. else
  1706. priv->prof->inline_thold = val;
  1707. break;
  1708. default:
  1709. ret = -EINVAL;
  1710. break;
  1711. }
  1712. return ret;
  1713. }
  1714. #define MLX4_EEPROM_PAGE_LEN 256
  1715. static int mlx4_en_get_module_info(struct net_device *dev,
  1716. struct ethtool_modinfo *modinfo)
  1717. {
  1718. struct mlx4_en_priv *priv = netdev_priv(dev);
  1719. struct mlx4_en_dev *mdev = priv->mdev;
  1720. int ret;
  1721. u8 data[4];
  1722. /* Read first 2 bytes to get Module & REV ID */
  1723. ret = mlx4_get_module_info(mdev->dev, priv->port,
  1724. 0/*offset*/, 2/*size*/, data);
  1725. if (ret < 2)
  1726. return -EIO;
  1727. switch (data[0] /* identifier */) {
  1728. case MLX4_MODULE_ID_QSFP:
  1729. modinfo->type = ETH_MODULE_SFF_8436;
  1730. modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
  1731. break;
  1732. case MLX4_MODULE_ID_QSFP_PLUS:
  1733. if (data[1] >= 0x3) { /* revision id */
  1734. modinfo->type = ETH_MODULE_SFF_8636;
  1735. modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
  1736. } else {
  1737. modinfo->type = ETH_MODULE_SFF_8436;
  1738. modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
  1739. }
  1740. break;
  1741. case MLX4_MODULE_ID_QSFP28:
  1742. modinfo->type = ETH_MODULE_SFF_8636;
  1743. modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
  1744. break;
  1745. case MLX4_MODULE_ID_SFP:
  1746. modinfo->type = ETH_MODULE_SFF_8472;
  1747. modinfo->eeprom_len = MLX4_EEPROM_PAGE_LEN;
  1748. break;
  1749. default:
  1750. return -EINVAL;
  1751. }
  1752. return 0;
  1753. }
/* ethtool ->get_module_eeprom: read @ee->len bytes of module EEPROM
 * starting at @ee->offset into @data.
 *
 * mlx4_get_module_info() may return fewer bytes than requested, so the
 * read is performed in a loop until the range is covered or the device
 * reports no more readable data.
 */
static int mlx4_en_get_module_eeprom(struct net_device *dev,
				     struct ethtool_eeprom *ee,
				     u8 *data)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int offset = ee->offset;
	int i = 0, ret;

	if (ee->len == 0)
		return -EINVAL;

	/* Zero-fill so any tail the device cannot read comes back as
	 * zeros rather than uninitialized memory.
	 */
	memset(data, 0, ee->len);

	while (i < ee->len) {
		en_dbg(DRV, priv,
		       "mlx4_get_module_info i(%d) offset(%d) len(%d)\n",
		       i, offset, ee->len - i);

		ret = mlx4_get_module_info(mdev->dev, priv->port,
					   offset, ee->len - i, data + i);

		if (!ret) /* Done reading */
			return 0;

		if (ret < 0) {
			en_err(priv,
			       "mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n",
			       i, offset, ee->len - i, ret);
			/* NOTE(review): a read failure is logged but reported
			 * as success, handing back whatever was read so far
			 * (zero-filled remainder).  Presumably deliberate
			 * best-effort behavior — confirm before changing.
			 */
			return 0;
		}

		i += ret;
		offset += ret;
	}
	return 0;
}
  1784. static int mlx4_en_set_phys_id(struct net_device *dev,
  1785. enum ethtool_phys_id_state state)
  1786. {
  1787. int err;
  1788. u16 beacon_duration;
  1789. struct mlx4_en_priv *priv = netdev_priv(dev);
  1790. struct mlx4_en_dev *mdev = priv->mdev;
  1791. if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_BEACON))
  1792. return -EOPNOTSUPP;
  1793. switch (state) {
  1794. case ETHTOOL_ID_ACTIVE:
  1795. beacon_duration = PORT_BEACON_MAX_LIMIT;
  1796. break;
  1797. case ETHTOOL_ID_INACTIVE:
  1798. beacon_duration = 0;
  1799. break;
  1800. default:
  1801. return -EOPNOTSUPP;
  1802. }
  1803. err = mlx4_SET_PORT_BEACON(mdev->dev, priv->port, beacon_duration);
  1804. return err;
  1805. }
/* Ethtool callback table registered for mlx4_en network devices. */
const struct ethtool_ops mlx4_en_ethtool_ops = {
	.get_drvinfo = mlx4_en_get_drvinfo,
	.get_link_ksettings = mlx4_en_get_link_ksettings,
	.set_link_ksettings = mlx4_en_set_link_ksettings,
	.get_link = ethtool_op_get_link,
	.get_strings = mlx4_en_get_strings,
	.get_sset_count = mlx4_en_get_sset_count,
	.get_ethtool_stats = mlx4_en_get_ethtool_stats,
	.self_test = mlx4_en_self_test,
	.set_phys_id = mlx4_en_set_phys_id,
	.get_wol = mlx4_en_get_wol,
	.set_wol = mlx4_en_set_wol,
	.get_msglevel = mlx4_en_get_msglevel,
	.set_msglevel = mlx4_en_set_msglevel,
	.get_coalesce = mlx4_en_get_coalesce,
	.set_coalesce = mlx4_en_set_coalesce,
	.get_pauseparam = mlx4_en_get_pauseparam,
	.set_pauseparam = mlx4_en_set_pauseparam,
	.get_ringparam = mlx4_en_get_ringparam,
	.set_ringparam = mlx4_en_set_ringparam,
	.get_rxnfc = mlx4_en_get_rxnfc,
	.set_rxnfc = mlx4_en_set_rxnfc,
	.get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size,
	.get_rxfh_key_size = mlx4_en_get_rxfh_key_size,
	.get_rxfh = mlx4_en_get_rxfh,
	.set_rxfh = mlx4_en_set_rxfh,
	.get_channels = mlx4_en_get_channels,
	.set_channels = mlx4_en_set_channels,
	.get_ts_info = mlx4_en_get_ts_info,
	.set_priv_flags = mlx4_en_set_priv_flags,
	.get_priv_flags = mlx4_en_get_priv_flags,
	.get_tunable = mlx4_en_get_tunable,
	.set_tunable = mlx4_en_set_tunable,
	.get_module_info = mlx4_en_get_module_info,
	.get_module_eeprom = mlx4_en_get_module_eeprom
};