ethtool.c

/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/in.h>
#include "net_driver.h"
#include "workarounds.h"
#include "selftest.h"
#include "efx.h"
#include "filter.h"
#include "nic.h"

struct efx_sw_stat_desc {
	const char *name;
	enum {
		EFX_ETHTOOL_STAT_SOURCE_nic,
		EFX_ETHTOOL_STAT_SOURCE_channel,
		EFX_ETHTOOL_STAT_SOURCE_tx_queue
	} source;
	unsigned offset;
	u64(*get_stat) (void *field); /* Reader function */
};

/* Initialiser for a struct efx_sw_stat_desc with type-checking */
#define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \
			 get_stat_function) { \
	.name = #stat_name, \
	.source = EFX_ETHTOOL_STAT_SOURCE_##source_name, \
	.offset = ((((field_type *) 0) == \
		    &((struct efx_##source_name *)0)->field) ? \
		   offsetof(struct efx_##source_name, field) : \
		   offsetof(struct efx_##source_name, field)), \
	.get_stat = get_stat_function, \
}
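
/* Note: the pointer comparison in the .offset initialiser above never changes
 * the result (both arms of the conditional are the same offsetof()); its only
 * effect is to make the compiler warn if the named field is not of the
 * expected field_type, which is what gives the macro its type-checking.
 */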

static u64 efx_get_uint_stat(void *field)
{
	return *(unsigned int *)field;
}

static u64 efx_get_atomic_stat(void *field)
{
	return atomic_read((atomic_t *) field);
}

#define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field) \
	EFX_ETHTOOL_STAT(field, nic, field, \
			 atomic_t, efx_get_atomic_stat)

#define EFX_ETHTOOL_UINT_CHANNEL_STAT(field) \
	EFX_ETHTOOL_STAT(field, channel, n_##field, \
			 unsigned int, efx_get_uint_stat)

#define EFX_ETHTOOL_UINT_TXQ_STAT(field) \
	EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \
			 unsigned int, efx_get_uint_stat)

static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
	EFX_ETHTOOL_UINT_TXQ_STAT(merge_events),
	EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts),
	EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
	EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
	EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
	EFX_ETHTOOL_UINT_TXQ_STAT(pio_packets),
	EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_packets),
};

#define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc)

#define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB

/**************************************************************************
 *
 * Ethtool operations
 *
 **************************************************************************
 */

/* Identify device by flashing LEDs */
static int efx_ethtool_phys_id(struct net_device *net_dev,
			       enum ethtool_phys_id_state state)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	enum efx_led_mode mode = EFX_LED_DEFAULT;

	switch (state) {
	case ETHTOOL_ID_ON:
		mode = EFX_LED_ON;
		break;
	case ETHTOOL_ID_OFF:
		mode = EFX_LED_OFF;
		break;
	case ETHTOOL_ID_INACTIVE:
		mode = EFX_LED_DEFAULT;
		break;
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */
	}

	efx->type->set_id_led(efx, mode);
	return 0;
}

/* This must be called with rtnl_lock held. */
static int efx_ethtool_get_settings(struct net_device *net_dev,
				    struct ethtool_cmd *ecmd)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_link_state *link_state = &efx->link_state;

	mutex_lock(&efx->mac_lock);
	efx->phy_op->get_settings(efx, ecmd);
	mutex_unlock(&efx->mac_lock);

	/* Both MACs support pause frames (bidirectional and respond-only) */
	ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;

	if (LOOPBACK_INTERNAL(efx)) {
		ethtool_cmd_speed_set(ecmd, link_state->speed);
		ecmd->duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF;
	}

	return 0;
}

/* This must be called with rtnl_lock held. */
static int efx_ethtool_set_settings(struct net_device *net_dev,
				    struct ethtool_cmd *ecmd)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int rc;

	/* GMAC does not support 1000Mbps HD */
	if ((ethtool_cmd_speed(ecmd) == SPEED_1000) &&
	    (ecmd->duplex != DUPLEX_FULL)) {
		netif_dbg(efx, drv, efx->net_dev,
			  "rejecting unsupported 1000Mbps HD setting\n");
		return -EINVAL;
	}

	mutex_lock(&efx->mac_lock);
	rc = efx->phy_op->set_settings(efx, ecmd);
	mutex_unlock(&efx->mac_lock);
	return rc;
}

static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
				    struct ethtool_drvinfo *info)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		efx_mcdi_print_fwver(efx, info->fw_version,
				     sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
}

static int efx_ethtool_get_regs_len(struct net_device *net_dev)
{
	return efx_nic_get_regs_len(netdev_priv(net_dev));
}

static void efx_ethtool_get_regs(struct net_device *net_dev,
				 struct ethtool_regs *regs, void *buf)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	regs->version = efx->type->revision;
	efx_nic_get_regs(efx, buf);
}

static u32 efx_ethtool_get_msglevel(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	return efx->msg_enable;
}

static void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	efx->msg_enable = msg_enable;
}

/**
 * efx_fill_test - fill in an individual self-test entry
 * @test_index: Index of the test
 * @strings: Ethtool strings, or %NULL
 * @data: Ethtool test results, or %NULL
 * @test: Pointer to test result (used only if data != %NULL)
 * @unit_format: Unit name format (e.g. "chan\%d")
 * @unit_id: Unit id (e.g. 0 for "chan0")
 * @test_format: Test name format (e.g. "loopback.\%s.tx.sent")
 * @test_id: Test id (e.g. "PHYXS" for "loopback.PHYXS.tx_sent")
 *
 * Fill in an individual self-test entry.
 */
static void efx_fill_test(unsigned int test_index, u8 *strings, u64 *data,
			  int *test, const char *unit_format, int unit_id,
			  const char *test_format, const char *test_id)
{
	char unit_str[ETH_GSTRING_LEN], test_str[ETH_GSTRING_LEN];

	/* Fill data value, if applicable */
	if (data)
		data[test_index] = *test;

	/* Fill string, if applicable */
	if (strings) {
		if (strchr(unit_format, '%'))
			snprintf(unit_str, sizeof(unit_str),
				 unit_format, unit_id);
		else
			strcpy(unit_str, unit_format);
		snprintf(test_str, sizeof(test_str), test_format, test_id);
		snprintf(strings + test_index * ETH_GSTRING_LEN,
			 ETH_GSTRING_LEN,
			 "%-6s %-24s", unit_str, test_str);
	}
}

#define EFX_CHANNEL_NAME(_channel) "chan%d", _channel->channel
#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue
#define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue
#define EFX_LOOPBACK_NAME(_mode, _counter) \
	"loopback.%s." _counter, STRING_TABLE_LOOKUP(_mode, efx_loopback_mode)

/**
 * efx_fill_loopback_test - fill in a block of loopback self-test entries
 * @efx: Efx NIC
 * @lb_tests: Efx loopback self-test results structure
 * @mode: Loopback test mode
 * @test_index: Starting index of the test
 * @strings: Ethtool strings, or %NULL
 * @data: Ethtool test results, or %NULL
 *
 * Fill in a block of loopback self-test entries. Return new test
 * index.
 */
static int efx_fill_loopback_test(struct efx_nic *efx,
				  struct efx_loopback_self_tests *lb_tests,
				  enum efx_loopback_mode mode,
				  unsigned int test_index,
				  u8 *strings, u64 *data)
{
	struct efx_channel *channel =
		efx_get_channel(efx, efx->tx_channel_offset);
	struct efx_tx_queue *tx_queue;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		efx_fill_test(test_index++, strings, data,
			      &lb_tests->tx_sent[tx_queue->queue],
			      EFX_TX_QUEUE_NAME(tx_queue),
			      EFX_LOOPBACK_NAME(mode, "tx_sent"));
		efx_fill_test(test_index++, strings, data,
			      &lb_tests->tx_done[tx_queue->queue],
			      EFX_TX_QUEUE_NAME(tx_queue),
			      EFX_LOOPBACK_NAME(mode, "tx_done"));
	}
	efx_fill_test(test_index++, strings, data,
		      &lb_tests->rx_good,
		      "rx", 0,
		      EFX_LOOPBACK_NAME(mode, "rx_good"));
	efx_fill_test(test_index++, strings, data,
		      &lb_tests->rx_bad,
		      "rx", 0,
		      EFX_LOOPBACK_NAME(mode, "rx_bad"));

	return test_index;
}

/**
 * efx_ethtool_fill_self_tests - get self-test details
 * @efx: Efx NIC
 * @tests: Efx self-test results structure, or %NULL
 * @strings: Ethtool strings, or %NULL
 * @data: Ethtool test results, or %NULL
 *
 * Get self-test number of strings, strings, and/or test results.
 * Return number of strings (== number of test results).
 *
 * The reason for merging these three functions is to make sure that
 * they can never be inconsistent.
 */
static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
				       struct efx_self_tests *tests,
				       u8 *strings, u64 *data)
{
	struct efx_channel *channel;
	unsigned int n = 0, i;
	enum efx_loopback_mode mode;

	efx_fill_test(n++, strings, data, &tests->phy_alive,
		      "phy", 0, "alive", NULL);
	efx_fill_test(n++, strings, data, &tests->nvram,
		      "core", 0, "nvram", NULL);
	efx_fill_test(n++, strings, data, &tests->interrupt,
		      "core", 0, "interrupt", NULL);

	/* Event queues */
	efx_for_each_channel(channel, efx) {
		efx_fill_test(n++, strings, data,
			      &tests->eventq_dma[channel->channel],
			      EFX_CHANNEL_NAME(channel),
			      "eventq.dma", NULL);
		efx_fill_test(n++, strings, data,
			      &tests->eventq_int[channel->channel],
			      EFX_CHANNEL_NAME(channel),
			      "eventq.int", NULL);
	}

	efx_fill_test(n++, strings, data, &tests->memory,
		      "core", 0, "memory", NULL);
	efx_fill_test(n++, strings, data, &tests->registers,
		      "core", 0, "registers", NULL);

	if (efx->phy_op->run_tests != NULL) {
		EFX_BUG_ON_PARANOID(efx->phy_op->test_name == NULL);

		for (i = 0; true; ++i) {
			const char *name;

			EFX_BUG_ON_PARANOID(i >= EFX_MAX_PHY_TESTS);
			name = efx->phy_op->test_name(efx, i);
			if (name == NULL)
				break;

			efx_fill_test(n++, strings, data, &tests->phy_ext[i],
				      "phy", 0, name, NULL);
		}
	}

	/* Loopback tests */
	for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
		if (!(efx->loopback_modes & (1 << mode)))
			continue;
		n = efx_fill_loopback_test(efx,
					   &tests->loopback[mode], mode, n,
					   strings, data);
	}

	return n;
}

static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
{
	size_t n_stats = 0;
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx) {
		if (efx_channel_has_tx_queues(channel)) {
			n_stats++;
			if (strings != NULL) {
				snprintf(strings, ETH_GSTRING_LEN,
					 "tx-%u.tx_packets",
					 channel->tx_queue[0].queue /
					 EFX_TXQ_TYPES);
				strings += ETH_GSTRING_LEN;
			}
		}
	}
	efx_for_each_channel(channel, efx) {
		if (efx_channel_has_rx_queue(channel)) {
			n_stats++;
			if (strings != NULL) {
				snprintf(strings, ETH_GSTRING_LEN,
					 "rx-%d.rx_packets", channel->channel);
				strings += ETH_GSTRING_LEN;
			}
		}
	}
	return n_stats;
}
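
/* efx_describe_per_queue_stats() follows the same convention as the
 * describe_stats NIC-type method: when called with strings == NULL it only
 * counts entries, so efx_ethtool_get_sset_count() and
 * efx_ethtool_get_strings() below always agree on the stats layout.
 */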

static int efx_ethtool_get_sset_count(struct net_device *net_dev,
				      int string_set)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	switch (string_set) {
	case ETH_SS_STATS:
		return efx->type->describe_stats(efx, NULL) +
		       EFX_ETHTOOL_SW_STAT_COUNT +
		       efx_describe_per_queue_stats(efx, NULL) +
		       efx_ptp_describe_stats(efx, NULL);
	case ETH_SS_TEST:
		return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
	default:
		return -EINVAL;
	}
}

static void efx_ethtool_get_strings(struct net_device *net_dev,
				    u32 string_set, u8 *strings)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int i;

	switch (string_set) {
	case ETH_SS_STATS:
		strings += (efx->type->describe_stats(efx, strings) *
			    ETH_GSTRING_LEN);
		for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++)
			strlcpy(strings + i * ETH_GSTRING_LEN,
				efx_sw_stat_desc[i].name, ETH_GSTRING_LEN);
		strings += EFX_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN;
		strings += (efx_describe_per_queue_stats(efx, strings) *
			    ETH_GSTRING_LEN);
		efx_ptp_describe_stats(efx, strings);
		break;
	case ETH_SS_TEST:
		efx_ethtool_fill_self_tests(efx, NULL, strings, NULL);
		break;
	default:
		/* No other string sets */
		break;
	}
}

static void efx_ethtool_get_stats(struct net_device *net_dev,
				  struct ethtool_stats *stats,
				  u64 *data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	const struct efx_sw_stat_desc *stat;
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int i;

	spin_lock_bh(&efx->stats_lock);

	/* Get NIC statistics */
	data += efx->type->update_stats(efx, data, NULL);

	/* Get software statistics */
	for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++) {
		stat = &efx_sw_stat_desc[i];
		switch (stat->source) {
		case EFX_ETHTOOL_STAT_SOURCE_nic:
			data[i] = stat->get_stat((void *)efx + stat->offset);
			break;
		case EFX_ETHTOOL_STAT_SOURCE_channel:
			data[i] = 0;
			efx_for_each_channel(channel, efx)
				data[i] += stat->get_stat((void *)channel +
							  stat->offset);
			break;
		case EFX_ETHTOOL_STAT_SOURCE_tx_queue:
			data[i] = 0;
			efx_for_each_channel(channel, efx) {
				efx_for_each_channel_tx_queue(tx_queue, channel)
					data[i] +=
						stat->get_stat((void *)tx_queue
							       + stat->offset);
			}
			break;
		}
	}
	data += EFX_ETHTOOL_SW_STAT_COUNT;

	spin_unlock_bh(&efx->stats_lock);

	efx_for_each_channel(channel, efx) {
		if (efx_channel_has_tx_queues(channel)) {
			*data = 0;
			efx_for_each_channel_tx_queue(tx_queue, channel) {
				*data += tx_queue->tx_packets;
			}
			data++;
		}
	}
	efx_for_each_channel(channel, efx) {
		if (efx_channel_has_rx_queue(channel)) {
			*data = 0;
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				*data += rx_queue->rx_packets;
			}
			data++;
		}
	}

	efx_ptp_update_stats(efx, data);
}
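
/* Note that efx_ethtool_get_stats() advances 'data' in the same order as the
 * ETH_SS_STATS strings are laid out above: NIC stats, then software stats,
 * then per-queue TX/RX packet counts, then PTP stats.
 */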

static void efx_ethtool_self_test(struct net_device *net_dev,
				  struct ethtool_test *test, u64 *data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_self_tests *efx_tests;
	bool already_up;
	int rc = -ENOMEM;

	efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL);
	if (!efx_tests)
		goto fail;

	if (efx->state != STATE_READY) {
		rc = -EBUSY;
		goto out;
	}

	netif_info(efx, drv, efx->net_dev, "starting %sline testing\n",
		   (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");

	/* We need rx buffers and interrupts. */
	already_up = (efx->net_dev->flags & IFF_UP);
	if (!already_up) {
		rc = dev_open(efx->net_dev);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed opening device.\n");
			goto out;
		}
	}

	rc = efx_selftest(efx, efx_tests, test->flags);

	if (!already_up)
		dev_close(efx->net_dev);

	netif_info(efx, drv, efx->net_dev, "%s %sline self-tests\n",
		   rc == 0 ? "passed" : "failed",
		   (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");

out:
	efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data);
	kfree(efx_tests);
fail:
	if (rc)
		test->flags |= ETH_TEST_FL_FAILED;
}

/* Restart autonegotiation */
static int efx_ethtool_nway_reset(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	return mdio45_nway_restart(&efx->mdio);
}

/*
 * Each channel has a single IRQ and moderation timer, started by any
 * completion (or other event). Unless the module parameter
 * separate_tx_channels is set, IRQs and moderation are therefore
 * shared between RX and TX completions. In this case, when RX IRQ
 * moderation is explicitly changed then TX IRQ moderation is
 * automatically changed too, but otherwise we fail if the two values
 * are requested to be different.
 *
 * The hardware does not support a limit on the number of completions
 * before an IRQ, so we do not use the max_frames fields. We should
 * report and require that max_frames == (usecs != 0), but this would
 * invalidate existing user documentation.
 *
 * The hardware does not have distinct settings for interrupt
 * moderation while the previous IRQ is being handled, so we should
 * not use the 'irq' fields. However, an earlier developer
 * misunderstood the meaning of the 'irq' fields and the driver did
 * not support the standard fields. To avoid invalidating existing
 * user documentation, we report and accept changes through either the
 * standard or 'irq' fields. If both are changed at the same time, we
 * prefer the standard field.
 *
 * We implement adaptive IRQ moderation, but use a different algorithm
 * from that assumed in the definition of struct ethtool_coalesce.
 * Therefore we do not use any of the adaptive moderation parameters
 * in it.
 */
static int efx_ethtool_get_coalesce(struct net_device *net_dev,
				    struct ethtool_coalesce *coalesce)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	unsigned int tx_usecs, rx_usecs;
	bool rx_adaptive;

	efx_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &rx_adaptive);

	coalesce->tx_coalesce_usecs = tx_usecs;
	coalesce->tx_coalesce_usecs_irq = tx_usecs;
	coalesce->rx_coalesce_usecs = rx_usecs;
	coalesce->rx_coalesce_usecs_irq = rx_usecs;
	coalesce->use_adaptive_rx_coalesce = rx_adaptive;

	return 0;
}

static int efx_ethtool_set_coalesce(struct net_device *net_dev,
				    struct ethtool_coalesce *coalesce)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;
	unsigned int tx_usecs, rx_usecs;
	bool adaptive, rx_may_override_tx;
	int rc;

	if (coalesce->use_adaptive_tx_coalesce)
		return -EINVAL;

	efx_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &adaptive);

	if (coalesce->rx_coalesce_usecs != rx_usecs)
		rx_usecs = coalesce->rx_coalesce_usecs;
	else
		rx_usecs = coalesce->rx_coalesce_usecs_irq;

	adaptive = coalesce->use_adaptive_rx_coalesce;

	/* If channels are shared, TX IRQ moderation can be quietly
	 * overridden unless it is changed from its old value.
	 */
	rx_may_override_tx = (coalesce->tx_coalesce_usecs == tx_usecs &&
			      coalesce->tx_coalesce_usecs_irq == tx_usecs);
	if (coalesce->tx_coalesce_usecs != tx_usecs)
		tx_usecs = coalesce->tx_coalesce_usecs;
	else
		tx_usecs = coalesce->tx_coalesce_usecs_irq;

	rc = efx_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive,
				     rx_may_override_tx);
	if (rc != 0)
		return rc;

	efx_for_each_channel(channel, efx)
		efx->type->push_irq_moderation(channel);

	return 0;
}

static void efx_ethtool_get_ringparam(struct net_device *net_dev,
				      struct ethtool_ringparam *ring)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	ring->rx_max_pending = EFX_MAX_DMAQ_SIZE;
	ring->tx_max_pending = EFX_TXQ_MAX_ENT(efx);
	ring->rx_pending = efx->rxq_entries;
	ring->tx_pending = efx->txq_entries;
}

static int efx_ethtool_set_ringparam(struct net_device *net_dev,
				     struct ethtool_ringparam *ring)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	u32 txq_entries;

	if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
	    ring->rx_pending > EFX_MAX_DMAQ_SIZE ||
	    ring->tx_pending > EFX_TXQ_MAX_ENT(efx))
		return -EINVAL;

	if (ring->rx_pending < EFX_RXQ_MIN_ENT) {
		netif_err(efx, drv, efx->net_dev,
			  "RX queues cannot be smaller than %u\n",
			  EFX_RXQ_MIN_ENT);
		return -EINVAL;
	}

	txq_entries = max(ring->tx_pending, EFX_TXQ_MIN_ENT(efx));
	if (txq_entries != ring->tx_pending)
		netif_warn(efx, drv, efx->net_dev,
			   "increasing TX queue size to minimum of %u\n",
			   txq_entries);

	return efx_realloc_channels(efx, ring->rx_pending, txq_entries);
}

static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
				      struct ethtool_pauseparam *pause)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	u8 wanted_fc, old_fc;
	u32 old_adv;
	int rc = 0;

	mutex_lock(&efx->mac_lock);

	wanted_fc = ((pause->rx_pause ? EFX_FC_RX : 0) |
		     (pause->tx_pause ? EFX_FC_TX : 0) |
		     (pause->autoneg ? EFX_FC_AUTO : 0));

	if ((wanted_fc & EFX_FC_TX) && !(wanted_fc & EFX_FC_RX)) {
		netif_dbg(efx, drv, efx->net_dev,
			  "Flow control unsupported: tx ON rx OFF\n");
		rc = -EINVAL;
		goto out;
	}

	if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising) {
		netif_dbg(efx, drv, efx->net_dev,
			  "Autonegotiation is disabled\n");
		rc = -EINVAL;
		goto out;
	}

	/* Hook for Falcon bug 11482 workaround */
	if (efx->type->prepare_enable_fc_tx &&
	    (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX))
		efx->type->prepare_enable_fc_tx(efx);

	old_adv = efx->link_advertising;
	old_fc = efx->wanted_fc;
	efx_link_set_wanted_fc(efx, wanted_fc);
	if (efx->link_advertising != old_adv ||
	    (efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) {
		rc = efx->phy_op->reconfigure(efx);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "Unable to advertise requested flow "
				  "control setting\n");
			goto out;
		}
	}

	/* Reconfigure the MAC. The PHY *may* generate a link state change event
	 * if the user just changed the advertised capabilities, but there's no
	 * harm doing this twice */
	efx_mac_reconfigure(efx);

out:
	mutex_unlock(&efx->mac_lock);

	return rc;
}

static void efx_ethtool_get_pauseparam(struct net_device *net_dev,
				       struct ethtool_pauseparam *pause)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	pause->rx_pause = !!(efx->wanted_fc & EFX_FC_RX);
	pause->tx_pause = !!(efx->wanted_fc & EFX_FC_TX);
	pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO);
}

static void efx_ethtool_get_wol(struct net_device *net_dev,
				struct ethtool_wolinfo *wol)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	return efx->type->get_wol(efx, wol);
}

static int efx_ethtool_set_wol(struct net_device *net_dev,
			       struct ethtool_wolinfo *wol)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	return efx->type->set_wol(efx, wol->wolopts);
}

static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int rc;

	rc = efx->type->map_reset_flags(flags);
	if (rc < 0)
		return rc;

	return efx_reset(efx, rc);
}

/* MAC address mask including only I/G bit */
static const u8 mac_addr_ig_mask[ETH_ALEN] __aligned(2) = {0x01, 0, 0, 0, 0, 0};

#define IP4_ADDR_FULL_MASK	((__force __be32)~0)
#define IP_PROTO_FULL_MASK	0xFF
#define PORT_FULL_MASK		((__force __be16)~0)
#define ETHER_TYPE_FULL_MASK	((__force __be16)~0)

static inline void ip6_fill_mask(__be32 *mask)
{
	mask[0] = mask[1] = mask[2] = mask[3] = ~(__be32)0;
}

static int efx_ethtool_get_class_rule(struct efx_nic *efx,
				      struct ethtool_rx_flow_spec *rule)
{
	struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec;
	struct ethtool_usrip4_spec *uip_entry = &rule->h_u.usr_ip4_spec;
	struct ethtool_usrip4_spec *uip_mask = &rule->m_u.usr_ip4_spec;
	struct ethtool_tcpip6_spec *ip6_entry = &rule->h_u.tcp_ip6_spec;
	struct ethtool_tcpip6_spec *ip6_mask = &rule->m_u.tcp_ip6_spec;
	struct ethtool_usrip6_spec *uip6_entry = &rule->h_u.usr_ip6_spec;
	struct ethtool_usrip6_spec *uip6_mask = &rule->m_u.usr_ip6_spec;
	struct ethhdr *mac_entry = &rule->h_u.ether_spec;
	struct ethhdr *mac_mask = &rule->m_u.ether_spec;
	struct efx_filter_spec spec;
	int rc;

	rc = efx_filter_get_filter_safe(efx, EFX_FILTER_PRI_MANUAL,
					rule->location, &spec);
	if (rc)
		return rc;

	if (spec.dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
		rule->ring_cookie = RX_CLS_FLOW_DISC;
	else
		rule->ring_cookie = spec.dmaq_id;

	if ((spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) &&
	    spec.ether_type == htons(ETH_P_IP) &&
	    (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) &&
	    (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) &&
	    !(spec.match_flags &
	      ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
		EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
		EFX_FILTER_MATCH_IP_PROTO |
		EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))) {
		rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ?
				   TCP_V4_FLOW : UDP_V4_FLOW);
		if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
			ip_entry->ip4dst = spec.loc_host[0];
			ip_mask->ip4dst = IP4_ADDR_FULL_MASK;
		}
		if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
			ip_entry->ip4src = spec.rem_host[0];
			ip_mask->ip4src = IP4_ADDR_FULL_MASK;
		}
		if (spec.match_flags & EFX_FILTER_MATCH_LOC_PORT) {
			ip_entry->pdst = spec.loc_port;
			ip_mask->pdst = PORT_FULL_MASK;
		}
		if (spec.match_flags & EFX_FILTER_MATCH_REM_PORT) {
			ip_entry->psrc = spec.rem_port;
			ip_mask->psrc = PORT_FULL_MASK;
		}
	} else if ((spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) &&
		   spec.ether_type == htons(ETH_P_IPV6) &&
		   (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) &&
		   (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) &&
		   !(spec.match_flags &
		     ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
		       EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
		       EFX_FILTER_MATCH_IP_PROTO |
		       EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))) {
		rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ?
				   TCP_V6_FLOW : UDP_V6_FLOW);
		if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
			memcpy(ip6_entry->ip6dst, spec.loc_host,
			       sizeof(ip6_entry->ip6dst));
			ip6_fill_mask(ip6_mask->ip6dst);
		}
		if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
			memcpy(ip6_entry->ip6src, spec.rem_host,
			       sizeof(ip6_entry->ip6src));
			ip6_fill_mask(ip6_mask->ip6src);
		}
		if (spec.match_flags & EFX_FILTER_MATCH_LOC_PORT) {
			ip6_entry->pdst = spec.loc_port;
			ip6_mask->pdst = PORT_FULL_MASK;
		}
		if (spec.match_flags & EFX_FILTER_MATCH_REM_PORT) {
			ip6_entry->psrc = spec.rem_port;
			ip6_mask->psrc = PORT_FULL_MASK;
		}
	} else if (!(spec.match_flags &
		     ~(EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG |
		       EFX_FILTER_MATCH_REM_MAC | EFX_FILTER_MATCH_ETHER_TYPE |
		       EFX_FILTER_MATCH_OUTER_VID))) {
		rule->flow_type = ETHER_FLOW;
		if (spec.match_flags &
		    (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG)) {
			ether_addr_copy(mac_entry->h_dest, spec.loc_mac);
			if (spec.match_flags & EFX_FILTER_MATCH_LOC_MAC)
				eth_broadcast_addr(mac_mask->h_dest);
			else
				ether_addr_copy(mac_mask->h_dest,
						mac_addr_ig_mask);
		}
		if (spec.match_flags & EFX_FILTER_MATCH_REM_MAC) {
			ether_addr_copy(mac_entry->h_source, spec.rem_mac);
			eth_broadcast_addr(mac_mask->h_source);
		}
		if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
			mac_entry->h_proto = spec.ether_type;
			mac_mask->h_proto = ETHER_TYPE_FULL_MASK;
		}
	} else if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
		   spec.ether_type == htons(ETH_P_IP) &&
		   !(spec.match_flags &
		     ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
		       EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
		       EFX_FILTER_MATCH_IP_PROTO))) {
		rule->flow_type = IPV4_USER_FLOW;
		uip_entry->ip_ver = ETH_RX_NFC_IP4;
		if (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) {
			uip_mask->proto = IP_PROTO_FULL_MASK;
			uip_entry->proto = spec.ip_proto;
		}
		if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
			uip_entry->ip4dst = spec.loc_host[0];
			uip_mask->ip4dst = IP4_ADDR_FULL_MASK;
		}
		if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
			uip_entry->ip4src = spec.rem_host[0];
			uip_mask->ip4src = IP4_ADDR_FULL_MASK;
		}
	} else if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
		   spec.ether_type == htons(ETH_P_IPV6) &&
		   !(spec.match_flags &
		     ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
		       EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
		       EFX_FILTER_MATCH_IP_PROTO))) {
		rule->flow_type = IPV6_USER_FLOW;
		if (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) {
			uip6_mask->l4_proto = IP_PROTO_FULL_MASK;
			uip6_entry->l4_proto = spec.ip_proto;
		}
		if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
			memcpy(uip6_entry->ip6dst, spec.loc_host,
			       sizeof(uip6_entry->ip6dst));
			ip6_fill_mask(uip6_mask->ip6dst);
		}
		if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
			memcpy(uip6_entry->ip6src, spec.rem_host,
			       sizeof(uip6_entry->ip6src));
			ip6_fill_mask(uip6_mask->ip6src);
		}
	} else {
		/* The above should handle all filters that we insert */
		WARN_ON(1);
		return -EINVAL;
	}

	if (spec.match_flags & EFX_FILTER_MATCH_OUTER_VID) {
		rule->flow_type |= FLOW_EXT;
		rule->h_ext.vlan_tci = spec.outer_vid;
		rule->m_ext.vlan_tci = htons(0xfff);
	}

	return rc;
}

static int
efx_ethtool_get_rxnfc(struct net_device *net_dev,
		      struct ethtool_rxnfc *info, u32 *rule_locs)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = efx->n_rx_channels;
		return 0;

	case ETHTOOL_GRXFH: {
		unsigned min_revision = 0;

		info->data = 0;
		switch (info->flow_type) {
		case TCP_V4_FLOW:
			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
			/* fall through */
		case UDP_V4_FLOW:
		case SCTP_V4_FLOW:
		case AH_ESP_V4_FLOW:
		case IPV4_FLOW:
			info->data |= RXH_IP_SRC | RXH_IP_DST;
			min_revision = EFX_REV_FALCON_B0;
			break;
		case TCP_V6_FLOW:
			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
			/* fall through */
		case UDP_V6_FLOW:
		case SCTP_V6_FLOW:
		case AH_ESP_V6_FLOW:
		case IPV6_FLOW:
			info->data |= RXH_IP_SRC | RXH_IP_DST;
			min_revision = EFX_REV_SIENA_A0;
			break;
		default:
			break;
		}
		if (efx_nic_rev(efx) < min_revision)
			info->data = 0;
		return 0;
	}

	case ETHTOOL_GRXCLSRLCNT:
		info->data = efx_filter_get_rx_id_limit(efx);
		if (info->data == 0)
			return -EOPNOTSUPP;
		info->data |= RX_CLS_LOC_SPECIAL;
		info->rule_cnt =
			efx_filter_count_rx_used(efx, EFX_FILTER_PRI_MANUAL);
		return 0;

	case ETHTOOL_GRXCLSRULE:
		if (efx_filter_get_rx_id_limit(efx) == 0)
			return -EOPNOTSUPP;
		return efx_ethtool_get_class_rule(efx, &info->fs);

	case ETHTOOL_GRXCLSRLALL: {
		s32 rc;

		info->data = efx_filter_get_rx_id_limit(efx);
		if (info->data == 0)
			return -EOPNOTSUPP;
		rc = efx_filter_get_rx_ids(efx, EFX_FILTER_PRI_MANUAL,
					   rule_locs, info->rule_cnt);
		if (rc < 0)
			return rc;
		info->rule_cnt = rc;
		return 0;
	}

	default:
		return -EOPNOTSUPP;
	}
}

static inline bool ip6_mask_is_full(__be32 mask[4])
{
	return !~(mask[0] & mask[1] & mask[2] & mask[3]);
}

static inline bool ip6_mask_is_empty(__be32 mask[4])
{
	return !(mask[0] | mask[1] | mask[2] | mask[3]);
}

static int efx_ethtool_set_class_rule(struct efx_nic *efx,
				      struct ethtool_rx_flow_spec *rule)
{
	struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec;
	struct ethtool_usrip4_spec *uip_entry = &rule->h_u.usr_ip4_spec;
	struct ethtool_usrip4_spec *uip_mask = &rule->m_u.usr_ip4_spec;
	struct ethtool_tcpip6_spec *ip6_entry = &rule->h_u.tcp_ip6_spec;
	struct ethtool_tcpip6_spec *ip6_mask = &rule->m_u.tcp_ip6_spec;
	struct ethtool_usrip6_spec *uip6_entry = &rule->h_u.usr_ip6_spec;
	struct ethtool_usrip6_spec *uip6_mask = &rule->m_u.usr_ip6_spec;
	struct ethhdr *mac_entry = &rule->h_u.ether_spec;
	struct ethhdr *mac_mask = &rule->m_u.ether_spec;
	struct efx_filter_spec spec;
	int rc;

	/* Check that user wants us to choose the location */
	if (rule->location != RX_CLS_LOC_ANY)
		return -EINVAL;

	/* Range-check ring_cookie */
	if (rule->ring_cookie >= efx->n_rx_channels &&
	    rule->ring_cookie != RX_CLS_FLOW_DISC)
		return -EINVAL;

	/* Check for unsupported extensions */
	if ((rule->flow_type & FLOW_EXT) &&
	    (rule->m_ext.vlan_etype || rule->m_ext.data[0] ||
	     rule->m_ext.data[1]))
		return -EINVAL;

	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL,
			   efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
			   (rule->ring_cookie == RX_CLS_FLOW_DISC) ?
			   EFX_FILTER_RX_DMAQ_ID_DROP : rule->ring_cookie);

	switch (rule->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE |
				    EFX_FILTER_MATCH_IP_PROTO);
		spec.ether_type = htons(ETH_P_IP);
		spec.ip_proto = ((rule->flow_type & ~FLOW_EXT) == TCP_V4_FLOW ?
				 IPPROTO_TCP : IPPROTO_UDP);
		if (ip_mask->ip4dst) {
			if (ip_mask->ip4dst != IP4_ADDR_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
			spec.loc_host[0] = ip_entry->ip4dst;
		}
		if (ip_mask->ip4src) {
			if (ip_mask->ip4src != IP4_ADDR_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
			spec.rem_host[0] = ip_entry->ip4src;
		}
		if (ip_mask->pdst) {
			if (ip_mask->pdst != PORT_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_LOC_PORT;
			spec.loc_port = ip_entry->pdst;
		}
		if (ip_mask->psrc) {
			if (ip_mask->psrc != PORT_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_REM_PORT;
			spec.rem_port = ip_entry->psrc;
		}
		if (ip_mask->tos)
			return -EINVAL;
		break;

	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE |
				    EFX_FILTER_MATCH_IP_PROTO);
		spec.ether_type = htons(ETH_P_IPV6);
		spec.ip_proto = ((rule->flow_type & ~FLOW_EXT) == TCP_V6_FLOW ?
				 IPPROTO_TCP : IPPROTO_UDP);
		if (!ip6_mask_is_empty(ip6_mask->ip6dst)) {
			if (!ip6_mask_is_full(ip6_mask->ip6dst))
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
			memcpy(spec.loc_host, ip6_entry->ip6dst, sizeof(spec.loc_host));
		}
		if (!ip6_mask_is_empty(ip6_mask->ip6src)) {
			if (!ip6_mask_is_full(ip6_mask->ip6src))
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
			memcpy(spec.rem_host, ip6_entry->ip6src, sizeof(spec.rem_host));
		}
		if (ip6_mask->pdst) {
			if (ip6_mask->pdst != PORT_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_LOC_PORT;
			spec.loc_port = ip6_entry->pdst;
		}
		if (ip6_mask->psrc) {
			if (ip6_mask->psrc != PORT_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_REM_PORT;
			spec.rem_port = ip6_entry->psrc;
		}
		if (ip6_mask->tclass)
			return -EINVAL;
		break;

	case IPV4_USER_FLOW:
		if (uip_mask->l4_4_bytes || uip_mask->tos || uip_mask->ip_ver ||
		    uip_entry->ip_ver != ETH_RX_NFC_IP4)
			return -EINVAL;
		spec.match_flags = EFX_FILTER_MATCH_ETHER_TYPE;
		spec.ether_type = htons(ETH_P_IP);
		if (uip_mask->ip4dst) {
			if (uip_mask->ip4dst != IP4_ADDR_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
			spec.loc_host[0] = uip_entry->ip4dst;
		}
		if (uip_mask->ip4src) {
			if (uip_mask->ip4src != IP4_ADDR_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
			spec.rem_host[0] = uip_entry->ip4src;
		}
		if (uip_mask->proto) {
			if (uip_mask->proto != IP_PROTO_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_IP_PROTO;
			spec.ip_proto = uip_entry->proto;
		}
		break;

	case IPV6_USER_FLOW:
		if (uip6_mask->l4_4_bytes || uip6_mask->tclass)
			return -EINVAL;
		spec.match_flags = EFX_FILTER_MATCH_ETHER_TYPE;
		spec.ether_type = htons(ETH_P_IPV6);
		if (!ip6_mask_is_empty(uip6_mask->ip6dst)) {
			if (!ip6_mask_is_full(uip6_mask->ip6dst))
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
			memcpy(spec.loc_host, uip6_entry->ip6dst, sizeof(spec.loc_host));
		}
		if (!ip6_mask_is_empty(uip6_mask->ip6src)) {
			if (!ip6_mask_is_full(uip6_mask->ip6src))
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
			memcpy(spec.rem_host, uip6_entry->ip6src, sizeof(spec.rem_host));
		}
		if (uip6_mask->l4_proto) {
			if (uip6_mask->l4_proto != IP_PROTO_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_IP_PROTO;
			spec.ip_proto = uip6_entry->l4_proto;
		}
		break;

	case ETHER_FLOW:
		if (!is_zero_ether_addr(mac_mask->h_dest)) {
			if (ether_addr_equal(mac_mask->h_dest,
					     mac_addr_ig_mask))
				spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
			else if (is_broadcast_ether_addr(mac_mask->h_dest))
				spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC;
			else
				return -EINVAL;
			ether_addr_copy(spec.loc_mac, mac_entry->h_dest);
		}
		if (!is_zero_ether_addr(mac_mask->h_source)) {
			if (!is_broadcast_ether_addr(mac_mask->h_source))
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_REM_MAC;
			ether_addr_copy(spec.rem_mac, mac_entry->h_source);
		}
		if (mac_mask->h_proto) {
			if (mac_mask->h_proto != ETHER_TYPE_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
			spec.ether_type = mac_entry->h_proto;
		}
		break;

	default:
		return -EINVAL;
	}

	if ((rule->flow_type & FLOW_EXT) && rule->m_ext.vlan_tci) {
		if (rule->m_ext.vlan_tci != htons(0xfff))
			return -EINVAL;
		spec.match_flags |= EFX_FILTER_MATCH_OUTER_VID;
		spec.outer_vid = rule->h_ext.vlan_tci;
	}

	rc = efx_filter_insert_filter(efx, &spec, true);
	if (rc < 0)
		return rc;

	rule->location = rc;
	return 0;
}

static int efx_ethtool_set_rxnfc(struct net_device *net_dev,
				 struct ethtool_rxnfc *info)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	if (efx_filter_get_rx_id_limit(efx) == 0)
		return -EOPNOTSUPP;

	switch (info->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		return efx_ethtool_set_class_rule(efx, &info->fs);

	case ETHTOOL_SRXCLSRLDEL:
		return efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_MANUAL,
						 info->fs.location);

	default:
		return -EOPNOTSUPP;
	}
}

static u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	return ((efx_nic_rev(efx) < EFX_REV_FALCON_B0 ||
		 efx->n_rx_channels == 1) ?
		0 : ARRAY_SIZE(efx->rx_indir_table));
}

static int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
				u8 *hfunc)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;
	if (indir)
		memcpy(indir, efx->rx_indir_table, sizeof(efx->rx_indir_table));
	return 0;
}

static int efx_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir,
				const u8 *key, const u8 hfunc)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	/* We do not allow change in unsupported parameters */
	if (key ||
	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
		return -EOPNOTSUPP;
	if (!indir)
		return 0;

	return efx->type->rx_push_rss_config(efx, true, indir);
}

static int efx_ethtool_get_ts_info(struct net_device *net_dev,
				   struct ethtool_ts_info *ts_info)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	/* Software capabilities */
	ts_info->so_timestamping = (SOF_TIMESTAMPING_RX_SOFTWARE |
				    SOF_TIMESTAMPING_SOFTWARE);
	ts_info->phc_index = -1;

	efx_ptp_get_ts_info(efx, ts_info);
	return 0;
}

static int efx_ethtool_get_module_eeprom(struct net_device *net_dev,
					 struct ethtool_eeprom *ee,
					 u8 *data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int ret;

	if (!efx->phy_op || !efx->phy_op->get_module_eeprom)
		return -EOPNOTSUPP;

	mutex_lock(&efx->mac_lock);
	ret = efx->phy_op->get_module_eeprom(efx, ee, data);
	mutex_unlock(&efx->mac_lock);

	return ret;
}

static int efx_ethtool_get_module_info(struct net_device *net_dev,
				       struct ethtool_modinfo *modinfo)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int ret;

	if (!efx->phy_op || !efx->phy_op->get_module_info)
		return -EOPNOTSUPP;

	mutex_lock(&efx->mac_lock);
	ret = efx->phy_op->get_module_info(efx, modinfo);
	mutex_unlock(&efx->mac_lock);

	return ret;
}

const struct ethtool_ops efx_ethtool_ops = {
	.get_settings = efx_ethtool_get_settings,
	.set_settings = efx_ethtool_set_settings,
	.get_drvinfo = efx_ethtool_get_drvinfo,
	.get_regs_len = efx_ethtool_get_regs_len,
	.get_regs = efx_ethtool_get_regs,
	.get_msglevel = efx_ethtool_get_msglevel,
	.set_msglevel = efx_ethtool_set_msglevel,
	.nway_reset = efx_ethtool_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_coalesce = efx_ethtool_get_coalesce,
	.set_coalesce = efx_ethtool_set_coalesce,
	.get_ringparam = efx_ethtool_get_ringparam,
	.set_ringparam = efx_ethtool_set_ringparam,
	.get_pauseparam = efx_ethtool_get_pauseparam,
	.set_pauseparam = efx_ethtool_set_pauseparam,
	.get_sset_count = efx_ethtool_get_sset_count,
	.self_test = efx_ethtool_self_test,
	.get_strings = efx_ethtool_get_strings,
	.set_phys_id = efx_ethtool_phys_id,
	.get_ethtool_stats = efx_ethtool_get_stats,
	.get_wol = efx_ethtool_get_wol,
	.set_wol = efx_ethtool_set_wol,
	.reset = efx_ethtool_reset,
	.get_rxnfc = efx_ethtool_get_rxnfc,
	.set_rxnfc = efx_ethtool_set_rxnfc,
	.get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size,
	.get_rxfh = efx_ethtool_get_rxfh,
	.set_rxfh = efx_ethtool_set_rxfh,
	.get_ts_info = efx_ethtool_get_ts_info,
	.get_module_info = efx_ethtool_get_module_info,
	.get_module_eeprom = efx_ethtool_get_module_eeprom,
};