/*
 * drivers/net/ethernet/mellanox/mlxsw/core.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/if_link.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/u64_stats_sync.h>
#include <linux/netdevice.h>
#include <linux/completion.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <asm/byteorder.h>
#include <net/devlink.h>
#include <trace/events/devlink.h>

#include "core.h"
#include "item.h"
#include "cmd.h"
#include "port.h"
#include "trap.h"
#include "emad.h"
#include "reg.h"

static LIST_HEAD(mlxsw_core_driver_list);
static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);

static const char mlxsw_core_driver_name[] = "mlxsw_core";

static struct dentry *mlxsw_core_dbg_root;

static struct workqueue_struct *mlxsw_wq;

struct mlxsw_core_pcpu_stats {
        u64                     trap_rx_packets[MLXSW_TRAP_ID_MAX];
        u64                     trap_rx_bytes[MLXSW_TRAP_ID_MAX];
        u64                     port_rx_packets[MLXSW_PORT_MAX_PORTS];
        u64                     port_rx_bytes[MLXSW_PORT_MAX_PORTS];
        struct u64_stats_sync   syncp;
        u32                     trap_rx_dropped[MLXSW_TRAP_ID_MAX];
        u32                     port_rx_dropped[MLXSW_PORT_MAX_PORTS];
        u32                     trap_rx_invalid;
        u32                     port_rx_invalid;
};

struct mlxsw_core {
        struct mlxsw_driver *driver;
        const struct mlxsw_bus *bus;
        void *bus_priv;
        const struct mlxsw_bus_info *bus_info;
        struct list_head rx_listener_list;
        struct list_head event_listener_list;
        struct {
                atomic64_t tid;
                struct list_head trans_list;
                spinlock_t trans_list_lock; /* protects trans_list writes */
                bool use_emad;
        } emad;
        struct mlxsw_core_pcpu_stats __percpu *pcpu_stats;
        struct dentry *dbg_dir;
        struct {
                struct debugfs_blob_wrapper vsd_blob;
                struct debugfs_blob_wrapper psid_blob;
        } dbg;
        struct {
                u8 *mapping; /* lag_id+port_index to local_port mapping */
        } lag;
        struct mlxsw_resources resources;
        struct mlxsw_hwmon *hwmon;
        unsigned long driver_priv[0];
        /* driver_priv has to be always the last item */
};

void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
{
        return mlxsw_core->driver_priv;
}
EXPORT_SYMBOL(mlxsw_core_driver_priv);

struct mlxsw_rx_listener_item {
        struct list_head list;
        struct mlxsw_rx_listener rxl;
        void *priv;
};

struct mlxsw_event_listener_item {
        struct list_head list;
        struct mlxsw_event_listener el;
        void *priv;
};

/******************
 * EMAD processing
 ******************/

/* emad_eth_hdr_dmac
 * Destination MAC in EMAD's Ethernet header.
 * Must be set to 01:02:c9:00:00:01
 */
MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);

/* emad_eth_hdr_smac
 * Source MAC in EMAD's Ethernet header.
 * Must be set to 00:02:c9:01:02:03
 */
MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);

/* emad_eth_hdr_ethertype
 * Ethertype in EMAD's Ethernet header.
 * Must be set to 0x8932
 */
MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);

/* emad_eth_hdr_mlx_proto
 * Mellanox protocol.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);

/* emad_eth_hdr_ver
 * Mellanox protocol version.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);

/* emad_op_tlv_type
 * Type of the TLV.
 * Must be set to 0x1 (operation TLV).
 */
MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);

/* emad_op_tlv_len
 * Length of the operation TLV in u32.
 * Must be set to 0x4.
 */
MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);

/* emad_op_tlv_dr
 * Direct route bit. Setting to 1 indicates the EMAD is a direct route
 * EMAD. DR TLV must follow.
 *
 * Note: Currently not supported and must not be set.
 */
MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);

/* emad_op_tlv_status
 * Returned status in case of EMAD response. Must be set to 0 in case
 * of EMAD request.
 * 0x0 - success
 * 0x1 - device is busy. Requester should retry
 * 0x2 - Mellanox protocol version not supported
 * 0x3 - unknown TLV
 * 0x4 - register not supported
 * 0x5 - operation class not supported
 * 0x6 - EMAD method not supported
 * 0x7 - bad parameter (e.g. port out of range)
 * 0x8 - resource not available
 * 0x9 - message receipt acknowledgment. Requester should retry
 * 0x70 - internal error
 */
MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);

/* emad_op_tlv_register_id
 * Register ID of register within register TLV.
 */
MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);

/* emad_op_tlv_r
 * Response bit. Setting to 1 indicates Response, otherwise request.
 */
MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);

/* emad_op_tlv_method
 * EMAD method type.
 * 0x1 - query
 * 0x2 - write
 * 0x3 - send (currently not supported)
 * 0x4 - event
 */
MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);

/* emad_op_tlv_class
 * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
 */
MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);

/* emad_op_tlv_tid
 * EMAD transaction ID. Used for pairing request and response EMADs.
 */
MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);

/* emad_reg_tlv_type
 * Type of the TLV.
 * Must be set to 0x3 (register TLV).
 */
MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);

/* emad_reg_tlv_len
 * Length of the register TLV in u32.
 */
MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);

/* emad_end_tlv_type
 * Type of the TLV.
 * Must be set to 0x0 (end TLV).
 */
MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);

/* emad_end_tlv_len
 * Length of the end TLV in u32.
 * Must be set to 1.
 */
MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);
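
/* An EMAD is an Ethernet frame carrying the items defined above. As
 * packed by mlxsw_emad_construct() below, the layout is:
 *
 *      Ethernet header (MLXSW_EMAD_ETH_HDR_LEN bytes)
 *      operation TLV   (MLXSW_EMAD_OP_TLV_LEN u32s)
 *      register TLV    (one u32 header + reg->len bytes of payload)
 *      end TLV         (MLXSW_EMAD_END_TLV_LEN u32s)
 */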

enum mlxsw_core_reg_access_type {
        MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
        MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
};

static inline const char *
mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
{
        switch (type) {
        case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
                return "query";
        case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
                return "write";
        }
        BUG();
}

static void mlxsw_emad_pack_end_tlv(char *end_tlv)
{
        mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
        mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
}

static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
                                    const struct mlxsw_reg_info *reg,
                                    char *payload)
{
        mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
        mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
        memcpy(reg_tlv + sizeof(u32), payload, reg->len);
}

static void mlxsw_emad_pack_op_tlv(char *op_tlv,
                                   const struct mlxsw_reg_info *reg,
                                   enum mlxsw_core_reg_access_type type,
                                   u64 tid)
{
        mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
        mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
        mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
        mlxsw_emad_op_tlv_status_set(op_tlv, 0);
        mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
        mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
        if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
                mlxsw_emad_op_tlv_method_set(op_tlv,
                                             MLXSW_EMAD_OP_TLV_METHOD_QUERY);
        else
                mlxsw_emad_op_tlv_method_set(op_tlv,
                                             MLXSW_EMAD_OP_TLV_METHOD_WRITE);
        mlxsw_emad_op_tlv_class_set(op_tlv,
                                    MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
        mlxsw_emad_op_tlv_tid_set(op_tlv, tid);
}

static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
{
        char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);

        mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
        mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
        mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
        mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
        mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);
        skb_reset_mac_header(skb);

        return 0;
}
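
/* The EMAD frame is assembled back to front: each skb_push() below
 * prepends the next layer (end TLV, then register TLV, then operation
 * TLV, and finally the Ethernet header).
 */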

static void mlxsw_emad_construct(struct sk_buff *skb,
                                 const struct mlxsw_reg_info *reg,
                                 char *payload,
                                 enum mlxsw_core_reg_access_type type,
                                 u64 tid)
{
        char *buf;

        buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
        mlxsw_emad_pack_end_tlv(buf);

        buf = skb_push(skb, reg->len + sizeof(u32));
        mlxsw_emad_pack_reg_tlv(buf, reg, payload);

        buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
        mlxsw_emad_pack_op_tlv(buf, reg, type, tid);

        mlxsw_emad_construct_eth_hdr(skb);
}

static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
{
        return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN));
}

static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
{
        return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN +
                          MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)));
}

static char *mlxsw_emad_reg_payload(const char *op_tlv)
{
        return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
}

static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
{
        char *op_tlv;

        op_tlv = mlxsw_emad_op_tlv(skb);
        return mlxsw_emad_op_tlv_tid_get(op_tlv);
}

static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
{
        char *op_tlv;

        op_tlv = mlxsw_emad_op_tlv(skb);
        return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
}

static int mlxsw_emad_process_status(char *op_tlv,
                                     enum mlxsw_emad_op_tlv_status *p_status)
{
        *p_status = mlxsw_emad_op_tlv_status_get(op_tlv);

        switch (*p_status) {
        case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
                return 0;
        case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
        case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
                return -EAGAIN;
        case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
        case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
        case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
        case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
        case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
        case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
        case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
        case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
        default:
                return -EIO;
        }
}

static int
mlxsw_emad_process_status_skb(struct sk_buff *skb,
                              enum mlxsw_emad_op_tlv_status *p_status)
{
        return mlxsw_emad_process_status(mlxsw_emad_op_tlv(skb), p_status);
}

struct mlxsw_reg_trans {
        struct list_head list;
        struct list_head bulk_list;
        struct mlxsw_core *core;
        struct sk_buff *tx_skb;
        struct mlxsw_tx_info tx_info;
        struct delayed_work timeout_dw;
        unsigned int retries;
        u64 tid;
        struct completion completion;
        atomic_t active;
        mlxsw_reg_trans_cb_t *cb;
        unsigned long cb_priv;
        const struct mlxsw_reg_info *reg;
        enum mlxsw_core_reg_access_type type;
        int err;
        enum mlxsw_emad_op_tlv_status emad_status;
        struct rcu_head rcu;
};
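
/* Lifecycle of a register transaction: allocated by
 * mlxsw_core_reg_access_emad(), linked on emad.trans_list and transmitted;
 * completed either by a matching response (mlxsw_emad_process_response())
 * or by the timeout worker, possibly after retries. The issuer blocks in
 * mlxsw_reg_trans_wait(), which frees the transaction with kfree_rcu().
 */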

#define MLXSW_EMAD_TIMEOUT_MS 200

static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
{
        unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);

        mlxsw_core_schedule_dw(&trans->timeout_dw, timeout);
}

static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
                               struct mlxsw_reg_trans *trans)
{
        struct sk_buff *skb;
        int err;

        skb = skb_copy(trans->tx_skb, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0,
                            skb->data + mlxsw_core->driver->txhdr_len,
                            skb->len - mlxsw_core->driver->txhdr_len);

        atomic_set(&trans->active, 1);
        err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info);
        if (err) {
                dev_kfree_skb(skb);
                return err;
        }
        mlxsw_emad_trans_timeout_schedule(trans);
        return 0;
}

static void mlxsw_emad_trans_finish(struct mlxsw_reg_trans *trans, int err)
{
        struct mlxsw_core *mlxsw_core = trans->core;

        dev_kfree_skb(trans->tx_skb);
        spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
        list_del_rcu(&trans->list);
        spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
        trans->err = err;
        complete(&trans->completion);
}

static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core,
                                      struct mlxsw_reg_trans *trans)
{
        int err;

        if (trans->retries < MLXSW_EMAD_MAX_RETRY) {
                trans->retries++;
                err = mlxsw_emad_transmit(trans->core, trans);
                if (err == 0)
                        return;
        } else {
                err = -EIO;
        }
        mlxsw_emad_trans_finish(trans, err);
}
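
/* trans->active is set to 1 on each (re)transmission. The response path
 * and the timeout worker below both do atomic_dec_and_test(); only the
 * caller that brings the counter to zero may retry or finish the
 * transaction, so a late response cannot race with a concurrent timeout.
 */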

static void mlxsw_emad_trans_timeout_work(struct work_struct *work)
{
        struct mlxsw_reg_trans *trans = container_of(work,
                                                     struct mlxsw_reg_trans,
                                                     timeout_dw.work);

        if (!atomic_dec_and_test(&trans->active))
                return;

        mlxsw_emad_transmit_retry(trans->core, trans);
}

static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core,
                                        struct mlxsw_reg_trans *trans,
                                        struct sk_buff *skb)
{
        int err;

        if (!atomic_dec_and_test(&trans->active))
                return;

        err = mlxsw_emad_process_status_skb(skb, &trans->emad_status);
        if (err == -EAGAIN) {
                mlxsw_emad_transmit_retry(mlxsw_core, trans);
        } else {
                if (err == 0) {
                        char *op_tlv = mlxsw_emad_op_tlv(skb);

                        if (trans->cb)
                                trans->cb(mlxsw_core,
                                          mlxsw_emad_reg_payload(op_tlv),
                                          trans->reg->len, trans->cb_priv);
                }
                mlxsw_emad_trans_finish(trans, err);
        }
}

/* called with rcu read lock held */
static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
                                        void *priv)
{
        struct mlxsw_core *mlxsw_core = priv;
        struct mlxsw_reg_trans *trans;

        trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0,
                            skb->data, skb->len);

        if (!mlxsw_emad_is_resp(skb))
                goto free_skb;

        list_for_each_entry_rcu(trans, &mlxsw_core->emad.trans_list, list) {
                if (mlxsw_emad_get_tid(skb) == trans->tid) {
                        mlxsw_emad_process_response(mlxsw_core, trans, skb);
                        break;
                }
        }

free_skb:
        dev_kfree_skb(skb);
}

static const struct mlxsw_rx_listener mlxsw_emad_rx_listener = {
        .func = mlxsw_emad_rx_listener_func,
        .local_port = MLXSW_PORT_DONT_CARE,
        .trap_id = MLXSW_TRAP_ID_ETHEMAD,
};

static int mlxsw_emad_traps_set(struct mlxsw_core *mlxsw_core)
{
        char htgt_pl[MLXSW_REG_HTGT_LEN];
        char hpkt_pl[MLXSW_REG_HPKT_LEN];
        int err;

        mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD);
        err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
        if (err)
                return err;

        mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
                            MLXSW_TRAP_ID_ETHEMAD);
        return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
}

static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
{
        u64 tid;
        int err;

        /* Set the upper 32 bits of the transaction ID field to a random
         * number. This allows us to discard EMADs addressed to other
         * devices.
         */
        get_random_bytes(&tid, 4);
        tid <<= 32;
        atomic64_set(&mlxsw_core->emad.tid, tid);

        INIT_LIST_HEAD(&mlxsw_core->emad.trans_list);
        spin_lock_init(&mlxsw_core->emad.trans_list_lock);

        err = mlxsw_core_rx_listener_register(mlxsw_core,
                                              &mlxsw_emad_rx_listener,
                                              mlxsw_core);
        if (err)
                return err;

        err = mlxsw_emad_traps_set(mlxsw_core);
        if (err)
                goto err_emad_trap_set;

        mlxsw_core->emad.use_emad = true;

        return 0;

err_emad_trap_set:
        mlxsw_core_rx_listener_unregister(mlxsw_core,
                                          &mlxsw_emad_rx_listener,
                                          mlxsw_core);
        return err;
}

static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
{
        char hpkt_pl[MLXSW_REG_HPKT_LEN];

        mlxsw_core->emad.use_emad = false;
        mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
                            MLXSW_TRAP_ID_ETHEMAD);
        mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);

        mlxsw_core_rx_listener_unregister(mlxsw_core,
                                          &mlxsw_emad_rx_listener,
                                          mlxsw_core);
}
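
/* Allocate an skb large enough for the largest possible EMAD built around
 * a reg_len byte register payload: Ethernet header, operation TLV,
 * register TLV and end TLV, plus the bus-specific TX header. The whole
 * length is reserved up front so that mlxsw_emad_construct() can build
 * the frame with skb_push().
 */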

static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
                                        u16 reg_len)
{
        struct sk_buff *skb;
        u16 emad_len;

        emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
                    (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
                    sizeof(u32) + mlxsw_core->driver->txhdr_len);
        if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
                return NULL;

        skb = netdev_alloc_skb(NULL, emad_len);
        if (!skb)
                return NULL;
        memset(skb->data, 0, emad_len);
        skb_reserve(skb, emad_len);

        return skb;
}

static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
                                 const struct mlxsw_reg_info *reg,
                                 char *payload,
                                 enum mlxsw_core_reg_access_type type,
                                 struct mlxsw_reg_trans *trans,
                                 struct list_head *bulk_list,
                                 mlxsw_reg_trans_cb_t *cb,
                                 unsigned long cb_priv, u64 tid)
{
        struct sk_buff *skb;
        int err;

        dev_dbg(mlxsw_core->bus_info->dev, "EMAD reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
                trans->tid, reg->id, mlxsw_reg_id_str(reg->id),
                mlxsw_core_reg_access_type_str(type));

        skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
        if (!skb)
                return -ENOMEM;

        list_add_tail(&trans->bulk_list, bulk_list);
        trans->core = mlxsw_core;
        trans->tx_skb = skb;
        trans->tx_info.local_port = MLXSW_PORT_CPU_PORT;
        trans->tx_info.is_emad = true;
        INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work);
        trans->tid = tid;
        init_completion(&trans->completion);
        trans->cb = cb;
        trans->cb_priv = cb_priv;
        trans->reg = reg;
        trans->type = type;

        mlxsw_emad_construct(skb, reg, payload, type, trans->tid);
        mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);

        spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
        list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list);
        spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
        err = mlxsw_emad_transmit(mlxsw_core, trans);
        if (err)
                goto err_out;
        return 0;

err_out:
        spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
        list_del_rcu(&trans->list);
        spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
        list_del(&trans->bulk_list);
        dev_kfree_skb(trans->tx_skb);
        return err;
}

/*****************
 * Core functions
 *****************/

static int mlxsw_core_rx_stats_dbg_read(struct seq_file *file, void *data)
{
        struct mlxsw_core *mlxsw_core = file->private;
        struct mlxsw_core_pcpu_stats *p;
        u64 rx_packets, rx_bytes;
        u64 tmp_rx_packets, tmp_rx_bytes;
        u32 rx_dropped, rx_invalid;
        unsigned int start;
        int i;
        int j;
        static const char hdr[] =
                "     NUM   RX_PACKETS     RX_BYTES RX_DROPPED\n";

        seq_printf(file, hdr);
        for (i = 0; i < MLXSW_TRAP_ID_MAX; i++) {
                rx_packets = 0;
                rx_bytes = 0;
                rx_dropped = 0;
                for_each_possible_cpu(j) {
                        p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
                        do {
                                start = u64_stats_fetch_begin(&p->syncp);
                                tmp_rx_packets = p->trap_rx_packets[i];
                                tmp_rx_bytes = p->trap_rx_bytes[i];
                        } while (u64_stats_fetch_retry(&p->syncp, start));

                        rx_packets += tmp_rx_packets;
                        rx_bytes += tmp_rx_bytes;
                        rx_dropped += p->trap_rx_dropped[i];
                }
                seq_printf(file, "trap %3d %12llu %12llu %10u\n",
                           i, rx_packets, rx_bytes, rx_dropped);
        }
        rx_invalid = 0;
        for_each_possible_cpu(j) {
                p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
                rx_invalid += p->trap_rx_invalid;
        }
        seq_printf(file, "trap INV                           %10u\n",
                   rx_invalid);

        for (i = 0; i < MLXSW_PORT_MAX_PORTS; i++) {
                rx_packets = 0;
                rx_bytes = 0;
                rx_dropped = 0;
                for_each_possible_cpu(j) {
                        p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
                        do {
                                start = u64_stats_fetch_begin(&p->syncp);
                                tmp_rx_packets = p->port_rx_packets[i];
                                tmp_rx_bytes = p->port_rx_bytes[i];
                        } while (u64_stats_fetch_retry(&p->syncp, start));

                        rx_packets += tmp_rx_packets;
                        rx_bytes += tmp_rx_bytes;
                        rx_dropped += p->port_rx_dropped[i];
                }
                seq_printf(file, "port %3d %12llu %12llu %10u\n",
                           i, rx_packets, rx_bytes, rx_dropped);
        }
        rx_invalid = 0;
        for_each_possible_cpu(j) {
                p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
                rx_invalid += p->port_rx_invalid;
        }
        seq_printf(file, "port INV                           %10u\n",
                   rx_invalid);
        return 0;
}

static int mlxsw_core_rx_stats_dbg_open(struct inode *inode, struct file *f)
{
        struct mlxsw_core *mlxsw_core = inode->i_private;

        return single_open(f, mlxsw_core_rx_stats_dbg_read, mlxsw_core);
}

static const struct file_operations mlxsw_core_rx_stats_dbg_ops = {
        .owner = THIS_MODULE,
        .open = mlxsw_core_rx_stats_dbg_open,
        .release = single_release,
        .read = seq_read,
        .llseek = seq_lseek
};

int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
{
        spin_lock(&mlxsw_core_driver_list_lock);
        list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
        spin_unlock(&mlxsw_core_driver_list_lock);
        return 0;
}
EXPORT_SYMBOL(mlxsw_core_driver_register);

void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
{
        spin_lock(&mlxsw_core_driver_list_lock);
        list_del(&mlxsw_driver->list);
        spin_unlock(&mlxsw_core_driver_list_lock);
}
EXPORT_SYMBOL(mlxsw_core_driver_unregister);

static struct mlxsw_driver *__driver_find(const char *kind)
{
        struct mlxsw_driver *mlxsw_driver;

        list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
                if (strcmp(mlxsw_driver->kind, kind) == 0)
                        return mlxsw_driver;
        }
        return NULL;
}

static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
{
        struct mlxsw_driver *mlxsw_driver;

        spin_lock(&mlxsw_core_driver_list_lock);
        mlxsw_driver = __driver_find(kind);
        if (!mlxsw_driver) {
                spin_unlock(&mlxsw_core_driver_list_lock);
                request_module(MLXSW_MODULE_ALIAS_PREFIX "%s", kind);
                spin_lock(&mlxsw_core_driver_list_lock);
                mlxsw_driver = __driver_find(kind);
        }
        if (mlxsw_driver) {
                if (!try_module_get(mlxsw_driver->owner))
                        mlxsw_driver = NULL;
        }

        spin_unlock(&mlxsw_core_driver_list_lock);
        return mlxsw_driver;
}

static void mlxsw_core_driver_put(const char *kind)
{
        struct mlxsw_driver *mlxsw_driver;

        spin_lock(&mlxsw_core_driver_list_lock);
        mlxsw_driver = __driver_find(kind);
        spin_unlock(&mlxsw_core_driver_list_lock);
        if (!mlxsw_driver)
                return;
        module_put(mlxsw_driver->owner);
}

static int mlxsw_core_debugfs_init(struct mlxsw_core *mlxsw_core)
{
        const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info;

        mlxsw_core->dbg_dir = debugfs_create_dir(bus_info->device_name,
                                                 mlxsw_core_dbg_root);
        if (!mlxsw_core->dbg_dir)
                return -ENOMEM;
        debugfs_create_file("rx_stats", S_IRUGO, mlxsw_core->dbg_dir,
                            mlxsw_core, &mlxsw_core_rx_stats_dbg_ops);
        mlxsw_core->dbg.vsd_blob.data = (void *) &bus_info->vsd;
        mlxsw_core->dbg.vsd_blob.size = sizeof(bus_info->vsd);
        debugfs_create_blob("vsd", S_IRUGO, mlxsw_core->dbg_dir,
                            &mlxsw_core->dbg.vsd_blob);
        mlxsw_core->dbg.psid_blob.data = (void *) &bus_info->psid;
        mlxsw_core->dbg.psid_blob.size = sizeof(bus_info->psid);
        debugfs_create_blob("psid", S_IRUGO, mlxsw_core->dbg_dir,
                            &mlxsw_core->dbg.psid_blob);
        return 0;
}

static void mlxsw_core_debugfs_fini(struct mlxsw_core *mlxsw_core)
{
        debugfs_remove_recursive(mlxsw_core->dbg_dir);
}

static int mlxsw_devlink_port_split(struct devlink *devlink,
                                    unsigned int port_index,
                                    unsigned int count)
{
        struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

        if (port_index >= MLXSW_PORT_MAX_PORTS)
                return -EINVAL;
        if (!mlxsw_core->driver->port_split)
                return -EOPNOTSUPP;
        return mlxsw_core->driver->port_split(mlxsw_core, port_index, count);
}

static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
                                      unsigned int port_index)
{
        struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

        if (port_index >= MLXSW_PORT_MAX_PORTS)
                return -EINVAL;
        if (!mlxsw_core->driver->port_unsplit)
                return -EOPNOTSUPP;
        return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index);
}

static int
mlxsw_devlink_sb_pool_get(struct devlink *devlink,
                          unsigned int sb_index, u16 pool_index,
                          struct devlink_sb_pool_info *pool_info)
{
        struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
        struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

        if (!mlxsw_driver->sb_pool_get)
                return -EOPNOTSUPP;
        return mlxsw_driver->sb_pool_get(mlxsw_core, sb_index,
                                         pool_index, pool_info);
}

static int
mlxsw_devlink_sb_pool_set(struct devlink *devlink,
                          unsigned int sb_index, u16 pool_index, u32 size,
                          enum devlink_sb_threshold_type threshold_type)
{
        struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
        struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

        if (!mlxsw_driver->sb_pool_set)
                return -EOPNOTSUPP;
        return mlxsw_driver->sb_pool_set(mlxsw_core, sb_index,
                                         pool_index, size, threshold_type);
}

static void *__dl_port(struct devlink_port *devlink_port)
{
        return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
}

static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port,
                                          unsigned int sb_index, u16 pool_index,
                                          u32 *p_threshold)
{
        struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
        struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
        struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

        if (!mlxsw_driver->sb_port_pool_get)
                return -EOPNOTSUPP;
        return mlxsw_driver->sb_port_pool_get(mlxsw_core_port, sb_index,
                                              pool_index, p_threshold);
}

static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port,
                                          unsigned int sb_index, u16 pool_index,
                                          u32 threshold)
{
        struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
        struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
        struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

        if (!mlxsw_driver->sb_port_pool_set)
                return -EOPNOTSUPP;
        return mlxsw_driver->sb_port_pool_set(mlxsw_core_port, sb_index,
                                              pool_index, threshold);
}

static int
mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port *devlink_port,
                                  unsigned int sb_index, u16 tc_index,
                                  enum devlink_sb_pool_type pool_type,
                                  u16 *p_pool_index, u32 *p_threshold)
{
        struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
        struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
        struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

        if (!mlxsw_driver->sb_tc_pool_bind_get)
                return -EOPNOTSUPP;
        return mlxsw_driver->sb_tc_pool_bind_get(mlxsw_core_port, sb_index,
                                                 tc_index, pool_type,
                                                 p_pool_index, p_threshold);
}

static int
mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
                                  unsigned int sb_index, u16 tc_index,
                                  enum devlink_sb_pool_type pool_type,
                                  u16 pool_index, u32 threshold)
{
        struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
        struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
        struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

        if (!mlxsw_driver->sb_tc_pool_bind_set)
                return -EOPNOTSUPP;
        return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index,
                                                 tc_index, pool_type,
                                                 pool_index, threshold);
}

static int mlxsw_devlink_sb_occ_snapshot(struct devlink *devlink,
                                         unsigned int sb_index)
{
        struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
        struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

        if (!mlxsw_driver->sb_occ_snapshot)
                return -EOPNOTSUPP;
        return mlxsw_driver->sb_occ_snapshot(mlxsw_core, sb_index);
}

static int mlxsw_devlink_sb_occ_max_clear(struct devlink *devlink,
                                          unsigned int sb_index)
{
        struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
        struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

        if (!mlxsw_driver->sb_occ_max_clear)
                return -EOPNOTSUPP;
        return mlxsw_driver->sb_occ_max_clear(mlxsw_core, sb_index);
}

static int
mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port *devlink_port,
                                   unsigned int sb_index, u16 pool_index,
                                   u32 *p_cur, u32 *p_max)
{
        struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
        struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
        struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

        if (!mlxsw_driver->sb_occ_port_pool_get)
                return -EOPNOTSUPP;
        return mlxsw_driver->sb_occ_port_pool_get(mlxsw_core_port, sb_index,
                                                  pool_index, p_cur, p_max);
}

static int
mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
                                      unsigned int sb_index, u16 tc_index,
                                      enum devlink_sb_pool_type pool_type,
                                      u32 *p_cur, u32 *p_max)
{
        struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
        struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
        struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

        if (!mlxsw_driver->sb_occ_tc_port_bind_get)
                return -EOPNOTSUPP;
        return mlxsw_driver->sb_occ_tc_port_bind_get(mlxsw_core_port,
                                                     sb_index, tc_index,
                                                     pool_type, p_cur, p_max);
}

static const struct devlink_ops mlxsw_devlink_ops = {
        .port_split              = mlxsw_devlink_port_split,
        .port_unsplit            = mlxsw_devlink_port_unsplit,
        .sb_pool_get             = mlxsw_devlink_sb_pool_get,
        .sb_pool_set             = mlxsw_devlink_sb_pool_set,
        .sb_port_pool_get        = mlxsw_devlink_sb_port_pool_get,
        .sb_port_pool_set        = mlxsw_devlink_sb_port_pool_set,
        .sb_tc_pool_bind_get     = mlxsw_devlink_sb_tc_pool_bind_get,
        .sb_tc_pool_bind_set     = mlxsw_devlink_sb_tc_pool_bind_set,
        .sb_occ_snapshot         = mlxsw_devlink_sb_occ_snapshot,
        .sb_occ_max_clear        = mlxsw_devlink_sb_occ_max_clear,
        .sb_occ_port_pool_get    = mlxsw_devlink_sb_occ_port_pool_get,
        .sb_occ_tc_port_bind_get = mlxsw_devlink_sb_occ_tc_port_bind_get,
};
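
/* The devlink ops above are thin wrappers: they validate the port index
 * where relevant and dispatch to the matching (optional) callback in
 * struct mlxsw_driver, returning -EOPNOTSUPP when the driver does not
 * implement it.
 */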

int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
                                   const struct mlxsw_bus *mlxsw_bus,
                                   void *bus_priv)
{
        const char *device_kind = mlxsw_bus_info->device_kind;
        struct mlxsw_core *mlxsw_core;
        struct mlxsw_driver *mlxsw_driver;
        struct devlink *devlink;
        size_t alloc_size;
        int err;

        mlxsw_driver = mlxsw_core_driver_get(device_kind);
        if (!mlxsw_driver)
                return -EINVAL;
        alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
        devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
        if (!devlink) {
                err = -ENOMEM;
                goto err_devlink_alloc;
        }

        mlxsw_core = devlink_priv(devlink);
        INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
        INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
        mlxsw_core->driver = mlxsw_driver;
        mlxsw_core->bus = mlxsw_bus;
        mlxsw_core->bus_priv = bus_priv;
        mlxsw_core->bus_info = mlxsw_bus_info;

        mlxsw_core->pcpu_stats =
                netdev_alloc_pcpu_stats(struct mlxsw_core_pcpu_stats);
        if (!mlxsw_core->pcpu_stats) {
                err = -ENOMEM;
                goto err_alloc_stats;
        }

        err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
                              &mlxsw_core->resources);
        if (err)
                goto err_bus_init;

        if (mlxsw_core->resources.max_lag_valid &&
            mlxsw_core->resources.max_ports_in_lag_valid) {
                alloc_size = sizeof(u8) * mlxsw_core->resources.max_lag *
                             mlxsw_core->resources.max_ports_in_lag;
                mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
                if (!mlxsw_core->lag.mapping) {
                        err = -ENOMEM;
                        goto err_alloc_lag_mapping;
                }
        }

        err = mlxsw_emad_init(mlxsw_core);
        if (err)
                goto err_emad_init;

        err = devlink_register(devlink, mlxsw_bus_info->dev);
        if (err)
                goto err_devlink_register;

        err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
        if (err)
                goto err_hwmon_init;

        err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info);
        if (err)
                goto err_driver_init;

        err = mlxsw_core_debugfs_init(mlxsw_core);
        if (err)
                goto err_debugfs_init;

        return 0;

err_debugfs_init:
        mlxsw_core->driver->fini(mlxsw_core);
err_driver_init:
err_hwmon_init:
        devlink_unregister(devlink);
err_devlink_register:
        mlxsw_emad_fini(mlxsw_core);
err_emad_init:
        kfree(mlxsw_core->lag.mapping);
err_alloc_lag_mapping:
        mlxsw_bus->fini(bus_priv);
err_bus_init:
        free_percpu(mlxsw_core->pcpu_stats);
err_alloc_stats:
        devlink_free(devlink);
err_devlink_alloc:
        mlxsw_core_driver_put(device_kind);
        return err;
}
EXPORT_SYMBOL(mlxsw_core_bus_device_register);

void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
{
        const char *device_kind = mlxsw_core->bus_info->device_kind;
        struct devlink *devlink = priv_to_devlink(mlxsw_core);

        mlxsw_core_debugfs_fini(mlxsw_core);
        mlxsw_core->driver->fini(mlxsw_core);
        devlink_unregister(devlink);
        mlxsw_emad_fini(mlxsw_core);
        mlxsw_core->bus->fini(mlxsw_core->bus_priv);
        kfree(mlxsw_core->lag.mapping);
        free_percpu(mlxsw_core->pcpu_stats);
        devlink_free(devlink);
        mlxsw_core_driver_put(device_kind);
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);

bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
                                  const struct mlxsw_tx_info *tx_info)
{
        return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
                                                  tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);

int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
                            const struct mlxsw_tx_info *tx_info)
{
        return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
                                             tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit);

static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
                                   const struct mlxsw_rx_listener *rxl_b)
{
        return (rxl_a->func == rxl_b->func &&
                rxl_a->local_port == rxl_b->local_port &&
                rxl_a->trap_id == rxl_b->trap_id);
}

static struct mlxsw_rx_listener_item *
__find_rx_listener_item(struct mlxsw_core *mlxsw_core,
                        const struct mlxsw_rx_listener *rxl,
                        void *priv)
{
        struct mlxsw_rx_listener_item *rxl_item;

        list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
                if (__is_rx_listener_equal(&rxl_item->rxl, rxl) &&
                    rxl_item->priv == priv)
                        return rxl_item;
        }
        return NULL;
}

int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
                                    const struct mlxsw_rx_listener *rxl,
                                    void *priv)
{
        struct mlxsw_rx_listener_item *rxl_item;

        rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
        if (rxl_item)
                return -EEXIST;
        rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
        if (!rxl_item)
                return -ENOMEM;
        rxl_item->rxl = *rxl;
        rxl_item->priv = priv;

        list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
        return 0;
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_register);

void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
                                       const struct mlxsw_rx_listener *rxl,
                                       void *priv)
{
        struct mlxsw_rx_listener_item *rxl_item;

        rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
        if (!rxl_item)
                return;
        list_del_rcu(&rxl_item->list);
        synchronize_rcu();
        kfree(rxl_item);
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);
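
/* Unregistering waits for an RCU grace period before freeing the item,
 * since mlxsw_core_skb_receive() walks rx_listener_list under
 * rcu_read_lock().
 */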

static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
                                           void *priv)
{
        struct mlxsw_event_listener_item *event_listener_item = priv;
        struct mlxsw_reg_info reg;
        char *payload;
        char *op_tlv = mlxsw_emad_op_tlv(skb);
        char *reg_tlv = mlxsw_emad_reg_tlv(skb);

        reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
        reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
        payload = mlxsw_emad_reg_payload(op_tlv);
        event_listener_item->el.func(&reg, payload, event_listener_item->priv);
        dev_kfree_skb(skb);
}

static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
                                      const struct mlxsw_event_listener *el_b)
{
        return (el_a->func == el_b->func &&
                el_a->trap_id == el_b->trap_id);
}

static struct mlxsw_event_listener_item *
__find_event_listener_item(struct mlxsw_core *mlxsw_core,
                           const struct mlxsw_event_listener *el,
                           void *priv)
{
        struct mlxsw_event_listener_item *el_item;

        list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
                if (__is_event_listener_equal(&el_item->el, el) &&
                    el_item->priv == priv)
                        return el_item;
        }
        return NULL;
}

int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
                                       const struct mlxsw_event_listener *el,
                                       void *priv)
{
        int err;
        struct mlxsw_event_listener_item *el_item;
        const struct mlxsw_rx_listener rxl = {
                .func = mlxsw_core_event_listener_func,
                .local_port = MLXSW_PORT_DONT_CARE,
                .trap_id = el->trap_id,
        };

        el_item = __find_event_listener_item(mlxsw_core, el, priv);
        if (el_item)
                return -EEXIST;
        el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
        if (!el_item)
                return -ENOMEM;
        el_item->el = *el;
        el_item->priv = priv;

        err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item);
        if (err)
                goto err_rx_listener_register;

        /* No reason to save item if we did not manage to register an
         * RX listener for it.
         */
        list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);

        return 0;

err_rx_listener_register:
        kfree(el_item);
        return err;
}
EXPORT_SYMBOL(mlxsw_core_event_listener_register);

void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
                                          const struct mlxsw_event_listener *el,
                                          void *priv)
{
        struct mlxsw_event_listener_item *el_item;
        const struct mlxsw_rx_listener rxl = {
                .func = mlxsw_core_event_listener_func,
                .local_port = MLXSW_PORT_DONT_CARE,
                .trap_id = el->trap_id,
        };

        el_item = __find_event_listener_item(mlxsw_core, el, priv);
        if (!el_item)
                return;
        mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl, el_item);
        list_del(&el_item->list);
        kfree(el_item);
}
EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);

static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core)
{
        return atomic64_inc_return(&mlxsw_core->emad.tid);
}
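
/* The lower 32 bits of the transaction ID form a simple counter, while
 * the upper 32 bits keep the random per-device stamp set in
 * mlxsw_emad_init().
 */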

static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
                                      const struct mlxsw_reg_info *reg,
                                      char *payload,
                                      enum mlxsw_core_reg_access_type type,
                                      struct list_head *bulk_list,
                                      mlxsw_reg_trans_cb_t *cb,
                                      unsigned long cb_priv)
{
        u64 tid = mlxsw_core_tid_get(mlxsw_core);
        struct mlxsw_reg_trans *trans;
        int err;

        trans = kzalloc(sizeof(*trans), GFP_KERNEL);
        if (!trans)
                return -ENOMEM;

        err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
                                    bulk_list, cb, cb_priv, tid);
        if (err) {
                kfree(trans);
                return err;
        }
        return 0;
}

int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core,
                          const struct mlxsw_reg_info *reg, char *payload,
                          struct list_head *bulk_list,
                          mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
        return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
                                          MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
                                          bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_query);

int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
                          const struct mlxsw_reg_info *reg, char *payload,
                          struct list_head *bulk_list,
                          mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
        return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
                                          MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
                                          bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_write);

static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans)
{
        struct mlxsw_core *mlxsw_core = trans->core;
        int err;

        wait_for_completion(&trans->completion);
        cancel_delayed_work_sync(&trans->timeout_dw);
        err = trans->err;

        if (trans->retries)
                dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n",
                         trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid);
        if (err)
                dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n",
                        trans->tid, trans->reg->id,
                        mlxsw_reg_id_str(trans->reg->id),
                        mlxsw_core_reg_access_type_str(trans->type),
                        trans->emad_status,
                        mlxsw_emad_op_tlv_status_str(trans->emad_status));

        list_del(&trans->bulk_list);
        kfree_rcu(trans, rcu);
        return err;
}

int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list)
{
        struct mlxsw_reg_trans *trans;
        struct mlxsw_reg_trans *tmp;
        int sum_err = 0;
        int err;

        list_for_each_entry_safe(trans, tmp, bulk_list, bulk_list) {
                err = mlxsw_reg_trans_wait(trans);
                if (err && sum_err == 0)
                        sum_err = err; /* first error to be returned */
        }
        return sum_err;
}
EXPORT_SYMBOL(mlxsw_reg_trans_bulk_wait);
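
/* A minimal usage sketch of the bulk transaction API, assuming a
 * hypothetical register "foo" (MLXSW_REG(foo), MLXSW_REG_FOO_LEN and
 * foo_done() are placeholders, not part of this file):
 *
 *      LIST_HEAD(bulk_list);
 *      char foo_pl[MLXSW_REG_FOO_LEN];
 *      int err;
 *
 *      err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(foo), foo_pl,
 *                                  &bulk_list, foo_done, 0);
 *      if (err)
 *              return err;
 *      ... more queries/writes may be queued on the same bulk_list ...
 *      return mlxsw_reg_trans_bulk_wait(&bulk_list);
 *
 * foo_done() is a mlxsw_reg_trans_cb_t and receives the response payload
 * once its transaction completes successfully.
 */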
  1240. static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
  1241. const struct mlxsw_reg_info *reg,
  1242. char *payload,
  1243. enum mlxsw_core_reg_access_type type)
  1244. {
  1245. enum mlxsw_emad_op_tlv_status status;
  1246. int err, n_retry;
  1247. char *in_mbox, *out_mbox, *tmp;
  1248. dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n",
  1249. reg->id, mlxsw_reg_id_str(reg->id),
  1250. mlxsw_core_reg_access_type_str(type));
  1251. in_mbox = mlxsw_cmd_mbox_alloc();
  1252. if (!in_mbox)
  1253. return -ENOMEM;
  1254. out_mbox = mlxsw_cmd_mbox_alloc();
  1255. if (!out_mbox) {
  1256. err = -ENOMEM;
  1257. goto free_in_mbox;
  1258. }
  1259. mlxsw_emad_pack_op_tlv(in_mbox, reg, type,
  1260. mlxsw_core_tid_get(mlxsw_core));
  1261. tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
  1262. mlxsw_emad_pack_reg_tlv(tmp, reg, payload);
  1263. n_retry = 0;
  1264. retry:
  1265. err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox);
  1266. if (!err) {
  1267. err = mlxsw_emad_process_status(out_mbox, &status);
  1268. if (err) {
  1269. if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
  1270. goto retry;
  1271. dev_err(mlxsw_core->bus_info->dev, "Reg cmd access status failed (status=%x(%s))\n",
  1272. status, mlxsw_emad_op_tlv_status_str(status));
  1273. }
  1274. }
  1275. if (!err)
  1276. memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
  1277. reg->len);
  1278. mlxsw_cmd_mbox_free(out_mbox);
  1279. free_in_mbox:
  1280. mlxsw_cmd_mbox_free(in_mbox);
  1281. if (err)
  1282. dev_err(mlxsw_core->bus_info->dev, "Reg cmd access failed (reg_id=%x(%s),type=%s)\n",
  1283. reg->id, mlxsw_reg_id_str(reg->id),
  1284. mlxsw_core_reg_access_type_str(type));
  1285. return err;
  1286. }
  1287. static void mlxsw_core_reg_access_cb(struct mlxsw_core *mlxsw_core,
  1288. char *payload, size_t payload_len,
  1289. unsigned long cb_priv)
  1290. {
  1291. char *orig_payload = (char *) cb_priv;
  1292. memcpy(orig_payload, payload, payload_len);
  1293. }
  1294. static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
  1295. const struct mlxsw_reg_info *reg,
  1296. char *payload,
  1297. enum mlxsw_core_reg_access_type type)
  1298. {
  1299. LIST_HEAD(bulk_list);
  1300. int err;
  1301. /* During initialization EMAD interface is not available to us,
  1302. * so we default to command interface. We switch to EMAD interface
  1303. * after setting the appropriate traps.
  1304. */
  1305. if (!mlxsw_core->emad.use_emad)
  1306. return mlxsw_core_reg_access_cmd(mlxsw_core, reg,
  1307. payload, type);
  1308. err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
  1309. payload, type, &bulk_list,
  1310. mlxsw_core_reg_access_cb,
  1311. (unsigned long) payload);
  1312. if (err)
  1313. return err;
  1314. return mlxsw_reg_trans_bulk_wait(&bulk_list);
  1315. }
int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
}
EXPORT_SYMBOL(mlxsw_reg_query);

int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
}
EXPORT_SYMBOL(mlxsw_reg_write);
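
/* Deliver a received skb to the first registered RX listener matching
 * its trap ID and local port. Packets received via a LAG are first
 * resolved to a local port; packets with no matching listener, an
 * invalid trap ID or an invalid port are counted and dropped.
 */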
void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
			    struct mlxsw_rx_info *rx_info)
{
	struct mlxsw_rx_listener_item *rxl_item;
	const struct mlxsw_rx_listener *rxl;
	struct mlxsw_core_pcpu_stats *pcpu_stats;
	u8 local_port;
	bool found = false;

	if (rx_info->is_lag) {
		dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: lag_id = %d, lag_port_index = 0x%x\n",
				    __func__, rx_info->u.lag_id,
				    rx_info->lag_port_index);
		/* Upper layer does not care if the skb came from LAG or not,
		 * so just get the local_port for the lag port and push it up.
		 */
		local_port = mlxsw_core_lag_mapping_get(mlxsw_core,
							rx_info->u.lag_id,
							rx_info->lag_port_index);
	} else {
		local_port = rx_info->u.sys_port;
	}

	dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: local_port = %d, trap_id = 0x%x\n",
			    __func__, local_port, rx_info->trap_id);

	if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
	    (local_port >= MLXSW_PORT_MAX_PORTS))
		goto drop;

	rcu_read_lock();
	list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
		rxl = &rxl_item->rxl;
		if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
		     rxl->local_port == local_port) &&
		    rxl->trap_id == rx_info->trap_id) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	if (!found)
		goto drop;

	pcpu_stats = this_cpu_ptr(mlxsw_core->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->port_rx_packets[local_port]++;
	pcpu_stats->port_rx_bytes[local_port] += skb->len;
	pcpu_stats->trap_rx_packets[rx_info->trap_id]++;
	pcpu_stats->trap_rx_bytes[rx_info->trap_id] += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	rxl->func(skb, local_port, rxl_item->priv);
	return;

drop:
	if (rx_info->trap_id >= MLXSW_TRAP_ID_MAX)
		this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_invalid);
	else
		this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_dropped[rx_info->trap_id]);
	if (local_port >= MLXSW_PORT_MAX_PORTS)
		this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_invalid);
	else
		this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_dropped[local_port]);
	dev_kfree_skb(skb);
}
EXPORT_SYMBOL(mlxsw_core_skb_receive);
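
/* LAG mappings are kept in a flat array indexed by
 * lag_id * max_ports_in_lag + port_index.
 */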
static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
					u16 lag_id, u8 port_index)
{
	return mlxsw_core->resources.max_ports_in_lag * lag_id +
	       port_index;
}

void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
				u16 lag_id, u8 port_index, u8 local_port)
{
	int index = mlxsw_core_lag_mapping_index(mlxsw_core,
						 lag_id, port_index);

	mlxsw_core->lag.mapping[index] = local_port;
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_set);

u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
			      u16 lag_id, u8 port_index)
{
	int index = mlxsw_core_lag_mapping_index(mlxsw_core,
						 lag_id, port_index);

	return mlxsw_core->lag.mapping[index];
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_get);
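
/* Remove every mapping of the given local port within the LAG by
 * scanning all port indexes of the LAG.
 */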
void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
				  u16 lag_id, u8 local_port)
{
	int i;

	for (i = 0; i < mlxsw_core->resources.max_ports_in_lag; i++) {
		int index = mlxsw_core_lag_mapping_index(mlxsw_core,
							 lag_id, i);

		if (mlxsw_core->lag.mapping[index] == local_port)
			mlxsw_core->lag.mapping[index] = 0;
	}
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear);

struct mlxsw_resources *mlxsw_core_resources_get(struct mlxsw_core *mlxsw_core)
{
	return &mlxsw_core->resources;
}
EXPORT_SYMBOL(mlxsw_core_resources_get);
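
/* Register a core port with devlink. Split ports are annotated with
 * their split group, and the port is exposed as an Ethernet devlink
 * port backed by the given netdev.
 */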
int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core,
			 struct mlxsw_core_port *mlxsw_core_port, u8 local_port,
			 struct net_device *dev, bool split, u32 split_group)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	if (split)
		devlink_port_split_set(devlink_port, split_group);
	devlink_port_type_eth_set(devlink_port, dev);
	return devlink_port_register(devlink, devlink_port, local_port);
}
EXPORT_SYMBOL(mlxsw_core_port_init);

void mlxsw_core_port_fini(struct mlxsw_core_port *mlxsw_core_port)
{
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	devlink_port_unregister(devlink_port);
}
EXPORT_SYMBOL(mlxsw_core_port_fini);
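
/* Dump a buffer to the debug log, four 32-bit big-endian words per
 * line, skipping the all-zero tail of the buffer.
 */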
static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
				    const char *buf, size_t size)
{
	__be32 *m = (__be32 *) buf;
	int i;
	int count = size / sizeof(__be32);

	for (i = count - 1; i >= 0; i--)
		if (m[i])
			break;
	i++;
	count = i ? i : 1;
	for (i = 0; i < count; i += 4)
		dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
			i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
			be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
}
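
/* Execute a command on the underlying bus, dumping the input and
 * output mailboxes to the debug log and reporting firmware status
 * failures and timeouts.
 */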
int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
		   u32 in_mod, bool out_mbox_direct,
		   char *in_mbox, size_t in_mbox_size,
		   char *out_mbox, size_t out_mbox_size)
{
	u8 status;
	int err;

	BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
	if (!mlxsw_core->bus->cmd_exec)
		return -EOPNOTSUPP;

	dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
		opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
	if (in_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
	}

	err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
					opcode_mod, in_mod, out_mbox_direct,
					in_mbox, in_mbox_size,
					out_mbox, out_mbox_size, &status);

	if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod, status, mlxsw_cmd_status_str(status));
	} else if (err == -ETIMEDOUT) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod);
	}

	if (!err && out_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
	}
	return err;
}
EXPORT_SYMBOL(mlxsw_cmd_exec);
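
/* Schedule delayed work on the driver's dedicated workqueue. */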
int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work(mlxsw_wq, dwork, delay);
}
EXPORT_SYMBOL(mlxsw_core_schedule_dw);
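
/* Create the driver workqueue (WQ_MEM_RECLAIM, so queued work can make
 * forward progress under memory pressure) and the debugfs root
 * directory.
 */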
static int __init mlxsw_core_module_init(void)
{
	int err;

	mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_MEM_RECLAIM, 0);
	if (!mlxsw_wq)
		return -ENOMEM;
	mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
	if (!mlxsw_core_dbg_root) {
		err = -ENOMEM;
		goto err_debugfs_create_dir;
	}
	return 0;

err_debugfs_create_dir:
	destroy_workqueue(mlxsw_wq);
	return err;
}

static void __exit mlxsw_core_module_exit(void)
{
	debugfs_remove_recursive(mlxsw_core_dbg_root);
	destroy_workqueue(mlxsw_wq);
}

module_init(mlxsw_core_module_init);
module_exit(mlxsw_core_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch device core driver");