fman_dtsec.c 44 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572
  1. /*
  2. * Copyright 2008-2015 Freescale Semiconductor Inc.
  3. *
  4. * Redistribution and use in source and binary forms, with or without
  5. * modification, are permitted provided that the following conditions are met:
  6. * * Redistributions of source code must retain the above copyright
  7. * notice, this list of conditions and the following disclaimer.
  8. * * Redistributions in binary form must reproduce the above copyright
  9. * notice, this list of conditions and the following disclaimer in the
  10. * documentation and/or other materials provided with the distribution.
  11. * * Neither the name of Freescale Semiconductor nor the
  12. * names of its contributors may be used to endorse or promote products
  13. * derived from this software without specific prior written permission.
  14. *
  15. *
  16. * ALTERNATIVELY, this software may be distributed under the terms of the
  17. * GNU General Public License ("GPL") as published by the Free Software
  18. * Foundation, either version 2 of that License or (at your option) any
  19. * later version.
  20. *
  21. * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  22. * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  23. * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  24. * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  25. * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  26. * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  27. * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  28. * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  29. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  30. * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  31. */
  32. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  33. #include "fman_dtsec.h"
  34. #include "fman.h"
  35. #include <linux/slab.h>
  36. #include <linux/bitrev.h>
  37. #include <linux/io.h>
  38. #include <linux/delay.h>
  39. #include <linux/phy.h>
  40. #include <linux/crc32.h>
  41. #include <linux/of_mdio.h>
  42. #include <linux/mii.h>
  43. /* TBI register addresses */
  44. #define MII_TBICON 0x11
  45. /* TBICON register bit fields */
  46. #define TBICON_SOFT_RESET 0x8000 /* Soft reset */
  47. #define TBICON_DISABLE_RX_DIS 0x2000 /* Disable receive disparity */
  48. #define TBICON_DISABLE_TX_DIS 0x1000 /* Disable transmit disparity */
  49. #define TBICON_AN_SENSE 0x0100 /* Auto-negotiation sense enable */
  50. #define TBICON_CLK_SELECT 0x0020 /* Clock select */
  51. #define TBICON_MI_MODE 0x0010 /* GMII mode (TBI if not set) */
  52. #define TBIANA_SGMII 0x4001
  53. #define TBIANA_1000X 0x01a0
  54. /* Interrupt Mask Register (IMASK) */
  55. #define DTSEC_IMASK_BREN 0x80000000
  56. #define DTSEC_IMASK_RXCEN 0x40000000
  57. #define DTSEC_IMASK_MSROEN 0x04000000
  58. #define DTSEC_IMASK_GTSCEN 0x02000000
  59. #define DTSEC_IMASK_BTEN 0x01000000
  60. #define DTSEC_IMASK_TXCEN 0x00800000
  61. #define DTSEC_IMASK_TXEEN 0x00400000
  62. #define DTSEC_IMASK_LCEN 0x00040000
  63. #define DTSEC_IMASK_CRLEN 0x00020000
  64. #define DTSEC_IMASK_XFUNEN 0x00010000
  65. #define DTSEC_IMASK_ABRTEN 0x00008000
  66. #define DTSEC_IMASK_IFERREN 0x00004000
  67. #define DTSEC_IMASK_MAGEN 0x00000800
  68. #define DTSEC_IMASK_MMRDEN 0x00000400
  69. #define DTSEC_IMASK_MMWREN 0x00000200
  70. #define DTSEC_IMASK_GRSCEN 0x00000100
  71. #define DTSEC_IMASK_TDPEEN 0x00000002
  72. #define DTSEC_IMASK_RDPEEN 0x00000001
  73. #define DTSEC_EVENTS_MASK \
  74. ((u32)(DTSEC_IMASK_BREN | \
  75. DTSEC_IMASK_RXCEN | \
  76. DTSEC_IMASK_BTEN | \
  77. DTSEC_IMASK_TXCEN | \
  78. DTSEC_IMASK_TXEEN | \
  79. DTSEC_IMASK_ABRTEN | \
  80. DTSEC_IMASK_LCEN | \
  81. DTSEC_IMASK_CRLEN | \
  82. DTSEC_IMASK_XFUNEN | \
  83. DTSEC_IMASK_IFERREN | \
  84. DTSEC_IMASK_MAGEN | \
  85. DTSEC_IMASK_TDPEEN | \
  86. DTSEC_IMASK_RDPEEN))
  87. /* dtsec timestamp event bits */
  88. #define TMR_PEMASK_TSREEN 0x00010000
  89. #define TMR_PEVENT_TSRE 0x00010000
  90. /* Group address bit indication */
  91. #define MAC_GROUP_ADDRESS 0x0000010000000000ULL
  92. /* Defaults */
  93. #define DEFAULT_HALFDUP_RETRANSMIT 0xf
  94. #define DEFAULT_HALFDUP_COLL_WINDOW 0x37
  95. #define DEFAULT_TX_PAUSE_TIME 0xf000
  96. #define DEFAULT_RX_PREPEND 0
  97. #define DEFAULT_PREAMBLE_LEN 7
  98. #define DEFAULT_TX_PAUSE_TIME_EXTD 0
  99. #define DEFAULT_NON_BACK_TO_BACK_IPG1 0x40
  100. #define DEFAULT_NON_BACK_TO_BACK_IPG2 0x60
  101. #define DEFAULT_MIN_IFG_ENFORCEMENT 0x50
  102. #define DEFAULT_BACK_TO_BACK_IPG 0x60
  103. #define DEFAULT_MAXIMUM_FRAME 0x600
  104. /* register related defines (bits, field offsets..) */
  105. #define DTSEC_ID2_INT_REDUCED_OFF 0x00010000
  106. #define DTSEC_ECNTRL_GMIIM 0x00000040
  107. #define DTSEC_ECNTRL_TBIM 0x00000020
  108. #define DTSEC_ECNTRL_SGMIIM 0x00000002
  109. #define DTSEC_ECNTRL_RPM 0x00000010
  110. #define DTSEC_ECNTRL_R100M 0x00000008
  111. #define DTSEC_ECNTRL_QSGMIIM 0x00000001
  112. #define TCTRL_TTSE 0x00000040
  113. #define TCTRL_GTS 0x00000020
  114. #define RCTRL_PAL_MASK 0x001f0000
  115. #define RCTRL_PAL_SHIFT 16
  116. #define RCTRL_GHTX 0x00000400
  117. #define RCTRL_RTSE 0x00000040
  118. #define RCTRL_GRS 0x00000020
  119. #define RCTRL_MPROM 0x00000008
  120. #define RCTRL_RSF 0x00000004
  121. #define RCTRL_UPROM 0x00000001
  122. #define MACCFG1_SOFT_RESET 0x80000000
  123. #define MACCFG1_RX_FLOW 0x00000020
  124. #define MACCFG1_TX_FLOW 0x00000010
  125. #define MACCFG1_TX_EN 0x00000001
  126. #define MACCFG1_RX_EN 0x00000004
  127. #define MACCFG2_NIBBLE_MODE 0x00000100
  128. #define MACCFG2_BYTE_MODE 0x00000200
  129. #define MACCFG2_PAD_CRC_EN 0x00000004
  130. #define MACCFG2_FULL_DUPLEX 0x00000001
  131. #define MACCFG2_PREAMBLE_LENGTH_MASK 0x0000f000
  132. #define MACCFG2_PREAMBLE_LENGTH_SHIFT 12
  133. #define IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT 24
  134. #define IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT 16
  135. #define IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT 8
  136. #define IPGIFG_NON_BACK_TO_BACK_IPG_1 0x7F000000
  137. #define IPGIFG_NON_BACK_TO_BACK_IPG_2 0x007F0000
  138. #define IPGIFG_MIN_IFG_ENFORCEMENT 0x0000FF00
  139. #define IPGIFG_BACK_TO_BACK_IPG 0x0000007F
  140. #define HAFDUP_EXCESS_DEFER 0x00010000
  141. #define HAFDUP_COLLISION_WINDOW 0x000003ff
  142. #define HAFDUP_RETRANSMISSION_MAX_SHIFT 12
  143. #define HAFDUP_RETRANSMISSION_MAX 0x0000f000
  144. #define NUM_OF_HASH_REGS 8 /* Number of hash table registers */
  145. #define PTV_PTE_MASK 0xffff0000
  146. #define PTV_PT_MASK 0x0000ffff
  147. #define PTV_PTE_SHIFT 16
  148. #define MAX_PACKET_ALIGNMENT 31
  149. #define MAX_INTER_PACKET_GAP 0x7f
  150. #define MAX_RETRANSMISSION 0x0f
  151. #define MAX_COLLISION_WINDOW 0x03ff
  152. /* Hash table size (32 bits*8 regs) */
  153. #define DTSEC_HASH_TABLE_SIZE 256
  154. /* Extended Hash table size (32 bits*16 regs) */
  155. #define EXTENDED_HASH_TABLE_SIZE 512
/* dTSEC Memory Map registers.
 *
 * This struct mirrors the hardware register block one-to-one: field order
 * and the reservedXXXX padding arrays encode the exact byte offsets noted
 * in the per-field comments. Do not reorder, insert, or resize members.
 * All registers are 32-bit big-endian and are accessed exclusively through
 * ioread32be()/iowrite32be() in this driver.
 */
struct dtsec_regs {
	/* dTSEC General Control and Status Registers */
	u32 tsec_id;		/* 0x000 ETSEC_ID register */
	u32 tsec_id2;		/* 0x004 ETSEC_ID2 register */
	u32 ievent;		/* 0x008 Interrupt event register */
	u32 imask;		/* 0x00C Interrupt mask register */
	u32 reserved0010[1];
	u32 ecntrl;		/* 0x014 E control register */
	u32 ptv;		/* 0x018 Pause time value register */
	u32 tbipa;		/* 0x01C TBI PHY address register */
	u32 tmr_ctrl;		/* 0x020 Time-stamp Control register */
	u32 tmr_pevent;		/* 0x024 Time-stamp event register */
	u32 tmr_pemask;		/* 0x028 Timer event mask register */
	u32 reserved002c[5];
	u32 tctrl;		/* 0x040 Transmit control register */
	u32 reserved0044[3];
	u32 rctrl;		/* 0x050 Receive control register */
	u32 reserved0054[11];
	u32 igaddr[8];		/* 0x080-0x09C Individual/group address */
	u32 gaddr[8];		/* 0x0A0-0x0BC Group address registers 0-7 */
	u32 reserved00c0[16];
	u32 maccfg1;		/* 0x100 MAC configuration #1 */
	u32 maccfg2;		/* 0x104 MAC configuration #2 */
	u32 ipgifg;		/* 0x108 IPG/IFG */
	u32 hafdup;		/* 0x10C Half-duplex */
	u32 maxfrm;		/* 0x110 Maximum frame */
	u32 reserved0114[10];
	u32 ifstat;		/* 0x13C Interface status */
	u32 macstnaddr1;	/* 0x140 Station Address,part 1 */
	u32 macstnaddr2;	/* 0x144 Station Address,part 2 */
	struct {
		u32 exact_match1;	/* octets 1-4 */
		u32 exact_match2;	/* octets 5-6 */
	} macaddr[15];		/* 0x148-0x1BC mac exact match addresses 1-15 */
	u32 reserved01c0[16];
	/* MIB (statistics) counter registers start here */
	u32 tr64;	/* 0x200 Tx and Rx 64 byte frame counter */
	u32 tr127;	/* 0x204 Tx and Rx 65 to 127 byte frame counter */
	u32 tr255;	/* 0x208 Tx and Rx 128 to 255 byte frame counter */
	u32 tr511;	/* 0x20C Tx and Rx 256 to 511 byte frame counter */
	u32 tr1k;	/* 0x210 Tx and Rx 512 to 1023 byte frame counter */
	u32 trmax;	/* 0x214 Tx and Rx 1024 to 1518 byte frame counter */
	u32 trmgv;
	/* 0x218 Tx and Rx 1519 to 1522 byte good VLAN frame count */
	u32 rbyt;	/* 0x21C receive byte counter */
	u32 rpkt;	/* 0x220 receive packet counter */
	u32 rfcs;	/* 0x224 receive FCS error counter */
	u32 rmca;	/* 0x228 RMCA Rx multicast packet counter */
	u32 rbca;	/* 0x22C Rx broadcast packet counter */
	u32 rxcf;	/* 0x230 Rx control frame packet counter */
	u32 rxpf;	/* 0x234 Rx pause frame packet counter */
	u32 rxuo;	/* 0x238 Rx unknown OP code counter */
	u32 raln;	/* 0x23C Rx alignment error counter */
	u32 rflr;	/* 0x240 Rx frame length error counter */
	u32 rcde;	/* 0x244 Rx code error counter */
	u32 rcse;	/* 0x248 Rx carrier sense error counter */
	u32 rund;	/* 0x24C Rx undersize packet counter */
	u32 rovr;	/* 0x250 Rx oversize packet counter */
	u32 rfrg;	/* 0x254 Rx fragments counter */
	u32 rjbr;	/* 0x258 Rx jabber counter */
	u32 rdrp;	/* 0x25C Rx drop */
	u32 tbyt;	/* 0x260 Tx byte counter */
	u32 tpkt;	/* 0x264 Tx packet counter */
	u32 tmca;	/* 0x268 Tx multicast packet counter */
	u32 tbca;	/* 0x26C Tx broadcast packet counter */
	u32 txpf;	/* 0x270 Tx pause control frame counter */
	u32 tdfr;	/* 0x274 Tx deferral packet counter */
	u32 tedf;	/* 0x278 Tx excessive deferral packet counter */
	u32 tscl;	/* 0x27C Tx single collision packet counter */
	u32 tmcl;	/* 0x280 Tx multiple collision packet counter */
	u32 tlcl;	/* 0x284 Tx late collision packet counter */
	u32 txcl;	/* 0x288 Tx excessive collision packet counter */
	u32 tncl;	/* 0x28C Tx total collision counter */
	u32 reserved0290[1];
	u32 tdrp;	/* 0x294 Tx drop frame counter */
	u32 tjbr;	/* 0x298 Tx jabber frame counter */
	u32 tfcs;	/* 0x29C Tx FCS error counter */
	u32 txcf;	/* 0x2A0 Tx control frame counter */
	u32 tovr;	/* 0x2A4 Tx oversize frame counter */
	u32 tund;	/* 0x2A8 Tx undersize frame counter */
	u32 tfrg;	/* 0x2AC Tx fragments frame counter */
	u32 car1;	/* 0x2B0 carry register one register* */
	u32 car2;	/* 0x2B4 carry register two register* */
	u32 cam1;	/* 0x2B8 carry register one mask register */
	u32 cam2;	/* 0x2BC carry register two mask register */
	u32 reserved02c0[848];
};
/* struct dtsec_cfg - dTSEC configuration
 *
 * NOTE(review): the first paragraph below describes a half-duplex
 * back-pressure control that has no matching field in this struct --
 * it looks like leftover documentation for a removed member; confirm
 * against fman_dtsec.h.
 *
 * Transmit half-duplex flow control, under software control for 10/100-Mbps
 * half-duplex media. If set, back pressure is applied to media by raising
 * carrier.
 * halfdup_retransmit:
 * Number of retransmission attempts following a collision.
 * If this is exceeded dTSEC aborts transmission due to excessive collisions.
 * The standard specifies the attempt limit to be 15.
 * halfdup_coll_window:
 * The number of bytes of the frame during which collisions may occur.
 * The default value of 55 corresponds to the frame byte at the end of the
 * standard 512-bit slot time window. If collisions are detected after this
 * byte, the late collision event is asserted and transmission of current
 * frame is aborted.
 * tx_pad_crc:
 * Pad and append CRC. If set, the MAC pads all transmitted short frames and
 * appends a CRC to every frame regardless of padding requirement.
 * tx_pause_time:
 * Transmit pause time value. This pause value is used as part of the pause
 * frame to be sent when a transmit pause frame is initiated.
 * If set to 0 this disables transmission of pause frames.
 * preamble_len:
 * Length, in bytes, of the preamble field preceding each Ethernet
 * start-of-frame delimiter byte. The default value of 0x7 should be used in
 * order to guarantee reliable operation with IEEE 802.3 compliant hardware.
 * rx_prepend:
 * Packet alignment padding length. The specified number of bytes (1-31)
 * of zero padding are inserted before the start of each received frame.
 * For Ethernet, where optional preamble extraction is enabled, the padding
 * appears before the preamble, otherwise the padding precedes the
 * layer 2 header.
 *
 * This structure contains basic dTSEC configuration and must be passed to
 * init() function. A default set of configuration values can be
 * obtained by calling set_dflts().
 */
struct dtsec_cfg {
	u16 halfdup_retransmit;		/* written into HAFDUP retrans-max field */
	u16 halfdup_coll_window;	/* written into HAFDUP collision window */
	bool tx_pad_crc;		/* enables MACCFG2_PAD_CRC_EN */
	u16 tx_pause_time;		/* PTV pause time (0 disables pause tx) */
	bool ptp_tsu_en;		/* enable the 1588 timestamp unit events */
	bool ptp_exception_en;		/* unmask TMR_PEMASK_TSREEN when TSU is on */
	u32 preamble_len;		/* preamble length field of MACCFG2 */
	u32 rx_prepend;			/* RCTRL PAL field, 0..MAX_PACKET_ALIGNMENT */
	u16 tx_pause_time_extd;		/* PTV extended pause time (upper 16 bits) */
	u16 maximum_frame;		/* initial MAXFRM register value */
	u32 non_back_to_back_ipg1;	/* IPGIFG field, <= MAX_INTER_PACKET_GAP */
	u32 non_back_to_back_ipg2;	/* IPGIFG field, <= MAX_INTER_PACKET_GAP */
	u32 min_ifg_enforcement;	/* IPGIFG minimum IFG enforcement field */
	u32 back_to_back_ipg;		/* IPGIFG field, <= MAX_INTER_PACKET_GAP */
};
/* Per-MAC driver state for one dTSEC instance.
 *
 * Allocated by the driver and carried through every dtsec_* operation;
 * dtsec_drv_param holds the pre-init configuration (NULL once the MAC
 * is initialized -- see is_init_done()).
 */
struct fman_mac {
	/* pointer to dTSEC memory mapped registers */
	struct dtsec_regs __iomem *regs;
	/* MAC address of device */
	u64 addr;
	/* Ethernet physical interface */
	phy_interface_t phy_if;
	u16 max_speed;			/* link speed cap; must be < SPEED_10000 */
	void *dev_id; /* device cookie used by the exception cbs */
	fman_mac_exception_cb *exception_cb;	/* error-event callback */
	fman_mac_exception_cb *event_cb;	/* normal-event callback */
	/* Number of individual addresses in registers for this station */
	u8 num_of_ind_addr_in_regs;
	/* pointer to driver's global address hash table */
	struct eth_hash_t *multicast_addr_hash;
	/* pointer to driver's individual address hash table */
	struct eth_hash_t *unicast_addr_hash;
	u8 mac_id;			/* index of this MAC within the FMan */
	u32 exceptions;			/* currently enabled DTSEC_IMASK_* bits */
	bool ptp_tsu_enabled;		/* 1588 timestamp unit active */
	bool en_tsu_err_exception;	/* report TSU errors via exception_cb */
	struct dtsec_cfg *dtsec_drv_param;	/* init-time config; NULL after init */
	void *fm;			/* opaque handle to the owning FMan */
	struct fman_rev_info fm_rev_info;	/* FMan silicon revision (errata checks) */
	bool basex_if;			/* 1000Base-X (vs SGMII) TBI mode */
	struct phy_device *tbiphy;	/* internal TBI PHY device */
};
  322. static void set_dflts(struct dtsec_cfg *cfg)
  323. {
  324. cfg->halfdup_retransmit = DEFAULT_HALFDUP_RETRANSMIT;
  325. cfg->halfdup_coll_window = DEFAULT_HALFDUP_COLL_WINDOW;
  326. cfg->tx_pad_crc = true;
  327. cfg->tx_pause_time = DEFAULT_TX_PAUSE_TIME;
  328. /* PHY address 0 is reserved (DPAA RM) */
  329. cfg->rx_prepend = DEFAULT_RX_PREPEND;
  330. cfg->ptp_tsu_en = true;
  331. cfg->ptp_exception_en = true;
  332. cfg->preamble_len = DEFAULT_PREAMBLE_LEN;
  333. cfg->tx_pause_time_extd = DEFAULT_TX_PAUSE_TIME_EXTD;
  334. cfg->non_back_to_back_ipg1 = DEFAULT_NON_BACK_TO_BACK_IPG1;
  335. cfg->non_back_to_back_ipg2 = DEFAULT_NON_BACK_TO_BACK_IPG2;
  336. cfg->min_ifg_enforcement = DEFAULT_MIN_IFG_ENFORCEMENT;
  337. cfg->back_to_back_ipg = DEFAULT_BACK_TO_BACK_IPG;
  338. cfg->maximum_frame = DEFAULT_MAXIMUM_FRAME;
  339. }
/* Program the dTSEC register block from a clean (soft-reset) state.
 *
 * regs:           mapped dTSEC register block
 * cfg:            configuration values (see struct dtsec_cfg)
 * iface:          PHY interface mode selecting ECNTRL routing bits
 * iface_speed:    link speed (SPEED_100/SPEED_1000 affect ECNTRL/MACCFG2)
 * macaddr:        6-byte station MAC address
 * exception_mask: initial IMASK value (DTSEC_IMASK_* bits)
 * tbi_addr:       PHY address to assign to the internal TBI
 *
 * Returns 0 on success, or -EINVAL when tsec_id2 reports that the
 * requested interface mode is not available on this dTSEC instance
 * (DTSEC_ID2_INT_REDUCED_OFF set -- presumably "reduced/serial pin
 * interface absent"; confirm against the FMan reference manual).
 *
 * The register writes below follow the hardware's required bring-up
 * order; do not reorder them.
 */
static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
		phy_interface_t iface, u16 iface_speed, u8 *macaddr,
		u32 exception_mask, u8 tbi_addr)
{
	bool is_rgmii, is_sgmii, is_qsgmii;
	int i;
	u32 tmp;

	/* Soft reset: assert then deassert MACCFG1[SOFT_RESET] */
	iowrite32be(MACCFG1_SOFT_RESET, &regs->maccfg1);
	iowrite32be(0, &regs->maccfg1);

	/* dtsec_id2 */
	tmp = ioread32be(&regs->tsec_id2);

	/* check RGMII support */
	if (iface == PHY_INTERFACE_MODE_RGMII ||
	    iface == PHY_INTERFACE_MODE_RGMII_ID ||
	    iface == PHY_INTERFACE_MODE_RGMII_RXID ||
	    iface == PHY_INTERFACE_MODE_RGMII_TXID ||
	    iface == PHY_INTERFACE_MODE_RMII)
		if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
			return -EINVAL;

	if (iface == PHY_INTERFACE_MODE_SGMII ||
	    iface == PHY_INTERFACE_MODE_MII)
		if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
			return -EINVAL;

	is_rgmii = iface == PHY_INTERFACE_MODE_RGMII ||
		   iface == PHY_INTERFACE_MODE_RGMII_ID ||
		   iface == PHY_INTERFACE_MODE_RGMII_RXID ||
		   iface == PHY_INTERFACE_MODE_RGMII_TXID;
	is_sgmii = iface == PHY_INTERFACE_MODE_SGMII;
	is_qsgmii = iface == PHY_INTERFACE_MODE_QSGMII;

	/* ECNTRL: select the MAC-PHY interface and 100M mode */
	tmp = 0;
	if (is_rgmii || iface == PHY_INTERFACE_MODE_GMII)
		tmp |= DTSEC_ECNTRL_GMIIM;
	if (is_sgmii)
		tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM);
	if (is_qsgmii)
		tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM |
			DTSEC_ECNTRL_QSGMIIM);
	if (is_rgmii)
		tmp |= DTSEC_ECNTRL_RPM;
	if (iface_speed == SPEED_100)
		tmp |= DTSEC_ECNTRL_R100M;
	iowrite32be(tmp, &regs->ecntrl);

	/* PTV: pause time (low 16 bits) and extended pause time (high) */
	tmp = 0;
	if (cfg->tx_pause_time)
		tmp |= cfg->tx_pause_time;
	if (cfg->tx_pause_time_extd)
		tmp |= cfg->tx_pause_time_extd << PTV_PTE_SHIFT;
	iowrite32be(tmp, &regs->ptv);

	/* RCTRL: rx alignment padding length */
	tmp = 0;
	tmp |= (cfg->rx_prepend << RCTRL_PAL_SHIFT) & RCTRL_PAL_MASK;
	/* Accept short frames */
	tmp |= RCTRL_RSF;
	iowrite32be(tmp, &regs->rctrl);

	/* Assign a Phy Address to the TBI (TBIPA).
	 * Done also in cases where TBI is not selected to avoid conflict with
	 * the external PHY's Physical address
	 */
	iowrite32be(tbi_addr, &regs->tbipa);

	iowrite32be(0, &regs->tmr_ctrl);

	/* 1588 timestamp unit: arm the timestamp-ready event, and unmask
	 * it only when the caller wants the exception delivered
	 */
	if (cfg->ptp_tsu_en) {
		tmp = 0;
		tmp |= TMR_PEVENT_TSRE;
		iowrite32be(tmp, &regs->tmr_pevent);

		if (cfg->ptp_exception_en) {
			tmp = 0;
			tmp |= TMR_PEMASK_TSREEN;
			iowrite32be(tmp, &regs->tmr_pemask);
		}
	}

	/* MACCFG1: enable rx/tx flow control (rx/tx themselves stay off) */
	tmp = 0;
	tmp |= MACCFG1_RX_FLOW;
	tmp |= MACCFG1_TX_FLOW;
	iowrite32be(tmp, &regs->maccfg1);

	/* MACCFG2: nibble mode below 1G, byte mode at 1G */
	tmp = 0;
	if (iface_speed < SPEED_1000)
		tmp |= MACCFG2_NIBBLE_MODE;
	else if (iface_speed == SPEED_1000)
		tmp |= MACCFG2_BYTE_MODE;
	tmp |= (cfg->preamble_len << MACCFG2_PREAMBLE_LENGTH_SHIFT) &
		MACCFG2_PREAMBLE_LENGTH_MASK;
	if (cfg->tx_pad_crc)
		tmp |= MACCFG2_PAD_CRC_EN;
	/* Full Duplex */
	tmp |= MACCFG2_FULL_DUPLEX;
	iowrite32be(tmp, &regs->maccfg2);

	/* IPGIFG: pack the four inter-packet/inter-frame gap fields */
	tmp = (((cfg->non_back_to_back_ipg1 <<
		 IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT)
		& IPGIFG_NON_BACK_TO_BACK_IPG_1)
	       | ((cfg->non_back_to_back_ipg2 <<
		   IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT)
		  & IPGIFG_NON_BACK_TO_BACK_IPG_2)
	       | ((cfg->min_ifg_enforcement << IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT)
		  & IPGIFG_MIN_IFG_ENFORCEMENT)
	       | (cfg->back_to_back_ipg & IPGIFG_BACK_TO_BACK_IPG));
	iowrite32be(tmp, &regs->ipgifg);

	/* HAFDUP: half-duplex retransmission limit and collision window */
	tmp = 0;
	tmp |= HAFDUP_EXCESS_DEFER;
	tmp |= ((cfg->halfdup_retransmit << HAFDUP_RETRANSMISSION_MAX_SHIFT)
		& HAFDUP_RETRANSMISSION_MAX);
	tmp |= (cfg->halfdup_coll_window & HAFDUP_COLLISION_WINDOW);
	iowrite32be(tmp, &regs->hafdup);

	/* Initialize Maximum frame length */
	iowrite32be(cfg->maximum_frame, &regs->maxfrm);

	/* Mask all MIB counter carry events */
	iowrite32be(0xffffffff, &regs->cam1);
	iowrite32be(0xffffffff, &regs->cam2);

	/* Set requested interrupt mask, then clear any stale events
	 * (IEVENT is write-1-to-clear)
	 */
	iowrite32be(exception_mask, &regs->imask);
	iowrite32be(0xffffffff, &regs->ievent);

	/* Station address: octets 2-5 in part 1, octets 0-1 in the top
	 * half of part 2 (hardware's byte-reversed layout)
	 */
	tmp = (u32)((macaddr[5] << 24) |
		    (macaddr[4] << 16) | (macaddr[3] << 8) | macaddr[2]);
	iowrite32be(tmp, &regs->macstnaddr1);

	tmp = (u32)((macaddr[1] << 24) | (macaddr[0] << 16));
	iowrite32be(tmp, &regs->macstnaddr2);

	/* HASH */
	for (i = 0; i < NUM_OF_HASH_REGS; i++) {
		/* Initialize IADDRx */
		iowrite32be(0, &regs->igaddr[i]);
		/* Initialize GADDRx */
		iowrite32be(0, &regs->gaddr[i]);
	}

	return 0;
}
  462. static void set_mac_address(struct dtsec_regs __iomem *regs, u8 *adr)
  463. {
  464. u32 tmp;
  465. tmp = (u32)((adr[5] << 24) |
  466. (adr[4] << 16) | (adr[3] << 8) | adr[2]);
  467. iowrite32be(tmp, &regs->macstnaddr1);
  468. tmp = (u32)((adr[1] << 24) | (adr[0] << 16));
  469. iowrite32be(tmp, &regs->macstnaddr2);
  470. }
  471. static void set_bucket(struct dtsec_regs __iomem *regs, int bucket,
  472. bool enable)
  473. {
  474. int reg_idx = (bucket >> 5) & 0xf;
  475. int bit_idx = bucket & 0x1f;
  476. u32 bit_mask = 0x80000000 >> bit_idx;
  477. u32 __iomem *reg;
  478. if (reg_idx > 7)
  479. reg = &regs->gaddr[reg_idx - 8];
  480. else
  481. reg = &regs->igaddr[reg_idx];
  482. if (enable)
  483. iowrite32be(ioread32be(reg) | bit_mask, reg);
  484. else
  485. iowrite32be(ioread32be(reg) & (~bit_mask), reg);
  486. }
  487. static int check_init_parameters(struct fman_mac *dtsec)
  488. {
  489. if (dtsec->max_speed >= SPEED_10000) {
  490. pr_err("1G MAC driver supports 1G or lower speeds\n");
  491. return -EINVAL;
  492. }
  493. if (dtsec->addr == 0) {
  494. pr_err("Ethernet MAC Must have a valid MAC Address\n");
  495. return -EINVAL;
  496. }
  497. if ((dtsec->dtsec_drv_param)->rx_prepend >
  498. MAX_PACKET_ALIGNMENT) {
  499. pr_err("packetAlignmentPadding can't be > than %d\n",
  500. MAX_PACKET_ALIGNMENT);
  501. return -EINVAL;
  502. }
  503. if (((dtsec->dtsec_drv_param)->non_back_to_back_ipg1 >
  504. MAX_INTER_PACKET_GAP) ||
  505. ((dtsec->dtsec_drv_param)->non_back_to_back_ipg2 >
  506. MAX_INTER_PACKET_GAP) ||
  507. ((dtsec->dtsec_drv_param)->back_to_back_ipg >
  508. MAX_INTER_PACKET_GAP)) {
  509. pr_err("Inter packet gap can't be greater than %d\n",
  510. MAX_INTER_PACKET_GAP);
  511. return -EINVAL;
  512. }
  513. if ((dtsec->dtsec_drv_param)->halfdup_retransmit >
  514. MAX_RETRANSMISSION) {
  515. pr_err("maxRetransmission can't be greater than %d\n",
  516. MAX_RETRANSMISSION);
  517. return -EINVAL;
  518. }
  519. if ((dtsec->dtsec_drv_param)->halfdup_coll_window >
  520. MAX_COLLISION_WINDOW) {
  521. pr_err("collisionWindow can't be greater than %d\n",
  522. MAX_COLLISION_WINDOW);
  523. return -EINVAL;
  524. /* If Auto negotiation process is disabled, need to set up the PHY
  525. * using the MII Management Interface
  526. */
  527. }
  528. if (!dtsec->exception_cb) {
  529. pr_err("uninitialized exception_cb\n");
  530. return -EINVAL;
  531. }
  532. if (!dtsec->event_cb) {
  533. pr_err("uninitialized event_cb\n");
  534. return -EINVAL;
  535. }
  536. return 0;
  537. }
  538. static int get_exception_flag(enum fman_mac_exceptions exception)
  539. {
  540. u32 bit_mask;
  541. switch (exception) {
  542. case FM_MAC_EX_1G_BAB_RX:
  543. bit_mask = DTSEC_IMASK_BREN;
  544. break;
  545. case FM_MAC_EX_1G_RX_CTL:
  546. bit_mask = DTSEC_IMASK_RXCEN;
  547. break;
  548. case FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET:
  549. bit_mask = DTSEC_IMASK_GTSCEN;
  550. break;
  551. case FM_MAC_EX_1G_BAB_TX:
  552. bit_mask = DTSEC_IMASK_BTEN;
  553. break;
  554. case FM_MAC_EX_1G_TX_CTL:
  555. bit_mask = DTSEC_IMASK_TXCEN;
  556. break;
  557. case FM_MAC_EX_1G_TX_ERR:
  558. bit_mask = DTSEC_IMASK_TXEEN;
  559. break;
  560. case FM_MAC_EX_1G_LATE_COL:
  561. bit_mask = DTSEC_IMASK_LCEN;
  562. break;
  563. case FM_MAC_EX_1G_COL_RET_LMT:
  564. bit_mask = DTSEC_IMASK_CRLEN;
  565. break;
  566. case FM_MAC_EX_1G_TX_FIFO_UNDRN:
  567. bit_mask = DTSEC_IMASK_XFUNEN;
  568. break;
  569. case FM_MAC_EX_1G_MAG_PCKT:
  570. bit_mask = DTSEC_IMASK_MAGEN;
  571. break;
  572. case FM_MAC_EX_1G_MII_MNG_RD_COMPLET:
  573. bit_mask = DTSEC_IMASK_MMRDEN;
  574. break;
  575. case FM_MAC_EX_1G_MII_MNG_WR_COMPLET:
  576. bit_mask = DTSEC_IMASK_MMWREN;
  577. break;
  578. case FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET:
  579. bit_mask = DTSEC_IMASK_GRSCEN;
  580. break;
  581. case FM_MAC_EX_1G_DATA_ERR:
  582. bit_mask = DTSEC_IMASK_TDPEEN;
  583. break;
  584. case FM_MAC_EX_1G_RX_MIB_CNT_OVFL:
  585. bit_mask = DTSEC_IMASK_MSROEN;
  586. break;
  587. default:
  588. bit_mask = 0;
  589. break;
  590. }
  591. return bit_mask;
  592. }
  593. static bool is_init_done(struct dtsec_cfg *dtsec_drv_params)
  594. {
  595. /* Checks if dTSEC driver parameters were initialized */
  596. if (!dtsec_drv_params)
  597. return true;
  598. return false;
  599. }
  600. static u16 dtsec_get_max_frame_length(struct fman_mac *dtsec)
  601. {
  602. struct dtsec_regs __iomem *regs = dtsec->regs;
  603. if (is_init_done(dtsec->dtsec_drv_param))
  604. return 0;
  605. return (u16)ioread32be(&regs->maxfrm);
  606. }
/* Main dTSEC interrupt service routine (registered with the FPM as the
 * error-interrupt handler in dtsec_init()). Acks the pending, unmasked
 * events and dispatches each one to the client's exception_cb. Also
 * implements the FM_TX_LOCKUP_ERRATA_DTSEC6 detection and recovery
 * sequence for FMan rev-2 silicon on Tx FIFO underrun.
 */
static void dtsec_isr(void *handle)
{
	struct fman_mac *dtsec = (struct fman_mac *)handle;
	struct dtsec_regs __iomem *regs = dtsec->regs;
	u32 event;

	/* do not handle MDIO events */
	event = ioread32be(&regs->ievent) &
		(u32)(~(DTSEC_IMASK_MMRDEN | DTSEC_IMASK_MMWREN));

	/* only react to currently-unmasked events */
	event &= ioread32be(&regs->imask);

	/* write-to-clear the events we are about to service */
	iowrite32be(event, &regs->ievent);

	if (event & DTSEC_IMASK_BREN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_BAB_RX);
	if (event & DTSEC_IMASK_RXCEN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_RX_CTL);
	if (event & DTSEC_IMASK_GTSCEN)
		dtsec->exception_cb(dtsec->dev_id,
				    FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET);
	if (event & DTSEC_IMASK_BTEN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_BAB_TX);
	if (event & DTSEC_IMASK_TXCEN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_CTL);
	if (event & DTSEC_IMASK_TXEEN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_ERR);
	if (event & DTSEC_IMASK_LCEN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_LATE_COL);
	if (event & DTSEC_IMASK_CRLEN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_COL_RET_LMT);
	if (event & DTSEC_IMASK_XFUNEN) {
		/* FM_TX_LOCKUP_ERRATA_DTSEC6 Errata workaround */
		if (dtsec->fm_rev_info.major == 2) {
			u32 tpkt1, tmp_reg1, tpkt2, tmp_reg2, i;
			/* a. Write 0x00E0_0C00 to DTSEC_ID
			 * This is a read only register
			 * b. Read and save the value of TPKT
			 */
			tpkt1 = ioread32be(&regs->tpkt);

			/* c. Read the register at dTSEC address offset 0x32C */
			tmp_reg1 = ioread32be(&regs->reserved02c0[27]);

			/* d. Compare bits [9:15] to bits [25:31] of the
			 * register at address offset 0x32C.
			 */
			if ((tmp_reg1 & 0x007F0000) !=
			    (tmp_reg1 & 0x0000007F)) {
				/* If they are not equal, save the value of
				 * this register and wait for at least
				 * MAXFRM*16 ns
				 */
				/* NOTE(review): min(..., 1) caps the sleep at
				 * ~1 us, but the errata asks for AT LEAST
				 * MAXFRM*16 ns — looks like it should be
				 * max(); confirm against the errata sheet
				 * before changing.
				 */
				usleep_range((u32)(min
					(dtsec_get_max_frame_length(dtsec) *
					16 / 1000, 1)), (u32)
					(min(dtsec_get_max_frame_length
					(dtsec) * 16 / 1000, 1) + 1));
			}

			/* e. Read and save TPKT again and read the register
			 * at dTSEC address offset 0x32C again
			 */
			tpkt2 = ioread32be(&regs->tpkt);
			tmp_reg2 = ioread32be(&regs->reserved02c0[27]);

			/* f. Compare the value of TPKT saved in step b to
			 * value read in step e. Also compare bits [9:15] of
			 * the register at offset 0x32C saved in step d to the
			 * value of bits [9:15] saved in step e. If the two
			 * registers values are unchanged, then the transmit
			 * portion of the dTSEC controller is locked up and
			 * the user should proceed to the recover sequence.
			 */
			if ((tpkt1 == tpkt2) && ((tmp_reg1 & 0x007F0000) ==
			    (tmp_reg2 & 0x007F0000))) {
				/* recover sequence */

				/* a.Write a 1 to RCTRL[GRS] */
				iowrite32be(ioread32be(&regs->rctrl) |
					    RCTRL_GRS, &regs->rctrl);

				/* b.Wait until IEVENT[GRSC]=1, or at least
				 * 100 us has elapsed.
				 */
				for (i = 0; i < 100; i++) {
					if (ioread32be(&regs->ievent) &
					    DTSEC_IMASK_GRSCEN)
						break;
					udelay(1);
				}
				if (ioread32be(&regs->ievent) &
				    DTSEC_IMASK_GRSCEN)
					iowrite32be(DTSEC_IMASK_GRSCEN,
						    &regs->ievent);
				else
					pr_debug("Rx lockup due to Tx lockup\n");

				/* c.Write a 1 to bit n of FM_RSTC
				 * (offset 0x0CC of FPM)
				 */
				fman_reset_mac(dtsec->fm, dtsec->mac_id);

				/* d.Wait 4 Tx clocks (32 ns) */
				udelay(1);

				/* e.Write a 0 to bit n of FM_RSTC. */
				/* cleared by FMAN
				 */
			}
		}

		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_FIFO_UNDRN);
	}
	if (event & DTSEC_IMASK_MAGEN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_MAG_PCKT);
	if (event & DTSEC_IMASK_GRSCEN)
		dtsec->exception_cb(dtsec->dev_id,
				    FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET);
	if (event & DTSEC_IMASK_TDPEEN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_DATA_ERR);
	if (event & DTSEC_IMASK_RDPEEN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_1G_RX_DATA_ERR);

	/* masked interrupts */
	WARN_ON(event & DTSEC_IMASK_ABRTEN);
	WARN_ON(event & DTSEC_IMASK_IFERREN);
}
  720. static void dtsec_1588_isr(void *handle)
  721. {
  722. struct fman_mac *dtsec = (struct fman_mac *)handle;
  723. struct dtsec_regs __iomem *regs = dtsec->regs;
  724. u32 event;
  725. if (dtsec->ptp_tsu_enabled) {
  726. event = ioread32be(&regs->tmr_pevent);
  727. event &= ioread32be(&regs->tmr_pemask);
  728. if (event) {
  729. iowrite32be(event, &regs->tmr_pevent);
  730. WARN_ON(event & TMR_PEVENT_TSRE);
  731. dtsec->exception_cb(dtsec->dev_id,
  732. FM_MAC_EX_1G_1588_TS_RX_ERR);
  733. }
  734. }
  735. }
/* Tears down everything dtsec_init() set up: unregisters both FPM
 * interrupt handlers and releases the driver-side MAC address hash
 * tables. Pointers are NULLed so a second call is harmless.
 */
static void free_init_resources(struct fman_mac *dtsec)
{
	fman_unregister_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
			     FMAN_INTR_TYPE_ERR);
	fman_unregister_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
			     FMAN_INTR_TYPE_NORMAL);

	/* release the driver's group hash table */
	free_hash_table(dtsec->multicast_addr_hash);
	dtsec->multicast_addr_hash = NULL;

	/* release the driver's individual hash table */
	free_hash_table(dtsec->unicast_addr_hash);
	dtsec->unicast_addr_hash = NULL;
}
  749. int dtsec_cfg_max_frame_len(struct fman_mac *dtsec, u16 new_val)
  750. {
  751. if (is_init_done(dtsec->dtsec_drv_param))
  752. return -EINVAL;
  753. dtsec->dtsec_drv_param->maximum_frame = new_val;
  754. return 0;
  755. }
  756. int dtsec_cfg_pad_and_crc(struct fman_mac *dtsec, bool new_val)
  757. {
  758. if (is_init_done(dtsec->dtsec_drv_param))
  759. return -EINVAL;
  760. dtsec->dtsec_drv_param->tx_pad_crc = new_val;
  761. return 0;
  762. }
  763. static void graceful_start(struct fman_mac *dtsec, enum comm_mode mode)
  764. {
  765. struct dtsec_regs __iomem *regs = dtsec->regs;
  766. if (mode & COMM_MODE_TX)
  767. iowrite32be(ioread32be(&regs->tctrl) &
  768. ~TCTRL_GTS, &regs->tctrl);
  769. if (mode & COMM_MODE_RX)
  770. iowrite32be(ioread32be(&regs->rctrl) &
  771. ~RCTRL_GRS, &regs->rctrl);
  772. }
/* Gracefully halts the requested traffic directions by asserting
 * RCTRL[GRS] and/or TCTRL[GTS], applying the per-revision errata
 * delays needed for the stop to take effect. Counterpart of
 * graceful_start(). On rev-2 silicon Tx graceful stop is skipped
 * entirely (Errata A004).
 */
static void graceful_stop(struct fman_mac *dtsec, enum comm_mode mode)
{
	struct dtsec_regs __iomem *regs = dtsec->regs;
	u32 tmp;

	/* Graceful stop - Assert the graceful Rx stop bit */
	if (mode & COMM_MODE_RX) {
		tmp = ioread32be(&regs->rctrl) | RCTRL_GRS;
		iowrite32be(tmp, &regs->rctrl);

		if (dtsec->fm_rev_info.major == 2) {
			/* Workaround for dTSEC Errata A002 */
			usleep_range(100, 200);
		} else {
			/* Workaround for dTSEC Errata A004839 */
			usleep_range(10, 50);
		}
	}

	/* Graceful stop - Assert the graceful Tx stop bit */
	if (mode & COMM_MODE_TX) {
		if (dtsec->fm_rev_info.major == 2) {
			/* dTSEC Errata A004: Do not use TCTRL[GTS]=1 */
			pr_debug("GTS not supported due to DTSEC_A004 Errata.\n");
		} else {
			tmp = ioread32be(&regs->tctrl) | TCTRL_GTS;
			iowrite32be(tmp, &regs->tctrl);

			/* Workaround for dTSEC Errata A0012, A0014 */
			usleep_range(10, 50);
		}
	}
}
  802. int dtsec_enable(struct fman_mac *dtsec, enum comm_mode mode)
  803. {
  804. struct dtsec_regs __iomem *regs = dtsec->regs;
  805. u32 tmp;
  806. if (!is_init_done(dtsec->dtsec_drv_param))
  807. return -EINVAL;
  808. /* Enable */
  809. tmp = ioread32be(&regs->maccfg1);
  810. if (mode & COMM_MODE_RX)
  811. tmp |= MACCFG1_RX_EN;
  812. if (mode & COMM_MODE_TX)
  813. tmp |= MACCFG1_TX_EN;
  814. iowrite32be(tmp, &regs->maccfg1);
  815. /* Graceful start - clear the graceful Rx/Tx stop bit */
  816. graceful_start(dtsec, mode);
  817. return 0;
  818. }
  819. int dtsec_disable(struct fman_mac *dtsec, enum comm_mode mode)
  820. {
  821. struct dtsec_regs __iomem *regs = dtsec->regs;
  822. u32 tmp;
  823. if (!is_init_done(dtsec->dtsec_drv_param))
  824. return -EINVAL;
  825. /* Graceful stop - Assert the graceful Rx/Tx stop bit */
  826. graceful_stop(dtsec, mode);
  827. tmp = ioread32be(&regs->maccfg1);
  828. if (mode & COMM_MODE_RX)
  829. tmp &= ~MACCFG1_RX_EN;
  830. if (mode & COMM_MODE_TX)
  831. tmp &= ~MACCFG1_TX_EN;
  832. iowrite32be(tmp, &regs->maccfg1);
  833. return 0;
  834. }
  835. int dtsec_set_tx_pause_frames(struct fman_mac *dtsec,
  836. u8 __maybe_unused priority,
  837. u16 pause_time, u16 __maybe_unused thresh_time)
  838. {
  839. struct dtsec_regs __iomem *regs = dtsec->regs;
  840. enum comm_mode mode = COMM_MODE_NONE;
  841. u32 ptv = 0;
  842. if (!is_init_done(dtsec->dtsec_drv_param))
  843. return -EINVAL;
  844. if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
  845. mode |= COMM_MODE_RX;
  846. if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
  847. mode |= COMM_MODE_TX;
  848. graceful_stop(dtsec, mode);
  849. if (pause_time) {
  850. /* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 Errata workaround */
  851. if (dtsec->fm_rev_info.major == 2 && pause_time <= 320) {
  852. pr_warn("pause-time: %d illegal.Should be > 320\n",
  853. pause_time);
  854. return -EINVAL;
  855. }
  856. ptv = ioread32be(&regs->ptv);
  857. ptv &= PTV_PTE_MASK;
  858. ptv |= pause_time & PTV_PT_MASK;
  859. iowrite32be(ptv, &regs->ptv);
  860. /* trigger the transmission of a flow-control pause frame */
  861. iowrite32be(ioread32be(&regs->maccfg1) | MACCFG1_TX_FLOW,
  862. &regs->maccfg1);
  863. } else
  864. iowrite32be(ioread32be(&regs->maccfg1) & ~MACCFG1_TX_FLOW,
  865. &regs->maccfg1);
  866. graceful_start(dtsec, mode);
  867. return 0;
  868. }
  869. int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en)
  870. {
  871. struct dtsec_regs __iomem *regs = dtsec->regs;
  872. enum comm_mode mode = COMM_MODE_NONE;
  873. u32 tmp;
  874. if (!is_init_done(dtsec->dtsec_drv_param))
  875. return -EINVAL;
  876. if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
  877. mode |= COMM_MODE_RX;
  878. if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
  879. mode |= COMM_MODE_TX;
  880. graceful_stop(dtsec, mode);
  881. tmp = ioread32be(&regs->maccfg1);
  882. if (en)
  883. tmp |= MACCFG1_RX_FLOW;
  884. else
  885. tmp &= ~MACCFG1_RX_FLOW;
  886. iowrite32be(tmp, &regs->maccfg1);
  887. graceful_start(dtsec, mode);
  888. return 0;
  889. }
  890. int dtsec_modify_mac_address(struct fman_mac *dtsec, enet_addr_t *enet_addr)
  891. {
  892. struct dtsec_regs __iomem *regs = dtsec->regs;
  893. enum comm_mode mode = COMM_MODE_NONE;
  894. if (!is_init_done(dtsec->dtsec_drv_param))
  895. return -EINVAL;
  896. if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
  897. mode |= COMM_MODE_RX;
  898. if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
  899. mode |= COMM_MODE_TX;
  900. graceful_stop(dtsec, mode);
  901. /* Initialize MAC Station Address registers (1 & 2)
  902. * Station address have to be swapped (big endian to little endian
  903. */
  904. dtsec->addr = ENET_ADDR_TO_UINT64(*enet_addr);
  905. set_mac_address(dtsec->regs, (u8 *)(*enet_addr));
  906. graceful_start(dtsec, mode);
  907. return 0;
  908. }
  909. int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
  910. {
  911. struct dtsec_regs __iomem *regs = dtsec->regs;
  912. struct eth_hash_entry *hash_entry;
  913. u64 addr;
  914. s32 bucket;
  915. u32 crc = 0xFFFFFFFF;
  916. bool mcast, ghtx;
  917. if (!is_init_done(dtsec->dtsec_drv_param))
  918. return -EINVAL;
  919. addr = ENET_ADDR_TO_UINT64(*eth_addr);
  920. ghtx = (bool)((ioread32be(&regs->rctrl) & RCTRL_GHTX) ? true : false);
  921. mcast = (bool)((addr & MAC_GROUP_ADDRESS) ? true : false);
  922. /* Cannot handle unicast mac addr when GHTX is on */
  923. if (ghtx && !mcast) {
  924. pr_err("Could not compute hash bucket\n");
  925. return -EINVAL;
  926. }
  927. crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
  928. crc = bitrev32(crc);
  929. /* considering the 9 highest order bits in crc H[8:0]:
  930. *if ghtx = 0 H[8:6] (highest order 3 bits) identify the hash register
  931. *and H[5:1] (next 5 bits) identify the hash bit
  932. *if ghts = 1 H[8:5] (highest order 4 bits) identify the hash register
  933. *and H[4:0] (next 5 bits) identify the hash bit.
  934. *
  935. *In bucket index output the low 5 bits identify the hash register
  936. *bit, while the higher 4 bits identify the hash register
  937. */
  938. if (ghtx) {
  939. bucket = (s32)((crc >> 23) & 0x1ff);
  940. } else {
  941. bucket = (s32)((crc >> 24) & 0xff);
  942. /* if !ghtx and mcast the bit must be set in gaddr instead of
  943. *igaddr.
  944. */
  945. if (mcast)
  946. bucket += 0x100;
  947. }
  948. set_bucket(dtsec->regs, bucket, true);
  949. /* Create element to be added to the driver hash table */
  950. hash_entry = kmalloc(sizeof(*hash_entry), GFP_ATOMIC);
  951. if (!hash_entry)
  952. return -ENOMEM;
  953. hash_entry->addr = addr;
  954. INIT_LIST_HEAD(&hash_entry->node);
  955. if (addr & MAC_GROUP_ADDRESS)
  956. /* Group Address */
  957. list_add_tail(&hash_entry->node,
  958. &dtsec->multicast_addr_hash->lsts[bucket]);
  959. else
  960. list_add_tail(&hash_entry->node,
  961. &dtsec->unicast_addr_hash->lsts[bucket]);
  962. return 0;
  963. }
  964. int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable)
  965. {
  966. u32 tmp;
  967. struct dtsec_regs __iomem *regs = dtsec->regs;
  968. if (!is_init_done(dtsec->dtsec_drv_param))
  969. return -EINVAL;
  970. tmp = ioread32be(&regs->rctrl);
  971. if (enable)
  972. tmp |= RCTRL_MPROM;
  973. else
  974. tmp &= ~RCTRL_MPROM;
  975. iowrite32be(tmp, &regs->rctrl);
  976. return 0;
  977. }
  978. int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable)
  979. {
  980. struct dtsec_regs __iomem *regs = dtsec->regs;
  981. u32 rctrl, tctrl;
  982. if (!is_init_done(dtsec->dtsec_drv_param))
  983. return -EINVAL;
  984. rctrl = ioread32be(&regs->rctrl);
  985. tctrl = ioread32be(&regs->tctrl);
  986. if (enable) {
  987. rctrl |= RCTRL_RTSE;
  988. tctrl |= TCTRL_TTSE;
  989. } else {
  990. rctrl &= ~RCTRL_RTSE;
  991. tctrl &= ~TCTRL_TTSE;
  992. }
  993. iowrite32be(rctrl, &regs->rctrl);
  994. iowrite32be(tctrl, &regs->tctrl);
  995. return 0;
  996. }
  997. int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
  998. {
  999. struct dtsec_regs __iomem *regs = dtsec->regs;
  1000. struct list_head *pos;
  1001. struct eth_hash_entry *hash_entry = NULL;
  1002. u64 addr;
  1003. s32 bucket;
  1004. u32 crc = 0xFFFFFFFF;
  1005. bool mcast, ghtx;
  1006. if (!is_init_done(dtsec->dtsec_drv_param))
  1007. return -EINVAL;
  1008. addr = ENET_ADDR_TO_UINT64(*eth_addr);
  1009. ghtx = (bool)((ioread32be(&regs->rctrl) & RCTRL_GHTX) ? true : false);
  1010. mcast = (bool)((addr & MAC_GROUP_ADDRESS) ? true : false);
  1011. /* Cannot handle unicast mac addr when GHTX is on */
  1012. if (ghtx && !mcast) {
  1013. pr_err("Could not compute hash bucket\n");
  1014. return -EINVAL;
  1015. }
  1016. crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
  1017. crc = bitrev32(crc);
  1018. if (ghtx) {
  1019. bucket = (s32)((crc >> 23) & 0x1ff);
  1020. } else {
  1021. bucket = (s32)((crc >> 24) & 0xff);
  1022. /* if !ghtx and mcast the bit must be set
  1023. * in gaddr instead of igaddr.
  1024. */
  1025. if (mcast)
  1026. bucket += 0x100;
  1027. }
  1028. if (addr & MAC_GROUP_ADDRESS) {
  1029. /* Group Address */
  1030. list_for_each(pos,
  1031. &dtsec->multicast_addr_hash->lsts[bucket]) {
  1032. hash_entry = ETH_HASH_ENTRY_OBJ(pos);
  1033. if (hash_entry->addr == addr) {
  1034. list_del_init(&hash_entry->node);
  1035. kfree(hash_entry);
  1036. break;
  1037. }
  1038. }
  1039. if (list_empty(&dtsec->multicast_addr_hash->lsts[bucket]))
  1040. set_bucket(dtsec->regs, bucket, false);
  1041. } else {
  1042. /* Individual Address */
  1043. list_for_each(pos,
  1044. &dtsec->unicast_addr_hash->lsts[bucket]) {
  1045. hash_entry = ETH_HASH_ENTRY_OBJ(pos);
  1046. if (hash_entry->addr == addr) {
  1047. list_del_init(&hash_entry->node);
  1048. kfree(hash_entry);
  1049. break;
  1050. }
  1051. }
  1052. if (list_empty(&dtsec->unicast_addr_hash->lsts[bucket]))
  1053. set_bucket(dtsec->regs, bucket, false);
  1054. }
  1055. /* address does not exist */
  1056. WARN_ON(!hash_entry);
  1057. return 0;
  1058. }
  1059. int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val)
  1060. {
  1061. struct dtsec_regs __iomem *regs = dtsec->regs;
  1062. u32 tmp;
  1063. if (!is_init_done(dtsec->dtsec_drv_param))
  1064. return -EINVAL;
  1065. /* Set unicast promiscuous */
  1066. tmp = ioread32be(&regs->rctrl);
  1067. if (new_val)
  1068. tmp |= RCTRL_UPROM;
  1069. else
  1070. tmp &= ~RCTRL_UPROM;
  1071. iowrite32be(tmp, &regs->rctrl);
  1072. /* Set multicast promiscuous */
  1073. tmp = ioread32be(&regs->rctrl);
  1074. if (new_val)
  1075. tmp |= RCTRL_MPROM;
  1076. else
  1077. tmp &= ~RCTRL_MPROM;
  1078. iowrite32be(tmp, &regs->rctrl);
  1079. return 0;
  1080. }
  1081. int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed)
  1082. {
  1083. struct dtsec_regs __iomem *regs = dtsec->regs;
  1084. enum comm_mode mode = COMM_MODE_NONE;
  1085. u32 tmp;
  1086. if (!is_init_done(dtsec->dtsec_drv_param))
  1087. return -EINVAL;
  1088. if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
  1089. mode |= COMM_MODE_RX;
  1090. if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
  1091. mode |= COMM_MODE_TX;
  1092. graceful_stop(dtsec, mode);
  1093. tmp = ioread32be(&regs->maccfg2);
  1094. /* Full Duplex */
  1095. tmp |= MACCFG2_FULL_DUPLEX;
  1096. tmp &= ~(MACCFG2_NIBBLE_MODE | MACCFG2_BYTE_MODE);
  1097. if (speed < SPEED_1000)
  1098. tmp |= MACCFG2_NIBBLE_MODE;
  1099. else if (speed == SPEED_1000)
  1100. tmp |= MACCFG2_BYTE_MODE;
  1101. iowrite32be(tmp, &regs->maccfg2);
  1102. tmp = ioread32be(&regs->ecntrl);
  1103. if (speed == SPEED_100)
  1104. tmp |= DTSEC_ECNTRL_R100M;
  1105. else
  1106. tmp &= ~DTSEC_ECNTRL_R100M;
  1107. iowrite32be(tmp, &regs->ecntrl);
  1108. graceful_start(dtsec, mode);
  1109. return 0;
  1110. }
  1111. int dtsec_restart_autoneg(struct fman_mac *dtsec)
  1112. {
  1113. u16 tmp_reg16;
  1114. if (!is_init_done(dtsec->dtsec_drv_param))
  1115. return -EINVAL;
  1116. tmp_reg16 = phy_read(dtsec->tbiphy, MII_BMCR);
  1117. tmp_reg16 &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
  1118. tmp_reg16 |= (BMCR_ANENABLE | BMCR_ANRESTART |
  1119. BMCR_FULLDPLX | BMCR_SPEED1000);
  1120. phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
  1121. return 0;
  1122. }
  1123. int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version)
  1124. {
  1125. struct dtsec_regs __iomem *regs = dtsec->regs;
  1126. if (!is_init_done(dtsec->dtsec_drv_param))
  1127. return -EINVAL;
  1128. *mac_version = ioread32be(&regs->tsec_id);
  1129. return 0;
  1130. }
/* Enables or disables delivery of one MAC exception.
 * Regular MAC exceptions update both the software mask
 * (dtsec->exceptions) and the hardware IMASK register. The 1588
 * timestamp-RX-error exception is handled separately through
 * TMR_PEMASK and requires the PTP TSU to be enabled.
 *
 * Returns 0 on success, -EINVAL for an unknown exception or when
 * called before init completes.
 */
int dtsec_set_exception(struct fman_mac *dtsec,
			enum fman_mac_exceptions exception, bool enable)
{
	struct dtsec_regs __iomem *regs = dtsec->regs;
	u32 bit_mask = 0;

	if (!is_init_done(dtsec->dtsec_drv_param))
		return -EINVAL;

	if (exception != FM_MAC_EX_1G_1588_TS_RX_ERR) {
		bit_mask = get_exception_flag(exception);
		if (bit_mask) {
			if (enable)
				dtsec->exceptions |= bit_mask;
			else
				dtsec->exceptions &= ~bit_mask;
		} else {
			pr_err("Undefined exception\n");
			return -EINVAL;
		}
		/* mirror the software mask change into hardware IMASK */
		if (enable)
			iowrite32be(ioread32be(&regs->imask) | bit_mask,
				    &regs->imask);
		else
			iowrite32be(ioread32be(&regs->imask) & ~bit_mask,
				    &regs->imask);
	} else {
		/* the 1588 exception is only meaningful with the TSU on */
		if (!dtsec->ptp_tsu_enabled) {
			pr_err("Exception valid for 1588 only\n");
			return -EINVAL;
		}
		switch (exception) {
		case FM_MAC_EX_1G_1588_TS_RX_ERR:
			if (enable) {
				dtsec->en_tsu_err_exception = true;
				iowrite32be(ioread32be(&regs->tmr_pemask) |
					    TMR_PEMASK_TSREEN,
					    &regs->tmr_pemask);
			} else {
				dtsec->en_tsu_err_exception = false;
				iowrite32be(ioread32be(&regs->tmr_pemask) &
					    ~TMR_PEMASK_TSREEN,
					    &regs->tmr_pemask);
			}
			break;
		default:
			pr_err("Undefined exception\n");
			return -EINVAL;
		}
	}

	return 0;
}
/* Completes dTSEC initialization: optional MAC reset, parameter
 * validation, hardware configuration via init(), TBI PHY setup for
 * SGMII, max-frame-length propagation to the FMan, hash-table
 * allocation and FPM interrupt registration. On success the config
 * structure is freed and dtsec_drv_param is NULLed, which flips
 * is_init_done() to true and locks out the dtsec_cfg_* setters.
 *
 * Returns 0 on success or a negative errno; partially acquired
 * resources are released via free_init_resources() on failure.
 */
int dtsec_init(struct fman_mac *dtsec)
{
	struct dtsec_regs __iomem *regs = dtsec->regs;
	struct dtsec_cfg *dtsec_drv_param;
	int err;
	u16 max_frm_ln;
	enet_addr_t eth_addr;

	/* refuse to initialize twice */
	if (is_init_done(dtsec->dtsec_drv_param))
		return -EINVAL;

	if (DEFAULT_RESET_ON_INIT &&
	    (fman_reset_mac(dtsec->fm, dtsec->mac_id) != 0)) {
		pr_err("Can't reset MAC!\n");
		return -EINVAL;
	}

	err = check_init_parameters(dtsec);
	if (err)
		return err;

	dtsec_drv_param = dtsec->dtsec_drv_param;

	MAKE_ENET_ADDR_FROM_UINT64(dtsec->addr, eth_addr);

	err = init(dtsec->regs, dtsec_drv_param, dtsec->phy_if,
		   dtsec->max_speed, (u8 *)eth_addr, dtsec->exceptions,
		   dtsec->tbiphy->mdio.addr);
	if (err) {
		free_init_resources(dtsec);
		pr_err("DTSEC version doesn't support this i/f mode\n");
		return err;
	}

	if (dtsec->phy_if == PHY_INTERFACE_MODE_SGMII) {
		u16 tmp_reg16;

		/* Configure the TBI PHY Control Register */
		tmp_reg16 = TBICON_CLK_SELECT | TBICON_SOFT_RESET;
		phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16);

		tmp_reg16 = TBICON_CLK_SELECT;
		phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16);

		/* reset the PHY and force 1000FD with autoneg */
		tmp_reg16 = (BMCR_RESET | BMCR_ANENABLE |
			     BMCR_FULLDPLX | BMCR_SPEED1000);
		phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);

		/* advertise 1000BASE-X or SGMII depending on the mode */
		if (dtsec->basex_if)
			tmp_reg16 = TBIANA_1000X;
		else
			tmp_reg16 = TBIANA_SGMII;
		phy_write(dtsec->tbiphy, MII_ADVERTISE, tmp_reg16);

		/* restart autonegotiation with the new advertisement */
		tmp_reg16 = (BMCR_ANENABLE | BMCR_ANRESTART |
			     BMCR_FULLDPLX | BMCR_SPEED1000);
		phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
	}

	/* Max Frame Length */
	max_frm_ln = (u16)ioread32be(&regs->maxfrm);
	err = fman_set_mac_max_frame(dtsec->fm, dtsec->mac_id, max_frm_ln);
	if (err) {
		pr_err("Setting max frame length failed\n");
		free_init_resources(dtsec);
		return -EINVAL;
	}

	dtsec->multicast_addr_hash =
	alloc_hash_table(EXTENDED_HASH_TABLE_SIZE);
	if (!dtsec->multicast_addr_hash) {
		free_init_resources(dtsec);
		pr_err("MC hash table is failed\n");
		return -ENOMEM;
	}

	dtsec->unicast_addr_hash = alloc_hash_table(DTSEC_HASH_TABLE_SIZE);
	if (!dtsec->unicast_addr_hash) {
		free_init_resources(dtsec);
		pr_err("UC hash table is failed\n");
		return -ENOMEM;
	}

	/* register err intr handler for dtsec to FPM (err) */
	fman_register_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
			   FMAN_INTR_TYPE_ERR, dtsec_isr, dtsec);
	/* register 1588 intr handler for TMR to FPM (normal) */
	fman_register_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
			   FMAN_INTR_TYPE_NORMAL, dtsec_1588_isr, dtsec);

	/* init complete: release the config and flip is_init_done() */
	kfree(dtsec_drv_param);
	dtsec->dtsec_drv_param = NULL;

	return 0;
}
/* Destroys a dTSEC instance: releases init-time resources (interrupts,
 * hash tables), frees any remaining config structure (non-NULL only if
 * dtsec_init() never completed) and the instance itself.
 * Always returns 0.
 */
int dtsec_free(struct fman_mac *dtsec)
{
	free_init_resources(dtsec);

	kfree(dtsec->dtsec_drv_param);
	dtsec->dtsec_drv_param = NULL;
	kfree(dtsec);

	return 0;
}
/* Allocates and pre-configures a dTSEC instance from @params: default
 * driver config, register base, MAC address, default exception mask
 * and the TBI PHY looked up from the device tree. Hardware is NOT
 * touched here — that happens in dtsec_init().
 *
 * Returns the new instance, or NULL on allocation/lookup failure
 * (goto-based cleanup frees partial allocations).
 */
struct fman_mac *dtsec_config(struct fman_mac_params *params)
{
	struct fman_mac *dtsec;
	struct dtsec_cfg *dtsec_drv_param;
	void __iomem *base_addr;

	base_addr = params->base_addr;

	/* allocate memory for the UCC GETH data structure. */
	dtsec = kzalloc(sizeof(*dtsec), GFP_KERNEL);
	if (!dtsec)
		return NULL;

	/* allocate memory for the d_tsec driver parameters data structure. */
	dtsec_drv_param = kzalloc(sizeof(*dtsec_drv_param), GFP_KERNEL);
	if (!dtsec_drv_param)
		goto err_dtsec;

	/* Plant parameter structure pointer */
	dtsec->dtsec_drv_param = dtsec_drv_param;

	set_dflts(dtsec_drv_param);

	dtsec->regs = base_addr;
	dtsec->addr = ENET_ADDR_TO_UINT64(params->addr);
	dtsec->max_speed = params->max_speed;
	dtsec->phy_if = params->phy_if;
	dtsec->mac_id = params->mac_id;
	/* default set of exceptions forwarded to the client callback;
	 * adjustable later via dtsec_set_exception()
	 */
	dtsec->exceptions = (DTSEC_IMASK_BREN |
			     DTSEC_IMASK_RXCEN |
			     DTSEC_IMASK_BTEN |
			     DTSEC_IMASK_TXCEN |
			     DTSEC_IMASK_TXEEN |
			     DTSEC_IMASK_ABRTEN |
			     DTSEC_IMASK_LCEN |
			     DTSEC_IMASK_CRLEN |
			     DTSEC_IMASK_XFUNEN |
			     DTSEC_IMASK_IFERREN |
			     DTSEC_IMASK_MAGEN |
			     DTSEC_IMASK_TDPEEN |
			     DTSEC_IMASK_RDPEEN);
	dtsec->exception_cb = params->exception_cb;
	dtsec->event_cb = params->event_cb;
	dtsec->dev_id = params->dev_id;
	dtsec->ptp_tsu_enabled = dtsec->dtsec_drv_param->ptp_tsu_en;
	dtsec->en_tsu_err_exception = dtsec->dtsec_drv_param->ptp_exception_en;

	dtsec->fm = params->fm;
	dtsec->basex_if = params->basex_if;

	if (!params->internal_phy_node) {
		pr_err("TBI PHY node is not available\n");
		goto err_dtsec_drv_param;
	}

	dtsec->tbiphy = of_phy_find_device(params->internal_phy_node);
	if (!dtsec->tbiphy) {
		pr_err("of_phy_find_device (TBI PHY) failed\n");
		goto err_dtsec_drv_param;
	}

	/* NOTE(review): the reference taken by of_phy_find_device() is
	 * dropped here while dtsec->tbiphy is kept and used later —
	 * presumably the mdio bus keeps the device alive; confirm the
	 * refcounting is intentional.
	 */
	put_device(&dtsec->tbiphy->mdio.dev);

	/* Save FMan revision */
	fman_get_revision(dtsec->fm, &dtsec->fm_rev_info);

	return dtsec;

err_dtsec_drv_param:
	kfree(dtsec_drv_param);
err_dtsec:
	kfree(dtsec);
	return NULL;
}