xilinx_can.c 52 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /* Xilinx CAN device driver
  3. *
  4. * Copyright (C) 2012 - 2014 Xilinx, Inc.
  5. * Copyright (C) 2009 PetaLogix. All rights reserved.
  6. * Copyright (C) 2017 - 2018 Sandvik Mining and Construction Oy
  7. *
  8. * Description:
  9. * This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
  10. */
  11. #include <linux/clk.h>
  12. #include <linux/errno.h>
  13. #include <linux/init.h>
  14. #include <linux/interrupt.h>
  15. #include <linux/io.h>
  16. #include <linux/kernel.h>
  17. #include <linux/module.h>
  18. #include <linux/netdevice.h>
  19. #include <linux/of.h>
  20. #include <linux/of_device.h>
  21. #include <linux/platform_device.h>
  22. #include <linux/skbuff.h>
  23. #include <linux/spinlock.h>
  24. #include <linux/string.h>
  25. #include <linux/types.h>
  26. #include <linux/can/dev.h>
  27. #include <linux/can/error.h>
  28. #include <linux/can/led.h>
  29. #include <linux/pm_runtime.h>
#define DRIVER_NAME	"xilinx_can"

/* CAN registers set - offsets from the controller register base */
enum xcan_reg {
	XCAN_SRR_OFFSET		= 0x00, /* Software reset */
	XCAN_MSR_OFFSET		= 0x04, /* Mode select */
	XCAN_BRPR_OFFSET	= 0x08, /* Baud rate prescaler */
	XCAN_BTR_OFFSET		= 0x0C, /* Bit timing */
	XCAN_ECR_OFFSET		= 0x10, /* Error counter */
	XCAN_ESR_OFFSET		= 0x14, /* Error status */
	XCAN_SR_OFFSET		= 0x18, /* Status */
	XCAN_ISR_OFFSET		= 0x1C, /* Interrupt status */
	XCAN_IER_OFFSET		= 0x20, /* Interrupt enable */
	XCAN_ICR_OFFSET		= 0x24, /* Interrupt clear */

	/* not on CAN FD cores */
	XCAN_TXFIFO_OFFSET	= 0x30, /* TX FIFO base */
	XCAN_RXFIFO_OFFSET	= 0x50, /* RX FIFO base */
	XCAN_AFR_OFFSET		= 0x60, /* Acceptance Filter */

	/* only on CAN FD cores */
	XCAN_F_BRPR_OFFSET	= 0x088, /* Data Phase Baud Rate
					  * Prescalar
					  */
	XCAN_F_BTR_OFFSET	= 0x08C, /* Data Phase Bit Timing */
	XCAN_TRR_OFFSET		= 0x0090, /* TX Buffer Ready Request */
	XCAN_AFR_EXT_OFFSET	= 0x00E0, /* Acceptance Filter */
	XCAN_FSR_OFFSET		= 0x00E8, /* RX FIFO Status */
	XCAN_TXMSG_BASE_OFFSET	= 0x0100, /* TX Message Space */
	XCAN_RXMSG_BASE_OFFSET	= 0x1100, /* RX Message Space */
	XCAN_RXMSG_2_BASE_OFFSET = 0x2100, /* RX Message Space */
	XCAN_AFR_2_MASK_OFFSET	= 0x0A00, /* Acceptance Filter MASK */
	XCAN_AFR_2_ID_OFFSET	= 0x0A04, /* Acceptance Filter ID */
};
/* Per-frame register offsets relative to a frame/buffer base address */
#define XCAN_FRAME_ID_OFFSET(frame_base)	((frame_base) + 0x00)
#define XCAN_FRAME_DLC_OFFSET(frame_base)	((frame_base) + 0x04)
#define XCAN_FRAME_DW1_OFFSET(frame_base)	((frame_base) + 0x08)
#define XCAN_FRAME_DW2_OFFSET(frame_base)	((frame_base) + 0x0C)
#define XCANFD_FRAME_DW_OFFSET(frame_base)	((frame_base) + 0x08)

/* Size of one CAN FD message buffer in the message RAM */
#define XCAN_CANFD_FRAME_SIZE		0x48
#define XCAN_TXMSG_FRAME_OFFSET(n)	(XCAN_TXMSG_BASE_OFFSET + \
					 XCAN_CANFD_FRAME_SIZE * (n))
#define XCAN_RXMSG_FRAME_OFFSET(n)	(XCAN_RXMSG_BASE_OFFSET + \
					 XCAN_CANFD_FRAME_SIZE * (n))
#define XCAN_RXMSG_2_FRAME_OFFSET(n)	(XCAN_RXMSG_2_BASE_OFFSET + \
					 XCAN_CANFD_FRAME_SIZE * (n))

/* the single TX mailbox used by this driver on CAN FD HW */
#define XCAN_TX_MAILBOX_IDX		0

/* CAN register bit masks - XCAN_<REG>_<BIT>_MASK */
#define XCAN_SRR_CEN_MASK		0x00000002 /* CAN enable */
#define XCAN_SRR_RESET_MASK		0x00000001 /* Soft Reset the CAN core */
#define XCAN_MSR_LBACK_MASK		0x00000002 /* Loop back mode select */
#define XCAN_MSR_SLEEP_MASK		0x00000001 /* Sleep mode select */
#define XCAN_BRPR_BRP_MASK		0x000000FF /* Baud rate prescaler */
#define XCAN_BTR_SJW_MASK		0x00000180 /* Synchronous jump width */
#define XCAN_BTR_TS2_MASK		0x00000070 /* Time segment 2 */
#define XCAN_BTR_TS1_MASK		0x0000000F /* Time segment 1 */
#define XCAN_BTR_SJW_MASK_CANFD		0x000F0000 /* Synchronous jump width */
#define XCAN_BTR_TS2_MASK_CANFD		0x00000F00 /* Time segment 2 */
#define XCAN_BTR_TS1_MASK_CANFD		0x0000003F /* Time segment 1 */
#define XCAN_ECR_REC_MASK		0x0000FF00 /* Receive error counter */
#define XCAN_ECR_TEC_MASK		0x000000FF /* Transmit error counter */
#define XCAN_ESR_ACKER_MASK		0x00000010 /* ACK error */
#define XCAN_ESR_BERR_MASK		0x00000008 /* Bit error */
#define XCAN_ESR_STER_MASK		0x00000004 /* Stuff error */
#define XCAN_ESR_FMER_MASK		0x00000002 /* Form error */
#define XCAN_ESR_CRCER_MASK		0x00000001 /* CRC error */
#define XCAN_SR_TXFLL_MASK		0x00000400 /* TX FIFO is full */
#define XCAN_SR_ESTAT_MASK		0x00000180 /* Error status */
#define XCAN_SR_ERRWRN_MASK		0x00000040 /* Error warning */
#define XCAN_SR_NORMAL_MASK		0x00000008 /* Normal mode */
#define XCAN_SR_LBACK_MASK		0x00000002 /* Loop back mode */
#define XCAN_SR_CONFIG_MASK		0x00000001 /* Configuration mode */
#define XCAN_IXR_RXMNF_MASK		0x00020000 /* RX match not finished */
#define XCAN_IXR_TXFEMP_MASK		0x00004000 /* TX FIFO Empty */
#define XCAN_IXR_WKUP_MASK		0x00000800 /* Wake up interrupt */
#define XCAN_IXR_SLP_MASK		0x00000400 /* Sleep interrupt */
#define XCAN_IXR_BSOFF_MASK		0x00000200 /* Bus off interrupt */
#define XCAN_IXR_ERROR_MASK		0x00000100 /* Error interrupt */
#define XCAN_IXR_RXNEMP_MASK		0x00000080 /* RX FIFO NotEmpty intr */
#define XCAN_IXR_RXOFLW_MASK		0x00000040 /* RX FIFO Overflow intr */
#define XCAN_IXR_RXOK_MASK		0x00000010 /* Message received intr */
#define XCAN_IXR_TXFLL_MASK		0x00000004 /* Tx FIFO Full intr */
#define XCAN_IXR_TXOK_MASK		0x00000002 /* TX successful intr */
#define XCAN_IXR_ARBLST_MASK		0x00000001 /* Arbitration lost intr */
#define XCAN_IDR_ID1_MASK		0xFFE00000 /* Standard msg identifier */
#define XCAN_IDR_SRR_MASK		0x00100000 /* Substitute remote TXreq */
#define XCAN_IDR_IDE_MASK		0x00080000 /* Identifier extension */
#define XCAN_IDR_ID2_MASK		0x0007FFFE /* Extended message ident */
#define XCAN_IDR_RTR_MASK		0x00000001 /* Remote TX request */
#define XCAN_DLCR_DLC_MASK		0xF0000000 /* Data length code */
#define XCAN_FSR_FL_MASK		0x00003F00 /* RX Fill Level */
#define XCAN_2_FSR_FL_MASK		0x00007F00 /* RX Fill Level */
#define XCAN_FSR_IRI_MASK		0x00000080 /* RX Increment Read Index */
#define XCAN_FSR_RI_MASK		0x0000001F /* RX Read Index */
#define XCAN_2_FSR_RI_MASK		0x0000003F /* RX Read Index */
#define XCAN_DLCR_EDL_MASK		0x08000000 /* EDL Mask in DLC */
#define XCAN_DLCR_BRS_MASK		0x04000000 /* BRS Mask in DLC */

/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
#define XCAN_BTR_SJW_SHIFT		7  /* Synchronous jump width */
#define XCAN_BTR_TS2_SHIFT		4  /* Time segment 2 */
#define XCAN_BTR_SJW_SHIFT_CANFD	16 /* Synchronous jump width */
#define XCAN_BTR_TS2_SHIFT_CANFD	8  /* Time segment 2 */
#define XCAN_IDR_ID1_SHIFT		21 /* Standard Messg Identifier */
#define XCAN_IDR_ID2_SHIFT		1  /* Extended Message Identifier */
#define XCAN_DLCR_DLC_SHIFT		28 /* Data length code */
#define XCAN_ESR_REC_SHIFT		8  /* Rx Error Count */

/* CAN frame length constants */
#define XCAN_FRAME_MAX_DATA_LEN		8
#define XCANFD_DW_BYTES			4
#define XCAN_TIMEOUT			(1 * HZ)

/* Device capability flags, kept in xcan_devtype_data.flags */
/* TX-FIFO-empty interrupt available */
#define XCAN_FLAG_TXFEMP	0x0001
/* RX Match Not Finished interrupt available */
#define XCAN_FLAG_RXMNF	0x0002
/* Extended acceptance filters with control at 0xE0 */
#define XCAN_FLAG_EXT_FILTERS	0x0004
/* TX mailboxes instead of TX FIFO */
#define XCAN_FLAG_TX_MAILBOXES	0x0008
/* RX FIFO with each buffer in separate registers at 0x1100
 * instead of the regular FIFO at 0x50
 */
#define XCAN_FLAG_RX_FIFO_MULTI	0x0010
#define XCAN_FLAG_CANFD_2	0x0020
/* Supported Xilinx CAN IP core variants */
enum xcan_ip_type {
	XAXI_CAN = 0,	/* AXI CAN IP */
	XZYNQ_CANPS,	/* Zynq CANPS controller */
	XAXI_CANFD,	/* AXI CAN FD 1.0 IP */
	XAXI_CANFD_2_0,	/* AXI CAN FD 2.0 IP */
};
/**
 * struct xcan_devtype_data - Device type specific constants
 * @cantype: Type of the CAN IP core
 * @flags: XCAN_FLAG_* capability flags of this core
 * @bittiming_const: Arbitration phase bit timing limits
 * @bus_clk_name: Name of the bus clock to acquire
 * @btr_ts2_shift: Shift of the Time Segment 2 field in the BTR register
 * @btr_sjw_shift: Shift of the sync jump width field in the BTR register
 */
struct xcan_devtype_data {
	enum xcan_ip_type cantype;
	unsigned int flags;
	const struct can_bittiming_const *bittiming_const;
	const char *bus_clk_name;
	unsigned int btr_ts2_shift;
	unsigned int btr_sjw_shift;
};
/**
 * struct xcan_priv - CAN driver instance private data
 * @can: CAN private data structure.
 * @tx_lock: Lock for synchronizing TX interrupt handling
 * @tx_head: Tx CAN packets ready to send on the queue
 * @tx_tail: Tx CAN packets successfully sent on the queue
 * @tx_max: Maximum number packets the driver can send
 * @napi: NAPI structure
 * @read_reg: For reading data from CAN registers
 * @write_reg: For writing data to CAN registers
 * @dev: Network device data structure
 * @reg_base: Ioremapped address to registers
 * @irq_flags: For request_irq()
 * @bus_clk: Pointer to struct clk
 * @can_clk: Pointer to struct clk
 * @devtype: Device type specific constants
 */
struct xcan_priv {
	struct can_priv can;
	spinlock_t tx_lock;
	unsigned int tx_head;
	unsigned int tx_tail;
	unsigned int tx_max;
	struct napi_struct napi;
	/* Endianness-specific register accessors, selected at probe time */
	u32 (*read_reg)(const struct xcan_priv *priv, enum xcan_reg reg);
	void (*write_reg)(const struct xcan_priv *priv, enum xcan_reg reg,
			  u32 val);
	struct device *dev;
	void __iomem *reg_base;
	unsigned long irq_flags;
	struct clk *bus_clk;
	struct clk *can_clk;
	struct xcan_devtype_data devtype;
};
  199. /* CAN Bittiming constants as per Xilinx CAN specs */
  200. static const struct can_bittiming_const xcan_bittiming_const = {
  201. .name = DRIVER_NAME,
  202. .tseg1_min = 1,
  203. .tseg1_max = 16,
  204. .tseg2_min = 1,
  205. .tseg2_max = 8,
  206. .sjw_max = 4,
  207. .brp_min = 1,
  208. .brp_max = 256,
  209. .brp_inc = 1,
  210. };
  211. /* AXI CANFD Arbitration Bittiming constants as per AXI CANFD 1.0 spec */
  212. static const struct can_bittiming_const xcan_bittiming_const_canfd = {
  213. .name = DRIVER_NAME,
  214. .tseg1_min = 1,
  215. .tseg1_max = 64,
  216. .tseg2_min = 1,
  217. .tseg2_max = 16,
  218. .sjw_max = 16,
  219. .brp_min = 1,
  220. .brp_max = 256,
  221. .brp_inc = 1,
  222. };
  223. /* AXI CANFD Data Bittiming constants as per AXI CANFD 1.0 specs */
  224. static struct can_bittiming_const xcan_data_bittiming_const_canfd = {
  225. .name = DRIVER_NAME,
  226. .tseg1_min = 1,
  227. .tseg1_max = 16,
  228. .tseg2_min = 1,
  229. .tseg2_max = 8,
  230. .sjw_max = 8,
  231. .brp_min = 1,
  232. .brp_max = 256,
  233. .brp_inc = 1,
  234. };
  235. /* AXI CANFD 2.0 Arbitration Bittiming constants as per AXI CANFD 2.0 spec */
  236. static const struct can_bittiming_const xcan_bittiming_const_canfd2 = {
  237. .name = DRIVER_NAME,
  238. .tseg1_min = 1,
  239. .tseg1_max = 256,
  240. .tseg2_min = 1,
  241. .tseg2_max = 128,
  242. .sjw_max = 128,
  243. .brp_min = 1,
  244. .brp_max = 256,
  245. .brp_inc = 1,
  246. };
  247. /* AXI CANFD 2.0 Data Bittiming constants as per AXI CANFD 2.0 spec */
  248. static struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
  249. .name = DRIVER_NAME,
  250. .tseg1_min = 1,
  251. .tseg1_max = 32,
  252. .tseg2_min = 1,
  253. .tseg2_max = 16,
  254. .sjw_max = 16,
  255. .brp_min = 1,
  256. .brp_max = 256,
  257. .brp_inc = 1,
  258. };
/**
 * xcan_write_reg_le - Write a value to the device register little endian
 * @priv: Driver private data structure
 * @reg: Register offset
 * @val: Value to write at the Register offset
 *
 * Write data to the particular CAN register
 */
static void xcan_write_reg_le(const struct xcan_priv *priv, enum xcan_reg reg,
			      u32 val)
{
	iowrite32(val, priv->reg_base + reg);
}
/**
 * xcan_read_reg_le - Read a value from the device register little endian
 * @priv: Driver private data structure
 * @reg: Register offset
 *
 * Read data from the particular CAN register
 * Return: value read from the CAN register
 */
static u32 xcan_read_reg_le(const struct xcan_priv *priv, enum xcan_reg reg)
{
	return ioread32(priv->reg_base + reg);
}
/**
 * xcan_write_reg_be - Write a value to the device register big endian
 * @priv: Driver private data structure
 * @reg: Register offset
 * @val: Value to write at the Register offset
 *
 * Write data to the particular CAN register
 */
static void xcan_write_reg_be(const struct xcan_priv *priv, enum xcan_reg reg,
			      u32 val)
{
	iowrite32be(val, priv->reg_base + reg);
}
/**
 * xcan_read_reg_be - Read a value from the device register big endian
 * @priv: Driver private data structure
 * @reg: Register offset
 *
 * Read data from the particular CAN register
 * Return: value read from the CAN register
 */
static u32 xcan_read_reg_be(const struct xcan_priv *priv, enum xcan_reg reg)
{
	return ioread32be(priv->reg_base + reg);
}
  309. /**
  310. * xcan_rx_int_mask - Get the mask for the receive interrupt
  311. * @priv: Driver private data structure
  312. *
  313. * Return: The receive interrupt mask used by the driver on this HW
  314. */
  315. static u32 xcan_rx_int_mask(const struct xcan_priv *priv)
  316. {
  317. /* RXNEMP is better suited for our use case as it cannot be cleared
  318. * while the FIFO is non-empty, but CAN FD HW does not have it
  319. */
  320. if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
  321. return XCAN_IXR_RXOK_MASK;
  322. else
  323. return XCAN_IXR_RXNEMP_MASK;
  324. }
/**
 * set_reset_mode - Resets the CAN device mode
 * @ndev: Pointer to net_device structure
 *
 * This is the driver reset mode routine. The driver
 * enters into configuration mode.
 *
 * Return: 0 on success and failure value on error
 */
static int set_reset_mode(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	unsigned long timeout;

	/* Soft-reset the core; it comes back up in configuration mode */
	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);

	/* Poll the status register until configuration mode is reported,
	 * giving up after XCAN_TIMEOUT jiffies.
	 */
	timeout = jiffies + XCAN_TIMEOUT;
	while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & XCAN_SR_CONFIG_MASK)) {
		if (time_after(jiffies, timeout)) {
			netdev_warn(ndev, "timed out for config mode\n");
			return -ETIMEDOUT;
		}
		usleep_range(500, 10000);
	}

	/* reset clears FIFOs */
	priv->tx_head = 0;
	priv->tx_tail = 0;

	return 0;
}
/**
 * xcan_set_bittiming - CAN set bit timing routine
 * @ndev: Pointer to net_device structure
 *
 * Programs the arbitration-phase (and, on CAN FD cores, the data-phase)
 * bit timing registers from the values computed by the CAN core.
 * The controller must already be in configuration mode.
 *
 * Return: 0 on success and failure value on error
 */
static int xcan_set_bittiming(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct can_bittiming *bt = &priv->can.bittiming;
	struct can_bittiming *dbt = &priv->can.data_bittiming;
	u32 btr0, btr1;
	u32 is_config_mode;

	/* Check whether Xilinx CAN is in configuration mode.
	 * It cannot set bit timing if Xilinx CAN is not in configuration mode.
	 */
	is_config_mode = priv->read_reg(priv, XCAN_SR_OFFSET) &
			 XCAN_SR_CONFIG_MASK;
	if (!is_config_mode) {
		netdev_alert(ndev,
			     "BUG! Cannot set bittiming - CAN is not in config mode\n");
		return -EPERM;
	}

	/* Setting Baud Rate prescalar value in BRPR Register */
	btr0 = (bt->brp - 1);

	/* Setting Time Segment 1 in BTR Register */
	btr1 = (bt->prop_seg + bt->phase_seg1 - 1);

	/* Setting Time Segment 2 in BTR Register; field position differs
	 * per core, hence the devtype shift values.
	 */
	btr1 |= (bt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift;

	/* Setting Synchronous jump width in BTR Register */
	btr1 |= (bt->sjw - 1) << priv->devtype.btr_sjw_shift;

	priv->write_reg(priv, XCAN_BRPR_OFFSET, btr0);
	priv->write_reg(priv, XCAN_BTR_OFFSET, btr1);

	if (priv->devtype.cantype == XAXI_CANFD ||
	    priv->devtype.cantype == XAXI_CANFD_2_0) {
		/* Setting Baud Rate prescalar value in F_BRPR Register */
		btr0 = dbt->brp - 1;

		/* Setting Time Segment 1 in BTR Register */
		btr1 = dbt->prop_seg + dbt->phase_seg1 - 1;

		/* Setting Time Segment 2 in BTR Register */
		btr1 |= (dbt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift;

		/* Setting Synchronous jump width in BTR Register */
		btr1 |= (dbt->sjw - 1) << priv->devtype.btr_sjw_shift;

		priv->write_reg(priv, XCAN_F_BRPR_OFFSET, btr0);
		priv->write_reg(priv, XCAN_F_BTR_OFFSET, btr1);
	}

	netdev_dbg(ndev, "BRPR=0x%08x, BTR=0x%08x\n",
		   priv->read_reg(priv, XCAN_BRPR_OFFSET),
		   priv->read_reg(priv, XCAN_BTR_OFFSET));

	return 0;
}
  404. /**
  405. * xcan_chip_start - This the drivers start routine
  406. * @ndev: Pointer to net_device structure
  407. *
  408. * This is the drivers start routine.
  409. * Based on the State of the CAN device it puts
  410. * the CAN device into a proper mode.
  411. *
  412. * Return: 0 on success and failure value on error
  413. */
  414. static int xcan_chip_start(struct net_device *ndev)
  415. {
  416. struct xcan_priv *priv = netdev_priv(ndev);
  417. u32 reg_msr;
  418. int err;
  419. u32 ier;
  420. /* Check if it is in reset mode */
  421. err = set_reset_mode(ndev);
  422. if (err < 0)
  423. return err;
  424. err = xcan_set_bittiming(ndev);
  425. if (err < 0)
  426. return err;
  427. /* Enable interrupts */
  428. ier = XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |
  429. XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK |
  430. XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
  431. XCAN_IXR_ARBLST_MASK | xcan_rx_int_mask(priv);
  432. if (priv->devtype.flags & XCAN_FLAG_RXMNF)
  433. ier |= XCAN_IXR_RXMNF_MASK;
  434. priv->write_reg(priv, XCAN_IER_OFFSET, ier);
  435. /* Check whether it is loopback mode or normal mode */
  436. if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
  437. reg_msr = XCAN_MSR_LBACK_MASK;
  438. } else {
  439. reg_msr = 0x0;
  440. }
  441. /* enable the first extended filter, if any, as cores with extended
  442. * filtering default to non-receipt if all filters are disabled
  443. */
  444. if (priv->devtype.flags & XCAN_FLAG_EXT_FILTERS)
  445. priv->write_reg(priv, XCAN_AFR_EXT_OFFSET, 0x00000001);
  446. priv->write_reg(priv, XCAN_MSR_OFFSET, reg_msr);
  447. priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK);
  448. netdev_dbg(ndev, "status:#x%08x\n",
  449. priv->read_reg(priv, XCAN_SR_OFFSET));
  450. priv->can.state = CAN_STATE_ERROR_ACTIVE;
  451. return 0;
  452. }
  453. /**
  454. * xcan_do_set_mode - This sets the mode of the driver
  455. * @ndev: Pointer to net_device structure
  456. * @mode: Tells the mode of the driver
  457. *
  458. * This check the drivers state and calls the
  459. * the corresponding modes to set.
  460. *
  461. * Return: 0 on success and failure value on error
  462. */
  463. static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode)
  464. {
  465. int ret;
  466. switch (mode) {
  467. case CAN_MODE_START:
  468. ret = xcan_chip_start(ndev);
  469. if (ret < 0) {
  470. netdev_err(ndev, "xcan_chip_start failed!\n");
  471. return ret;
  472. }
  473. netif_wake_queue(ndev);
  474. break;
  475. default:
  476. ret = -EOPNOTSUPP;
  477. break;
  478. }
  479. return ret;
  480. }
/**
 * xcan_write_frame - Write a frame to HW
 * @priv: Driver private data structure
 * @skb: sk_buff pointer that contains data to be Txed
 * @frame_offset: Register offset to write the frame to
 *
 * Packs the socketCAN frame into the controller's ID/DLC/data register
 * layout and writes it at @frame_offset (TX FIFO or a TX mailbox).
 */
static void xcan_write_frame(struct xcan_priv *priv, struct sk_buff *skb,
			     int frame_offset)
{
	u32 id, dlc, data[2] = {0, 0};
	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
	u32 ramoff, dwindex = 0, i;

	/* Watch carefully on the bit sequence */
	if (cf->can_id & CAN_EFF_FLAG) {
		/* Extended CAN ID format: low 18 bits go to ID2, the
		 * upper 11 bits to ID1.
		 */
		id = ((cf->can_id & CAN_EFF_MASK) << XCAN_IDR_ID2_SHIFT) &
			XCAN_IDR_ID2_MASK;
		id |= (((cf->can_id & CAN_EFF_MASK) >>
			(CAN_EFF_ID_BITS - CAN_SFF_ID_BITS)) <<
			XCAN_IDR_ID1_SHIFT) & XCAN_IDR_ID1_MASK;

		/* The substitute remote TX request bit should be "1"
		 * for extended frames as in the Xilinx CAN datasheet
		 */
		id |= XCAN_IDR_IDE_MASK | XCAN_IDR_SRR_MASK;

		if (cf->can_id & CAN_RTR_FLAG)
			/* Extended frames remote TX request */
			id |= XCAN_IDR_RTR_MASK;
	} else {
		/* Standard CAN ID format */
		id = ((cf->can_id & CAN_SFF_MASK) << XCAN_IDR_ID1_SHIFT) &
			XCAN_IDR_ID1_MASK;

		if (cf->can_id & CAN_RTR_FLAG)
			/* Standard frames remote TX request */
			id |= XCAN_IDR_SRR_MASK;
	}

	dlc = can_len2dlc(cf->len) << XCAN_DLCR_DLC_SHIFT;
	if (can_is_canfd_skb(skb)) {
		/* Set EDL (and BRS if requested) for CAN FD frames */
		if (cf->flags & CANFD_BRS)
			dlc |= XCAN_DLCR_BRS_MASK;
		dlc |= XCAN_DLCR_EDL_MASK;
	}

	priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id);
	/* If the CAN frame is RTR frame this write triggers transmission
	 * (not on CAN FD)
	 */
	priv->write_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_offset), dlc);
	if (priv->devtype.cantype == XAXI_CANFD ||
	    priv->devtype.cantype == XAXI_CANFD_2_0) {
		/* CAN FD cores: copy payload word by word into message RAM */
		for (i = 0; i < cf->len; i += 4) {
			ramoff = XCANFD_FRAME_DW_OFFSET(frame_offset) +
					(dwindex * XCANFD_DW_BYTES);
			priv->write_reg(priv, ramoff,
					be32_to_cpup((__be32 *)(cf->data + i)));
			dwindex++;
		}
	} else {
		if (cf->len > 0)
			data[0] = be32_to_cpup((__be32 *)(cf->data + 0));
		if (cf->len > 4)
			data[1] = be32_to_cpup((__be32 *)(cf->data + 4));

		if (!(cf->can_id & CAN_RTR_FLAG)) {
			priv->write_reg(priv,
					XCAN_FRAME_DW1_OFFSET(frame_offset),
					data[0]);
			/* If the CAN frame is Standard/Extended frame this
			 * write triggers transmission (not on CAN FD)
			 */
			priv->write_reg(priv,
					XCAN_FRAME_DW2_OFFSET(frame_offset),
					data[1]);
		}
	}
}
/**
 * xcan_start_xmit_fifo - Starts the transmission (FIFO mode)
 * @skb: sk_buff pointer that contains data to be Txed
 * @ndev: Pointer to net_device structure
 *
 * Return: 0 on success, -ENOSPC if FIFO is full.
 */
static int xcan_start_xmit_fifo(struct sk_buff *skb, struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	unsigned long flags;

	/* Check if the TX buffer is full */
	if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) &
		     XCAN_SR_TXFLL_MASK))
		return -ENOSPC;

	can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);

	/* tx_lock serializes the head update and frame write against the
	 * TX interrupt handler (see @tx_lock in struct xcan_priv).
	 */
	spin_lock_irqsave(&priv->tx_lock, flags);

	priv->tx_head++;

	xcan_write_frame(priv, skb, XCAN_TXFIFO_OFFSET);

	/* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
	if (priv->tx_max > 1)
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK);

	/* Check if the TX buffer is full */
	if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
		netif_stop_queue(ndev);

	spin_unlock_irqrestore(&priv->tx_lock, flags);

	return 0;
}
/**
 * xcan_start_xmit_mailbox - Starts the transmission (mailbox mode)
 * @skb: sk_buff pointer that contains data to be Txed
 * @ndev: Pointer to net_device structure
 *
 * Return: 0 on success, -ENOSPC if there is no space
 */
static int xcan_start_xmit_mailbox(struct sk_buff *skb, struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	unsigned long flags;

	/* Busy if our single TX mailbox still has a pending request */
	if (unlikely(priv->read_reg(priv, XCAN_TRR_OFFSET) &
		     BIT(XCAN_TX_MAILBOX_IDX)))
		return -ENOSPC;

	can_put_echo_skb(skb, ndev, 0);

	spin_lock_irqsave(&priv->tx_lock, flags);

	priv->tx_head++;

	xcan_write_frame(priv, skb,
			 XCAN_TXMSG_FRAME_OFFSET(XCAN_TX_MAILBOX_IDX));

	/* Mark buffer as ready for transmit */
	priv->write_reg(priv, XCAN_TRR_OFFSET, BIT(XCAN_TX_MAILBOX_IDX));

	/* Only one mailbox is used, so the queue stops until TX completes */
	netif_stop_queue(ndev);

	spin_unlock_irqrestore(&priv->tx_lock, flags);

	return 0;
}
  607. /**
  608. * xcan_start_xmit - Starts the transmission
  609. * @skb: sk_buff pointer that contains data to be Txed
  610. * @ndev: Pointer to net_device structure
  611. *
  612. * This function is invoked from upper layers to initiate transmission.
  613. *
  614. * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY when the tx queue is full
  615. */
  616. static netdev_tx_t xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
  617. {
  618. struct xcan_priv *priv = netdev_priv(ndev);
  619. int ret;
  620. if (can_dropped_invalid_skb(ndev, skb))
  621. return NETDEV_TX_OK;
  622. if (priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES)
  623. ret = xcan_start_xmit_mailbox(skb, ndev);
  624. else
  625. ret = xcan_start_xmit_fifo(skb, ndev);
  626. if (ret < 0) {
  627. netdev_err(ndev, "BUG!, TX full when queue awake!\n");
  628. netif_stop_queue(ndev);
  629. return NETDEV_TX_BUSY;
  630. }
  631. return NETDEV_TX_OK;
  632. }
  633. /**
  634. * xcan_rx - Is called from CAN isr to complete the received
  635. * frame processing
  636. * @ndev: Pointer to net_device structure
  637. * @frame_base: Register offset to the frame to be read
  638. *
  639. * This function is invoked from the CAN isr(poll) to process the Rx frames. It
  640. * does minimal processing and invokes "netif_receive_skb" to complete further
  641. * processing.
  642. * Return: 1 on success and 0 on failure.
  643. */
  644. static int xcan_rx(struct net_device *ndev, int frame_base)
  645. {
  646. struct xcan_priv *priv = netdev_priv(ndev);
  647. struct net_device_stats *stats = &ndev->stats;
  648. struct can_frame *cf;
  649. struct sk_buff *skb;
  650. u32 id_xcan, dlc, data[2] = {0, 0};
  651. skb = alloc_can_skb(ndev, &cf);
  652. if (unlikely(!skb)) {
  653. stats->rx_dropped++;
  654. return 0;
  655. }
  656. /* Read a frame from Xilinx zynq CANPS */
  657. id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base));
  658. dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base)) >>
  659. XCAN_DLCR_DLC_SHIFT;
  660. /* Change Xilinx CAN data length format to socketCAN data format */
  661. cf->can_dlc = get_can_dlc(dlc);
  662. /* Change Xilinx CAN ID format to socketCAN ID format */
  663. if (id_xcan & XCAN_IDR_IDE_MASK) {
  664. /* The received frame is an Extended format frame */
  665. cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
  666. cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
  667. XCAN_IDR_ID2_SHIFT;
  668. cf->can_id |= CAN_EFF_FLAG;
  669. if (id_xcan & XCAN_IDR_RTR_MASK)
  670. cf->can_id |= CAN_RTR_FLAG;
  671. } else {
  672. /* The received frame is a standard format frame */
  673. cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
  674. XCAN_IDR_ID1_SHIFT;
  675. if (id_xcan & XCAN_IDR_SRR_MASK)
  676. cf->can_id |= CAN_RTR_FLAG;
  677. }
  678. /* DW1/DW2 must always be read to remove message from RXFIFO */
  679. data[0] = priv->read_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_base));
  680. data[1] = priv->read_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_base));
  681. if (!(cf->can_id & CAN_RTR_FLAG)) {
  682. /* Change Xilinx CAN data format to socketCAN data format */
  683. if (cf->can_dlc > 0)
  684. *(__be32 *)(cf->data) = cpu_to_be32(data[0]);
  685. if (cf->can_dlc > 4)
  686. *(__be32 *)(cf->data + 4) = cpu_to_be32(data[1]);
  687. }
  688. stats->rx_bytes += cf->can_dlc;
  689. stats->rx_packets++;
  690. netif_receive_skb(skb);
  691. return 1;
  692. }
  693. /**
  694. * xcanfd_rx - Is called from CAN isr to complete the received
  695. * frame processing
  696. * @ndev: Pointer to net_device structure
  697. * @frame_base: Register offset to the frame to be read
  698. *
  699. * This function is invoked from the CAN isr(poll) to process the Rx frames. It
  700. * does minimal processing and invokes "netif_receive_skb" to complete further
  701. * processing.
  702. * Return: 1 on success and 0 on failure.
  703. */
  704. static int xcanfd_rx(struct net_device *ndev, int frame_base)
  705. {
  706. struct xcan_priv *priv = netdev_priv(ndev);
  707. struct net_device_stats *stats = &ndev->stats;
  708. struct canfd_frame *cf;
  709. struct sk_buff *skb;
  710. u32 id_xcan, dlc, data[2] = {0, 0}, dwindex = 0, i, dw_offset;
  711. id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base));
  712. dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base));
  713. if (dlc & XCAN_DLCR_EDL_MASK)
  714. skb = alloc_canfd_skb(ndev, &cf);
  715. else
  716. skb = alloc_can_skb(ndev, (struct can_frame **)&cf);
  717. if (unlikely(!skb)) {
  718. stats->rx_dropped++;
  719. return 0;
  720. }
  721. /* Change Xilinx CANFD data length format to socketCAN data
  722. * format
  723. */
  724. if (dlc & XCAN_DLCR_EDL_MASK)
  725. cf->len = can_dlc2len((dlc & XCAN_DLCR_DLC_MASK) >>
  726. XCAN_DLCR_DLC_SHIFT);
  727. else
  728. cf->len = get_can_dlc((dlc & XCAN_DLCR_DLC_MASK) >>
  729. XCAN_DLCR_DLC_SHIFT);
  730. /* Change Xilinx CAN ID format to socketCAN ID format */
  731. if (id_xcan & XCAN_IDR_IDE_MASK) {
  732. /* The received frame is an Extended format frame */
  733. cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
  734. cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
  735. XCAN_IDR_ID2_SHIFT;
  736. cf->can_id |= CAN_EFF_FLAG;
  737. if (id_xcan & XCAN_IDR_RTR_MASK)
  738. cf->can_id |= CAN_RTR_FLAG;
  739. } else {
  740. /* The received frame is a standard format frame */
  741. cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
  742. XCAN_IDR_ID1_SHIFT;
  743. if (!(dlc & XCAN_DLCR_EDL_MASK) && (id_xcan &
  744. XCAN_IDR_SRR_MASK))
  745. cf->can_id |= CAN_RTR_FLAG;
  746. }
  747. /* Check the frame received is FD or not*/
  748. if (dlc & XCAN_DLCR_EDL_MASK) {
  749. for (i = 0; i < cf->len; i += 4) {
  750. dw_offset = XCANFD_FRAME_DW_OFFSET(frame_base) +
  751. (dwindex * XCANFD_DW_BYTES);
  752. data[0] = priv->read_reg(priv, dw_offset);
  753. *(__be32 *)(cf->data + i) = cpu_to_be32(data[0]);
  754. dwindex++;
  755. }
  756. } else {
  757. for (i = 0; i < cf->len; i += 4) {
  758. dw_offset = XCANFD_FRAME_DW_OFFSET(frame_base);
  759. data[0] = priv->read_reg(priv, dw_offset + i);
  760. *(__be32 *)(cf->data + i) = cpu_to_be32(data[0]);
  761. }
  762. }
  763. stats->rx_bytes += cf->len;
  764. stats->rx_packets++;
  765. netif_receive_skb(skb);
  766. return 1;
  767. }
  768. /**
  769. * xcan_current_error_state - Get current error state from HW
  770. * @ndev: Pointer to net_device structure
  771. *
  772. * Checks the current CAN error state from the HW. Note that this
  773. * only checks for ERROR_PASSIVE and ERROR_WARNING.
  774. *
  775. * Return:
  776. * ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE
  777. * otherwise.
  778. */
  779. static enum can_state xcan_current_error_state(struct net_device *ndev)
  780. {
  781. struct xcan_priv *priv = netdev_priv(ndev);
  782. u32 status = priv->read_reg(priv, XCAN_SR_OFFSET);
  783. if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK)
  784. return CAN_STATE_ERROR_PASSIVE;
  785. else if (status & XCAN_SR_ERRWRN_MASK)
  786. return CAN_STATE_ERROR_WARNING;
  787. else
  788. return CAN_STATE_ERROR_ACTIVE;
  789. }
  790. /**
  791. * xcan_set_error_state - Set new CAN error state
  792. * @ndev: Pointer to net_device structure
  793. * @new_state: The new CAN state to be set
  794. * @cf: Error frame to be populated or NULL
  795. *
  796. * Set new CAN error state for the device, updating statistics and
  797. * populating the error frame if given.
  798. */
  799. static void xcan_set_error_state(struct net_device *ndev,
  800. enum can_state new_state,
  801. struct can_frame *cf)
  802. {
  803. struct xcan_priv *priv = netdev_priv(ndev);
  804. u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET);
  805. u32 txerr = ecr & XCAN_ECR_TEC_MASK;
  806. u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT;
  807. enum can_state tx_state = txerr >= rxerr ? new_state : 0;
  808. enum can_state rx_state = txerr <= rxerr ? new_state : 0;
  809. /* non-ERROR states are handled elsewhere */
  810. if (WARN_ON(new_state > CAN_STATE_ERROR_PASSIVE))
  811. return;
  812. can_change_state(ndev, cf, tx_state, rx_state);
  813. if (cf) {
  814. cf->data[6] = txerr;
  815. cf->data[7] = rxerr;
  816. }
  817. }
  818. /**
  819. * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX
  820. * @ndev: Pointer to net_device structure
  821. *
  822. * If the device is in a ERROR-WARNING or ERROR-PASSIVE state, check if
  823. * the performed RX/TX has caused it to drop to a lesser state and set
  824. * the interface state accordingly.
  825. */
  826. static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
  827. {
  828. struct xcan_priv *priv = netdev_priv(ndev);
  829. enum can_state old_state = priv->can.state;
  830. enum can_state new_state;
  831. /* changing error state due to successful frame RX/TX can only
  832. * occur from these states
  833. */
  834. if (old_state != CAN_STATE_ERROR_WARNING &&
  835. old_state != CAN_STATE_ERROR_PASSIVE)
  836. return;
  837. new_state = xcan_current_error_state(ndev);
  838. if (new_state != old_state) {
  839. struct sk_buff *skb;
  840. struct can_frame *cf;
  841. skb = alloc_can_err_skb(ndev, &cf);
  842. xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
  843. if (skb) {
  844. struct net_device_stats *stats = &ndev->stats;
  845. stats->rx_packets++;
  846. stats->rx_bytes += cf->can_dlc;
  847. netif_rx(skb);
  848. }
  849. }
  850. }
  851. /**
  852. * xcan_err_interrupt - error frame Isr
  853. * @ndev: net_device pointer
  854. * @isr: interrupt status register value
  855. *
  856. * This is the CAN error interrupt and it will
  857. * check the the type of error and forward the error
  858. * frame to upper layers.
  859. */
  860. static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
  861. {
  862. struct xcan_priv *priv = netdev_priv(ndev);
  863. struct net_device_stats *stats = &ndev->stats;
  864. struct can_frame *cf;
  865. struct sk_buff *skb;
  866. u32 err_status;
  867. skb = alloc_can_err_skb(ndev, &cf);
  868. err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
  869. priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);
  870. if (isr & XCAN_IXR_BSOFF_MASK) {
  871. priv->can.state = CAN_STATE_BUS_OFF;
  872. priv->can.can_stats.bus_off++;
  873. /* Leave device in Config Mode in bus-off state */
  874. priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
  875. can_bus_off(ndev);
  876. if (skb)
  877. cf->can_id |= CAN_ERR_BUSOFF;
  878. } else {
  879. enum can_state new_state = xcan_current_error_state(ndev);
  880. if (new_state != priv->can.state)
  881. xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
  882. }
  883. /* Check for Arbitration lost interrupt */
  884. if (isr & XCAN_IXR_ARBLST_MASK) {
  885. priv->can.can_stats.arbitration_lost++;
  886. if (skb) {
  887. cf->can_id |= CAN_ERR_LOSTARB;
  888. cf->data[0] = CAN_ERR_LOSTARB_UNSPEC;
  889. }
  890. }
  891. /* Check for RX FIFO Overflow interrupt */
  892. if (isr & XCAN_IXR_RXOFLW_MASK) {
  893. stats->rx_over_errors++;
  894. stats->rx_errors++;
  895. if (skb) {
  896. cf->can_id |= CAN_ERR_CRTL;
  897. cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
  898. }
  899. }
  900. /* Check for RX Match Not Finished interrupt */
  901. if (isr & XCAN_IXR_RXMNF_MASK) {
  902. stats->rx_dropped++;
  903. stats->rx_errors++;
  904. netdev_err(ndev, "RX match not finished, frame discarded\n");
  905. if (skb) {
  906. cf->can_id |= CAN_ERR_CRTL;
  907. cf->data[1] |= CAN_ERR_CRTL_UNSPEC;
  908. }
  909. }
  910. /* Check for error interrupt */
  911. if (isr & XCAN_IXR_ERROR_MASK) {
  912. if (skb)
  913. cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
  914. /* Check for Ack error interrupt */
  915. if (err_status & XCAN_ESR_ACKER_MASK) {
  916. stats->tx_errors++;
  917. if (skb) {
  918. cf->can_id |= CAN_ERR_ACK;
  919. cf->data[3] = CAN_ERR_PROT_LOC_ACK;
  920. }
  921. }
  922. /* Check for Bit error interrupt */
  923. if (err_status & XCAN_ESR_BERR_MASK) {
  924. stats->tx_errors++;
  925. if (skb) {
  926. cf->can_id |= CAN_ERR_PROT;
  927. cf->data[2] = CAN_ERR_PROT_BIT;
  928. }
  929. }
  930. /* Check for Stuff error interrupt */
  931. if (err_status & XCAN_ESR_STER_MASK) {
  932. stats->rx_errors++;
  933. if (skb) {
  934. cf->can_id |= CAN_ERR_PROT;
  935. cf->data[2] = CAN_ERR_PROT_STUFF;
  936. }
  937. }
  938. /* Check for Form error interrupt */
  939. if (err_status & XCAN_ESR_FMER_MASK) {
  940. stats->rx_errors++;
  941. if (skb) {
  942. cf->can_id |= CAN_ERR_PROT;
  943. cf->data[2] = CAN_ERR_PROT_FORM;
  944. }
  945. }
  946. /* Check for CRC error interrupt */
  947. if (err_status & XCAN_ESR_CRCER_MASK) {
  948. stats->rx_errors++;
  949. if (skb) {
  950. cf->can_id |= CAN_ERR_PROT;
  951. cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
  952. }
  953. }
  954. priv->can.can_stats.bus_error++;
  955. }
  956. if (skb) {
  957. stats->rx_packets++;
  958. stats->rx_bytes += cf->can_dlc;
  959. netif_rx(skb);
  960. }
  961. netdev_dbg(ndev, "%s: error status register:0x%x\n",
  962. __func__, priv->read_reg(priv, XCAN_ESR_OFFSET));
  963. }
  964. /**
  965. * xcan_state_interrupt - It will check the state of the CAN device
  966. * @ndev: net_device pointer
  967. * @isr: interrupt status register value
  968. *
  969. * This will checks the state of the CAN device
  970. * and puts the device into appropriate state.
  971. */
  972. static void xcan_state_interrupt(struct net_device *ndev, u32 isr)
  973. {
  974. struct xcan_priv *priv = netdev_priv(ndev);
  975. /* Check for Sleep interrupt if set put CAN device in sleep state */
  976. if (isr & XCAN_IXR_SLP_MASK)
  977. priv->can.state = CAN_STATE_SLEEPING;
  978. /* Check for Wake up interrupt if set put CAN device in Active state */
  979. if (isr & XCAN_IXR_WKUP_MASK)
  980. priv->can.state = CAN_STATE_ERROR_ACTIVE;
  981. }
  982. /**
  983. * xcan_rx_fifo_get_next_frame - Get register offset of next RX frame
  984. * @priv: Driver private data structure
  985. *
  986. * Return: Register offset of the next frame in RX FIFO.
  987. */
  988. static int xcan_rx_fifo_get_next_frame(struct xcan_priv *priv)
  989. {
  990. int offset;
  991. if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI) {
  992. u32 fsr, mask;
  993. /* clear RXOK before the is-empty check so that any newly
  994. * received frame will reassert it without a race
  995. */
  996. priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXOK_MASK);
  997. fsr = priv->read_reg(priv, XCAN_FSR_OFFSET);
  998. /* check if RX FIFO is empty */
  999. if (priv->devtype.flags & XCAN_FLAG_CANFD_2)
  1000. mask = XCAN_2_FSR_FL_MASK;
  1001. else
  1002. mask = XCAN_FSR_FL_MASK;
  1003. if (!(fsr & mask))
  1004. return -ENOENT;
  1005. if (priv->devtype.flags & XCAN_FLAG_CANFD_2)
  1006. offset =
  1007. XCAN_RXMSG_2_FRAME_OFFSET(fsr & XCAN_2_FSR_RI_MASK);
  1008. else
  1009. offset =
  1010. XCAN_RXMSG_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK);
  1011. } else {
  1012. /* check if RX FIFO is empty */
  1013. if (!(priv->read_reg(priv, XCAN_ISR_OFFSET) &
  1014. XCAN_IXR_RXNEMP_MASK))
  1015. return -ENOENT;
  1016. /* frames are read from a static offset */
  1017. offset = XCAN_RXFIFO_OFFSET;
  1018. }
  1019. return offset;
  1020. }
  1021. /**
  1022. * xcan_rx_poll - Poll routine for rx packets (NAPI)
  1023. * @napi: napi structure pointer
  1024. * @quota: Max number of rx packets to be processed.
  1025. *
  1026. * This is the poll routine for rx part.
  1027. * It will process the packets maximux quota value.
  1028. *
  1029. * Return: number of packets received
  1030. */
  1031. static int xcan_rx_poll(struct napi_struct *napi, int quota)
  1032. {
  1033. struct net_device *ndev = napi->dev;
  1034. struct xcan_priv *priv = netdev_priv(ndev);
  1035. u32 ier;
  1036. int work_done = 0;
  1037. int frame_offset;
  1038. while ((frame_offset = xcan_rx_fifo_get_next_frame(priv)) >= 0 &&
  1039. (work_done < quota)) {
  1040. if (xcan_rx_int_mask(priv) & XCAN_IXR_RXOK_MASK)
  1041. work_done += xcanfd_rx(ndev, frame_offset);
  1042. else
  1043. work_done += xcan_rx(ndev, frame_offset);
  1044. if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
  1045. /* increment read index */
  1046. priv->write_reg(priv, XCAN_FSR_OFFSET,
  1047. XCAN_FSR_IRI_MASK);
  1048. else
  1049. /* clear rx-not-empty (will actually clear only if
  1050. * empty)
  1051. */
  1052. priv->write_reg(priv, XCAN_ICR_OFFSET,
  1053. XCAN_IXR_RXNEMP_MASK);
  1054. }
  1055. if (work_done) {
  1056. can_led_event(ndev, CAN_LED_EVENT_RX);
  1057. xcan_update_error_state_after_rxtx(ndev);
  1058. }
  1059. if (work_done < quota) {
  1060. napi_complete_done(napi, work_done);
  1061. ier = priv->read_reg(priv, XCAN_IER_OFFSET);
  1062. ier |= xcan_rx_int_mask(priv);
  1063. priv->write_reg(priv, XCAN_IER_OFFSET, ier);
  1064. }
  1065. return work_done;
  1066. }
  1067. /**
  1068. * xcan_tx_interrupt - Tx Done Isr
  1069. * @ndev: net_device pointer
  1070. * @isr: Interrupt status register value
  1071. */
  1072. static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
  1073. {
  1074. struct xcan_priv *priv = netdev_priv(ndev);
  1075. struct net_device_stats *stats = &ndev->stats;
  1076. unsigned int frames_in_fifo;
  1077. int frames_sent = 1; /* TXOK => at least 1 frame was sent */
  1078. unsigned long flags;
  1079. int retries = 0;
  1080. /* Synchronize with xmit as we need to know the exact number
  1081. * of frames in the FIFO to stay in sync due to the TXFEMP
  1082. * handling.
  1083. * This also prevents a race between netif_wake_queue() and
  1084. * netif_stop_queue().
  1085. */
  1086. spin_lock_irqsave(&priv->tx_lock, flags);
  1087. frames_in_fifo = priv->tx_head - priv->tx_tail;
  1088. if (WARN_ON_ONCE(frames_in_fifo == 0)) {
  1089. /* clear TXOK anyway to avoid getting back here */
  1090. priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
  1091. spin_unlock_irqrestore(&priv->tx_lock, flags);
  1092. return;
  1093. }
  1094. /* Check if 2 frames were sent (TXOK only means that at least 1
  1095. * frame was sent).
  1096. */
  1097. if (frames_in_fifo > 1) {
  1098. WARN_ON(frames_in_fifo > priv->tx_max);
  1099. /* Synchronize TXOK and isr so that after the loop:
  1100. * (1) isr variable is up-to-date at least up to TXOK clear
  1101. * time. This avoids us clearing a TXOK of a second frame
  1102. * but not noticing that the FIFO is now empty and thus
  1103. * marking only a single frame as sent.
  1104. * (2) No TXOK is left. Having one could mean leaving a
  1105. * stray TXOK as we might process the associated frame
  1106. * via TXFEMP handling as we read TXFEMP *after* TXOK
  1107. * clear to satisfy (1).
  1108. */
  1109. while ((isr & XCAN_IXR_TXOK_MASK) &&
  1110. !WARN_ON(++retries == 100)) {
  1111. priv->write_reg(priv, XCAN_ICR_OFFSET,
  1112. XCAN_IXR_TXOK_MASK);
  1113. isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
  1114. }
  1115. if (isr & XCAN_IXR_TXFEMP_MASK) {
  1116. /* nothing in FIFO anymore */
  1117. frames_sent = frames_in_fifo;
  1118. }
  1119. } else {
  1120. /* single frame in fifo, just clear TXOK */
  1121. priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
  1122. }
  1123. while (frames_sent--) {
  1124. stats->tx_bytes += can_get_echo_skb(ndev, priv->tx_tail %
  1125. priv->tx_max);
  1126. priv->tx_tail++;
  1127. stats->tx_packets++;
  1128. }
  1129. netif_wake_queue(ndev);
  1130. spin_unlock_irqrestore(&priv->tx_lock, flags);
  1131. can_led_event(ndev, CAN_LED_EVENT_TX);
  1132. xcan_update_error_state_after_rxtx(ndev);
  1133. }
  1134. /**
  1135. * xcan_interrupt - CAN Isr
  1136. * @irq: irq number
  1137. * @dev_id: device id poniter
  1138. *
  1139. * This is the xilinx CAN Isr. It checks for the type of interrupt
  1140. * and invokes the corresponding ISR.
  1141. *
  1142. * Return:
  1143. * IRQ_NONE - If CAN device is in sleep mode, IRQ_HANDLED otherwise
  1144. */
  1145. static irqreturn_t xcan_interrupt(int irq, void *dev_id)
  1146. {
  1147. struct net_device *ndev = (struct net_device *)dev_id;
  1148. struct xcan_priv *priv = netdev_priv(ndev);
  1149. u32 isr, ier;
  1150. u32 isr_errors;
  1151. u32 rx_int_mask = xcan_rx_int_mask(priv);
  1152. /* Get the interrupt status from Xilinx CAN */
  1153. isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
  1154. if (!isr)
  1155. return IRQ_NONE;
  1156. /* Check for the type of interrupt and Processing it */
  1157. if (isr & (XCAN_IXR_SLP_MASK | XCAN_IXR_WKUP_MASK)) {
  1158. priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_SLP_MASK |
  1159. XCAN_IXR_WKUP_MASK));
  1160. xcan_state_interrupt(ndev, isr);
  1161. }
  1162. /* Check for Tx interrupt and Processing it */
  1163. if (isr & XCAN_IXR_TXOK_MASK)
  1164. xcan_tx_interrupt(ndev, isr);
  1165. /* Check for the type of error interrupt and Processing it */
  1166. isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
  1167. XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK |
  1168. XCAN_IXR_RXMNF_MASK);
  1169. if (isr_errors) {
  1170. priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
  1171. xcan_err_interrupt(ndev, isr);
  1172. }
  1173. /* Check for the type of receive interrupt and Processing it */
  1174. if (isr & rx_int_mask) {
  1175. ier = priv->read_reg(priv, XCAN_IER_OFFSET);
  1176. ier &= ~rx_int_mask;
  1177. priv->write_reg(priv, XCAN_IER_OFFSET, ier);
  1178. napi_schedule(&priv->napi);
  1179. }
  1180. return IRQ_HANDLED;
  1181. }
  1182. /**
  1183. * xcan_chip_stop - Driver stop routine
  1184. * @ndev: Pointer to net_device structure
  1185. *
  1186. * This is the drivers stop routine. It will disable the
  1187. * interrupts and put the device into configuration mode.
  1188. */
  1189. static void xcan_chip_stop(struct net_device *ndev)
  1190. {
  1191. struct xcan_priv *priv = netdev_priv(ndev);
  1192. /* Disable interrupts and leave the can in configuration mode */
  1193. set_reset_mode(ndev);
  1194. priv->can.state = CAN_STATE_STOPPED;
  1195. }
  1196. /**
  1197. * xcan_open - Driver open routine
  1198. * @ndev: Pointer to net_device structure
  1199. *
  1200. * This is the driver open routine.
  1201. * Return: 0 on success and failure value on error
  1202. */
  1203. static int xcan_open(struct net_device *ndev)
  1204. {
  1205. struct xcan_priv *priv = netdev_priv(ndev);
  1206. int ret;
  1207. ret = pm_runtime_get_sync(priv->dev);
  1208. if (ret < 0) {
  1209. netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
  1210. __func__, ret);
  1211. goto err;
  1212. }
  1213. ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags,
  1214. ndev->name, ndev);
  1215. if (ret < 0) {
  1216. netdev_err(ndev, "irq allocation for CAN failed\n");
  1217. goto err;
  1218. }
  1219. /* Set chip into reset mode */
  1220. ret = set_reset_mode(ndev);
  1221. if (ret < 0) {
  1222. netdev_err(ndev, "mode resetting failed!\n");
  1223. goto err_irq;
  1224. }
  1225. /* Common open */
  1226. ret = open_candev(ndev);
  1227. if (ret)
  1228. goto err_irq;
  1229. ret = xcan_chip_start(ndev);
  1230. if (ret < 0) {
  1231. netdev_err(ndev, "xcan_chip_start failed!\n");
  1232. goto err_candev;
  1233. }
  1234. can_led_event(ndev, CAN_LED_EVENT_OPEN);
  1235. napi_enable(&priv->napi);
  1236. netif_start_queue(ndev);
  1237. return 0;
  1238. err_candev:
  1239. close_candev(ndev);
  1240. err_irq:
  1241. free_irq(ndev->irq, ndev);
  1242. err:
  1243. pm_runtime_put(priv->dev);
  1244. return ret;
  1245. }
  1246. /**
  1247. * xcan_close - Driver close routine
  1248. * @ndev: Pointer to net_device structure
  1249. *
  1250. * Return: 0 always
  1251. */
  1252. static int xcan_close(struct net_device *ndev)
  1253. {
  1254. struct xcan_priv *priv = netdev_priv(ndev);
  1255. netif_stop_queue(ndev);
  1256. napi_disable(&priv->napi);
  1257. xcan_chip_stop(ndev);
  1258. free_irq(ndev->irq, ndev);
  1259. close_candev(ndev);
  1260. can_led_event(ndev, CAN_LED_EVENT_STOP);
  1261. pm_runtime_put(priv->dev);
  1262. return 0;
  1263. }
  1264. /**
  1265. * xcan_get_berr_counter - error counter routine
  1266. * @ndev: Pointer to net_device structure
  1267. * @bec: Pointer to can_berr_counter structure
  1268. *
  1269. * This is the driver error counter routine.
  1270. * Return: 0 on success and failure value on error
  1271. */
  1272. static int xcan_get_berr_counter(const struct net_device *ndev,
  1273. struct can_berr_counter *bec)
  1274. {
  1275. struct xcan_priv *priv = netdev_priv(ndev);
  1276. int ret;
  1277. ret = pm_runtime_get_sync(priv->dev);
  1278. if (ret < 0) {
  1279. netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
  1280. __func__, ret);
  1281. pm_runtime_put(priv->dev);
  1282. return ret;
  1283. }
  1284. bec->txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
  1285. bec->rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
  1286. XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
  1287. pm_runtime_put(priv->dev);
  1288. return 0;
  1289. }
/* net_device callbacks implemented by this driver */
static const struct net_device_ops xcan_netdev_ops = {
	.ndo_open	= xcan_open,
	.ndo_stop	= xcan_close,
	.ndo_start_xmit	= xcan_start_xmit,
	.ndo_change_mtu	= can_change_mtu,
};
  1296. /**
  1297. * xcan_suspend - Suspend method for the driver
  1298. * @dev: Address of the device structure
  1299. *
  1300. * Put the driver into low power mode.
  1301. * Return: 0 on success and failure value on error
  1302. */
  1303. static int __maybe_unused xcan_suspend(struct device *dev)
  1304. {
  1305. struct net_device *ndev = dev_get_drvdata(dev);
  1306. if (netif_running(ndev)) {
  1307. netif_stop_queue(ndev);
  1308. netif_device_detach(ndev);
  1309. xcan_chip_stop(ndev);
  1310. }
  1311. return pm_runtime_force_suspend(dev);
  1312. }
  1313. /**
  1314. * xcan_resume - Resume from suspend
  1315. * @dev: Address of the device structure
  1316. *
  1317. * Resume operation after suspend.
  1318. * Return: 0 on success and failure value on error
  1319. */
  1320. static int __maybe_unused xcan_resume(struct device *dev)
  1321. {
  1322. struct net_device *ndev = dev_get_drvdata(dev);
  1323. int ret;
  1324. ret = pm_runtime_force_resume(dev);
  1325. if (ret) {
  1326. dev_err(dev, "pm_runtime_force_resume failed on resume\n");
  1327. return ret;
  1328. }
  1329. if (netif_running(ndev)) {
  1330. ret = xcan_chip_start(ndev);
  1331. if (ret) {
  1332. dev_err(dev, "xcan_chip_start failed on resume\n");
  1333. return ret;
  1334. }
  1335. netif_device_attach(ndev);
  1336. netif_start_queue(ndev);
  1337. }
  1338. return 0;
  1339. }
  1340. /**
  1341. * xcan_runtime_suspend - Runtime suspend method for the driver
  1342. * @dev: Address of the device structure
  1343. *
  1344. * Put the driver into low power mode.
  1345. * Return: 0 always
  1346. */
  1347. static int __maybe_unused xcan_runtime_suspend(struct device *dev)
  1348. {
  1349. struct net_device *ndev = dev_get_drvdata(dev);
  1350. struct xcan_priv *priv = netdev_priv(ndev);
  1351. clk_disable_unprepare(priv->bus_clk);
  1352. clk_disable_unprepare(priv->can_clk);
  1353. return 0;
  1354. }
  1355. /**
  1356. * xcan_runtime_resume - Runtime resume from suspend
  1357. * @dev: Address of the device structure
  1358. *
  1359. * Resume operation after suspend.
  1360. * Return: 0 on success and failure value on error
  1361. */
  1362. static int __maybe_unused xcan_runtime_resume(struct device *dev)
  1363. {
  1364. struct net_device *ndev = dev_get_drvdata(dev);
  1365. struct xcan_priv *priv = netdev_priv(ndev);
  1366. int ret;
  1367. ret = clk_prepare_enable(priv->bus_clk);
  1368. if (ret) {
  1369. dev_err(dev, "Cannot enable clock.\n");
  1370. return ret;
  1371. }
  1372. ret = clk_prepare_enable(priv->can_clk);
  1373. if (ret) {
  1374. dev_err(dev, "Cannot enable clock.\n");
  1375. clk_disable_unprepare(priv->bus_clk);
  1376. return ret;
  1377. }
  1378. return 0;
  1379. }
/* System-sleep and runtime PM hooks for the driver */
static const struct dev_pm_ops xcan_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(xcan_suspend, xcan_resume)
	SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL)
};
/* Zynq-7000 built-in CAN controller (classic CAN, TX FIFO with TXFEMP) */
static const struct xcan_devtype_data xcan_zynq_data = {
	.cantype = XZYNQ_CANPS,
	.flags = XCAN_FLAG_TXFEMP,
	.bittiming_const = &xcan_bittiming_const,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
	.bus_clk_name = "pclk",
};

/* AXI soft CAN core (classic CAN, plain TX FIFO) */
static const struct xcan_devtype_data xcan_axi_data = {
	.cantype = XAXI_CAN,
	.bittiming_const = &xcan_bittiming_const,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
	.bus_clk_name = "s_axi_aclk",
};

/* AXI CANFD 1.0 core: TX mailboxes and multi-buffer RX FIFO */
static const struct xcan_devtype_data xcan_canfd_data = {
	.cantype = XAXI_CANFD,
	.flags = XCAN_FLAG_EXT_FILTERS |
		 XCAN_FLAG_RXMNF |
		 XCAN_FLAG_TX_MAILBOXES |
		 XCAN_FLAG_RX_FIFO_MULTI,
	.bittiming_const = &xcan_bittiming_const_canfd,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
	.bus_clk_name = "s_axi_aclk",
};

/* AXI CANFD 2.0 core: as 1.0 but with a different FSR register layout */
static const struct xcan_devtype_data xcan_canfd2_data = {
	.cantype = XAXI_CANFD_2_0,
	.flags = XCAN_FLAG_EXT_FILTERS |
		 XCAN_FLAG_RXMNF |
		 XCAN_FLAG_TX_MAILBOXES |
		 XCAN_FLAG_CANFD_2 |
		 XCAN_FLAG_RX_FIFO_MULTI,
	.bittiming_const = &xcan_bittiming_const_canfd2,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
	.bus_clk_name = "s_axi_aclk",
};

/* Match table for OF platform binding */
static const struct of_device_id xcan_of_match[] = {
	{ .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data },
	{ .compatible = "xlnx,axi-can-1.00.a", .data = &xcan_axi_data },
	{ .compatible = "xlnx,canfd-1.0", .data = &xcan_canfd_data },
	{ .compatible = "xlnx,canfd-2.0", .data = &xcan_canfd2_data },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(of, xcan_of_match);
  1431. /**
  1432. * xcan_probe - Platform registration call
  1433. * @pdev: Handle to the platform device structure
  1434. *
  1435. * This function does all the memory allocation and registration for the CAN
  1436. * device.
  1437. *
  1438. * Return: 0 on success and failure value on error
  1439. */
  1440. static int xcan_probe(struct platform_device *pdev)
  1441. {
  1442. struct resource *res; /* IO mem resources */
  1443. struct net_device *ndev;
  1444. struct xcan_priv *priv;
  1445. const struct of_device_id *of_id;
  1446. const struct xcan_devtype_data *devtype = &xcan_axi_data;
  1447. void __iomem *addr;
  1448. int ret;
  1449. int rx_max, tx_max;
  1450. int hw_tx_max, hw_rx_max;
  1451. const char *hw_tx_max_property;
  1452. /* Get the virtual base address for the device */
  1453. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  1454. addr = devm_ioremap_resource(&pdev->dev, res);
  1455. if (IS_ERR(addr)) {
  1456. ret = PTR_ERR(addr);
  1457. goto err;
  1458. }
  1459. of_id = of_match_device(xcan_of_match, &pdev->dev);
  1460. if (of_id && of_id->data)
  1461. devtype = of_id->data;
  1462. hw_tx_max_property = devtype->flags & XCAN_FLAG_TX_MAILBOXES ?
  1463. "tx-mailbox-count" : "tx-fifo-depth";
  1464. ret = of_property_read_u32(pdev->dev.of_node, hw_tx_max_property,
  1465. &hw_tx_max);
  1466. if (ret < 0) {
  1467. dev_err(&pdev->dev, "missing %s property\n",
  1468. hw_tx_max_property);
  1469. goto err;
  1470. }
  1471. ret = of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth",
  1472. &hw_rx_max);
  1473. if (ret < 0) {
  1474. dev_err(&pdev->dev,
  1475. "missing rx-fifo-depth property (mailbox mode is not supported)\n");
  1476. goto err;
  1477. }
  1478. /* With TX FIFO:
  1479. *
  1480. * There is no way to directly figure out how many frames have been
  1481. * sent when the TXOK interrupt is processed. If TXFEMP
  1482. * is supported, we can have 2 frames in the FIFO and use TXFEMP
  1483. * to determine if 1 or 2 frames have been sent.
  1484. * Theoretically we should be able to use TXFWMEMP to determine up
  1485. * to 3 frames, but it seems that after putting a second frame in the
  1486. * FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less
  1487. * than 2 frames in FIFO) is set anyway with no TXOK (a frame was
  1488. * sent), which is not a sensible state - possibly TXFWMEMP is not
  1489. * completely synchronized with the rest of the bits?
  1490. *
  1491. * With TX mailboxes:
  1492. *
  1493. * HW sends frames in CAN ID priority order. To preserve FIFO ordering
  1494. * we submit frames one at a time.
  1495. */
  1496. if (!(devtype->flags & XCAN_FLAG_TX_MAILBOXES) &&
  1497. (devtype->flags & XCAN_FLAG_TXFEMP))
  1498. tx_max = min(hw_tx_max, 2);
  1499. else
  1500. tx_max = 1;
  1501. rx_max = hw_rx_max;
  1502. /* Create a CAN device instance */
  1503. ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
  1504. if (!ndev)
  1505. return -ENOMEM;
  1506. priv = netdev_priv(ndev);
  1507. priv->dev = &pdev->dev;
  1508. priv->can.bittiming_const = devtype->bittiming_const;
  1509. priv->can.do_set_mode = xcan_do_set_mode;
  1510. priv->can.do_get_berr_counter = xcan_get_berr_counter;
  1511. priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
  1512. CAN_CTRLMODE_BERR_REPORTING;
  1513. if (devtype->cantype == XAXI_CANFD)
  1514. priv->can.data_bittiming_const =
  1515. &xcan_data_bittiming_const_canfd;
  1516. if (devtype->cantype == XAXI_CANFD_2_0)
  1517. priv->can.data_bittiming_const =
  1518. &xcan_data_bittiming_const_canfd2;
  1519. if (devtype->cantype == XAXI_CANFD ||
  1520. devtype->cantype == XAXI_CANFD_2_0)
  1521. priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
  1522. priv->reg_base = addr;
  1523. priv->tx_max = tx_max;
  1524. priv->devtype = *devtype;
  1525. spin_lock_init(&priv->tx_lock);
  1526. /* Get IRQ for the device */
  1527. ndev->irq = platform_get_irq(pdev, 0);
  1528. ndev->flags |= IFF_ECHO; /* We support local echo */
  1529. platform_set_drvdata(pdev, ndev);
  1530. SET_NETDEV_DEV(ndev, &pdev->dev);
  1531. ndev->netdev_ops = &xcan_netdev_ops;
  1532. /* Getting the CAN can_clk info */
  1533. priv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
  1534. if (IS_ERR(priv->can_clk)) {
  1535. if (PTR_ERR(priv->can_clk) != -EPROBE_DEFER)
  1536. dev_err(&pdev->dev, "Device clock not found.\n");
  1537. ret = PTR_ERR(priv->can_clk);
  1538. goto err_free;
  1539. }
  1540. priv->bus_clk = devm_clk_get(&pdev->dev, devtype->bus_clk_name);
  1541. if (IS_ERR(priv->bus_clk)) {
  1542. dev_err(&pdev->dev, "bus clock not found\n");
  1543. ret = PTR_ERR(priv->bus_clk);
  1544. goto err_free;
  1545. }
  1546. priv->write_reg = xcan_write_reg_le;
  1547. priv->read_reg = xcan_read_reg_le;
  1548. pm_runtime_enable(&pdev->dev);
  1549. ret = pm_runtime_get_sync(&pdev->dev);
  1550. if (ret < 0) {
  1551. netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
  1552. __func__, ret);
  1553. goto err_disableclks;
  1554. }
  1555. if (priv->read_reg(priv, XCAN_SR_OFFSET) != XCAN_SR_CONFIG_MASK) {
  1556. priv->write_reg = xcan_write_reg_be;
  1557. priv->read_reg = xcan_read_reg_be;
  1558. }
  1559. priv->can.clock.freq = clk_get_rate(priv->can_clk);
  1560. netif_napi_add(ndev, &priv->napi, xcan_rx_poll, rx_max);
  1561. ret = register_candev(ndev);
  1562. if (ret) {
  1563. dev_err(&pdev->dev, "fail to register failed (err=%d)\n", ret);
  1564. goto err_disableclks;
  1565. }
  1566. devm_can_led_init(ndev);
  1567. pm_runtime_put(&pdev->dev);
  1568. if (priv->devtype.flags & XCAN_FLAG_CANFD_2) {
  1569. priv->write_reg(priv, XCAN_AFR_2_ID_OFFSET, 0x00000000);
  1570. priv->write_reg(priv, XCAN_AFR_2_MASK_OFFSET, 0x00000000);
  1571. }
  1572. netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx buffers: actual %d, using %d\n",
  1573. priv->reg_base, ndev->irq, priv->can.clock.freq,
  1574. hw_tx_max, priv->tx_max);
  1575. return 0;
  1576. err_disableclks:
  1577. pm_runtime_put(priv->dev);
  1578. pm_runtime_disable(&pdev->dev);
  1579. err_free:
  1580. free_candev(ndev);
  1581. err:
  1582. return ret;
  1583. }
  1584. /**
  1585. * xcan_remove - Unregister the device after releasing the resources
  1586. * @pdev: Handle to the platform device structure
  1587. *
  1588. * This function frees all the resources allocated to the device.
  1589. * Return: 0 always
  1590. */
  1591. static int xcan_remove(struct platform_device *pdev)
  1592. {
  1593. struct net_device *ndev = platform_get_drvdata(pdev);
  1594. struct xcan_priv *priv = netdev_priv(ndev);
  1595. unregister_candev(ndev);
  1596. pm_runtime_disable(&pdev->dev);
  1597. netif_napi_del(&priv->napi);
  1598. free_candev(ndev);
  1599. return 0;
  1600. }
  1601. static struct platform_driver xcan_driver = {
  1602. .probe = xcan_probe,
  1603. .remove = xcan_remove,
  1604. .driver = {
  1605. .name = DRIVER_NAME,
  1606. .pm = &xcan_dev_pm_ops,
  1607. .of_match_table = xcan_of_match,
  1608. },
  1609. };
/* Registers xcan_driver with the platform bus on module load and
 * unregisters it on unload (expands to module_init/module_exit).
 */
module_platform_driver(xcan_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Xilinx Inc");
MODULE_DESCRIPTION("Xilinx CAN interface");