/*
 * CAN bus driver for Bosch C_CAN controller
 *
 * Copyright (C) 2010 ST Microelectronics
 * Bhupesh Sharma <bhupesh.sharma@st.com>
 *
 * Borrowed heavily from the C_CAN driver originally written by:
 * Copyright (C) 2007
 * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de>
 * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch>
 *
 * TX and RX NAPI implementation has been borrowed from at91 CAN driver
 * written by:
 * Copyright
 * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de>
 * (C) 2008, 2009 by Marc Kleine-Budde <kernel@pengutronix.de>
 *
 * Bosch C_CAN controller is compliant to CAN protocol version 2.0 part A and B.
 * Bosch C_CAN user manual can be obtained from:
 * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/
 * users_manual_c_can.pdf
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/pinctrl/consumer.h>

#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/can/led.h>

#include "c_can.h"

/* Number of interface registers */
#define IF_ENUM_REG_LEN 11
#define C_CAN_IFACE(reg, iface) (C_CAN_IF1_##reg + (iface) * IF_ENUM_REG_LEN)

/* control extension register D_CAN specific */
#define CONTROL_EX_PDR BIT(8)

/* control register */
#define CONTROL_TEST BIT(7)
#define CONTROL_CCE BIT(6)
#define CONTROL_DISABLE_AR BIT(5)
#define CONTROL_ENABLE_AR (0 << 5)
#define CONTROL_EIE BIT(3)
#define CONTROL_SIE BIT(2)
#define CONTROL_IE BIT(1)
#define CONTROL_INIT BIT(0)

#define CONTROL_IRQMSK (CONTROL_EIE | CONTROL_IE | CONTROL_SIE)

/* test register */
#define TEST_RX BIT(7)
#define TEST_TX1 BIT(6)
#define TEST_TX2 BIT(5)
#define TEST_LBACK BIT(4)
#define TEST_SILENT BIT(3)
#define TEST_BASIC BIT(2)

/* status register */
#define STATUS_PDA BIT(10)
#define STATUS_BOFF BIT(7)
#define STATUS_EWARN BIT(6)
#define STATUS_EPASS BIT(5)
#define STATUS_RXOK BIT(4)
#define STATUS_TXOK BIT(3)

/* error counter register */
#define ERR_CNT_TEC_MASK 0xff
#define ERR_CNT_TEC_SHIFT 0
#define ERR_CNT_REC_SHIFT 8
#define ERR_CNT_REC_MASK (0x7f << ERR_CNT_REC_SHIFT)
#define ERR_CNT_RP_SHIFT 15
#define ERR_CNT_RP_MASK (0x1 << ERR_CNT_RP_SHIFT)

/* bit-timing register */
#define BTR_BRP_MASK 0x3f
#define BTR_BRP_SHIFT 0
#define BTR_SJW_SHIFT 6
#define BTR_SJW_MASK (0x3 << BTR_SJW_SHIFT)
#define BTR_TSEG1_SHIFT 8
#define BTR_TSEG1_MASK (0xf << BTR_TSEG1_SHIFT)
#define BTR_TSEG2_SHIFT 12
#define BTR_TSEG2_MASK (0x7 << BTR_TSEG2_SHIFT)

/* brp extension register */
#define BRP_EXT_BRPE_MASK 0x0f
#define BRP_EXT_BRPE_SHIFT 0

/* IFx command request */
#define IF_COMR_BUSY BIT(15)

/* IFx command mask */
#define IF_COMM_WR BIT(7)
#define IF_COMM_MASK BIT(6)
#define IF_COMM_ARB BIT(5)
#define IF_COMM_CONTROL BIT(4)
#define IF_COMM_CLR_INT_PND BIT(3)
#define IF_COMM_TXRQST BIT(2)
#define IF_COMM_CLR_NEWDAT IF_COMM_TXRQST
#define IF_COMM_DATAA BIT(1)
#define IF_COMM_DATAB BIT(0)

/* TX buffer setup */
#define IF_COMM_TX (IF_COMM_ARB | IF_COMM_CONTROL | \
                    IF_COMM_TXRQST | \
                    IF_COMM_DATAA | IF_COMM_DATAB)

/* For the low buffers we clear the interrupt bit, but keep newdat */
#define IF_COMM_RCV_LOW (IF_COMM_MASK | IF_COMM_ARB | \
                         IF_COMM_CONTROL | IF_COMM_CLR_INT_PND | \
                         IF_COMM_DATAA | IF_COMM_DATAB)

/* For the high buffers we clear the interrupt bit and newdat */
#define IF_COMM_RCV_HIGH (IF_COMM_RCV_LOW | IF_COMM_CLR_NEWDAT)

/* Receive setup of message objects */
#define IF_COMM_RCV_SETUP (IF_COMM_MASK | IF_COMM_ARB | IF_COMM_CONTROL)

/* Invalidation of message objects */
#define IF_COMM_INVAL (IF_COMM_ARB | IF_COMM_CONTROL)

/* IFx arbitration */
#define IF_ARB_MSGVAL BIT(31)
#define IF_ARB_MSGXTD BIT(30)
#define IF_ARB_TRANSMIT BIT(29)

/* IFx message control */
#define IF_MCONT_NEWDAT BIT(15)
#define IF_MCONT_MSGLST BIT(14)
#define IF_MCONT_INTPND BIT(13)
#define IF_MCONT_UMASK BIT(12)
#define IF_MCONT_TXIE BIT(11)
#define IF_MCONT_RXIE BIT(10)
#define IF_MCONT_RMTEN BIT(9)
#define IF_MCONT_TXRQST BIT(8)
#define IF_MCONT_EOB BIT(7)
#define IF_MCONT_DLC_MASK 0xf

#define IF_MCONT_RCV (IF_MCONT_RXIE | IF_MCONT_UMASK)
#define IF_MCONT_RCV_EOB (IF_MCONT_RCV | IF_MCONT_EOB)

#define IF_MCONT_TX (IF_MCONT_TXIE | IF_MCONT_EOB)

/*
 * Use IF1 for RX and IF2 for TX
 */
#define IF_RX 0
#define IF_TX 1

/* minimum timeout for checking BUSY status */
#define MIN_TIMEOUT_VALUE 6

/* Wait for ~1 sec for INIT bit */
#define INIT_WAIT_MS 1000

/* napi related */
#define C_CAN_NAPI_WEIGHT C_CAN_MSG_OBJ_RX_NUM

/* c_can lec values */
enum c_can_lec_type {
        LEC_NO_ERROR = 0,
        LEC_STUFF_ERROR,
        LEC_FORM_ERROR,
        LEC_ACK_ERROR,
        LEC_BIT1_ERROR,
        LEC_BIT0_ERROR,
        LEC_CRC_ERROR,
        LEC_UNUSED,
        LEC_MASK = LEC_UNUSED,
};

/*
 * c_can error types:
 * Bus errors (BUS_OFF, ERROR_WARNING, ERROR_PASSIVE) are supported
 */
enum c_can_bus_error_types {
        C_CAN_NO_ERROR = 0,
        C_CAN_BUS_OFF,
        C_CAN_ERROR_WARNING,
        C_CAN_ERROR_PASSIVE,
};

static const struct can_bittiming_const c_can_bittiming_const = {
        .name = KBUILD_MODNAME,
        .tseg1_min = 2,         /* Time segment 1 = prop_seg + phase_seg1 */
        .tseg1_max = 16,
        .tseg2_min = 1,         /* Time segment 2 = phase_seg2 */
        .tseg2_max = 8,
        .sjw_max = 4,
        .brp_min = 1,
        .brp_max = 1024,        /* 6-bit BRP field + 4-bit BRPE field */
        .brp_inc = 1,
};

static inline void c_can_pm_runtime_enable(const struct c_can_priv *priv)
{
        if (priv->device)
                pm_runtime_enable(priv->device);
}

static inline void c_can_pm_runtime_disable(const struct c_can_priv *priv)
{
        if (priv->device)
                pm_runtime_disable(priv->device);
}

static inline void c_can_pm_runtime_get_sync(const struct c_can_priv *priv)
{
        if (priv->device)
                pm_runtime_get_sync(priv->device);
}

static inline void c_can_pm_runtime_put_sync(const struct c_can_priv *priv)
{
        if (priv->device)
                pm_runtime_put_sync(priv->device);
}

static inline void c_can_reset_ram(const struct c_can_priv *priv, bool enable)
{
        if (priv->raminit)
                priv->raminit(priv, enable);
}

static void c_can_irq_control(struct c_can_priv *priv, bool enable)
{
        u32 ctrl = priv->read_reg(priv, C_CAN_CTRL_REG) & ~CONTROL_IRQMSK;

        if (enable)
                ctrl |= CONTROL_IRQMSK;

        priv->write_reg(priv, C_CAN_CTRL_REG, ctrl);
}

static void c_can_obj_update(struct net_device *dev, int iface, u32 cmd, u32 obj)
{
        struct c_can_priv *priv = netdev_priv(dev);
        int cnt, reg = C_CAN_IFACE(COMREQ_REG, iface);

        priv->write_reg32(priv, reg, (cmd << 16) | obj);

        for (cnt = MIN_TIMEOUT_VALUE; cnt; cnt--) {
                if (!(priv->read_reg(priv, reg) & IF_COMR_BUSY))
                        return;
                udelay(1);
        }
        netdev_err(dev, "Updating object timed out\n");
}

static inline void c_can_object_get(struct net_device *dev, int iface,
                                    u32 obj, u32 cmd)
{
        c_can_obj_update(dev, iface, cmd, obj);
}

static inline void c_can_object_put(struct net_device *dev, int iface,
                                    u32 obj, u32 cmd)
{
        c_can_obj_update(dev, iface, cmd | IF_COMM_WR, obj);
}

/*
 * Note: According to documentation clearing TXIE while MSGVAL is set
 * is not allowed, but works nicely on C/DCAN. And that lowers the I/O
 * load significantly.
 */
static void c_can_inval_tx_object(struct net_device *dev, int iface, int obj)
{
        struct c_can_priv *priv = netdev_priv(dev);

        priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), 0);
        c_can_object_put(dev, iface, obj, IF_COMM_INVAL);
}

static void c_can_inval_msg_object(struct net_device *dev, int iface, int obj)
{
        struct c_can_priv *priv = netdev_priv(dev);

        priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), 0);
        priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), 0);
        c_can_inval_tx_object(dev, iface, obj);
}

static void c_can_setup_tx_object(struct net_device *dev, int iface,
                                  struct can_frame *frame, int idx)
{
        struct c_can_priv *priv = netdev_priv(dev);
        u16 ctrl = IF_MCONT_TX | frame->can_dlc;
        bool rtr = frame->can_id & CAN_RTR_FLAG;
        u32 arb = IF_ARB_MSGVAL;
        int i;

        if (frame->can_id & CAN_EFF_FLAG) {
                arb |= frame->can_id & CAN_EFF_MASK;
                arb |= IF_ARB_MSGXTD;
        } else {
                arb |= (frame->can_id & CAN_SFF_MASK) << 18;
        }

        if (!rtr)
                arb |= IF_ARB_TRANSMIT;

        /*
         * If we change the DIR bit, we need to invalidate the buffer
         * first, i.e. clear the MSGVAL flag in the arbiter.
         */
        if (rtr != (bool)test_bit(idx, &priv->tx_dir)) {
                u32 obj = idx + C_CAN_MSG_OBJ_TX_FIRST;

                c_can_inval_msg_object(dev, iface, obj);
                change_bit(idx, &priv->tx_dir);
        }

        priv->write_reg32(priv, C_CAN_IFACE(ARB1_REG, iface), arb);

        priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);

        for (i = 0; i < frame->can_dlc; i += 2) {
                priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2,
                                frame->data[i] | (frame->data[i + 1] << 8));
        }
}
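
/*
 * Payload bytes are written to the IFx data registers 16 bits at a time,
 * two CAN data bytes per register in little-endian order: for example,
 * frame->data[0] = 0x11 and frame->data[1] = 0x22 end up as 0x2211 in
 * the first data register. c_can_read_msg_object() undoes the same
 * packing on reception.
 */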

static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev,
                                                       int iface)
{
        int i;

        for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++)
                c_can_object_get(dev, iface, i, IF_COMM_CLR_NEWDAT);
}

static int c_can_handle_lost_msg_obj(struct net_device *dev,
                                     int iface, int objno, u32 ctrl)
{
        struct net_device_stats *stats = &dev->stats;
        struct c_can_priv *priv = netdev_priv(dev);
        struct can_frame *frame;
        struct sk_buff *skb;

        ctrl &= ~(IF_MCONT_MSGLST | IF_MCONT_INTPND | IF_MCONT_NEWDAT);
        priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
        c_can_object_put(dev, iface, objno, IF_COMM_CONTROL);

        stats->rx_errors++;
        stats->rx_over_errors++;

        /* create an error msg */
        skb = alloc_can_err_skb(dev, &frame);
        if (unlikely(!skb))
                return 0;

        frame->can_id |= CAN_ERR_CRTL;
        frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;

        netif_receive_skb(skb);
        return 1;
}

static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl)
{
        struct net_device_stats *stats = &dev->stats;
        struct c_can_priv *priv = netdev_priv(dev);
        struct can_frame *frame;
        struct sk_buff *skb;
        u32 arb, data;

        skb = alloc_can_skb(dev, &frame);
        if (!skb) {
                stats->rx_dropped++;
                return -ENOMEM;
        }

        frame->can_dlc = get_can_dlc(ctrl & 0x0F);

        arb = priv->read_reg32(priv, C_CAN_IFACE(ARB1_REG, iface));

        if (arb & IF_ARB_MSGXTD)
                frame->can_id = (arb & CAN_EFF_MASK) | CAN_EFF_FLAG;
        else
                frame->can_id = (arb >> 18) & CAN_SFF_MASK;

        if (arb & IF_ARB_TRANSMIT) {
                frame->can_id |= CAN_RTR_FLAG;
        } else {
                int i, dreg = C_CAN_IFACE(DATA1_REG, iface);

                for (i = 0; i < frame->can_dlc; i += 2, dreg++) {
                        data = priv->read_reg(priv, dreg);
                        frame->data[i] = data;
                        frame->data[i + 1] = data >> 8;
                }
        }

        stats->rx_packets++;
        stats->rx_bytes += frame->can_dlc;

        netif_receive_skb(skb);
        return 0;
}

static void c_can_setup_receive_object(struct net_device *dev, int iface,
                                       u32 obj, u32 mask, u32 id, u32 mcont)
{
        struct c_can_priv *priv = netdev_priv(dev);

        mask |= BIT(29);
        priv->write_reg32(priv, C_CAN_IFACE(MASK1_REG, iface), mask);

        id |= IF_ARB_MSGVAL;
        priv->write_reg32(priv, C_CAN_IFACE(ARB1_REG, iface), id);

        priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), mcont);
        c_can_object_put(dev, iface, obj, IF_COMM_RCV_SETUP);
}

static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
                                    struct net_device *dev)
{
        struct can_frame *frame = (struct can_frame *)skb->data;
        struct c_can_priv *priv = netdev_priv(dev);
        u32 idx, obj;

        if (can_dropped_invalid_skb(dev, skb))
                return NETDEV_TX_OK;
        /*
         * This is not a FIFO. C/D_CAN sends out the buffers
         * prioritized. The lowest buffer number wins.
         */
        idx = fls(atomic_read(&priv->tx_active));
        obj = idx + C_CAN_MSG_OBJ_TX_FIRST;

        /* If this is the last buffer, stop the xmit queue */
        if (idx == C_CAN_MSG_OBJ_TX_NUM - 1)
                netif_stop_queue(dev);
        /*
         * Store the message in the interface so we can call
         * can_put_echo_skb(). We must do this before we enable
         * transmit as we might race against do_tx().
         */
        c_can_setup_tx_object(dev, IF_TX, frame, idx);
        priv->dlc[idx] = frame->can_dlc;
        can_put_echo_skb(skb, dev, idx);

        /* Update the active bits */
        atomic_add((1 << idx), &priv->tx_active);
        /* Start transmission */
        c_can_object_put(dev, IF_TX, obj, IF_COMM_TX);

        return NETDEV_TX_OK;
}
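
/*
 * tx_active is a bitmask of TX buffers currently in flight, so fls()
 * always yields the next free slot: with nothing pending fls(0) == 0
 * and slot 0 is used; with slots 0 and 1 pending tx_active == 0x3,
 * fls() == 2 and slot 2 is used next. Since the hardware transmits the
 * lowest-numbered object first, allocating slots in increasing order
 * also keeps queued frames in their original order on the wire.
 */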

static int c_can_wait_for_ctrl_init(struct net_device *dev,
                                    struct c_can_priv *priv, u32 init)
{
        int retry = 0;

        while (init != (priv->read_reg(priv, C_CAN_CTRL_REG) & CONTROL_INIT)) {
                udelay(10);
                if (retry++ > 1000) {
                        netdev_err(dev, "CCTRL: set CONTROL_INIT failed\n");
                        return -EIO;
                }
        }
        return 0;
}

static int c_can_set_bittiming(struct net_device *dev)
{
        unsigned int reg_btr, reg_brpe, ctrl_save;
        u8 brp, brpe, sjw, tseg1, tseg2;
        u32 ten_bit_brp;
        struct c_can_priv *priv = netdev_priv(dev);
        const struct can_bittiming *bt = &priv->can.bittiming;
        int res;

        /* c_can provides a 6-bit brp and 4-bit brpe fields */
        ten_bit_brp = bt->brp - 1;
        brp = ten_bit_brp & BTR_BRP_MASK;
        brpe = ten_bit_brp >> 6;

        sjw = bt->sjw - 1;
        tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
        tseg2 = bt->phase_seg2 - 1;
        reg_btr = brp | (sjw << BTR_SJW_SHIFT) | (tseg1 << BTR_TSEG1_SHIFT) |
                  (tseg2 << BTR_TSEG2_SHIFT);
        reg_brpe = brpe & BRP_EXT_BRPE_MASK;

        netdev_info(dev,
                    "setting BTR=%04x BRPE=%04x\n", reg_btr, reg_brpe);

        ctrl_save = priv->read_reg(priv, C_CAN_CTRL_REG);
        ctrl_save &= ~CONTROL_INIT;
        priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_CCE | CONTROL_INIT);
        res = c_can_wait_for_ctrl_init(dev, priv, CONTROL_INIT);
        if (res)
                return res;

        priv->write_reg(priv, C_CAN_BTR_REG, reg_btr);
        priv->write_reg(priv, C_CAN_BRPEXT_REG, reg_brpe);
        priv->write_reg(priv, C_CAN_CTRL_REG, ctrl_save);

        return c_can_wait_for_ctrl_init(dev, priv, 0);
}
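
/*
 * Worked example for the BRP split above: a prescaler of bt->brp = 100
 * gives ten_bit_brp = 99, so the BTR BRP field holds 99 & 0x3f = 0x23
 * and the BRPEXT register holds 99 >> 6 = 0x1. The hardware recombines
 * both fields into a 10-bit prescaler, which is why brp_max in
 * c_can_bittiming_const can be 1024 even though BTR itself only has
 * six BRP bits.
 */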

/*
 * Configure C_CAN message objects for Tx and Rx purposes:
 * C_CAN provides a total of 32 message objects that can be configured
 * either for Tx or Rx purposes. Here the first 16 message objects are used as
 * a reception FIFO. The end of reception FIFO is signified by the EoB bit
 * being SET. The remaining 16 message objects are kept aside for Tx purposes.
 * See user guide document for further details on configuring message
 * objects.
 */
static void c_can_configure_msg_objects(struct net_device *dev)
{
        int i;

        /* first invalidate all message objects */
        for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_NO_OF_OBJECTS; i++)
                c_can_inval_msg_object(dev, IF_RX, i);

        /* setup receive message objects */
        for (i = C_CAN_MSG_OBJ_RX_FIRST; i < C_CAN_MSG_OBJ_RX_LAST; i++)
                c_can_setup_receive_object(dev, IF_RX, i, 0, 0, IF_MCONT_RCV);

        c_can_setup_receive_object(dev, IF_RX, C_CAN_MSG_OBJ_RX_LAST, 0, 0,
                                   IF_MCONT_RCV_EOB);
}
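
/*
 * Assuming the usual partitioning from c_can.h (C_CAN_MSG_OBJ_RX_FIRST
 * = 1 and 16 objects each for RX and TX), this leaves message objects
 * 1..16 acting as the receive FIFO, with the last one carrying the EoB
 * marker, and objects 17..32 reserved for transmit. Both mask and id
 * are passed as 0 here, so every CAN identifier is accepted.
 */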

/*
 * Configure C_CAN chip:
 * - enable/disable auto-retransmission
 * - set operating mode
 * - configure message objects
 */
static int c_can_chip_config(struct net_device *dev)
{
        struct c_can_priv *priv = netdev_priv(dev);

        /* enable automatic retransmission */
        priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_ENABLE_AR);

        if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) &&
            (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) {
                /* loopback + silent mode : useful for hot self-test */
                priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST);
                priv->write_reg(priv, C_CAN_TEST_REG, TEST_LBACK | TEST_SILENT);
        } else if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
                /* loopback mode : useful for self-test function */
                priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST);
                priv->write_reg(priv, C_CAN_TEST_REG, TEST_LBACK);
        } else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
                /* silent mode : bus-monitoring mode */
                priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST);
                priv->write_reg(priv, C_CAN_TEST_REG, TEST_SILENT);
        }

        /* configure message objects */
        c_can_configure_msg_objects(dev);

        /* set a `lec` value so that we can check for updates later */
        priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);

        /* Clear all internal status */
        atomic_set(&priv->tx_active, 0);
        priv->rxmasked = 0;
        priv->tx_dir = 0;

        /* set bittiming params */
        return c_can_set_bittiming(dev);
}

static int c_can_start(struct net_device *dev)
{
        struct c_can_priv *priv = netdev_priv(dev);
        int err;

        /* basic c_can configuration */
        err = c_can_chip_config(dev);
        if (err)
                return err;

        /* Setup the command for new messages */
        priv->comm_rcv_high = priv->type != BOSCH_D_CAN ?
                IF_COMM_RCV_LOW : IF_COMM_RCV_HIGH;

        priv->can.state = CAN_STATE_ERROR_ACTIVE;

        /* activate pins */
        pinctrl_pm_select_default_state(dev->dev.parent);

        return 0;
}

static void c_can_stop(struct net_device *dev)
{
        struct c_can_priv *priv = netdev_priv(dev);

        c_can_irq_control(priv, false);

        /* put ctrl to init on stop to end ongoing transmission */
        priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_INIT);

        /* deactivate pins */
        pinctrl_pm_select_sleep_state(dev->dev.parent);
        priv->can.state = CAN_STATE_STOPPED;
}

static int c_can_set_mode(struct net_device *dev, enum can_mode mode)
{
        struct c_can_priv *priv = netdev_priv(dev);
        int err;

        switch (mode) {
        case CAN_MODE_START:
                err = c_can_start(dev);
                if (err)
                        return err;
                netif_wake_queue(dev);
                c_can_irq_control(priv, true);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}

static int __c_can_get_berr_counter(const struct net_device *dev,
                                    struct can_berr_counter *bec)
{
        unsigned int reg_err_counter;
        struct c_can_priv *priv = netdev_priv(dev);

        reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
        bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >>
                     ERR_CNT_REC_SHIFT;
        bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK;

        return 0;
}

static int c_can_get_berr_counter(const struct net_device *dev,
                                  struct can_berr_counter *bec)
{
        struct c_can_priv *priv = netdev_priv(dev);
        int err;

        c_can_pm_runtime_get_sync(priv);
        err = __c_can_get_berr_counter(dev, bec);
        c_can_pm_runtime_put_sync(priv);

        return err;
}

static void c_can_do_tx(struct net_device *dev)
{
        struct c_can_priv *priv = netdev_priv(dev);
        struct net_device_stats *stats = &dev->stats;
        u32 idx, obj, pkts = 0, bytes = 0, pend, clr;

        clr = pend = priv->read_reg(priv, C_CAN_INTPND2_REG);

        while ((idx = ffs(pend))) {
                idx--;
                pend &= ~(1 << idx);
                obj = idx + C_CAN_MSG_OBJ_TX_FIRST;
                c_can_inval_tx_object(dev, IF_RX, obj);
                can_get_echo_skb(dev, idx);
                bytes += priv->dlc[idx];
                pkts++;
        }

        /* Clear the bits in the tx_active mask */
        atomic_sub(clr, &priv->tx_active);

        if (clr & (1 << (C_CAN_MSG_OBJ_TX_NUM - 1)))
                netif_wake_queue(dev);

        if (pkts) {
                stats->tx_bytes += bytes;
                stats->tx_packets += pkts;
                can_led_event(dev, CAN_LED_EVENT_TX);
        }
}
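
/*
 * atomic_sub(clr, ...) acts as a bitwise clear here because every bit
 * set in clr was previously set in tx_active by c_can_start_xmit(), so
 * the subtraction cannot borrow across bit positions. The queue is only
 * woken once the highest TX object completes, i.e. when the whole block
 * of in-flight buffers has drained and slot allocation restarts at 0.
 */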

/*
 * If we have a gap in the pending bits, that means we either
 * raced with the hardware or failed to readout all upper
 * objects in the last run due to quota limit.
 */
static u32 c_can_adjust_pending(u32 pend)
{
        u32 weight, lasts;

        if (pend == RECEIVE_OBJECT_BITS)
                return pend;

        /*
         * If the last set bit is larger than the number of pending
         * bits we have a gap.
         */
        weight = hweight32(pend);
        lasts = fls(pend);

        /* If the bits are linear, nothing to do */
        if (lasts == weight)
                return pend;

        /*
         * Find the first set bit after the gap. We walk backwards
         * from the last set bit.
         */
        for (lasts--; pend & (1 << (lasts - 1)); lasts--);

        return pend & ~((1 << lasts) - 1);
}
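
/*
 * Example: pend = 0x0c7 (objects 1-3 and 7-8 pending). hweight32() is 5
 * but fls() is 8, so there is a gap. Walking down from bit 7 stops at
 * bit 6 and the function returns 0x0c0, i.e. only objects 7 and 8.
 * Those upper objects are read first; the lower ones are picked up on
 * the next pass of c_can_do_rx_poll().
 */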

static inline void c_can_rx_object_get(struct net_device *dev,
                                       struct c_can_priv *priv, u32 obj)
{
        c_can_object_get(dev, IF_RX, obj, priv->comm_rcv_high);
}

static inline void c_can_rx_finalize(struct net_device *dev,
                                     struct c_can_priv *priv, u32 obj)
{
        if (priv->type != BOSCH_D_CAN)
                c_can_object_get(dev, IF_RX, obj, IF_COMM_CLR_NEWDAT);
}

static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
                              u32 pend, int quota)
{
        u32 pkts = 0, ctrl, obj;

        while ((obj = ffs(pend)) && quota > 0) {
                pend &= ~BIT(obj - 1);

                c_can_rx_object_get(dev, priv, obj);
                ctrl = priv->read_reg(priv, C_CAN_IFACE(MSGCTRL_REG, IF_RX));

                if (ctrl & IF_MCONT_MSGLST) {
                        int n = c_can_handle_lost_msg_obj(dev, IF_RX, obj, ctrl);

                        pkts += n;
                        quota -= n;
                        continue;
                }

                /*
                 * This really should not happen, but this covers some
                 * odd HW behaviour. Do not remove that unless you
                 * want to brick your machine.
                 */
                if (!(ctrl & IF_MCONT_NEWDAT))
                        continue;

                /* read the data from the message object */
                c_can_read_msg_object(dev, IF_RX, ctrl);

                c_can_rx_finalize(dev, priv, obj);

                pkts++;
                quota--;
        }

        return pkts;
}

static inline u32 c_can_get_pending(struct c_can_priv *priv)
{
        u32 pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG);

        return pend;
}

/*
 * theory of operation:
 *
 * c_can core saves a received CAN message into the first free message
 * object it finds free (starting with the lowest). Bits NEWDAT and
 * INTPND are set for this message object indicating that a new message
 * has arrived. To work-around this issue, we keep two groups of message
 * objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT.
 *
 * We clear the newdat bit right away.
 *
 * This can result in packet reordering when the readout is slow.
 */
static int c_can_do_rx_poll(struct net_device *dev, int quota)
{
        struct c_can_priv *priv = netdev_priv(dev);
        u32 pkts = 0, pend = 0, toread, n;

        /*
         * It is faster to read only one 16bit register. This is only possible
         * for a maximum number of 16 objects.
         */
        BUILD_BUG_ON_MSG(C_CAN_MSG_OBJ_RX_LAST > 16,
                         "Implementation does not support more message objects than 16");

        while (quota > 0) {
                if (!pend) {
                        pend = c_can_get_pending(priv);
                        if (!pend)
                                break;
                        /*
                         * If the pending field has a gap, handle the
                         * bits above the gap first.
                         */
                        toread = c_can_adjust_pending(pend);
                } else {
                        toread = pend;
                }
                /* Remove the bits from pend */
                pend &= ~toread;
                /* Read the objects */
                n = c_can_read_objects(dev, priv, toread, quota);
                pkts += n;
                quota -= n;
        }

        if (pkts)
                can_led_event(dev, CAN_LED_EVENT_RX);

        return pkts;
}
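
/*
 * Because the BUILD_BUG_ON above limits the receive objects to 16, a
 * single 16-bit read of C_CAN_NEWDAT1_REG covers all of them. When
 * c_can_adjust_pending() reports a gap, the objects above the gap are
 * drained first and the remaining lower bits are handled on the next
 * iteration of the while loop, before NEWDAT1 is read again.
 */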

static int c_can_handle_state_change(struct net_device *dev,
                                     enum c_can_bus_error_types error_type)
{
        unsigned int reg_err_counter;
        unsigned int rx_err_passive;
        struct c_can_priv *priv = netdev_priv(dev);
        struct net_device_stats *stats = &dev->stats;
        struct can_frame *cf;
        struct sk_buff *skb;
        struct can_berr_counter bec;

        switch (error_type) {
        case C_CAN_ERROR_WARNING:
                /* error warning state */
                priv->can.can_stats.error_warning++;
                priv->can.state = CAN_STATE_ERROR_WARNING;
                break;
        case C_CAN_ERROR_PASSIVE:
                /* error passive state */
                priv->can.can_stats.error_passive++;
                priv->can.state = CAN_STATE_ERROR_PASSIVE;
                break;
        case C_CAN_BUS_OFF:
                /* bus-off state */
                priv->can.state = CAN_STATE_BUS_OFF;
                priv->can.can_stats.bus_off++;
                break;
        default:
                break;
        }

        /* propagate the error condition to the CAN stack */
        skb = alloc_can_err_skb(dev, &cf);
        if (unlikely(!skb))
                return 0;

        __c_can_get_berr_counter(dev, &bec);
        reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
        rx_err_passive = (reg_err_counter & ERR_CNT_RP_MASK) >>
                         ERR_CNT_RP_SHIFT;

        switch (error_type) {
        case C_CAN_ERROR_WARNING:
                /* error warning state */
                cf->can_id |= CAN_ERR_CRTL;
                cf->data[1] = (bec.txerr > bec.rxerr) ?
                        CAN_ERR_CRTL_TX_WARNING :
                        CAN_ERR_CRTL_RX_WARNING;
                cf->data[6] = bec.txerr;
                cf->data[7] = bec.rxerr;
                break;
        case C_CAN_ERROR_PASSIVE:
                /* error passive state */
                cf->can_id |= CAN_ERR_CRTL;
                if (rx_err_passive)
                        cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
                if (bec.txerr > 127)
                        cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
                cf->data[6] = bec.txerr;
                cf->data[7] = bec.rxerr;
                break;
        case C_CAN_BUS_OFF:
                /* bus-off state */
                cf->can_id |= CAN_ERR_BUSOFF;
                can_bus_off(dev);
                break;
        default:
                break;
        }

        stats->rx_packets++;
        stats->rx_bytes += cf->can_dlc;
        netif_receive_skb(skb);

        return 1;
}

static int c_can_handle_bus_err(struct net_device *dev,
                                enum c_can_lec_type lec_type)
{
        struct c_can_priv *priv = netdev_priv(dev);
        struct net_device_stats *stats = &dev->stats;
        struct can_frame *cf;
        struct sk_buff *skb;

        /*
         * early exit if no lec update or no error.
         * no lec update means that no CAN bus event has been detected
         * since CPU wrote 0x7 value to status reg.
         */
        if (lec_type == LEC_UNUSED || lec_type == LEC_NO_ERROR)
                return 0;

        if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
                return 0;

        /* common for all type of bus errors */
        priv->can.can_stats.bus_error++;
        stats->rx_errors++;

        /* propagate the error condition to the CAN stack */
        skb = alloc_can_err_skb(dev, &cf);
        if (unlikely(!skb))
                return 0;

        /*
         * check for 'last error code' which tells us the
         * type of the last error to occur on the CAN bus
         */
        cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
        cf->data[2] |= CAN_ERR_PROT_UNSPEC;

        switch (lec_type) {
        case LEC_STUFF_ERROR:
                netdev_dbg(dev, "stuff error\n");
                cf->data[2] |= CAN_ERR_PROT_STUFF;
                break;
        case LEC_FORM_ERROR:
                netdev_dbg(dev, "form error\n");
                cf->data[2] |= CAN_ERR_PROT_FORM;
                break;
        case LEC_ACK_ERROR:
                netdev_dbg(dev, "ack error\n");
                cf->data[3] |= (CAN_ERR_PROT_LOC_ACK |
                                CAN_ERR_PROT_LOC_ACK_DEL);
                break;
        case LEC_BIT1_ERROR:
                netdev_dbg(dev, "bit1 error\n");
                cf->data[2] |= CAN_ERR_PROT_BIT1;
                break;
        case LEC_BIT0_ERROR:
                netdev_dbg(dev, "bit0 error\n");
                cf->data[2] |= CAN_ERR_PROT_BIT0;
                break;
        case LEC_CRC_ERROR:
                netdev_dbg(dev, "CRC error\n");
                cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
                                CAN_ERR_PROT_LOC_CRC_DEL);
                break;
        default:
                break;
        }

        stats->rx_packets++;
        stats->rx_bytes += cf->can_dlc;
        netif_receive_skb(skb);
        return 1;
}

static int c_can_poll(struct napi_struct *napi, int quota)
{
        struct net_device *dev = napi->dev;
        struct c_can_priv *priv = netdev_priv(dev);
        u16 curr, last = priv->last_status;
        int work_done = 0;

        priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG);

        /* Ack status on C_CAN. D_CAN is self clearing */
        if (priv->type != BOSCH_D_CAN)
                priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);

        /* handle state changes */
        if ((curr & STATUS_EWARN) && (!(last & STATUS_EWARN))) {
                netdev_dbg(dev, "entered error warning state\n");
                work_done += c_can_handle_state_change(dev, C_CAN_ERROR_WARNING);
        }

        if ((curr & STATUS_EPASS) && (!(last & STATUS_EPASS))) {
                netdev_dbg(dev, "entered error passive state\n");
                work_done += c_can_handle_state_change(dev, C_CAN_ERROR_PASSIVE);
        }

        if ((curr & STATUS_BOFF) && (!(last & STATUS_BOFF))) {
                netdev_dbg(dev, "entered bus off state\n");
                work_done += c_can_handle_state_change(dev, C_CAN_BUS_OFF);
                goto end;
        }

        /* handle bus recovery events */
        if ((!(curr & STATUS_BOFF)) && (last & STATUS_BOFF)) {
                netdev_dbg(dev, "left bus off state\n");
                priv->can.state = CAN_STATE_ERROR_ACTIVE;
        }

        if ((!(curr & STATUS_EPASS)) && (last & STATUS_EPASS)) {
                netdev_dbg(dev, "left error passive state\n");
                priv->can.state = CAN_STATE_ERROR_ACTIVE;
        }

        /* handle lec errors on the bus */
        work_done += c_can_handle_bus_err(dev, curr & LEC_MASK);

        /* Handle Tx/Rx events. We do this unconditionally */
        work_done += c_can_do_rx_poll(dev, (quota - work_done));
        c_can_do_tx(dev);

end:
        if (work_done < quota) {
                napi_complete(napi);
                /* enable all IRQs if we are not in bus off state */
                if (priv->can.state != CAN_STATE_BUS_OFF)
                        c_can_irq_control(priv, true);
        }

        return work_done;
}

static irqreturn_t c_can_isr(int irq, void *dev_id)
{
        struct net_device *dev = (struct net_device *)dev_id;
        struct c_can_priv *priv = netdev_priv(dev);

        if (!priv->read_reg(priv, C_CAN_INT_REG))
                return IRQ_NONE;

        /* disable all interrupts and schedule the NAPI */
        c_can_irq_control(priv, false);
        napi_schedule(&priv->napi);

        return IRQ_HANDLED;
}
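
/*
 * Interrupt/NAPI handshake: the hard IRQ handler above only masks the
 * controller interrupts and schedules NAPI; all status, error and
 * Tx/Rx handling happens in c_can_poll(). Interrupts are re-enabled
 * there once work_done drops below the quota, except while the
 * controller is in bus-off, where they stay masked until restart.
 */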

static int c_can_open(struct net_device *dev)
{
        int err;
        struct c_can_priv *priv = netdev_priv(dev);

        c_can_pm_runtime_get_sync(priv);
        c_can_reset_ram(priv, true);

        /* open the can device */
        err = open_candev(dev);
        if (err) {
                netdev_err(dev, "failed to open can device\n");
                goto exit_open_fail;
        }

        /* register interrupt handler */
        err = request_irq(dev->irq, &c_can_isr, IRQF_SHARED, dev->name,
                          dev);
        if (err < 0) {
                netdev_err(dev, "failed to request interrupt\n");
                goto exit_irq_fail;
        }

        /* start the c_can controller */
        err = c_can_start(dev);
        if (err)
                goto exit_start_fail;

        can_led_event(dev, CAN_LED_EVENT_OPEN);

        napi_enable(&priv->napi);
        /* enable status change, error and module interrupts */
        c_can_irq_control(priv, true);
        netif_start_queue(dev);

        return 0;

exit_start_fail:
        free_irq(dev->irq, dev);
exit_irq_fail:
        close_candev(dev);
exit_open_fail:
        c_can_reset_ram(priv, false);
        c_can_pm_runtime_put_sync(priv);
        return err;
}

static int c_can_close(struct net_device *dev)
{
        struct c_can_priv *priv = netdev_priv(dev);

        netif_stop_queue(dev);
        napi_disable(&priv->napi);
        c_can_stop(dev);
        free_irq(dev->irq, dev);
        close_candev(dev);

        c_can_reset_ram(priv, false);
        c_can_pm_runtime_put_sync(priv);

        can_led_event(dev, CAN_LED_EVENT_STOP);

        return 0;
}

struct net_device *alloc_c_can_dev(void)
{
        struct net_device *dev;
        struct c_can_priv *priv;

        dev = alloc_candev(sizeof(struct c_can_priv), C_CAN_MSG_OBJ_TX_NUM);
        if (!dev)
                return NULL;

        priv = netdev_priv(dev);
        netif_napi_add(dev, &priv->napi, c_can_poll, C_CAN_NAPI_WEIGHT);

        priv->dev = dev;
        priv->can.bittiming_const = &c_can_bittiming_const;
        priv->can.do_set_mode = c_can_set_mode;
        priv->can.do_get_berr_counter = c_can_get_berr_counter;
        priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
                                       CAN_CTRLMODE_LISTENONLY |
                                       CAN_CTRLMODE_BERR_REPORTING;

        return dev;
}
EXPORT_SYMBOL_GPL(alloc_c_can_dev);

#ifdef CONFIG_PM
int c_can_power_down(struct net_device *dev)
{
        u32 val;
        unsigned long time_out;
        struct c_can_priv *priv = netdev_priv(dev);

        if (!(dev->flags & IFF_UP))
                return 0;

        WARN_ON(priv->type != BOSCH_D_CAN);

        /* set PDR value so the device goes to power down mode */
        val = priv->read_reg(priv, C_CAN_CTRL_EX_REG);
        val |= CONTROL_EX_PDR;
        priv->write_reg(priv, C_CAN_CTRL_EX_REG, val);

        /* Wait for the PDA bit to get set */
        time_out = jiffies + msecs_to_jiffies(INIT_WAIT_MS);
        while (!(priv->read_reg(priv, C_CAN_STS_REG) & STATUS_PDA) &&
               time_after(time_out, jiffies))
                cpu_relax();

        if (time_after(jiffies, time_out))
                return -ETIMEDOUT;

        c_can_stop(dev);

        c_can_reset_ram(priv, false);
        c_can_pm_runtime_put_sync(priv);

        return 0;
}
EXPORT_SYMBOL_GPL(c_can_power_down);

int c_can_power_up(struct net_device *dev)
{
        u32 val;
        unsigned long time_out;
        struct c_can_priv *priv = netdev_priv(dev);
        int ret;

        if (!(dev->flags & IFF_UP))
                return 0;

        WARN_ON(priv->type != BOSCH_D_CAN);

        c_can_pm_runtime_get_sync(priv);
        c_can_reset_ram(priv, true);

        /* Clear PDR and INIT bits */
        val = priv->read_reg(priv, C_CAN_CTRL_EX_REG);
        val &= ~CONTROL_EX_PDR;
        priv->write_reg(priv, C_CAN_CTRL_EX_REG, val);
        val = priv->read_reg(priv, C_CAN_CTRL_REG);
        val &= ~CONTROL_INIT;
        priv->write_reg(priv, C_CAN_CTRL_REG, val);

        /* Wait for the PDA bit to get clear */
        time_out = jiffies + msecs_to_jiffies(INIT_WAIT_MS);
        while ((priv->read_reg(priv, C_CAN_STS_REG) & STATUS_PDA) &&
               time_after(time_out, jiffies))
                cpu_relax();

        if (time_after(jiffies, time_out))
                return -ETIMEDOUT;

        ret = c_can_start(dev);
        if (!ret)
                c_can_irq_control(priv, true);

        return ret;
}
EXPORT_SYMBOL_GPL(c_can_power_up);
#endif

void free_c_can_dev(struct net_device *dev)
{
        struct c_can_priv *priv = netdev_priv(dev);

        netif_napi_del(&priv->napi);
        free_candev(dev);
}
EXPORT_SYMBOL_GPL(free_c_can_dev);

static const struct net_device_ops c_can_netdev_ops = {
        .ndo_open = c_can_open,
        .ndo_stop = c_can_close,
        .ndo_start_xmit = c_can_start_xmit,
        .ndo_change_mtu = can_change_mtu,
};

int register_c_can_dev(struct net_device *dev)
{
        struct c_can_priv *priv = netdev_priv(dev);
        int err;

        /* Deactivate pins to prevent DRA7 DCAN IP from being
         * stuck in transition when module is disabled.
         * Pins are activated in c_can_start() and deactivated
         * in c_can_stop()
         */
        pinctrl_pm_select_sleep_state(dev->dev.parent);

        c_can_pm_runtime_enable(priv);

        dev->flags |= IFF_ECHO; /* we support local echo */
        dev->netdev_ops = &c_can_netdev_ops;

        err = register_candev(dev);
        if (err)
                c_can_pm_runtime_disable(priv);
        else
                devm_can_led_init(dev);

        return err;
}
EXPORT_SYMBOL_GPL(register_c_can_dev);

void unregister_c_can_dev(struct net_device *dev)
{
        struct c_can_priv *priv = netdev_priv(dev);

        unregister_candev(dev);

        c_can_pm_runtime_disable(priv);
}
EXPORT_SYMBOL_GPL(unregister_c_can_dev);

MODULE_AUTHOR("Bhupesh Sharma <bhupesh.sharma@st.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CAN bus driver for Bosch C_CAN controller");