/*
 * drivers/w1/masters/omap_hdq.c
 *
 * Copyright (C) 2007,2012 Texas Instruments, Inc.
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>

#include <linux/w1.h>

#define	MOD_NAME	"OMAP_HDQ:"

#define OMAP_HDQ_REVISION			0x00
#define OMAP_HDQ_TX_DATA			0x04
#define OMAP_HDQ_RX_DATA			0x08
#define OMAP_HDQ_CTRL_STATUS			0x0c
#define OMAP_HDQ_CTRL_STATUS_SINGLE		BIT(7)
#define OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK	BIT(6)
#define OMAP_HDQ_CTRL_STATUS_CLOCKENABLE	BIT(5)
#define OMAP_HDQ_CTRL_STATUS_GO			BIT(4)
#define OMAP_HDQ_CTRL_STATUS_PRESENCE		BIT(3)
#define OMAP_HDQ_CTRL_STATUS_INITIALIZATION	BIT(2)
#define OMAP_HDQ_CTRL_STATUS_DIR		BIT(1)
#define OMAP_HDQ_INT_STATUS			0x10
#define OMAP_HDQ_INT_STATUS_TXCOMPLETE		BIT(2)
#define OMAP_HDQ_INT_STATUS_RXCOMPLETE		BIT(1)
#define OMAP_HDQ_INT_STATUS_TIMEOUT		BIT(0)
#define OMAP_HDQ_SYSCONFIG			0x14
#define OMAP_HDQ_SYSCONFIG_SOFTRESET		BIT(1)
#define OMAP_HDQ_SYSCONFIG_AUTOIDLE		BIT(0)
#define OMAP_HDQ_SYSCONFIG_NOIDLE		0x0
#define OMAP_HDQ_SYSSTATUS			0x18
#define OMAP_HDQ_SYSSTATUS_RESETDONE		BIT(0)

#define OMAP_HDQ_FLAG_CLEAR			0
#define OMAP_HDQ_FLAG_SET			1
#define OMAP_HDQ_TIMEOUT			(HZ/5)

#define OMAP_HDQ_MAX_USER			4

static DECLARE_WAIT_QUEUE_HEAD(hdq_wait_queue);

static int w1_id;
module_param(w1_id, int, S_IRUSR);
MODULE_PARM_DESC(w1_id, "1-wire id for the slave detection in HDQ mode");

struct hdq_data {
	struct device		*dev;
	void __iomem		*hdq_base;
	/* lock status update */
	struct mutex		hdq_mutex;
	int			hdq_usecount;
	u8			hdq_irqstatus;
	/* device lock */
	spinlock_t		hdq_spinlock;
	/*
	 * Used to control the call to omap_hdq_get and omap_hdq_put.
	 * HDQ Protocol: Write the CMD|REG_address first, followed by
	 * the data write or read.
	 */
	int			init_trans;
	int			rrw;
	/* mode: 0-HDQ 1-W1 */
	int			mode;
};

/* HDQ register I/O routines */
static inline u8 hdq_reg_in(struct hdq_data *hdq_data, u32 offset)
{
	return __raw_readl(hdq_data->hdq_base + offset);
}

static inline void hdq_reg_out(struct hdq_data *hdq_data, u32 offset, u8 val)
{
	__raw_writel(val, hdq_data->hdq_base + offset);
}

static inline u8 hdq_reg_merge(struct hdq_data *hdq_data, u32 offset,
			u8 val, u8 mask)
{
	u8 new_val = (__raw_readl(hdq_data->hdq_base + offset) & ~mask)
			| (val & mask);
	__raw_writel(new_val, hdq_data->hdq_base + offset);

	return new_val;
}
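
/*
 * Clear the bits of the register at @offset that are zero in @mask;
 * callers pass the complement of an interrupt-enable bit to mask it off.
 */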
static void hdq_disable_interrupt(struct hdq_data *hdq_data, u32 offset,
				  u32 mask)
{
	u32 ie;

	ie = readl(hdq_data->hdq_base + offset);
	writel(ie & mask, hdq_data->hdq_base + offset);
}

/*
 * Wait for one or more bits in the flag to change.
 * HDQ_FLAG_SET: wait until any bit in the flag is set.
 * HDQ_FLAG_CLEAR: wait until all bits in the flag are cleared.
 * Return 0 on success and -ETIMEDOUT in the case of timeout.
 */
static int hdq_wait_for_flag(struct hdq_data *hdq_data, u32 offset,
			     u8 flag, u8 flag_set, u8 *status)
{
	int ret = 0;
	unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;

	if (flag_set == OMAP_HDQ_FLAG_CLEAR) {
		/* wait for the flag clear */
		while (((*status = hdq_reg_in(hdq_data, offset)) & flag)
			&& time_before(jiffies, timeout)) {
			schedule_timeout_uninterruptible(1);
		}
		if (*status & flag)
			ret = -ETIMEDOUT;
	} else if (flag_set == OMAP_HDQ_FLAG_SET) {
		/* wait for the flag set */
		while (!((*status = hdq_reg_in(hdq_data, offset)) & flag)
			&& time_before(jiffies, timeout)) {
			schedule_timeout_uninterruptible(1);
		}
		if (!(*status & flag))
			ret = -ETIMEDOUT;
	} else
		return -EINVAL;

	return ret;
}

/* write out a byte and fill *status with HDQ_INT_STATUS */
static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
{
	int ret;
	u8 tmp_status;
	unsigned long irqflags;

	*status = 0;

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	/* clear interrupt flags via a dummy read */
	hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
	/* ISR loads it with new INT_STATUS */
	hdq_data->hdq_irqstatus = 0;
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);

	hdq_reg_out(hdq_data, OMAP_HDQ_TX_DATA, val);

	/* set the GO bit */
	hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_GO,
		OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
	/* wait for the TXCOMPLETE bit */
	ret = wait_event_timeout(hdq_wait_queue,
		hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
	if (ret == 0) {
		dev_dbg(hdq_data->dev, "TX wait elapsed\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	*status = hdq_data->hdq_irqstatus;
	/* check irqstatus */
	if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) {
		dev_dbg(hdq_data->dev, "timeout waiting for"
			" TXCOMPLETE/RXCOMPLETE, %x", *status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* wait for the GO bit to return to zero */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_GO,
			OMAP_HDQ_FLAG_CLEAR, &tmp_status);
	if (ret) {
		dev_dbg(hdq_data->dev, "timeout waiting GO bit"
			" return to zero, %x", tmp_status);
	}

out:
	return ret;
}

/* HDQ Interrupt service routine */
static irqreturn_t hdq_isr(int irq, void *_hdq)
{
	struct hdq_data *hdq_data = _hdq;
	unsigned long irqflags;

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	hdq_data->hdq_irqstatus = hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
	dev_dbg(hdq_data->dev, "hdq_isr: %x", hdq_data->hdq_irqstatus);

	if (hdq_data->hdq_irqstatus &
		(OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE
		| OMAP_HDQ_INT_STATUS_TIMEOUT)) {
		/* wake up sleeping process */
		wake_up(&hdq_wait_queue);
	}

	return IRQ_HANDLED;
}

/* W1 search callback function in HDQ mode */
static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
		u8 search_type, w1_slave_found_callback slave_found)
{
	u64 module_id, rn_le, cs, id;

	if (w1_id)
		module_id = w1_id;
	else
		module_id = 0x1;

	rn_le = cpu_to_le64(module_id);
	/*
	 * HDQ might not fully obey the 1-wire spec,
	 * so calculate the CRC from the module parameter.
	 */
	cs = w1_calc_crc8((u8 *)&rn_le, 7);
	id = (cs << 56) | module_id;

	slave_found(master_dev, id);
}
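
/* Soft-reset the block, then restore the clock, interrupt and mode settings */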
static int _omap_hdq_reset(struct hdq_data *hdq_data)
{
	int ret;
	u8 tmp_status;

	hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
		    OMAP_HDQ_SYSCONFIG_SOFTRESET);
	/*
	 * Select HDQ/1W mode & enable clocks.
	 * It is observed that INT flags can't be cleared via a read and GO/INIT
	 * won't return to zero if interrupt is disabled. So we always enable
	 * interrupt.
	 */
	hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
		    OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
		    OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);

	/* wait for reset to complete */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_SYSSTATUS,
		OMAP_HDQ_SYSSTATUS_RESETDONE, OMAP_HDQ_FLAG_SET, &tmp_status);
	if (ret)
		dev_dbg(hdq_data->dev, "timeout waiting HDQ reset, %x",
			tmp_status);
	else {
		hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
			    OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
			    OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK |
			    hdq_data->mode);
		hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
			    OMAP_HDQ_SYSCONFIG_AUTOIDLE);
	}

	return ret;
}

/* Issue break pulse to the device */
static int omap_hdq_break(struct hdq_data *hdq_data)
{
	int ret = 0;
	u8 tmp_status;
	unsigned long irqflags;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
		ret = -EINTR;
		goto rtn;
	}

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	/* clear interrupt flags via a dummy read */
	hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
	/* ISR loads it with new INT_STATUS */
	hdq_data->hdq_irqstatus = 0;
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);

	/* set the INIT and GO bit */
	hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
		OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO,
		OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
		OMAP_HDQ_CTRL_STATUS_GO);

	/* wait for the TIMEOUT bit */
	ret = wait_event_timeout(hdq_wait_queue,
		hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
	if (ret == 0) {
		dev_dbg(hdq_data->dev, "break wait elapsed\n");
		ret = -EINTR;
		goto out;
	}

	tmp_status = hdq_data->hdq_irqstatus;
	/* check irqstatus */
	if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) {
		dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x",
			tmp_status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/*
	 * check that the presence-detect bit gets
	 * set, showing that the slave is responding
	 */
	if (!(hdq_reg_in(hdq_data, OMAP_HDQ_CTRL_STATUS) &
		OMAP_HDQ_CTRL_STATUS_PRESENCE)) {
		dev_dbg(hdq_data->dev, "Presence bit not set\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	/*
	 * wait for both INIT and GO bits to return to zero.
	 * zero wait time expected for interrupt mode.
	 */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
			OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_FLAG_CLEAR,
			&tmp_status);
	if (ret)
		dev_dbg(hdq_data->dev, "timeout waiting INIT&GO bits"
			" return to zero, %x", tmp_status);

out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}
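
/* read one byte from the slave; start an RX cycle unless one has already completed */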
static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
{
	int ret = 0;
	u8 status;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		ret = -EINTR;
		goto rtn;
	}

	if (!hdq_data->hdq_usecount) {
		ret = -EINVAL;
		goto out;
	}

	if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
		hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO,
			OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
		/*
		 * The RX comes immediately after TX.
		 */
		wait_event_timeout(hdq_wait_queue,
				   (hdq_data->hdq_irqstatus
				    & OMAP_HDQ_INT_STATUS_RXCOMPLETE),
				   OMAP_HDQ_TIMEOUT);

		hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, 0,
			OMAP_HDQ_CTRL_STATUS_DIR);
		status = hdq_data->hdq_irqstatus;
		/* check irqstatus */
		if (!(status & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
			dev_dbg(hdq_data->dev, "timeout waiting for"
				" RXCOMPLETE, %x", status);
			ret = -ETIMEDOUT;
			goto out;
		}
	}
	/* the data is ready. Read it in! */
	*val = hdq_reg_in(hdq_data, OMAP_HDQ_RX_DATA);
out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}

/* Enable clocks and set the controller to HDQ/1W mode */
static int omap_hdq_get(struct hdq_data *hdq_data)
{
	int ret = 0;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		ret = -EINTR;
		goto rtn;
	}

	if (OMAP_HDQ_MAX_USER == hdq_data->hdq_usecount) {
		dev_dbg(hdq_data->dev, "attempt to exceed the max use count");
		ret = -EINVAL;
		goto out;
	} else {
		hdq_data->hdq_usecount++;
		try_module_get(THIS_MODULE);
		if (1 == hdq_data->hdq_usecount) {
			pm_runtime_get_sync(hdq_data->dev);

			/* make sure HDQ/1W is out of reset */
			if (!(hdq_reg_in(hdq_data, OMAP_HDQ_SYSSTATUS) &
				OMAP_HDQ_SYSSTATUS_RESETDONE)) {
				ret = _omap_hdq_reset(hdq_data);
				if (ret)
					/* back up the count */
					hdq_data->hdq_usecount--;
			} else {
				/* select HDQ/1W mode & enable clocks */
				hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
					OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
					OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK |
					hdq_data->mode);
				hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
					OMAP_HDQ_SYSCONFIG_NOIDLE);
				hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
			}
		}
	}

out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}

/* Disable clocks to the module */
static int omap_hdq_put(struct hdq_data *hdq_data)
{
	int ret = 0;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0)
		return -EINTR;

	hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
		    OMAP_HDQ_SYSCONFIG_AUTOIDLE);
	if (0 == hdq_data->hdq_usecount) {
		dev_dbg(hdq_data->dev, "attempt to decrement use count"
			" when it is zero");
		ret = -EINVAL;
	} else {
		hdq_data->hdq_usecount--;
		module_put(THIS_MODULE);
		if (0 == hdq_data->hdq_usecount)
			pm_runtime_put_sync(hdq_data->dev);
	}
	mutex_unlock(&hdq_data->hdq_mutex);

	return ret;
}

/*
 * W1 triplet callback function - used for searching ROM addresses.
 * Registered only when controller is in 1-wire mode.
 */
static u8 omap_w1_triplet(void *_hdq, u8 bdir)
{
	u8 id_bit, comp_bit;
	int err;
	u8 ret = 0x3; /* no slaves responded */
	struct hdq_data *hdq_data = _hdq;
	u8 ctrl = OMAP_HDQ_CTRL_STATUS_SINGLE | OMAP_HDQ_CTRL_STATUS_GO |
		  OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK;
	u8 mask = ctrl | OMAP_HDQ_CTRL_STATUS_DIR;

	omap_hdq_get(_hdq);

	err = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (err < 0) {
		dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
		goto rtn;
	}

	hdq_data->hdq_irqstatus = 0;
	/* read id_bit */
	hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS,
		      ctrl | OMAP_HDQ_CTRL_STATUS_DIR, mask);
	err = wait_event_timeout(hdq_wait_queue,
				 (hdq_data->hdq_irqstatus
				  & OMAP_HDQ_INT_STATUS_RXCOMPLETE),
				 OMAP_HDQ_TIMEOUT);
	if (err == 0) {
		dev_dbg(hdq_data->dev, "RX wait elapsed\n");
		goto out;
	}
	id_bit = (hdq_reg_in(_hdq, OMAP_HDQ_RX_DATA) & 0x01);

	hdq_data->hdq_irqstatus = 0;
	/* read comp_bit */
	hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS,
		      ctrl | OMAP_HDQ_CTRL_STATUS_DIR, mask);
	err = wait_event_timeout(hdq_wait_queue,
				 (hdq_data->hdq_irqstatus
				  & OMAP_HDQ_INT_STATUS_RXCOMPLETE),
				 OMAP_HDQ_TIMEOUT);
	if (err == 0) {
		dev_dbg(hdq_data->dev, "RX wait elapsed\n");
		goto out;
	}
	comp_bit = (hdq_reg_in(_hdq, OMAP_HDQ_RX_DATA) & 0x01);

	if (id_bit && comp_bit) {
		ret = 0x03;  /* no slaves responded */
		goto out;
	}
	if (!id_bit && !comp_bit) {
		/* Both bits are valid, take the direction given */
		ret = bdir ? 0x04 : 0;
	} else {
		/* Only one bit is valid, take that direction */
		bdir = id_bit;
		ret = id_bit ? 0x05 : 0x02;
	}

	/* write bdir bit */
	hdq_reg_out(_hdq, OMAP_HDQ_TX_DATA, bdir);
	hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS, ctrl, mask);
	err = wait_event_timeout(hdq_wait_queue,
				 (hdq_data->hdq_irqstatus
				  & OMAP_HDQ_INT_STATUS_TXCOMPLETE),
				 OMAP_HDQ_TIMEOUT);
	if (err == 0) {
		dev_dbg(hdq_data->dev, "TX wait elapsed\n");
		goto out;
	}

	hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS, 0,
		      OMAP_HDQ_CTRL_STATUS_SINGLE);

out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	omap_hdq_put(_hdq);
	return ret;
}

/* reset callback */
static u8 omap_w1_reset_bus(void *_hdq)
{
	omap_hdq_get(_hdq);
	omap_hdq_break(_hdq);
	omap_hdq_put(_hdq);
	return 0;
}

/* Read a byte of data from the device */
static u8 omap_w1_read_byte(void *_hdq)
{
	struct hdq_data *hdq_data = _hdq;
	u8 val = 0;
	int ret;

	/* First write to initialize the transfer */
	if (hdq_data->init_trans == 0)
		omap_hdq_get(hdq_data);

	ret = hdq_read_byte(hdq_data, &val);
	if (ret) {
		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
		if (ret < 0) {
			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
			return -EINTR;
		}
		hdq_data->init_trans = 0;
		mutex_unlock(&hdq_data->hdq_mutex);
		omap_hdq_put(hdq_data);
		return -1;
	}

	hdq_disable_interrupt(hdq_data, OMAP_HDQ_CTRL_STATUS,
			      ~OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);

	/* Write followed by a read, release the module */
	if (hdq_data->init_trans) {
		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
		if (ret < 0) {
			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
			return -EINTR;
		}
		hdq_data->init_trans = 0;
		mutex_unlock(&hdq_data->hdq_mutex);
		omap_hdq_put(hdq_data);
	}

	return val;
}

/* Write a byte of data to the device */
static void omap_w1_write_byte(void *_hdq, u8 byte)
{
	struct hdq_data *hdq_data = _hdq;
	int ret;
	u8 status;

	/* First write to initialize the transfer */
	if (hdq_data->init_trans == 0)
		omap_hdq_get(hdq_data);

	/*
	 * We need to reset the slave before
	 * issuing the SKIP ROM command, else
	 * the slave will not work.
	 */
	if (byte == W1_SKIP_ROM)
		omap_hdq_break(hdq_data);

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
		return;
	}
	hdq_data->init_trans++;
	mutex_unlock(&hdq_data->hdq_mutex);

	ret = hdq_write_byte(hdq_data, byte, &status);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "TX failure:Ctrl status %x\n", status);
		return;
	}

	/* Second write, data transferred. Release the module */
	if (hdq_data->init_trans > 1) {
		omap_hdq_put(hdq_data);
		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
		if (ret < 0) {
			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
			return;
		}
		hdq_data->init_trans = 0;
		mutex_unlock(&hdq_data->hdq_mutex);
	}
}

static struct w1_bus_master omap_w1_master = {
	.read_byte	= omap_w1_read_byte,
	.write_byte	= omap_w1_write_byte,
	.reset_bus	= omap_w1_reset_bus,
};
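
/* Map the registers, enable runtime PM, reset the block and register the w1 master */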
static int omap_hdq_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct hdq_data *hdq_data;
	struct resource *res;
	int ret, irq;
	u8 rev;
	const char *mode;

	hdq_data = devm_kzalloc(dev, sizeof(*hdq_data), GFP_KERNEL);
	if (!hdq_data) {
		dev_dbg(&pdev->dev, "unable to allocate memory\n");
		return -ENOMEM;
	}

	hdq_data->dev = dev;
	platform_set_drvdata(pdev, hdq_data);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hdq_data->hdq_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(hdq_data->hdq_base))
		return PTR_ERR(hdq_data->hdq_base);

	hdq_data->hdq_usecount = 0;
	hdq_data->rrw = 0;
	mutex_init(&hdq_data->hdq_mutex);

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		dev_dbg(&pdev->dev, "pm_runtime_get_sync failed\n");
		goto err_w1;
	}

	ret = _omap_hdq_reset(hdq_data);
	if (ret) {
		dev_dbg(&pdev->dev, "reset failed\n");
		goto err_irq;
	}

	rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION);
	dev_info(&pdev->dev, "OMAP HDQ Hardware Rev %c.%c. Driver in %s mode\n",
		(rev >> 4) + '0', (rev & 0x0f) + '0', "Interrupt");

	spin_lock_init(&hdq_data->hdq_spinlock);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_dbg(&pdev->dev, "Failed to get IRQ: %d\n", irq);
		ret = irq;
		goto err_irq;
	}

	ret = devm_request_irq(dev, irq, hdq_isr, 0, "omap_hdq", hdq_data);
	if (ret < 0) {
		dev_dbg(&pdev->dev, "could not request irq\n");
		goto err_irq;
	}

	omap_hdq_break(hdq_data);

	pm_runtime_put_sync(&pdev->dev);

	ret = of_property_read_string(pdev->dev.of_node, "ti,mode", &mode);
	if (ret < 0 || !strcmp(mode, "hdq")) {
		hdq_data->mode = 0;
		omap_w1_master.search = omap_w1_search_bus;
	} else {
		hdq_data->mode = 1;
		omap_w1_master.triplet = omap_w1_triplet;
	}

	omap_w1_master.data = hdq_data;

	ret = w1_add_master_device(&omap_w1_master);
	if (ret) {
		dev_dbg(&pdev->dev, "Failure in registering w1 master\n");
		goto err_w1;
	}

	return 0;

err_irq:
	pm_runtime_put_sync(&pdev->dev);
err_w1:
	pm_runtime_disable(&pdev->dev);

	return ret;
}
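
/* Refuse removal while the block is in use; otherwise unregister the w1 master */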
static int omap_hdq_remove(struct platform_device *pdev)
{
	struct hdq_data *hdq_data = platform_get_drvdata(pdev);

	mutex_lock(&hdq_data->hdq_mutex);

	if (hdq_data->hdq_usecount) {
		dev_dbg(&pdev->dev, "removed when use count is not zero\n");
		mutex_unlock(&hdq_data->hdq_mutex);
		return -EBUSY;
	}

	mutex_unlock(&hdq_data->hdq_mutex);

	/* remove module dependency */
	pm_runtime_disable(&pdev->dev);
	w1_remove_master_device(&omap_w1_master);

	return 0;
}

static const struct of_device_id omap_hdq_dt_ids[] = {
	{ .compatible = "ti,omap3-1w" },
	{ .compatible = "ti,am4372-hdq" },
	{}
};
MODULE_DEVICE_TABLE(of, omap_hdq_dt_ids);

static struct platform_driver omap_hdq_driver = {
	.probe = omap_hdq_probe,
	.remove = omap_hdq_remove,
	.driver = {
		.name =	"omap_hdq",
		.of_match_table = omap_hdq_dt_ids,
	},
};
module_platform_driver(omap_hdq_driver);

MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("HDQ-1W driver Library");
MODULE_LICENSE("GPL");