/*
 *  linux/drivers/mfd/ucb1x00-core.c
 *
 *  Copyright (C) 2001 Russell King, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 *  The UCB1x00 core driver provides basic services for handling IO,
 *  the ADC, interrupts, and accessing registers.  It is designed
 *  such that everything goes through this layer, thereby providing
 *  a consistent locking methodology, as well as allowing the drivers
 *  to be used on other non-MCP-enabled hardware platforms.
 *
 *  Note that all locks are private to this file.  Nothing else may
 *  touch them.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/mfd/ucb1x00.h>
#include <linux/pm.h>
#include <linux/gpio/driver.h>

static DEFINE_MUTEX(ucb1x00_mutex);
static LIST_HEAD(ucb1x00_drivers);
static LIST_HEAD(ucb1x00_devices);

/**
 * ucb1x00_io_set_dir - set IO direction
 * @ucb: UCB1x00 structure describing chip
 * @in:  bitfield of IO pins to be set as inputs
 * @out: bitfield of IO pins to be set as outputs
 *
 * Set the IO direction of the ten general purpose IO pins on
 * the UCB1x00 chip.  The @in bitfield has priority over the
 * @out bitfield, in that if you specify a pin as both input
 * and output, it will end up as an input.
 *
 * ucb1x00_enable must have been called to enable the comms
 * before using this function.
 *
 * This function takes a spinlock, disabling interrupts.
 */
void ucb1x00_io_set_dir(struct ucb1x00 *ucb, unsigned int in, unsigned int out)
{
	unsigned long flags;

	spin_lock_irqsave(&ucb->io_lock, flags);
	ucb->io_dir |= out;
	ucb->io_dir &= ~in;

	ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);
	spin_unlock_irqrestore(&ucb->io_lock, flags);
}

/**
 * ucb1x00_io_write - set or clear IO outputs
 * @ucb:   UCB1x00 structure describing chip
 * @set:   bitfield of IO pins to set to logic '1'
 * @clear: bitfield of IO pins to set to logic '0'
 *
 * Set the IO output state of the specified IO pins.  The value
 * is retained if the pins are subsequently configured as inputs.
 * The @clear bitfield has priority over the @set bitfield -
 * outputs will be cleared.
 *
 * ucb1x00_enable must have been called to enable the comms
 * before using this function.
 *
 * This function takes a spinlock, disabling interrupts.
 */
void ucb1x00_io_write(struct ucb1x00 *ucb, unsigned int set, unsigned int clear)
{
	unsigned long flags;

	spin_lock_irqsave(&ucb->io_lock, flags);
	ucb->io_out |= set;
	ucb->io_out &= ~clear;

	ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
	spin_unlock_irqrestore(&ucb->io_lock, flags);
}

/**
 * ucb1x00_io_read - read the current state of the IO pins
 * @ucb: UCB1x00 structure describing chip
 *
 * Return a bitfield describing the logic state of the ten
 * general purpose IO pins.
 *
 * ucb1x00_enable must have been called to enable the comms
 * before using this function.
 *
 * This function does not take any mutexes or spinlocks.
 */
unsigned int ucb1x00_io_read(struct ucb1x00 *ucb)
{
	return ucb1x00_reg_read(ucb, UCB_IO_DATA);
}
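
/*
 * Example (illustrative only, not part of this driver): a client that
 * already holds a struct ucb1x00 might drive the ten general purpose
 * pins through the helpers above.  The pin masks below are made up for
 * the sake of the example.
 *
 *	unsigned int state;
 *
 *	ucb1x00_enable(ucb);			 // switch on SIBCLK comms
 *	ucb1x00_io_set_dir(ucb, 1 << 0, 1 << 1); // pin 0 input, pin 1 output
 *	ucb1x00_io_write(ucb, 1 << 1, 0);	 // drive pin 1 high
 *	state = ucb1x00_io_read(ucb);		 // sample all ten pins
 *	ucb1x00_disable(ucb);
 */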
static void ucb1x00_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
	struct ucb1x00 *ucb = gpiochip_get_data(chip);
	unsigned long flags;

	spin_lock_irqsave(&ucb->io_lock, flags);
	if (value)
		ucb->io_out |= 1 << offset;
	else
		ucb->io_out &= ~(1 << offset);

	ucb1x00_enable(ucb);
	ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
	ucb1x00_disable(ucb);
	spin_unlock_irqrestore(&ucb->io_lock, flags);
}

static int ucb1x00_gpio_get(struct gpio_chip *chip, unsigned offset)
{
	struct ucb1x00 *ucb = gpiochip_get_data(chip);
	unsigned val;

	ucb1x00_enable(ucb);
	val = ucb1x00_reg_read(ucb, UCB_IO_DATA);
	ucb1x00_disable(ucb);

	return !!(val & (1 << offset));
}

static int ucb1x00_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
	struct ucb1x00 *ucb = gpiochip_get_data(chip);
	unsigned long flags;

	spin_lock_irqsave(&ucb->io_lock, flags);
	ucb->io_dir &= ~(1 << offset);
	ucb1x00_enable(ucb);
	ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);
	ucb1x00_disable(ucb);
	spin_unlock_irqrestore(&ucb->io_lock, flags);

	return 0;
}

static int ucb1x00_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
					 int value)
{
	struct ucb1x00 *ucb = gpiochip_get_data(chip);
	unsigned long flags;
	unsigned old, mask = 1 << offset;

	spin_lock_irqsave(&ucb->io_lock, flags);
	old = ucb->io_out;
	if (value)
		ucb->io_out |= mask;
	else
		ucb->io_out &= ~mask;

	ucb1x00_enable(ucb);
	if (old != ucb->io_out)
		ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);

	if (!(ucb->io_dir & mask)) {
		ucb->io_dir |= mask;
		ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);
	}
	ucb1x00_disable(ucb);
	spin_unlock_irqrestore(&ucb->io_lock, flags);

	return 0;
}

static int ucb1x00_to_irq(struct gpio_chip *chip, unsigned offset)
{
	struct ucb1x00 *ucb = gpiochip_get_data(chip);

	return ucb->irq_base > 0 ? ucb->irq_base + offset : -ENXIO;
}

/*
 * UCB1300 data sheet says we must:
 *  1. enable ADC		=> 5us (including reference startup time)
 *  2. select input		=> 51*tsibclk  => 4.3us
 *  3. start conversion		=> 102*tsibclk => 8.5us
 * (tsibclk = 1/11981000)
 * Period between SIB 128-bit frames = 10.7us
 */

/**
 * ucb1x00_adc_enable - enable the ADC converter
 * @ucb: UCB1x00 structure describing chip
 *
 * Enable the ucb1x00 and ADC converter on the UCB1x00 for use.
 * Any code wishing to use the ADC converter must call this
 * function prior to using it.
 *
 * This function takes the ADC mutex to prevent two or more
 * concurrent uses, and therefore may sleep.  As a result, it
 * can only be called from process context, not interrupt
 * context.
 *
 * You should release the ADC as soon as possible using
 * ucb1x00_adc_disable.
 */
void ucb1x00_adc_enable(struct ucb1x00 *ucb)
{
	mutex_lock(&ucb->adc_mutex);

	ucb->adc_cr |= UCB_ADC_ENA;

	ucb1x00_enable(ucb);
	ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr);
}
/**
 * ucb1x00_adc_read - read the specified ADC channel
 * @ucb: UCB1x00 structure describing chip
 * @adc_channel: ADC channel mask
 * @sync: wait for synchronisation pulse.
 *
 * Start an ADC conversion and wait for the result.  Note that
 * synchronised ADC conversions (via the ADCSYNC pin) must wait
 * until the trigger is asserted and the conversion is finished.
 *
 * This function currently spins waiting for the conversion to
 * complete (2 frames max without sync).
 *
 * If called for a synchronised ADC conversion, it may sleep
 * with the ADC mutex held.
 */
unsigned int ucb1x00_adc_read(struct ucb1x00 *ucb, int adc_channel, int sync)
{
	unsigned int val;

	if (sync)
		adc_channel |= UCB_ADC_SYNC_ENA;

	ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr | adc_channel);
	ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr | adc_channel | UCB_ADC_START);

	for (;;) {
		val = ucb1x00_reg_read(ucb, UCB_ADC_DATA);
		if (val & UCB_ADC_DAT_VAL)
			break;
		/* yield to other processes */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(1);
	}

	return UCB_ADC_DAT(val);
}

/**
 * ucb1x00_adc_disable - disable the ADC converter
 * @ucb: UCB1x00 structure describing chip
 *
 * Disable the ADC converter and release the ADC mutex.
 */
void ucb1x00_adc_disable(struct ucb1x00 *ucb)
{
	ucb->adc_cr &= ~UCB_ADC_ENA;
	ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr);
	ucb1x00_disable(ucb);

	mutex_unlock(&ucb->adc_mutex);
}
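
/*
 * Example (illustrative only): a typical one-shot conversion from process
 * context pairs the three calls above.  UCB_ADC_INP_AD0 and UCB_NOSYNC are
 * assumed here to be the channel/sync definitions from
 * <linux/mfd/ucb1x00.h>.
 *
 *	unsigned int raw;
 *
 *	ucb1x00_adc_enable(ucb);		// takes the ADC mutex, may sleep
 *	raw = ucb1x00_adc_read(ucb, UCB_ADC_INP_AD0, UCB_NOSYNC);
 *	ucb1x00_adc_disable(ucb);		// releases the mutex
 */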
/*
 * UCB1x00 Interrupt handling.
 *
 * The UCB1x00 can generate interrupts when the SIBCLK is stopped.
 * Since we need to read an internal register, we must re-enable
 * SIBCLK to talk to the chip.  We leave the clock running until
 * we have finished processing all interrupts from the chip.
 */
static void ucb1x00_irq(struct irq_desc *desc)
{
	struct ucb1x00 *ucb = irq_desc_get_handler_data(desc);
	unsigned int isr, i;

	ucb1x00_enable(ucb);
	isr = ucb1x00_reg_read(ucb, UCB_IE_STATUS);
	ucb1x00_reg_write(ucb, UCB_IE_CLEAR, isr);
	ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0);

	for (i = 0; i < 16 && isr; i++, isr >>= 1)
		if (isr & 1)
			generic_handle_irq(ucb->irq_base + i);
	ucb1x00_disable(ucb);
}

static void ucb1x00_irq_update(struct ucb1x00 *ucb, unsigned mask)
{
	ucb1x00_enable(ucb);
	if (ucb->irq_ris_enbl & mask)
		ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl &
				  ucb->irq_mask);
	if (ucb->irq_fal_enbl & mask)
		ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl &
				  ucb->irq_mask);
	ucb1x00_disable(ucb);
}

static void ucb1x00_irq_noop(struct irq_data *data)
{
}

static void ucb1x00_irq_mask(struct irq_data *data)
{
	struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data);
	unsigned mask = 1 << (data->irq - ucb->irq_base);

	raw_spin_lock(&ucb->irq_lock);
	ucb->irq_mask &= ~mask;
	ucb1x00_irq_update(ucb, mask);
	raw_spin_unlock(&ucb->irq_lock);
}

static void ucb1x00_irq_unmask(struct irq_data *data)
{
	struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data);
	unsigned mask = 1 << (data->irq - ucb->irq_base);

	raw_spin_lock(&ucb->irq_lock);
	ucb->irq_mask |= mask;
	ucb1x00_irq_update(ucb, mask);
	raw_spin_unlock(&ucb->irq_lock);
}

static int ucb1x00_irq_set_type(struct irq_data *data, unsigned int type)
{
	struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data);
	unsigned mask = 1 << (data->irq - ucb->irq_base);

	raw_spin_lock(&ucb->irq_lock);
	if (type & IRQ_TYPE_EDGE_RISING)
		ucb->irq_ris_enbl |= mask;
	else
		ucb->irq_ris_enbl &= ~mask;

	if (type & IRQ_TYPE_EDGE_FALLING)
		ucb->irq_fal_enbl |= mask;
	else
		ucb->irq_fal_enbl &= ~mask;
	if (ucb->irq_mask & mask) {
		ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl &
				  ucb->irq_mask);
		ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl &
				  ucb->irq_mask);
	}
	raw_spin_unlock(&ucb->irq_lock);

	return 0;
}

static int ucb1x00_irq_set_wake(struct irq_data *data, unsigned int on)
{
	struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data);
	struct ucb1x00_plat_data *pdata = ucb->mcp->attached_device.platform_data;
	unsigned mask = 1 << (data->irq - ucb->irq_base);

	if (!pdata || !pdata->can_wakeup)
		return -EINVAL;

	raw_spin_lock(&ucb->irq_lock);
	if (on)
		ucb->irq_wake |= mask;
	else
		ucb->irq_wake &= ~mask;
	raw_spin_unlock(&ucb->irq_lock);

	return 0;
}

static struct irq_chip ucb1x00_irqchip = {
	.name = "ucb1x00",
	.irq_ack = ucb1x00_irq_noop,
	.irq_mask = ucb1x00_irq_mask,
	.irq_unmask = ucb1x00_irq_unmask,
	.irq_set_type = ucb1x00_irq_set_type,
	.irq_set_wake = ucb1x00_irq_set_wake,
};
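
/*
 * Example (illustrative only): sub-drivers see these lines as ordinary
 * Linux interrupts numbered from ucb->irq_base.  UCB_IRQ_TSPX is assumed
 * here to be one of the interrupt offsets from <linux/mfd/ucb1x00.h>;
 * my_handler and my_data are placeholder names.
 *
 *	ret = request_irq(ucb->irq_base + UCB_IRQ_TSPX, my_handler,
 *			  IRQF_TRIGGER_RISING, "my-ucb-client", my_data);
 *
 * The handler is then invoked from the chained ucb1x00_irq() dispatcher
 * above, with SIBCLK already re-enabled for register access.
 */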
static int ucb1x00_add_dev(struct ucb1x00 *ucb, struct ucb1x00_driver *drv)
{
	struct ucb1x00_dev *dev;
	int ret;

	dev = kmalloc(sizeof(struct ucb1x00_dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->ucb = ucb;
	dev->drv = drv;

	ret = drv->add(dev);
	if (ret) {
		kfree(dev);
		return ret;
	}

	list_add_tail(&dev->dev_node, &ucb->devs);
	list_add_tail(&dev->drv_node, &drv->devs);

	return ret;
}

static void ucb1x00_remove_dev(struct ucb1x00_dev *dev)
{
	dev->drv->remove(dev);
	list_del(&dev->dev_node);
	list_del(&dev->drv_node);
	kfree(dev);
}

/*
 * Try to probe our interrupt, rather than relying on lots of
 * hard-coded machine dependencies.  For reference, the expected
 * IRQ mappings are:
 *
 *  Machine		Default IRQ
 *  adsbitsy		IRQ_GPCIN4
 *  cerf		IRQ_GPIO_UCB1200_IRQ
 *  flexanet		IRQ_GPIO_GUI
 *  freebird		IRQ_GPIO_FREEBIRD_UCB1300_IRQ
 *  graphicsclient	ADS_EXT_IRQ(8)
 *  graphicsmaster	ADS_EXT_IRQ(8)
 *  lart		LART_IRQ_UCB1200
 *  omnimeter		IRQ_GPIO23
 *  pfs168		IRQ_GPIO_UCB1300_IRQ
 *  simpad		IRQ_GPIO_UCB1300_IRQ
 *  shannon		SHANNON_IRQ_GPIO_IRQ_CODEC
 *  yopy		IRQ_GPIO_UCB1200_IRQ
 */
static int ucb1x00_detect_irq(struct ucb1x00 *ucb)
{
	unsigned long mask;

	mask = probe_irq_on();

	/*
	 * Enable the ADC interrupt.
	 */
	ucb1x00_reg_write(ucb, UCB_IE_RIS, UCB_IE_ADC);
	ucb1x00_reg_write(ucb, UCB_IE_FAL, UCB_IE_ADC);
	ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0xffff);
	ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0);

	/*
	 * Cause an ADC interrupt.
	 */
	ucb1x00_reg_write(ucb, UCB_ADC_CR, UCB_ADC_ENA);
	ucb1x00_reg_write(ucb, UCB_ADC_CR, UCB_ADC_ENA | UCB_ADC_START);

	/*
	 * Wait for the conversion to complete.
	 */
	while ((ucb1x00_reg_read(ucb, UCB_ADC_DATA) & UCB_ADC_DAT_VAL) == 0);
	ucb1x00_reg_write(ucb, UCB_ADC_CR, 0);

	/*
	 * Disable and clear interrupt.
	 */
	ucb1x00_reg_write(ucb, UCB_IE_RIS, 0);
	ucb1x00_reg_write(ucb, UCB_IE_FAL, 0);
	ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0xffff);
	ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0);

	/*
	 * Read triggered interrupt.
	 */
	return probe_irq_off(mask);
}

static void ucb1x00_release(struct device *dev)
{
	struct ucb1x00 *ucb = classdev_to_ucb1x00(dev);

	kfree(ucb);
}

static struct class ucb1x00_class = {
	.name = "ucb1x00",
	.dev_release = ucb1x00_release,
};

static int ucb1x00_probe(struct mcp *mcp)
{
	struct ucb1x00_plat_data *pdata = mcp->attached_device.platform_data;
	struct ucb1x00_driver *drv;
	struct ucb1x00 *ucb;
	unsigned id, i, irq_base;
	int ret = -ENODEV;

	/* Tell the platform to deassert the UCB1x00 reset */
	if (pdata && pdata->reset)
		pdata->reset(UCB_RST_PROBE);

	mcp_enable(mcp);
	id = mcp_reg_read(mcp, UCB_ID);
	mcp_disable(mcp);

	if (id != UCB_ID_1200 && id != UCB_ID_1300 && id != UCB_ID_TC35143) {
		printk(KERN_WARNING "UCB1x00 ID not found: %04x\n", id);
		goto out;
	}

	ucb = kzalloc(sizeof(struct ucb1x00), GFP_KERNEL);
	ret = -ENOMEM;
	if (!ucb)
		goto out;

	device_initialize(&ucb->dev);
	ucb->dev.class = &ucb1x00_class;
	ucb->dev.parent = &mcp->attached_device;
	dev_set_name(&ucb->dev, "ucb1x00");

	raw_spin_lock_init(&ucb->irq_lock);
	spin_lock_init(&ucb->io_lock);
	mutex_init(&ucb->adc_mutex);

	ucb->id = id;
	ucb->mcp = mcp;

	ret = device_add(&ucb->dev);
	if (ret)
		goto err_dev_add;

	ucb1x00_enable(ucb);
	ucb->irq = ucb1x00_detect_irq(ucb);
	ucb1x00_disable(ucb);
	if (!ucb->irq) {
		dev_err(&ucb->dev, "IRQ probe failed\n");
		ret = -ENODEV;
		goto err_no_irq;
	}

	ucb->gpio.base = -1;
	irq_base = pdata ? pdata->irq_base : 0;
	ucb->irq_base = irq_alloc_descs(-1, irq_base, 16, -1);
	if (ucb->irq_base < 0) {
		dev_err(&ucb->dev, "unable to allocate 16 irqs: %d\n",
			ucb->irq_base);
		ret = ucb->irq_base;
		goto err_irq_alloc;
	}

	for (i = 0; i < 16; i++) {
		unsigned irq = ucb->irq_base + i;

		irq_set_chip_and_handler(irq, &ucb1x00_irqchip, handle_edge_irq);
		irq_set_chip_data(irq, ucb);
		irq_clear_status_flags(irq, IRQ_NOREQUEST);
	}

	irq_set_irq_type(ucb->irq, IRQ_TYPE_EDGE_RISING);
	irq_set_chained_handler_and_data(ucb->irq, ucb1x00_irq, ucb);

	if (pdata && pdata->gpio_base) {
		ucb->gpio.label = dev_name(&ucb->dev);
		ucb->gpio.parent = &ucb->dev;
		ucb->gpio.owner = THIS_MODULE;
		ucb->gpio.base = pdata->gpio_base;
		ucb->gpio.ngpio = 10;
		ucb->gpio.set = ucb1x00_gpio_set;
		ucb->gpio.get = ucb1x00_gpio_get;
		ucb->gpio.direction_input = ucb1x00_gpio_direction_input;
		ucb->gpio.direction_output = ucb1x00_gpio_direction_output;
		ucb->gpio.to_irq = ucb1x00_to_irq;
		ret = gpiochip_add_data(&ucb->gpio, ucb);
		if (ret)
			goto err_gpio_add;
	} else
		dev_info(&ucb->dev, "gpio_base not set so no gpiolib support");

	mcp_set_drvdata(mcp, ucb);

	if (pdata)
		device_set_wakeup_capable(&ucb->dev, pdata->can_wakeup);

	INIT_LIST_HEAD(&ucb->devs);
	mutex_lock(&ucb1x00_mutex);
	list_add_tail(&ucb->node, &ucb1x00_devices);
	list_for_each_entry(drv, &ucb1x00_drivers, node) {
		ucb1x00_add_dev(ucb, drv);
	}
	mutex_unlock(&ucb1x00_mutex);

	return ret;

 err_gpio_add:
	irq_set_chained_handler(ucb->irq, NULL);
 err_irq_alloc:
	if (ucb->irq_base > 0)
		irq_free_descs(ucb->irq_base, 16);
 err_no_irq:
	device_del(&ucb->dev);
 err_dev_add:
	put_device(&ucb->dev);
 out:
	if (pdata && pdata->reset)
		pdata->reset(UCB_RST_PROBE_FAIL);
	return ret;
}

static void ucb1x00_remove(struct mcp *mcp)
{
	struct ucb1x00_plat_data *pdata = mcp->attached_device.platform_data;
	struct ucb1x00 *ucb = mcp_get_drvdata(mcp);
	struct list_head *l, *n;

	mutex_lock(&ucb1x00_mutex);
	list_del(&ucb->node);
	list_for_each_safe(l, n, &ucb->devs) {
		struct ucb1x00_dev *dev = list_entry(l, struct ucb1x00_dev, dev_node);
		ucb1x00_remove_dev(dev);
	}
	mutex_unlock(&ucb1x00_mutex);

	if (ucb->gpio.base != -1)
		gpiochip_remove(&ucb->gpio);

	irq_set_chained_handler(ucb->irq, NULL);
	irq_free_descs(ucb->irq_base, 16);
	device_unregister(&ucb->dev);

	if (pdata && pdata->reset)
		pdata->reset(UCB_RST_REMOVE);
}

int ucb1x00_register_driver(struct ucb1x00_driver *drv)
{
	struct ucb1x00 *ucb;

	INIT_LIST_HEAD(&drv->devs);
	mutex_lock(&ucb1x00_mutex);
	list_add_tail(&drv->node, &ucb1x00_drivers);
	list_for_each_entry(ucb, &ucb1x00_devices, node) {
		ucb1x00_add_dev(ucb, drv);
	}
	mutex_unlock(&ucb1x00_mutex);

	return 0;
}

void ucb1x00_unregister_driver(struct ucb1x00_driver *drv)
{
	struct list_head *n, *l;

	mutex_lock(&ucb1x00_mutex);
	list_del(&drv->node);
	list_for_each_safe(l, n, &drv->devs) {
		struct ucb1x00_dev *dev = list_entry(l, struct ucb1x00_dev, drv_node);
		ucb1x00_remove_dev(dev);
	}
	mutex_unlock(&ucb1x00_mutex);
}
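
/*
 * Example (illustrative only): a minimal sub-driver registers itself with
 * the core and receives an add() callback for every UCB1x00 chip present.
 * my_ucb_add, my_ucb_remove and my_ucb_driver are placeholder names.
 *
 *	static int my_ucb_add(struct ucb1x00_dev *dev)
 *	{
 *		dev_info(&dev->ucb->dev, "found UCB ID %04x\n", dev->ucb->id);
 *		return 0;
 *	}
 *
 *	static void my_ucb_remove(struct ucb1x00_dev *dev)
 *	{
 *	}
 *
 *	static struct ucb1x00_driver my_ucb_driver = {
 *		.add	= my_ucb_add,
 *		.remove	= my_ucb_remove,
 *	};
 *
 *	// from the sub-driver's module init/exit:
 *	ucb1x00_register_driver(&my_ucb_driver);
 *	ucb1x00_unregister_driver(&my_ucb_driver);
 */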
#ifdef CONFIG_PM_SLEEP
static int ucb1x00_suspend(struct device *dev)
{
	struct ucb1x00_plat_data *pdata = dev_get_platdata(dev);
	struct ucb1x00 *ucb = dev_get_drvdata(dev);
	struct ucb1x00_dev *udev;

	mutex_lock(&ucb1x00_mutex);
	list_for_each_entry(udev, &ucb->devs, dev_node) {
		if (udev->drv->suspend)
			udev->drv->suspend(udev);
	}
	mutex_unlock(&ucb1x00_mutex);

	if (ucb->irq_wake) {
		unsigned long flags;

		raw_spin_lock_irqsave(&ucb->irq_lock, flags);
		ucb1x00_enable(ucb);
		ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl &
				  ucb->irq_wake);
		ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl &
				  ucb->irq_wake);
		ucb1x00_disable(ucb);
		raw_spin_unlock_irqrestore(&ucb->irq_lock, flags);

		enable_irq_wake(ucb->irq);
	} else if (pdata && pdata->reset)
		pdata->reset(UCB_RST_SUSPEND);

	return 0;
}

static int ucb1x00_resume(struct device *dev)
{
	struct ucb1x00_plat_data *pdata = dev_get_platdata(dev);
	struct ucb1x00 *ucb = dev_get_drvdata(dev);
	struct ucb1x00_dev *udev;

	if (!ucb->irq_wake && pdata && pdata->reset)
		pdata->reset(UCB_RST_RESUME);

	ucb1x00_enable(ucb);
	ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
	ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);

	if (ucb->irq_wake) {
		unsigned long flags;

		raw_spin_lock_irqsave(&ucb->irq_lock, flags);
		ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl &
				  ucb->irq_mask);
		ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl &
				  ucb->irq_mask);
		raw_spin_unlock_irqrestore(&ucb->irq_lock, flags);

		disable_irq_wake(ucb->irq);
	}
	ucb1x00_disable(ucb);

	mutex_lock(&ucb1x00_mutex);
	list_for_each_entry(udev, &ucb->devs, dev_node) {
		if (udev->drv->resume)
			udev->drv->resume(udev);
	}
	mutex_unlock(&ucb1x00_mutex);

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(ucb1x00_pm_ops, ucb1x00_suspend, ucb1x00_resume);

static struct mcp_driver ucb1x00_driver = {
	.drv = {
		.name = "ucb1x00",
		.owner = THIS_MODULE,
		.pm = &ucb1x00_pm_ops,
	},
	.probe = ucb1x00_probe,
	.remove = ucb1x00_remove,
};

static int __init ucb1x00_init(void)
{
	int ret = class_register(&ucb1x00_class);
	if (ret == 0) {
		ret = mcp_driver_register(&ucb1x00_driver);
		if (ret)
			class_unregister(&ucb1x00_class);
	}
	return ret;
}

static void __exit ucb1x00_exit(void)
{
	mcp_driver_unregister(&ucb1x00_driver);
	class_unregister(&ucb1x00_class);
}

module_init(ucb1x00_init);
module_exit(ucb1x00_exit);

EXPORT_SYMBOL(ucb1x00_io_set_dir);
EXPORT_SYMBOL(ucb1x00_io_write);
EXPORT_SYMBOL(ucb1x00_io_read);

EXPORT_SYMBOL(ucb1x00_adc_enable);
EXPORT_SYMBOL(ucb1x00_adc_read);
EXPORT_SYMBOL(ucb1x00_adc_disable);

EXPORT_SYMBOL(ucb1x00_register_driver);
EXPORT_SYMBOL(ucb1x00_unregister_driver);

MODULE_ALIAS("mcp:ucb1x00");
MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
MODULE_DESCRIPTION("UCB1x00 core driver");
MODULE_LICENSE("GPL");