nuvoton-cir.c

/*
 * Driver for Nuvoton Technology Corporation w83667hg/w83677hg-i CIR
 *
 * Copyright (C) 2010 Jarod Wilson <jarod@redhat.com>
 * Copyright (C) 2009 Nuvoton PS Team
 *
 * Special thanks to Nuvoton for providing hardware, spec sheets and
 * sample code upon which portions of this driver are based. Indirect
 * thanks also to Maxim Levitsky, whose ene_ir driver this driver is
 * modeled after.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pnp.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <media/rc-core.h>
#include <linux/pci_ids.h>

#include "nuvoton-cir.h"

static void nvt_clear_cir_wake_fifo(struct nvt_dev *nvt);

static const struct nvt_chip nvt_chips[] = {
	{ "w83667hg", NVT_W83667HG },
	{ "NCT6775F", NVT_6775F },
	{ "NCT6776F", NVT_6776F },
	{ "NCT6779D", NVT_6779D },
};

static inline struct device *nvt_get_dev(const struct nvt_dev *nvt)
{
	return nvt->rdev->dev.parent;
}

static inline bool is_w83667hg(struct nvt_dev *nvt)
{
	return nvt->chip_ver == NVT_W83667HG;
}

/* write val to config reg */
static inline void nvt_cr_write(struct nvt_dev *nvt, u8 val, u8 reg)
{
	outb(reg, nvt->cr_efir);
	outb(val, nvt->cr_efdr);
}

/* read val from config reg */
static inline u8 nvt_cr_read(struct nvt_dev *nvt, u8 reg)
{
	outb(reg, nvt->cr_efir);
	return inb(nvt->cr_efdr);
}

/* update config register bit without changing other bits */
static inline void nvt_set_reg_bit(struct nvt_dev *nvt, u8 val, u8 reg)
{
	u8 tmp = nvt_cr_read(nvt, reg) | val;

	nvt_cr_write(nvt, tmp, reg);
}

/* clear config register bit without changing other bits */
static inline void nvt_clear_reg_bit(struct nvt_dev *nvt, u8 val, u8 reg)
{
	u8 tmp = nvt_cr_read(nvt, reg) & ~val;

	nvt_cr_write(nvt, tmp, reg);
}

/* enter extended function mode */
static inline int nvt_efm_enable(struct nvt_dev *nvt)
{
	if (!request_muxed_region(nvt->cr_efir, 2, NVT_DRIVER_NAME))
		return -EBUSY;

	/* Enabling Extended Function Mode explicitly requires writing 2x */
	outb(EFER_EFM_ENABLE, nvt->cr_efir);
	outb(EFER_EFM_ENABLE, nvt->cr_efir);

	return 0;
}

/* exit extended function mode */
static inline void nvt_efm_disable(struct nvt_dev *nvt)
{
	outb(EFER_EFM_DISABLE, nvt->cr_efir);

	release_region(nvt->cr_efir, 2);
}

/*
 * When you want to address a specific logical device, write its logical
 * device number to CR_LOGICAL_DEV_SEL, then enable/disable by writing
 * 0x1/0x0 respectively to CR_LOGICAL_DEV_EN.
 */
static inline void nvt_select_logical_dev(struct nvt_dev *nvt, u8 ldev)
{
	nvt_cr_write(nvt, ldev, CR_LOGICAL_DEV_SEL);
}

/* select and enable logical device, entering and leaving EFM mode as needed */
static inline void nvt_enable_logical_dev(struct nvt_dev *nvt, u8 ldev)
{
	nvt_efm_enable(nvt);
	nvt_select_logical_dev(nvt, ldev);
	nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
	nvt_efm_disable(nvt);
}

/* select and disable logical device, entering and leaving EFM mode as needed */
static inline void nvt_disable_logical_dev(struct nvt_dev *nvt, u8 ldev)
{
	nvt_efm_enable(nvt);
	nvt_select_logical_dev(nvt, ldev);
	nvt_cr_write(nvt, LOGICAL_DEV_DISABLE, CR_LOGICAL_DEV_EN);
	nvt_efm_disable(nvt);
}

/* write val to cir config register */
static inline void nvt_cir_reg_write(struct nvt_dev *nvt, u8 val, u8 offset)
{
	outb(val, nvt->cir_addr + offset);
}

/* read val from cir config register */
static u8 nvt_cir_reg_read(struct nvt_dev *nvt, u8 offset)
{
	return inb(nvt->cir_addr + offset);
}

/* write val to cir wake register */
static inline void nvt_cir_wake_reg_write(struct nvt_dev *nvt,
					  u8 val, u8 offset)
{
	outb(val, nvt->cir_wake_addr + offset);
}

/* read val from cir wake config register */
static u8 nvt_cir_wake_reg_read(struct nvt_dev *nvt, u8 offset)
{
	return inb(nvt->cir_wake_addr + offset);
}

/* don't override io address if one is set already */
static void nvt_set_ioaddr(struct nvt_dev *nvt, unsigned long *ioaddr)
{
	unsigned long old_addr;

	old_addr = nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8;
	old_addr |= nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO);

	if (old_addr)
		*ioaddr = old_addr;
	else {
		nvt_cr_write(nvt, *ioaddr >> 8, CR_CIR_BASE_ADDR_HI);
		nvt_cr_write(nvt, *ioaddr & 0xff, CR_CIR_BASE_ADDR_LO);
	}
}

static void nvt_write_wakeup_codes(struct rc_dev *dev,
				   const u8 *wbuf, int count)
{
	u8 tolerance, config;
	struct nvt_dev *nvt = dev->priv;
	unsigned long flags;
	int i;

	/* hardcode the tolerance to 10% */
	tolerance = DIV_ROUND_UP(count, 10);

	spin_lock_irqsave(&nvt->lock, flags);

	nvt_clear_cir_wake_fifo(nvt);
	nvt_cir_wake_reg_write(nvt, count, CIR_WAKE_FIFO_CMP_DEEP);
	nvt_cir_wake_reg_write(nvt, tolerance, CIR_WAKE_FIFO_CMP_TOL);

	config = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON);

	/* enable writes to wake fifo */
	nvt_cir_wake_reg_write(nvt, config | CIR_WAKE_IRCON_MODE1,
			       CIR_WAKE_IRCON);

	if (count)
		pr_info("Wake samples (%d) =", count);
	else
		pr_info("Wake sample fifo cleared");

	for (i = 0; i < count; i++)
		nvt_cir_wake_reg_write(nvt, wbuf[i], CIR_WAKE_WR_FIFO_DATA);

	nvt_cir_wake_reg_write(nvt, config, CIR_WAKE_IRCON);

	spin_unlock_irqrestore(&nvt->lock, flags);
}

static ssize_t wakeup_data_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct rc_dev *rc_dev = to_rc_dev(dev);
	struct nvt_dev *nvt = rc_dev->priv;
	int fifo_len, duration;
	unsigned long flags;
	ssize_t buf_len = 0;
	int i;

	spin_lock_irqsave(&nvt->lock, flags);

	fifo_len = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT);
	fifo_len = min(fifo_len, WAKEUP_MAX_SIZE);

	/* go to first element to be read */
	while (nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX))
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY);

	for (i = 0; i < fifo_len; i++) {
		duration = nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY);
		duration = (duration & BUF_LEN_MASK) * SAMPLE_PERIOD;
		buf_len += snprintf(buf + buf_len, PAGE_SIZE - buf_len,
				    "%d ", duration);
	}
	buf_len += snprintf(buf + buf_len, PAGE_SIZE - buf_len, "\n");

	spin_unlock_irqrestore(&nvt->lock, flags);

	return buf_len;
}

static ssize_t wakeup_data_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t len)
{
	struct rc_dev *rc_dev = to_rc_dev(dev);
	u8 wake_buf[WAKEUP_MAX_SIZE];
	char **argv;
	int i, count;
	unsigned int val;
	ssize_t ret;

	argv = argv_split(GFP_KERNEL, buf, &count);
	if (!argv)
		return -ENOMEM;
	if (!count || count > WAKEUP_MAX_SIZE) {
		ret = -EINVAL;
		goto out;
	}

	for (i = 0; i < count; i++) {
		ret = kstrtouint(argv[i], 10, &val);
		if (ret)
			goto out;

		val = DIV_ROUND_CLOSEST(val, SAMPLE_PERIOD);
		if (!val || val > 0x7f) {
			ret = -EINVAL;
			goto out;
		}

		wake_buf[i] = val;
		/* sequence must start with a pulse */
		if (i % 2 == 0)
			wake_buf[i] |= BUF_PULSE_BIT;
	}

	nvt_write_wakeup_codes(rc_dev, wake_buf, count);

	ret = len;
out:
	argv_free(argv);
	return ret;
}
static DEVICE_ATTR_RW(wakeup_data);

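/*
 * Illustrative userspace usage of the wakeup_data attribute above (a sketch,
 * not taken from this file; the rcN index and the durations are examples and
 * depend on the system):
 *
 *   echo "900 850 450 900" > /sys/class/rc/rc0/wakeup_data
 *   cat /sys/class/rc/rc0/wakeup_data
 *
 * Each value is a duration in microseconds and is converted to SAMPLE_PERIOD
 * units in wakeup_data_store(); entries at even (0-based) indices are flagged
 * as pulses via BUF_PULSE_BIT, so the sequence always starts with a pulse.
 */
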
/* dump current cir register contents */
static void cir_dump_regs(struct nvt_dev *nvt)
{
	nvt_efm_enable(nvt);
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);

	pr_info("%s: Dump CIR logical device registers:\n", NVT_DRIVER_NAME);
	pr_info(" * CR CIR ACTIVE : 0x%x\n",
		nvt_cr_read(nvt, CR_LOGICAL_DEV_EN));
	pr_info(" * CR CIR BASE ADDR: 0x%x\n",
		(nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8) |
		nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO));
	pr_info(" * CR CIR IRQ NUM: 0x%x\n",
		nvt_cr_read(nvt, CR_CIR_IRQ_RSRC));

	nvt_efm_disable(nvt);

	pr_info("%s: Dump CIR registers:\n", NVT_DRIVER_NAME);
	pr_info(" * IRCON: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRCON));
	pr_info(" * IRSTS: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRSTS));
	pr_info(" * IREN: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IREN));
	pr_info(" * RXFCONT: 0x%x\n", nvt_cir_reg_read(nvt, CIR_RXFCONT));
	pr_info(" * CP: 0x%x\n", nvt_cir_reg_read(nvt, CIR_CP));
	pr_info(" * CC: 0x%x\n", nvt_cir_reg_read(nvt, CIR_CC));
	pr_info(" * SLCH: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SLCH));
	pr_info(" * SLCL: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SLCL));
	pr_info(" * FIFOCON: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FIFOCON));
	pr_info(" * IRFIFOSTS: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRFIFOSTS));
	pr_info(" * SRXFIFO: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SRXFIFO));
	pr_info(" * TXFCONT: 0x%x\n", nvt_cir_reg_read(nvt, CIR_TXFCONT));
	pr_info(" * STXFIFO: 0x%x\n", nvt_cir_reg_read(nvt, CIR_STXFIFO));
	pr_info(" * FCCH: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FCCH));
	pr_info(" * FCCL: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FCCL));
	pr_info(" * IRFSM: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRFSM));
}

/* dump current cir wake register contents */
static void cir_wake_dump_regs(struct nvt_dev *nvt)
{
	u8 i, fifo_len;

	nvt_efm_enable(nvt);
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);

	pr_info("%s: Dump CIR WAKE logical device registers:\n",
		NVT_DRIVER_NAME);
	pr_info(" * CR CIR WAKE ACTIVE : 0x%x\n",
		nvt_cr_read(nvt, CR_LOGICAL_DEV_EN));
	pr_info(" * CR CIR WAKE BASE ADDR: 0x%x\n",
		(nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8) |
		nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO));
	pr_info(" * CR CIR WAKE IRQ NUM: 0x%x\n",
		nvt_cr_read(nvt, CR_CIR_IRQ_RSRC));

	nvt_efm_disable(nvt);

	pr_info("%s: Dump CIR WAKE registers\n", NVT_DRIVER_NAME);
	pr_info(" * IRCON: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON));
	pr_info(" * IRSTS: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRSTS));
	pr_info(" * IREN: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_IREN));
	pr_info(" * FIFO CMP DEEP: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_CMP_DEEP));
	pr_info(" * FIFO CMP TOL: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_CMP_TOL));
	pr_info(" * FIFO COUNT: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT));
	pr_info(" * SLCH: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_SLCH));
	pr_info(" * SLCL: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_SLCL));
	pr_info(" * FIFOCON: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFOCON));
	pr_info(" * SRXFSTS: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_SRXFSTS));
	pr_info(" * SAMPLE RX FIFO: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_SAMPLE_RX_FIFO));
	pr_info(" * WR FIFO DATA: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_WR_FIFO_DATA));
	pr_info(" * RD FIFO ONLY: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY));
	pr_info(" * RD FIFO ONLY IDX: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX));
	pr_info(" * FIFO IGNORE: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_IGNORE));
	pr_info(" * IRFSM: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRFSM));

	fifo_len = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT);
	pr_info("%s: Dump CIR WAKE FIFO (len %d)\n", NVT_DRIVER_NAME, fifo_len);
	pr_info("* Contents =");
	for (i = 0; i < fifo_len; i++)
		pr_cont(" %02x",
			nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY));
	pr_cont("\n");
}

static inline const char *nvt_find_chip(struct nvt_dev *nvt, int id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(nvt_chips); i++)
		if ((id & SIO_ID_MASK) == nvt_chips[i].chip_ver) {
			nvt->chip_ver = nvt_chips[i].chip_ver;
			return nvt_chips[i].name;
		}

	return NULL;
}

/* detect hardware features */
static int nvt_hw_detect(struct nvt_dev *nvt)
{
	struct device *dev = nvt_get_dev(nvt);
	const char *chip_name;
	int chip_id;

	nvt_efm_enable(nvt);

	/* Check if we're wired for the alternate EFER setup */
	nvt->chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI);
	if (nvt->chip_major == 0xff) {
		nvt_efm_disable(nvt);
		nvt->cr_efir = CR_EFIR2;
		nvt->cr_efdr = CR_EFDR2;
		nvt_efm_enable(nvt);
		nvt->chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI);
	}
	nvt->chip_minor = nvt_cr_read(nvt, CR_CHIP_ID_LO);

	nvt_efm_disable(nvt);

	chip_id = nvt->chip_major << 8 | nvt->chip_minor;
	if (chip_id == NVT_INVALID) {
		dev_err(dev, "No device found on either EFM port\n");
		return -ENODEV;
	}

	chip_name = nvt_find_chip(nvt, chip_id);

	/* warn, but still let the driver load, if we don't know this chip */
	if (!chip_name)
		dev_warn(dev,
			 "unknown chip, id: 0x%02x 0x%02x, it may not work...",
			 nvt->chip_major, nvt->chip_minor);
	else
		dev_info(dev, "found %s or compatible: chip id: 0x%02x 0x%02x",
			 chip_name, nvt->chip_major, nvt->chip_minor);

	return 0;
}

static void nvt_cir_ldev_init(struct nvt_dev *nvt)
{
	u8 val, psreg, psmask, psval;

	if (is_w83667hg(nvt)) {
		psreg = CR_MULTIFUNC_PIN_SEL;
		psmask = MULTIFUNC_PIN_SEL_MASK;
		psval = MULTIFUNC_ENABLE_CIR | MULTIFUNC_ENABLE_CIRWB;
	} else {
		psreg = CR_OUTPUT_PIN_SEL;
		psmask = OUTPUT_PIN_SEL_MASK;
		psval = OUTPUT_ENABLE_CIR | OUTPUT_ENABLE_CIRWB;
	}

	/* output pin selection: enable CIR, with WB sensor enabled */
	val = nvt_cr_read(nvt, psreg);
	val &= psmask;
	val |= psval;
	nvt_cr_write(nvt, val, psreg);

	/* Select CIR logical device */
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);

	nvt_set_ioaddr(nvt, &nvt->cir_addr);

	nvt_cr_write(nvt, nvt->cir_irq, CR_CIR_IRQ_RSRC);

	nvt_dbg("CIR initialized, base io port address: 0x%lx, irq: %d",
		nvt->cir_addr, nvt->cir_irq);
}

static void nvt_cir_wake_ldev_init(struct nvt_dev *nvt)
{
	/* Select ACPI logical device and enable it */
	nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI);
	nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);

	/* Enable CIR Wake via PSOUT# (Pin60) */
	nvt_set_reg_bit(nvt, CIR_WAKE_ENABLE_BIT, CR_ACPI_CIR_WAKE);

	/* enable pme interrupt of cir wakeup event */
	nvt_set_reg_bit(nvt, PME_INTR_CIR_PASS_BIT, CR_ACPI_IRQ_EVENTS2);

	/* Select CIR Wake logical device */
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);

	nvt_set_ioaddr(nvt, &nvt->cir_wake_addr);

	nvt_dbg("CIR Wake initialized, base io port address: 0x%lx",
		nvt->cir_wake_addr);
}

/* clear out the hardware's cir rx fifo */
static void nvt_clear_cir_fifo(struct nvt_dev *nvt)
{
	u8 val = nvt_cir_reg_read(nvt, CIR_FIFOCON);

	nvt_cir_reg_write(nvt, val | CIR_FIFOCON_RXFIFOCLR, CIR_FIFOCON);
}

/* clear out the hardware's cir wake rx fifo */
static void nvt_clear_cir_wake_fifo(struct nvt_dev *nvt)
{
	u8 val, config;

	config = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON);

	/* clearing wake fifo works in learning mode only */
	nvt_cir_wake_reg_write(nvt, config & ~CIR_WAKE_IRCON_MODE0,
			       CIR_WAKE_IRCON);

	val = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFOCON);
	nvt_cir_wake_reg_write(nvt, val | CIR_WAKE_FIFOCON_RXFIFOCLR,
			       CIR_WAKE_FIFOCON);

	nvt_cir_wake_reg_write(nvt, config, CIR_WAKE_IRCON);
}

/* clear out the hardware's cir tx fifo */
static void nvt_clear_tx_fifo(struct nvt_dev *nvt)
{
	u8 val;

	val = nvt_cir_reg_read(nvt, CIR_FIFOCON);
	nvt_cir_reg_write(nvt, val | CIR_FIFOCON_TXFIFOCLR, CIR_FIFOCON);
}

/* enable RX Trigger Level Reach and Packet End interrupts */
static void nvt_set_cir_iren(struct nvt_dev *nvt)
{
	u8 iren;

	iren = CIR_IREN_RTR | CIR_IREN_PE | CIR_IREN_RFO;
	nvt_cir_reg_write(nvt, iren, CIR_IREN);
}

static void nvt_cir_regs_init(struct nvt_dev *nvt)
{
	nvt_enable_logical_dev(nvt, LOGICAL_DEV_CIR);

	/* set sample limit count (PE interrupt raised when reached) */
	nvt_cir_reg_write(nvt, CIR_RX_LIMIT_COUNT >> 8, CIR_SLCH);
	nvt_cir_reg_write(nvt, CIR_RX_LIMIT_COUNT & 0xff, CIR_SLCL);

	/* set fifo irq trigger levels */
	nvt_cir_reg_write(nvt, CIR_FIFOCON_TX_TRIGGER_LEV |
			  CIR_FIFOCON_RX_TRIGGER_LEV, CIR_FIFOCON);

	/* clear hardware rx and tx fifos */
	nvt_clear_cir_fifo(nvt);
	nvt_clear_tx_fifo(nvt);

	nvt_disable_logical_dev(nvt, LOGICAL_DEV_CIR);
}

static void nvt_cir_wake_regs_init(struct nvt_dev *nvt)
{
	nvt_enable_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);

	/*
	 * Disable RX, set specific carrier on = low, off = high,
	 * and sample period (currently 50us)
	 */
	nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 |
			       CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV |
			       CIR_WAKE_IRCON_SAMPLE_PERIOD_SEL,
			       CIR_WAKE_IRCON);

	/* clear any and all stray interrupts */
	nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS);
}

static void nvt_enable_wake(struct nvt_dev *nvt)
{
	unsigned long flags;

	nvt_efm_enable(nvt);

	nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI);
	nvt_set_reg_bit(nvt, CIR_WAKE_ENABLE_BIT, CR_ACPI_CIR_WAKE);
	nvt_set_reg_bit(nvt, PME_INTR_CIR_PASS_BIT, CR_ACPI_IRQ_EVENTS2);

	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);
	nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);

	nvt_efm_disable(nvt);

	spin_lock_irqsave(&nvt->lock, flags);

	nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 | CIR_WAKE_IRCON_RXEN |
			       CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV |
			       CIR_WAKE_IRCON_SAMPLE_PERIOD_SEL,
			       CIR_WAKE_IRCON);
	nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS);
	nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IREN);

	spin_unlock_irqrestore(&nvt->lock, flags);
}

#if 0 /* Currently unused */
/* rx carrier detect only works in learning mode, must be called w/lock */
static u32 nvt_rx_carrier_detect(struct nvt_dev *nvt)
{
	u32 count, carrier, duration = 0;
	int i;

	count = nvt_cir_reg_read(nvt, CIR_FCCL) |
		nvt_cir_reg_read(nvt, CIR_FCCH) << 8;

	for (i = 0; i < nvt->pkts; i++) {
		if (nvt->buf[i] & BUF_PULSE_BIT)
			duration += nvt->buf[i] & BUF_LEN_MASK;
	}

	duration *= SAMPLE_PERIOD;

	if (!count || !duration) {
		dev_notice(nvt_get_dev(nvt),
			   "Unable to determine carrier! (c:%u, d:%u)",
			   count, duration);
		return 0;
	}

	carrier = MS_TO_NS(count) / duration;

	if ((carrier > MAX_CARRIER) || (carrier < MIN_CARRIER))
		nvt_dbg("WTF? Carrier frequency out of range!");

	nvt_dbg("Carrier frequency: %u (count %u, duration %u)",
		carrier, count, duration);

	return carrier;
}
#endif

/*
 * set carrier frequency
 *
 * set carrier on 2 registers: CP & CC
 * always set CP as 0x81
 * set CC by SPEC, CC = 3MHz/carrier - 1
 */
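/*
 * Worked example of the formula above (illustrative values, not from the
 * spec sheet): for a 38 kHz carrier, 3000000 / 38000 - 1 gives 77 (0x4d)
 * with C integer division; for 36 kHz it gives 82 (0x52). Only the low
 * byte of the result is written to CIR_CC below.
 */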
static int nvt_set_tx_carrier(struct rc_dev *dev, u32 carrier)
{
	struct nvt_dev *nvt = dev->priv;
	u16 val;

	if (carrier == 0)
		return -EINVAL;

	nvt_cir_reg_write(nvt, 1, CIR_CP);
	val = 3000000 / (carrier) - 1;
	nvt_cir_reg_write(nvt, val & 0xff, CIR_CC);

	nvt_dbg("cp: 0x%x cc: 0x%x\n",
		nvt_cir_reg_read(nvt, CIR_CP), nvt_cir_reg_read(nvt, CIR_CC));

	return 0;
}

static int nvt_ir_raw_set_wakeup_filter(struct rc_dev *dev,
					struct rc_scancode_filter *sc_filter)
{
	u8 buf_val;
	int i, ret, count;
	unsigned int val;
	struct ir_raw_event *raw;
	u8 wake_buf[WAKEUP_MAX_SIZE];
	bool complete;

	/* Require mask to be set */
	if (!sc_filter->mask)
		return 0;

	raw = kmalloc_array(WAKEUP_MAX_SIZE, sizeof(*raw), GFP_KERNEL);
	if (!raw)
		return -ENOMEM;

	ret = ir_raw_encode_scancode(dev->wakeup_protocol, sc_filter->data,
				     raw, WAKEUP_MAX_SIZE);
	complete = (ret != -ENOBUFS);
	if (!complete)
		ret = WAKEUP_MAX_SIZE;
	else if (ret < 0)
		goto out_raw;

	/* Inspect the ir samples */
	for (i = 0, count = 0; i < ret && count < WAKEUP_MAX_SIZE; ++i) {
		/* NS to US */
		val = DIV_ROUND_UP(raw[i].duration, 1000L) / SAMPLE_PERIOD;

		/* Split too large values into several smaller ones */
		while (val > 0 && count < WAKEUP_MAX_SIZE) {
			/* Skip last value for better comparison tolerance */
			if (complete && i == ret - 1 && val < BUF_LEN_MASK)
				break;

			/* Clamp values to BUF_LEN_MASK at most */
			buf_val = (val > BUF_LEN_MASK) ? BUF_LEN_MASK : val;

			wake_buf[count] = buf_val;
			val -= buf_val;
			if ((raw[i]).pulse)
				wake_buf[count] |= BUF_PULSE_BIT;
			count++;
		}
	}

	nvt_write_wakeup_codes(dev, wake_buf, count);

	ret = 0;
out_raw:
	kfree(raw);

	return ret;
}

/* dump contents of the last rx buffer we got from the hw rx fifo */
static void nvt_dump_rx_buf(struct nvt_dev *nvt)
{
	int i;

	printk(KERN_DEBUG "%s (len %d): ", __func__, nvt->pkts);
	for (i = 0; (i < nvt->pkts) && (i < RX_BUF_LEN); i++)
		printk(KERN_CONT "0x%02x ", nvt->buf[i]);
	printk(KERN_CONT "\n");
}

/*
 * Process raw data in rx driver buffer, store it in raw IR event kfifo,
 * trigger decode when appropriate.
 *
 * We get IR data samples one byte at a time. If the msb is set, it's a pulse,
 * otherwise it's a space. The lower 7 bits are the count of SAMPLE_PERIOD
 * (default 50us) intervals for that pulse/space. A discrete signal is
 * followed by a series of 0x7f packets, then either 0x7<something> or 0x80
 * to signal more IR coming (repeats) or end of IR, respectively. We store
 * sample data in the raw event kfifo until we see 0x7<something> (except f)
 * or 0x80, at which time, we trigger a decode operation.
 */
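/*
 * Illustrative decode of a few sample bytes (made-up values, not captured
 * from real hardware): 0x90 = BUF_PULSE_BIT | 0x10, a pulse of
 * 16 * SAMPLE_PERIOD = 800us; 0x23 = a space of 35 * 50us = 1750us;
 * 0x7f = a maximum-length space whose duration continues in the next byte.
 */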
static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
{
	DEFINE_IR_RAW_EVENT(rawir);
	u8 sample;
	int i;

	nvt_dbg_verbose("%s firing", __func__);

	if (debug)
		nvt_dump_rx_buf(nvt);

	nvt_dbg_verbose("Processing buffer of len %d", nvt->pkts);

	for (i = 0; i < nvt->pkts; i++) {
		sample = nvt->buf[i];

		rawir.pulse = ((sample & BUF_PULSE_BIT) != 0);
		rawir.duration = US_TO_NS((sample & BUF_LEN_MASK)
					  * SAMPLE_PERIOD);

		nvt_dbg("Storing %s with duration %d",
			rawir.pulse ? "pulse" : "space", rawir.duration);

		ir_raw_event_store_with_filter(nvt->rdev, &rawir);
	}

	nvt->pkts = 0;

	nvt_dbg("Calling ir_raw_event_handle\n");
	ir_raw_event_handle(nvt->rdev);

	nvt_dbg_verbose("%s done", __func__);
}

static void nvt_handle_rx_fifo_overrun(struct nvt_dev *nvt)
{
	dev_warn(nvt_get_dev(nvt), "RX FIFO overrun detected, flushing data!");

	nvt->pkts = 0;
	nvt_clear_cir_fifo(nvt);
	ir_raw_event_reset(nvt->rdev);
}

/* copy data from hardware rx fifo into driver buffer */
static void nvt_get_rx_ir_data(struct nvt_dev *nvt)
{
	u8 fifocount;
	int i;

	/* Get count of how many bytes to read from RX FIFO */
	fifocount = nvt_cir_reg_read(nvt, CIR_RXFCONT);

	nvt_dbg("attempting to fetch %u bytes from hw rx fifo", fifocount);

	/* Read fifocount bytes from CIR Sample RX FIFO register */
	for (i = 0; i < fifocount; i++)
		nvt->buf[i] = nvt_cir_reg_read(nvt, CIR_SRXFIFO);

	nvt->pkts = fifocount;
	nvt_dbg("%s: pkts now %d", __func__, nvt->pkts);

	nvt_process_rx_ir_data(nvt);
}

static void nvt_cir_log_irqs(u8 status, u8 iren)
{
	nvt_dbg("IRQ 0x%02x (IREN 0x%02x) :%s%s%s%s%s%s%s%s%s",
		status, iren,
		status & CIR_IRSTS_RDR ? " RDR" : "",
		status & CIR_IRSTS_RTR ? " RTR" : "",
		status & CIR_IRSTS_PE ? " PE" : "",
		status & CIR_IRSTS_RFO ? " RFO" : "",
		status & CIR_IRSTS_TE ? " TE" : "",
		status & CIR_IRSTS_TTR ? " TTR" : "",
		status & CIR_IRSTS_TFU ? " TFU" : "",
		status & CIR_IRSTS_GH ? " GH" : "",
		status & ~(CIR_IRSTS_RDR | CIR_IRSTS_RTR | CIR_IRSTS_PE |
			   CIR_IRSTS_RFO | CIR_IRSTS_TE | CIR_IRSTS_TTR |
			   CIR_IRSTS_TFU | CIR_IRSTS_GH) ? " ?" : "");
}

/* interrupt service routine for incoming and outgoing CIR data */
static irqreturn_t nvt_cir_isr(int irq, void *data)
{
	struct nvt_dev *nvt = data;
	u8 status, iren;

	nvt_dbg_verbose("%s firing", __func__);

	spin_lock(&nvt->lock);

	/*
	 * Get IR Status register contents. Write 1 to ack/clear
	 *
	 * bit: reg name - description
	 *   7: CIR_IRSTS_RDR - RX Data Ready
	 *   6: CIR_IRSTS_RTR - RX FIFO Trigger Level Reach
	 *   5: CIR_IRSTS_PE - Packet End
	 *   4: CIR_IRSTS_RFO - RX FIFO Overrun (RDR will also be set)
	 *   3: CIR_IRSTS_TE - TX FIFO Empty
	 *   2: CIR_IRSTS_TTR - TX FIFO Trigger Level Reach
	 *   1: CIR_IRSTS_TFU - TX FIFO Underrun
	 *   0: CIR_IRSTS_GH - Min Length Detected
	 */
	status = nvt_cir_reg_read(nvt, CIR_IRSTS);
	iren = nvt_cir_reg_read(nvt, CIR_IREN);

	/* At least NCT6779D creates a spurious interrupt when the
	 * logical device is being disabled.
	 */
	if (status == 0xff && iren == 0xff) {
		spin_unlock(&nvt->lock);
		nvt_dbg_verbose("Spurious interrupt detected");
		return IRQ_HANDLED;
	}

	/* IRQ may be shared with CIR WAKE, therefore check for each
	 * status bit whether the related interrupt source is enabled
	 */
	if (!(status & iren)) {
		spin_unlock(&nvt->lock);
		nvt_dbg_verbose("%s exiting, IRSTS 0x0", __func__);
		return IRQ_NONE;
	}

	/* ack/clear all irq flags we've got */
	nvt_cir_reg_write(nvt, status, CIR_IRSTS);
	nvt_cir_reg_write(nvt, 0, CIR_IRSTS);

	nvt_cir_log_irqs(status, iren);

	if (status & CIR_IRSTS_RFO)
		nvt_handle_rx_fifo_overrun(nvt);
	else if (status & (CIR_IRSTS_RTR | CIR_IRSTS_PE))
		nvt_get_rx_ir_data(nvt);

	spin_unlock(&nvt->lock);

	nvt_dbg_verbose("%s done", __func__);
	return IRQ_HANDLED;
}

static void nvt_enable_cir(struct nvt_dev *nvt)
{
	unsigned long flags;

	/* enable the CIR logical device */
	nvt_enable_logical_dev(nvt, LOGICAL_DEV_CIR);

	spin_lock_irqsave(&nvt->lock, flags);

	/*
	 * Enable TX and RX, specify carrier on = low, off = high, and set
	 * sample period (currently 50us)
	 */
	nvt_cir_reg_write(nvt, CIR_IRCON_TXEN | CIR_IRCON_RXEN |
			  CIR_IRCON_RXINV | CIR_IRCON_SAMPLE_PERIOD_SEL,
			  CIR_IRCON);

	/* clear all pending interrupts */
	nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);

	/* enable interrupts */
	nvt_set_cir_iren(nvt);

	spin_unlock_irqrestore(&nvt->lock, flags);
}

static void nvt_disable_cir(struct nvt_dev *nvt)
{
	unsigned long flags;

	spin_lock_irqsave(&nvt->lock, flags);

	/* disable CIR interrupts */
	nvt_cir_reg_write(nvt, 0, CIR_IREN);

	/* clear any and all pending interrupts */
	nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);

	/* clear all function enable flags */
	nvt_cir_reg_write(nvt, 0, CIR_IRCON);

	/* clear hardware rx and tx fifos */
	nvt_clear_cir_fifo(nvt);
	nvt_clear_tx_fifo(nvt);

	spin_unlock_irqrestore(&nvt->lock, flags);

	/* disable the CIR logical device */
	nvt_disable_logical_dev(nvt, LOGICAL_DEV_CIR);
}

static int nvt_open(struct rc_dev *dev)
{
	struct nvt_dev *nvt = dev->priv;

	nvt_enable_cir(nvt);

	return 0;
}

static void nvt_close(struct rc_dev *dev)
{
	struct nvt_dev *nvt = dev->priv;

	nvt_disable_cir(nvt);
}

/* Allocate memory, probe hardware, and initialize everything */
static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
{
	struct nvt_dev *nvt;
	struct rc_dev *rdev;
	int ret;

	nvt = devm_kzalloc(&pdev->dev, sizeof(struct nvt_dev), GFP_KERNEL);
	if (!nvt)
		return -ENOMEM;

	/* input device for IR remote */
	nvt->rdev = devm_rc_allocate_device(&pdev->dev, RC_DRIVER_IR_RAW);
	if (!nvt->rdev)
		return -ENOMEM;
	rdev = nvt->rdev;

	/* activate pnp device */
	ret = pnp_activate_dev(pdev);
	if (ret) {
		dev_err(&pdev->dev, "Could not activate PNP device!\n");
		return ret;
	}

	/* validate pnp resources */
	if (!pnp_port_valid(pdev, 0) ||
	    pnp_port_len(pdev, 0) < CIR_IOREG_LENGTH) {
		dev_err(&pdev->dev, "IR PNP Port not valid!\n");
		return -EINVAL;
	}

	if (!pnp_irq_valid(pdev, 0)) {
		dev_err(&pdev->dev, "PNP IRQ not valid!\n");
		return -EINVAL;
	}

	if (!pnp_port_valid(pdev, 1) ||
	    pnp_port_len(pdev, 1) < CIR_IOREG_LENGTH) {
		dev_err(&pdev->dev, "Wake PNP Port not valid!\n");
		return -EINVAL;
	}

	nvt->cir_addr = pnp_port_start(pdev, 0);
	nvt->cir_irq = pnp_irq(pdev, 0);

	nvt->cir_wake_addr = pnp_port_start(pdev, 1);

	nvt->cr_efir = CR_EFIR;
	nvt->cr_efdr = CR_EFDR;

	spin_lock_init(&nvt->lock);

	pnp_set_drvdata(pdev, nvt);

	ret = nvt_hw_detect(nvt);
	if (ret)
		return ret;

	/* Initialize CIR & CIR Wake Logical Devices */
	nvt_efm_enable(nvt);
	nvt_cir_ldev_init(nvt);
	nvt_cir_wake_ldev_init(nvt);
	nvt_efm_disable(nvt);

	/*
	 * Initialize CIR & CIR Wake Config Registers
	 * and enable logical devices
	 */
	nvt_cir_regs_init(nvt);
	nvt_cir_wake_regs_init(nvt);

	/* Set up the rc device */
	rdev->priv = nvt;
	rdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
	rdev->allowed_wakeup_protocols = RC_PROTO_BIT_ALL_IR_ENCODER;
	rdev->encode_wakeup = true;
	rdev->open = nvt_open;
	rdev->close = nvt_close;
	rdev->s_tx_carrier = nvt_set_tx_carrier;
	rdev->s_wakeup_filter = nvt_ir_raw_set_wakeup_filter;
	rdev->device_name = "Nuvoton w836x7hg Infrared Remote Transceiver";
	rdev->input_phys = "nuvoton/cir0";
	rdev->input_id.bustype = BUS_HOST;
	rdev->input_id.vendor = PCI_VENDOR_ID_WINBOND2;
	rdev->input_id.product = nvt->chip_major;
	rdev->input_id.version = nvt->chip_minor;
	rdev->driver_name = NVT_DRIVER_NAME;
	rdev->map_name = RC_MAP_RC6_MCE;
	rdev->timeout = MS_TO_NS(100);
	/* rx resolution is hardwired to 50us atm, 1, 25, 100 also possible */
	rdev->rx_resolution = US_TO_NS(CIR_SAMPLE_PERIOD);
#if 0
	rdev->min_timeout = XYZ;
	rdev->max_timeout = XYZ;
#endif
	ret = devm_rc_register_device(&pdev->dev, rdev);
	if (ret)
		return ret;

	/* now claim resources */
	if (!devm_request_region(&pdev->dev, nvt->cir_addr,
				 CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
		return -EBUSY;

	ret = devm_request_irq(&pdev->dev, nvt->cir_irq, nvt_cir_isr,
			       IRQF_SHARED, NVT_DRIVER_NAME, nvt);
	if (ret)
		return ret;

	if (!devm_request_region(&pdev->dev, nvt->cir_wake_addr,
				 CIR_IOREG_LENGTH, NVT_DRIVER_NAME "-wake"))
		return -EBUSY;

	ret = device_create_file(&rdev->dev, &dev_attr_wakeup_data);
	if (ret)
		return ret;

	device_init_wakeup(&pdev->dev, true);

	dev_notice(&pdev->dev, "driver has been successfully loaded\n");

	if (debug) {
		cir_dump_regs(nvt);
		cir_wake_dump_regs(nvt);
	}

	return 0;
}

static void nvt_remove(struct pnp_dev *pdev)
{
	struct nvt_dev *nvt = pnp_get_drvdata(pdev);

	device_remove_file(&nvt->rdev->dev, &dev_attr_wakeup_data);

	nvt_disable_cir(nvt);

	/* enable CIR Wake (for IR power-on) */
	nvt_enable_wake(nvt);
}

static int nvt_suspend(struct pnp_dev *pdev, pm_message_t state)
{
	struct nvt_dev *nvt = pnp_get_drvdata(pdev);

	nvt_dbg("%s called", __func__);

	mutex_lock(&nvt->rdev->lock);
	if (nvt->rdev->users)
		nvt_disable_cir(nvt);
	mutex_unlock(&nvt->rdev->lock);

	/* make sure wake is enabled */
	nvt_enable_wake(nvt);

	return 0;
}

static int nvt_resume(struct pnp_dev *pdev)
{
	struct nvt_dev *nvt = pnp_get_drvdata(pdev);

	nvt_dbg("%s called", __func__);

	nvt_cir_regs_init(nvt);
	nvt_cir_wake_regs_init(nvt);

	mutex_lock(&nvt->rdev->lock);
	if (nvt->rdev->users)
		nvt_enable_cir(nvt);
	mutex_unlock(&nvt->rdev->lock);

	return 0;
}

static void nvt_shutdown(struct pnp_dev *pdev)
{
	struct nvt_dev *nvt = pnp_get_drvdata(pdev);

	nvt_enable_wake(nvt);
}

static const struct pnp_device_id nvt_ids[] = {
	{ "WEC0530", 0 },   /* CIR */
	{ "NTN0530", 0 },   /* CIR for new chip's pnp id */
	{ "", 0 },
};

static struct pnp_driver nvt_driver = {
	.name = NVT_DRIVER_NAME,
	.id_table = nvt_ids,
	.flags = PNP_DRIVER_RES_DO_NOT_CHANGE,
	.probe = nvt_probe,
	.remove = nvt_remove,
	.suspend = nvt_suspend,
	.resume = nvt_resume,
	.shutdown = nvt_shutdown,
};

module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable debugging output");
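/*
 * Example of enabling the debug output above (illustrative; assumes the
 * module is built as nuvoton-cir.ko, so the parameter appears under
 * /sys/module/nuvoton_cir/parameters/):
 *
 *   modprobe nuvoton-cir debug=1
 *
 * or, at runtime, write 1 to /sys/module/nuvoton_cir/parameters/debug
 * (the S_IWUSR permission above allows this for root).
 */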
MODULE_DEVICE_TABLE(pnp, nvt_ids);
MODULE_DESCRIPTION("Nuvoton W83667HG-A & W83677HG-I CIR driver");
MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
MODULE_LICENSE("GPL");

module_pnp_driver(nvt_driver);