cec-api.c 17 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * cec-api.c - HDMI Consumer Electronics Control framework - API
  4. *
  5. * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
  6. */
  7. #include <linux/errno.h>
  8. #include <linux/init.h>
  9. #include <linux/module.h>
  10. #include <linux/kernel.h>
  11. #include <linux/kmod.h>
  12. #include <linux/ktime.h>
  13. #include <linux/slab.h>
  14. #include <linux/mm.h>
  15. #include <linux/string.h>
  16. #include <linux/types.h>
  17. #include <linux/uaccess.h>
  18. #include <linux/version.h>
  19. #include <media/cec-pin.h>
  20. #include "cec-priv.h"
  21. #include "cec-pin-priv.h"
  22. static inline struct cec_devnode *cec_devnode_data(struct file *filp)
  23. {
  24. struct cec_fh *fh = filp->private_data;
  25. return &fh->adap->devnode;
  26. }
/* CEC file operations */

/*
 * Poll handler: writable when the adapter is configured and the transmit
 * queue has room, readable when received messages are queued for this fh,
 * and EPOLLPRI when events are pending.
 */
static __poll_t cec_poll(struct file *filp,
			 struct poll_table_struct *poll)
{
	struct cec_fh *fh = filp->private_data;
	struct cec_adapter *adap = fh->adap;
	__poll_t res = 0;

	poll_wait(filp, &fh->wait, poll);
	/* The adapter was unregistered behind our back */
	if (!cec_is_registered(adap))
		return EPOLLERR | EPOLLHUP;
	mutex_lock(&adap->lock);
	if (adap->is_configured &&
	    adap->transmit_queue_sz < CEC_MAX_MSG_TX_QUEUE_SZ)
		res |= EPOLLOUT | EPOLLWRNORM;
	if (fh->queued_msgs)
		res |= EPOLLIN | EPOLLRDNORM;
	if (fh->total_queued_events)
		res |= EPOLLPRI;
	mutex_unlock(&adap->lock);
	return res;
}
  48. static bool cec_is_busy(const struct cec_adapter *adap,
  49. const struct cec_fh *fh)
  50. {
  51. bool valid_initiator = adap->cec_initiator && adap->cec_initiator == fh;
  52. bool valid_follower = adap->cec_follower && adap->cec_follower == fh;
  53. /*
  54. * Exclusive initiators and followers can always access the CEC adapter
  55. */
  56. if (valid_initiator || valid_follower)
  57. return false;
  58. /*
  59. * All others can only access the CEC adapter if there is no
  60. * exclusive initiator and they are in INITIATOR mode.
  61. */
  62. return adap->cec_initiator ||
  63. fh->mode_initiator == CEC_MODE_NO_INITIATOR;
  64. }
  65. static long cec_adap_g_caps(struct cec_adapter *adap,
  66. struct cec_caps __user *parg)
  67. {
  68. struct cec_caps caps = {};
  69. strscpy(caps.driver, adap->devnode.dev.parent->driver->name,
  70. sizeof(caps.driver));
  71. strscpy(caps.name, adap->name, sizeof(caps.name));
  72. caps.available_log_addrs = adap->available_log_addrs;
  73. caps.capabilities = adap->capabilities;
  74. caps.version = LINUX_VERSION_CODE;
  75. if (copy_to_user(parg, &caps, sizeof(caps)))
  76. return -EFAULT;
  77. return 0;
  78. }
  79. static long cec_adap_g_phys_addr(struct cec_adapter *adap,
  80. __u16 __user *parg)
  81. {
  82. u16 phys_addr;
  83. mutex_lock(&adap->lock);
  84. phys_addr = adap->phys_addr;
  85. mutex_unlock(&adap->lock);
  86. if (copy_to_user(parg, &phys_addr, sizeof(phys_addr)))
  87. return -EFAULT;
  88. return 0;
  89. }
  90. static int cec_validate_phys_addr(u16 phys_addr)
  91. {
  92. int i;
  93. if (phys_addr == CEC_PHYS_ADDR_INVALID)
  94. return 0;
  95. for (i = 0; i < 16; i += 4)
  96. if (phys_addr & (0xf << i))
  97. break;
  98. if (i == 16)
  99. return 0;
  100. for (i += 4; i < 16; i += 4)
  101. if ((phys_addr & (0xf << i)) == 0)
  102. return -EINVAL;
  103. return 0;
  104. }
/*
 * CEC_ADAP_S_PHYS_ADDR: validate and set a new physical address.
 *
 * Requires CEC_CAP_PHYS_ADDR. Fails with -EBUSY when another filehandle
 * holds exclusive access. If @block is true, __cec_s_phys_addr() waits
 * for any resulting reconfiguration to complete.
 */
static long cec_adap_s_phys_addr(struct cec_adapter *adap, struct cec_fh *fh,
				 bool block, __u16 __user *parg)
{
	u16 phys_addr;
	long err;

	if (!(adap->capabilities & CEC_CAP_PHYS_ADDR))
		return -ENOTTY;
	if (copy_from_user(&phys_addr, parg, sizeof(phys_addr)))
		return -EFAULT;

	err = cec_validate_phys_addr(phys_addr);
	if (err)
		return err;
	mutex_lock(&adap->lock);
	if (cec_is_busy(adap, fh))
		err = -EBUSY;
	else
		/* err is still 0 here from the successful validation above */
		__cec_s_phys_addr(adap, phys_addr, block);
	mutex_unlock(&adap->lock);
	return err;
}
/* CEC_ADAP_G_LOG_ADDRS: copy the logical address configuration to userspace. */
static long cec_adap_g_log_addrs(struct cec_adapter *adap,
				 struct cec_log_addrs __user *parg)
{
	struct cec_log_addrs log_addrs;

	mutex_lock(&adap->lock);
	/*
	 * We use memcpy here instead of assignment since there is a
	 * hole at the end of struct cec_log_addrs that an assignment
	 * might ignore. So when we do copy_to_user() we could leak
	 * one byte of memory.
	 */
	memcpy(&log_addrs, &adap->log_addrs, sizeof(log_addrs));
	/* Not configured: report every logical address slot as invalid */
	if (!adap->is_configured)
		memset(log_addrs.log_addr, CEC_LOG_ADDR_INVALID,
		       sizeof(log_addrs.log_addr));
	mutex_unlock(&adap->lock);

	if (copy_to_user(parg, &log_addrs, sizeof(log_addrs)))
		return -EFAULT;
	return 0;
}
/*
 * CEC_ADAP_S_LOG_ADDRS: claim (or clear) logical addresses.
 *
 * Requires CEC_CAP_LOG_ADDRS. Only permitted when no configuration is in
 * progress and either nothing is configured yet or the caller is clearing
 * the addresses (num_log_addrs == 0); otherwise returns -EBUSY. On success
 * the resulting configuration is copied back to userspace.
 */
static long cec_adap_s_log_addrs(struct cec_adapter *adap, struct cec_fh *fh,
				 bool block, struct cec_log_addrs __user *parg)
{
	struct cec_log_addrs log_addrs;
	long err = -EBUSY;

	if (!(adap->capabilities & CEC_CAP_LOG_ADDRS))
		return -ENOTTY;
	if (copy_from_user(&log_addrs, parg, sizeof(log_addrs)))
		return -EFAULT;
	/* Silently drop any flag bits this kernel doesn't know about */
	log_addrs.flags &= CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK |
			   CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU |
			   CEC_LOG_ADDRS_FL_CDC_ONLY;
	mutex_lock(&adap->lock);
	if (!adap->is_configuring &&
	    (!log_addrs.num_log_addrs || !adap->is_configured) &&
	    !cec_is_busy(adap, fh)) {
		err = __cec_s_log_addrs(adap, &log_addrs, block);
		if (!err)
			log_addrs = adap->log_addrs;
	}
	mutex_unlock(&adap->lock);
	if (err)
		return err;
	if (copy_to_user(parg, &log_addrs, sizeof(log_addrs)))
		return -EFAULT;
	return 0;
}
/*
 * CEC_TRANSMIT: queue a CEC message for transmission.
 *
 * Requires CEC_CAP_TRANSMIT and a configured adapter. When @block is set,
 * cec_transmit_msg_fh() waits for the transmit (and any requested reply)
 * to finish and the result is copied back to userspace.
 */
static long cec_transmit(struct cec_adapter *adap, struct cec_fh *fh,
			 bool block, struct cec_msg __user *parg)
{
	struct cec_msg msg = {};
	long err = 0;

	if (!(adap->capabilities & CEC_CAP_TRANSMIT))
		return -ENOTTY;
	if (copy_from_user(&msg, parg, sizeof(msg)))
		return -EFAULT;

	mutex_lock(&adap->lock);
	if (adap->log_addrs.num_log_addrs == 0)
		/* No logical addresses configured at all */
		err = -EPERM;
	else if (adap->is_configuring)
		/* Still claiming logical addresses; try again later */
		err = -ENONET;
	else if (cec_is_busy(adap, fh))
		err = -EBUSY;
	else
		err = cec_transmit_msg_fh(adap, &msg, fh, block);
	mutex_unlock(&adap->lock);
	if (err)
		return err;
	if (copy_to_user(parg, &msg, sizeof(msg)))
		return -EFAULT;
	return 0;
}
/*
 * Called by CEC_RECEIVE: wait for a message to arrive.
 *
 * Returns 0 with *msg filled in on success, -EAGAIN in non-blocking mode
 * when the queue is empty, -ETIMEDOUT when msg->timeout expires, or
 * -ERESTARTSYS if the wait was interrupted by a signal.
 */
static int cec_receive_msg(struct cec_fh *fh, struct cec_msg *msg, bool block)
{
	/* *msg is overwritten below, so remember the caller's timeout */
	u32 timeout = msg->timeout;
	int res;

	do {
		mutex_lock(&fh->lock);
		/* Are there received messages queued up? */
		if (fh->queued_msgs) {
			/* Yes, return the first one */
			struct cec_msg_entry *entry =
				list_first_entry(&fh->msgs,
						 struct cec_msg_entry, list);

			list_del(&entry->list);
			*msg = entry->msg;
			kfree(entry);
			fh->queued_msgs--;
			mutex_unlock(&fh->lock);
			/* restore original timeout value */
			msg->timeout = timeout;
			return 0;
		}

		/* No, return EAGAIN in non-blocking mode or wait */
		mutex_unlock(&fh->lock);

		/* Return when in non-blocking mode */
		if (!block)
			return -EAGAIN;

		if (msg->timeout) {
			/* The user specified a timeout */
			res = wait_event_interruptible_timeout(fh->wait,
							       fh->queued_msgs,
				msecs_to_jiffies(msg->timeout));
			/* 0 means the timeout expired, > 0 means success */
			if (res == 0)
				res = -ETIMEDOUT;
			else if (res > 0)
				res = 0;
		} else {
			/* Wait indefinitely */
			res = wait_event_interruptible(fh->wait,
						       fh->queued_msgs);
		}
		/* Exit on error, otherwise loop to get the new message */
	} while (!res);
	return res;
}
  242. static long cec_receive(struct cec_adapter *adap, struct cec_fh *fh,
  243. bool block, struct cec_msg __user *parg)
  244. {
  245. struct cec_msg msg = {};
  246. long err;
  247. if (copy_from_user(&msg, parg, sizeof(msg)))
  248. return -EFAULT;
  249. err = cec_receive_msg(fh, &msg, block);
  250. if (err)
  251. return err;
  252. msg.flags = 0;
  253. if (copy_to_user(parg, &msg, sizeof(msg)))
  254. return -EFAULT;
  255. return 0;
  256. }
/*
 * CEC_DQEVENT: dequeue the oldest pending event.
 *
 * If @block is set, wait (interruptibly) for an event to arrive. The
 * oldest event across all per-type queues, judged by timestamp, is
 * returned. Core events use statically allocated per-fh entries and must
 * not be freed here; only dynamically allocated entries are kfree'd.
 */
static long cec_dqevent(struct cec_adapter *adap, struct cec_fh *fh,
			bool block, struct cec_event __user *parg)
{
	struct cec_event_entry *ev = NULL;
	u64 ts = ~0ULL;
	unsigned int i;
	unsigned int ev_idx;
	long err = 0;

	mutex_lock(&fh->lock);
	while (!fh->total_queued_events && block) {
		/* Drop the lock while sleeping, retake it to recheck */
		mutex_unlock(&fh->lock);
		err = wait_event_interruptible(fh->wait,
					       fh->total_queued_events);
		if (err)
			return err;
		mutex_lock(&fh->lock);
	}

	/* Find the oldest event */
	for (i = 0; i < CEC_NUM_EVENTS; i++) {
		struct cec_event_entry *entry =
			list_first_entry_or_null(&fh->events[i],
						 struct cec_event_entry, list);

		if (entry && entry->ev.ts <= ts) {
			ev = entry;
			ev_idx = i;
			ts = ev->ev.ts;
		}
	}
	if (!ev) {
		/* Nothing queued (only possible in non-blocking mode) */
		err = -EAGAIN;
		goto unlock;
	}
	list_del(&ev->list);

	if (copy_to_user(parg, &ev->ev, sizeof(ev->ev)))
		err = -EFAULT;
	/* Core event entries are preallocated; free only dynamic ones */
	if (ev_idx >= CEC_NUM_CORE_EVENTS)
		kfree(ev);
	fh->queued_events[ev_idx]--;
	fh->total_queued_events--;

unlock:
	mutex_unlock(&fh->lock);
	return err;
}
  300. static long cec_g_mode(struct cec_adapter *adap, struct cec_fh *fh,
  301. u32 __user *parg)
  302. {
  303. u32 mode = fh->mode_initiator | fh->mode_follower;
  304. if (copy_to_user(parg, &mode, sizeof(mode)))
  305. return -EFAULT;
  306. return 0;
  307. }
/*
 * CEC_S_MODE: change the initiator/follower mode of this filehandle.
 *
 * Validates the requested mode against the adapter capabilities and any
 * existing exclusive initiator/follower, keeps the monitor-all and
 * monitor-pin refcounts in sync, and updates the adapter's exclusive
 * owner pointers and follower count.
 */
static long cec_s_mode(struct cec_adapter *adap, struct cec_fh *fh,
		       u32 __user *parg)
{
	u32 mode;
	u8 mode_initiator;
	u8 mode_follower;
	bool send_pin_event = false;
	long err = 0;

	if (copy_from_user(&mode, parg, sizeof(mode)))
		return -EFAULT;
	if (mode & ~(CEC_MODE_INITIATOR_MSK | CEC_MODE_FOLLOWER_MSK)) {
		dprintk(1, "%s: invalid mode bits set\n", __func__);
		return -EINVAL;
	}

	mode_initiator = mode & CEC_MODE_INITIATOR_MSK;
	mode_follower = mode & CEC_MODE_FOLLOWER_MSK;
	if (mode_initiator > CEC_MODE_EXCL_INITIATOR ||
	    mode_follower > CEC_MODE_MONITOR_ALL) {
		dprintk(1, "%s: unknown mode\n", __func__);
		return -EINVAL;
	}

	if (mode_follower == CEC_MODE_MONITOR_ALL &&
	    !(adap->capabilities & CEC_CAP_MONITOR_ALL)) {
		dprintk(1, "%s: MONITOR_ALL not supported\n", __func__);
		return -EINVAL;
	}

	if (mode_follower == CEC_MODE_MONITOR_PIN &&
	    !(adap->capabilities & CEC_CAP_MONITOR_PIN)) {
		dprintk(1, "%s: MONITOR_PIN not supported\n", __func__);
		return -EINVAL;
	}

	/* Follower modes should always be able to send CEC messages */
	if ((mode_initiator == CEC_MODE_NO_INITIATOR ||
	     !(adap->capabilities & CEC_CAP_TRANSMIT)) &&
	    mode_follower >= CEC_MODE_FOLLOWER &&
	    mode_follower <= CEC_MODE_EXCL_FOLLOWER_PASSTHRU) {
		dprintk(1, "%s: cannot transmit\n", __func__);
		return -EINVAL;
	}

	/* Monitor modes require CEC_MODE_NO_INITIATOR */
	if (mode_initiator && mode_follower >= CEC_MODE_MONITOR_PIN) {
		dprintk(1, "%s: monitor modes require NO_INITIATOR\n",
			__func__);
		return -EINVAL;
	}

	/* Monitor modes require CAP_NET_ADMIN */
	if (mode_follower >= CEC_MODE_MONITOR_PIN && !capable(CAP_NET_ADMIN))
		return -EPERM;

	mutex_lock(&adap->lock);
	/*
	 * You can't become exclusive follower if someone else already
	 * has that job.
	 */
	if ((mode_follower == CEC_MODE_EXCL_FOLLOWER ||
	     mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) &&
	    adap->cec_follower && adap->cec_follower != fh)
		err = -EBUSY;
	/*
	 * You can't become exclusive initiator if someone else already
	 * has that job.
	 */
	if (mode_initiator == CEC_MODE_EXCL_INITIATOR &&
	    adap->cec_initiator && adap->cec_initiator != fh)
		err = -EBUSY;

	if (!err) {
		/* Keep the monitor-all refcount in sync with the mode change */
		bool old_mon_all = fh->mode_follower == CEC_MODE_MONITOR_ALL;
		bool new_mon_all = mode_follower == CEC_MODE_MONITOR_ALL;

		if (old_mon_all != new_mon_all) {
			if (new_mon_all)
				err = cec_monitor_all_cnt_inc(adap);
			else
				cec_monitor_all_cnt_dec(adap);
		}
	}

	if (!err) {
		/* Same bookkeeping for the monitor-pin refcount */
		bool old_mon_pin = fh->mode_follower == CEC_MODE_MONITOR_PIN;
		bool new_mon_pin = mode_follower == CEC_MODE_MONITOR_PIN;

		if (old_mon_pin != new_mon_pin) {
			send_pin_event = new_mon_pin;
			if (new_mon_pin)
				err = cec_monitor_pin_cnt_inc(adap);
			else
				cec_monitor_pin_cnt_dec(adap);
		}
	}

	if (err) {
		mutex_unlock(&adap->lock);
		return err;
	}

	if (fh->mode_follower == CEC_MODE_FOLLOWER)
		adap->follower_cnt--;
	if (mode_follower == CEC_MODE_FOLLOWER)
		adap->follower_cnt++;
	if (send_pin_event) {
		/* Entering MONITOR_PIN: report the current pin state first */
		struct cec_event ev = {
			.flags = CEC_EVENT_FL_INITIAL_STATE,
		};

		ev.event = adap->cec_pin_is_high ? CEC_EVENT_PIN_CEC_HIGH :
						   CEC_EVENT_PIN_CEC_LOW;
		cec_queue_event_fh(fh, &ev, 0);
	}
	if (mode_follower == CEC_MODE_EXCL_FOLLOWER ||
	    mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) {
		adap->passthrough =
			mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU;
		adap->cec_follower = fh;
	} else if (adap->cec_follower == fh) {
		/* Giving up exclusive follower status */
		adap->passthrough = false;
		adap->cec_follower = NULL;
	}
	if (mode_initiator == CEC_MODE_EXCL_INITIATOR)
		adap->cec_initiator = fh;
	else if (adap->cec_initiator == fh)
		adap->cec_initiator = NULL;
	fh->mode_initiator = mode_initiator;
	fh->mode_follower = mode_follower;
	mutex_unlock(&adap->lock);
	return 0;
}
/*
 * Main ioctl dispatcher: decode the command and hand it off to the
 * matching helper. 'block' is derived from O_NONBLOCK on the filehandle.
 */
static long cec_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct cec_fh *fh = filp->private_data;
	struct cec_adapter *adap = fh->adap;
	bool block = !(filp->f_flags & O_NONBLOCK);
	void __user *parg = (void __user *)arg;

	/* The adapter may have been unregistered since the open() */
	if (!cec_is_registered(adap))
		return -ENODEV;

	switch (cmd) {
	case CEC_ADAP_G_CAPS:
		return cec_adap_g_caps(adap, parg);

	case CEC_ADAP_G_PHYS_ADDR:
		return cec_adap_g_phys_addr(adap, parg);

	case CEC_ADAP_S_PHYS_ADDR:
		return cec_adap_s_phys_addr(adap, fh, block, parg);

	case CEC_ADAP_G_LOG_ADDRS:
		return cec_adap_g_log_addrs(adap, parg);

	case CEC_ADAP_S_LOG_ADDRS:
		return cec_adap_s_log_addrs(adap, fh, block, parg);

	case CEC_TRANSMIT:
		return cec_transmit(adap, fh, block, parg);

	case CEC_RECEIVE:
		return cec_receive(adap, fh, block, parg);

	case CEC_DQEVENT:
		return cec_dqevent(adap, fh, block, parg);

	case CEC_G_MODE:
		return cec_g_mode(adap, fh, parg);

	case CEC_S_MODE:
		return cec_s_mode(adap, fh, parg);

	default:
		return -ENOTTY;
	}
}
  460. static int cec_open(struct inode *inode, struct file *filp)
  461. {
  462. struct cec_devnode *devnode =
  463. container_of(inode->i_cdev, struct cec_devnode, cdev);
  464. struct cec_adapter *adap = to_cec_adapter(devnode);
  465. struct cec_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL);
  466. /*
  467. * Initial events that are automatically sent when the cec device is
  468. * opened.
  469. */
  470. struct cec_event ev = {
  471. .event = CEC_EVENT_STATE_CHANGE,
  472. .flags = CEC_EVENT_FL_INITIAL_STATE,
  473. };
  474. unsigned int i;
  475. int err;
  476. if (!fh)
  477. return -ENOMEM;
  478. INIT_LIST_HEAD(&fh->msgs);
  479. INIT_LIST_HEAD(&fh->xfer_list);
  480. for (i = 0; i < CEC_NUM_EVENTS; i++)
  481. INIT_LIST_HEAD(&fh->events[i]);
  482. mutex_init(&fh->lock);
  483. init_waitqueue_head(&fh->wait);
  484. fh->mode_initiator = CEC_MODE_INITIATOR;
  485. fh->adap = adap;
  486. err = cec_get_device(devnode);
  487. if (err) {
  488. kfree(fh);
  489. return err;
  490. }
  491. mutex_lock(&devnode->lock);
  492. if (list_empty(&devnode->fhs) &&
  493. !adap->needs_hpd &&
  494. adap->phys_addr == CEC_PHYS_ADDR_INVALID) {
  495. err = adap->ops->adap_enable(adap, true);
  496. if (err) {
  497. mutex_unlock(&devnode->lock);
  498. kfree(fh);
  499. return err;
  500. }
  501. }
  502. filp->private_data = fh;
  503. /* Queue up initial state events */
  504. ev.state_change.phys_addr = adap->phys_addr;
  505. ev.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
  506. cec_queue_event_fh(fh, &ev, 0);
  507. #ifdef CONFIG_CEC_PIN
  508. if (adap->pin && adap->pin->ops->read_hpd) {
  509. err = adap->pin->ops->read_hpd(adap);
  510. if (err >= 0) {
  511. ev.event = err ? CEC_EVENT_PIN_HPD_HIGH :
  512. CEC_EVENT_PIN_HPD_LOW;
  513. cec_queue_event_fh(fh, &ev, 0);
  514. }
  515. }
  516. if (adap->pin && adap->pin->ops->read_5v) {
  517. err = adap->pin->ops->read_5v(adap);
  518. if (err >= 0) {
  519. ev.event = err ? CEC_EVENT_PIN_5V_HIGH :
  520. CEC_EVENT_PIN_5V_LOW;
  521. cec_queue_event_fh(fh, &ev, 0);
  522. }
  523. }
  524. #endif
  525. list_add(&fh->list, &devnode->fhs);
  526. mutex_unlock(&devnode->lock);
  527. return 0;
  528. }
/*
 * Release a CEC devnode filehandle: give up any exclusive or monitor
 * roles held by this fh, disable the adapter hardware on last close
 * (when no HPD is needed and no physical address is set), unhook pending
 * transmits and free all queued messages and dynamic events.
 */
static int cec_release(struct inode *inode, struct file *filp)
{
	struct cec_devnode *devnode = cec_devnode_data(filp);
	struct cec_adapter *adap = to_cec_adapter(devnode);
	struct cec_fh *fh = filp->private_data;
	unsigned int i;

	mutex_lock(&adap->lock);
	if (adap->cec_initiator == fh)
		adap->cec_initiator = NULL;
	if (adap->cec_follower == fh) {
		adap->cec_follower = NULL;
		adap->passthrough = false;
	}
	/* Undo any follower/monitor accounting this fh contributed */
	if (fh->mode_follower == CEC_MODE_FOLLOWER)
		adap->follower_cnt--;
	if (fh->mode_follower == CEC_MODE_MONITOR_PIN)
		cec_monitor_pin_cnt_dec(adap);
	if (fh->mode_follower == CEC_MODE_MONITOR_ALL)
		cec_monitor_all_cnt_dec(adap);
	mutex_unlock(&adap->lock);

	mutex_lock(&devnode->lock);
	list_del(&fh->list);
	/* Last close: power the adapter back down */
	if (cec_is_registered(adap) && list_empty(&devnode->fhs) &&
	    !adap->needs_hpd && adap->phys_addr == CEC_PHYS_ADDR_INVALID) {
		WARN_ON(adap->ops->adap_enable(adap, false));
	}
	mutex_unlock(&devnode->lock);

	/* Unhook pending transmits from this filehandle. */
	mutex_lock(&adap->lock);
	while (!list_empty(&fh->xfer_list)) {
		struct cec_data *data =
			list_first_entry(&fh->xfer_list, struct cec_data, xfer_list);

		/* The transmit finishes on its own; nobody waits for it now */
		data->blocking = false;
		data->fh = NULL;
		list_del(&data->xfer_list);
	}
	mutex_unlock(&adap->lock);

	/* Free all unread queued messages */
	while (!list_empty(&fh->msgs)) {
		struct cec_msg_entry *entry =
			list_first_entry(&fh->msgs, struct cec_msg_entry, list);

		list_del(&entry->list);
		kfree(entry);
	}
	/* Core event entries are embedded in the fh; free only dynamic ones */
	for (i = CEC_NUM_CORE_EVENTS; i < CEC_NUM_EVENTS; i++) {
		while (!list_empty(&fh->events[i])) {
			struct cec_event_entry *entry =
				list_first_entry(&fh->events[i],
						 struct cec_event_entry, list);

			list_del(&entry->list);
			kfree(entry);
		}
	}
	kfree(fh);

	cec_put_device(devnode);
	filp->private_data = NULL;
	return 0;
}
/* File operations for the /dev/cecX character devices */
const struct file_operations cec_devnode_fops = {
	.owner = THIS_MODULE,
	.open = cec_open,
	.unlocked_ioctl = cec_ioctl,
	.compat_ioctl = cec_ioctl,
	.release = cec_release,
	.poll = cec_poll,
	.llseek = no_llseek,
};