  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * cec-api.c - HDMI Consumer Electronics Control framework - API
  4. *
  5. * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
  6. */
  7. #include <linux/errno.h>
  8. #include <linux/init.h>
  9. #include <linux/module.h>
  10. #include <linux/kernel.h>
  11. #include <linux/kmod.h>
  12. #include <linux/ktime.h>
  13. #include <linux/slab.h>
  14. #include <linux/mm.h>
  15. #include <linux/string.h>
  16. #include <linux/types.h>
  17. #include <linux/uaccess.h>
  18. #include <linux/version.h>
  19. #include <media/cec-pin.h>
  20. #include "cec-priv.h"
  21. #include "cec-pin-priv.h"
  22. static inline struct cec_devnode *cec_devnode_data(struct file *filp)
  23. {
  24. struct cec_fh *fh = filp->private_data;
  25. return &fh->adap->devnode;
  26. }
  27. /* CEC file operations */
  28. static __poll_t cec_poll(struct file *filp,
  29. struct poll_table_struct *poll)
  30. {
  31. struct cec_fh *fh = filp->private_data;
  32. struct cec_adapter *adap = fh->adap;
  33. __poll_t res = 0;
  34. if (!cec_is_registered(adap))
  35. return EPOLLERR | EPOLLHUP;
  36. mutex_lock(&adap->lock);
  37. if (adap->is_configured &&
  38. adap->transmit_queue_sz < CEC_MAX_MSG_TX_QUEUE_SZ)
  39. res |= EPOLLOUT | EPOLLWRNORM;
  40. if (fh->queued_msgs)
  41. res |= EPOLLIN | EPOLLRDNORM;
  42. if (fh->total_queued_events)
  43. res |= EPOLLPRI;
  44. poll_wait(filp, &fh->wait, poll);
  45. mutex_unlock(&adap->lock);
  46. return res;
  47. }
  48. static bool cec_is_busy(const struct cec_adapter *adap,
  49. const struct cec_fh *fh)
  50. {
  51. bool valid_initiator = adap->cec_initiator && adap->cec_initiator == fh;
  52. bool valid_follower = adap->cec_follower && adap->cec_follower == fh;
  53. /*
  54. * Exclusive initiators and followers can always access the CEC adapter
  55. */
  56. if (valid_initiator || valid_follower)
  57. return false;
  58. /*
  59. * All others can only access the CEC adapter if there is no
  60. * exclusive initiator and they are in INITIATOR mode.
  61. */
  62. return adap->cec_initiator ||
  63. fh->mode_initiator == CEC_MODE_NO_INITIATOR;
  64. }
  65. static long cec_adap_g_caps(struct cec_adapter *adap,
  66. struct cec_caps __user *parg)
  67. {
  68. struct cec_caps caps = {};
  69. strlcpy(caps.driver, adap->devnode.dev.parent->driver->name,
  70. sizeof(caps.driver));
  71. strlcpy(caps.name, adap->name, sizeof(caps.name));
  72. caps.available_log_addrs = adap->available_log_addrs;
  73. caps.capabilities = adap->capabilities;
  74. caps.version = LINUX_VERSION_CODE;
  75. if (copy_to_user(parg, &caps, sizeof(caps)))
  76. return -EFAULT;
  77. return 0;
  78. }
  79. static long cec_adap_g_phys_addr(struct cec_adapter *adap,
  80. __u16 __user *parg)
  81. {
  82. u16 phys_addr;
  83. mutex_lock(&adap->lock);
  84. phys_addr = adap->phys_addr;
  85. mutex_unlock(&adap->lock);
  86. if (copy_to_user(parg, &phys_addr, sizeof(phys_addr)))
  87. return -EFAULT;
  88. return 0;
  89. }
  90. static int cec_validate_phys_addr(u16 phys_addr)
  91. {
  92. int i;
  93. if (phys_addr == CEC_PHYS_ADDR_INVALID)
  94. return 0;
  95. for (i = 0; i < 16; i += 4)
  96. if (phys_addr & (0xf << i))
  97. break;
  98. if (i == 16)
  99. return 0;
  100. for (i += 4; i < 16; i += 4)
  101. if ((phys_addr & (0xf << i)) == 0)
  102. return -EINVAL;
  103. return 0;
  104. }
  105. static long cec_adap_s_phys_addr(struct cec_adapter *adap, struct cec_fh *fh,
  106. bool block, __u16 __user *parg)
  107. {
  108. u16 phys_addr;
  109. long err;
  110. if (!(adap->capabilities & CEC_CAP_PHYS_ADDR))
  111. return -ENOTTY;
  112. if (copy_from_user(&phys_addr, parg, sizeof(phys_addr)))
  113. return -EFAULT;
  114. err = cec_validate_phys_addr(phys_addr);
  115. if (err)
  116. return err;
  117. mutex_lock(&adap->lock);
  118. if (cec_is_busy(adap, fh))
  119. err = -EBUSY;
  120. else
  121. __cec_s_phys_addr(adap, phys_addr, block);
  122. mutex_unlock(&adap->lock);
  123. return err;
  124. }
  125. static long cec_adap_g_log_addrs(struct cec_adapter *adap,
  126. struct cec_log_addrs __user *parg)
  127. {
  128. struct cec_log_addrs log_addrs;
  129. mutex_lock(&adap->lock);
  130. log_addrs = adap->log_addrs;
  131. if (!adap->is_configured)
  132. memset(log_addrs.log_addr, CEC_LOG_ADDR_INVALID,
  133. sizeof(log_addrs.log_addr));
  134. mutex_unlock(&adap->lock);
  135. if (copy_to_user(parg, &log_addrs, sizeof(log_addrs)))
  136. return -EFAULT;
  137. return 0;
  138. }
  139. static long cec_adap_s_log_addrs(struct cec_adapter *adap, struct cec_fh *fh,
  140. bool block, struct cec_log_addrs __user *parg)
  141. {
  142. struct cec_log_addrs log_addrs;
  143. long err = -EBUSY;
  144. if (!(adap->capabilities & CEC_CAP_LOG_ADDRS))
  145. return -ENOTTY;
  146. if (copy_from_user(&log_addrs, parg, sizeof(log_addrs)))
  147. return -EFAULT;
  148. log_addrs.flags &= CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK |
  149. CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU |
  150. CEC_LOG_ADDRS_FL_CDC_ONLY;
  151. mutex_lock(&adap->lock);
  152. if (!adap->is_configuring &&
  153. (!log_addrs.num_log_addrs || !adap->is_configured) &&
  154. !cec_is_busy(adap, fh)) {
  155. err = __cec_s_log_addrs(adap, &log_addrs, block);
  156. if (!err)
  157. log_addrs = adap->log_addrs;
  158. }
  159. mutex_unlock(&adap->lock);
  160. if (err)
  161. return err;
  162. if (copy_to_user(parg, &log_addrs, sizeof(log_addrs)))
  163. return -EFAULT;
  164. return 0;
  165. }
  166. static long cec_transmit(struct cec_adapter *adap, struct cec_fh *fh,
  167. bool block, struct cec_msg __user *parg)
  168. {
  169. struct cec_msg msg = {};
  170. long err = 0;
  171. if (!(adap->capabilities & CEC_CAP_TRANSMIT))
  172. return -ENOTTY;
  173. if (copy_from_user(&msg, parg, sizeof(msg)))
  174. return -EFAULT;
  175. /* A CDC-Only device can only send CDC messages */
  176. if ((adap->log_addrs.flags & CEC_LOG_ADDRS_FL_CDC_ONLY) &&
  177. (msg.len == 1 || msg.msg[1] != CEC_MSG_CDC_MESSAGE))
  178. return -EINVAL;
  179. mutex_lock(&adap->lock);
  180. if (adap->log_addrs.num_log_addrs == 0)
  181. err = -EPERM;
  182. else if (adap->is_configuring)
  183. err = -ENONET;
  184. else if (!adap->is_configured &&
  185. (adap->needs_hpd || msg.msg[0] != 0xf0))
  186. err = -ENONET;
  187. else if (cec_is_busy(adap, fh))
  188. err = -EBUSY;
  189. else
  190. err = cec_transmit_msg_fh(adap, &msg, fh, block);
  191. mutex_unlock(&adap->lock);
  192. if (err)
  193. return err;
  194. if (copy_to_user(parg, &msg, sizeof(msg)))
  195. return -EFAULT;
  196. return 0;
  197. }
  198. /* Called by CEC_RECEIVE: wait for a message to arrive */
  199. static int cec_receive_msg(struct cec_fh *fh, struct cec_msg *msg, bool block)
  200. {
  201. u32 timeout = msg->timeout;
  202. int res;
  203. do {
  204. mutex_lock(&fh->lock);
  205. /* Are there received messages queued up? */
  206. if (fh->queued_msgs) {
  207. /* Yes, return the first one */
  208. struct cec_msg_entry *entry =
  209. list_first_entry(&fh->msgs,
  210. struct cec_msg_entry, list);
  211. list_del(&entry->list);
  212. *msg = entry->msg;
  213. kfree(entry);
  214. fh->queued_msgs--;
  215. mutex_unlock(&fh->lock);
  216. /* restore original timeout value */
  217. msg->timeout = timeout;
  218. return 0;
  219. }
  220. /* No, return EAGAIN in non-blocking mode or wait */
  221. mutex_unlock(&fh->lock);
  222. /* Return when in non-blocking mode */
  223. if (!block)
  224. return -EAGAIN;
  225. if (msg->timeout) {
  226. /* The user specified a timeout */
  227. res = wait_event_interruptible_timeout(fh->wait,
  228. fh->queued_msgs,
  229. msecs_to_jiffies(msg->timeout));
  230. if (res == 0)
  231. res = -ETIMEDOUT;
  232. else if (res > 0)
  233. res = 0;
  234. } else {
  235. /* Wait indefinitely */
  236. res = wait_event_interruptible(fh->wait,
  237. fh->queued_msgs);
  238. }
  239. /* Exit on error, otherwise loop to get the new message */
  240. } while (!res);
  241. return res;
  242. }
  243. static long cec_receive(struct cec_adapter *adap, struct cec_fh *fh,
  244. bool block, struct cec_msg __user *parg)
  245. {
  246. struct cec_msg msg = {};
  247. long err;
  248. if (copy_from_user(&msg, parg, sizeof(msg)))
  249. return -EFAULT;
  250. err = cec_receive_msg(fh, &msg, block);
  251. if (err)
  252. return err;
  253. msg.flags = 0;
  254. if (copy_to_user(parg, &msg, sizeof(msg)))
  255. return -EFAULT;
  256. return 0;
  257. }
  258. static long cec_dqevent(struct cec_adapter *adap, struct cec_fh *fh,
  259. bool block, struct cec_event __user *parg)
  260. {
  261. struct cec_event_entry *ev = NULL;
  262. u64 ts = ~0ULL;
  263. unsigned int i;
  264. unsigned int ev_idx;
  265. long err = 0;
  266. mutex_lock(&fh->lock);
  267. while (!fh->total_queued_events && block) {
  268. mutex_unlock(&fh->lock);
  269. err = wait_event_interruptible(fh->wait,
  270. fh->total_queued_events);
  271. if (err)
  272. return err;
  273. mutex_lock(&fh->lock);
  274. }
  275. /* Find the oldest event */
  276. for (i = 0; i < CEC_NUM_EVENTS; i++) {
  277. struct cec_event_entry *entry =
  278. list_first_entry_or_null(&fh->events[i],
  279. struct cec_event_entry, list);
  280. if (entry && entry->ev.ts <= ts) {
  281. ev = entry;
  282. ev_idx = i;
  283. ts = ev->ev.ts;
  284. }
  285. }
  286. if (!ev) {
  287. err = -EAGAIN;
  288. goto unlock;
  289. }
  290. list_del(&ev->list);
  291. if (copy_to_user(parg, &ev->ev, sizeof(ev->ev)))
  292. err = -EFAULT;
  293. if (ev_idx >= CEC_NUM_CORE_EVENTS)
  294. kfree(ev);
  295. fh->queued_events[ev_idx]--;
  296. fh->total_queued_events--;
  297. unlock:
  298. mutex_unlock(&fh->lock);
  299. return err;
  300. }
  301. static long cec_g_mode(struct cec_adapter *adap, struct cec_fh *fh,
  302. u32 __user *parg)
  303. {
  304. u32 mode = fh->mode_initiator | fh->mode_follower;
  305. if (copy_to_user(parg, &mode, sizeof(mode)))
  306. return -EFAULT;
  307. return 0;
  308. }
  309. static long cec_s_mode(struct cec_adapter *adap, struct cec_fh *fh,
  310. u32 __user *parg)
  311. {
  312. u32 mode;
  313. u8 mode_initiator;
  314. u8 mode_follower;
  315. bool send_pin_event = false;
  316. long err = 0;
  317. if (copy_from_user(&mode, parg, sizeof(mode)))
  318. return -EFAULT;
  319. if (mode & ~(CEC_MODE_INITIATOR_MSK | CEC_MODE_FOLLOWER_MSK)) {
  320. dprintk(1, "%s: invalid mode bits set\n", __func__);
  321. return -EINVAL;
  322. }
  323. mode_initiator = mode & CEC_MODE_INITIATOR_MSK;
  324. mode_follower = mode & CEC_MODE_FOLLOWER_MSK;
  325. if (mode_initiator > CEC_MODE_EXCL_INITIATOR ||
  326. mode_follower > CEC_MODE_MONITOR_ALL) {
  327. dprintk(1, "%s: unknown mode\n", __func__);
  328. return -EINVAL;
  329. }
  330. if (mode_follower == CEC_MODE_MONITOR_ALL &&
  331. !(adap->capabilities & CEC_CAP_MONITOR_ALL)) {
  332. dprintk(1, "%s: MONITOR_ALL not supported\n", __func__);
  333. return -EINVAL;
  334. }
  335. if (mode_follower == CEC_MODE_MONITOR_PIN &&
  336. !(adap->capabilities & CEC_CAP_MONITOR_PIN)) {
  337. dprintk(1, "%s: MONITOR_PIN not supported\n", __func__);
  338. return -EINVAL;
  339. }
  340. /* Follower modes should always be able to send CEC messages */
  341. if ((mode_initiator == CEC_MODE_NO_INITIATOR ||
  342. !(adap->capabilities & CEC_CAP_TRANSMIT)) &&
  343. mode_follower >= CEC_MODE_FOLLOWER &&
  344. mode_follower <= CEC_MODE_EXCL_FOLLOWER_PASSTHRU) {
  345. dprintk(1, "%s: cannot transmit\n", __func__);
  346. return -EINVAL;
  347. }
  348. /* Monitor modes require CEC_MODE_NO_INITIATOR */
  349. if (mode_initiator && mode_follower >= CEC_MODE_MONITOR_PIN) {
  350. dprintk(1, "%s: monitor modes require NO_INITIATOR\n",
  351. __func__);
  352. return -EINVAL;
  353. }
  354. /* Monitor modes require CAP_NET_ADMIN */
  355. if (mode_follower >= CEC_MODE_MONITOR_PIN && !capable(CAP_NET_ADMIN))
  356. return -EPERM;
  357. mutex_lock(&adap->lock);
  358. /*
  359. * You can't become exclusive follower if someone else already
  360. * has that job.
  361. */
  362. if ((mode_follower == CEC_MODE_EXCL_FOLLOWER ||
  363. mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) &&
  364. adap->cec_follower && adap->cec_follower != fh)
  365. err = -EBUSY;
  366. /*
  367. * You can't become exclusive initiator if someone else already
  368. * has that job.
  369. */
  370. if (mode_initiator == CEC_MODE_EXCL_INITIATOR &&
  371. adap->cec_initiator && adap->cec_initiator != fh)
  372. err = -EBUSY;
  373. if (!err) {
  374. bool old_mon_all = fh->mode_follower == CEC_MODE_MONITOR_ALL;
  375. bool new_mon_all = mode_follower == CEC_MODE_MONITOR_ALL;
  376. if (old_mon_all != new_mon_all) {
  377. if (new_mon_all)
  378. err = cec_monitor_all_cnt_inc(adap);
  379. else
  380. cec_monitor_all_cnt_dec(adap);
  381. }
  382. }
  383. if (!err) {
  384. bool old_mon_pin = fh->mode_follower == CEC_MODE_MONITOR_PIN;
  385. bool new_mon_pin = mode_follower == CEC_MODE_MONITOR_PIN;
  386. if (old_mon_pin != new_mon_pin) {
  387. send_pin_event = new_mon_pin;
  388. if (new_mon_pin)
  389. err = cec_monitor_pin_cnt_inc(adap);
  390. else
  391. cec_monitor_pin_cnt_dec(adap);
  392. }
  393. }
  394. if (err) {
  395. mutex_unlock(&adap->lock);
  396. return err;
  397. }
  398. if (fh->mode_follower == CEC_MODE_FOLLOWER)
  399. adap->follower_cnt--;
  400. if (mode_follower == CEC_MODE_FOLLOWER)
  401. adap->follower_cnt++;
  402. if (send_pin_event) {
  403. struct cec_event ev = {
  404. .flags = CEC_EVENT_FL_INITIAL_STATE,
  405. };
  406. ev.event = adap->cec_pin_is_high ? CEC_EVENT_PIN_CEC_HIGH :
  407. CEC_EVENT_PIN_CEC_LOW;
  408. cec_queue_event_fh(fh, &ev, 0);
  409. }
  410. if (mode_follower == CEC_MODE_EXCL_FOLLOWER ||
  411. mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) {
  412. adap->passthrough =
  413. mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU;
  414. adap->cec_follower = fh;
  415. } else if (adap->cec_follower == fh) {
  416. adap->passthrough = false;
  417. adap->cec_follower = NULL;
  418. }
  419. if (mode_initiator == CEC_MODE_EXCL_INITIATOR)
  420. adap->cec_initiator = fh;
  421. else if (adap->cec_initiator == fh)
  422. adap->cec_initiator = NULL;
  423. fh->mode_initiator = mode_initiator;
  424. fh->mode_follower = mode_follower;
  425. mutex_unlock(&adap->lock);
  426. return 0;
  427. }
  428. static long cec_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
  429. {
  430. struct cec_fh *fh = filp->private_data;
  431. struct cec_adapter *adap = fh->adap;
  432. bool block = !(filp->f_flags & O_NONBLOCK);
  433. void __user *parg = (void __user *)arg;
  434. if (!cec_is_registered(adap))
  435. return -ENODEV;
  436. switch (cmd) {
  437. case CEC_ADAP_G_CAPS:
  438. return cec_adap_g_caps(adap, parg);
  439. case CEC_ADAP_G_PHYS_ADDR:
  440. return cec_adap_g_phys_addr(adap, parg);
  441. case CEC_ADAP_S_PHYS_ADDR:
  442. return cec_adap_s_phys_addr(adap, fh, block, parg);
  443. case CEC_ADAP_G_LOG_ADDRS:
  444. return cec_adap_g_log_addrs(adap, parg);
  445. case CEC_ADAP_S_LOG_ADDRS:
  446. return cec_adap_s_log_addrs(adap, fh, block, parg);
  447. case CEC_TRANSMIT:
  448. return cec_transmit(adap, fh, block, parg);
  449. case CEC_RECEIVE:
  450. return cec_receive(adap, fh, block, parg);
  451. case CEC_DQEVENT:
  452. return cec_dqevent(adap, fh, block, parg);
  453. case CEC_G_MODE:
  454. return cec_g_mode(adap, fh, parg);
  455. case CEC_S_MODE:
  456. return cec_s_mode(adap, fh, parg);
  457. default:
  458. return -ENOTTY;
  459. }
  460. }
  461. static int cec_open(struct inode *inode, struct file *filp)
  462. {
  463. struct cec_devnode *devnode =
  464. container_of(inode->i_cdev, struct cec_devnode, cdev);
  465. struct cec_adapter *adap = to_cec_adapter(devnode);
  466. struct cec_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL);
  467. /*
  468. * Initial events that are automatically sent when the cec device is
  469. * opened.
  470. */
  471. struct cec_event ev = {
  472. .event = CEC_EVENT_STATE_CHANGE,
  473. .flags = CEC_EVENT_FL_INITIAL_STATE,
  474. };
  475. unsigned int i;
  476. int err;
  477. if (!fh)
  478. return -ENOMEM;
  479. INIT_LIST_HEAD(&fh->msgs);
  480. INIT_LIST_HEAD(&fh->xfer_list);
  481. for (i = 0; i < CEC_NUM_EVENTS; i++)
  482. INIT_LIST_HEAD(&fh->events[i]);
  483. mutex_init(&fh->lock);
  484. init_waitqueue_head(&fh->wait);
  485. fh->mode_initiator = CEC_MODE_INITIATOR;
  486. fh->adap = adap;
  487. err = cec_get_device(devnode);
  488. if (err) {
  489. kfree(fh);
  490. return err;
  491. }
  492. mutex_lock(&devnode->lock);
  493. if (list_empty(&devnode->fhs) &&
  494. !adap->needs_hpd &&
  495. adap->phys_addr == CEC_PHYS_ADDR_INVALID) {
  496. err = adap->ops->adap_enable(adap, true);
  497. if (err) {
  498. mutex_unlock(&devnode->lock);
  499. kfree(fh);
  500. return err;
  501. }
  502. }
  503. filp->private_data = fh;
  504. /* Queue up initial state events */
  505. ev.state_change.phys_addr = adap->phys_addr;
  506. ev.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
  507. cec_queue_event_fh(fh, &ev, 0);
  508. #ifdef CONFIG_CEC_PIN
  509. if (adap->pin && adap->pin->ops->read_hpd) {
  510. err = adap->pin->ops->read_hpd(adap);
  511. if (err >= 0) {
  512. ev.event = err ? CEC_EVENT_PIN_HPD_HIGH :
  513. CEC_EVENT_PIN_HPD_LOW;
  514. cec_queue_event_fh(fh, &ev, 0);
  515. }
  516. }
  517. if (adap->pin && adap->pin->ops->read_5v) {
  518. err = adap->pin->ops->read_5v(adap);
  519. if (err >= 0) {
  520. ev.event = err ? CEC_EVENT_PIN_5V_HIGH :
  521. CEC_EVENT_PIN_5V_LOW;
  522. cec_queue_event_fh(fh, &ev, 0);
  523. }
  524. }
  525. #endif
  526. list_add(&fh->list, &devnode->fhs);
  527. mutex_unlock(&devnode->lock);
  528. return 0;
  529. }
  530. /* Override for the release function */
  531. static int cec_release(struct inode *inode, struct file *filp)
  532. {
  533. struct cec_devnode *devnode = cec_devnode_data(filp);
  534. struct cec_adapter *adap = to_cec_adapter(devnode);
  535. struct cec_fh *fh = filp->private_data;
  536. unsigned int i;
  537. mutex_lock(&adap->lock);
  538. if (adap->cec_initiator == fh)
  539. adap->cec_initiator = NULL;
  540. if (adap->cec_follower == fh) {
  541. adap->cec_follower = NULL;
  542. adap->passthrough = false;
  543. }
  544. if (fh->mode_follower == CEC_MODE_FOLLOWER)
  545. adap->follower_cnt--;
  546. if (fh->mode_follower == CEC_MODE_MONITOR_PIN)
  547. cec_monitor_pin_cnt_dec(adap);
  548. if (fh->mode_follower == CEC_MODE_MONITOR_ALL)
  549. cec_monitor_all_cnt_dec(adap);
  550. mutex_unlock(&adap->lock);
  551. mutex_lock(&devnode->lock);
  552. list_del(&fh->list);
  553. if (cec_is_registered(adap) && list_empty(&devnode->fhs) &&
  554. !adap->needs_hpd && adap->phys_addr == CEC_PHYS_ADDR_INVALID) {
  555. WARN_ON(adap->ops->adap_enable(adap, false));
  556. }
  557. mutex_unlock(&devnode->lock);
  558. /* Unhook pending transmits from this filehandle. */
  559. mutex_lock(&adap->lock);
  560. while (!list_empty(&fh->xfer_list)) {
  561. struct cec_data *data =
  562. list_first_entry(&fh->xfer_list, struct cec_data, xfer_list);
  563. data->blocking = false;
  564. data->fh = NULL;
  565. list_del(&data->xfer_list);
  566. }
  567. mutex_unlock(&adap->lock);
  568. while (!list_empty(&fh->msgs)) {
  569. struct cec_msg_entry *entry =
  570. list_first_entry(&fh->msgs, struct cec_msg_entry, list);
  571. list_del(&entry->list);
  572. kfree(entry);
  573. }
  574. for (i = CEC_NUM_CORE_EVENTS; i < CEC_NUM_EVENTS; i++) {
  575. while (!list_empty(&fh->events[i])) {
  576. struct cec_event_entry *entry =
  577. list_first_entry(&fh->events[i],
  578. struct cec_event_entry, list);
  579. list_del(&entry->list);
  580. kfree(entry);
  581. }
  582. }
  583. kfree(fh);
  584. cec_put_device(devnode);
  585. filp->private_data = NULL;
  586. return 0;
  587. }
  588. const struct file_operations cec_devnode_fops = {
  589. .owner = THIS_MODULE,
  590. .open = cec_open,
  591. .unlocked_ioctl = cec_ioctl,
  592. .release = cec_release,
  593. .poll = cec_poll,
  594. .llseek = no_llseek,
  595. };