/*
 * dmxdev.c - DVB demultiplexer device
 *
 * Copyright (C) 2000 Ralph Metzler & Marcus Metzler
 *                    for convergence integrated media GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) "dmxdev: " fmt

#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/ioctl.h>
#include <linux/wait.h>
#include <linux/uaccess.h>
#include <media/dmxdev.h>
#include <media/dvb_vb2.h>

static int debug;

module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");

#define dprintk(fmt, arg...) do {				\
	if (debug)						\
		printk(KERN_DEBUG pr_fmt("%s: " fmt),		\
		       __func__, ##arg);			\
} while (0)

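/*
 * Helper: append data to a dmxdev ringbuffer. Returns -EOVERFLOW (and writes
 * nothing) if the buffer does not have enough free space for the whole chunk.
 */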
static int dvb_dmxdev_buffer_write(struct dvb_ringbuffer *buf,
				   const u8 *src, size_t len)
{
	ssize_t free;

	if (!len)
		return 0;
	if (!buf->data)
		return 0;

	free = dvb_ringbuffer_free(buf);
	if (len > free) {
		dprintk("buffer overflow\n");
		return -EOVERFLOW;
	}

	return dvb_ringbuffer_write(buf, src, len);
}

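/*
 * Helper: copy up to @count bytes from a ringbuffer to userspace, sleeping
 * until data (or an error) arrives unless @non_blocking is set. Returns the
 * number of bytes copied, or a negative error code if nothing was copied.
 */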
static ssize_t dvb_dmxdev_buffer_read(struct dvb_ringbuffer *src,
				      int non_blocking, char __user *buf,
				      size_t count, loff_t *ppos)
{
	size_t todo;
	ssize_t avail;
	ssize_t ret = 0;

	if (!src->data)
		return 0;

	if (src->error) {
		ret = src->error;
		dvb_ringbuffer_flush(src);
		return ret;
	}

	for (todo = count; todo > 0; todo -= ret) {
		if (non_blocking && dvb_ringbuffer_empty(src)) {
			ret = -EWOULDBLOCK;
			break;
		}

		ret = wait_event_interruptible(src->queue,
					       !dvb_ringbuffer_empty(src) ||
					       (src->error != 0));
		if (ret < 0)
			break;

		if (src->error) {
			ret = src->error;
			dvb_ringbuffer_flush(src);
			break;
		}

		avail = dvb_ringbuffer_avail(src);
		if (avail > todo)
			avail = todo;

		ret = dvb_ringbuffer_read_user(src, buf, avail);
		if (ret < 0)
			break;

		buf += ret;
	}

	return (count - todo) ? (count - todo) : ret;
}

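/* Look up a demux frontend of the requested type in the demux's frontend list. */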
static struct dmx_frontend *get_fe(struct dmx_demux *demux, int type)
{
	struct list_head *head, *pos;

	head = demux->get_frontends(demux);
	if (!head)
		return NULL;
	list_for_each(pos, head)
		if (DMX_FE_ENTRY(pos)->source == type)
			return DMX_FE_ENTRY(pos);

	return NULL;
}

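/*
 * Open the DVR device node. Readers get a vmalloc'ed ringbuffer (and, with
 * CONFIG_DVB_MMAP, a vb2 context); writers are switched to the memory
 * frontend so that dvb_dvr_write() can inject a TS into the demux.
 */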
static int dvb_dvr_open(struct inode *inode, struct file *file)
{
	struct dvb_device *dvbdev = file->private_data;
	struct dmxdev *dmxdev = dvbdev->priv;
	struct dmx_frontend *front;
	bool need_ringbuffer = false;

	dprintk("%s\n", __func__);

	if (mutex_lock_interruptible(&dmxdev->mutex))
		return -ERESTARTSYS;

	if (dmxdev->exit) {
		mutex_unlock(&dmxdev->mutex);
		return -ENODEV;
	}

	dmxdev->may_do_mmap = 0;

	/*
	 * The logic here is a little tricky due to the ifdef.
	 *
	 * The ringbuffer is used for both read and mmap.
	 *
	 * It is not needed, however, in two situations:
	 *	- Write devices (access with O_WRONLY);
	 *	- For duplex device nodes, opened with O_RDWR.
	 */
	if ((file->f_flags & O_ACCMODE) == O_RDONLY)
		need_ringbuffer = true;
	else if ((file->f_flags & O_ACCMODE) == O_RDWR) {
		if (!(dmxdev->capabilities & DMXDEV_CAP_DUPLEX)) {
#ifdef CONFIG_DVB_MMAP
			dmxdev->may_do_mmap = 1;
			need_ringbuffer = true;
#else
			mutex_unlock(&dmxdev->mutex);
			return -EOPNOTSUPP;
#endif
		}
	}

	if (need_ringbuffer) {
		void *mem;

		if (!dvbdev->readers) {
			mutex_unlock(&dmxdev->mutex);
			return -EBUSY;
		}
		mem = vmalloc(DVR_BUFFER_SIZE);
		if (!mem) {
			mutex_unlock(&dmxdev->mutex);
			return -ENOMEM;
		}
		dvb_ringbuffer_init(&dmxdev->dvr_buffer, mem, DVR_BUFFER_SIZE);
		if (dmxdev->may_do_mmap)
			dvb_vb2_init(&dmxdev->dvr_vb2_ctx, "dvr",
				     file->f_flags & O_NONBLOCK);
		dvbdev->readers--;
	}

	if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
		dmxdev->dvr_orig_fe = dmxdev->demux->frontend;

		if (!dmxdev->demux->write) {
			mutex_unlock(&dmxdev->mutex);
			return -EOPNOTSUPP;
		}

		front = get_fe(dmxdev->demux, DMX_MEMORY_FE);

		if (!front) {
			mutex_unlock(&dmxdev->mutex);
			return -EINVAL;
		}
		dmxdev->demux->disconnect_frontend(dmxdev->demux);
		dmxdev->demux->connect_frontend(dmxdev->demux, front);
	}
	dvbdev->users++;
	mutex_unlock(&dmxdev->mutex);
	return 0;
}

static int dvb_dvr_release(struct inode *inode, struct file *file)
{
	struct dvb_device *dvbdev = file->private_data;
	struct dmxdev *dmxdev = dvbdev->priv;

	mutex_lock(&dmxdev->mutex);

	if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
		dmxdev->demux->disconnect_frontend(dmxdev->demux);
		dmxdev->demux->connect_frontend(dmxdev->demux,
						dmxdev->dvr_orig_fe);
	}

	if (((file->f_flags & O_ACCMODE) == O_RDONLY) ||
	    dmxdev->may_do_mmap) {
		if (dmxdev->may_do_mmap) {
			if (dvb_vb2_is_streaming(&dmxdev->dvr_vb2_ctx))
				dvb_vb2_stream_off(&dmxdev->dvr_vb2_ctx);
			dvb_vb2_release(&dmxdev->dvr_vb2_ctx);
		}
		dvbdev->readers++;
		if (dmxdev->dvr_buffer.data) {
			void *mem = dmxdev->dvr_buffer.data;
			/* memory barrier */
			mb();
			spin_lock_irq(&dmxdev->lock);
			dmxdev->dvr_buffer.data = NULL;
			spin_unlock_irq(&dmxdev->lock);
			vfree(mem);
		}
	}
	/* TODO */
	dvbdev->users--;
	if (dvbdev->users == 1 && dmxdev->exit == 1) {
		mutex_unlock(&dmxdev->mutex);
		wake_up(&dvbdev->wait_queue);
	} else
		mutex_unlock(&dmxdev->mutex);

	return 0;
}

static ssize_t dvb_dvr_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	struct dvb_device *dvbdev = file->private_data;
	struct dmxdev *dmxdev = dvbdev->priv;
	int ret;

	if (!dmxdev->demux->write)
		return -EOPNOTSUPP;
	if ((file->f_flags & O_ACCMODE) != O_WRONLY)
		return -EINVAL;
	if (mutex_lock_interruptible(&dmxdev->mutex))
		return -ERESTARTSYS;

	if (dmxdev->exit) {
		mutex_unlock(&dmxdev->mutex);
		return -ENODEV;
	}
	ret = dmxdev->demux->write(dmxdev->demux, buf, count);
	mutex_unlock(&dmxdev->mutex);
	return ret;
}

static ssize_t dvb_dvr_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct dvb_device *dvbdev = file->private_data;
	struct dmxdev *dmxdev = dvbdev->priv;

	if (dmxdev->exit)
		return -ENODEV;

	return dvb_dmxdev_buffer_read(&dmxdev->dvr_buffer,
				      file->f_flags & O_NONBLOCK,
				      buf, count, ppos);
}

static int dvb_dvr_set_buffer_size(struct dmxdev *dmxdev,
				   unsigned long size)
{
	struct dvb_ringbuffer *buf = &dmxdev->dvr_buffer;
	void *newmem;
	void *oldmem;

	dprintk("%s\n", __func__);

	if (buf->size == size)
		return 0;
	if (!size)
		return -EINVAL;

	newmem = vmalloc(size);
	if (!newmem)
		return -ENOMEM;

	oldmem = buf->data;

	spin_lock_irq(&dmxdev->lock);
	buf->data = newmem;
	buf->size = size;

	/* reset and not flush in case the buffer shrinks */
	dvb_ringbuffer_reset(buf);
	spin_unlock_irq(&dmxdev->lock);

	vfree(oldmem);

	return 0;
}

static inline void dvb_dmxdev_filter_state_set(struct dmxdev_filter
					       *dmxdevfilter, int state)
{
	spin_lock_irq(&dmxdevfilter->dev->lock);
	dmxdevfilter->state = state;
	spin_unlock_irq(&dmxdevfilter->dev->lock);
}

static int dvb_dmxdev_set_buffer_size(struct dmxdev_filter *dmxdevfilter,
				      unsigned long size)
{
	struct dvb_ringbuffer *buf = &dmxdevfilter->buffer;
	void *newmem;
	void *oldmem;

	if (buf->size == size)
		return 0;
	if (!size)
		return -EINVAL;
	if (dmxdevfilter->state >= DMXDEV_STATE_GO)
		return -EBUSY;

	newmem = vmalloc(size);
	if (!newmem)
		return -ENOMEM;

	oldmem = buf->data;

	spin_lock_irq(&dmxdevfilter->dev->lock);
	buf->data = newmem;
	buf->size = size;

	/* reset and not flush in case the buffer shrinks */
	dvb_ringbuffer_reset(buf);
	spin_unlock_irq(&dmxdevfilter->dev->lock);

	vfree(oldmem);

	return 0;
}

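/*
 * Section filter timeout: flag the buffer with -ETIMEDOUT, mark the filter
 * as timed out and wake up any reader sleeping on its queue.
 */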
static void dvb_dmxdev_filter_timeout(struct timer_list *t)
{
	struct dmxdev_filter *dmxdevfilter = from_timer(dmxdevfilter, t, timer);

	dmxdevfilter->buffer.error = -ETIMEDOUT;
	spin_lock_irq(&dmxdevfilter->dev->lock);
	dmxdevfilter->state = DMXDEV_STATE_TIMEDOUT;
	spin_unlock_irq(&dmxdevfilter->dev->lock);
	wake_up(&dmxdevfilter->buffer.queue);
}

static void dvb_dmxdev_filter_timer(struct dmxdev_filter *dmxdevfilter)
{
	struct dmx_sct_filter_params *para = &dmxdevfilter->params.sec;

	del_timer(&dmxdevfilter->timer);
	if (para->timeout) {
		dmxdevfilter->timer.expires =
		    jiffies + 1 + (HZ / 2 + HZ * para->timeout) / 1000;
		add_timer(&dmxdevfilter->timer);
	}
}

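/*
 * Demux callback for section filters: copy the filtered section either into
 * the vb2 buffer (when streaming via mmap) or into the filter's ringbuffer,
 * then wake up readers. DMX_ONESHOT filters are marked done after one section.
 */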
static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len,
				       const u8 *buffer2, size_t buffer2_len,
				       struct dmx_section_filter *filter,
				       u32 *buffer_flags)
{
	struct dmxdev_filter *dmxdevfilter = filter->priv;
	int ret;

	if (!dvb_vb2_is_streaming(&dmxdevfilter->vb2_ctx) &&
	    dmxdevfilter->buffer.error) {
		wake_up(&dmxdevfilter->buffer.queue);
		return 0;
	}
	spin_lock(&dmxdevfilter->dev->lock);
	if (dmxdevfilter->state != DMXDEV_STATE_GO) {
		spin_unlock(&dmxdevfilter->dev->lock);
		return 0;
	}
	del_timer(&dmxdevfilter->timer);
	dprintk("section callback %*ph\n", 6, buffer1);
	if (dvb_vb2_is_streaming(&dmxdevfilter->vb2_ctx)) {
		ret = dvb_vb2_fill_buffer(&dmxdevfilter->vb2_ctx,
					  buffer1, buffer1_len,
					  buffer_flags);
		if (ret == buffer1_len)
			ret = dvb_vb2_fill_buffer(&dmxdevfilter->vb2_ctx,
						  buffer2, buffer2_len,
						  buffer_flags);
	} else {
		ret = dvb_dmxdev_buffer_write(&dmxdevfilter->buffer,
					      buffer1, buffer1_len);
		if (ret == buffer1_len) {
			ret = dvb_dmxdev_buffer_write(&dmxdevfilter->buffer,
						      buffer2, buffer2_len);
		}
	}
	if (ret < 0)
		dmxdevfilter->buffer.error = ret;
	if (dmxdevfilter->params.sec.flags & DMX_ONESHOT)
		dmxdevfilter->state = DMXDEV_STATE_DONE;
	spin_unlock(&dmxdevfilter->dev->lock);
	wake_up(&dmxdevfilter->buffer.queue);
	return 0;
}

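/*
 * Demux callback for TS/PES filters: route the payload to the filter's own
 * buffer (DMX_OUT_TAP, DMX_OUT_TSDEMUX_TAP) or to the shared DVR buffer
 * (DMX_OUT_TS_TAP); data for DMX_OUT_DECODER is consumed by the decoder.
 */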
static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len,
				  const u8 *buffer2, size_t buffer2_len,
				  struct dmx_ts_feed *feed,
				  u32 *buffer_flags)
{
	struct dmxdev_filter *dmxdevfilter = feed->priv;
	struct dvb_ringbuffer *buffer;
#ifdef CONFIG_DVB_MMAP
	struct dvb_vb2_ctx *ctx;
#endif
	int ret;

	spin_lock(&dmxdevfilter->dev->lock);
	if (dmxdevfilter->params.pes.output == DMX_OUT_DECODER) {
		spin_unlock(&dmxdevfilter->dev->lock);
		return 0;
	}

	if (dmxdevfilter->params.pes.output == DMX_OUT_TAP ||
	    dmxdevfilter->params.pes.output == DMX_OUT_TSDEMUX_TAP) {
		buffer = &dmxdevfilter->buffer;
#ifdef CONFIG_DVB_MMAP
		ctx = &dmxdevfilter->vb2_ctx;
#endif
	} else {
		buffer = &dmxdevfilter->dev->dvr_buffer;
#ifdef CONFIG_DVB_MMAP
		ctx = &dmxdevfilter->dev->dvr_vb2_ctx;
#endif
	}

	if (dvb_vb2_is_streaming(ctx)) {
		ret = dvb_vb2_fill_buffer(ctx, buffer1, buffer1_len,
					  buffer_flags);
		if (ret == buffer1_len)
			ret = dvb_vb2_fill_buffer(ctx, buffer2, buffer2_len,
						  buffer_flags);
	} else {
		if (buffer->error) {
			spin_unlock(&dmxdevfilter->dev->lock);
			wake_up(&buffer->queue);
			return 0;
		}
		ret = dvb_dmxdev_buffer_write(buffer, buffer1, buffer1_len);
		if (ret == buffer1_len)
			ret = dvb_dmxdev_buffer_write(buffer,
						      buffer2, buffer2_len);
	}
	if (ret < 0)
		buffer->error = ret;
	spin_unlock(&dmxdevfilter->dev->lock);
	wake_up(&buffer->queue);
	return 0;
}

/* stop feed but only mark the specified filter as stopped (state set) */
static int dvb_dmxdev_feed_stop(struct dmxdev_filter *dmxdevfilter)
{
	struct dmxdev_feed *feed;

	dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET);

	switch (dmxdevfilter->type) {
	case DMXDEV_TYPE_SEC:
		del_timer(&dmxdevfilter->timer);
		dmxdevfilter->feed.sec->stop_filtering(dmxdevfilter->feed.sec);
		break;
	case DMXDEV_TYPE_PES:
		list_for_each_entry(feed, &dmxdevfilter->feed.ts, next)
			feed->ts->stop_filtering(feed->ts);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/* start feed associated with the specified filter */
static int dvb_dmxdev_feed_start(struct dmxdev_filter *filter)
{
	struct dmxdev_feed *feed;
	int ret;

	dvb_dmxdev_filter_state_set(filter, DMXDEV_STATE_GO);

	switch (filter->type) {
	case DMXDEV_TYPE_SEC:
		return filter->feed.sec->start_filtering(filter->feed.sec);
	case DMXDEV_TYPE_PES:
		list_for_each_entry(feed, &filter->feed.ts, next) {
			ret = feed->ts->start_filtering(feed->ts);
			if (ret < 0) {
				dvb_dmxdev_feed_stop(filter);
				return ret;
			}
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * restart section feed if it has filters left associated with it,
 * otherwise release the feed
 */
static int dvb_dmxdev_feed_restart(struct dmxdev_filter *filter)
{
	int i;
	struct dmxdev *dmxdev = filter->dev;
	u16 pid = filter->params.sec.pid;

	for (i = 0; i < dmxdev->filternum; i++)
		if (dmxdev->filter[i].state >= DMXDEV_STATE_GO &&
		    dmxdev->filter[i].type == DMXDEV_TYPE_SEC &&
		    dmxdev->filter[i].params.sec.pid == pid) {
			dvb_dmxdev_feed_start(&dmxdev->filter[i]);
			return 0;
		}

	filter->dev->demux->release_section_feed(dmxdev->demux,
						 filter->feed.sec);

	return 0;
}

static int dvb_dmxdev_filter_stop(struct dmxdev_filter *dmxdevfilter)
{
	struct dmxdev_feed *feed;
	struct dmx_demux *demux;

	if (dmxdevfilter->state < DMXDEV_STATE_GO)
		return 0;

	switch (dmxdevfilter->type) {
	case DMXDEV_TYPE_SEC:
		if (!dmxdevfilter->feed.sec)
			break;
		dvb_dmxdev_feed_stop(dmxdevfilter);
		if (dmxdevfilter->filter.sec)
			dmxdevfilter->feed.sec->
			    release_filter(dmxdevfilter->feed.sec,
					   dmxdevfilter->filter.sec);
		dvb_dmxdev_feed_restart(dmxdevfilter);
		dmxdevfilter->feed.sec = NULL;
		break;
	case DMXDEV_TYPE_PES:
		dvb_dmxdev_feed_stop(dmxdevfilter);
		demux = dmxdevfilter->dev->demux;
		list_for_each_entry(feed, &dmxdevfilter->feed.ts, next) {
			demux->release_ts_feed(demux, feed->ts);
			feed->ts = NULL;
		}
		break;
	default:
		if (dmxdevfilter->state == DMXDEV_STATE_ALLOCATED)
			return 0;
		return -EINVAL;
	}

	dvb_ringbuffer_flush(&dmxdevfilter->buffer);
	return 0;
}

static void dvb_dmxdev_delete_pids(struct dmxdev_filter *dmxdevfilter)
{
	struct dmxdev_feed *feed, *tmp;

	/* delete all PIDs */
	list_for_each_entry_safe(feed, tmp, &dmxdevfilter->feed.ts, next) {
		list_del(&feed->next);
		kfree(feed);
	}

	BUG_ON(!list_empty(&dmxdevfilter->feed.ts));
}

static inline int dvb_dmxdev_filter_reset(struct dmxdev_filter *dmxdevfilter)
{
	if (dmxdevfilter->state < DMXDEV_STATE_SET)
		return 0;

	if (dmxdevfilter->type == DMXDEV_TYPE_PES)
		dvb_dmxdev_delete_pids(dmxdevfilter);

	dmxdevfilter->type = DMXDEV_TYPE_NONE;
	dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED);
	return 0;
}

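/*
 * Allocate, configure and start a TS feed for one PID of a PES filter. The
 * requested output type determines the TS_PACKET/TS_DEMUX/TS_PAYLOAD_ONLY
 * flags passed to the demux.
 */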
static int dvb_dmxdev_start_feed(struct dmxdev *dmxdev,
				 struct dmxdev_filter *filter,
				 struct dmxdev_feed *feed)
{
	ktime_t timeout = ktime_set(0, 0);
	struct dmx_pes_filter_params *para = &filter->params.pes;
	enum dmx_output otype;
	int ret;
	int ts_type;
	enum dmx_ts_pes ts_pes;
	struct dmx_ts_feed *tsfeed;

	feed->ts = NULL;
	otype = para->output;

	ts_pes = para->pes_type;

	if (ts_pes < DMX_PES_OTHER)
		ts_type = TS_DECODER;
	else
		ts_type = 0;

	if (otype == DMX_OUT_TS_TAP)
		ts_type |= TS_PACKET;
	else if (otype == DMX_OUT_TSDEMUX_TAP)
		ts_type |= TS_PACKET | TS_DEMUX;
	else if (otype == DMX_OUT_TAP)
		ts_type |= TS_PACKET | TS_DEMUX | TS_PAYLOAD_ONLY;

	ret = dmxdev->demux->allocate_ts_feed(dmxdev->demux, &feed->ts,
					      dvb_dmxdev_ts_callback);
	if (ret < 0)
		return ret;

	tsfeed = feed->ts;
	tsfeed->priv = filter;

	ret = tsfeed->set(tsfeed, feed->pid, ts_type, ts_pes, timeout);
	if (ret < 0) {
		dmxdev->demux->release_ts_feed(dmxdev->demux, tsfeed);
		return ret;
	}

	ret = tsfeed->start_filtering(tsfeed);
	if (ret < 0) {
		dmxdev->demux->release_ts_feed(dmxdev->demux, tsfeed);
		return ret;
	}

	return 0;
}

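/*
 * Start a configured filter: allocate its ringbuffer on first use, then set
 * up the section filter (sharing an existing section feed for the same PID
 * when possible) or start the TS feeds of a PES filter.
 */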
static int dvb_dmxdev_filter_start(struct dmxdev_filter *filter)
{
	struct dmxdev *dmxdev = filter->dev;
	struct dmxdev_feed *feed;
	void *mem;
	int ret, i;

	if (filter->state < DMXDEV_STATE_SET)
		return -EINVAL;

	if (filter->state >= DMXDEV_STATE_GO)
		dvb_dmxdev_filter_stop(filter);

	if (!filter->buffer.data) {
		mem = vmalloc(filter->buffer.size);
		if (!mem)
			return -ENOMEM;
		spin_lock_irq(&filter->dev->lock);
		filter->buffer.data = mem;
		spin_unlock_irq(&filter->dev->lock);
	}

	dvb_ringbuffer_flush(&filter->buffer);

	switch (filter->type) {
	case DMXDEV_TYPE_SEC:
	{
		struct dmx_sct_filter_params *para = &filter->params.sec;
		struct dmx_section_filter **secfilter = &filter->filter.sec;
		struct dmx_section_feed **secfeed = &filter->feed.sec;

		*secfilter = NULL;
		*secfeed = NULL;

		/* find active filter/feed with same PID */
		for (i = 0; i < dmxdev->filternum; i++) {
			if (dmxdev->filter[i].state >= DMXDEV_STATE_GO &&
			    dmxdev->filter[i].type == DMXDEV_TYPE_SEC &&
			    dmxdev->filter[i].params.sec.pid == para->pid) {
				*secfeed = dmxdev->filter[i].feed.sec;
				break;
			}
		}

		/* if no feed found, try to allocate new one */
		if (!*secfeed) {
			ret = dmxdev->demux->allocate_section_feed(dmxdev->demux,
								   secfeed,
								   dvb_dmxdev_section_callback);
			if (ret < 0) {
				pr_err("DVB (%s): could not alloc feed\n",
				       __func__);
				return ret;
			}

			ret = (*secfeed)->set(*secfeed, para->pid,
					      (para->flags & DMX_CHECK_CRC) ? 1 : 0);
			if (ret < 0) {
				pr_err("DVB (%s): could not set feed\n",
				       __func__);
				dvb_dmxdev_feed_restart(filter);
				return ret;
			}
		} else {
			dvb_dmxdev_feed_stop(filter);
		}

		ret = (*secfeed)->allocate_filter(*secfeed, secfilter);
		if (ret < 0) {
			dvb_dmxdev_feed_restart(filter);
			filter->feed.sec->start_filtering(*secfeed);
			dprintk("could not get filter\n");
			return ret;
		}

		(*secfilter)->priv = filter;

		memcpy(&((*secfilter)->filter_value[3]),
		       &(para->filter.filter[1]), DMX_FILTER_SIZE - 1);
		memcpy(&(*secfilter)->filter_mask[3],
		       &para->filter.mask[1], DMX_FILTER_SIZE - 1);
		memcpy(&(*secfilter)->filter_mode[3],
		       &para->filter.mode[1], DMX_FILTER_SIZE - 1);

		(*secfilter)->filter_value[0] = para->filter.filter[0];
		(*secfilter)->filter_mask[0] = para->filter.mask[0];
		(*secfilter)->filter_mode[0] = para->filter.mode[0];
		(*secfilter)->filter_mask[1] = 0;
		(*secfilter)->filter_mask[2] = 0;

		filter->todo = 0;

		ret = filter->feed.sec->start_filtering(filter->feed.sec);
		if (ret < 0)
			return ret;

		dvb_dmxdev_filter_timer(filter);
		break;
	}
	case DMXDEV_TYPE_PES:
		list_for_each_entry(feed, &filter->feed.ts, next) {
			ret = dvb_dmxdev_start_feed(dmxdev, filter, feed);
			if (ret < 0) {
				dvb_dmxdev_filter_stop(filter);
				return ret;
			}
		}
		break;
	default:
		return -EINVAL;
	}

	dvb_dmxdev_filter_state_set(filter, DMXDEV_STATE_GO);
	return 0;
}

static int dvb_demux_open(struct inode *inode, struct file *file)
{
	struct dvb_device *dvbdev = file->private_data;
	struct dmxdev *dmxdev = dvbdev->priv;
	int i;
	struct dmxdev_filter *dmxdevfilter;

	if (!dmxdev->filter)
		return -EINVAL;

	if (mutex_lock_interruptible(&dmxdev->mutex))
		return -ERESTARTSYS;

	for (i = 0; i < dmxdev->filternum; i++)
		if (dmxdev->filter[i].state == DMXDEV_STATE_FREE)
			break;

	if (i == dmxdev->filternum) {
		mutex_unlock(&dmxdev->mutex);
		return -EMFILE;
	}

	dmxdevfilter = &dmxdev->filter[i];
	mutex_init(&dmxdevfilter->mutex);
	file->private_data = dmxdevfilter;

#ifdef CONFIG_DVB_MMAP
	dmxdev->may_do_mmap = 1;
#else
	dmxdev->may_do_mmap = 0;
#endif

	dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192);
	dvb_vb2_init(&dmxdevfilter->vb2_ctx, "demux_filter",
		     file->f_flags & O_NONBLOCK);
	dmxdevfilter->type = DMXDEV_TYPE_NONE;
	dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED);
	timer_setup(&dmxdevfilter->timer, dvb_dmxdev_filter_timeout, 0);

	dvbdev->users++;

	mutex_unlock(&dmxdev->mutex);
	return 0;
}

static int dvb_dmxdev_filter_free(struct dmxdev *dmxdev,
				  struct dmxdev_filter *dmxdevfilter)
{
	mutex_lock(&dmxdev->mutex);
	mutex_lock(&dmxdevfilter->mutex);
	if (dvb_vb2_is_streaming(&dmxdevfilter->vb2_ctx))
		dvb_vb2_stream_off(&dmxdevfilter->vb2_ctx);
	dvb_vb2_release(&dmxdevfilter->vb2_ctx);

	dvb_dmxdev_filter_stop(dmxdevfilter);
	dvb_dmxdev_filter_reset(dmxdevfilter);

	if (dmxdevfilter->buffer.data) {
		void *mem = dmxdevfilter->buffer.data;

		spin_lock_irq(&dmxdev->lock);
		dmxdevfilter->buffer.data = NULL;
		spin_unlock_irq(&dmxdev->lock);
		vfree(mem);
	}

	dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_FREE);
	wake_up(&dmxdevfilter->buffer.queue);
	mutex_unlock(&dmxdevfilter->mutex);
	mutex_unlock(&dmxdev->mutex);
	return 0;
}

static inline void invert_mode(struct dmx_filter *filter)
{
	int i;

	for (i = 0; i < DMX_FILTER_SIZE; i++)
		filter->mode[i] ^= 0xff;
}

static int dvb_dmxdev_add_pid(struct dmxdev *dmxdev,
			      struct dmxdev_filter *filter, u16 pid)
{
	struct dmxdev_feed *feed;

	if ((filter->type != DMXDEV_TYPE_PES) ||
	    (filter->state < DMXDEV_STATE_SET))
		return -EINVAL;

	/* only TS packet filters may have multiple PIDs */
	if ((filter->params.pes.output != DMX_OUT_TSDEMUX_TAP) &&
	    (!list_empty(&filter->feed.ts)))
		return -EINVAL;

	feed = kzalloc(sizeof(struct dmxdev_feed), GFP_KERNEL);
	if (feed == NULL)
		return -ENOMEM;

	feed->pid = pid;
	list_add(&feed->next, &filter->feed.ts);

	if (filter->state >= DMXDEV_STATE_GO)
		return dvb_dmxdev_start_feed(dmxdev, filter, feed);

	return 0;
}

static int dvb_dmxdev_remove_pid(struct dmxdev *dmxdev,
				 struct dmxdev_filter *filter, u16 pid)
{
	struct dmxdev_feed *feed, *tmp;

	if ((filter->type != DMXDEV_TYPE_PES) ||
	    (filter->state < DMXDEV_STATE_SET))
		return -EINVAL;

	list_for_each_entry_safe(feed, tmp, &filter->feed.ts, next) {
		if ((feed->pid == pid) && (feed->ts != NULL)) {
			feed->ts->stop_filtering(feed->ts);
			filter->dev->demux->release_ts_feed(filter->dev->demux,
							    feed->ts);
			list_del(&feed->next);
			kfree(feed);
		}
	}

	return 0;
}

static int dvb_dmxdev_filter_set(struct dmxdev *dmxdev,
				 struct dmxdev_filter *dmxdevfilter,
				 struct dmx_sct_filter_params *params)
{
	dprintk("%s: PID=0x%04x, flags=%02x, timeout=%d\n",
		__func__, params->pid, params->flags, params->timeout);

	dvb_dmxdev_filter_stop(dmxdevfilter);

	dmxdevfilter->type = DMXDEV_TYPE_SEC;
	memcpy(&dmxdevfilter->params.sec,
	       params, sizeof(struct dmx_sct_filter_params));
	invert_mode(&dmxdevfilter->params.sec.filter);
	dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET);

	if (params->flags & DMX_IMMEDIATE_START)
		return dvb_dmxdev_filter_start(dmxdevfilter);

	return 0;
}

static int dvb_dmxdev_pes_filter_set(struct dmxdev *dmxdev,
				     struct dmxdev_filter *dmxdevfilter,
				     struct dmx_pes_filter_params *params)
{
	int ret;

	dvb_dmxdev_filter_stop(dmxdevfilter);
	dvb_dmxdev_filter_reset(dmxdevfilter);

	if ((unsigned int)params->pes_type > DMX_PES_OTHER)
		return -EINVAL;

	dmxdevfilter->type = DMXDEV_TYPE_PES;
	memcpy(&dmxdevfilter->params, params,
	       sizeof(struct dmx_pes_filter_params));
	INIT_LIST_HEAD(&dmxdevfilter->feed.ts);

	dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET);

	ret = dvb_dmxdev_add_pid(dmxdev, dmxdevfilter,
				 dmxdevfilter->params.pes.pid);
	if (ret < 0)
		return ret;

	if (params->flags & DMX_IMMEDIATE_START)
		return dvb_dmxdev_filter_start(dmxdevfilter);

	return 0;
}

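/*
 * Read a section from a section filter. The first three header bytes are
 * copied out first (dfil->todo tracks how much of the header is still
 * pending), then up to section_length bytes of payload follow.
 */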
static ssize_t dvb_dmxdev_read_sec(struct dmxdev_filter *dfil,
				   struct file *file, char __user *buf,
				   size_t count, loff_t *ppos)
{
	int result, hcount;
	int done = 0;

	if (dfil->todo <= 0) {
		hcount = 3 + dfil->todo;
		if (hcount > count)
			hcount = count;
		result = dvb_dmxdev_buffer_read(&dfil->buffer,
						file->f_flags & O_NONBLOCK,
						buf, hcount, ppos);
		if (result < 0) {
			dfil->todo = 0;
			return result;
		}
		if (copy_from_user(dfil->secheader - dfil->todo, buf, result))
			return -EFAULT;
		buf += result;
		done = result;
		count -= result;
		dfil->todo -= result;
		if (dfil->todo > -3)
			return done;
		dfil->todo = ((dfil->secheader[1] << 8) | dfil->secheader[2]) & 0xfff;
		if (!count)
			return done;
	}
	if (count > dfil->todo)
		count = dfil->todo;
	result = dvb_dmxdev_buffer_read(&dfil->buffer,
					file->f_flags & O_NONBLOCK,
					buf, count, ppos);
	if (result < 0)
		return result;
	dfil->todo -= result;
	return (result + done);
}

static ssize_t
dvb_demux_read(struct file *file, char __user *buf, size_t count,
	       loff_t *ppos)
{
	struct dmxdev_filter *dmxdevfilter = file->private_data;
	int ret;

	if (mutex_lock_interruptible(&dmxdevfilter->mutex))
		return -ERESTARTSYS;

	if (dmxdevfilter->type == DMXDEV_TYPE_SEC)
		ret = dvb_dmxdev_read_sec(dmxdevfilter, file, buf, count, ppos);
	else
		ret = dvb_dmxdev_buffer_read(&dmxdevfilter->buffer,
					     file->f_flags & O_NONBLOCK,
					     buf, count, ppos);

	mutex_unlock(&dmxdevfilter->mutex);
	return ret;
}

static int dvb_demux_do_ioctl(struct file *file,
			      unsigned int cmd, void *parg)
{
	struct dmxdev_filter *dmxdevfilter = file->private_data;
	struct dmxdev *dmxdev = dmxdevfilter->dev;
	unsigned long arg = (unsigned long)parg;
	int ret = 0;

	if (mutex_lock_interruptible(&dmxdev->mutex))
		return -ERESTARTSYS;

	switch (cmd) {
	case DMX_START:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}
		if (dmxdevfilter->state < DMXDEV_STATE_SET)
			ret = -EINVAL;
		else
			ret = dvb_dmxdev_filter_start(dmxdevfilter);
		mutex_unlock(&dmxdevfilter->mutex);
		break;
	case DMX_STOP:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}
		ret = dvb_dmxdev_filter_stop(dmxdevfilter);
		mutex_unlock(&dmxdevfilter->mutex);
		break;
	case DMX_SET_FILTER:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}
		ret = dvb_dmxdev_filter_set(dmxdev, dmxdevfilter, parg);
		mutex_unlock(&dmxdevfilter->mutex);
		break;
	case DMX_SET_PES_FILTER:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}
		ret = dvb_dmxdev_pes_filter_set(dmxdev, dmxdevfilter, parg);
		mutex_unlock(&dmxdevfilter->mutex);
		break;
	case DMX_SET_BUFFER_SIZE:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}
		ret = dvb_dmxdev_set_buffer_size(dmxdevfilter, arg);
		mutex_unlock(&dmxdevfilter->mutex);
		break;
	case DMX_GET_PES_PIDS:
		if (!dmxdev->demux->get_pes_pids) {
			ret = -EINVAL;
			break;
		}
		dmxdev->demux->get_pes_pids(dmxdev->demux, parg);
		break;
	case DMX_GET_STC:
		if (!dmxdev->demux->get_stc) {
			ret = -EINVAL;
			break;
		}
		ret = dmxdev->demux->get_stc(dmxdev->demux,
					     ((struct dmx_stc *)parg)->num,
					     &((struct dmx_stc *)parg)->stc,
					     &((struct dmx_stc *)parg)->base);
		break;
	case DMX_ADD_PID:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			ret = -ERESTARTSYS;
			break;
		}
		ret = dvb_dmxdev_add_pid(dmxdev, dmxdevfilter, *(u16 *)parg);
		mutex_unlock(&dmxdevfilter->mutex);
		break;
	case DMX_REMOVE_PID:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			ret = -ERESTARTSYS;
			break;
		}
		ret = dvb_dmxdev_remove_pid(dmxdev, dmxdevfilter, *(u16 *)parg);
		mutex_unlock(&dmxdevfilter->mutex);
		break;
#ifdef CONFIG_DVB_MMAP
	case DMX_REQBUFS:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}
		ret = dvb_vb2_reqbufs(&dmxdevfilter->vb2_ctx, parg);
		mutex_unlock(&dmxdevfilter->mutex);
		break;
	case DMX_QUERYBUF:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}
		ret = dvb_vb2_querybuf(&dmxdevfilter->vb2_ctx, parg);
		mutex_unlock(&dmxdevfilter->mutex);
		break;
	case DMX_EXPBUF:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}
		ret = dvb_vb2_expbuf(&dmxdevfilter->vb2_ctx, parg);
		mutex_unlock(&dmxdevfilter->mutex);
		break;
	case DMX_QBUF:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}
		ret = dvb_vb2_qbuf(&dmxdevfilter->vb2_ctx, parg);
		if (ret == 0 && !dvb_vb2_is_streaming(&dmxdevfilter->vb2_ctx))
			ret = dvb_vb2_stream_on(&dmxdevfilter->vb2_ctx);
		mutex_unlock(&dmxdevfilter->mutex);
		break;
	case DMX_DQBUF:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}
		ret = dvb_vb2_dqbuf(&dmxdevfilter->vb2_ctx, parg);
		mutex_unlock(&dmxdevfilter->mutex);
		break;
#endif
	default:
		ret = -ENOTTY;
		break;
	}
	mutex_unlock(&dmxdev->mutex);
	return ret;
}

static long dvb_demux_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	return dvb_usercopy(file, cmd, arg, dvb_demux_do_ioctl);
}

static __poll_t dvb_demux_poll(struct file *file, poll_table *wait)
{
	struct dmxdev_filter *dmxdevfilter = file->private_data;
	__poll_t mask = 0;

	if ((!dmxdevfilter) || dmxdevfilter->dev->exit)
		return EPOLLERR;
	if (dvb_vb2_is_streaming(&dmxdevfilter->vb2_ctx))
		return dvb_vb2_poll(&dmxdevfilter->vb2_ctx, file, wait);

	poll_wait(file, &dmxdevfilter->buffer.queue, wait);

	if (dmxdevfilter->state != DMXDEV_STATE_GO &&
	    dmxdevfilter->state != DMXDEV_STATE_DONE &&
	    dmxdevfilter->state != DMXDEV_STATE_TIMEDOUT)
		return 0;

	if (dmxdevfilter->buffer.error)
		mask |= (EPOLLIN | EPOLLRDNORM | EPOLLPRI | EPOLLERR);

	if (!dvb_ringbuffer_empty(&dmxdevfilter->buffer))
		mask |= (EPOLLIN | EPOLLRDNORM | EPOLLPRI);

	return mask;
}

#ifdef CONFIG_DVB_MMAP
static int dvb_demux_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct dmxdev_filter *dmxdevfilter = file->private_data;
	struct dmxdev *dmxdev = dmxdevfilter->dev;
	int ret;

	if (!dmxdev->may_do_mmap)
		return -ENOTTY;

	if (mutex_lock_interruptible(&dmxdev->mutex))
		return -ERESTARTSYS;

	if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
		mutex_unlock(&dmxdev->mutex);
		return -ERESTARTSYS;
	}
	ret = dvb_vb2_mmap(&dmxdevfilter->vb2_ctx, vma);

	mutex_unlock(&dmxdevfilter->mutex);
	mutex_unlock(&dmxdev->mutex);

	return ret;
}
#endif

static int dvb_demux_release(struct inode *inode, struct file *file)
{
	struct dmxdev_filter *dmxdevfilter = file->private_data;
	struct dmxdev *dmxdev = dmxdevfilter->dev;
	int ret;

	ret = dvb_dmxdev_filter_free(dmxdev, dmxdevfilter);

	mutex_lock(&dmxdev->mutex);
	dmxdev->dvbdev->users--;
	if (dmxdev->dvbdev->users == 1 && dmxdev->exit == 1) {
		mutex_unlock(&dmxdev->mutex);
		wake_up(&dmxdev->dvbdev->wait_queue);
	} else
		mutex_unlock(&dmxdev->mutex);

	return ret;
}

static const struct file_operations dvb_demux_fops = {
	.owner = THIS_MODULE,
	.read = dvb_demux_read,
	.unlocked_ioctl = dvb_demux_ioctl,
	.open = dvb_demux_open,
	.release = dvb_demux_release,
	.poll = dvb_demux_poll,
	.llseek = default_llseek,
#ifdef CONFIG_DVB_MMAP
	.mmap = dvb_demux_mmap,
#endif
};

static const struct dvb_device dvbdev_demux = {
	.priv = NULL,
	.users = 1,
	.writers = 1,
#if defined(CONFIG_MEDIA_CONTROLLER_DVB)
	.name = "dvb-demux",
#endif
	.fops = &dvb_demux_fops
};

static int dvb_dvr_do_ioctl(struct file *file,
			    unsigned int cmd, void *parg)
{
	struct dvb_device *dvbdev = file->private_data;
	struct dmxdev *dmxdev = dvbdev->priv;
	unsigned long arg = (unsigned long)parg;
	int ret;

	if (mutex_lock_interruptible(&dmxdev->mutex))
		return -ERESTARTSYS;

	switch (cmd) {
	case DMX_SET_BUFFER_SIZE:
		ret = dvb_dvr_set_buffer_size(dmxdev, arg);
		break;
#ifdef CONFIG_DVB_MMAP
	case DMX_REQBUFS:
		ret = dvb_vb2_reqbufs(&dmxdev->dvr_vb2_ctx, parg);
		break;
	case DMX_QUERYBUF:
		ret = dvb_vb2_querybuf(&dmxdev->dvr_vb2_ctx, parg);
		break;
	case DMX_EXPBUF:
		ret = dvb_vb2_expbuf(&dmxdev->dvr_vb2_ctx, parg);
		break;
	case DMX_QBUF:
		ret = dvb_vb2_qbuf(&dmxdev->dvr_vb2_ctx, parg);
		if (ret == 0 && !dvb_vb2_is_streaming(&dmxdev->dvr_vb2_ctx))
			ret = dvb_vb2_stream_on(&dmxdev->dvr_vb2_ctx);
		break;
	case DMX_DQBUF:
		ret = dvb_vb2_dqbuf(&dmxdev->dvr_vb2_ctx, parg);
		break;
#endif
	default:
		ret = -ENOTTY;
		break;
	}
	mutex_unlock(&dmxdev->mutex);
	return ret;
}

static long dvb_dvr_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	return dvb_usercopy(file, cmd, arg, dvb_dvr_do_ioctl);
}

static __poll_t dvb_dvr_poll(struct file *file, poll_table *wait)
{
	struct dvb_device *dvbdev = file->private_data;
	struct dmxdev *dmxdev = dvbdev->priv;
	__poll_t mask = 0;

	dprintk("%s\n", __func__);

	if (dmxdev->exit)
		return EPOLLERR;
	if (dvb_vb2_is_streaming(&dmxdev->dvr_vb2_ctx))
		return dvb_vb2_poll(&dmxdev->dvr_vb2_ctx, file, wait);

	poll_wait(file, &dmxdev->dvr_buffer.queue, wait);

	if (((file->f_flags & O_ACCMODE) == O_RDONLY) ||
	    dmxdev->may_do_mmap) {
		if (dmxdev->dvr_buffer.error)
			mask |= (EPOLLIN | EPOLLRDNORM | EPOLLPRI | EPOLLERR);

		if (!dvb_ringbuffer_empty(&dmxdev->dvr_buffer))
			mask |= (EPOLLIN | EPOLLRDNORM | EPOLLPRI);
	} else
		mask |= (EPOLLOUT | EPOLLWRNORM | EPOLLPRI);

	return mask;
}

#ifdef CONFIG_DVB_MMAP
static int dvb_dvr_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct dvb_device *dvbdev = file->private_data;
	struct dmxdev *dmxdev = dvbdev->priv;
	int ret;

	if (!dmxdev->may_do_mmap)
		return -ENOTTY;

	if (dmxdev->exit)
		return -ENODEV;

	if (mutex_lock_interruptible(&dmxdev->mutex))
		return -ERESTARTSYS;

	ret = dvb_vb2_mmap(&dmxdev->dvr_vb2_ctx, vma);
	mutex_unlock(&dmxdev->mutex);
	return ret;
}
#endif

static const struct file_operations dvb_dvr_fops = {
	.owner = THIS_MODULE,
	.read = dvb_dvr_read,
	.write = dvb_dvr_write,
	.unlocked_ioctl = dvb_dvr_ioctl,
	.open = dvb_dvr_open,
	.release = dvb_dvr_release,
	.poll = dvb_dvr_poll,
	.llseek = default_llseek,
#ifdef CONFIG_DVB_MMAP
	.mmap = dvb_dvr_mmap,
#endif
};

static const struct dvb_device dvbdev_dvr = {
	.priv = NULL,
	.readers = 1,
	.users = 1,
#if defined(CONFIG_MEDIA_CONTROLLER_DVB)
	.name = "dvb-dvr",
#endif
	.fops = &dvb_dvr_fops
};

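/*
 * Register the demux and DVR device nodes for an adapter and initialize the
 * filter table and the DVR ringbuffer.
 */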
int dvb_dmxdev_init(struct dmxdev *dmxdev, struct dvb_adapter *dvb_adapter)
{
	int i;

	if (dmxdev->demux->open(dmxdev->demux) < 0)
		return -EUSERS;

	dmxdev->filter = vmalloc(array_size(sizeof(struct dmxdev_filter),
					    dmxdev->filternum));
	if (!dmxdev->filter)
		return -ENOMEM;

	mutex_init(&dmxdev->mutex);
	spin_lock_init(&dmxdev->lock);
	for (i = 0; i < dmxdev->filternum; i++) {
		dmxdev->filter[i].dev = dmxdev;
		dmxdev->filter[i].buffer.data = NULL;
		dvb_dmxdev_filter_state_set(&dmxdev->filter[i],
					    DMXDEV_STATE_FREE);
	}

	dvb_register_device(dvb_adapter, &dmxdev->dvbdev, &dvbdev_demux, dmxdev,
			    DVB_DEVICE_DEMUX, dmxdev->filternum);
	dvb_register_device(dvb_adapter, &dmxdev->dvr_dvbdev, &dvbdev_dvr,
			    dmxdev, DVB_DEVICE_DVR, dmxdev->filternum);

	dvb_ringbuffer_init(&dmxdev->dvr_buffer, NULL, 8192);

	return 0;
}
EXPORT_SYMBOL(dvb_dmxdev_init);

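/*
 * Tear down the demux and DVR device nodes: wait for the last users to close
 * their file descriptors, then unregister the devices and free the filters.
 */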
void dvb_dmxdev_release(struct dmxdev *dmxdev)
{
	dmxdev->exit = 1;
	if (dmxdev->dvbdev->users > 1) {
		wait_event(dmxdev->dvbdev->wait_queue,
			   dmxdev->dvbdev->users == 1);
	}
	if (dmxdev->dvr_dvbdev->users > 1) {
		wait_event(dmxdev->dvr_dvbdev->wait_queue,
			   dmxdev->dvr_dvbdev->users == 1);
	}

	dvb_unregister_device(dmxdev->dvbdev);
	dvb_unregister_device(dmxdev->dvr_dvbdev);

	vfree(dmxdev->filter);
	dmxdev->filter = NULL;
	dmxdev->demux->close(dmxdev->demux);
}
EXPORT_SYMBOL(dvb_dmxdev_release);