/*
 * generic helper functions for handling video4linux capture buffers
 *
 * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
 *
 * Highly based on video-buf written originally by:
 * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
 * (c) 2006 Mauro Carvalho Chehab, <mchehab@kernel.org>
 * (c) 2006 Ted Walther and John Sokol
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/interrupt.h>

#include <media/videobuf-core.h>

#define MAGIC_BUFFER 0x20070728
#define MAGIC_CHECK(is, should) \
	do { \
		if (unlikely((is) != (should))) { \
			printk(KERN_ERR \
				"magic mismatch: %x (expected %x)\n", \
				is, should); \
			BUG(); \
		} \
	} while (0)

static int debug;
module_param(debug, int, 0644);

MODULE_DESCRIPTION("helper module to manage video4linux buffers");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>");
MODULE_LICENSE("GPL");

#define dprintk(level, fmt, arg...) \
	do { \
		if (debug >= level) \
			printk(KERN_DEBUG "vbuf: " fmt, ## arg); \
	} while (0)

/* --------------------------------------------------------------------- */

#define CALL(q, f, arg...) \
	((q->int_ops->f) ? q->int_ops->f(arg) : 0)
#define CALLPTR(q, f, arg...) \
	((q->int_ops->f) ? q->int_ops->f(arg) : NULL)

struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q)
{
	struct videobuf_buffer *vb;

	BUG_ON(q->msize < sizeof(*vb));

	if (!q->int_ops || !q->int_ops->alloc_vb) {
		printk(KERN_ERR "No specific ops defined!\n");
		BUG();
	}

	vb = q->int_ops->alloc_vb(q->msize);
	if (NULL != vb) {
		init_waitqueue_head(&vb->done);
		vb->magic = MAGIC_BUFFER;
	}

	return vb;
}
EXPORT_SYMBOL_GPL(videobuf_alloc_vb);

static int state_neither_active_nor_queued(struct videobuf_queue *q,
					   struct videobuf_buffer *vb)
{
	unsigned long flags;
	bool rc;

	spin_lock_irqsave(q->irqlock, flags);
	rc = vb->state != VIDEOBUF_ACTIVE && vb->state != VIDEOBUF_QUEUED;
	spin_unlock_irqrestore(q->irqlock, flags);
	return rc;
}

int videobuf_waiton(struct videobuf_queue *q, struct videobuf_buffer *vb,
		int non_blocking, int intr)
{
	bool is_ext_locked;
	int ret = 0;

	MAGIC_CHECK(vb->magic, MAGIC_BUFFER);

	if (non_blocking) {
		if (state_neither_active_nor_queued(q, vb))
			return 0;
		return -EAGAIN;
	}

	is_ext_locked = q->ext_lock && mutex_is_locked(q->ext_lock);

	/* Release the vdev lock so that this wait does not block other
	   access to the device. */
	if (is_ext_locked)
		mutex_unlock(q->ext_lock);
	if (intr)
		ret = wait_event_interruptible(vb->done,
				state_neither_active_nor_queued(q, vb));
	else
		wait_event(vb->done, state_neither_active_nor_queued(q, vb));
	/* Relock */
	if (is_ext_locked)
		mutex_lock(q->ext_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(videobuf_waiton);

int videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb,
		    struct v4l2_framebuffer *fbuf)
{
	MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	return CALL(q, iolock, q, vb, fbuf);
}
EXPORT_SYMBOL_GPL(videobuf_iolock);

void *videobuf_queue_to_vaddr(struct videobuf_queue *q,
			      struct videobuf_buffer *buf)
{
	if (q->int_ops->vaddr)
		return q->int_ops->vaddr(buf);
	return NULL;
}
EXPORT_SYMBOL_GPL(videobuf_queue_to_vaddr);

/* --------------------------------------------------------------------- */

void videobuf_queue_core_init(struct videobuf_queue *q,
			 const struct videobuf_queue_ops *ops,
			 struct device *dev,
			 spinlock_t *irqlock,
			 enum v4l2_buf_type type,
			 enum v4l2_field field,
			 unsigned int msize,
			 void *priv,
			 struct videobuf_qtype_ops *int_ops,
			 struct mutex *ext_lock)
{
	BUG_ON(!q);
	memset(q, 0, sizeof(*q));
	q->irqlock   = irqlock;
	q->ext_lock  = ext_lock;
	q->dev       = dev;
	q->type      = type;
	q->field     = field;
	q->msize     = msize;
	q->ops       = ops;
	q->priv_data = priv;
	q->int_ops   = int_ops;

	/* All buffer operations are mandatory */
	BUG_ON(!q->ops->buf_setup);
	BUG_ON(!q->ops->buf_prepare);
	BUG_ON(!q->ops->buf_queue);
	BUG_ON(!q->ops->buf_release);

	/* Lock is mandatory for queue_cancel to work */
	BUG_ON(!irqlock);

	/* Having an implementation of the abstract methods is mandatory */
	BUG_ON(!q->int_ops);

	mutex_init(&q->vb_lock);
	init_waitqueue_head(&q->wait);
	INIT_LIST_HEAD(&q->stream);
}
EXPORT_SYMBOL_GPL(videobuf_queue_core_init);
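
/*
 * Illustrative sketch (not part of this file, kept under #if 0): the minimum
 * a driver must provide before calling videobuf_queue_core_init() is the four
 * buf_* callbacks checked by the BUG_ON()s above, plus a memory backend's
 * int_ops.  Every "mydrv_*"/"MYDRV_*" name below is a hypothetical
 * placeholder; real drivers usually reach this function through a
 * backend-specific init wrapper rather than calling it directly.
 */
#if 0
#define MYDRV_FRAME_SIZE (720 * 576 * 2)	/* made-up frame size */

struct mydrv_buffer {
	struct videobuf_buffer vb;	/* must be first, msize >= sizeof(vb) */
};

struct mydrv_dev {
	struct videobuf_queue vb_q;
	spinlock_t slock;		/* init with spin_lock_init() */
	struct mutex lock;		/* init with mutex_init() */
};

static int mydrv_buf_setup(struct videobuf_queue *q,
			   unsigned int *count, unsigned int *size)
{
	*size = MYDRV_FRAME_SIZE;
	if (*count == 0)
		*count = 4;
	return 0;
}

static int mydrv_buf_prepare(struct videobuf_queue *q,
			     struct videobuf_buffer *vb,
			     enum v4l2_field field)
{
	vb->size  = MYDRV_FRAME_SIZE;
	vb->field = field;
	if (vb->state == VIDEOBUF_NEEDS_INIT) {
		int rc = videobuf_iolock(q, vb, NULL);

		if (rc < 0)
			return rc;
	}
	vb->state = VIDEOBUF_PREPARED;
	return 0;
}

static void mydrv_buf_queue(struct videobuf_queue *q,
			    struct videobuf_buffer *vb)
{
	/* hand the buffer to the (hypothetical) hardware queue */
	vb->state = VIDEOBUF_QUEUED;
}

static void mydrv_buf_release(struct videobuf_queue *q,
			      struct videobuf_buffer *vb)
{
	videobuf_waiton(q, vb, 0, 0);
	vb->state = VIDEOBUF_NEEDS_INIT;
}

static const struct videobuf_queue_ops mydrv_qops = {
	.buf_setup   = mydrv_buf_setup,
	.buf_prepare = mydrv_buf_prepare,
	.buf_queue   = mydrv_buf_queue,
	.buf_release = mydrv_buf_release,
};

static void mydrv_init_queue(struct mydrv_dev *dev,
			     struct videobuf_qtype_ops *backend_int_ops)
{
	videobuf_queue_core_init(&dev->vb_q, &mydrv_qops, NULL, &dev->slock,
				 V4L2_BUF_TYPE_VIDEO_CAPTURE,
				 V4L2_FIELD_INTERLACED,
				 sizeof(struct mydrv_buffer), dev,
				 backend_int_ops, &dev->lock);
}
#endif
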
/* Locking: only used by bttv; unsafe, find a way to remove it */
int videobuf_queue_is_busy(struct videobuf_queue *q)
{
	int i;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	if (q->streaming) {
		dprintk(1, "busy: streaming active\n");
		return 1;
	}
	if (q->reading) {
		dprintk(1, "busy: pending read #1\n");
		return 1;
	}
	if (q->read_buf) {
		dprintk(1, "busy: pending read #2\n");
		return 1;
	}
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		if (q->bufs[i]->map) {
			dprintk(1, "busy: buffer #%d mapped\n", i);
			return 1;
		}
		if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
			dprintk(1, "busy: buffer #%d queued\n", i);
			return 1;
		}
		if (q->bufs[i]->state == VIDEOBUF_ACTIVE) {
			dprintk(1, "busy: buffer #%d active\n", i);
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(videobuf_queue_is_busy);

/*
 * __videobuf_free() - free all the buffers and their control structures
 *
 * This function can only be called if streaming/reading is off, i.e. no buffers
 * are under control of the driver.
 */
/* Locking: Caller holds q->vb_lock */
static int __videobuf_free(struct videobuf_queue *q)
{
	int i;

	dprintk(1, "%s\n", __func__);
	if (!q)
		return 0;

	if (q->streaming || q->reading) {
		dprintk(1, "Cannot free buffers when streaming or reading\n");
		return -EBUSY;
	}

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	for (i = 0; i < VIDEO_MAX_FRAME; i++)
		if (q->bufs[i] && q->bufs[i]->map) {
			dprintk(1, "Cannot free mmapped buffers\n");
			return -EBUSY;
		}

	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		q->ops->buf_release(q, q->bufs[i]);
		kfree(q->bufs[i]);
		q->bufs[i] = NULL;
	}

	return 0;
}

/* Locking: Caller holds q->vb_lock */
void videobuf_queue_cancel(struct videobuf_queue *q)
{
	unsigned long flags = 0;
	int i;

	q->streaming = 0;
	q->reading   = 0;
	wake_up_interruptible_sync(&q->wait);

	/* remove queued buffers from list */
	spin_lock_irqsave(q->irqlock, flags);
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
			list_del(&q->bufs[i]->queue);
			q->bufs[i]->state = VIDEOBUF_ERROR;
			wake_up_all(&q->bufs[i]->done);
		}
	}
	spin_unlock_irqrestore(q->irqlock, flags);

	/* free all buffers + clear queue */
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		q->ops->buf_release(q, q->bufs[i]);
	}
	INIT_LIST_HEAD(&q->stream);
}
EXPORT_SYMBOL_GPL(videobuf_queue_cancel);

/* --------------------------------------------------------------------- */

/* Locking: Caller holds q->vb_lock */
enum v4l2_field videobuf_next_field(struct videobuf_queue *q)
{
	enum v4l2_field field = q->field;

	BUG_ON(V4L2_FIELD_ANY == field);

	if (V4L2_FIELD_ALTERNATE == field) {
		if (V4L2_FIELD_TOP == q->last) {
			field   = V4L2_FIELD_BOTTOM;
			q->last = V4L2_FIELD_BOTTOM;
		} else {
			field   = V4L2_FIELD_TOP;
			q->last = V4L2_FIELD_TOP;
		}
	}
	return field;
}
EXPORT_SYMBOL_GPL(videobuf_next_field);

/* Locking: Caller holds q->vb_lock */
static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
			    struct videobuf_buffer *vb, enum v4l2_buf_type type)
{
	MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	b->index  = vb->i;
	b->type   = type;

	b->memory = vb->memory;
	switch (b->memory) {
	case V4L2_MEMORY_MMAP:
		b->m.offset  = vb->boff;
		b->length    = vb->bsize;
		break;
	case V4L2_MEMORY_USERPTR:
		b->m.userptr = vb->baddr;
		b->length    = vb->bsize;
		break;
	case V4L2_MEMORY_OVERLAY:
		b->m.offset  = vb->boff;
		break;
	case V4L2_MEMORY_DMABUF:
		/* DMABUF is not handled in videobuf framework */
		break;
	}

	b->flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	if (vb->map)
		b->flags |= V4L2_BUF_FLAG_MAPPED;

	switch (vb->state) {
	case VIDEOBUF_PREPARED:
	case VIDEOBUF_QUEUED:
	case VIDEOBUF_ACTIVE:
		b->flags |= V4L2_BUF_FLAG_QUEUED;
		break;
	case VIDEOBUF_ERROR:
		b->flags |= V4L2_BUF_FLAG_ERROR;
		/* fall through */
	case VIDEOBUF_DONE:
		b->flags |= V4L2_BUF_FLAG_DONE;
		break;
	case VIDEOBUF_NEEDS_INIT:
	case VIDEOBUF_IDLE:
		/* nothing */
		break;
	}

	b->field     = vb->field;
	b->timestamp = vb->ts;
	b->bytesused = vb->size;
	b->sequence  = vb->field_count >> 1;
}

int videobuf_mmap_free(struct videobuf_queue *q)
{
	int ret;

	videobuf_queue_lock(q);
	ret = __videobuf_free(q);
	videobuf_queue_unlock(q);
	return ret;
}
EXPORT_SYMBOL_GPL(videobuf_mmap_free);

/* Locking: Caller holds q->vb_lock */
int __videobuf_mmap_setup(struct videobuf_queue *q,
			unsigned int bcount, unsigned int bsize,
			enum v4l2_memory memory)
{
	unsigned int i;
	int err;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	err = __videobuf_free(q);
	if (0 != err)
		return err;

	/* Allocate and initialize buffers */
	for (i = 0; i < bcount; i++) {
		q->bufs[i] = videobuf_alloc_vb(q);

		if (NULL == q->bufs[i])
			break;

		q->bufs[i]->i      = i;
		q->bufs[i]->memory = memory;
		q->bufs[i]->bsize  = bsize;
		switch (memory) {
		case V4L2_MEMORY_MMAP:
			q->bufs[i]->boff = PAGE_ALIGN(bsize) * i;
			break;
		case V4L2_MEMORY_USERPTR:
		case V4L2_MEMORY_OVERLAY:
		case V4L2_MEMORY_DMABUF:
			/* nothing */
			break;
		}
	}

	if (!i)
		return -ENOMEM;

	dprintk(1, "mmap setup: %d buffers, %d bytes each\n", i, bsize);

	return i;
}
EXPORT_SYMBOL_GPL(__videobuf_mmap_setup);

int videobuf_mmap_setup(struct videobuf_queue *q,
			unsigned int bcount, unsigned int bsize,
			enum v4l2_memory memory)
{
	int ret;

	videobuf_queue_lock(q);
	ret = __videobuf_mmap_setup(q, bcount, bsize, memory);
	videobuf_queue_unlock(q);
	return ret;
}
EXPORT_SYMBOL_GPL(videobuf_mmap_setup);

int videobuf_reqbufs(struct videobuf_queue *q,
		 struct v4l2_requestbuffers *req)
{
	unsigned int size, count;
	int retval;

	if (req->memory != V4L2_MEMORY_MMAP &&
	    req->memory != V4L2_MEMORY_USERPTR &&
	    req->memory != V4L2_MEMORY_OVERLAY) {
		dprintk(1, "reqbufs: memory type invalid\n");
		return -EINVAL;
	}

	videobuf_queue_lock(q);
	if (req->type != q->type) {
		dprintk(1, "reqbufs: queue type invalid\n");
		retval = -EINVAL;
		goto done;
	}

	if (q->streaming) {
		dprintk(1, "reqbufs: streaming already exists\n");
		retval = -EBUSY;
		goto done;
	}
	if (!list_empty(&q->stream)) {
		dprintk(1, "reqbufs: stream running\n");
		retval = -EBUSY;
		goto done;
	}

	if (req->count == 0) {
		dprintk(1, "reqbufs: count invalid (%d)\n", req->count);
		retval = __videobuf_free(q);
		goto done;
	}

	count = req->count;
	if (count > VIDEO_MAX_FRAME)
		count = VIDEO_MAX_FRAME;
	size = 0;
	q->ops->buf_setup(q, &count, &size);
	dprintk(1, "reqbufs: bufs=%d, size=0x%x [%u pages total]\n",
		count, size,
		(unsigned int)((count * PAGE_ALIGN(size)) >> PAGE_SHIFT));

	retval = __videobuf_mmap_setup(q, count, size, req->memory);
	if (retval < 0) {
		dprintk(1, "reqbufs: mmap setup returned %d\n", retval);
		goto done;
	}

	req->count = retval;
	retval = 0;

done:
	videobuf_queue_unlock(q);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_reqbufs);

int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b)
{
	int ret = -EINVAL;

	videobuf_queue_lock(q);
	if (unlikely(b->type != q->type)) {
		dprintk(1, "querybuf: Wrong type.\n");
		goto done;
	}
	if (unlikely(b->index >= VIDEO_MAX_FRAME)) {
		dprintk(1, "querybuf: index out of range.\n");
		goto done;
	}
	if (unlikely(NULL == q->bufs[b->index])) {
		dprintk(1, "querybuf: buffer is null.\n");
		goto done;
	}

	videobuf_status(q, b, q->bufs[b->index], q->type);

	ret = 0;
done:
	videobuf_queue_unlock(q);
	return ret;
}
EXPORT_SYMBOL_GPL(videobuf_querybuf);
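
/*
 * Illustrative sketch (not part of this file, kept under #if 0):
 * videobuf_reqbufs() and videobuf_querybuf() are meant to back the
 * VIDIOC_REQBUFS/VIDIOC_QUERYBUF ioctls more or less directly.  The
 * "mydrv_*" names and the dev->vb_q field are hypothetical.
 */
#if 0
static int mydrv_vidioc_reqbufs(struct file *file, void *priv,
				struct v4l2_requestbuffers *p)
{
	struct mydrv_dev *dev = video_drvdata(file);

	return videobuf_reqbufs(&dev->vb_q, p);
}

static int mydrv_vidioc_querybuf(struct file *file, void *priv,
				 struct v4l2_buffer *b)
{
	struct mydrv_dev *dev = video_drvdata(file);

	return videobuf_querybuf(&dev->vb_q, b);
}
#endif
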
int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
{
	struct videobuf_buffer *buf;
	enum v4l2_field field;
	unsigned long flags = 0;
	int retval;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	if (b->memory == V4L2_MEMORY_MMAP)
		down_read(&current->mm->mmap_sem);

	videobuf_queue_lock(q);
	retval = -EBUSY;
	if (q->reading) {
		dprintk(1, "qbuf: Reading running...\n");
		goto done;
	}
	retval = -EINVAL;
	if (b->type != q->type) {
		dprintk(1, "qbuf: Wrong type.\n");
		goto done;
	}
	if (b->index >= VIDEO_MAX_FRAME) {
		dprintk(1, "qbuf: index out of range.\n");
		goto done;
	}
	buf = q->bufs[b->index];
	if (NULL == buf) {
		dprintk(1, "qbuf: buffer is null.\n");
		goto done;
	}
	MAGIC_CHECK(buf->magic, MAGIC_BUFFER);
	if (buf->memory != b->memory) {
		dprintk(1, "qbuf: memory type is wrong.\n");
		goto done;
	}
	if (buf->state != VIDEOBUF_NEEDS_INIT && buf->state != VIDEOBUF_IDLE) {
		dprintk(1, "qbuf: buffer is already queued or active.\n");
		goto done;
	}

	switch (b->memory) {
	case V4L2_MEMORY_MMAP:
		if (0 == buf->baddr) {
			dprintk(1, "qbuf: mmap requested but buffer addr is zero!\n");
			goto done;
		}
		if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT
		    || q->type == V4L2_BUF_TYPE_VBI_OUTPUT
		    || q->type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT
		    || q->type == V4L2_BUF_TYPE_SDR_OUTPUT) {
			buf->size  = b->bytesused;
			buf->field = b->field;
			buf->ts    = b->timestamp;
		}
		break;
	case V4L2_MEMORY_USERPTR:
		if (b->length < buf->bsize) {
			dprintk(1, "qbuf: buffer length is not enough\n");
			goto done;
		}
		if (VIDEOBUF_NEEDS_INIT != buf->state &&
		    buf->baddr != b->m.userptr)
			q->ops->buf_release(q, buf);
		buf->baddr = b->m.userptr;
		break;
	case V4L2_MEMORY_OVERLAY:
		buf->boff = b->m.offset;
		break;
	default:
		dprintk(1, "qbuf: wrong memory type\n");
		goto done;
	}

	dprintk(1, "qbuf: requesting next field\n");
	field = videobuf_next_field(q);
	retval = q->ops->buf_prepare(q, buf, field);
	if (0 != retval) {
		dprintk(1, "qbuf: buffer_prepare returned %d\n", retval);
		goto done;
	}

	list_add_tail(&buf->stream, &q->stream);
	if (q->streaming) {
		spin_lock_irqsave(q->irqlock, flags);
		q->ops->buf_queue(q, buf);
		spin_unlock_irqrestore(q->irqlock, flags);
	}
	dprintk(1, "qbuf: succeeded\n");
	retval = 0;
	wake_up_interruptible_sync(&q->wait);

done:
	videobuf_queue_unlock(q);

	if (b->memory == V4L2_MEMORY_MMAP)
		up_read(&current->mm->mmap_sem);

	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_qbuf);

/* Locking: Caller holds q->vb_lock */
static int stream_next_buffer_check_queue(struct videobuf_queue *q, int noblock)
{
	int retval;

checks:
	if (!q->streaming) {
		dprintk(1, "next_buffer: Not streaming\n");
		retval = -EINVAL;
		goto done;
	}

	if (list_empty(&q->stream)) {
		if (noblock) {
			retval = -EAGAIN;
			dprintk(2, "next_buffer: no buffers to dequeue\n");
			goto done;
		} else {
			dprintk(2, "next_buffer: waiting on buffer\n");

			/* Drop lock to avoid deadlock with qbuf */
			videobuf_queue_unlock(q);

			/* Checking list_empty and streaming without the lock
			 * is safe here because we jump back to "checks" and
			 * re-validate both under the lock before proceeding */
			retval = wait_event_interruptible(q->wait,
				!list_empty(&q->stream) || !q->streaming);
			videobuf_queue_lock(q);
			if (retval)
				goto done;
			goto checks;
		}
	}

	retval = 0;

done:
	return retval;
}

/* Locking: Caller holds q->vb_lock */
static int stream_next_buffer(struct videobuf_queue *q,
			struct videobuf_buffer **vb, int nonblocking)
{
	int retval;
	struct videobuf_buffer *buf = NULL;

	retval = stream_next_buffer_check_queue(q, nonblocking);
	if (retval)
		goto done;

	buf = list_entry(q->stream.next, struct videobuf_buffer, stream);
	retval = videobuf_waiton(q, buf, nonblocking, 1);
	if (retval < 0)
		goto done;

	*vb = buf;
done:
	return retval;
}

int videobuf_dqbuf(struct videobuf_queue *q,
		   struct v4l2_buffer *b, int nonblocking)
{
	struct videobuf_buffer *buf = NULL;
	int retval;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	memset(b, 0, sizeof(*b));
	videobuf_queue_lock(q);

	retval = stream_next_buffer(q, &buf, nonblocking);
	if (retval < 0) {
		dprintk(1, "dqbuf: next_buffer error: %i\n", retval);
		goto done;
	}

	switch (buf->state) {
	case VIDEOBUF_ERROR:
		dprintk(1, "dqbuf: state is error\n");
		break;
	case VIDEOBUF_DONE:
		dprintk(1, "dqbuf: state is done\n");
		break;
	default:
		dprintk(1, "dqbuf: state invalid\n");
		retval = -EINVAL;
		goto done;
	}
	CALL(q, sync, q, buf);
	videobuf_status(q, b, buf, q->type);
	list_del(&buf->stream);
	buf->state = VIDEOBUF_IDLE;
	b->flags &= ~V4L2_BUF_FLAG_DONE;
done:
	videobuf_queue_unlock(q);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_dqbuf);
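
/*
 * Illustrative sketch (not part of this file, kept under #if 0): the buffer
 * I/O ioctls map onto videobuf_qbuf()/videobuf_dqbuf(); the only extra piece
 * a driver supplies is the non-blocking flag derived from the file.  The
 * "mydrv_*" names and dev->vb_q are hypothetical.
 */
#if 0
static int mydrv_vidioc_qbuf(struct file *file, void *priv,
			     struct v4l2_buffer *b)
{
	struct mydrv_dev *dev = video_drvdata(file);

	return videobuf_qbuf(&dev->vb_q, b);
}

static int mydrv_vidioc_dqbuf(struct file *file, void *priv,
			      struct v4l2_buffer *b)
{
	struct mydrv_dev *dev = video_drvdata(file);

	return videobuf_dqbuf(&dev->vb_q, b, file->f_flags & O_NONBLOCK);
}
#endif
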
int videobuf_streamon(struct videobuf_queue *q)
{
	struct videobuf_buffer *buf;
	unsigned long flags = 0;
	int retval;

	videobuf_queue_lock(q);
	retval = -EBUSY;
	if (q->reading)
		goto done;
	retval = 0;
	if (q->streaming)
		goto done;
	q->streaming = 1;
	spin_lock_irqsave(q->irqlock, flags);
	list_for_each_entry(buf, &q->stream, stream)
		if (buf->state == VIDEOBUF_PREPARED)
			q->ops->buf_queue(q, buf);
	spin_unlock_irqrestore(q->irqlock, flags);

	wake_up_interruptible_sync(&q->wait);
done:
	videobuf_queue_unlock(q);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_streamon);

/* Locking: Caller holds q->vb_lock */
static int __videobuf_streamoff(struct videobuf_queue *q)
{
	if (!q->streaming)
		return -EINVAL;

	videobuf_queue_cancel(q);

	return 0;
}

int videobuf_streamoff(struct videobuf_queue *q)
{
	int retval;

	videobuf_queue_lock(q);
	retval = __videobuf_streamoff(q);
	videobuf_queue_unlock(q);

	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_streamoff);

/* Locking: Caller holds q->vb_lock */
static ssize_t videobuf_read_zerocopy(struct videobuf_queue *q,
				      char __user *data,
				      size_t count, loff_t *ppos)
{
	enum v4l2_field field;
	unsigned long flags = 0;
	int retval;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	/* setup stuff */
	q->read_buf = videobuf_alloc_vb(q);
	if (NULL == q->read_buf)
		return -ENOMEM;

	q->read_buf->memory = V4L2_MEMORY_USERPTR;
	q->read_buf->baddr  = (unsigned long)data;
	q->read_buf->bsize  = count;

	field = videobuf_next_field(q);
	retval = q->ops->buf_prepare(q, q->read_buf, field);
	if (0 != retval)
		goto done;

	/* start capture & wait */
	spin_lock_irqsave(q->irqlock, flags);
	q->ops->buf_queue(q, q->read_buf);
	spin_unlock_irqrestore(q->irqlock, flags);
	retval = videobuf_waiton(q, q->read_buf, 0, 0);
	if (0 == retval) {
		CALL(q, sync, q, q->read_buf);
		if (VIDEOBUF_ERROR == q->read_buf->state)
			retval = -EIO;
		else
			retval = q->read_buf->size;
	}

done:
	/* cleanup */
	q->ops->buf_release(q, q->read_buf);
	kfree(q->read_buf);
	q->read_buf = NULL;
	return retval;
}

static int __videobuf_copy_to_user(struct videobuf_queue *q,
				   struct videobuf_buffer *buf,
				   char __user *data, size_t count,
				   int nonblocking)
{
	void *vaddr = CALLPTR(q, vaddr, buf);

	/* copy to userspace */
	if (count > buf->size - q->read_off)
		count = buf->size - q->read_off;

	if (copy_to_user(data, vaddr + q->read_off, count))
		return -EFAULT;

	return count;
}

static int __videobuf_copy_stream(struct videobuf_queue *q,
				  struct videobuf_buffer *buf,
				  char __user *data, size_t count, size_t pos,
				  int vbihack, int nonblocking)
{
	unsigned int *fc = CALLPTR(q, vaddr, buf);

	if (vbihack) {
		/* dirty, undocumented hack -- pass the frame counter
		 * within the last four bytes of each vbi data block.
		 * We need that one to maintain backward compatibility
		 * with all the vbi decoding software out there ... */
		fc += (buf->size >> 2) - 1;
		*fc = buf->field_count >> 1;
		dprintk(1, "vbihack: %d\n", *fc);
	}

	/* copy stuff using the common method */
	count = __videobuf_copy_to_user(q, buf, data, count, nonblocking);

	if ((count == -EFAULT) && (pos == 0))
		return -EFAULT;

	return count;
}

ssize_t videobuf_read_one(struct videobuf_queue *q,
			  char __user *data, size_t count, loff_t *ppos,
			  int nonblocking)
{
	enum v4l2_field field;
	unsigned long flags = 0;
	unsigned size = 0, nbufs = 1;
	int retval;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	videobuf_queue_lock(q);

	q->ops->buf_setup(q, &nbufs, &size);

	if (NULL == q->read_buf &&
	    count >= size &&
	    !nonblocking) {
		retval = videobuf_read_zerocopy(q, data, count, ppos);
		if (retval >= 0 || retval == -EIO)
			/* ok, all done */
			goto done;
		/* fallback to kernel bounce buffer on failures */
	}

	if (NULL == q->read_buf) {
		/* need to capture a new frame */
		retval = -ENOMEM;
		q->read_buf = videobuf_alloc_vb(q);

		dprintk(1, "video alloc=0x%p\n", q->read_buf);
		if (NULL == q->read_buf)
			goto done;
		q->read_buf->memory = V4L2_MEMORY_USERPTR;
		q->read_buf->bsize = count; /* preferred size */
		field = videobuf_next_field(q);
		retval = q->ops->buf_prepare(q, q->read_buf, field);

		if (0 != retval) {
			kfree(q->read_buf);
			q->read_buf = NULL;
			goto done;
		}

		spin_lock_irqsave(q->irqlock, flags);
		q->ops->buf_queue(q, q->read_buf);
		spin_unlock_irqrestore(q->irqlock, flags);

		q->read_off = 0;
	}

	/* wait until capture is done */
	retval = videobuf_waiton(q, q->read_buf, nonblocking, 1);
	if (0 != retval)
		goto done;

	CALL(q, sync, q, q->read_buf);

	if (VIDEOBUF_ERROR == q->read_buf->state) {
		/* catch I/O errors */
		q->ops->buf_release(q, q->read_buf);
		kfree(q->read_buf);
		q->read_buf = NULL;
		retval = -EIO;
		goto done;
	}

	/* Copy to userspace */
	retval = __videobuf_copy_to_user(q, q->read_buf, data, count, nonblocking);
	if (retval < 0)
		goto done;

	q->read_off += retval;
	if (q->read_off == q->read_buf->size) {
		/* all data copied, cleanup */
		q->ops->buf_release(q, q->read_buf);
		kfree(q->read_buf);
		q->read_buf = NULL;
	}

done:
	videobuf_queue_unlock(q);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_read_one);
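
/*
 * Illustrative sketch (not part of this file, kept under #if 0):
 * videobuf_read_one() is intended to implement the read() file operation for
 * devices that return one frame per read() call.  The "mydrv_*" names and
 * dev->vb_q are hypothetical.
 */
#if 0
static ssize_t mydrv_read(struct file *file, char __user *data,
			  size_t count, loff_t *ppos)
{
	struct mydrv_dev *dev = video_drvdata(file);

	return videobuf_read_one(&dev->vb_q, data, count, ppos,
				 file->f_flags & O_NONBLOCK);
}
#endif
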
/* Locking: Caller holds q->vb_lock */
static int __videobuf_read_start(struct videobuf_queue *q)
{
	enum v4l2_field field;
	unsigned long flags = 0;
	unsigned int count = 0, size = 0;
	int err, i;

	q->ops->buf_setup(q, &count, &size);
	if (count < 2)
		count = 2;
	if (count > VIDEO_MAX_FRAME)
		count = VIDEO_MAX_FRAME;
	size = PAGE_ALIGN(size);

	err = __videobuf_mmap_setup(q, count, size, V4L2_MEMORY_USERPTR);
	if (err < 0)
		return err;

	count = err;

	for (i = 0; i < count; i++) {
		field = videobuf_next_field(q);
		err = q->ops->buf_prepare(q, q->bufs[i], field);
		if (err)
			return err;
		list_add_tail(&q->bufs[i]->stream, &q->stream);
	}
	spin_lock_irqsave(q->irqlock, flags);
	for (i = 0; i < count; i++)
		q->ops->buf_queue(q, q->bufs[i]);
	spin_unlock_irqrestore(q->irqlock, flags);
	q->reading = 1;
	return 0;
}

static void __videobuf_read_stop(struct videobuf_queue *q)
{
	int i;

	videobuf_queue_cancel(q);
	__videobuf_free(q);
	INIT_LIST_HEAD(&q->stream);
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		kfree(q->bufs[i]);
		q->bufs[i] = NULL;
	}
	q->read_buf = NULL;
}

int videobuf_read_start(struct videobuf_queue *q)
{
	int rc;

	videobuf_queue_lock(q);
	rc = __videobuf_read_start(q);
	videobuf_queue_unlock(q);

	return rc;
}
EXPORT_SYMBOL_GPL(videobuf_read_start);

void videobuf_read_stop(struct videobuf_queue *q)
{
	videobuf_queue_lock(q);
	__videobuf_read_stop(q);
	videobuf_queue_unlock(q);
}
EXPORT_SYMBOL_GPL(videobuf_read_stop);

void videobuf_stop(struct videobuf_queue *q)
{
	videobuf_queue_lock(q);

	if (q->streaming)
		__videobuf_streamoff(q);

	if (q->reading)
		__videobuf_read_stop(q);

	videobuf_queue_unlock(q);
}
EXPORT_SYMBOL_GPL(videobuf_stop);

ssize_t videobuf_read_stream(struct videobuf_queue *q,
			     char __user *data, size_t count, loff_t *ppos,
			     int vbihack, int nonblocking)
{
	int rc, retval;
	unsigned long flags = 0;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	dprintk(2, "%s\n", __func__);
	videobuf_queue_lock(q);
	retval = -EBUSY;
	if (q->streaming)
		goto done;
	if (!q->reading) {
		retval = __videobuf_read_start(q);
		if (retval < 0)
			goto done;
	}

	retval = 0;
	while (count > 0) {
		/* get / wait for data */
		if (NULL == q->read_buf) {
			q->read_buf = list_entry(q->stream.next,
						 struct videobuf_buffer,
						 stream);
			list_del(&q->read_buf->stream);
			q->read_off = 0;
		}
		rc = videobuf_waiton(q, q->read_buf, nonblocking, 1);
		if (rc < 0) {
			if (0 == retval)
				retval = rc;
			break;
		}

		if (q->read_buf->state == VIDEOBUF_DONE) {
			rc = __videobuf_copy_stream(q, q->read_buf, data + retval, count,
					retval, vbihack, nonblocking);
			if (rc < 0) {
				retval = rc;
				break;
			}
			retval      += rc;
			count       -= rc;
			q->read_off += rc;
		} else {
			/* some error */
			q->read_off = q->read_buf->size;
			if (0 == retval)
				retval = -EIO;
		}

		/* requeue buffer when done with copying */
		if (q->read_off == q->read_buf->size) {
			list_add_tail(&q->read_buf->stream,
				      &q->stream);
			spin_lock_irqsave(q->irqlock, flags);
			q->ops->buf_queue(q, q->read_buf);
			spin_unlock_irqrestore(q->irqlock, flags);
			q->read_buf = NULL;
		}
		if (retval < 0)
			break;
	}

done:
	videobuf_queue_unlock(q);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_read_stream);

__poll_t videobuf_poll_stream(struct file *file,
			      struct videobuf_queue *q,
			      poll_table *wait)
{
	__poll_t req_events = poll_requested_events(wait);
	struct videobuf_buffer *buf = NULL;
	__poll_t rc = 0;

	videobuf_queue_lock(q);
	if (q->streaming) {
		if (!list_empty(&q->stream))
			buf = list_entry(q->stream.next,
					 struct videobuf_buffer, stream);
	} else if (req_events & (EPOLLIN | EPOLLRDNORM)) {
		if (!q->reading)
			__videobuf_read_start(q);
		if (!q->reading) {
			rc = EPOLLERR;
		} else if (NULL == q->read_buf) {
			q->read_buf = list_entry(q->stream.next,
						 struct videobuf_buffer,
						 stream);
			list_del(&q->read_buf->stream);
			q->read_off = 0;
		}
		buf = q->read_buf;
	}
	if (!buf)
		rc = EPOLLERR;

	if (0 == rc) {
		poll_wait(file, &buf->done, wait);
		if (buf->state == VIDEOBUF_DONE ||
		    buf->state == VIDEOBUF_ERROR) {
			switch (q->type) {
			case V4L2_BUF_TYPE_VIDEO_OUTPUT:
			case V4L2_BUF_TYPE_VBI_OUTPUT:
			case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
			case V4L2_BUF_TYPE_SDR_OUTPUT:
				rc = EPOLLOUT | EPOLLWRNORM;
				break;
			default:
				rc = EPOLLIN | EPOLLRDNORM;
				break;
			}
		}
	}
	videobuf_queue_unlock(q);
	return rc;
}
EXPORT_SYMBOL_GPL(videobuf_poll_stream);

int videobuf_mmap_mapper(struct videobuf_queue *q, struct vm_area_struct *vma)
{
	int rc = -EINVAL;
	int i;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED)) {
		dprintk(1, "mmap appl bug: PROT_WRITE and MAP_SHARED are required\n");
		return -EINVAL;
	}

	videobuf_queue_lock(q);
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		struct videobuf_buffer *buf = q->bufs[i];

		if (buf && buf->memory == V4L2_MEMORY_MMAP &&
				buf->boff == (vma->vm_pgoff << PAGE_SHIFT)) {
			rc = CALL(q, mmap_mapper, q, buf, vma);
			break;
		}
	}
	videobuf_queue_unlock(q);

	return rc;
}
EXPORT_SYMBOL_GPL(videobuf_mmap_mapper);
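
/*
 * Illustrative sketch (not part of this file, kept under #if 0): a driver's
 * poll() and mmap() file operations usually reduce to
 * videobuf_poll_stream() and videobuf_mmap_mapper().  The "mydrv_*" names
 * and dev->vb_q are hypothetical, and the fops table is trimmed to the
 * fields relevant here.
 */
#if 0
static __poll_t mydrv_poll(struct file *file, poll_table *wait)
{
	struct mydrv_dev *dev = video_drvdata(file);

	return videobuf_poll_stream(file, &dev->vb_q, wait);
}

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct mydrv_dev *dev = video_drvdata(file);

	return videobuf_mmap_mapper(&dev->vb_q, vma);
}

static const struct v4l2_file_operations mydrv_fops = {
	.owner		= THIS_MODULE,
	.read		= mydrv_read,
	.poll		= mydrv_poll,
	.mmap		= mydrv_mmap,
	.unlocked_ioctl	= video_ioctl2,
};
#endif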