v4l2-mem2mem.h

/*
 * Memory-to-memory device framework for Video for Linux 2.
 *
 * Helper functions for devices that use memory buffers for both source
 * and destination.
 *
 * Copyright (c) 2009 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 */

#ifndef _MEDIA_V4L2_MEM2MEM_H
#define _MEDIA_V4L2_MEM2MEM_H

#include <media/videobuf2-v4l2.h>

/**
 * struct v4l2_m2m_ops - mem-to-mem device driver callbacks
 * @device_run:	required. Begin the actual job (transaction) inside this
 *		callback.
 *		The job does NOT have to end before this callback returns
 *		(and it will be the usual case). When the job finishes,
 *		v4l2_m2m_job_finish() has to be called.
 * @job_ready:	optional. Should return 0 if the driver does not have a job
 *		fully prepared to run yet (i.e. it will not be able to finish a
 *		transaction without sleeping). If not provided, it will be
 *		assumed that one source and one destination buffer are all
 *		that is required for the driver to perform one full transaction.
 *		This method may not sleep.
 * @job_abort:	required. Informs the driver that it has to abort the currently
 *		running transaction as soon as possible (i.e. as soon as it can
 *		stop the device safely; e.g. in the next interrupt handler),
 *		even if the transaction would not have been finished by then.
 *		After the driver performs the necessary steps, it has to call
 *		v4l2_m2m_job_finish() (as if the transaction ended normally).
 *		This function does not have to (and will usually not) wait
 *		until the device enters a state when it can be stopped.
 * @lock:	optional. Define a driver's own lock callback, instead of using
 *		&v4l2_m2m_ctx->q_lock.
 * @unlock:	optional. Define a driver's own unlock callback, instead of
 *		using &v4l2_m2m_ctx->q_lock.
 */
struct v4l2_m2m_ops {
	void (*device_run)(void *priv);
	int (*job_ready)(void *priv);
	void (*job_abort)(void *priv);
	void (*lock)(void *priv);
	void (*unlock)(void *priv);
};
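
/*
 * Example (illustrative sketch only, not part of this header): a set of
 * driver callbacks for a hypothetical "foo" driver that needs two source
 * buffers per transaction; foo_device_run() is sketched further below.
 *
 *	static int foo_job_ready(void *priv)
 *	{
 *		struct foo_ctx *ctx = priv;
 *
 *		// Custom readiness rule: two source buffers per job.
 *		return v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) >= 2 &&
 *		       v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) >= 1;
 *	}
 *
 *	static void foo_job_abort(void *priv)
 *	{
 *		struct foo_ctx *ctx = priv;
 *
 *		// Ask the hardware to stop as soon as it safely can; the
 *		// interrupt handler then calls v4l2_m2m_job_finish().
 *		ctx->aborting = true;
 *	}
 *
 *	static const struct v4l2_m2m_ops foo_m2m_ops = {
 *		.device_run	= foo_device_run,
 *		.job_ready	= foo_job_ready,
 *		.job_abort	= foo_job_abort,
 *	};
 */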

struct v4l2_m2m_dev;

/**
 * struct v4l2_m2m_queue_ctx - represents a queue for buffers ready to be
 *	processed
 *
 * @q:		embedded struct &vb2_queue
 * @rdy_queue:	list of buffers ready to be processed
 * @rdy_spinlock: spin lock to protect the struct usage
 * @num_rdy:	number of buffers ready to be processed
 * @buffered:	is the queue buffered?
 *
 * Queue for buffers ready to be processed as soon as this
 * instance receives access to the device.
 */
struct v4l2_m2m_queue_ctx {
	struct vb2_queue	q;

	struct list_head	rdy_queue;
	spinlock_t		rdy_spinlock;
	u8			num_rdy;
	bool			buffered;
};

/**
 * struct v4l2_m2m_ctx - Memory to memory context structure
 *
 * @q_lock:	optional struct &mutex lock for the capture and output
 *		vb2 queues
 * @m2m_dev:	opaque pointer to the internal data to handle M2M context
 * @cap_q_ctx:	Capture (output to memory) queue context
 * @out_q_ctx:	Output (input from memory) queue context
 * @queue:	List of memory to memory contexts
 * @job_flags:	Job queue flags, used internally by v4l2-mem2mem.c:
 *		%TRANS_QUEUED, %TRANS_RUNNING and %TRANS_ABORT.
 * @finished:	Wait queue used to signal when a queued job finishes.
 * @priv:	Instance private data
 */
struct v4l2_m2m_ctx {
	/* optional cap/out vb2 queues lock */
	struct mutex			*q_lock;

	/* internal use only */
	struct v4l2_m2m_dev		*m2m_dev;

	struct v4l2_m2m_queue_ctx	cap_q_ctx;

	struct v4l2_m2m_queue_ctx	out_q_ctx;

	/* For device job queue */
	struct list_head		queue;
	unsigned long			job_flags;
	wait_queue_head_t		finished;

	void				*priv;
};

/**
 * struct v4l2_m2m_buffer - Memory to memory buffer
 *
 * @vb:		embedded struct &vb2_v4l2_buffer
 * @list:	list of m2m buffers
 */
struct v4l2_m2m_buffer {
	struct vb2_v4l2_buffer	vb;
	struct list_head	list;
};

/**
 * v4l2_m2m_get_curr_priv() - return driver private data for the currently
 * running instance or NULL if no instance is running
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 */
void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev);

/**
 * v4l2_m2m_get_vq() - return vb2_queue for the given type
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
 */
struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				  enum v4l2_buf_type type);

/**
 * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
 * the pending job queue and add it if so.
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 *
 * There are three basic requirements an instance has to meet to be able to run:
 * 1) at least one source buffer has to be queued,
 * 2) at least one destination buffer has to be queued,
 * 3) streaming has to be on.
 *
 * If a queue is buffered (for example a decoder hardware ringbuffer that has
 * to be drained before doing streamoff), allow scheduling without v4l2 buffers
 * on that queue.
 *
 * There may also be additional, custom requirements. In such a case the driver
 * should supply a custom callback (job_ready in v4l2_m2m_ops) that should
 * return 1 if the instance is ready.
 * An example of the above could be an instance that requires more than one
 * src/dst buffer per transaction.
 */
void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx);

/**
 * v4l2_m2m_job_finish() - inform the framework that a job has been finished
 * and have it clean up
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 *
 * Called by a driver to yield back the device after it has finished with it.
 * Should be called as soon as possible after reaching a state which allows
 * other instances to take control of the device.
 *
 * This function has to be called only after the &v4l2_m2m_ops->device_run
 * callback has been called on the driver. To prevent recursion, it should
 * not be called directly from the &v4l2_m2m_ops->device_run callback though.
 */
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx);

/**
 * v4l2_m2m_buf_done() - mark a buffer as done and return it to videobuf2
 *
 * @buf: pointer to the struct &vb2_v4l2_buffer to complete
 * @state: final state of the buffer, as defined by enum &vb2_buffer_state
 */
static inline void
v4l2_m2m_buf_done(struct vb2_v4l2_buffer *buf, enum vb2_buffer_state state)
{
	vb2_buffer_done(&buf->vb2_buf, state);
}
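
/*
 * Example (illustrative sketch only, not part of this header): a
 * hypothetical interrupt handler returning the processed buffers and
 * yielding the device back to the framework.
 *
 *	static irqreturn_t foo_irq(int irq, void *dev_id)
 *	{
 *		struct foo_dev *dev = dev_id;
 *		struct foo_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
 *		struct vb2_v4l2_buffer *src, *dst;
 *
 *		// Take the finished buffers off the ready lists ...
 *		src = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
 *		dst = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
 *
 *		// ... hand them back to videobuf2 ...
 *		v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
 *		v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
 *
 *		// ... and let the framework schedule the next job.
 *		v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);
 *		return IRQ_HANDLED;
 *	}
 */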

/**
 * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @reqbufs: pointer to struct &v4l2_requestbuffers
 */
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs);

/**
 * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buf: pointer to struct &v4l2_buffer
 *
 * See v4l2_m2m_mmap() documentation for details.
 */
int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf);

/**
 * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
 * the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buf: pointer to struct &v4l2_buffer
 */
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf);

/**
 * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
 * the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buf: pointer to struct &v4l2_buffer
 */
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf);

/**
 * v4l2_m2m_prepare_buf() - prepare a source or destination buffer, depending on
 * the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buf: pointer to struct &v4l2_buffer
 */
int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_buffer *buf);

/**
 * v4l2_m2m_create_bufs() - create a source or destination buffer, depending
 * on the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @create: pointer to struct &v4l2_create_buffers
 */
int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_create_buffers *create);

/**
 * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
 * the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @eb: pointer to struct &v4l2_exportbuffer
 */
int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		    struct v4l2_exportbuffer *eb);

/**
 * v4l2_m2m_streamon() - turn on streaming for a video queue
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
 */
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      enum v4l2_buf_type type);

/**
 * v4l2_m2m_streamoff() - turn off streaming for a video queue
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
 */
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       enum v4l2_buf_type type);

/**
 * v4l2_m2m_poll() - poll replacement, for destination buffers only
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @wait: pointer to struct &poll_table_struct
 *
 * Call from the driver's poll() function. Will poll both queues. If a buffer
 * is available to dequeue (with dqbuf) from the source queue, this will
 * indicate that a non-blocking write can be performed, while a buffer ready
 * on the destination queue indicates that a non-blocking read can be
 * performed.
 */
unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			   struct poll_table_struct *wait);

/**
 * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vma: pointer to struct &vm_area_struct
 *
 * Call from the driver's mmap() function. Will handle mmap() for both queues
 * seamlessly for the videobuf2 layer, which will receive normal per-queue
 * offsets and proper vb2 queue pointers. The differentiation is made outside
 * videobuf2 by adding a predefined offset to buffers from one of the queues
 * and subtracting it before passing it back to videobuf2. Only drivers (and
 * thus applications) receive modified offsets.
 */
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct vm_area_struct *vma);

/**
 * v4l2_m2m_init() - initialize per-driver m2m data
 *
 * @m2m_ops: pointer to struct &v4l2_m2m_ops
 *
 * Usually called from driver's ``probe()`` function.
 *
 * Return: returns an opaque pointer to the internal data to handle M2M context
 */
struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops);

/**
 * v4l2_m2m_release() - cleans up and frees a m2m_dev structure
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 *
 * Usually called from driver's ``remove()`` function.
 */
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev);
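
/*
 * Example (illustrative sketch only, not part of this header): typical
 * per-driver setup and teardown; the foo_* names are hypothetical.
 *
 *	// in foo_probe():
 *	dev->m2m_dev = v4l2_m2m_init(&foo_m2m_ops);
 *	if (IS_ERR(dev->m2m_dev))
 *		return PTR_ERR(dev->m2m_dev);
 *
 *	// in foo_remove():
 *	v4l2_m2m_release(dev->m2m_dev);
 */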

/**
 * v4l2_m2m_ctx_init() - allocate and initialize a m2m context
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 * @drv_priv: driver's instance private data
 * @queue_init: a callback for queue type-specific initialization function
 *	to be used for initializing the vb2 queues
 *
 * Usually called from driver's ``open()`` function.
 */
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
		void *drv_priv,
		int (*queue_init)(void *priv, struct vb2_queue *src_vq,
				  struct vb2_queue *dst_vq));

static inline void v4l2_m2m_set_src_buffered(struct v4l2_m2m_ctx *m2m_ctx,
					     bool buffered)
{
	m2m_ctx->out_q_ctx.buffered = buffered;
}

static inline void v4l2_m2m_set_dst_buffered(struct v4l2_m2m_ctx *m2m_ctx,
					     bool buffered)
{
	m2m_ctx->cap_q_ctx.buffered = buffered;
}

/**
 * v4l2_m2m_ctx_release() - release m2m context
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 *
 * Usually called from driver's release() function.
 */
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx);
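
/*
 * Example (illustrative sketch only, not part of this header): per-instance
 * setup from a hypothetical open() handler, including the queue_init
 * callback passed to v4l2_m2m_ctx_init().
 *
 *	static int foo_queue_init(void *priv, struct vb2_queue *src_vq,
 *				  struct vb2_queue *dst_vq)
 *	{
 *		int ret;
 *
 *		// Fill in src_vq/dst_vq (type, io_modes, ops, mem_ops,
 *		// drv_priv = priv, lock, ...) before initializing them.
 *		ret = vb2_queue_init(src_vq);
 *		if (ret)
 *			return ret;
 *		return vb2_queue_init(dst_vq);
 *	}
 *
 *	// in foo_open():
 *	ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, foo_queue_init);
 *	if (IS_ERR(ctx->m2m_ctx))
 *		return PTR_ERR(ctx->m2m_ctx);
 *
 *	// in foo_release():
 *	v4l2_m2m_ctx_release(ctx->m2m_ctx);
 */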

/**
 * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list.
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vbuf: pointer to struct &vb2_v4l2_buffer
 *
 * Call from the driver's &vb2_ops->buf_queue callback.
 */
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
			struct vb2_v4l2_buffer *vbuf);
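
/*
 * Example (illustrative sketch only, not part of this header): the vb2
 * &vb2_ops->buf_queue callback of a hypothetical driver usually just hands
 * the buffer over to the m2m framework.
 *
 *	static void foo_buf_queue(struct vb2_buffer *vb)
 *	{
 *		struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
 *		struct foo_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
 *
 *		v4l2_m2m_buf_queue(ctx->m2m_ctx, vbuf);
 *	}
 */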

/**
 * v4l2_m2m_num_src_bufs_ready() - return the number of source buffers ready for
 * use
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline
unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
{
	return m2m_ctx->out_q_ctx.num_rdy;
}

/**
 * v4l2_m2m_num_dst_bufs_ready() - return the number of destination buffers
 * ready for use
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline
unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
{
	return m2m_ctx->cap_q_ctx.num_rdy;
}

/**
 * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 */
void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx);

/**
 * v4l2_m2m_next_src_buf() - return next source buffer from the list of ready
 * buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline void *v4l2_m2m_next_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_next_buf(&m2m_ctx->out_q_ctx);
}

/**
 * v4l2_m2m_next_dst_buf() - return next destination buffer from the list of
 * ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline void *v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_next_buf(&m2m_ctx->cap_q_ctx);
}

/**
 * v4l2_m2m_get_src_vq() - return vb2_queue for source buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline
struct vb2_queue *v4l2_m2m_get_src_vq(struct v4l2_m2m_ctx *m2m_ctx)
{
	return &m2m_ctx->out_q_ctx.q;
}

/**
 * v4l2_m2m_get_dst_vq() - return vb2_queue for destination buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline
struct vb2_queue *v4l2_m2m_get_dst_vq(struct v4l2_m2m_ctx *m2m_ctx)
{
	return &m2m_ctx->cap_q_ctx.q;
}

/**
 * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and
 * return it
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 */
void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx);

/**
 * v4l2_m2m_src_buf_remove() - take off a source buffer from the list of ready
 * buffers and return it
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline void *v4l2_m2m_src_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_buf_remove(&m2m_ctx->out_q_ctx);
}

/**
 * v4l2_m2m_dst_buf_remove() - take off a destination buffer from the list of
 * ready buffers and return it
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline void *v4l2_m2m_dst_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_buf_remove(&m2m_ctx->cap_q_ctx);
}
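
/*
 * Example (illustrative sketch only, not part of this header): a
 * hypothetical &v4l2_m2m_ops->device_run callback peeking at the next
 * ready buffer on each queue and kicking off the hardware.
 *
 *	static void foo_device_run(void *priv)
 *	{
 *		struct foo_ctx *ctx = priv;
 *		struct vb2_v4l2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
 *		dst = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
 *
 *		// Program the (hypothetical) hardware with both buffers and
 *		// start the transaction; completion is reported from the
 *		// interrupt handler, which calls v4l2_m2m_job_finish().
 *		foo_hw_run(ctx, src, dst);
 *	}
 */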

/* v4l2 ioctl helpers */

int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
			   struct v4l2_requestbuffers *rb);
int v4l2_m2m_ioctl_create_bufs(struct file *file, void *fh,
			       struct v4l2_create_buffers *create);
int v4l2_m2m_ioctl_querybuf(struct file *file, void *fh,
			    struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_expbuf(struct file *file, void *fh,
			  struct v4l2_exportbuffer *eb);
int v4l2_m2m_ioctl_qbuf(struct file *file, void *fh,
			struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_dqbuf(struct file *file, void *fh,
			 struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *fh,
			       struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_streamon(struct file *file, void *fh,
			    enum v4l2_buf_type type);
int v4l2_m2m_ioctl_streamoff(struct file *file, void *fh,
			     enum v4l2_buf_type type);
int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma);
unsigned int v4l2_m2m_fop_poll(struct file *file, poll_table *wait);
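
/*
 * Example (illustrative sketch only, not part of this header): a
 * hypothetical driver that embeds a struct v4l2_fh in its file handle and
 * sets its m2m_ctx field (as these helpers expect) can wire the helpers
 * above straight into its ops tables.
 *
 *	static const struct v4l2_ioctl_ops foo_ioctl_ops = {
 *		// ... format ioctls ...
 *		.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
 *		.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
 *		.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
 *		.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
 *		.vidioc_expbuf		= v4l2_m2m_ioctl_expbuf,
 *		.vidioc_create_bufs	= v4l2_m2m_ioctl_create_bufs,
 *		.vidioc_prepare_buf	= v4l2_m2m_ioctl_prepare_buf,
 *		.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
 *		.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
 *	};
 *
 *	static const struct v4l2_file_operations foo_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= foo_open,
 *		.release	= foo_release,
 *		.poll		= v4l2_m2m_fop_poll,
 *		.unlocked_ioctl	= video_ioctl2,
 *		.mmap		= v4l2_m2m_fop_mmap,
 *	};
 */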

#endif /* _MEDIA_V4L2_MEM2MEM_H */