v4l2-mem2mem.h 21 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650
  1. /*
  2. * Memory-to-memory device framework for Video for Linux 2.
  3. *
  4. * Helper functions for devices that use memory buffers for both source
  5. * and destination.
  6. *
  7. * Copyright (c) 2009 Samsung Electronics Co., Ltd.
  8. * Pawel Osciak, <pawel@osciak.com>
  9. * Marek Szyprowski, <m.szyprowski@samsung.com>
  10. *
  11. * This program is free software; you can redistribute it and/or modify
  12. * it under the terms of the GNU General Public License as published by the
  13. * Free Software Foundation; either version 2 of the
  14. * License, or (at your option) any later version
  15. */
  16. #ifndef _MEDIA_V4L2_MEM2MEM_H
  17. #define _MEDIA_V4L2_MEM2MEM_H
  18. #include <media/videobuf2-v4l2.h>
  19. /**
  20. * struct v4l2_m2m_ops - mem-to-mem device driver callbacks
  21. * @device_run: required. Begin the actual job (transaction) inside this
  22. * callback.
  23. * The job does NOT have to end before this callback returns
  24. * (and it will be the usual case). When the job finishes,
  25. * v4l2_m2m_job_finish() has to be called.
  26. * @job_ready: optional. Should return 0 if the driver does not have a job
  27. * fully prepared to run yet (i.e. it will not be able to finish a
  28. * transaction without sleeping). If not provided, it will be
  29. * assumed that one source and one destination buffer are all
  30. * that is required for the driver to perform one full transaction.
  31. * This method may not sleep.
  32. * @job_abort: optional. Informs the driver that it has to abort the currently
  33. * running transaction as soon as possible (i.e. as soon as it can
  34. * stop the device safely; e.g. in the next interrupt handler),
  35. * even if the transaction would not have been finished by then.
  36. * After the driver performs the necessary steps, it has to call
  37. * v4l2_m2m_job_finish() (as if the transaction ended normally).
  38. * This function does not have to (and will usually not) wait
  39. * until the device enters a state when it can be stopped.
  40. */
struct v4l2_m2m_ops {
	/* required: start the job; may return before the job finishes */
	void (*device_run)(void *priv);
	/* optional: non-zero when a full transaction can run without sleeping */
	int (*job_ready)(void *priv);
	/* optional: abort the currently running transaction ASAP */
	void (*job_abort)(void *priv);
};
  46. struct video_device;
  47. struct v4l2_m2m_dev;
  48. /**
  49. * struct v4l2_m2m_queue_ctx - represents a queue for buffers ready to be
  50. * processed
  51. *
  52. * @q: pointer to struct &vb2_queue
  53. * @rdy_queue: List of V4L2 mem-to-mem queues
  54. * @rdy_spinlock: spin lock to protect the struct usage
  55. * @num_rdy: number of buffers ready to be processed
  56. * @buffered: is the queue buffered?
  57. *
  58. * Queue for buffers ready to be processed as soon as this
  59. * instance receives access to the device.
  60. */
struct v4l2_m2m_queue_ctx {
	struct vb2_queue q;
	/* buffers ready to be processed; usage protected by rdy_spinlock */
	struct list_head rdy_queue;
	spinlock_t rdy_spinlock;
	/* number of buffers on rdy_queue */
	u8 num_rdy;
	/* if true, scheduling is allowed without v4l2 buffers on this queue */
	bool buffered;
};
  68. /**
  69. * struct v4l2_m2m_ctx - Memory to memory context structure
  70. *
  71. * @q_lock: struct &mutex lock
  72. * @m2m_dev: opaque pointer to the internal data to handle M2M context
  73. * @cap_q_ctx: Capture (output to memory) queue context
  74. * @out_q_ctx: Output (input from memory) queue context
  75. * @queue: List of memory to memory contexts
  76. * @job_flags: Job queue flags, used internally by v4l2-mem2mem.c:
  77. * %TRANS_QUEUED, %TRANS_RUNNING and %TRANS_ABORT.
  78. * @finished: Wait queue used to signalize when a job queue finished.
  79. * @priv: Instance private data
  80. *
  81. * The memory to memory context is specific to a file handle, NOT to e.g.
  82. * a device.
  83. */
struct v4l2_m2m_ctx {
	/* optional cap/out vb2 queues lock */
	struct mutex *q_lock;

	/* internal use only */
	struct v4l2_m2m_dev *m2m_dev;

	/* Capture (output to memory) queue context */
	struct v4l2_m2m_queue_ctx cap_q_ctx;

	/* Output (input from memory) queue context */
	struct v4l2_m2m_queue_ctx out_q_ctx;

	/* For device job queue */
	struct list_head queue;
	/* %TRANS_QUEUED, %TRANS_RUNNING and %TRANS_ABORT, internal to v4l2-mem2mem.c */
	unsigned long job_flags;
	/* signalled when a queued job finishes */
	wait_queue_head_t finished;

	/* instance private data */
	void *priv;
};
  97. /**
  98. * struct v4l2_m2m_buffer - Memory to memory buffer
  99. *
  100. * @vb: pointer to struct &vb2_v4l2_buffer
  101. * @list: list of m2m buffers
  102. */
struct v4l2_m2m_buffer {
	struct vb2_v4l2_buffer vb;
	/* entry on a queue context's rdy_queue (see the for_each macros below) */
	struct list_head list;
};
  107. /**
  108. * v4l2_m2m_get_curr_priv() - return driver private data for the currently
  109. * running instance or NULL if no instance is running
  110. *
  111. * @m2m_dev: opaque pointer to the internal data to handle M2M context
  112. */
  113. void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev);
  114. /**
  115. * v4l2_m2m_get_vq() - return vb2_queue for the given type
  116. *
  117. * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
  118. * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
  119. */
  120. struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
  121. enum v4l2_buf_type type);
  122. /**
  123. * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
  124. * the pending job queue and add it if so.
  125. *
  126. * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
  127. *
  128. * There are three basic requirements an instance has to meet to be able to run:
  129. * 1) at least one source buffer has to be queued,
  130. * 2) at least one destination buffer has to be queued,
  131. * 3) streaming has to be on.
  132. *
  133. * If a queue is buffered (for example a decoder hardware ringbuffer that has
  134. * to be drained before doing streamoff), allow scheduling without v4l2 buffers
  135. * on that queue.
  136. *
  137. * There may also be additional, custom requirements. In such case the driver
  138. * should supply a custom callback (job_ready in v4l2_m2m_ops) that should
  139. * return 1 if the instance is ready.
  140. * An example of the above could be an instance that requires more than one
  141. * src/dst buffer per transaction.
  142. */
  143. void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx);
  144. /**
  145. * v4l2_m2m_job_finish() - inform the framework that a job has been finished
  146. * and have it clean up
  147. *
  148. * @m2m_dev: opaque pointer to the internal data to handle M2M context
  149. * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
  150. *
  151. * Called by a driver to yield back the device after it has finished with it.
  152. * Should be called as soon as possible after reaching a state which allows
  153. * other instances to take control of the device.
  154. *
  155. * This function has to be called only after &v4l2_m2m_ops->device_run
  156. * callback has been called on the driver. To prevent recursion, it should
  157. * not be called directly from the &v4l2_m2m_ops->device_run callback though.
  158. */
  159. void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
  160. struct v4l2_m2m_ctx *m2m_ctx);
/**
 * v4l2_m2m_buf_done() - pass ownership of a finished buffer back to videobuf2
 *
 * @buf: pointer to struct &vb2_v4l2_buffer
 * @state: final buffer state, as defined by enum &vb2_buffer_state
 *
 * Thin wrapper around vb2_buffer_done() for &vb2_v4l2_buffer.
 */
static inline void
v4l2_m2m_buf_done(struct vb2_v4l2_buffer *buf, enum vb2_buffer_state state)
{
	vb2_buffer_done(&buf->vb2_buf, state);
}
  166. /**
  167. * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
  168. *
  169. * @file: pointer to struct &file
  170. * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
  171. * @reqbufs: pointer to struct &v4l2_requestbuffers
  172. */
  173. int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
  174. struct v4l2_requestbuffers *reqbufs);
  175. /**
  176. * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
  177. *
  178. * @file: pointer to struct &file
  179. * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
  180. * @buf: pointer to struct &v4l2_buffer
  181. *
  182. * See v4l2_m2m_mmap() documentation for details.
  183. */
  184. int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
  185. struct v4l2_buffer *buf);
  186. /**
  187. * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
  188. * the type
  189. *
  190. * @file: pointer to struct &file
  191. * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
  192. * @buf: pointer to struct &v4l2_buffer
  193. */
  194. int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
  195. struct v4l2_buffer *buf);
  196. /**
  197. * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
  198. * the type
  199. *
  200. * @file: pointer to struct &file
  201. * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
  202. * @buf: pointer to struct &v4l2_buffer
  203. */
  204. int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
  205. struct v4l2_buffer *buf);
  206. /**
  207. * v4l2_m2m_prepare_buf() - prepare a source or destination buffer, depending on
  208. * the type
  209. *
  210. * @file: pointer to struct &file
  211. * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
  212. * @buf: pointer to struct &v4l2_buffer
  213. */
  214. int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
  215. struct v4l2_buffer *buf);
  216. /**
  217. * v4l2_m2m_create_bufs() - create a source or destination buffer, depending
  218. * on the type
  219. *
  220. * @file: pointer to struct &file
  221. * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
  222. * @create: pointer to struct &v4l2_create_buffers
  223. */
  224. int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
  225. struct v4l2_create_buffers *create);
  226. /**
  227. * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
  228. * the type
  229. *
  230. * @file: pointer to struct &file
  231. * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
  232. * @eb: pointer to struct &v4l2_exportbuffer
  233. */
  234. int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
  235. struct v4l2_exportbuffer *eb);
  236. /**
  237. * v4l2_m2m_streamon() - turn on streaming for a video queue
  238. *
  239. * @file: pointer to struct &file
  240. * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
  241. * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
  242. */
  243. int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
  244. enum v4l2_buf_type type);
  245. /**
  246. * v4l2_m2m_streamoff() - turn off streaming for a video queue
  247. *
  248. * @file: pointer to struct &file
  249. * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
  250. * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
  251. */
  252. int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
  253. enum v4l2_buf_type type);
  254. /**
  255. * v4l2_m2m_poll() - poll replacement, for destination buffers only
  256. *
  257. * @file: pointer to struct &file
  258. * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
  259. * @wait: pointer to struct &poll_table_struct
  260. *
  261. * Call from the driver's poll() function. Will poll both queues. If a buffer
  262. * is available to dequeue (with dqbuf) from the source queue, this will
  263. * indicate that a non-blocking write can be performed, while read will be
  264. * returned in case of the destination queue.
  265. */
  266. __poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
  267. struct poll_table_struct *wait);
  268. /**
  269. * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
  270. *
  271. * @file: pointer to struct &file
  272. * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
  273. * @vma: pointer to struct &vm_area_struct
  274. *
  275. * Call from driver's mmap() function. Will handle mmap() for both queues
  276. * seamlessly for videobuffer, which will receive normal per-queue offsets and
  277. * proper videobuf queue pointers. The differentiation is made outside videobuf
  278. * by adding a predefined offset to buffers from one of the queues and
  279. * subtracting it before passing it back to videobuf. Only drivers (and
  280. * thus applications) receive modified offsets.
  281. */
  282. int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
  283. struct vm_area_struct *vma);
  284. /**
  285. * v4l2_m2m_init() - initialize per-driver m2m data
  286. *
  287. * @m2m_ops: pointer to struct v4l2_m2m_ops
  288. *
  289. * Usually called from driver's ``probe()`` function.
  290. *
  291. * Return: returns an opaque pointer to the internal data to handle M2M context
  292. */
  293. struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops);
  294. #if defined(CONFIG_MEDIA_CONTROLLER)
  295. void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev);
  296. int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
  297. struct video_device *vdev, int function);
  298. #else
/* Stubs used when CONFIG_MEDIA_CONTROLLER is disabled. */
static inline void
v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev)
{
}
static inline int
v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
		struct video_device *vdev, int function)
{
	/* nothing to register; report success so callers need no #ifdef */
	return 0;
}
  309. #endif
  310. /**
  311. * v4l2_m2m_release() - cleans up and frees a m2m_dev structure
  312. *
  313. * @m2m_dev: opaque pointer to the internal data to handle M2M context
  314. *
  315. * Usually called from driver's ``remove()`` function.
  316. */
  317. void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev);
  318. /**
  319. * v4l2_m2m_ctx_init() - allocate and initialize a m2m context
  320. *
  321. * @m2m_dev: opaque pointer to the internal data to handle M2M context
  322. * @drv_priv: driver's instance private data
  323. * @queue_init: a callback for queue type-specific initialization function
  324. * to be used for initializing videobuf_queues
  325. *
  326. * Usually called from driver's ``open()`` function.
  327. */
  328. struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
  329. void *drv_priv,
  330. int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq));
/**
 * v4l2_m2m_set_src_buffered() - (dis)allow job scheduling while the source
 * (OUTPUT) queue has no v4l2 buffers queued
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buffered: true to allow scheduling without source buffers
 */
static inline void v4l2_m2m_set_src_buffered(struct v4l2_m2m_ctx *m2m_ctx,
					     bool buffered)
{
	m2m_ctx->out_q_ctx.buffered = buffered;
}
/**
 * v4l2_m2m_set_dst_buffered() - (dis)allow job scheduling while the
 * destination (CAPTURE) queue has no v4l2 buffers queued
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buffered: true to allow scheduling without destination buffers
 */
static inline void v4l2_m2m_set_dst_buffered(struct v4l2_m2m_ctx *m2m_ctx,
					     bool buffered)
{
	m2m_ctx->cap_q_ctx.buffered = buffered;
}
  341. /**
  342. * v4l2_m2m_ctx_release() - release m2m context
  343. *
  344. * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
  345. *
  346. * Usually called from driver's release() function.
  347. */
  348. void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx);
  349. /**
  350. * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list.
  351. *
  352. * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
  353. * @vbuf: pointer to struct &vb2_v4l2_buffer
  354. *
  355. * Call from the videobuf_queue_ops->buf_queue callback.
  356. */
  357. void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
  358. struct vb2_v4l2_buffer *vbuf);
  359. /**
  360. * v4l2_m2m_num_src_bufs_ready() - return the number of source buffers ready for
  361. * use
  362. *
  363. * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
  364. */
static inline
unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
{
	/* source buffers live on the OUTPUT (input from memory) queue */
	return m2m_ctx->out_q_ctx.num_rdy;
}
  370. /**
  371. * v4l2_m2m_num_dst_bufs_ready() - return the number of destination buffers
  372. * ready for use
  373. *
  374. * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
  375. */
static inline
unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
{
	/* destination buffers live on the CAPTURE (output to memory) queue */
	return m2m_ctx->cap_q_ctx.num_rdy;
}
  381. /**
  382. * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
  383. *
  384. * @q_ctx: pointer to struct @v4l2_m2m_queue_ctx
  385. */
  386. void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx);
  387. /**
  388. * v4l2_m2m_next_src_buf() - return next source buffer from the list of ready
  389. * buffers
  390. *
  391. * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
  392. */
static inline void *v4l2_m2m_next_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
	/* source == OUTPUT queue */
	return v4l2_m2m_next_buf(&m2m_ctx->out_q_ctx);
}
  397. /**
  398. * v4l2_m2m_next_dst_buf() - return next destination buffer from the list of
  399. * ready buffers
  400. *
  401. * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
  402. */
static inline void *v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
	/* destination == CAPTURE queue */
	return v4l2_m2m_next_buf(&m2m_ctx->cap_q_ctx);
}
  407. /**
  408. * v4l2_m2m_last_buf() - return last buffer from the list of ready buffers
  409. *
  410. * @q_ctx: pointer to struct @v4l2_m2m_queue_ctx
  411. */
  412. void *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx);
  413. /**
  414. * v4l2_m2m_last_src_buf() - return last destination buffer from the list of
  415. * ready buffers
  416. *
  417. * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
  418. */
  419. static inline void *v4l2_m2m_last_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
  420. {
  421. return v4l2_m2m_last_buf(&m2m_ctx->out_q_ctx);
  422. }
  423. /**
  424. * v4l2_m2m_last_dst_buf() - return last destination buffer from the list of
  425. * ready buffers
  426. *
  427. * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
  428. */
static inline void *v4l2_m2m_last_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
	/* destination == CAPTURE queue */
	return v4l2_m2m_last_buf(&m2m_ctx->cap_q_ctx);
}
/**
 * v4l2_m2m_for_each_dst_buf() - iterate over a list of destination ready
 * buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 *
 * NOTE(review): the macro itself takes no lock; caller presumably must hold
 * @rdy_spinlock — confirm against v4l2-mem2mem.c usage.
 */
#define v4l2_m2m_for_each_dst_buf(m2m_ctx, b)	\
	list_for_each_entry(b, &m2m_ctx->cap_q_ctx.rdy_queue, list)
/**
 * v4l2_m2m_for_each_src_buf() - iterate over a list of source ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 *
 * NOTE(review): the macro itself takes no lock; caller presumably must hold
 * @rdy_spinlock — confirm against v4l2-mem2mem.c usage.
 */
#define v4l2_m2m_for_each_src_buf(m2m_ctx, b)	\
	list_for_each_entry(b, &m2m_ctx->out_q_ctx.rdy_queue, list)
/**
 * v4l2_m2m_for_each_dst_buf_safe() - iterate over a list of destination ready
 * buffers safely (current entry may be removed during iteration)
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 * @n: used as temporary storage
 */
#define v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, b, n)	\
	list_for_each_entry_safe(b, n, &m2m_ctx->cap_q_ctx.rdy_queue, list)
/**
 * v4l2_m2m_for_each_src_buf_safe() - iterate over a list of source ready
 * buffers safely (current entry may be removed during iteration)
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 * @n: used as temporary storage
 */
#define v4l2_m2m_for_each_src_buf_safe(m2m_ctx, b, n)	\
	list_for_each_entry_safe(b, n, &m2m_ctx->out_q_ctx.rdy_queue, list)
  470. /**
  471. * v4l2_m2m_get_src_vq() - return vb2_queue for source buffers
  472. *
  473. * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
  474. */
static inline
struct vb2_queue *v4l2_m2m_get_src_vq(struct v4l2_m2m_ctx *m2m_ctx)
{
	/* source == OUTPUT queue */
	return &m2m_ctx->out_q_ctx.q;
}
  480. /**
  481. * v4l2_m2m_get_dst_vq() - return vb2_queue for destination buffers
  482. *
  483. * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
  484. */
static inline
struct vb2_queue *v4l2_m2m_get_dst_vq(struct v4l2_m2m_ctx *m2m_ctx)
{
	/* destination == CAPTURE queue */
	return &m2m_ctx->cap_q_ctx.q;
}
  490. /**
  491. * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and
  492. * return it
  493. *
  494. * @q_ctx: pointer to struct @v4l2_m2m_queue_ctx
  495. */
  496. void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx);
  497. /**
  498. * v4l2_m2m_src_buf_remove() - take off a source buffer from the list of ready
  499. * buffers and return it
  500. *
  501. * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
  502. */
static inline void *v4l2_m2m_src_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
{
	/* source == OUTPUT queue */
	return v4l2_m2m_buf_remove(&m2m_ctx->out_q_ctx);
}
  507. /**
  508. * v4l2_m2m_dst_buf_remove() - take off a destination buffer from the list of
  509. * ready buffers and return it
  510. *
  511. * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
  512. */
static inline void *v4l2_m2m_dst_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
{
	/* destination == CAPTURE queue */
	return v4l2_m2m_buf_remove(&m2m_ctx->cap_q_ctx);
}
  517. /**
  518. * v4l2_m2m_buf_remove_by_buf() - take off exact buffer from the list of ready
  519. * buffers
  520. *
  521. * @q_ctx: pointer to struct @v4l2_m2m_queue_ctx
  522. * @vbuf: the buffer to be removed
  523. */
  524. void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
  525. struct vb2_v4l2_buffer *vbuf);
  526. /**
  527. * v4l2_m2m_src_buf_remove_by_buf() - take off exact source buffer from the list
  528. * of ready buffers
  529. *
  530. * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
  531. * @vbuf: the buffer to be removed
  532. */
static inline void v4l2_m2m_src_buf_remove_by_buf(struct v4l2_m2m_ctx *m2m_ctx,
						  struct vb2_v4l2_buffer *vbuf)
{
	/* source == OUTPUT queue */
	v4l2_m2m_buf_remove_by_buf(&m2m_ctx->out_q_ctx, vbuf);
}
  538. /**
  539. * v4l2_m2m_dst_buf_remove_by_buf() - take off exact destination buffer from the
  540. * list of ready buffers
  541. *
  542. * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
  543. * @vbuf: the buffer to be removed
  544. */
static inline void v4l2_m2m_dst_buf_remove_by_buf(struct v4l2_m2m_ctx *m2m_ctx,
						  struct vb2_v4l2_buffer *vbuf)
{
	/* destination == CAPTURE queue */
	v4l2_m2m_buf_remove_by_buf(&m2m_ctx->cap_q_ctx, vbuf);
}
  550. struct vb2_v4l2_buffer *
  551. v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx);
/**
 * v4l2_m2m_src_buf_remove_by_idx() - take the source buffer with the given
 * index off the list of ready buffers and return it
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @idx: index of the buffer to remove
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_src_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx)
{
	return v4l2_m2m_buf_remove_by_idx(&m2m_ctx->out_q_ctx, idx);
}
/**
 * v4l2_m2m_dst_buf_remove_by_idx() - take the destination buffer with the
 * given index off the list of ready buffers and return it
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @idx: index of the buffer to remove
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_dst_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx)
{
	return v4l2_m2m_buf_remove_by_idx(&m2m_ctx->cap_q_ctx, idx);
}
  562. /* v4l2 ioctl helpers */
  563. int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
  564. struct v4l2_requestbuffers *rb);
  565. int v4l2_m2m_ioctl_create_bufs(struct file *file, void *fh,
  566. struct v4l2_create_buffers *create);
  567. int v4l2_m2m_ioctl_querybuf(struct file *file, void *fh,
  568. struct v4l2_buffer *buf);
  569. int v4l2_m2m_ioctl_expbuf(struct file *file, void *fh,
  570. struct v4l2_exportbuffer *eb);
  571. int v4l2_m2m_ioctl_qbuf(struct file *file, void *fh,
  572. struct v4l2_buffer *buf);
  573. int v4l2_m2m_ioctl_dqbuf(struct file *file, void *fh,
  574. struct v4l2_buffer *buf);
  575. int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *fh,
  576. struct v4l2_buffer *buf);
  577. int v4l2_m2m_ioctl_streamon(struct file *file, void *fh,
  578. enum v4l2_buf_type type);
  579. int v4l2_m2m_ioctl_streamoff(struct file *file, void *fh,
  580. enum v4l2_buf_type type);
  581. int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma);
  582. __poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait);
  583. #endif /* _MEDIA_V4L2_MEM2MEM_H */