videobuf2-vmalloc.c

/*
 * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/refcount.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
#include <media/videobuf2-memops.h>
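/* Per-buffer bookkeeping shared by the MMAP, USERPTR and DMABUF paths. */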
struct vb2_vmalloc_buf {
	void				*vaddr;
	struct frame_vector		*vec;
	enum dma_data_direction	dma_dir;
	unsigned long			size;
	refcount_t			refcount;
	struct vb2_vmarea_handler	handler;
	struct dma_buf			*dbuf;
};

static void vb2_vmalloc_put(void *buf_priv);
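/*
 * MMAP: allocate the buffer with vmalloc_user() and start its refcount
 * at one; the handler lets the common vm_ops drop that reference.
 */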
static void *vb2_vmalloc_alloc(struct device *dev, unsigned long attrs,
			       unsigned long size, enum dma_data_direction dma_dir,
			       gfp_t gfp_flags)
{
	struct vb2_vmalloc_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL | gfp_flags);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->size = size;
	buf->vaddr = vmalloc_user(buf->size);
	buf->dma_dir = dma_dir;
	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_vmalloc_put;
	buf->handler.arg = buf;

	if (!buf->vaddr) {
		pr_debug("vmalloc of size %ld failed\n", buf->size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	refcount_set(&buf->refcount, 1);
	return buf;
}

static void vb2_vmalloc_put(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (refcount_dec_and_test(&buf->refcount)) {
		vfree(buf->vaddr);
		kfree(buf);
	}
}
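/*
 * USERPTR: pin the user pages with a frame vector. When the pages have
 * no struct page (e.g. a PFNMAP range), the memory must be physically
 * contiguous and is mapped with ioremap_nocache(); otherwise the pages
 * are mapped with vm_map_ram().
 */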
static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
				     unsigned long size,
				     enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_buf *buf;
	struct frame_vector *vec;
	int n_pages, offset, i;
	int ret = -ENOMEM;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dma_dir = dma_dir;
	offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE ||
					       dma_dir == DMA_BIDIRECTIONAL);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_pfnvec_create;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	if (frame_vector_to_pages(vec) < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * We cannot get page pointers for these pfns. Check memory is
		 * physically contiguous and use direct mapping.
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i-1] + 1 != nums[i])
				goto fail_map;
		buf->vaddr = (__force void *)
			ioremap_nocache(__pfn_to_phys(nums[0]), size + offset);
	} else {
		buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1,
					PAGE_KERNEL);
	}

	if (!buf->vaddr)
		goto fail_map;
	buf->vaddr += offset;
	return buf;

fail_map:
	vb2_destroy_framevec(vec);
fail_pfnvec_create:
	kfree(buf);
	return ERR_PTR(ret);
}
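/*
 * Undo vb2_vmalloc_get_userptr(): drop the kernel mapping, dirty the
 * pages if the device may have written to them, and release the frame
 * vector.
 */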
static void vb2_vmalloc_put_userptr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
	unsigned int i;
	struct page **pages;
	unsigned int n_pages;

	if (!buf->vec->is_pfns) {
		n_pages = frame_vector_count(buf->vec);
		pages = frame_vector_pages(buf->vec);
		if (vaddr)
			vm_unmap_ram((void *)vaddr, n_pages);
		if (buf->dma_dir == DMA_FROM_DEVICE ||
		    buf->dma_dir == DMA_BIDIRECTIONAL)
			for (i = 0; i < n_pages; i++)
				set_page_dirty_lock(pages[i]);
	} else {
		iounmap((__force void __iomem *)buf->vaddr);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

static void *vb2_vmalloc_vaddr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (!buf->vaddr) {
		pr_err("Address of an unallocated plane requested or cannot map user pointer\n");
		return NULL;
	}

	return buf->vaddr;
}

static unsigned int vb2_vmalloc_num_users(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}
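/*
 * Map the vmalloc'ed buffer into a userspace VMA and install the common
 * vm_ops so that the mapping holds a reference on the buffer.
 */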
static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		pr_err("No memory to map\n");
		return -EINVAL;
	}

	ret = remap_vmalloc_range(vma, buf->vaddr, 0);
	if (ret) {
		pr_err("Remapping vmalloc memory, error: %d\n", ret);
		return ret;
	}

	/*
	 * Make sure that vm_areas for 2 buffers won't be merged together
	 */
	vma->vm_flags |= VM_DONTEXPAND;

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

#ifdef CONFIG_HAS_DMA
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_vmalloc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};
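/*
 * Exporter attach: build an sg_table covering the vmalloc area one page
 * at a time; the table is only dma-mapped later, in the map callback.
 */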
static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_vmalloc_attachment *attach;
	struct vb2_vmalloc_buf *buf = dbuf->priv;
	int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
	struct sg_table *sgt;
	struct scatterlist *sg;
	void *vaddr = buf->vaddr;
	int ret;
	int i;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return ret;
	}
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		struct page *page = vmalloc_to_page(vaddr);

		if (!page) {
			sg_free_table(sgt);
			kfree(attach);
			return -ENOMEM;
		}
		sg_set_page(sg, page, PAGE_SIZE, 0);
		vaddr += PAGE_SIZE;
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;
	return 0;
}

static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}
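/*
 * Map the cached sg_table for the attached device, reusing the previous
 * mapping when the requested direction has not changed.
 */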
static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
				dma_dir);
	if (!sgt->nents) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_vmalloc_get_dmabuf */
	vb2_vmalloc_put(dbuf->priv);
}

static void *vb2_vmalloc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_vmalloc_buf *buf = dbuf->priv;

	return buf->vaddr + pgnum * PAGE_SIZE;
}

static void *vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_vmalloc_buf *buf = dbuf->priv;

	return buf->vaddr;
}

static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_vmalloc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
	.attach = vb2_vmalloc_dmabuf_ops_attach,
	.detach = vb2_vmalloc_dmabuf_ops_detach,
	.map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
	.map = vb2_vmalloc_dmabuf_ops_kmap,
	.vmap = vb2_vmalloc_dmabuf_ops_vmap,
	.mmap = vb2_vmalloc_dmabuf_ops_mmap,
	.release = vb2_vmalloc_dmabuf_ops_release,
};
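/*
 * Export the buffer as a dma-buf; the exported dma-buf takes its own
 * reference on the vb2 buffer, dropped again in the release callback.
 */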
static struct dma_buf *vb2_vmalloc_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_vmalloc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->vaddr))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}
#endif /* CONFIG_HAS_DMA */

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/
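/* Importer side: obtain and release a kernel mapping of an attached dma-buf. */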
static int vb2_vmalloc_map_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	buf->vaddr = dma_buf_vmap(buf->dbuf);

	return buf->vaddr ? 0 : -EFAULT;
}

static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	dma_buf_vunmap(buf->dbuf, buf->vaddr);
	buf->vaddr = NULL;
}

static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	if (buf->vaddr)
		dma_buf_vunmap(buf->dbuf, buf->vaddr);

	kfree(buf);
}
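/*
 * Importer attach: sanity-check the dma-buf size and remember it; the
 * buffer is only vmapped later, in vb2_vmalloc_map_dmabuf().
 */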
static void *vb2_vmalloc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_buf *buf;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dbuf = dbuf;
	buf->dma_dir = dma_dir;
	buf->size = size;

	return buf;
}

const struct vb2_mem_ops vb2_vmalloc_memops = {
	.alloc		= vb2_vmalloc_alloc,
	.put		= vb2_vmalloc_put,
	.get_userptr	= vb2_vmalloc_get_userptr,
	.put_userptr	= vb2_vmalloc_put_userptr,
#ifdef CONFIG_HAS_DMA
	.get_dmabuf	= vb2_vmalloc_get_dmabuf,
#endif
	.map_dmabuf	= vb2_vmalloc_map_dmabuf,
	.unmap_dmabuf	= vb2_vmalloc_unmap_dmabuf,
	.attach_dmabuf	= vb2_vmalloc_attach_dmabuf,
	.detach_dmabuf	= vb2_vmalloc_detach_dmabuf,
	.vaddr		= vb2_vmalloc_vaddr,
	.mmap		= vb2_vmalloc_mmap,
	.num_users	= vb2_vmalloc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);

MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");