videobuf2-vmalloc.c

/*
 * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
#include <media/videobuf2-memops.h>

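/*
 * Per-buffer state shared by the MMAP, USERPTR and DMABUF modes: the
 * kernel mapping, the pinned user pages (USERPTR only), the attached
 * dma-buf (DMABUF only), the DMA direction, the buffer size, a refcount
 * and the vm_area handler used for MMAP refcounting.
 */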
struct vb2_vmalloc_buf {
	void *vaddr;
	struct frame_vector *vec;
	enum dma_data_direction dma_dir;
	unsigned long size;
	atomic_t refcount;
	struct vb2_vmarea_handler handler;
	struct dma_buf *dbuf;
};

static void vb2_vmalloc_put(void *buf_priv);

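/*
 * MMAP mode: allocate the buffer with vmalloc_user() so that it can later
 * be remapped into userspace, and take the initial reference that is
 * dropped again in vb2_vmalloc_put().
 */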
static void *vb2_vmalloc_alloc(struct device *dev, unsigned long attrs,
			       unsigned long size, enum dma_data_direction dma_dir,
			       gfp_t gfp_flags)
{
	struct vb2_vmalloc_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL | gfp_flags);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->size = size;
	buf->vaddr = vmalloc_user(buf->size);
	buf->dma_dir = dma_dir;
	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_vmalloc_put;
	buf->handler.arg = buf;

	if (!buf->vaddr) {
		pr_debug("vmalloc of size %ld failed\n", buf->size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	atomic_inc(&buf->refcount);
	return buf;
}

static void vb2_vmalloc_put(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (atomic_dec_and_test(&buf->refcount)) {
		vfree(buf->vaddr);
		kfree(buf);
	}
}

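/*
 * USERPTR mode: pin the user pages behind [vaddr, vaddr + size) in a
 * frame vector.  Normal memory is mapped into the kernel with
 * vm_map_ram(); pfnmap ranges without struct pages are accepted only if
 * they are physically contiguous, in which case they are mapped with
 * ioremap_nocache().
 */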
static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
				     unsigned long size,
				     enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_buf *buf;
	struct frame_vector *vec;
	int n_pages, offset, i;
	int ret = -ENOMEM;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dma_dir = dma_dir;
	offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_pfnvec_create;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	if (frame_vector_to_pages(vec) < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * We cannot get page pointers for these pfns. Check memory is
		 * physically contiguous and use direct mapping.
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i-1] + 1 != nums[i])
				goto fail_map;
		buf->vaddr = (__force void *)
			ioremap_nocache(__pfn_to_phys(nums[0]), size + offset);
	} else {
		buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1,
					PAGE_KERNEL);
	}

	if (!buf->vaddr)
		goto fail_map;
	buf->vaddr += offset;
	return buf;

fail_map:
	vb2_destroy_framevec(vec);
fail_pfnvec_create:
	kfree(buf);
	return ERR_PTR(ret);
}

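/*
 * Undo vb2_vmalloc_get_userptr(): drop the kernel mapping, mark the pages
 * dirty if the device wrote into them, and release the pinned pages.
 */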
static void vb2_vmalloc_put_userptr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
	unsigned int i;
	struct page **pages;
	unsigned int n_pages;

	if (!buf->vec->is_pfns) {
		n_pages = frame_vector_count(buf->vec);
		pages = frame_vector_pages(buf->vec);
		if (vaddr)
			vm_unmap_ram((void *)vaddr, n_pages);
		if (buf->dma_dir == DMA_FROM_DEVICE)
			for (i = 0; i < n_pages; i++)
				set_page_dirty_lock(pages[i]);
	} else {
		iounmap((__force void __iomem *)buf->vaddr);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

static void *vb2_vmalloc_vaddr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (!buf->vaddr) {
		pr_err("Address of an unallocated plane requested or cannot map user pointer\n");
		return NULL;
	}

	return buf->vaddr;
}

static unsigned int vb2_vmalloc_num_users(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}

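/*
 * Map the vmalloc'ed buffer into a userspace VMA and hook up
 * vb2_common_vm_ops so that the VMA open/close callbacks keep the buffer
 * refcount in sync with the userspace mappings.
 */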
static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		pr_err("No memory to map\n");
		return -EINVAL;
	}

	ret = remap_vmalloc_range(vma, buf->vaddr, 0);
	if (ret) {
		pr_err("Remapping vmalloc memory, error: %d\n", ret);
		return ret;
	}

	/*
	 * Make sure that vm_areas for 2 buffers won't be merged together
	 */
	vma->vm_flags |= VM_DONTEXPAND;

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

#ifdef CONFIG_HAS_DMA
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_vmalloc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

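/*
 * Each importer attaching to the exported dma-buf gets its own sg_table,
 * built page by page from the vmalloc area via vmalloc_to_page().  The
 * table is only mapped for DMA later, in the map_dma_buf callback.
 */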
static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_vmalloc_attachment *attach;
	struct vb2_vmalloc_buf *buf = dbuf->priv;
	int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
	struct sg_table *sgt;
	struct scatterlist *sg;
	void *vaddr = buf->vaddr;
	int ret;
	int i;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return ret;
	}
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		struct page *page = vmalloc_to_page(vaddr);

		if (!page) {
			sg_free_table(sgt);
			kfree(attach);
			return -ENOMEM;
		}
		sg_set_page(sg, page, PAGE_SIZE, 0);
		vaddr += PAGE_SIZE;
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;
	return 0;
}

static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

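/*
 * Map (or remap) the attachment's sg_table for DMA in the requested
 * direction.  A cached mapping is reused if the direction has not
 * changed; the dma-buf lock serializes concurrent map/unmap calls.
 */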
static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
				dma_dir);
	if (!sgt->nents) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_vmalloc_get_dmabuf */
	vb2_vmalloc_put(dbuf->priv);
}

static void *vb2_vmalloc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_vmalloc_buf *buf = dbuf->priv;

	return buf->vaddr + pgnum * PAGE_SIZE;
}

static void *vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_vmalloc_buf *buf = dbuf->priv;

	return buf->vaddr;
}

static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_vmalloc_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
	.attach = vb2_vmalloc_dmabuf_ops_attach,
	.detach = vb2_vmalloc_dmabuf_ops_detach,
	.map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
	.kmap = vb2_vmalloc_dmabuf_ops_kmap,
	.kmap_atomic = vb2_vmalloc_dmabuf_ops_kmap,
	.vmap = vb2_vmalloc_dmabuf_ops_vmap,
	.mmap = vb2_vmalloc_dmabuf_ops_mmap,
	.release = vb2_vmalloc_dmabuf_ops_release,
};

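/*
 * Export an MMAP buffer as a dma-buf.  The exported object holds an extra
 * reference on the vb2 buffer, which is dropped again in the dma-buf
 * release callback above.
 */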
static struct dma_buf *vb2_vmalloc_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_vmalloc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->vaddr))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	atomic_inc(&buf->refcount);

	return dbuf;
}
#endif /* CONFIG_HAS_DMA */

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

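/*
 * DMABUF import: attach_dmabuf only records the dma-buf handle and the
 * DMA direction; the kernel mapping is created and destroyed on demand
 * with dma_buf_vmap()/dma_buf_vunmap() in map_dmabuf/unmap_dmabuf.
 */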
static int vb2_vmalloc_map_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	buf->vaddr = dma_buf_vmap(buf->dbuf);

	return buf->vaddr ? 0 : -EFAULT;
}

static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	dma_buf_vunmap(buf->dbuf, buf->vaddr);
	buf->vaddr = NULL;
}

static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	if (buf->vaddr)
		dma_buf_vunmap(buf->dbuf, buf->vaddr);

	kfree(buf);
}

static void *vb2_vmalloc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_buf *buf;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dbuf = dbuf;
	buf->dma_dir = dma_dir;
	buf->size = size;

	return buf;
}

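/*
 * The vb2_mem_ops table that drivers point their vb2_queue->mem_ops at to
 * get vmalloc-backed buffers.  The get_dmabuf (export) callback is only
 * available when CONFIG_HAS_DMA is set.
 */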
const struct vb2_mem_ops vb2_vmalloc_memops = {
	.alloc = vb2_vmalloc_alloc,
	.put = vb2_vmalloc_put,
	.get_userptr = vb2_vmalloc_get_userptr,
	.put_userptr = vb2_vmalloc_put_userptr,
#ifdef CONFIG_HAS_DMA
	.get_dmabuf = vb2_vmalloc_get_dmabuf,
#endif
	.map_dmabuf = vb2_vmalloc_map_dmabuf,
	.unmap_dmabuf = vb2_vmalloc_unmap_dmabuf,
	.attach_dmabuf = vb2_vmalloc_attach_dmabuf,
	.detach_dmabuf = vb2_vmalloc_detach_dmabuf,
	.vaddr = vb2_vmalloc_vaddr,
	.mmap = vb2_vmalloc_mmap,
	.num_users = vb2_vmalloc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);

MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");
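
/*
 * Usage note (not part of this file): a driver selects this allocator by
 * pointing its vb2_queue at vb2_vmalloc_memops during queue setup.  A
 * minimal sketch, where my_vb2_buffer and my_vb2_queue_ops stand in for
 * the driver's own buffer type and queue ops:
 *
 *	struct vb2_queue *q = &dev->queue;
 *
 *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
 *	q->mem_ops = &vb2_vmalloc_memops;
 *	q->buf_struct_size = sizeof(struct my_vb2_buffer);
 *	q->ops = &my_vb2_queue_ops;
 *	ret = vb2_queue_init(q);
 */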