- /*
- * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
- *
- * Copyright (C) 2010 Samsung Electronics
- *
- * Author: Pawel Osciak <pawel@osciak.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation.
- */
- #include <linux/dma-buf.h>
- #include <linux/module.h>
- #include <linux/refcount.h>
- #include <linux/scatterlist.h>
- #include <linux/sched.h>
- #include <linux/slab.h>
- #include <linux/dma-mapping.h>
- #include <media/videobuf2-v4l2.h>
- #include <media/videobuf2-dma-contig.h>
- #include <media/videobuf2-memops.h>
- struct vb2_dc_buf {
- struct device *dev;
- void *vaddr;
- unsigned long size;
- void *cookie;
- dma_addr_t dma_addr;
- unsigned long attrs;
- enum dma_data_direction dma_dir;
- struct sg_table *dma_sgt;
- struct frame_vector *vec;
- /* MMAP related */
- struct vb2_vmarea_handler handler;
- refcount_t refcount;
- struct sg_table *sgt_base;
- /* DMABUF related */
- struct dma_buf_attachment *db_attach;
- };
- /*********************************************/
- /* scatterlist table functions */
- /*********************************************/
- static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
- {
- struct scatterlist *s;
- dma_addr_t expected = sg_dma_address(sgt->sgl);
- unsigned int i;
- unsigned long size = 0;
- for_each_sg(sgt->sgl, s, sgt->nents, i) {
- if (sg_dma_address(s) != expected)
- break;
- expected = sg_dma_address(s) + sg_dma_len(s);
- size += sg_dma_len(s);
- }
- return size;
- }
- /*********************************************/
- /* callbacks for all buffers */
- /*********************************************/
- static void *vb2_dc_cookie(void *buf_priv)
- {
- struct vb2_dc_buf *buf = buf_priv;
- return &buf->dma_addr;
- }
- static void *vb2_dc_vaddr(void *buf_priv)
- {
- struct vb2_dc_buf *buf = buf_priv;
- if (!buf->vaddr && buf->db_attach)
- buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
- return buf->vaddr;
- }
- static unsigned int vb2_dc_num_users(void *buf_priv)
- {
- struct vb2_dc_buf *buf = buf_priv;
- return refcount_read(&buf->refcount);
- }
- static void vb2_dc_prepare(void *buf_priv)
- {
- struct vb2_dc_buf *buf = buf_priv;
- struct sg_table *sgt = buf->dma_sgt;
- /* DMABUF exporter will flush the cache for us */
- if (!sgt || buf->db_attach)
- return;
- dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
- buf->dma_dir);
- }
- static void vb2_dc_finish(void *buf_priv)
- {
- struct vb2_dc_buf *buf = buf_priv;
- struct sg_table *sgt = buf->dma_sgt;
- /* DMABUF exporter will flush the cache for us */
- if (!sgt || buf->db_attach)
- return;
- dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
- }
- /*********************************************/
- /* callbacks for MMAP buffers */
- /*********************************************/
- static void vb2_dc_put(void *buf_priv)
- {
- struct vb2_dc_buf *buf = buf_priv;
- if (!refcount_dec_and_test(&buf->refcount))
- return;
- if (buf->sgt_base) {
- sg_free_table(buf->sgt_base);
- kfree(buf->sgt_base);
- }
- dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr,
- buf->attrs);
- put_device(buf->dev);
- kfree(buf);
- }
- static void *vb2_dc_alloc(struct device *dev, unsigned long attrs,
- unsigned long size, enum dma_data_direction dma_dir,
- gfp_t gfp_flags)
- {
- struct vb2_dc_buf *buf;
- if (WARN_ON(!dev))
- return ERR_PTR(-EINVAL);
- buf = kzalloc(sizeof *buf, GFP_KERNEL);
- if (!buf)
- return ERR_PTR(-ENOMEM);
- if (attrs)
- buf->attrs = attrs;
- buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
- GFP_KERNEL | gfp_flags, buf->attrs);
- if (!buf->cookie) {
- dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
- kfree(buf);
- return ERR_PTR(-ENOMEM);
- }
- if ((buf->attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
- buf->vaddr = buf->cookie;
- /* Prevent the device from being released while the buffer is used */
- buf->dev = get_device(dev);
- buf->size = size;
- buf->dma_dir = dma_dir;
- buf->handler.refcount = &buf->refcount;
- buf->handler.put = vb2_dc_put;
- buf->handler.arg = buf;
- refcount_set(&buf->refcount, 1);
- return buf;
- }
- static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
- {
- struct vb2_dc_buf *buf = buf_priv;
- int ret;
- if (!buf) {
- printk(KERN_ERR "No buffer to map\n");
- return -EINVAL;
- }
- /*
- * dma_mmap_* uses vm_pgoff as an in-buffer offset, but we want to
- * map the whole buffer
- */
- vma->vm_pgoff = 0;
- ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
- buf->dma_addr, buf->size, buf->attrs);
- if (ret) {
- pr_err("Remapping memory failed, error: %d\n", ret);
- return ret;
- }
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
- vma->vm_private_data = &buf->handler;
- vma->vm_ops = &vb2_common_vm_ops;
- vma->vm_ops->open(vma);
- pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
- __func__, (unsigned long)buf->dma_addr, vma->vm_start,
- buf->size);
- return 0;
- }
- /*********************************************/
- /* DMABUF ops for exporters */
- /*********************************************/
- struct vb2_dc_attachment {
- struct sg_table sgt;
- enum dma_data_direction dma_dir;
- };
- static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf,
- struct dma_buf_attachment *dbuf_attach)
- {
- struct vb2_dc_attachment *attach;
- unsigned int i;
- struct scatterlist *rd, *wr;
- struct sg_table *sgt;
- struct vb2_dc_buf *buf = dbuf->priv;
- int ret;
- attach = kzalloc(sizeof(*attach), GFP_KERNEL);
- if (!attach)
- return -ENOMEM;
- sgt = &attach->sgt;
- /* Copy the buf->sgt_base scatter list to the attachment, as we can't
- * map the same scatter list to multiple attachments at the same time.
- */
- ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
- if (ret) {
- kfree(attach);
- return -ENOMEM;
- }
- rd = buf->sgt_base->sgl;
- wr = sgt->sgl;
- for (i = 0; i < sgt->orig_nents; ++i) {
- sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
- rd = sg_next(rd);
- wr = sg_next(wr);
- }
- attach->dma_dir = DMA_NONE;
- dbuf_attach->priv = attach;
- return 0;
- }
- static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
- struct dma_buf_attachment *db_attach)
- {
- struct vb2_dc_attachment *attach = db_attach->priv;
- struct sg_table *sgt;
- if (!attach)
- return;
- sgt = &attach->sgt;
- /* release the scatterlist cache */
- if (attach->dma_dir != DMA_NONE)
- dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
- attach->dma_dir);
- sg_free_table(sgt);
- kfree(attach);
- db_attach->priv = NULL;
- }
- static struct sg_table *vb2_dc_dmabuf_ops_map(
- struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
- {
- struct vb2_dc_attachment *attach = db_attach->priv;
- /* stealing dmabuf mutex to serialize map/unmap operations */
- struct mutex *lock = &db_attach->dmabuf->lock;
- struct sg_table *sgt;
- mutex_lock(lock);
- sgt = &attach->sgt;
- /* return previously mapped sg table */
- if (attach->dma_dir == dma_dir) {
- mutex_unlock(lock);
- return sgt;
- }
- /* release any previous cache */
- if (attach->dma_dir != DMA_NONE) {
- dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
- attach->dma_dir);
- attach->dma_dir = DMA_NONE;
- }
- /* mapping to the client with new direction */
- sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
- dma_dir);
- if (!sgt->nents) {
- pr_err("failed to map scatterlist\n");
- mutex_unlock(lock);
- return ERR_PTR(-EIO);
- }
- attach->dma_dir = dma_dir;
- mutex_unlock(lock);
- return sgt;
- }
- static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
- struct sg_table *sgt, enum dma_data_direction dma_dir)
- {
- /* nothing to be done here */
- }
- static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
- {
- /* drop reference obtained in vb2_dc_get_dmabuf */
- vb2_dc_put(dbuf->priv);
- }
- static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
- {
- struct vb2_dc_buf *buf = dbuf->priv;
- return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
- }
- static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
- {
- struct vb2_dc_buf *buf = dbuf->priv;
- return buf->vaddr;
- }
- static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
- struct vm_area_struct *vma)
- {
- return vb2_dc_mmap(dbuf->priv, vma);
- }
- static const struct dma_buf_ops vb2_dc_dmabuf_ops = {
- .attach = vb2_dc_dmabuf_ops_attach,
- .detach = vb2_dc_dmabuf_ops_detach,
- .map_dma_buf = vb2_dc_dmabuf_ops_map,
- .unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
- .map = vb2_dc_dmabuf_ops_kmap,
- .vmap = vb2_dc_dmabuf_ops_vmap,
- .mmap = vb2_dc_dmabuf_ops_mmap,
- .release = vb2_dc_dmabuf_ops_release,
- };
- static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
- {
- int ret;
- struct sg_table *sgt;
- sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
- if (!sgt) {
- dev_err(buf->dev, "failed to alloc sg table\n");
- return NULL;
- }
- ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
- buf->size, buf->attrs);
- if (ret < 0) {
- dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
- kfree(sgt);
- return NULL;
- }
- return sgt;
- }
- static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
- {
- struct vb2_dc_buf *buf = buf_priv;
- struct dma_buf *dbuf;
- DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
- exp_info.ops = &vb2_dc_dmabuf_ops;
- exp_info.size = buf->size;
- exp_info.flags = flags;
- exp_info.priv = buf;
- if (!buf->sgt_base)
- buf->sgt_base = vb2_dc_get_base_sgt(buf);
- if (WARN_ON(!buf->sgt_base))
- return NULL;
- dbuf = dma_buf_export(&exp_info);
- if (IS_ERR(dbuf))
- return NULL;
- /* dmabuf keeps reference to vb2 buffer */
- refcount_inc(&buf->refcount);
- return dbuf;
- }
- /*********************************************/
- /* callbacks for USERPTR buffers */
- /*********************************************/
- static void vb2_dc_put_userptr(void *buf_priv)
- {
- struct vb2_dc_buf *buf = buf_priv;
- struct sg_table *sgt = buf->dma_sgt;
- int i;
- struct page **pages;
- if (sgt) {
- /*
- * No need to sync to CPU, it's already synced to the CPU
- * since the finish() memop will have been called before this.
- */
- dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
- buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
- pages = frame_vector_pages(buf->vec);
- /* sgt should exist only if vector contains pages... */
- BUG_ON(IS_ERR(pages));
- if (buf->dma_dir == DMA_FROM_DEVICE ||
- buf->dma_dir == DMA_BIDIRECTIONAL)
- for (i = 0; i < frame_vector_count(buf->vec); i++)
- set_page_dirty_lock(pages[i]);
- sg_free_table(sgt);
- kfree(sgt);
- }
- vb2_destroy_framevec(buf->vec);
- kfree(buf);
- }
- /*
- * For some kinds of reserved memory there might be no struct page available,
- * so all that can be done to support such 'pages' is to try to convert the
- * pfn to a dma address, or as a last resort just assume that
- * dma address == physical address (as was assumed in earlier versions
- * of videobuf2-dma-contig).
- */
- #ifdef __arch_pfn_to_dma
- static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
- {
- return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
- }
- #elif defined(__pfn_to_bus)
- static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
- {
- return (dma_addr_t)__pfn_to_bus(pfn);
- }
- #elif defined(__pfn_to_phys)
- static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
- {
- return (dma_addr_t)__pfn_to_phys(pfn);
- }
- #else
- static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
- {
- /* really, we cannot do anything better at this point */
- return (dma_addr_t)(pfn) << PAGE_SHIFT;
- }
- #endif
- static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
- unsigned long size, enum dma_data_direction dma_dir)
- {
- struct vb2_dc_buf *buf;
- struct frame_vector *vec;
- unsigned int offset;
- int n_pages, i;
- int ret = 0;
- struct sg_table *sgt;
- unsigned long contig_size;
- unsigned long dma_align = dma_get_cache_alignment();
- /* Only cache aligned DMA transfers are reliable */
- if (!IS_ALIGNED(vaddr | size, dma_align)) {
- pr_debug("user data must be aligned to %lu bytes\n", dma_align);
- return ERR_PTR(-EINVAL);
- }
- if (!size) {
- pr_debug("size is zero\n");
- return ERR_PTR(-EINVAL);
- }
- if (WARN_ON(!dev))
- return ERR_PTR(-EINVAL);
- buf = kzalloc(sizeof *buf, GFP_KERNEL);
- if (!buf)
- return ERR_PTR(-ENOMEM);
- buf->dev = dev;
- buf->dma_dir = dma_dir;
- offset = lower_32_bits(offset_in_page(vaddr));
- vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE ||
- dma_dir == DMA_BIDIRECTIONAL);
- if (IS_ERR(vec)) {
- ret = PTR_ERR(vec);
- goto fail_buf;
- }
- buf->vec = vec;
- n_pages = frame_vector_count(vec);
- ret = frame_vector_to_pages(vec);
- if (ret < 0) {
- unsigned long *nums = frame_vector_pfns(vec);
- /*
- * Failed to convert to pages... Check that the memory is physically
- * contiguous and use a direct mapping.
- */
- for (i = 1; i < n_pages; i++)
- if (nums[i-1] + 1 != nums[i])
- goto fail_pfnvec;
- buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, nums[0]);
- goto out;
- }
- sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
- if (!sgt) {
- pr_err("failed to allocate sg table\n");
- ret = -ENOMEM;
- goto fail_pfnvec;
- }
- ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
- offset, size, GFP_KERNEL);
- if (ret) {
- pr_err("failed to initialize sg table\n");
- goto fail_sgt;
- }
- /*
- * No need to sync to the device, this will happen later when the
- * prepare() memop is called.
- */
- sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
- buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
- if (sgt->nents <= 0) {
- pr_err("failed to map scatterlist\n");
- ret = -EIO;
- goto fail_sgt_init;
- }
- contig_size = vb2_dc_get_contiguous_size(sgt);
- if (contig_size < size) {
- pr_err("contiguous mapping is too small %lu/%lu\n",
- contig_size, size);
- ret = -EFAULT;
- goto fail_map_sg;
- }
- buf->dma_addr = sg_dma_address(sgt->sgl);
- buf->dma_sgt = sgt;
- out:
- buf->size = size;
- return buf;
- fail_map_sg:
- dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
- buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
- fail_sgt_init:
- sg_free_table(sgt);
- fail_sgt:
- kfree(sgt);
- fail_pfnvec:
- vb2_destroy_framevec(vec);
- fail_buf:
- kfree(buf);
- return ERR_PTR(ret);
- }
- /*********************************************/
- /* callbacks for DMABUF buffers */
- /*********************************************/
- static int vb2_dc_map_dmabuf(void *mem_priv)
- {
- struct vb2_dc_buf *buf = mem_priv;
- struct sg_table *sgt;
- unsigned long contig_size;
- if (WARN_ON(!buf->db_attach)) {
- pr_err("trying to pin a non attached buffer\n");
- return -EINVAL;
- }
- if (WARN_ON(buf->dma_sgt)) {
- pr_err("dmabuf buffer is already pinned\n");
- return 0;
- }
- /* get the associated scatterlist for this buffer */
- sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
- if (IS_ERR(sgt)) {
- pr_err("Error getting dmabuf scatterlist\n");
- return -EINVAL;
- }
- /* checking if dmabuf is big enough to store contiguous chunk */
- contig_size = vb2_dc_get_contiguous_size(sgt);
- if (contig_size < buf->size) {
- pr_err("contiguous chunk is too small %lu/%lu b\n",
- contig_size, buf->size);
- dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
- return -EFAULT;
- }
- buf->dma_addr = sg_dma_address(sgt->sgl);
- buf->dma_sgt = sgt;
- buf->vaddr = NULL;
- return 0;
- }
- static void vb2_dc_unmap_dmabuf(void *mem_priv)
- {
- struct vb2_dc_buf *buf = mem_priv;
- struct sg_table *sgt = buf->dma_sgt;
- if (WARN_ON(!buf->db_attach)) {
- pr_err("trying to unpin a not attached buffer\n");
- return;
- }
- if (WARN_ON(!sgt)) {
- pr_err("dmabuf buffer is already unpinned\n");
- return;
- }
- if (buf->vaddr) {
- dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
- buf->vaddr = NULL;
- }
- dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
- buf->dma_addr = 0;
- buf->dma_sgt = NULL;
- }
- static void vb2_dc_detach_dmabuf(void *mem_priv)
- {
- struct vb2_dc_buf *buf = mem_priv;
- /* if vb2 works correctly you should never detach a mapped buffer */
- if (WARN_ON(buf->dma_addr))
- vb2_dc_unmap_dmabuf(buf);
- /* detach this attachment */
- dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
- kfree(buf);
- }
- static void *vb2_dc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
- unsigned long size, enum dma_data_direction dma_dir)
- {
- struct vb2_dc_buf *buf;
- struct dma_buf_attachment *dba;
- if (dbuf->size < size)
- return ERR_PTR(-EFAULT);
- if (WARN_ON(!dev))
- return ERR_PTR(-EINVAL);
- buf = kzalloc(sizeof(*buf), GFP_KERNEL);
- if (!buf)
- return ERR_PTR(-ENOMEM);
- buf->dev = dev;
- /* create attachment for the dmabuf with the user device */
- dba = dma_buf_attach(dbuf, buf->dev);
- if (IS_ERR(dba)) {
- pr_err("failed to attach dmabuf\n");
- kfree(buf);
- return dba;
- }
- buf->dma_dir = dma_dir;
- buf->size = size;
- buf->db_attach = dba;
- return buf;
- }
- /*********************************************/
- /* DMA CONTIG exported functions */
- /*********************************************/
- const struct vb2_mem_ops vb2_dma_contig_memops = {
- .alloc = vb2_dc_alloc,
- .put = vb2_dc_put,
- .get_dmabuf = vb2_dc_get_dmabuf,
- .cookie = vb2_dc_cookie,
- .vaddr = vb2_dc_vaddr,
- .mmap = vb2_dc_mmap,
- .get_userptr = vb2_dc_get_userptr,
- .put_userptr = vb2_dc_put_userptr,
- .prepare = vb2_dc_prepare,
- .finish = vb2_dc_finish,
- .map_dmabuf = vb2_dc_map_dmabuf,
- .unmap_dmabuf = vb2_dc_unmap_dmabuf,
- .attach_dmabuf = vb2_dc_attach_dmabuf,
- .detach_dmabuf = vb2_dc_detach_dmabuf,
- .num_users = vb2_dc_num_users,
- };
- EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
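- /*
- * Illustrative sketch, not part of this file: a capture driver typically
- * plugs vb2_dma_contig_memops into its vb2_queue before vb2_queue_init().
- * The my_dev, my_buffer and my_vb2_ops names below are hypothetical; the
- * queue fields are the standard videobuf2 ones.
- */
- #if 0
- static int my_setup_queue(struct my_dev *mdev, struct vb2_queue *q)
- {
- 	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- 	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
- 	q->dev = mdev->dev;	/* device used for DMA allocations */
- 	q->mem_ops = &vb2_dma_contig_memops;
- 	q->ops = &my_vb2_ops;
- 	q->buf_struct_size = sizeof(struct my_buffer);
- 	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- 	q->lock = &mdev->lock;
- 	return vb2_queue_init(q);
- }
- #endif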
- /**
- * vb2_dma_contig_set_max_seg_size() - configure DMA max segment size
- * @dev: device for configuring DMA parameters
- * @size: size of DMA max segment size to set
- *
- * To allow mapping the scatter-list into a single chunk in the DMA
- * address space, the device is required to have the DMA max segment
- * size parameter set to a value larger than the buffer size. Otherwise,
- * the DMA-mapping subsystem will split the mapping into max segment
- * size chunks. This function sets the DMA max segment size
- * parameter to let DMA-mapping map a buffer as a single chunk in DMA
- * address space.
- * This code assumes that the DMA-mapping subsystem will merge all
- * scatterlist segments if this is really possible (for example when
- * an IOMMU is available and enabled).
- * Ideally, this parameter should be set by the generic bus code, but it
- * is left at the default 64 KiB value due to historical limitations in
- * other subsystems (like limited USB host drivers) and there is no good
- * place to set it to the proper value.
- * This function should be called from drivers that are known to operate
- * on platforms with an IOMMU and that provide access to shared buffers
- * (either USERPTR or DMABUF). This should be done before initializing
- * the videobuf2 queue.
- */
- int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
- {
- if (!dev->dma_parms) {
- dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
- if (!dev->dma_parms)
- return -ENOMEM;
- }
- if (dma_get_max_seg_size(dev) < size)
- return dma_set_max_seg_size(dev, size);
- return 0;
- }
- EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);
- /*
- * vb2_dma_contig_clear_max_seg_size() - release resources for DMA parameters
- * @dev: device for configuring DMA parameters
- *
- * This function releases resources allocated to configure DMA parameters
- * (see vb2_dma_contig_set_max_seg_size() function). It should be called from
- * device drivers on driver remove.
- */
- void vb2_dma_contig_clear_max_seg_size(struct device *dev)
- {
- kfree(dev->dma_parms);
- dev->dma_parms = NULL;
- }
- EXPORT_SYMBOL_GPL(vb2_dma_contig_clear_max_seg_size);
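- /*
- * Illustrative sketch, not part of this file: on an IOMMU-capable platform
- * a driver raises the DMA max segment size in probe() and releases the
- * dma_parms allocation again in remove(), using the two helpers above.
- * The my_probe()/my_remove() names are hypothetical.
- */
- #if 0
- static int my_probe(struct platform_device *pdev)
- {
- 	int ret;
- 	/* let the IOMMU merge USERPTR/DMABUF scatterlists into one segment */
- 	ret = vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
- 	if (ret)
- 		return ret;
- 	/* ... register the video device and set up the vb2 queue ... */
- 	return 0;
- }
- static int my_remove(struct platform_device *pdev)
- {
- 	/* ... unregister the video device first ... */
- 	vb2_dma_contig_clear_max_seg_size(&pdev->dev);
- 	return 0;
- }
- #endif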
- MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
- MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
- MODULE_LICENSE("GPL");