/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright (C) 2011 Linaro Limited. All rights reserved.
 * Copyright (C) 2021 XiaoMi, Inc.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and especially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/reservation.h>
#include <linux/mm.h>
#include <linux/mount.h>

#include <uapi/linux/dma-buf.h>
#include <uapi/linux/magic.h>

static inline int is_dma_buf_file(struct file *);

struct dma_buf_list {
	struct list_head head;
	struct mutex lock;
};

static struct dma_buf_list db_list;

static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	struct dma_buf *dmabuf;
	char name[DMA_BUF_NAME_LEN];
	size_t ret = 0;

	dmabuf = dentry->d_fsdata;
	spin_lock(&dmabuf->name_lock);
	if (dmabuf->name)
		ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
	spin_unlock(&dmabuf->name_lock);

	return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
			     dentry->d_name.name, ret > 0 ? name : "");
}

static void dma_buf_release(struct dentry *dentry)
{
	struct dma_buf *dmabuf;

	dmabuf = dentry->d_fsdata;

	BUG_ON(dmabuf->vmapping_counter);

	/*
	 * Any fences that a dma-buf poll can wait on should be signaled
	 * before releasing dma-buf. This is the responsibility of each
	 * driver that uses the reservation objects.
	 *
	 * If you hit this BUG() it means someone dropped their ref to the
	 * dma-buf while still having pending operation to the buffer.
	 */
	BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);

	dmabuf->ops->release(dmabuf);

	mutex_lock(&db_list.lock);
	list_del(&dmabuf->list_node);
	mutex_unlock(&db_list.lock);

	if (dmabuf->resv == (struct reservation_object *)&dmabuf[1])
		reservation_object_fini(dmabuf->resv);

	module_put(dmabuf->owner);
	kfree(dmabuf->name);
	kfree(dmabuf);
}

static const struct dentry_operations dma_buf_dentry_ops = {
	.d_dname = dmabuffs_dname,
	.d_release = dma_buf_release,
};

static struct vfsmount *dma_buf_mnt;

static struct dentry *dma_buf_fs_mount(struct file_system_type *fs_type,
		int flags, const char *name, void *data)
{
	return mount_pseudo(fs_type, "dmabuf:", NULL, &dma_buf_dentry_ops,
			    DMA_BUF_MAGIC);
}

static struct file_system_type dma_buf_fs_type = {
	.name = "dmabuf",
	.mount = dma_buf_fs_mount,
	.kill_sb = kill_anon_super,
};

static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	/* check for overflowing the buffer's size */
	if (vma->vm_pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	return dmabuf->ops->mmap(dmabuf, vma);
}

static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
{
	struct dma_buf *dmabuf;
	loff_t base;

	if (!is_dma_buf_file(file))
		return -EBADF;

	dmabuf = file->private_data;

	/* only support discovering the end of the buffer,
	   but also allow SEEK_SET to maintain the idiomatic
	   SEEK_END(0), SEEK_CUR(0) pattern */
	if (whence == SEEK_END)
		base = dmabuf->size;
	else if (whence == SEEK_SET)
		base = 0;
	else
		return -EINVAL;

	if (offset != 0)
		return -EINVAL;

	return base + offset;
}

/**
 * DOC: fence polling
 *
 * To support cross-device and cross-driver synchronization of buffer access
 * implicit fences (represented internally in the kernel with &struct dma_fence)
 * can be attached to a &dma_buf. The glue for that and a few related things are
 * provided in the &reservation_object structure.
 *
 * Userspace can query the state of these implicitly tracked fences using poll()
 * and related system calls:
 *
 * - Checking for POLLIN, i.e. read access, can be used to query the state of
 *   the most recent write or exclusive fence.
 *
 * - Checking for POLLOUT, i.e. write access, can be used to query the state of
 *   all attached fences, shared and exclusive ones.
 *
 * Note that this only signals the completion of the respective fences, i.e. the
 * DMA transfers are complete. Cache flushing and any other necessary
 * preparations before CPU access can begin still need to happen. An
 * illustrative poll() sketch follows this comment.
 */
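/*
 * Illustrative userspace sketch (not part of the original file): waiting for
 * all pending device access to a dma-buf by polling its file descriptor for
 * write access. "dmabuf_fd" is an assumption here; it would come from an
 * exporter via dma_buf_fd() or a subsystem-specific export ioctl.
 *
 *	struct pollfd pfd = { .fd = dmabuf_fd, .events = POLLOUT };
 *	int ret = poll(&pfd, 1, -1);
 *
 * A positive ret with (pfd.revents & POLLOUT) set means all shared and
 * exclusive fences have signalled; polling for POLLIN instead only waits for
 * the exclusive (write) fence. As noted above, fence completion does not
 * imply CPU-visible data; cache maintenance still goes through
 * DMA_BUF_IOCTL_SYNC.
 */
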
static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
	unsigned long flags;

	spin_lock_irqsave(&dcb->poll->lock, flags);
	wake_up_locked_poll(dcb->poll, dcb->active);
	dcb->active = 0;
	spin_unlock_irqrestore(&dcb->poll->lock, flags);
}

static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
{
	struct dma_buf *dmabuf;
	struct reservation_object *resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence_excl;
	unsigned long events;
	unsigned shared_count, seq;

	dmabuf = file->private_data;
	if (!dmabuf || !dmabuf->resv)
		return POLLERR;

	resv = dmabuf->resv;

	poll_wait(file, &dmabuf->poll, poll);

	events = poll_requested_events(poll) & (POLLIN | POLLOUT);
	if (!events)
		return 0;

retry:
	seq = read_seqcount_begin(&resv->seq);
	rcu_read_lock();

	fobj = rcu_dereference(resv->fence);
	if (fobj)
		shared_count = fobj->shared_count;
	else
		shared_count = 0;
	fence_excl = rcu_dereference(resv->fence_excl);
	if (read_seqcount_retry(&resv->seq, seq)) {
		rcu_read_unlock();
		goto retry;
	}

	if (fence_excl && (!(events & POLLOUT) || shared_count == 0)) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
		unsigned long pevents = POLLIN;

		if (shared_count == 0)
			pevents |= POLLOUT;

		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active) {
			dcb->active |= pevents;
			events &= ~pevents;
		} else
			dcb->active = pevents;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & pevents) {
			if (!dma_fence_get_rcu(fence_excl)) {
				/* force a recheck */
				events &= ~pevents;
				dma_buf_poll_cb(NULL, &dcb->cb);
			} else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
							   dma_buf_poll_cb)) {
				events &= ~pevents;
				dma_fence_put(fence_excl);
			} else {
				/*
				 * No callback queued, wake up any additional
				 * waiters.
				 */
				dma_fence_put(fence_excl);
				dma_buf_poll_cb(NULL, &dcb->cb);
			}
		}
	}

	if ((events & POLLOUT) && shared_count > 0) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
		int i;

		/* Only queue a new callback if no event has fired yet */
		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~POLLOUT;
		else
			dcb->active = POLLOUT;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (!(events & POLLOUT))
			goto out;

		for (i = 0; i < shared_count; ++i) {
			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);

			if (!dma_fence_get_rcu(fence)) {
				/*
				 * fence refcount dropped to zero, this means
				 * that fobj has been freed
				 *
				 * call dma_buf_poll_cb and force a recheck!
				 */
				events &= ~POLLOUT;
				dma_buf_poll_cb(NULL, &dcb->cb);
				break;
			}
			if (!dma_fence_add_callback(fence, &dcb->cb,
						    dma_buf_poll_cb)) {
				dma_fence_put(fence);
				events &= ~POLLOUT;
				break;
			}
			dma_fence_put(fence);
		}

		/* No callback queued, wake up any additional waiters. */
		if (i == shared_count)
			dma_buf_poll_cb(NULL, &dcb->cb);
	}

out:
	rcu_read_unlock();
	return events;
}

/**
 * dma_buf_set_name - Set a name on a specific dma_buf to track its usage.
 * The name of the dma-buf buffer can only be set when the dma-buf is not
 * attached to any devices. It could theoretically support changing the
 * name of the dma-buf if the same piece of memory is used for multiple
 * purposes between different devices.
 *
 * @dmabuf:	[in]	dmabuf buffer that will be renamed.
 * @buf:	[in]	A piece of userspace memory that contains the name of
 *			the dma-buf.
 *
 * Returns 0 on success. If the dma-buf buffer is already attached to
 * devices, return -EBUSY.
 *
 */
static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
{
	char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
	long ret = 0;

	if (IS_ERR(name))
		return PTR_ERR(name);

	mutex_lock(&dmabuf->lock);
	if (!list_empty(&dmabuf->attachments)) {
		ret = -EBUSY;
		kfree(name);
		goto out_unlock;
	}
	spin_lock(&dmabuf->name_lock);
	kfree(dmabuf->name);
	dmabuf->name = name;
	spin_unlock(&dmabuf->name_lock);

out_unlock:
	mutex_unlock(&dmabuf->lock);
	return ret;
}

static long dma_buf_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct dma_buf *dmabuf;
	struct dma_buf_sync sync;
	enum dma_data_direction direction;
	int ret;

	dmabuf = file->private_data;

	switch (cmd) {
	case DMA_BUF_IOCTL_SYNC:
		if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
			return -EFAULT;

		if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
			return -EINVAL;

		switch (sync.flags & DMA_BUF_SYNC_RW) {
		case DMA_BUF_SYNC_READ:
			direction = DMA_FROM_DEVICE;
			break;
		case DMA_BUF_SYNC_WRITE:
			direction = DMA_TO_DEVICE;
			break;
		case DMA_BUF_SYNC_RW:
			direction = DMA_BIDIRECTIONAL;
			break;
		default:
			return -EINVAL;
		}

		if (sync.flags & DMA_BUF_SYNC_END)
			ret = dma_buf_end_cpu_access(dmabuf, direction);
		else
			ret = dma_buf_begin_cpu_access(dmabuf, direction);

		return ret;

	case DMA_BUF_SET_NAME_A:
	case DMA_BUF_SET_NAME_B:
		return dma_buf_set_name(dmabuf, (const char __user *)arg);

	default:
		return -ENOTTY;
	}
}

static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct dma_buf *dmabuf = file->private_data;

	seq_printf(m, "size:\t%zu\n", dmabuf->size);
	/* Don't count the temporary reference taken inside procfs seq_show */
	seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
	seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
	spin_lock(&dmabuf->name_lock);
	if (dmabuf->name)
		seq_printf(m, "name:\t%s\n", dmabuf->name);
	spin_unlock(&dmabuf->name_lock);
}

static const struct file_operations dma_buf_fops = {
	.mmap		= dma_buf_mmap_internal,
	.llseek		= dma_buf_llseek,
	.poll		= dma_buf_poll,
	.unlocked_ioctl	= dma_buf_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= dma_buf_ioctl,
#endif
	.show_fdinfo	= dma_buf_show_fdinfo,
};

/*
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 */
static inline int is_dma_buf_file(struct file *file)
{
	return file->f_op == &dma_buf_fops;
}

static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
{
	static const struct qstr this = QSTR_INIT("dmabuf", 6);
	struct path path;
	struct file *file;
	struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);

	if (IS_ERR(inode))
		return ERR_CAST(inode);

	inode->i_size = dmabuf->size;
	inode_set_bytes(inode, dmabuf->size);

	path.dentry = d_alloc_pseudo(dma_buf_mnt->mnt_sb, &this);
	if (!path.dentry) {
		file = ERR_PTR(-ENOMEM);
		goto err_d_alloc;
	}
	path.mnt = mntget(dma_buf_mnt);

	d_instantiate(path.dentry, inode);

	file = alloc_file(&path, OPEN_FMODE(flags) | FMODE_LSEEK,
			  &dma_buf_fops);
	if (IS_ERR(file))
		goto err_alloc_file;
	file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
	file->private_data = dmabuf;
	file->f_path.dentry->d_fsdata = dmabuf;

	return file;

err_alloc_file:
	path_put(&path);
err_d_alloc:
	iput(inode);
	return file;
}

/**
 * DOC: dma buf device access
 *
 * For device DMA access to a shared DMA buffer the usual sequence of operations
 * is fairly simple:
 *
 * 1. The exporter defines its exporter instance using
 *    DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
 *    buffer object into a &dma_buf. It then exports that &dma_buf to userspace
 *    as a file descriptor by calling dma_buf_fd().
 *
 * 2. Userspace passes this file descriptor to all drivers it wants this buffer
 *    to share with: First the file descriptor is converted to a &dma_buf using
 *    dma_buf_get(). Then the buffer is attached to the device using
 *    dma_buf_attach().
 *
 *    Up to this stage the exporter is still free to migrate or reallocate the
 *    backing storage.
 *
 * 3. Once the buffer is attached to all devices userspace can initiate DMA
 *    access to the shared buffer. In the kernel this is done by calling
 *    dma_buf_map_attachment() and dma_buf_unmap_attachment().
 *
 * 4. Once a driver is done with a shared buffer it needs to call
 *    dma_buf_detach() (after cleaning up any mappings) and then release the
 *    reference acquired with dma_buf_get() by calling dma_buf_put().
 *
 * For the detailed semantics exporters are expected to implement see
 * &dma_buf_ops. An importer-side sketch of steps 2-4 follows this comment.
 */
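/*
 * Illustrative importer-side sketch (not part of the original file): a device
 * driver mapping a userspace-provided dma-buf fd for DMA, using only the
 * functions defined in this file. Error handling is trimmed; "dev" is the
 * importing struct device and "fd" is assumed to come from userspace.
 *
 *	struct dma_buf *dmabuf = dma_buf_get(fd);
 *	struct dma_buf_attachment *attach = dma_buf_attach(dmabuf, dev);
 *	struct sg_table *sgt =
 *		dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *
 *	... program the device with the addresses in sgt ...
 *
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, attach);
 *	dma_buf_put(dmabuf);
 */
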
/**
 * dma_buf_export - Creates a new dma_buf, and associates an anon file
 * with this buffer, so it can be exported.
 * Also connects the allocator-specific data and ops to the buffer.
 * Additionally, provides a name string for the exporter; useful in debugging.
 *
 * @exp_info:	[in]	holds all the export related information provided
 *			by the exporter. See &struct dma_buf_export_info
 *			for further details.
 *
 * Returns, on success, a newly created dma_buf object, which wraps the
 * supplied private data and operations for dma_buf_ops. On either missing
 * ops or an error allocating the struct dma_buf, a negative error is
 * returned, wrapped in an ERR_PTR().
 *
 * For most cases the easiest way to create @exp_info is through the
 * %DEFINE_DMA_BUF_EXPORT_INFO macro; an exporter-side sketch follows this
 * function.
 */
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
{
	struct dma_buf *dmabuf;
	struct reservation_object *resv = exp_info->resv;
	struct file *file;
	size_t alloc_size = sizeof(struct dma_buf);
	int ret;

	if (!exp_info->resv)
		alloc_size += sizeof(struct reservation_object);
	else
		/* prevent &dma_buf[1] == dma_buf->resv */
		alloc_size += 1;

	if (WARN_ON(!exp_info->priv
		    || !exp_info->ops
		    || !exp_info->ops->map_dma_buf
		    || !exp_info->ops->unmap_dma_buf
		    || !exp_info->ops->release
		    || !exp_info->ops->map_atomic
		    || !exp_info->ops->map
		    || !exp_info->ops->mmap)) {
		return ERR_PTR(-EINVAL);
	}

	if (!try_module_get(exp_info->owner))
		return ERR_PTR(-ENOENT);

	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
	if (!dmabuf) {
		ret = -ENOMEM;
		goto err_module;
	}

	atomic_set(&dmabuf->ref_dbg, 0);
	dmabuf->priv = exp_info->priv;
	dmabuf->ops = exp_info->ops;
	dmabuf->size = exp_info->size;
	dmabuf->exp_name = exp_info->exp_name;
	dmabuf->owner = exp_info->owner;
	spin_lock_init(&dmabuf->name_lock);
	init_waitqueue_head(&dmabuf->poll);
	dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;

	if (!resv) {
		resv = (struct reservation_object *)&dmabuf[1];
		reservation_object_init(resv);
	}
	dmabuf->resv = resv;

	file = dma_buf_getfile(dmabuf, exp_info->flags);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto err_dmabuf;
	}

	file->f_mode |= FMODE_LSEEK;
	dmabuf->file = file;

	mutex_init(&dmabuf->lock);
	INIT_LIST_HEAD(&dmabuf->attachments);

	mutex_lock(&db_list.lock);
	list_add(&dmabuf->list_node, &db_list.head);
	mutex_unlock(&db_list.lock);

	return dmabuf;

err_dmabuf:
	kfree(dmabuf);
err_module:
	module_put(exp_info->owner);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_export);
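/*
 * Illustrative exporter-side sketch (not part of the original file): wrapping
 * a driver-private buffer in a dma_buf and handing it to userspace as an fd.
 * "my_dmabuf_ops" and "my_buffer" are hypothetical exporter objects; the
 * DEFINE_DMA_BUF_EXPORT_INFO() macro fills in exp_name and owner.
 *
 *	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *	struct dma_buf *dmabuf;
 *	int fd;
 *
 *	exp_info.ops   = &my_dmabuf_ops;
 *	exp_info.size  = my_buffer->size;
 *	exp_info.flags = O_RDWR;
 *	exp_info.priv  = my_buffer;
 *
 *	dmabuf = dma_buf_export(&exp_info);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 *	if (fd < 0)
 *		dma_buf_put(dmabuf);
 *	return fd;
 */
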
/**
 * dma_buf_fd - returns a file descriptor for the given dma_buf
 * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
 * @flags:	[in]	flags to give to fd
 *
 * On success, returns an associated 'fd'. Else, returns error.
 */
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	int fd;

	if (!dmabuf || !dmabuf->file)
		return -EINVAL;

	fd = get_unused_fd_flags(flags);
	if (fd < 0)
		return fd;

	fd_install(fd, dmabuf->file);

	return fd;
}
EXPORT_SYMBOL_GPL(dma_buf_fd);

/**
 * dma_buf_get - returns the dma_buf structure related to an fd
 * @fd:	[in]	fd associated with the dma_buf to be returned
 *
 * On success, returns the dma_buf structure associated with an fd; uses
 * file's refcounting done by fget to increase refcount. returns ERR_PTR
 * otherwise.
 */
struct dma_buf *dma_buf_get(int fd)
{
	struct file *file;
	struct dma_buf *dmabuf;

	file = fget(fd);
	if (!file)
		return ERR_PTR(-EBADF);

	if (!is_dma_buf_file(file)) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	dmabuf = file->private_data;
	atomic_inc(&dmabuf->ref_dbg);

	return dmabuf;
}
EXPORT_SYMBOL_GPL(dma_buf_get);

/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf:	[in]	buffer to reduce refcount of
 *
 * Uses file's refcounting done implicitly by fput().
 *
 * If, as a result of this call, the refcount becomes 0, the 'release' file
 * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
 * in turn, and frees the memory allocated for dmabuf when exported.
 */
void dma_buf_put(struct dma_buf *dmabuf)
{
	if (WARN_ON(!dmabuf || !dmabuf->file))
		return;

	if (atomic_dec_return(&dmabuf->ref_dbg) < 0) {
		pr_info("[Warn] %s, ref underflow!\n", __func__);
		atomic_set(&dmabuf->ref_dbg, 0);
	}

	fput(dmabuf->file);
}
EXPORT_SYMBOL_GPL(dma_buf_put);

/**
 * dma_buf_attach - Add the device to dma_buf's attachments list; optionally,
 * calls attach() of dma_buf_ops to allow device-specific attach functionality
 * @dmabuf:	[in]	buffer to attach device to.
 * @dev:	[in]	device to be attached.
 *
 * Returns struct dma_buf_attachment pointer for this attachment. Attachments
 * must be cleaned up by calling dma_buf_detach().
 *
 * Returns:
 *
 * A pointer to newly created &dma_buf_attachment on success, or a negative
 * error code wrapped into a pointer on failure.
 *
 * Note that this can fail if the backing storage of @dmabuf is in a place not
 * accessible to @dev, and cannot be moved to a more suitable place. This is
 * indicated with the error code -EBUSY.
 */
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev)
{
	struct dma_buf_attachment *attach;
	int ret;

	if (WARN_ON(!dmabuf || !dev))
		return ERR_PTR(-EINVAL);

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return ERR_PTR(-ENOMEM);

	attach->dev = dev;
	attach->dmabuf = dmabuf;

	mutex_lock(&dmabuf->lock);

	if (dmabuf->ops->attach) {
		ret = dmabuf->ops->attach(dmabuf, dev, attach);
		if (ret)
			goto err_attach;
	}
	list_add(&attach->node, &dmabuf->attachments);

	mutex_unlock(&dmabuf->lock);

	return attach;

err_attach:
	kfree(attach);
	mutex_unlock(&dmabuf->lock);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_attach);

/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
 * optionally calls detach() of dma_buf_ops for device-specific detach
 * @dmabuf:	[in]	buffer to detach from.
 * @attach:	[in]	attachment to be detached; is free'd after this call.
 *
 * Clean up a device attachment obtained by calling dma_buf_attach().
 */
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
	if (WARN_ON(!dmabuf || !attach))
		return;

	mutex_lock(&dmabuf->lock);
	list_del(&attach->node);
	if (dmabuf->ops->detach)
		dmabuf->ops->detach(dmabuf, attach);

	mutex_unlock(&dmabuf->lock);
	kfree(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_detach);

/**
 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:	[in]	attachment whose scatterlist is to be returned
 * @direction:	[in]	direction of DMA transfer
 *
 * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
 * on error. May return -EINTR if it is interrupted by a signal.
 *
 * A mapping must be unmapped again using dma_buf_unmap_attachment(). Note that
 * the underlying backing storage is pinned for as long as a mapping exists,
 * therefore users/importers should not hold onto a mapping for undue amounts of
 * time.
 */
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
					enum dma_data_direction direction)
{
	struct sg_table *sg_table;

	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf))
		return ERR_PTR(-EINVAL);

	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
	if (!sg_table)
		sg_table = ERR_PTR(-ENOMEM);

	return sg_table;
}
EXPORT_SYMBOL_GPL(dma_buf_map_attachment);

/**
 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
 * deallocate the associated scatterlist. Is a wrapper for unmap_dma_buf() of
 * dma_buf_ops.
 * @attach:	[in]	attachment to unmap buffer from
 * @sg_table:	[in]	scatterlist info of the buffer to unmap
 * @direction:	[in]	direction of DMA transfer
 *
 * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
 */
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
			      struct sg_table *sg_table,
			      enum dma_data_direction direction)
{
	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
		return;

	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
					   direction);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);

/**
 * DOC: cpu access
 *
 * There are multiple reasons for supporting CPU access to a dma buffer object:
 *
 * - Fallback operations in the kernel, for example when a device is connected
 *   over USB and the kernel needs to shuffle the data around first before
 *   sending it away. Cache coherency is handled by bracketing any transactions
 *   with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access().
 *
 *   To support dma_buf objects residing in highmem cpu access is page-based
 *   using an api similar to kmap. Accessing a dma_buf is done in aligned chunks
 *   of PAGE_SIZE size. Before accessing a chunk it needs to be mapped, which
 *   returns a pointer in kernel virtual address space. Afterwards the chunk
 *   needs to be unmapped again. There is no limit on how often a given chunk
 *   can be mapped and unmapped, i.e. the importer does not need to call
 *   begin_cpu_access again before mapping the same chunk again.
 *
 *   Interfaces::
 *      void \*dma_buf_kmap(struct dma_buf \*, unsigned long);
 *      void dma_buf_kunmap(struct dma_buf \*, unsigned long, void \*);
 *
 *   There are also atomic variants of these interfaces. Like for kmap they
 *   facilitate non-blocking fast-paths. Neither the importer nor the exporter
 *   (in the callback) is allowed to block when using these.
 *
 *   Interfaces::
 *      void \*dma_buf_kmap_atomic(struct dma_buf \*, unsigned long);
 *      void dma_buf_kunmap_atomic(struct dma_buf \*, unsigned long, void \*);
 *
 *   For importers all the restrictions of using kmap apply, like the limited
 *   supply of kmap_atomic slots. Hence an importer shall only hold onto at
 *   most 2 atomic dma_buf kmaps at the same time (in any given process
 *   context).
 *
 *   dma_buf kmap calls outside of the range specified in begin_cpu_access are
 *   undefined. If the range is not PAGE_SIZE aligned, kmap needs to succeed on
 *   the partial chunks at the beginning and end but may return stale or bogus
 *   data outside of the range (in these partial chunks).
 *
 *   Note that these calls need to always succeed. The exporter needs to
 *   complete any preparations that might fail in begin_cpu_access.
 *
 *   For some cases the overhead of kmap can be too high, so a vmap interface
 *   is introduced. This interface should be used very carefully, as vmalloc
 *   space is a limited resource on many architectures.
 *
 *   Interfaces::
 *      void \*dma_buf_vmap(struct dma_buf \*dmabuf)
 *      void dma_buf_vunmap(struct dma_buf \*dmabuf, void \*vaddr)
 *
 *   The vmap call can fail if there is no vmap support in the exporter, or if
 *   it runs out of vmalloc space. Fallback to kmap should be implemented. Note
 *   that the dma-buf layer keeps a reference count for all vmap access and
 *   calls down into the exporter's vmap function only when no vmapping exists,
 *   and only unmaps it once. Protection against concurrent vmap/vunmap calls is
 *   provided by taking the dma_buf->lock mutex.
 *
 * - For full compatibility on the importer side with existing userspace
 *   interfaces, which might already support mmap'ing buffers. This is needed in
 *   many processing pipelines (e.g. feeding a software rendered image into a
 *   hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
 *   framework already supported this and for DMA buffer file descriptors to
 *   replace ION buffers mmap support was needed.
 *
 *   There are no special interfaces, userspace simply calls mmap on the dma-buf
 *   fd. But like for CPU access there's a need to bracket the actual access,
 *   which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
 *   DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
 *   be restarted.
 *
 *   Some systems might need some sort of cache coherency management e.g. when
 *   CPU and GPU domains are being accessed through dma-buf at the same time.
 *   To circumvent this problem there are begin/end coherency markers, that
 *   forward directly to existing dma-buf device drivers' vfunc hooks. Userspace
 *   can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The
 *   sequence would be used like the following (see also the sketch after this
 *   comment):
 *
 *     - mmap dma-buf fd
 *     - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write
 *       to mmap area 3. SYNC_END ioctl. This can be repeated as often as you
 *       want (with the new data being consumed by say the GPU or the scanout
 *       device)
 *     - munmap once you don't need the buffer any more
 *
 *   For correctness and optimal performance, it is always required to use
 *   SYNC_START and SYNC_END before and after, respectively, when accessing the
 *   mapped address. Userspace cannot rely on coherent access, even when there
 *   are systems where it just works without calling these ioctls.
 *
 * - And as a CPU fallback in userspace processing pipelines.
 *
 *   Similar to the motivation for kernel cpu access it is again important that
 *   the userspace code of a given importing subsystem can use the same
 *   interfaces with an imported dma-buf buffer object as with a native buffer
 *   object. This is especially important for drm where the userspace part of
 *   contemporary OpenGL, X, and other drivers is huge, and reworking them to
 *   use a different way to mmap a buffer is rather invasive.
 *
 *   The assumption in the current dma-buf interfaces is that redirecting the
 *   initial mmap is all that's needed. A survey of some of the existing
 *   subsystems shows that no driver seems to do any nefarious thing like
 *   syncing up with outstanding asynchronous processing on the device or
 *   allocating special resources at fault time. So hopefully this is good
 *   enough, since adding interfaces to intercept pagefaults and allow pte
 *   shootdowns would increase the complexity quite a bit.
 *
 *   Interface::
 *      int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
 *			 unsigned long);
 *
 *   If the importing subsystem simply provides a special-purpose mmap call to
 *   set up a mapping in userspace, calling do_mmap with dma_buf->file will
 *   equally achieve that for a dma-buf object.
 */
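/*
 * Illustrative userspace sketch (not part of the original file): one CPU
 * write cycle into an mmap'ed dma-buf, bracketed with DMA_BUF_IOCTL_SYNC as
 * described above. "dmabuf_fd", "src" and "len" are assumptions describing a
 * buffer obtained elsewhere.
 *
 *	struct dma_buf_sync sync = { 0 };
 *	void *ptr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 dmabuf_fd, 0);
 *
 *	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *
 *	memcpy(ptr, src, len);
 *
 *	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *
 *	munmap(ptr, len);
 *
 * A robust implementation retries the ioctl on -EAGAIN or -EINTR, as noted in
 * the DOC block above.
 */
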
static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	bool write = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_TO_DEVICE);
	struct reservation_object *resv = dmabuf->resv;
	long ret;

	/* Wait on any implicit rendering fences */
	ret = reservation_object_wait_timeout_rcu(resv, write, true,
						  MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;

	return 0;
}

/**
 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
 * preparations. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to prepare cpu access for.
 * @direction:	[in]	direction of cpu access.
 *
 * After the cpu access is complete the caller should call
 * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
 * it guaranteed to be coherent with other DMA access.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
			     enum dma_data_direction direction)
{
	int ret = 0;

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	if (dmabuf->ops->begin_cpu_access)
		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);

	/* Ensure that all fences are waited upon - but we first allow
	 * the native handler the chance to do so more efficiently if it
	 * chooses. A double invocation here will be a reasonably cheap no-op.
	 */
	if (ret == 0)
		ret = __dma_buf_begin_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);

/**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
 * actions. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to complete cpu access for.
 * @direction:	[in]	direction of cpu access.
 *
 * This terminates CPU access started with dma_buf_begin_cpu_access().
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
			   enum dma_data_direction direction)
{
	int ret = 0;

	WARN_ON(!dmabuf);

	if (dmabuf->ops->end_cpu_access)
		ret = dmabuf->ops->end_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);

/**
 * dma_buf_kmap_atomic - Map a page of the buffer object into kernel address
 * space. The same restrictions as for kmap_atomic and friends apply.
 * @dmabuf:	[in]	buffer to map page from.
 * @page_num:	[in]	page in PAGE_SIZE units to map.
 *
 * This call must always succeed, any necessary preparations that might fail
 * need to be done in begin_cpu_access.
 */
void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num)
{
	WARN_ON(!dmabuf);

	return dmabuf->ops->map_atomic(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic);

/**
 * dma_buf_kunmap_atomic - Unmap a page obtained by dma_buf_kmap_atomic.
 * @dmabuf:	[in]	buffer to unmap page from.
 * @page_num:	[in]	page in PAGE_SIZE units to unmap.
 * @vaddr:	[in]	kernel space pointer obtained from dma_buf_kmap_atomic.
 *
 * This call must always succeed.
 */
void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num,
			   void *vaddr)
{
	WARN_ON(!dmabuf);

	if (dmabuf->ops->unmap_atomic)
		dmabuf->ops->unmap_atomic(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic);

/**
 * dma_buf_kmap - Map a page of the buffer object into kernel address space. The
 * same restrictions as for kmap and friends apply.
 * @dmabuf:	[in]	buffer to map page from.
 * @page_num:	[in]	page in PAGE_SIZE units to map.
 *
 * This call must always succeed, any necessary preparations that might fail
 * need to be done in begin_cpu_access.
 */
void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
{
	WARN_ON(!dmabuf);

	return dmabuf->ops->map(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap);

/**
 * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap.
 * @dmabuf:	[in]	buffer to unmap page from.
 * @page_num:	[in]	page in PAGE_SIZE units to unmap.
 * @vaddr:	[in]	kernel space pointer obtained from dma_buf_kmap.
 *
 * This call must always succeed.
 */
void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
		    void *vaddr)
{
	WARN_ON(!dmabuf);

	if (dmabuf->ops->unmap)
		dmabuf->ops->unmap(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap);

/**
 * dma_buf_mmap - Set up a userspace mmap with the given vma
 * @dmabuf:	[in]	buffer that should back the vma
 * @vma:	[in]	vma for the mmap
 * @pgoff:	[in]	offset in pages where this mmap should start within the
 *			dma-buf buffer.
 *
 * This function adjusts the passed in vma so that it points at the file of the
 * dma_buf operation. It also adjusts the starting pgoff and does bounds
 * checking on the size of the vma. Then it calls the exporter's mmap function
 * to set up the mapping.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
		 unsigned long pgoff)
{
	struct file *oldfile;
	int ret;

	if (WARN_ON(!dmabuf || !vma))
		return -EINVAL;

	/* check for offset overflow */
	if (pgoff + vma_pages(vma) < pgoff)
		return -EOVERFLOW;

	/* check for overflowing the buffer's size */
	if (pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	/* readjust the vma */
	get_file(dmabuf->file);
	oldfile = vma->vm_file;
	vma->vm_file = dmabuf->file;
	vma->vm_pgoff = pgoff;

	ret = dmabuf->ops->mmap(dmabuf, vma);
	if (ret) {
		/* restore old parameters on failure */
		vma->vm_file = oldfile;
		fput(dmabuf->file);
	} else {
		if (oldfile)
			fput(oldfile);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_mmap);
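/*
 * Illustrative importer-side sketch (not part of the original file): a driver
 * that wants its own device file to hand out mappings of an imported dma-buf
 * can simply forward its mmap file operation to dma_buf_mmap(). "struct
 * my_dev" and my_mmap() are hypothetical.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_dev *dev = file->private_data;
 *
 *		return dma_buf_mmap(dev->dmabuf, vma, 0);
 *	}
 */
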
/**
 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @dmabuf:	[in]	buffer to vmap
 *
 * This call may fail due to lack of virtual mapping address space.
 * These calls are optional in drivers. The intended use for them
 * is mapping high-use objects linearly into kernel space.
 * Please attempt to use kmap/kunmap before thinking about these interfaces.
 *
 * Returns NULL on error.
 */
void *dma_buf_vmap(struct dma_buf *dmabuf)
{
	void *ptr;

	if (WARN_ON(!dmabuf))
		return NULL;

	if (!dmabuf->ops->vmap)
		return NULL;

	mutex_lock(&dmabuf->lock);
	if (dmabuf->vmapping_counter) {
		dmabuf->vmapping_counter++;
		BUG_ON(!dmabuf->vmap_ptr);
		ptr = dmabuf->vmap_ptr;
		goto out_unlock;
	}

	BUG_ON(dmabuf->vmap_ptr);

	ptr = dmabuf->ops->vmap(dmabuf);
	if (WARN_ON_ONCE(IS_ERR(ptr)))
		ptr = NULL;
	if (!ptr)
		goto out_unlock;

	dmabuf->vmap_ptr = ptr;
	dmabuf->vmapping_counter = 1;

out_unlock:
	mutex_unlock(&dmabuf->lock);
	return ptr;
}
EXPORT_SYMBOL_GPL(dma_buf_vmap);

/**
 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf:	[in]	buffer to vunmap
 * @vaddr:	[in]	vmap to vunmap
 */
void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
	if (WARN_ON(!dmabuf))
		return;

	BUG_ON(!dmabuf->vmap_ptr);
	BUG_ON(dmabuf->vmapping_counter == 0);
	BUG_ON(dmabuf->vmap_ptr != vaddr);

	mutex_lock(&dmabuf->lock);
	if (--dmabuf->vmapping_counter == 0) {
		if (dmabuf->ops->vunmap)
			dmabuf->ops->vunmap(dmabuf, vaddr);
		dmabuf->vmap_ptr = NULL;
	}
	mutex_unlock(&dmabuf->lock);
}
EXPORT_SYMBOL_GPL(dma_buf_vunmap);

#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
	int ret;
	struct dma_buf *buf_obj;
	struct dma_buf_attachment *attach_obj;
	struct reservation_object *robj;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	unsigned seq;
	int count = 0, attach_count, shared_count, i;
	size_t size = 0;

	kasan_disable_current();

	ret = mutex_lock_interruptible(&db_list.lock);
	if (ret)
		return ret;

	seq_puts(s, "\nDma-buf Objects:\n");
	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\n",
		   "size", "flags", "mode", "count", "ino");

	list_for_each_entry(buf_obj, &db_list.head, list_node) {
		ret = mutex_lock_interruptible(&buf_obj->lock);

		if (ret) {
			seq_puts(s,
				 "\tERROR locking buffer object: skipping\n");
			continue;
		}
		if (file_inode(buf_obj->file) == NULL) {
			/* drop the object lock taken above before skipping */
			mutex_unlock(&buf_obj->lock);
			continue;
		}
		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
			   buf_obj->size,
			   buf_obj->file->f_flags, buf_obj->file->f_mode,
			   file_count(buf_obj->file),
			   buf_obj->exp_name,
			   file_inode(buf_obj->file)->i_ino,
			   buf_obj->name ?: "");

		robj = buf_obj->resv;
		while (true) {
			seq = read_seqcount_begin(&robj->seq);
			rcu_read_lock();
			fobj = rcu_dereference(robj->fence);
			shared_count = fobj ? fobj->shared_count : 0;
			fence = rcu_dereference(robj->fence_excl);
			if (!read_seqcount_retry(&robj->seq, seq))
				break;
			rcu_read_unlock();
		}

		if (fence)
			seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
				   fence->ops->get_driver_name(fence),
				   fence->ops->get_timeline_name(fence),
				   dma_fence_is_signaled(fence) ? "" : "un");
		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			if (!dma_fence_get_rcu(fence))
				continue;
			seq_printf(s, "\tShared fence: %s %s %ssignalled\n",
				   fence->ops->get_driver_name(fence),
				   fence->ops->get_timeline_name(fence),
				   dma_fence_is_signaled(fence) ? "" : "un");
			dma_fence_put(fence);
		}
		rcu_read_unlock();

		seq_puts(s, "\tAttached Devices:\n");
		attach_count = 0;

		list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
			seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
			attach_count++;
		}

		seq_printf(s, "Total %d devices attached\n\n",
			   attach_count);

		count++;
		size += buf_obj->size;
		mutex_unlock(&buf_obj->lock);
	}

	seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);

	mutex_unlock(&db_list.lock);
	kasan_enable_current();
	return 0;
}

static int dma_buf_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, dma_buf_debug_show, NULL);
}

static const struct file_operations dma_buf_debug_fops = {
	.open    = dma_buf_debug_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static struct dentry *dma_buf_debugfs_dir;

static int dma_buf_init_debugfs(void)
{
	struct dentry *d;
	int err = 0;

	d = debugfs_create_dir("dma_buf", NULL);
	if (IS_ERR(d))
		return PTR_ERR(d);

	dma_buf_debugfs_dir = d;

	d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
				NULL, &dma_buf_debug_fops);
	if (IS_ERR(d)) {
		pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
		debugfs_remove_recursive(dma_buf_debugfs_dir);
		dma_buf_debugfs_dir = NULL;
		err = PTR_ERR(d);
	}

	return err;
}

static void dma_buf_uninit_debugfs(void)
{
	if (dma_buf_debugfs_dir)
		debugfs_remove_recursive(dma_buf_debugfs_dir);
}
#else
static inline int dma_buf_init_debugfs(void)
{
	return 0;
}
static inline void dma_buf_uninit_debugfs(void)
{
}
#endif

static int __init dma_buf_init(void)
{
	dma_buf_mnt = kern_mount(&dma_buf_fs_type);
	if (IS_ERR(dma_buf_mnt))
		return PTR_ERR(dma_buf_mnt);

	mutex_init(&db_list.lock);
	INIT_LIST_HEAD(&db_list.head);
	dma_buf_init_debugfs();
	return 0;
}
subsys_initcall(dma_buf_init);

static void __exit dma_buf_deinit(void)
{
	dma_buf_uninit_debugfs();
	kern_unmount(dma_buf_mnt);
}
__exitcall(dma_buf_deinit);