drm_gem_cma_helper.c 17 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578
  1. /*
  2. * drm gem CMA (contiguous memory allocator) helper functions
  3. *
  4. * Copyright (C) 2012 Sascha Hauer, Pengutronix
  5. *
  6. * Based on Samsung Exynos code
  7. *
  8. * Copyright (c) 2011 Samsung Electronics Co., Ltd.
  9. *
  10. * This program is free software; you can redistribute it and/or
  11. * modify it under the terms of the GNU General Public License
  12. * as published by the Free Software Foundation; either version 2
  13. * of the License, or (at your option) any later version.
  14. * This program is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  17. * GNU General Public License for more details.
  18. */
  19. #include <linux/mm.h>
  20. #include <linux/slab.h>
  21. #include <linux/mutex.h>
  22. #include <linux/export.h>
  23. #include <linux/dma-buf.h>
  24. #include <linux/dma-mapping.h>
  25. #include <drm/drmP.h>
  26. #include <drm/drm.h>
  27. #include <drm/drm_gem_cma_helper.h>
  28. #include <drm/drm_vma_manager.h>
  29. /**
  30. * DOC: cma helpers
  31. *
  32. * The Contiguous Memory Allocator reserves a pool of memory at early boot
  33. * that is used to service requests for large blocks of contiguous memory.
  34. *
  35. * The DRM GEM/CMA helpers use this allocator as a means to provide buffer
  36. * objects that are physically contiguous in memory. This is useful for
  37. * display drivers that are unable to map scattered buffers via an IOMMU.
  38. */
  39. /**
  40. * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
  41. * @drm: DRM device
  42. * @size: size of the object to allocate
  43. *
  44. * This function creates and initializes a GEM CMA object of the given size,
  45. * but doesn't allocate any memory to back the object.
  46. *
  47. * Returns:
  48. * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
  49. * error code on failure.
  50. */
  51. static struct drm_gem_cma_object *
  52. __drm_gem_cma_create(struct drm_device *drm, size_t size)
  53. {
  54. struct drm_gem_cma_object *cma_obj;
  55. struct drm_gem_object *gem_obj;
  56. int ret;
  57. if (drm->driver->gem_create_object)
  58. gem_obj = drm->driver->gem_create_object(drm, size);
  59. else
  60. gem_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
  61. if (!gem_obj)
  62. return ERR_PTR(-ENOMEM);
  63. cma_obj = container_of(gem_obj, struct drm_gem_cma_object, base);
  64. ret = drm_gem_object_init(drm, gem_obj, size);
  65. if (ret)
  66. goto error;
  67. ret = drm_gem_create_mmap_offset(gem_obj);
  68. if (ret) {
  69. drm_gem_object_release(gem_obj);
  70. goto error;
  71. }
  72. return cma_obj;
  73. error:
  74. kfree(cma_obj);
  75. return ERR_PTR(ret);
  76. }
  77. /**
  78. * drm_gem_cma_create - allocate an object with the given size
  79. * @drm: DRM device
  80. * @size: size of the object to allocate
  81. *
  82. * This function creates a CMA GEM object and allocates a contiguous chunk of
  83. * memory as backing store. The backing memory has the writecombine attribute
  84. * set.
  85. *
  86. * Returns:
  87. * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
  88. * error code on failure.
  89. */
  90. struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
  91. size_t size)
  92. {
  93. struct drm_gem_cma_object *cma_obj;
  94. int ret;
  95. size = round_up(size, PAGE_SIZE);
  96. cma_obj = __drm_gem_cma_create(drm, size);
  97. if (IS_ERR(cma_obj))
  98. return cma_obj;
  99. cma_obj->vaddr = dma_alloc_wc(drm->dev, size, &cma_obj->paddr,
  100. GFP_KERNEL | __GFP_NOWARN);
  101. if (!cma_obj->vaddr) {
  102. dev_dbg(drm->dev, "failed to allocate buffer with size %zu\n",
  103. size);
  104. ret = -ENOMEM;
  105. goto error;
  106. }
  107. return cma_obj;
  108. error:
  109. drm_gem_object_put_unlocked(&cma_obj->base);
  110. return ERR_PTR(ret);
  111. }
  112. EXPORT_SYMBOL_GPL(drm_gem_cma_create);
  113. /**
  114. * drm_gem_cma_create_with_handle - allocate an object with the given size and
  115. * return a GEM handle to it
  116. * @file_priv: DRM file-private structure to register the handle for
  117. * @drm: DRM device
  118. * @size: size of the object to allocate
  119. * @handle: return location for the GEM handle
  120. *
  121. * This function creates a CMA GEM object, allocating a physically contiguous
  122. * chunk of memory as backing store. The GEM object is then added to the list
  123. * of object associated with the given file and a handle to it is returned.
  124. *
  125. * Returns:
  126. * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
  127. * error code on failure.
  128. */
  129. static struct drm_gem_cma_object *
  130. drm_gem_cma_create_with_handle(struct drm_file *file_priv,
  131. struct drm_device *drm, size_t size,
  132. uint32_t *handle)
  133. {
  134. struct drm_gem_cma_object *cma_obj;
  135. struct drm_gem_object *gem_obj;
  136. int ret;
  137. cma_obj = drm_gem_cma_create(drm, size);
  138. if (IS_ERR(cma_obj))
  139. return cma_obj;
  140. gem_obj = &cma_obj->base;
  141. /*
  142. * allocate a id of idr table where the obj is registered
  143. * and handle has the id what user can see.
  144. */
  145. ret = drm_gem_handle_create(file_priv, gem_obj, handle);
  146. /* drop reference from allocate - handle holds it now. */
  147. drm_gem_object_put_unlocked(gem_obj);
  148. if (ret)
  149. return ERR_PTR(ret);
  150. return cma_obj;
  151. }
  152. /**
  153. * drm_gem_cma_free_object - free resources associated with a CMA GEM object
  154. * @gem_obj: GEM object to free
  155. *
  156. * This function frees the backing memory of the CMA GEM object, cleans up the
  157. * GEM object state and frees the memory used to store the object itself.
  158. * Drivers using the CMA helpers should set this as their
  159. * &drm_driver.gem_free_object_unlocked callback.
  160. */
  161. void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
  162. {
  163. struct drm_gem_cma_object *cma_obj;
  164. cma_obj = to_drm_gem_cma_obj(gem_obj);
  165. if (cma_obj->vaddr) {
  166. dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
  167. cma_obj->vaddr, cma_obj->paddr);
  168. } else if (gem_obj->import_attach) {
  169. drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
  170. }
  171. drm_gem_object_release(gem_obj);
  172. kfree(cma_obj);
  173. }
  174. EXPORT_SYMBOL_GPL(drm_gem_cma_free_object);
  175. /**
  176. * drm_gem_cma_dumb_create_internal - create a dumb buffer object
  177. * @file_priv: DRM file-private structure to create the dumb buffer for
  178. * @drm: DRM device
  179. * @args: IOCTL data
  180. *
  181. * This aligns the pitch and size arguments to the minimum required. This is
  182. * an internal helper that can be wrapped by a driver to account for hardware
  183. * with more specific alignment requirements. It should not be used directly
  184. * as their &drm_driver.dumb_create callback.
  185. *
  186. * Returns:
  187. * 0 on success or a negative error code on failure.
  188. */
  189. int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
  190. struct drm_device *drm,
  191. struct drm_mode_create_dumb *args)
  192. {
  193. unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
  194. struct drm_gem_cma_object *cma_obj;
  195. if (args->pitch < min_pitch)
  196. args->pitch = min_pitch;
  197. if (args->size < args->pitch * args->height)
  198. args->size = args->pitch * args->height;
  199. cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
  200. &args->handle);
  201. return PTR_ERR_OR_ZERO(cma_obj);
  202. }
  203. EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create_internal);
  204. /**
  205. * drm_gem_cma_dumb_create - create a dumb buffer object
  206. * @file_priv: DRM file-private structure to create the dumb buffer for
  207. * @drm: DRM device
  208. * @args: IOCTL data
  209. *
  210. * This function computes the pitch of the dumb buffer and rounds it up to an
  211. * integer number of bytes per pixel. Drivers for hardware that doesn't have
  212. * any additional restrictions on the pitch can directly use this function as
  213. * their &drm_driver.dumb_create callback.
  214. *
  215. * For hardware with additional restrictions, drivers can adjust the fields
  216. * set up by userspace and pass the IOCTL data along to the
  217. * drm_gem_cma_dumb_create_internal() function.
  218. *
  219. * Returns:
  220. * 0 on success or a negative error code on failure.
  221. */
  222. int drm_gem_cma_dumb_create(struct drm_file *file_priv,
  223. struct drm_device *drm,
  224. struct drm_mode_create_dumb *args)
  225. {
  226. struct drm_gem_cma_object *cma_obj;
  227. args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
  228. args->size = args->pitch * args->height;
  229. cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
  230. &args->handle);
  231. return PTR_ERR_OR_ZERO(cma_obj);
  232. }
  233. EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);
/*
 * VM operations for CMA GEM mappings: only reference-count bookkeeping is
 * needed, since drm_gem_cma_mmap_obj() maps the whole buffer up front and
 * no fault handler is required.
 */
const struct vm_operations_struct drm_gem_cma_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops);
/*
 * Map the whole CMA buffer into @vma. Shared by the file-operations mmap
 * path (drm_gem_cma_mmap) and the PRIME mmap path (drm_gem_cma_prime_mmap).
 */
static int drm_gem_cma_mmap_obj(struct drm_gem_cma_object *cma_obj,
				struct vm_area_struct *vma)
{
	int ret;

	/*
	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
	 * the whole buffer.
	 */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	ret = dma_mmap_wc(cma_obj->base.dev->dev, vma, cma_obj->vaddr,
			  cma_obj->paddr, vma->vm_end - vma->vm_start);
	if (ret)
		/* Drops the GEM reference the VMA took during setup. */
		drm_gem_vm_close(vma);

	return ret;
}
  256. /**
  257. * drm_gem_cma_mmap - memory-map a CMA GEM object
  258. * @filp: file object
  259. * @vma: VMA for the area to be mapped
  260. *
  261. * This function implements an augmented version of the GEM DRM file mmap
  262. * operation for CMA objects: In addition to the usual GEM VMA setup it
  263. * immediately faults in the entire object instead of using on-demaind
  264. * faulting. Drivers which employ the CMA helpers should use this function
  265. * as their ->mmap() handler in the DRM device file's file_operations
  266. * structure.
  267. *
  268. * Instead of directly referencing this function, drivers should use the
  269. * DEFINE_DRM_GEM_CMA_FOPS().macro.
  270. *
  271. * Returns:
  272. * 0 on success or a negative error code on failure.
  273. */
  274. int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
  275. {
  276. struct drm_gem_cma_object *cma_obj;
  277. struct drm_gem_object *gem_obj;
  278. int ret;
  279. ret = drm_gem_mmap(filp, vma);
  280. if (ret)
  281. return ret;
  282. gem_obj = vma->vm_private_data;
  283. cma_obj = to_drm_gem_cma_obj(gem_obj);
  284. return drm_gem_cma_mmap_obj(cma_obj, vma);
  285. }
  286. EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
#ifndef CONFIG_MMU
/**
 * drm_gem_cma_get_unmapped_area - propose address for mapping in noMMU cases
 * @filp: file object
 * @addr: memory address
 * @len: buffer size
 * @pgoff: page offset
 * @flags: memory flags
 *
 * This function is used in noMMU platforms to propose address mapping
 * for a given buffer.
 * It's intended to be used as a direct handler for the struct
 * &file_operations.get_unmapped_area operation.
 *
 * Returns:
 * mapping address on success or a negative error code on failure.
 */
unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
					    unsigned long addr,
					    unsigned long len,
					    unsigned long pgoff,
					    unsigned long flags)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *obj = NULL;
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_offset_node *node;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	/* Look up the GEM object backing this fake mmap offset. */
	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  pgoff,
						  len >> PAGE_SHIFT);
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}

	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put_unlocked(obj);
		return -EACCES;
	}

	cma_obj = to_drm_gem_cma_obj(obj);

	drm_gem_object_put_unlocked(obj);

	/* On noMMU the kernel virtual address doubles as the mmap address. */
	return cma_obj->vaddr ? (unsigned long)cma_obj->vaddr : -EINVAL;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_get_unmapped_area);
#endif
  349. /**
  350. * drm_gem_cma_print_info() - Print &drm_gem_cma_object info for debugfs
  351. * @p: DRM printer
  352. * @indent: Tab indentation level
  353. * @obj: GEM object
  354. *
  355. * This function can be used as the &drm_driver->gem_print_info callback.
  356. * It prints paddr and vaddr for use in e.g. debugfs output.
  357. */
  358. void drm_gem_cma_print_info(struct drm_printer *p, unsigned int indent,
  359. const struct drm_gem_object *obj)
  360. {
  361. const struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
  362. drm_printf_indent(p, indent, "paddr=%pad\n", &cma_obj->paddr);
  363. drm_printf_indent(p, indent, "vaddr=%p\n", cma_obj->vaddr);
  364. }
  365. EXPORT_SYMBOL(drm_gem_cma_print_info);
  366. /**
  367. * drm_gem_cma_prime_get_sg_table - provide a scatter/gather table of pinned
  368. * pages for a CMA GEM object
  369. * @obj: GEM object
  370. *
  371. * This function exports a scatter/gather table suitable for PRIME usage by
  372. * calling the standard DMA mapping API. Drivers using the CMA helpers should
  373. * set this as their &drm_driver.gem_prime_get_sg_table callback.
  374. *
  375. * Returns:
  376. * A pointer to the scatter/gather table of pinned pages or NULL on failure.
  377. */
  378. struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj)
  379. {
  380. struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
  381. struct sg_table *sgt;
  382. int ret;
  383. sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
  384. if (!sgt)
  385. return NULL;
  386. ret = dma_get_sgtable(obj->dev->dev, sgt, cma_obj->vaddr,
  387. cma_obj->paddr, obj->size);
  388. if (ret < 0)
  389. goto out;
  390. return sgt;
  391. out:
  392. kfree(sgt);
  393. return NULL;
  394. }
  395. EXPORT_SYMBOL_GPL(drm_gem_cma_prime_get_sg_table);
  396. /**
  397. * drm_gem_cma_prime_import_sg_table - produce a CMA GEM object from another
  398. * driver's scatter/gather table of pinned pages
  399. * @dev: device to import into
  400. * @attach: DMA-BUF attachment
  401. * @sgt: scatter/gather table of pinned pages
  402. *
  403. * This function imports a scatter/gather table exported via DMA-BUF by
  404. * another driver. Imported buffers must be physically contiguous in memory
  405. * (i.e. the scatter/gather table must contain a single entry). Drivers that
  406. * use the CMA helpers should set this as their
  407. * &drm_driver.gem_prime_import_sg_table callback.
  408. *
  409. * Returns:
  410. * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
  411. * error code on failure.
  412. */
  413. struct drm_gem_object *
  414. drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
  415. struct dma_buf_attachment *attach,
  416. struct sg_table *sgt)
  417. {
  418. struct drm_gem_cma_object *cma_obj;
  419. if (sgt->nents != 1) {
  420. /* check if the entries in the sg_table are contiguous */
  421. dma_addr_t next_addr = sg_dma_address(sgt->sgl);
  422. struct scatterlist *s;
  423. unsigned int i;
  424. for_each_sg(sgt->sgl, s, sgt->nents, i) {
  425. /*
  426. * sg_dma_address(s) is only valid for entries
  427. * that have sg_dma_len(s) != 0
  428. */
  429. if (!sg_dma_len(s))
  430. continue;
  431. if (sg_dma_address(s) != next_addr)
  432. return ERR_PTR(-EINVAL);
  433. next_addr = sg_dma_address(s) + sg_dma_len(s);
  434. }
  435. }
  436. /* Create a CMA GEM buffer. */
  437. cma_obj = __drm_gem_cma_create(dev, attach->dmabuf->size);
  438. if (IS_ERR(cma_obj))
  439. return ERR_CAST(cma_obj);
  440. cma_obj->paddr = sg_dma_address(sgt->sgl);
  441. cma_obj->sgt = sgt;
  442. DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &cma_obj->paddr, attach->dmabuf->size);
  443. return &cma_obj->base;
  444. }
  445. EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table);
  446. /**
  447. * drm_gem_cma_prime_mmap - memory-map an exported CMA GEM object
  448. * @obj: GEM object
  449. * @vma: VMA for the area to be mapped
  450. *
  451. * This function maps a buffer imported via DRM PRIME into a userspace
  452. * process's address space. Drivers that use the CMA helpers should set this
  453. * as their &drm_driver.gem_prime_mmap callback.
  454. *
  455. * Returns:
  456. * 0 on success or a negative error code on failure.
  457. */
  458. int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
  459. struct vm_area_struct *vma)
  460. {
  461. struct drm_gem_cma_object *cma_obj;
  462. int ret;
  463. ret = drm_gem_mmap_obj(obj, obj->size, vma);
  464. if (ret < 0)
  465. return ret;
  466. cma_obj = to_drm_gem_cma_obj(obj);
  467. return drm_gem_cma_mmap_obj(cma_obj, vma);
  468. }
  469. EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap);
  470. /**
  471. * drm_gem_cma_prime_vmap - map a CMA GEM object into the kernel's virtual
  472. * address space
  473. * @obj: GEM object
  474. *
  475. * This function maps a buffer exported via DRM PRIME into the kernel's
  476. * virtual address space. Since the CMA buffers are already mapped into the
  477. * kernel virtual address space this simply returns the cached virtual
  478. * address. Drivers using the CMA helpers should set this as their DRM
  479. * driver's &drm_driver.gem_prime_vmap callback.
  480. *
  481. * Returns:
  482. * The kernel virtual address of the CMA GEM object's backing store.
  483. */
  484. void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj)
  485. {
  486. struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
  487. return cma_obj->vaddr;
  488. }
  489. EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vmap);
  490. /**
  491. * drm_gem_cma_prime_vunmap - unmap a CMA GEM object from the kernel's virtual
  492. * address space
  493. * @obj: GEM object
  494. * @vaddr: kernel virtual address where the CMA GEM object was mapped
  495. *
  496. * This function removes a buffer exported via DRM PRIME from the kernel's
  497. * virtual address space. This is a no-op because CMA buffers cannot be
  498. * unmapped from kernel space. Drivers using the CMA helpers should set this
  499. * as their &drm_driver.gem_prime_vunmap callback.
  500. */
  501. void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
  502. {
  503. /* Nothing to do */
  504. }
  505. EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vunmap);