udl_gem.c

/*
 * Copyright (C) 2012 Red Hat
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License v2. See the file COPYING in the main directory of this archive for
 * more details.
 */

#include <drm/drmP.h>
#include "udl_drv.h"
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>

struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
					    size_t size)
{
	struct udl_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
		kfree(obj);
		return NULL;
	}

	obj->flags = UDL_BO_CACHEABLE;
	return obj;
}
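
/*
 * Allocate a GEM object of the requested size and create a userspace
 * handle for it; once the handle holds a reference, the local reference
 * is dropped.
 */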
static int
udl_gem_create(struct drm_file *file,
	       struct drm_device *dev,
	       uint64_t size,
	       uint32_t *handle_p)
{
	struct udl_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);

	obj = udl_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->base);
		kfree(obj);
		return ret;
	}

	drm_gem_object_put_unlocked(&obj->base);
	*handle_p = handle;
	return 0;
}
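
/*
 * Set the page protection of a userspace mapping from the BO flags:
 * cacheable, write-combined, or uncached as the fallback.
 */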
static void update_vm_cache_attr(struct udl_gem_object *obj,
				 struct vm_area_struct *vma)
{
	DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);

	/* non-cacheable as default. */
	if (obj->flags & UDL_BO_CACHEABLE) {
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	} else if (obj->flags & UDL_BO_WC) {
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else {
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	}
}

int udl_dumb_create(struct drm_file *file,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	args->pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
	args->size = args->pitch * args->height;
	return udl_gem_create(file, dev,
			      args->size, &args->handle);
}
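
/*
 * mmap on the DRM fd: drm_gem_mmap() sets up the VMA, the fault handler
 * below inserts individual pages, so switch the VMA from VM_PFNMAP to
 * VM_MIXEDMAP and apply the BO's caching attributes.
 */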
int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	update_vm_cache_attr(to_udl_bo(vma->vm_private_data), vma);

	return ret;
}
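
/*
 * Fault handler: look up the backing page for the faulting address and
 * insert it into the VMA; SIGBUS if the pages have not been pinned.
 */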
vm_fault_t udl_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
	struct page *page;
	unsigned int page_offset;

	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	if (!obj->pages)
		return VM_FAULT_SIGBUS;

	page = obj->pages[page_offset];
	return vmf_insert_page(vma, vmf->address, page);
}

int udl_gem_get_pages(struct udl_gem_object *obj)
{
	struct page **pages;

	if (obj->pages)
		return 0;

	pages = drm_gem_get_pages(&obj->base);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	obj->pages = pages;

	return 0;
}
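
/*
 * Release the page array.  Pages of an imported dma-buf are owned by the
 * exporter, so only the local array is freed in that case.
 */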
void udl_gem_put_pages(struct udl_gem_object *obj)
{
	if (obj->base.import_attach) {
		kvfree(obj->pages);
		obj->pages = NULL;
		return;
	}

	drm_gem_put_pages(&obj->base, obj->pages, false, false);
	obj->pages = NULL;
}
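
/*
 * Map the object into kernel address space: through dma_buf_vmap() for
 * imported buffers, otherwise by pinning the shmem pages and vmap()ing them.
 */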
int udl_gem_vmap(struct udl_gem_object *obj)
{
	int page_count = obj->base.size / PAGE_SIZE;
	int ret;

	if (obj->base.import_attach) {
		obj->vmapping = dma_buf_vmap(obj->base.import_attach->dmabuf);
		if (!obj->vmapping)
			return -ENOMEM;
		return 0;
	}

	ret = udl_gem_get_pages(obj);
	if (ret)
		return ret;

	obj->vmapping = vmap(obj->pages, page_count, 0, PAGE_KERNEL);
	if (!obj->vmapping)
		return -ENOMEM;
	return 0;
}

void udl_gem_vunmap(struct udl_gem_object *obj)
{
	if (obj->base.import_attach) {
		dma_buf_vunmap(obj->base.import_attach->dmabuf, obj->vmapping);
		return;
	}

	vunmap(obj->vmapping);

	udl_gem_put_pages(obj);
}
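
/*
 * GEM free callback: drop any kernel mapping, release a prime import,
 * unpin the pages and remove the mmap offset.
 */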
void udl_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct udl_gem_object *obj = to_udl_bo(gem_obj);

	if (obj->vmapping)
		udl_gem_vunmap(obj);

	if (gem_obj->import_attach) {
		drm_prime_gem_destroy(gem_obj, obj->sg);
		put_device(gem_obj->dev->dev);
	}

	if (obj->pages)
		udl_gem_put_pages(obj);

	drm_gem_free_mmap_offset(gem_obj);
}

/*
 * The dumb interface doesn't work with the straight GEM mmap interface;
 * it expects mmap to be done on the DRM fd, like normal.
 */
int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
		 uint32_t handle, uint64_t *offset)
{
	struct udl_gem_object *gobj;
	struct drm_gem_object *obj;
	struct udl_device *udl = to_udl(dev);
	int ret = 0;

	mutex_lock(&udl->gem_lock);
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}
	gobj = to_udl_bo(obj);

	ret = udl_gem_get_pages(gobj);
	if (ret)
		goto out;
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&gobj->base.vma_node);

out:
	drm_gem_object_put_unlocked(&gobj->base);
unlock:
	mutex_unlock(&udl->gem_lock);
	return ret;
}