/*
 * linux/drivers/video/fb_defio.c
 *
 * Copyright (C) 2006 Jaya Kumar
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/list.h>

/* to support deferred IO */
#include <linux/rmap.h>
#include <linux/pagemap.h>

static struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs)
{
	void *screen_base = (void __force *) info->screen_base;
	struct page *page;

	if (is_vmalloc_addr(screen_base + offs))
		page = vmalloc_to_page(screen_base + offs);
	else
		page = pfn_to_page((info->fix.smem_start + offs) >> PAGE_SHIFT);

	return page;
}

/* this is to find and return the fb page for the faulting offset,
   whether the framebuffer is vmalloc-ed or physically contiguous */
static int fb_deferred_io_fault(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	unsigned long offset;
	struct page *page;
	struct fb_info *info = vma->vm_private_data;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= info->fix.smem_len)
		return VM_FAULT_SIGBUS;

	page = fb_deferred_io_page(info, offset);
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);

	if (vma->vm_file)
		page->mapping = vma->vm_file->f_mapping;
	else
		printk(KERN_ERR "no mapping available\n");

	BUG_ON(!page->mapping);
	page->index = vmf->pgoff;

	vmf->page = page;
	return 0;
}

int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct fb_info *info = file->private_data;
	struct inode *inode = file_inode(file);
	int err = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (err)
		return err;

	/* Skip if deferred io is compiled-in but disabled on this fbdev */
	if (!info->fbdefio)
		return 0;

	mutex_lock(&inode->i_mutex);
	/* Kill off the delayed work */
	cancel_delayed_work_sync(&info->deferred_work);

	/* Run it immediately */
	schedule_delayed_work(&info->deferred_work, 0);
	mutex_unlock(&inode->i_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);

/* vm_ops->page_mkwrite handler */
static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
				  struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct fb_info *info = vma->vm_private_data;
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct page *cur;

	/* this is a callback we get when userspace first tries to
	write to the page. we schedule a workqueue. that workqueue
	will eventually mkclean the touched pages and execute the
	deferred framebuffer IO. then if userspace touches a page
	again, we repeat the same scheme */

	file_update_time(vma->vm_file);

	/* protect against the workqueue changing the page list */
	mutex_lock(&fbdefio->lock);

	/* first write in this cycle, notify the driver */
	if (fbdefio->first_io && list_empty(&fbdefio->pagelist))
		fbdefio->first_io(info);

	/*
	 * We want the page to remain locked from ->page_mkwrite until
	 * the PTE is marked dirty to avoid page_mkclean() being called
	 * before the PTE is updated, which would leave the page ignored
	 * by defio.
	 * Do this by locking the page here and informing the caller
	 * about it with VM_FAULT_LOCKED.
	 */
	lock_page(page);

	/* we loop through the pagelist before adding in order
	to keep the pagelist sorted */
	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
		/* this check is to catch the case where a new
		process could start writing to the same page
		through a new pte. this new access can cause the
		mkwrite even when the original ps's pte is marked
		writable */
		if (unlikely(cur == page))
			goto page_already_added;
		else if (cur->index > page->index)
			break;
	}

	list_add_tail(&page->lru, &cur->lru);

page_already_added:
	mutex_unlock(&fbdefio->lock);

	/* come back after delay to process the deferred IO */
	schedule_delayed_work(&info->deferred_work, fbdefio->delay);

	return VM_FAULT_LOCKED;
}

static const struct vm_operations_struct fb_deferred_io_vm_ops = {
	.fault		= fb_deferred_io_fault,
	.page_mkwrite	= fb_deferred_io_mkwrite,
};

/* these pages are never inserted into the pagecache radix tree, so just
   set the dirty bit and skip the default pagecache bookkeeping; defio
   tracks dirtied pages through its own pagelist instead */
static int fb_deferred_io_set_page_dirty(struct page *page)
{
	if (!PageDirty(page))
		SetPageDirty(page);
	return 0;
}

static const struct address_space_operations fb_deferred_io_aops = {
	.set_page_dirty = fb_deferred_io_set_page_dirty,
};

static int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	vma->vm_ops = &fb_deferred_io_vm_ops;
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	if (!(info->flags & FBINFO_VIRTFB))
		vma->vm_flags |= VM_IO;
	vma->vm_private_data = info;
	return 0;
}

/* workqueue callback */
static void fb_deferred_io_work(struct work_struct *work)
{
	struct fb_info *info = container_of(work, struct fb_info,
					    deferred_work.work);
	struct list_head *node, *next;
	struct page *cur;
	struct fb_deferred_io *fbdefio = info->fbdefio;

	/* here we mkclean the pages, then do all deferred IO */
	mutex_lock(&fbdefio->lock);
	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
		lock_page(cur);
		page_mkclean(cur);
		unlock_page(cur);
	}

	/* driver's callback with pagelist */
	fbdefio->deferred_io(info, &fbdefio->pagelist);

	/* clear the list */
	list_for_each_safe(node, next, &fbdefio->pagelist) {
		list_del(node);
	}
	mutex_unlock(&fbdefio->lock);
}

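/*
 * For illustration only, not part of the original file: a minimal sketch of
 * the deferred_io callback a driver would supply, which the workqueue above
 * invokes with the sorted pagelist. Each entry maps to one PAGE_SIZE chunk
 * of the framebuffer at page->index << PAGE_SHIFT. The names
 * "example_deferred_io" and "example_flush_range" are hypothetical
 * stand-ins for driver-specific code.
 */
static void example_flush_range(struct fb_info *info, unsigned long offset,
				unsigned int len); /* hypothetical hw I/O helper */

static void example_deferred_io(struct fb_info *info, struct list_head *pagelist)
{
	struct page *page;

	/* walk the sorted list of touched pages and push each one out;
	   pages stay coalesced in index order thanks to the sorted insert
	   done in fb_deferred_io_mkwrite() */
	list_for_each_entry(page, pagelist, lru)
		example_flush_range(info, page->index << PAGE_SHIFT, PAGE_SIZE);
}
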
void fb_deferred_io_init(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;

	BUG_ON(!fbdefio);
	mutex_init(&fbdefio->lock);
	info->fbops->fb_mmap = fb_deferred_io_mmap;
	INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
	INIT_LIST_HEAD(&fbdefio->pagelist);
	if (fbdefio->delay == 0) /* set a default of 1 s */
		fbdefio->delay = HZ;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_init);

void fb_deferred_io_open(struct fb_info *info,
			 struct inode *inode,
			 struct file *file)
{
	file->f_mapping->a_ops = &fb_deferred_io_aops;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_open);

void fb_deferred_io_cleanup(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct page *page;
	int i;

	BUG_ON(!fbdefio);
	cancel_delayed_work_sync(&info->deferred_work);

	/* clear out the mapping that we setup */
	for (i = 0 ; i < info->fix.smem_len; i += PAGE_SIZE) {
		page = fb_deferred_io_page(info, i);
		page->mapping = NULL;
	}

	info->fbops->fb_mmap = NULL;
	mutex_destroy(&fbdefio->lock);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
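
/*
 * For illustration only, not part of the original file: a rough sketch of
 * how a driver wires deferred I/O up at probe time. info->fbdefio must be
 * set before fb_deferred_io_init() runs, since init BUG()s on a NULL
 * fbdefio and reads the flush delay from it (0 selects the 1 s default,
 * HZ). "example_defio" and "example_probe_setup" are hypothetical names;
 * example_deferred_io is the callback sketched earlier.
 */
static struct fb_deferred_io example_defio = {
	.delay		= HZ / 10,		/* flush at most every 100 ms */
	.deferred_io	= example_deferred_io,
};

static int example_probe_setup(struct fb_info *info)
{
	/* shadow framebuffer in vmalloc space, flushed by deferred IO */
	info->screen_base = (char __force __iomem *) vmalloc(info->fix.smem_len);
	if (!info->screen_base)
		return -ENOMEM;

	info->flags |= FBINFO_VIRTFB;	/* vmalloc-ed, so no VM_IO on mmap */
	info->fbdefio = &example_defio;
	fb_deferred_io_init(info);	/* hooks fb_mmap and the workqueue */
	return 0;
}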