prio_tree.c 6.3 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209
  1. /*
  2. * mm/prio_tree.c - priority search tree for mapping->i_mmap
  3. *
  4. * Copyright (C) 2004, Rajesh Venkatasubramanian <vrajesh@umich.edu>
  5. *
  6. * This file is released under the GPL v2.
  7. *
  8. * Based on the radix priority search tree proposed by Edward M. McCreight
  9. * SIAM Journal of Computing, vol. 14, no.2, pages 257-276, May 1985
  10. *
  11. * 02Feb2004 Initial version
  12. */
  13. #include <linux/mm.h>
  14. #include <linux/prio_tree.h>
  15. #include <linux/prefetch.h>
  16. /*
  17. * See lib/prio_tree.c for details on the general radix priority search tree
  18. * code.
  19. */
  20. /*
  21. * The following #defines are mirrored from lib/prio_tree.c. They're only used
  22. * for debugging, and should be removed (along with the debugging code using
  23. * them) when switching also VMAs to the regular prio_tree code.
  24. */
/* Radix index of a vma: the file page offset its mapping starts at. */
#define RADIX_INDEX(vma) ((vma)->vm_pgoff)
/* Size of the mapping in pages. */
#define VMA_SIZE(vma) (((vma)->vm_end - (vma)->vm_start) >> PAGE_SHIFT)
/*
 * Heap index: last file page covered by the vma, computed as
 * vm_pgoff + (size - 1) rather than vm_pgoff + size - 1 grouped the
 * other way; subtracting 1 from the size first avoids overflow when
 * the end offset would not fit (/* avoid overflow */ in the original).
 */
#define HEAP_INDEX(vma) ((vma)->vm_pgoff + (VMA_SIZE(vma) - 1))
  29. /*
  30. * Radix priority search tree for address_space->i_mmap
  31. *
 * For each vma that maps a unique set of file pages i.e., a unique [radix_index,
  33. * heap_index] value, we have a corresponding priority search tree node. If
  34. * multiple vmas have identical [radix_index, heap_index] value, then one of
  35. * them is used as a tree node and others are stored in a vm_set list. The tree
  36. * node points to the first vma (head) of the list using vm_set.head.
  37. *
 *      prio_tree_root
 *            |
 *            A       vm_set.head
 *           / \      /
 *          L   R -> H-I-J-K-M-N-O-P-Q-S
 *          ^   ^    <-- vm_set.list -->
 *          tree nodes
  45. *
  46. * We need some way to identify whether a vma is a tree node, head of a vm_set
  47. * list, or just a member of a vm_set list. We cannot use vm_flags to store
  48. * such information. The reason is, in the above figure, it is possible that
  49. * vm_flags' of R and H are covered by the different mmap_sems. When R is
  50. * removed under R->mmap_sem, H replaces R as a tree node. Since we do not hold
  51. * H->mmap_sem, we cannot use H->vm_flags for marking that H is a tree node now.
  52. * That's why some trick involving shared.vm_set.parent is used for identifying
  53. * tree nodes and list head nodes.
  54. *
  55. * vma radix priority search tree node rules:
  56. *
  57. * vma->shared.vm_set.parent != NULL ==> a tree node
  58. * vma->shared.vm_set.head != NULL ==> list of others mapping same range
  59. * vma->shared.vm_set.head == NULL ==> no others map the same range
  60. *
  61. * vma->shared.vm_set.parent == NULL
  62. * vma->shared.vm_set.head != NULL ==> list head of vmas mapping same range
  63. * vma->shared.vm_set.head == NULL ==> a list node
  64. */
  65. /*
  66. * Add a new vma known to map the same set of pages as the old vma:
  67. * useful for fork's dup_mmap as well as vma_prio_tree_insert below.
  68. * Note that it just happens to work correctly on i_mmap_nonlinear too.
  69. */
  70. void vma_prio_tree_add(struct vm_area_struct *vma, struct vm_area_struct *old)
  71. {
  72. /* Leave these BUG_ONs till prio_tree patch stabilizes */
  73. BUG_ON(RADIX_INDEX(vma) != RADIX_INDEX(old));
  74. BUG_ON(HEAP_INDEX(vma) != HEAP_INDEX(old));
  75. vma->shared.vm_set.head = NULL;
  76. vma->shared.vm_set.parent = NULL;
  77. if (!old->shared.vm_set.parent)
  78. list_add(&vma->shared.vm_set.list,
  79. &old->shared.vm_set.list);
  80. else if (old->shared.vm_set.head)
  81. list_add_tail(&vma->shared.vm_set.list,
  82. &old->shared.vm_set.head->shared.vm_set.list);
  83. else {
  84. INIT_LIST_HEAD(&vma->shared.vm_set.list);
  85. vma->shared.vm_set.head = old;
  86. old->shared.vm_set.head = vma;
  87. }
  88. }
  89. void vma_prio_tree_insert(struct vm_area_struct *vma,
  90. struct prio_tree_root *root)
  91. {
  92. struct prio_tree_node *ptr;
  93. struct vm_area_struct *old;
  94. vma->shared.vm_set.head = NULL;
  95. ptr = raw_prio_tree_insert(root, &vma->shared.prio_tree_node);
  96. if (ptr != (struct prio_tree_node *) &vma->shared.prio_tree_node) {
  97. old = prio_tree_entry(ptr, struct vm_area_struct,
  98. shared.prio_tree_node);
  99. vma_prio_tree_add(vma, old);
  100. }
  101. }
/*
 * Remove @vma from the priority search tree rooted at @root, or from the
 * vm_set list it belongs to.  When @vma is a tree node with a vm_set list,
 * the list head is promoted into the tree in its place; when @vma is a
 * list head, the next list member becomes the new head.  The statement
 * order here is deliberate (tree replacement before head-pointer rewiring)
 * — do not reorder.
 */
void vma_prio_tree_remove(struct vm_area_struct *vma,
		struct prio_tree_root *root)
{
	struct vm_area_struct *node, *head, *new_head;

	if (!vma->shared.vm_set.head) {
		/* No vm_set list hangs off @vma: simple cases. */
		if (!vma->shared.vm_set.parent)
			/* Plain list member: unlink from its vm_set list. */
			list_del_init(&vma->shared.vm_set.list);
		else
			/* Lone tree node: remove it from the tree. */
			raw_prio_tree_remove(root, &vma->shared.prio_tree_node);
	} else {
		/* Leave this BUG_ON till prio_tree patch stabilizes */
		/* Tree node and list head must point at each other. */
		BUG_ON(vma->shared.vm_set.head->shared.vm_set.head != vma);
		if (vma->shared.vm_set.parent) {
			/*
			 * @vma is a tree node with a vm_set list: promote
			 * the list head into the tree in @vma's place.
			 */
			head = vma->shared.vm_set.head;
			if (!list_empty(&head->shared.vm_set.list)) {
				/* The list has further members: the next one
				 * becomes the new list head. */
				new_head = list_entry(
					head->shared.vm_set.list.next,
					struct vm_area_struct,
					shared.vm_set.list);
				list_del_init(&head->shared.vm_set.list);
			} else
				new_head = NULL;
			/* Swap @head into @vma's slot in the tree... */
			raw_prio_tree_replace(root, &vma->shared.prio_tree_node,
					&head->shared.prio_tree_node);
			/* ...then rewire the tree-node <-> head links. */
			head->shared.vm_set.head = new_head;
			if (new_head)
				new_head->shared.vm_set.head = head;
		} else {
			/*
			 * @vma is the head of a vm_set list (not a tree
			 * node); @node is the tree node pointing at it.
			 */
			node = vma->shared.vm_set.head;
			if (!list_empty(&vma->shared.vm_set.list)) {
				/* Promote the next member to list head. */
				new_head = list_entry(
					vma->shared.vm_set.list.next,
					struct vm_area_struct,
					shared.vm_set.list);
				list_del_init(&vma->shared.vm_set.list);
				node->shared.vm_set.head = new_head;
				new_head->shared.vm_set.head = node;
			} else
				/* List becomes empty: tree node stands alone. */
				node->shared.vm_set.head = NULL;
		}
	}
}
  144. /*
  145. * Helper function to enumerate vmas that map a given file page or a set of
  146. * contiguous file pages. The function returns vmas that at least map a single
  147. * page in the given range of contiguous file pages.
  148. */
  149. struct vm_area_struct *vma_prio_tree_next(struct vm_area_struct *vma,
  150. struct prio_tree_iter *iter)
  151. {
  152. struct prio_tree_node *ptr;
  153. struct vm_area_struct *next;
  154. if (!vma) {
  155. /*
  156. * First call is with NULL vma
  157. */
  158. ptr = prio_tree_next(iter);
  159. if (ptr) {
  160. next = prio_tree_entry(ptr, struct vm_area_struct,
  161. shared.prio_tree_node);
  162. prefetch(next->shared.vm_set.head);
  163. return next;
  164. } else
  165. return NULL;
  166. }
  167. if (vma->shared.vm_set.parent) {
  168. if (vma->shared.vm_set.head) {
  169. next = vma->shared.vm_set.head;
  170. prefetch(next->shared.vm_set.list.next);
  171. return next;
  172. }
  173. } else {
  174. next = list_entry(vma->shared.vm_set.list.next,
  175. struct vm_area_struct, shared.vm_set.list);
  176. if (!next->shared.vm_set.head) {
  177. prefetch(next->shared.vm_set.list.next);
  178. return next;
  179. }
  180. }
  181. ptr = prio_tree_next(iter);
  182. if (ptr) {
  183. next = prio_tree_entry(ptr, struct vm_area_struct,
  184. shared.prio_tree_node);
  185. prefetch(next->shared.vm_set.head);
  186. return next;
  187. } else
  188. return NULL;
  189. }