/*
 * CMA DebugFS Interface
 *
 * Copyright (c) 2015 Sasha Levin <sasha.levin@oracle.com>
 */
#include <linux/debugfs.h>
#include <linux/cma.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm_types.h>

#include "cma.h"
/*
 * Record of one allocation made through the debugfs "alloc" file, so it
 * can later be returned via the "free" file.  Records are kept on the
 * per-area cma->mem_head hlist, protected by cma->mem_head_lock.
 */
struct cma_mem {
	struct hlist_node node;	/* link on cma->mem_head */
	struct page *p;		/* first page of the allocation */
	unsigned long n;	/* number of pages still held by this record */
};
  18. static struct dentry *cma_debugfs_root;
  19. static int cma_debugfs_get(void *data, u64 *val)
  20. {
  21. unsigned long *p = data;
  22. *val = *p;
  23. return 0;
  24. }
  25. DEFINE_SIMPLE_ATTRIBUTE(cma_debugfs_fops, cma_debugfs_get, NULL, "%llu\n");
  26. static int cma_used_get(void *data, u64 *val)
  27. {
  28. struct cma *cma = data;
  29. unsigned long used;
  30. mutex_lock(&cma->lock);
  31. /* pages counter is smaller than sizeof(int) */
  32. used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma));
  33. mutex_unlock(&cma->lock);
  34. *val = (u64)used << cma->order_per_bit;
  35. return 0;
  36. }
  37. DEFINE_SIMPLE_ATTRIBUTE(cma_used_fops, cma_used_get, NULL, "%llu\n");
  38. static int cma_maxchunk_get(void *data, u64 *val)
  39. {
  40. struct cma *cma = data;
  41. unsigned long maxchunk = 0;
  42. unsigned long start, end = 0;
  43. unsigned long bitmap_maxno = cma_bitmap_maxno(cma);
  44. mutex_lock(&cma->lock);
  45. for (;;) {
  46. start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end);
  47. if (start >= cma->count)
  48. break;
  49. end = find_next_bit(cma->bitmap, bitmap_maxno, start);
  50. maxchunk = max(end - start, maxchunk);
  51. }
  52. mutex_unlock(&cma->lock);
  53. *val = (u64)maxchunk << cma->order_per_bit;
  54. return 0;
  55. }
  56. DEFINE_SIMPLE_ATTRIBUTE(cma_maxchunk_fops, cma_maxchunk_get, NULL, "%llu\n");
/* Push an allocation record onto the area's list, under mem_head_lock. */
static void cma_add_to_cma_mem_list(struct cma *cma, struct cma_mem *mem)
{
	spin_lock(&cma->mem_head_lock);
	hlist_add_head(&mem->node, &cma->mem_head);
	spin_unlock(&cma->mem_head_lock);
}
  63. static struct cma_mem *cma_get_entry_from_list(struct cma *cma)
  64. {
  65. struct cma_mem *mem = NULL;
  66. spin_lock(&cma->mem_head_lock);
  67. if (!hlist_empty(&cma->mem_head)) {
  68. mem = hlist_entry(cma->mem_head.first, struct cma_mem, node);
  69. hlist_del_init(&mem->node);
  70. }
  71. spin_unlock(&cma->mem_head_lock);
  72. return mem;
  73. }
/*
 * Release up to @count pages previously allocated through the "alloc"
 * file, consuming queued cma_mem records newest-first.  A record larger
 * than the remaining @count is split when order_per_bit == 0 (release a
 * prefix, re-queue the shrunken remainder); otherwise a partial release
 * would not be bitmap-aligned, so the record is re-queued untouched and
 * the loop stops.  Always returns 0, including when the list runs dry.
 */
static int cma_free_mem(struct cma *cma, int count)
{
	struct cma_mem *mem = NULL;

	while (count) {
		mem = cma_get_entry_from_list(cma);
		if (mem == NULL)
			return 0;

		if (mem->n <= count) {
			/* Whole record fits in the request: free it all. */
			cma_release(cma, mem->p, mem->n);
			count -= mem->n;
			kfree(mem);
		} else if (cma->order_per_bit == 0) {
			/* Free a prefix and put the remainder back. */
			cma_release(cma, mem->p, count);
			mem->p += count;
			mem->n -= count;
			count = 0;
			cma_add_to_cma_mem_list(cma, mem);
		} else {
			/*
			 * Cannot split: releases must cover whole bitmap
			 * bits (2^order_per_bit pages each).
			 */
			pr_debug("cma: cannot release partial block when order_per_bit != 0\n");
			cma_add_to_cma_mem_list(cma, mem);
			break;
		}
	}

	return 0;
}
  99. static int cma_free_write(void *data, u64 val)
  100. {
  101. int pages = val;
  102. struct cma *cma = data;
  103. return cma_free_mem(cma, pages);
  104. }
  105. DEFINE_SIMPLE_ATTRIBUTE(cma_free_fops, NULL, cma_free_write, "%llu\n");
  106. static int cma_alloc_mem(struct cma *cma, int count)
  107. {
  108. struct cma_mem *mem;
  109. struct page *p;
  110. mem = kzalloc(sizeof(*mem), GFP_KERNEL);
  111. if (!mem)
  112. return -ENOMEM;
  113. p = cma_alloc(cma, count, 0);
  114. if (!p) {
  115. kfree(mem);
  116. return -ENOMEM;
  117. }
  118. mem->p = p;
  119. mem->n = count;
  120. cma_add_to_cma_mem_list(cma, mem);
  121. return 0;
  122. }
  123. static int cma_alloc_write(void *data, u64 val)
  124. {
  125. int pages = val;
  126. struct cma *cma = data;
  127. return cma_alloc_mem(cma, pages);
  128. }
  129. DEFINE_SIMPLE_ATTRIBUTE(cma_alloc_fops, NULL, cma_alloc_write, "%llu\n");
  130. static void cma_debugfs_add_one(struct cma *cma, int idx)
  131. {
  132. struct dentry *tmp;
  133. char name[16];
  134. int u32s;
  135. sprintf(name, "cma-%d", idx);
  136. tmp = debugfs_create_dir(name, cma_debugfs_root);
  137. debugfs_create_file("alloc", S_IWUSR, tmp, cma,
  138. &cma_alloc_fops);
  139. debugfs_create_file("free", S_IWUSR, tmp, cma,
  140. &cma_free_fops);
  141. debugfs_create_file("base_pfn", S_IRUGO, tmp,
  142. &cma->base_pfn, &cma_debugfs_fops);
  143. debugfs_create_file("count", S_IRUGO, tmp,
  144. &cma->count, &cma_debugfs_fops);
  145. debugfs_create_file("order_per_bit", S_IRUGO, tmp,
  146. &cma->order_per_bit, &cma_debugfs_fops);
  147. debugfs_create_file("used", S_IRUGO, tmp, cma, &cma_used_fops);
  148. debugfs_create_file("maxchunk", S_IRUGO, tmp, cma, &cma_maxchunk_fops);
  149. u32s = DIV_ROUND_UP(cma_bitmap_maxno(cma), BITS_PER_BYTE * sizeof(u32));
  150. debugfs_create_u32_array("bitmap", S_IRUGO, tmp, (u32*)cma->bitmap, u32s);
  151. }
  152. static int __init cma_debugfs_init(void)
  153. {
  154. int i;
  155. cma_debugfs_root = debugfs_create_dir("cma", NULL);
  156. if (!cma_debugfs_root)
  157. return -ENOMEM;
  158. for (i = 0; i < cma_area_count; i++)
  159. cma_debugfs_add_one(&cma_areas[i], i);
  160. return 0;
  161. }
  162. late_initcall(cma_debugfs_init);