/* ump_kernel_memory_backend_os.c — OS page allocator backend for the UMP kernel driver */
  1. /*
  2. * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
  3. *
  4. * This program is free software and is provided to you under the terms of the GNU General Public License version 2
  5. * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
  6. *
  7. * A copy of the licence is included with the program, and can also be obtained from Free Software
  8. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  9. */
  10. /* needed to detect kernel version specific code */
  11. #include <linux/version.h>
  12. #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
  13. #include <linux/semaphore.h>
  14. #else /* pre 2.6.26 the file was in the arch specific location */
  15. #include <asm/semaphore.h>
  16. #endif
  17. #include <linux/dma-mapping.h>
  18. #include <linux/mm.h>
  19. #include <linux/slab.h>
  20. #include <asm/atomic.h>
  21. #include <linux/vmalloc.h>
  22. #include <asm/cacheflush.h>
  23. #include "ump_kernel_common.h"
  24. #include "ump_kernel_memory_backend.h"
/* Per-backend bookkeeping for the OS page allocator. */
typedef struct os_allocator
{
	struct semaphore mutex;  /* Serializes access to the page counters below */
	u32 num_pages_max; /**< Maximum number of pages to allocate from the OS */
	u32 num_pages_allocated; /**< Number of pages allocated from the OS */
} os_allocator;
/* Backend operations; wired into the ump_memory_backend vtable in
 * ump_os_memory_backend_create() below. */
static void os_free(void* ctx, ump_dd_mem * descriptor);
static int os_allocate(void* ctx, ump_dd_mem * descriptor);
static void os_memory_backend_destroy(ump_memory_backend * backend);
static u32 os_stat(struct ump_memory_backend *backend);
  35. /*
  36. * Create OS memory backend
  37. */
  38. ump_memory_backend * ump_os_memory_backend_create(const int max_allocation)
  39. {
  40. ump_memory_backend * backend;
  41. os_allocator * info;
  42. info = kmalloc(sizeof(os_allocator), GFP_KERNEL);
  43. if (NULL == info)
  44. {
  45. return NULL;
  46. }
  47. info->num_pages_max = max_allocation >> PAGE_SHIFT;
  48. info->num_pages_allocated = 0;
  49. sema_init(&info->mutex, 1);
  50. backend = kmalloc(sizeof(ump_memory_backend), GFP_KERNEL);
  51. if (NULL == backend)
  52. {
  53. kfree(info);
  54. return NULL;
  55. }
  56. backend->ctx = info;
  57. backend->allocate = os_allocate;
  58. backend->release = os_free;
  59. backend->shutdown = os_memory_backend_destroy;
  60. backend->stat = os_stat;
  61. backend->pre_allocate_physical_check = NULL;
  62. backend->adjust_to_mali_phys = NULL;
  63. return backend;
  64. }
  65. /*
  66. * Destroy specified OS memory backend
  67. */
  68. static void os_memory_backend_destroy(ump_memory_backend * backend)
  69. {
  70. os_allocator * info = (os_allocator*)backend->ctx;
  71. DBG_MSG_IF(1, 0 != info->num_pages_allocated, ("%d pages still in use during shutdown\n", info->num_pages_allocated));
  72. kfree(info);
  73. kfree(backend);
  74. }
  75. /*
  76. * Allocate UMP memory
  77. */
  78. static int os_allocate(void* ctx, ump_dd_mem * descriptor)
  79. {
  80. u32 left;
  81. os_allocator * info;
  82. int pages_allocated = 0;
  83. int is_cached;
  84. BUG_ON(!descriptor);
  85. BUG_ON(!ctx);
  86. info = (os_allocator*)ctx;
  87. left = descriptor->size_bytes;
  88. is_cached = descriptor->is_cached;
  89. if (down_interruptible(&info->mutex))
  90. {
  91. DBG_MSG(1, ("Failed to get mutex in os_free\n"));
  92. return 0; /* failure */
  93. }
  94. descriptor->backend_info = NULL;
  95. descriptor->nr_blocks = ((left + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1)) >> PAGE_SHIFT;
  96. DBG_MSG(5, ("Allocating page array. Size: %lu\n", descriptor->nr_blocks * sizeof(ump_dd_physical_block)));
  97. descriptor->block_array = (ump_dd_physical_block *)vmalloc(sizeof(ump_dd_physical_block) * descriptor->nr_blocks);
  98. if (NULL == descriptor->block_array)
  99. {
  100. up(&info->mutex);
  101. DBG_MSG(1, ("Block array could not be allocated\n"));
  102. return 0; /* failure */
  103. }
  104. while (left > 0 && ((info->num_pages_allocated + pages_allocated) < info->num_pages_max))
  105. {
  106. struct page * new_page;
  107. if (is_cached)
  108. {
  109. new_page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN);
  110. } else
  111. {
  112. new_page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN | __GFP_COLD);
  113. }
  114. if (NULL == new_page)
  115. {
  116. break;
  117. }
  118. /* Ensure page caches are flushed. */
  119. if ( is_cached )
  120. {
  121. descriptor->block_array[pages_allocated].addr = page_to_phys(new_page);
  122. descriptor->block_array[pages_allocated].size = PAGE_SIZE;
  123. } else
  124. {
  125. descriptor->block_array[pages_allocated].addr = dma_map_page(NULL, new_page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL );
  126. descriptor->block_array[pages_allocated].size = PAGE_SIZE;
  127. }
  128. DBG_MSG(5, ("Allocated page 0x%08lx cached: %d\n", descriptor->block_array[pages_allocated].addr, is_cached));
  129. if (left < PAGE_SIZE)
  130. {
  131. left = 0;
  132. }
  133. else
  134. {
  135. left -= PAGE_SIZE;
  136. }
  137. pages_allocated++;
  138. }
  139. DBG_MSG(5, ("Alloce for ID:%2d got %d pages, cached: %d\n", descriptor->secure_id, pages_allocated));
  140. if (left)
  141. {
  142. DBG_MSG(1, ("Failed to allocate needed pages\n"));
  143. while(pages_allocated)
  144. {
  145. pages_allocated--;
  146. if ( !is_cached )
  147. {
  148. dma_unmap_page(NULL, descriptor->block_array[pages_allocated].addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
  149. }
  150. __free_page(pfn_to_page(descriptor->block_array[pages_allocated].addr >> PAGE_SHIFT) );
  151. }
  152. up(&info->mutex);
  153. return 0; /* failure */
  154. }
  155. info->num_pages_allocated += pages_allocated;
  156. DBG_MSG(6, ("%d out of %d pages now allocated\n", info->num_pages_allocated, info->num_pages_max));
  157. up(&info->mutex);
  158. return 1; /* success*/
  159. }
/*
 * Free specified UMP memory
 *
 * Returns the descriptor's pages to the backend budget, unmaps the DMA
 * mapping for uncached allocations, frees each page, then frees the
 * block array itself.
 */
static void os_free(void* ctx, ump_dd_mem * descriptor)
{
	os_allocator * info;
	int i;

	BUG_ON(!ctx);
	BUG_ON(!descriptor);

	info = (os_allocator*)ctx;

	/* NOTE(review): reads num_pages_allocated without holding the mutex;
	 * harmless as a sanity check but strictly speaking racy. */
	BUG_ON(descriptor->nr_blocks > info->num_pages_allocated);

	if (down_interruptible(&info->mutex))
	{
		/* NOTE(review): if a signal interrupts here, the pages and the
		 * block array are never released — a potential leak. Confirm
		 * callers retry before changing this to an uninterruptible down(). */
		DBG_MSG(1, ("Failed to get mutex in os_free\n"));
		return;
	}

	DBG_MSG(5, ("Releasing %lu OS pages\n", descriptor->nr_blocks));

	/* Give the pages back to the budget before actually freeing them;
	 * the counter is only consulted under this mutex in os_allocate(). */
	info->num_pages_allocated -= descriptor->nr_blocks;

	up(&info->mutex);

	for ( i = 0; i < descriptor->nr_blocks; i++)
	{
		DBG_MSG(6, ("Freeing physical page. Address: 0x%08lx\n", descriptor->block_array[i].addr));
		if ( ! descriptor->is_cached)
		{
			/* Uncached pages were DMA-mapped in os_allocate(); unmap first */
			dma_unmap_page(NULL, descriptor->block_array[i].addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
		__free_page(pfn_to_page(descriptor->block_array[i].addr>>PAGE_SHIFT) );
	}

	vfree(descriptor->block_array);
}
  190. static u32 os_stat(struct ump_memory_backend *backend)
  191. {
  192. os_allocator *info;
  193. info = (os_allocator*)backend->ctx;
  194. return info->num_pages_allocated * _MALI_OSK_MALI_PAGE_SIZE;
  195. }