/**
 * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
 *
 * This source file is released under GPL v2 license (no other versions).
 * See the COPYING file included in the main directory of this source
 * distribution for the license terms and conditions.
 *
 * @File	ctvmem.c
 *
 * @Brief
 * This file contains the implementation of virtual memory management object
 * for card device.
 *
 * @Author	Liu Chun
 * @Date	Apr 1 2008
 */

#include "ctvmem.h"
#include "ctatc.h"
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <sound/pcm.h>
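
/*
 * Each page-table page holds CT_PAGE_SIZE / sizeof(void *) entries, and
 * each entry maps one CT_PAGE_SIZE page of host memory, so a single
 * page-table page spans CT_ADDRS_PER_PAGE bytes of device address space
 * (for example, 2 MiB with 4 KiB pages and 8-byte entries).
 */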
#define CT_PTES_PER_PAGE (CT_PAGE_SIZE / sizeof(void *))
#define CT_ADDRS_PER_PAGE (CT_PTES_PER_PAGE * CT_PAGE_SIZE)

/*
 * Find or create vm block based on requested @size.
 * @size must be page aligned.
 */
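/* The free list is kept sorted by address; the search below is first-fit,
 * splitting the front off the first block that is large enough. */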
static struct ct_vm_block *
get_vm_block(struct ct_vm *vm, unsigned int size, struct ct_atc *atc)
{
	struct ct_vm_block *block = NULL, *entry;
	struct list_head *pos;

	size = CT_PAGE_ALIGN(size);
	if (size > vm->size) {
		dev_err(atc->card->dev,
			"Insufficient device virtual memory space available!\n");
		return NULL;
	}

	mutex_lock(&vm->lock);
	list_for_each(pos, &vm->unused) {
		entry = list_entry(pos, struct ct_vm_block, list);
		if (entry->size >= size)
			break; /* found a block that is big enough */
	}
	if (pos == &vm->unused)
		goto out;

	if (entry->size == size) {
		/* Move the vm node from unused list to used list directly */
		list_move(&entry->list, &vm->used);
		vm->size -= size;
		block = entry;
		goto out;
	}

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block)
		goto out;

	/* Carve the request off the front of the free block */
	block->addr = entry->addr;
	block->size = size;
	list_add(&block->list, &vm->used);
	entry->addr += size;
	entry->size -= size;
	vm->size -= size;

 out:
	mutex_unlock(&vm->lock);
	return block;
}
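
/*
 * Return a block to the free list, which is kept sorted by address.
 * The freed range is merged with its successor and any predecessors
 * that are exactly adjacent, so neighboring free space coalesces.
 */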
static void put_vm_block(struct ct_vm *vm, struct ct_vm_block *block)
{
	struct ct_vm_block *entry, *pre_ent;
	struct list_head *pos, *pre;

	block->size = CT_PAGE_ALIGN(block->size);

	mutex_lock(&vm->lock);
	list_del(&block->list);
	vm->size += block->size;

	list_for_each(pos, &vm->unused) {
		entry = list_entry(pos, struct ct_vm_block, list);
		if (entry->addr >= (block->addr + block->size))
			break; /* found a position */
	}
	if (pos == &vm->unused) {
		list_add_tail(&block->list, &vm->unused);
		entry = block;
	} else {
		if ((block->addr + block->size) == entry->addr) {
			entry->addr = block->addr;
			entry->size += block->size;
			kfree(block);
		} else {
			__list_add(&block->list, pos->prev, pos);
			entry = block;
		}
	}

	/* Coalesce with preceding free blocks that are exactly adjacent;
	 * a gap means the range in between is still allocated. */
	pos = &entry->list;
	pre = pos->prev;
	while (pre != &vm->unused) {
		entry = list_entry(pos, struct ct_vm_block, list);
		pre_ent = list_entry(pre, struct ct_vm_block, list);
		if ((pre_ent->addr + pre_ent->size) != entry->addr)
			break;

		pre_ent->size += entry->size;
		list_del(pos);
		kfree(entry);
		pos = pre;
		pre = pos->prev;
	}
	mutex_unlock(&vm->lock);
}

/*
 * Map the host pages backing @substream (kmalloced/vmalloced or
 * scatter-gather) to a device logical address range: reserve a block of
 * device address space, then write one host physical address per
 * CT_PAGE_SIZE page into page-table page 0.
 */
static struct ct_vm_block *
ct_vm_map(struct ct_vm *vm, struct snd_pcm_substream *substream, int size)
{
	struct ct_vm_block *block;
	unsigned int pte_start;
	unsigned i, pages;
	unsigned long *ptp;
	struct ct_atc *atc = snd_pcm_substream_chip(substream);

	block = get_vm_block(vm, size, atc);
	if (block == NULL) {
		dev_err(atc->card->dev,
			"No virtual memory block big enough to allocate!\n");
		return NULL;
	}

	ptp = (unsigned long *)vm->ptp[0].area;
	pte_start = (block->addr >> CT_PAGE_SHIFT);
	pages = block->size >> CT_PAGE_SHIFT;
	for (i = 0; i < pages; i++) {
		unsigned long addr;
		addr = snd_pcm_sgbuf_get_addr(substream, i << CT_PAGE_SHIFT);
		ptp[pte_start + i] = addr;
	}

	/* Remember the caller's (unaligned) size; put_vm_block() re-aligns */
	block->size = size;
	return block;
}

static void ct_vm_unmap(struct ct_vm *vm, struct ct_vm_block *block)
{
	/* do unmapping */
	put_vm_block(vm, block);
}

/*
 * Return the host physical address of the @index-th device page-table
 * page on success, or ~0UL on failure.  The first ~0UL returned marks
 * the end of the page-table list.
 */
static dma_addr_t
ct_get_ptp_phys(struct ct_vm *vm, int index)
{
	return (index >= CT_PTP_NUM) ? ~0UL : vm->ptp[index].addr;
}
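
/*
 * Create the device virtual memory manager: allocate the page-table
 * pages, size the managed address space accordingly, and seed the free
 * list with one block covering the whole space.
 */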
int ct_vm_create(struct ct_vm **rvm, struct pci_dev *pci)
{
	struct ct_vm *vm;
	struct ct_vm_block *block;
	int i, err = 0;

	*rvm = NULL;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;

	mutex_init(&vm->lock);

	/* Allocate page table pages */
	for (i = 0; i < CT_PTP_NUM; i++) {
		err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
					  snd_dma_pci_data(pci),
					  PAGE_SIZE, &vm->ptp[i]);
		if (err < 0)
			break;
	}
	if (err < 0) {
		/* Unwind: free only the page-table pages that were
		 * successfully allocated; the block lists are not yet
		 * initialized, so ct_vm_destroy() cannot be used here. */
		while (--i >= 0)
			snd_dma_free_pages(&vm->ptp[i]);
		kfree(vm);
		return -ENOMEM;
	}

	vm->size = CT_ADDRS_PER_PAGE * i;
	vm->map = ct_vm_map;
	vm->unmap = ct_vm_unmap;
	vm->get_ptp_phys = ct_get_ptp_phys;
	INIT_LIST_HEAD(&vm->unused);
	INIT_LIST_HEAD(&vm->used);

	/* Seed the free list with one block spanning the whole space */
	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (block) {
		block->addr = 0;
		block->size = vm->size;
		list_add(&block->list, &vm->unused);
	}

	*rvm = vm;
	return 0;
}

/* The caller must ensure no mapping pages are being used
 * by hardware before calling this function */
void ct_vm_destroy(struct ct_vm *vm)
{
	int i;
	struct list_head *pos;
	struct ct_vm_block *entry;

	/* free used and unused list nodes */
	while (!list_empty(&vm->used)) {
		pos = vm->used.next;
		list_del(pos);
		entry = list_entry(pos, struct ct_vm_block, list);
		kfree(entry);
	}
	while (!list_empty(&vm->unused)) {
		pos = vm->unused.next;
		list_del(pos);
		entry = list_entry(pos, struct ct_vm_block, list);
		kfree(entry);
	}

	/* free allocated page table pages */
	for (i = 0; i < CT_PTP_NUM; i++)
		snd_dma_free_pages(&vm->ptp[i]);

	vm->size = 0;

	kfree(vm);
}
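
/*
 * Typical call sequence from the controller code (a sketch; the actual
 * callers live in ctatc.c and pass their own sizes and substreams):
 *
 *	struct ct_vm *vm;
 *	struct ct_vm_block *block;
 *
 *	err = ct_vm_create(&vm, pci);
 *	...
 *	block = vm->map(vm, substream, size);
 *	...
 *	vm->unmap(vm, block);
 *	ct_vm_destroy(vm);
 */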