  1. /*
  2. * JFFS2 -- Journalling Flash File System, Version 2.
  3. *
  4. * Copyright © 2001-2007 Red Hat, Inc.
  5. *
  6. * Created by David Woodhouse <dwmw2@infradead.org>
  7. *
  8. * For licensing information, see the file 'LICENCE' in this directory.
  9. *
  10. */
  11. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  12. #include <linux/kernel.h>
  13. #include <linux/slab.h>
  14. #include <linux/init.h>
  15. #include <linux/jffs2.h>
  16. #include "nodelist.h"
/* These are initialised to NULL in the kernel startup code.
   If you're porting to other operating systems, beware */

/* One dedicated slab cache per fixed-size JFFS2 structure.  All are
 * created in jffs2_create_slab_caches() and released in
 * jffs2_destroy_slab_caches(). */
static struct kmem_cache *full_dnode_slab;
static struct kmem_cache *raw_dirent_slab;
static struct kmem_cache *raw_inode_slab;
static struct kmem_cache *tmp_dnode_info_slab;
static struct kmem_cache *raw_node_ref_slab;
static struct kmem_cache *node_frag_slab;
static struct kmem_cache *inode_cache_slab;
#ifdef CONFIG_JFFS2_FS_XATTR
static struct kmem_cache *xattr_datum_cache;
static struct kmem_cache *xattr_ref_cache;
#endif
/*
 * Create every slab cache used by JFFS2.
 *
 * Returns 0 on success.  On any allocation failure, all caches created
 * so far are torn down via jffs2_destroy_slab_caches() and -ENOMEM is
 * returned.
 */
int __init jffs2_create_slab_caches(void)
{
	full_dnode_slab = kmem_cache_create("jffs2_full_dnode",
					    sizeof(struct jffs2_full_dnode),
					    0, 0, NULL);
	if (!full_dnode_slab)
		goto err;

	/* NOTE(review): raw dirents/inodes get SLAB_HWCACHE_ALIGN
	 * (cache-line alignment) unlike the other caches — presumably
	 * because these are the on-media node buffers; confirm against
	 * the write path before changing. */
	raw_dirent_slab = kmem_cache_create("jffs2_raw_dirent",
					    sizeof(struct jffs2_raw_dirent),
					    0, SLAB_HWCACHE_ALIGN, NULL);
	if (!raw_dirent_slab)
		goto err;

	raw_inode_slab = kmem_cache_create("jffs2_raw_inode",
					   sizeof(struct jffs2_raw_inode),
					   0, SLAB_HWCACHE_ALIGN, NULL);
	if (!raw_inode_slab)
		goto err;

	tmp_dnode_info_slab = kmem_cache_create("jffs2_tmp_dnode",
						sizeof(struct jffs2_tmp_dnode_info),
						0, 0, NULL);
	if (!tmp_dnode_info_slab)
		goto err;

	/* Raw node refs are allocated in blocks of REFS_PER_BLOCK, plus
	 * one extra slot used as a REF_LINK_NODE link to the next block
	 * (see jffs2_alloc_refblock()). */
	raw_node_ref_slab = kmem_cache_create("jffs2_refblock",
					      sizeof(struct jffs2_raw_node_ref) * (REFS_PER_BLOCK + 1),
					      0, 0, NULL);
	if (!raw_node_ref_slab)
		goto err;

	node_frag_slab = kmem_cache_create("jffs2_node_frag",
					   sizeof(struct jffs2_node_frag),
					   0, 0, NULL);
	if (!node_frag_slab)
		goto err;

	inode_cache_slab = kmem_cache_create("jffs2_inode_cache",
					     sizeof(struct jffs2_inode_cache),
					     0, 0, NULL);
	if (!inode_cache_slab)
		goto err;

#ifdef CONFIG_JFFS2_FS_XATTR
	xattr_datum_cache = kmem_cache_create("jffs2_xattr_datum",
					      sizeof(struct jffs2_xattr_datum),
					      0, 0, NULL);
	if (!xattr_datum_cache)
		goto err;

	xattr_ref_cache = kmem_cache_create("jffs2_xattr_ref",
					    sizeof(struct jffs2_xattr_ref),
					    0, 0, NULL);
	if (!xattr_ref_cache)
		goto err;
#endif
	return 0;
 err:
	/* Destroy is safe on partially-initialised state. */
	jffs2_destroy_slab_caches();
	return -ENOMEM;
}
  84. void jffs2_destroy_slab_caches(void)
  85. {
  86. if(full_dnode_slab)
  87. kmem_cache_destroy(full_dnode_slab);
  88. if(raw_dirent_slab)
  89. kmem_cache_destroy(raw_dirent_slab);
  90. if(raw_inode_slab)
  91. kmem_cache_destroy(raw_inode_slab);
  92. if(tmp_dnode_info_slab)
  93. kmem_cache_destroy(tmp_dnode_info_slab);
  94. if(raw_node_ref_slab)
  95. kmem_cache_destroy(raw_node_ref_slab);
  96. if(node_frag_slab)
  97. kmem_cache_destroy(node_frag_slab);
  98. if(inode_cache_slab)
  99. kmem_cache_destroy(inode_cache_slab);
  100. #ifdef CONFIG_JFFS2_FS_XATTR
  101. if (xattr_datum_cache)
  102. kmem_cache_destroy(xattr_datum_cache);
  103. if (xattr_ref_cache)
  104. kmem_cache_destroy(xattr_ref_cache);
  105. #endif
  106. }
  107. struct jffs2_full_dirent *jffs2_alloc_full_dirent(int namesize)
  108. {
  109. struct jffs2_full_dirent *ret;
  110. ret = kmalloc(sizeof(struct jffs2_full_dirent) + namesize, GFP_KERNEL);
  111. dbg_memalloc("%p\n", ret);
  112. return ret;
  113. }
  114. void jffs2_free_full_dirent(struct jffs2_full_dirent *x)
  115. {
  116. dbg_memalloc("%p\n", x);
  117. kfree(x);
  118. }
  119. struct jffs2_full_dnode *jffs2_alloc_full_dnode(void)
  120. {
  121. struct jffs2_full_dnode *ret;
  122. ret = kmem_cache_alloc(full_dnode_slab, GFP_KERNEL);
  123. dbg_memalloc("%p\n", ret);
  124. return ret;
  125. }
  126. void jffs2_free_full_dnode(struct jffs2_full_dnode *x)
  127. {
  128. dbg_memalloc("%p\n", x);
  129. kmem_cache_free(full_dnode_slab, x);
  130. }
  131. struct jffs2_raw_dirent *jffs2_alloc_raw_dirent(void)
  132. {
  133. struct jffs2_raw_dirent *ret;
  134. ret = kmem_cache_alloc(raw_dirent_slab, GFP_KERNEL);
  135. dbg_memalloc("%p\n", ret);
  136. return ret;
  137. }
  138. void jffs2_free_raw_dirent(struct jffs2_raw_dirent *x)
  139. {
  140. dbg_memalloc("%p\n", x);
  141. kmem_cache_free(raw_dirent_slab, x);
  142. }
  143. struct jffs2_raw_inode *jffs2_alloc_raw_inode(void)
  144. {
  145. struct jffs2_raw_inode *ret;
  146. ret = kmem_cache_alloc(raw_inode_slab, GFP_KERNEL);
  147. dbg_memalloc("%p\n", ret);
  148. return ret;
  149. }
  150. void jffs2_free_raw_inode(struct jffs2_raw_inode *x)
  151. {
  152. dbg_memalloc("%p\n", x);
  153. kmem_cache_free(raw_inode_slab, x);
  154. }
  155. struct jffs2_tmp_dnode_info *jffs2_alloc_tmp_dnode_info(void)
  156. {
  157. struct jffs2_tmp_dnode_info *ret;
  158. ret = kmem_cache_alloc(tmp_dnode_info_slab, GFP_KERNEL);
  159. dbg_memalloc("%p\n",
  160. ret);
  161. return ret;
  162. }
  163. void jffs2_free_tmp_dnode_info(struct jffs2_tmp_dnode_info *x)
  164. {
  165. dbg_memalloc("%p\n", x);
  166. kmem_cache_free(tmp_dnode_info_slab, x);
  167. }
  168. static struct jffs2_raw_node_ref *jffs2_alloc_refblock(void)
  169. {
  170. struct jffs2_raw_node_ref *ret;
  171. ret = kmem_cache_alloc(raw_node_ref_slab, GFP_KERNEL);
  172. if (ret) {
  173. int i = 0;
  174. for (i=0; i < REFS_PER_BLOCK; i++) {
  175. ret[i].flash_offset = REF_EMPTY_NODE;
  176. ret[i].next_in_ino = NULL;
  177. }
  178. ret[i].flash_offset = REF_LINK_NODE;
  179. ret[i].next_in_ino = NULL;
  180. }
  181. return ret;
  182. }
/*
 * Ensure at least 'nr' free raw_node_ref slots are available at the end
 * of eraseblock 'jeb', allocating additional refblocks (chained through
 * REF_LINK_NODE slots) as needed.
 *
 * Returns 0 on success or -ENOMEM if a refblock allocation fails.
 * 'c' is unused here but kept for API symmetry with the other
 * jffs2_sb_info-taking helpers.
 */
int jffs2_prealloc_raw_node_refs(struct jffs2_sb_info *c,
				 struct jffs2_eraseblock *jeb, int nr)
{
	struct jffs2_raw_node_ref **p, *ref;
	int i = nr;	/* number of empty slots still to be found */

	dbg_memalloc("%d\n", nr);

	/* 'p' always points at the link through which the next refblock
	 * would be attached; 'ref' walks the slots themselves. */
	p = &jeb->last_node;
	ref = *p;

	dbg_memalloc("Reserving %d refs for block @0x%08x\n", nr, jeb->offset);

	/* If jeb->last_node is really a valid node then skip over it */
	if (ref && ref->flash_offset != REF_EMPTY_NODE)
		ref++;

	while (i) {
		if (!ref) {
			/* Chain ran out: append a fresh refblock here. */
			dbg_memalloc("Allocating new refblock linked from %p\n", p);
			ref = *p = jffs2_alloc_refblock();
			if (!ref)
				return -ENOMEM;
		}
		if (ref->flash_offset == REF_LINK_NODE) {
			/* Trailing link slot: follow it into the next
			 * refblock (which may not exist yet). */
			p = &ref->next_in_ino;
			ref = *p;
			continue;
		}
		/* Ordinary slot: counts toward the reservation. */
		i--;
		ref++;
	}
	jeb->allocated_refs = nr;

	dbg_memalloc("Reserved %d refs for block @0x%08x, last_node is %p (%08x,%p)\n",
		     nr, jeb->offset, jeb->last_node, jeb->last_node->flash_offset,
		     jeb->last_node->next_in_ino);

	return 0;
}
  216. void jffs2_free_refblock(struct jffs2_raw_node_ref *x)
  217. {
  218. dbg_memalloc("%p\n", x);
  219. kmem_cache_free(raw_node_ref_slab, x);
  220. }
  221. struct jffs2_node_frag *jffs2_alloc_node_frag(void)
  222. {
  223. struct jffs2_node_frag *ret;
  224. ret = kmem_cache_alloc(node_frag_slab, GFP_KERNEL);
  225. dbg_memalloc("%p\n", ret);
  226. return ret;
  227. }
  228. void jffs2_free_node_frag(struct jffs2_node_frag *x)
  229. {
  230. dbg_memalloc("%p\n", x);
  231. kmem_cache_free(node_frag_slab, x);
  232. }
  233. struct jffs2_inode_cache *jffs2_alloc_inode_cache(void)
  234. {
  235. struct jffs2_inode_cache *ret;
  236. ret = kmem_cache_alloc(inode_cache_slab, GFP_KERNEL);
  237. dbg_memalloc("%p\n", ret);
  238. return ret;
  239. }
  240. void jffs2_free_inode_cache(struct jffs2_inode_cache *x)
  241. {
  242. dbg_memalloc("%p\n", x);
  243. kmem_cache_free(inode_cache_slab, x);
  244. }
  245. #ifdef CONFIG_JFFS2_FS_XATTR
  246. struct jffs2_xattr_datum *jffs2_alloc_xattr_datum(void)
  247. {
  248. struct jffs2_xattr_datum *xd;
  249. xd = kmem_cache_zalloc(xattr_datum_cache, GFP_KERNEL);
  250. dbg_memalloc("%p\n", xd);
  251. if (!xd)
  252. return NULL;
  253. xd->class = RAWNODE_CLASS_XATTR_DATUM;
  254. xd->node = (void *)xd;
  255. INIT_LIST_HEAD(&xd->xindex);
  256. return xd;
  257. }
  258. void jffs2_free_xattr_datum(struct jffs2_xattr_datum *xd)
  259. {
  260. dbg_memalloc("%p\n", xd);
  261. kmem_cache_free(xattr_datum_cache, xd);
  262. }
  263. struct jffs2_xattr_ref *jffs2_alloc_xattr_ref(void)
  264. {
  265. struct jffs2_xattr_ref *ref;
  266. ref = kmem_cache_zalloc(xattr_ref_cache, GFP_KERNEL);
  267. dbg_memalloc("%p\n", ref);
  268. if (!ref)
  269. return NULL;
  270. ref->class = RAWNODE_CLASS_XATTR_REF;
  271. ref->node = (void *)ref;
  272. return ref;
  273. }
  274. void jffs2_free_xattr_ref(struct jffs2_xattr_ref *ref)
  275. {
  276. dbg_memalloc("%p\n", ref);
  277. kmem_cache_free(xattr_ref_cache, ref);
  278. }
  279. #endif