ashmem.c

/* mm/ashmem.c
**
** Anonymous Shared Memory Subsystem, ashmem
**
** Copyright (C) 2008 Google, Inc.
**
** Robert Love <rlove@google.com>
**
** This software is licensed under the terms of the GNU General Public
** License version 2, as published by the Free Software Foundation, and
** may be copied, distributed, and modified under those terms.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
*/

#include <linux/module.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/security.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/personality.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/ashmem.h>

#define ASHMEM_NAME_PREFIX "dev/ashmem/"
#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)
/*
 * ashmem_area - anonymous shared memory area
 * Lifecycle: From our parent file's open() until its release()
 * Locking: Protected by `ashmem_mutex'
 * Big Note: Mappings do NOT pin this structure; it dies on close()
 */
struct ashmem_area {
        char name[ASHMEM_FULL_NAME_LEN]; /* optional name for /proc/pid/maps */
        struct list_head unpinned_list;  /* list of all ashmem areas */
        struct file *file;               /* the shmem-based backing file */
        size_t size;                     /* size of the mapping, in bytes */
        unsigned long prot_mask;         /* allowed prot bits, as vm_flags */
};

/*
 * ashmem_range - represents an interval of unpinned (evictable) pages
 * Lifecycle: From unpin to pin
 * Locking: Protected by `ashmem_mutex'
 */
struct ashmem_range {
        struct list_head lru;      /* entry in LRU list */
        struct list_head unpinned; /* entry in its area's unpinned list */
        struct ashmem_area *asma;  /* associated area */
        size_t pgstart;            /* starting page, inclusive */
        size_t pgend;              /* ending page, inclusive */
        unsigned int purged;       /* ASHMEM_NOT or ASHMEM_WAS_PURGED */
};

/* LRU list of unpinned pages, protected by ashmem_mutex */
static LIST_HEAD(ashmem_lru_list);

/* Count of pages on our LRU list, protected by ashmem_mutex */
static unsigned long lru_count;
/*
 * ashmem_mutex - protects the list of and each individual ashmem_area
 *
 * Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
 */
static DEFINE_MUTEX(ashmem_mutex);

static struct kmem_cache *ashmem_area_cachep __read_mostly;
static struct kmem_cache *ashmem_range_cachep __read_mostly;
#define range_size(range) \
        ((range)->pgend - (range)->pgstart + 1)

#define range_on_lru(range) \
        ((range)->purged == ASHMEM_NOT_PURGED)

#define page_range_subsumes_range(range, start, end) \
        (((range)->pgstart >= (start)) && ((range)->pgend <= (end)))

#define page_range_subsumed_by_range(range, start, end) \
        (((range)->pgstart <= (start)) && ((range)->pgend >= (end)))

#define page_in_range(range, page) \
        (((range)->pgstart <= (page)) && ((range)->pgend >= (page)))

#define page_range_in_range(range, start, end) \
        (page_in_range(range, start) || page_in_range(range, end) || \
         page_range_subsumes_range(range, start, end))

#define range_before_page(range, page) \
        ((range)->pgend < (page))

#define PROT_MASK (PROT_EXEC | PROT_READ | PROT_WRITE)
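
/*
 * Worked example of the interval macros above: pgstart and pgend are
 * inclusive page indices, so a range covering pages 2..5 has
 * range_size() == 4.  page_range_in_range() reports overlap whenever
 * either endpoint of the request lies inside the range, or the request
 * subsumes it: against that 2..5 range, a request for pages 4..8
 * overlaps (page 4 is in range), whereas a request for pages 6..8
 * does not.
 */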
static inline void lru_add(struct ashmem_range *range)
{
        list_add_tail(&range->lru, &ashmem_lru_list);
        lru_count += range_size(range);
}

static inline void lru_del(struct ashmem_range *range)
{
        list_del(&range->lru);
        lru_count -= range_size(range);
}
/*
 * range_alloc - allocate and initialize a new ashmem_range structure
 *
 * 'asma' - associated ashmem_area
 * 'prev_range' - the previous ashmem_range in the sorted asma->unpinned list
 * 'purged' - initial purge value (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 * 'start' - starting page, inclusive
 * 'end' - ending page, inclusive
 *
 * Caller must hold ashmem_mutex.
 */
static int range_alloc(struct ashmem_area *asma,
                       struct ashmem_range *prev_range, unsigned int purged,
                       size_t start, size_t end)
{
        struct ashmem_range *range;

        range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
        if (unlikely(!range))
                return -ENOMEM;

        range->asma = asma;
        range->pgstart = start;
        range->pgend = end;
        range->purged = purged;

        list_add_tail(&range->unpinned, &prev_range->unpinned);

        if (range_on_lru(range))
                lru_add(range);

        return 0;
}
static void range_del(struct ashmem_range *range)
{
        list_del(&range->unpinned);
        if (range_on_lru(range))
                lru_del(range);
        kmem_cache_free(ashmem_range_cachep, range);
}

/*
 * range_shrink - shrinks a range
 *
 * Caller must hold ashmem_mutex.
 */
static inline void range_shrink(struct ashmem_range *range,
                                size_t start, size_t end)
{
        size_t pre = range_size(range);

        range->pgstart = start;
        range->pgend = end;

        if (range_on_lru(range))
                lru_count -= pre - range_size(range);
}
static int ashmem_open(struct inode *inode, struct file *file)
{
        struct ashmem_area *asma;
        int ret;

        ret = generic_file_open(inode, file);
        if (unlikely(ret))
                return ret;

        asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
        if (unlikely(!asma))
                return -ENOMEM;

        INIT_LIST_HEAD(&asma->unpinned_list);
        memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
        asma->prot_mask = PROT_MASK;
        file->private_data = asma;

        return 0;
}

static int ashmem_release(struct inode *ignored, struct file *file)
{
        struct ashmem_area *asma = file->private_data;
        struct ashmem_range *range, *next;

        mutex_lock(&ashmem_mutex);
        list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
                range_del(range);
        mutex_unlock(&ashmem_mutex);

        if (asma->file)
                fput(asma->file);
        kmem_cache_free(ashmem_area_cachep, asma);

        return 0;
}
static ssize_t ashmem_read(struct file *file, char __user *buf,
                           size_t len, loff_t *pos)
{
        struct ashmem_area *asma = file->private_data;
        int ret = 0;

        mutex_lock(&ashmem_mutex);

        /* If size is not set, or set to 0, always return EOF. */
        if (asma->size == 0)
                goto out;

        if (!asma->file) {
                ret = -EBADF;
                goto out;
        }

        ret = asma->file->f_op->read(asma->file, buf, len, pos);
        if (ret < 0)
                goto out;

        /* Update backing file pos, since f_op->read() doesn't */
        asma->file->f_pos = *pos;

out:
        mutex_unlock(&ashmem_mutex);
        return ret;
}

static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
{
        struct ashmem_area *asma = file->private_data;
        int ret;

        mutex_lock(&ashmem_mutex);

        if (asma->size == 0) {
                ret = -EINVAL;
                goto out;
        }

        if (!asma->file) {
                ret = -EBADF;
                goto out;
        }

        ret = asma->file->f_op->llseek(asma->file, offset, origin);
        if (ret < 0)
                goto out;

        /* Copy f_pos from the backing file, since f_op->llseek() sets it */
        file->f_pos = asma->file->f_pos;

out:
        mutex_unlock(&ashmem_mutex);
        return ret;
}
static inline unsigned long calc_vm_may_flags(unsigned long prot)
{
        return _calc_vm_trans(prot, PROT_READ,  VM_MAYREAD) |
               _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
               _calc_vm_trans(prot, PROT_EXEC,  VM_MAYEXEC);
}

static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct ashmem_area *asma = file->private_data;
        int ret = 0;

        mutex_lock(&ashmem_mutex);

        /* user needs to SET_SIZE before mapping */
        if (unlikely(!asma->size)) {
                ret = -EINVAL;
                goto out;
        }

        /* requested protection bits must match our allowed protection mask */
        if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) &
                     calc_vm_prot_bits(PROT_MASK))) {
                ret = -EPERM;
                goto out;
        }
        vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask);

        if (!asma->file) {
                char *name = ASHMEM_NAME_DEF;
                struct file *vmfile;

                if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
                        name = asma->name;

                /* ... and allocate the backing shmem file */
                vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
                if (unlikely(IS_ERR(vmfile))) {
                        ret = PTR_ERR(vmfile);
                        goto out;
                }
                asma->file = vmfile;
        }
        get_file(asma->file);

        if (vma->vm_flags & VM_SHARED)
                shmem_set_file(vma, asma->file);
        else {
                if (vma->vm_file)
                        fput(vma->vm_file);
                vma->vm_file = asma->file;
        }
        vma->vm_flags |= VM_CAN_NONLINEAR;

out:
        mutex_unlock(&ashmem_mutex);
        return ret;
}
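
/*
 * A rough sketch of the user-space sequence this mmap handler expects
 * (the ioctl and constant definitions come from the user-visible copy of
 * <linux/ashmem.h>; the names below are the ones this file handles):
 *
 *     int fd = open("/dev/ashmem", O_RDWR);
 *     ioctl(fd, ASHMEM_SET_NAME, "my-region");   // optional, before mmap
 *     ioctl(fd, ASHMEM_SET_SIZE, len);           // required before mmap
 *     void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * The backing shmem file is created lazily on the first mmap(), which is
 * why ASHMEM_SET_NAME and ASHMEM_SET_SIZE are rejected once asma->file
 * has been set up.
 */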
/*
 * ashmem_shrink - our cache shrinker, called from mm/vmscan.c :: shrink_slab
 *
 * 'nr_to_scan' is the number of objects (pages) to prune, or 0 to query how
 * many objects (pages) we have in total.
 *
 * 'gfp_mask' is the mask of the allocation that got us into this mess.
 *
 * Return value is the number of objects (pages) remaining, or -1 if we cannot
 * proceed without risk of deadlock (due to gfp_mask).
 *
 * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
 * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
 * pages freed.
 */
static int ashmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
        struct ashmem_range *range, *next;

        /* We might recurse into filesystem code, so bail out if necessary */
        if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS))
                return -1;
        if (!sc->nr_to_scan)
                return lru_count;

        mutex_lock(&ashmem_mutex);
        list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
                struct inode *inode = range->asma->file->f_dentry->d_inode;
                loff_t start = range->pgstart * PAGE_SIZE;
                loff_t end = (range->pgend + 1) * PAGE_SIZE - 1;

                vmtruncate_range(inode, start, end);
                range->purged = ASHMEM_WAS_PURGED;
                lru_del(range);

                sc->nr_to_scan -= range_size(range);
                if (sc->nr_to_scan <= 0)
                        break;
        }
        mutex_unlock(&ashmem_mutex);

        return lru_count;
}
static struct shrinker ashmem_shrinker = {
        .shrink = ashmem_shrink,
        .seeks = DEFAULT_SEEKS * 4,
};

static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
{
        int ret = 0;

        mutex_lock(&ashmem_mutex);

        /* the user can only remove, not add, protection bits */
        if (unlikely((asma->prot_mask & prot) != prot)) {
                ret = -EINVAL;
                goto out;
        }

        /* does the application expect PROT_READ to imply PROT_EXEC? */
        if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
                prot |= PROT_EXEC;

        asma->prot_mask = prot;

out:
        mutex_unlock(&ashmem_mutex);
        return ret;
}
static int set_name(struct ashmem_area *asma, void __user *name)
{
        int ret = 0;

        mutex_lock(&ashmem_mutex);

        /* cannot change an existing mapping's name */
        if (unlikely(asma->file)) {
                ret = -EINVAL;
                goto out;
        }

        if (unlikely(copy_from_user(asma->name + ASHMEM_NAME_PREFIX_LEN,
                                    name, ASHMEM_NAME_LEN)))
                ret = -EFAULT;
        asma->name[ASHMEM_FULL_NAME_LEN - 1] = '\0';

out:
        mutex_unlock(&ashmem_mutex);
        return ret;
}

static int get_name(struct ashmem_area *asma, void __user *name)
{
        int ret = 0;

        mutex_lock(&ashmem_mutex);
        if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
                size_t len;

                /*
                 * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
                 * prevents us from revealing one user's stack to another.
                 */
                len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
                if (unlikely(copy_to_user(name,
                                asma->name + ASHMEM_NAME_PREFIX_LEN, len)))
                        ret = -EFAULT;
        } else {
                if (unlikely(copy_to_user(name, ASHMEM_NAME_DEF,
                                          sizeof(ASHMEM_NAME_DEF))))
                        ret = -EFAULT;
        }
        mutex_unlock(&ashmem_mutex);

        return ret;
}
/*
 * ashmem_pin - pin the given ashmem region, returning whether it was
 * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
        struct ashmem_range *range, *next;
        int ret = ASHMEM_NOT_PURGED;

        list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
                /* moved past last applicable page; we can short circuit */
                if (range_before_page(range, pgstart))
                        break;

                /*
                 * The user can ask us to pin pages that span multiple ranges,
                 * or to pin pages that aren't even unpinned, so this is messy.
                 *
                 * Four cases:
                 * 1. The requested range subsumes an existing range, so we
                 *    just remove the entire matching range.
                 * 2. The requested range overlaps the start of an existing
                 *    range, so we just update that range.
                 * 3. The requested range overlaps the end of an existing
                 *    range, so we just update that range.
                 * 4. The requested range punches a hole in an existing range,
                 *    so we have to update one side of the range and then
                 *    create a new range for the other side.
                 */
                if (page_range_in_range(range, pgstart, pgend)) {
                        ret |= range->purged;

                        /* Case #1: Easy. Just nuke the whole thing. */
                        if (page_range_subsumes_range(range, pgstart, pgend)) {
                                range_del(range);
                                continue;
                        }

                        /* Case #2: We overlap from the start, so adjust it */
                        if (range->pgstart >= pgstart) {
                                range_shrink(range, pgend + 1, range->pgend);
                                continue;
                        }

                        /* Case #3: We overlap from the rear, so adjust it */
                        if (range->pgend <= pgend) {
                                range_shrink(range, range->pgstart,
                                             pgstart - 1);
                                continue;
                        }

                        /*
                         * Case #4: We eat a chunk out of the middle. A bit
                         * more complicated, we allocate a new range for the
                         * second half and adjust the first chunk's endpoint.
                         */
                        range_alloc(asma, range, range->purged,
                                    pgend + 1, range->pgend);
                        range_shrink(range, range->pgstart, pgstart - 1);
                        break;
                }
        }

        return ret;
}
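
/*
 * Worked example of case #4 above: with a single unpinned range covering
 * pages 2..9, an ASHMEM_PIN request for pages 4..6 first allocates a new
 * unpinned range for the tail (pages 7..9) and then shrinks the original
 * range to the head (pages 2..3), leaving pages 4..6 pinned.  The return
 * value still reports whether any of the touched pages had already been
 * purged by the shrinker.
 */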
/*
 * ashmem_unpin - unpin the given range of pages. Returns zero on success.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
        struct ashmem_range *range, *next;
        unsigned int purged = ASHMEM_NOT_PURGED;

restart:
        list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
                /* short circuit: this is our insertion point */
                if (range_before_page(range, pgstart))
                        break;

                /*
                 * The user can ask us to unpin pages that are already entirely
                 * or partially pinned. We handle those two cases here.
                 */
                if (page_range_subsumed_by_range(range, pgstart, pgend))
                        return 0;
                if (page_range_in_range(range, pgstart, pgend)) {
                        pgstart = min_t(size_t, range->pgstart, pgstart);
                        pgend = max_t(size_t, range->pgend, pgend);
                        purged |= range->purged;
                        range_del(range);
                        goto restart;
                }
        }

        return range_alloc(asma, range, purged, pgstart, pgend);
}
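
/*
 * Worked example of the merging above: with unpinned ranges covering
 * pages 2..3 and 7..9, an ASHMEM_UNPIN request for pages 4..8 overlaps
 * the 7..9 range, so that range is deleted and the request widens to
 * pages 4..9 before being re-inserted.  The result is two unpinned
 * ranges, 2..3 and 4..9; overlapping ranges are merged, but merely
 * adjacent ones are left separate.
 */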
/*
 * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
 * given interval are unpinned and ASHMEM_IS_PINNED otherwise.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
                                 size_t pgend)
{
        struct ashmem_range *range;
        int ret = ASHMEM_IS_PINNED;

        list_for_each_entry(range, &asma->unpinned_list, unpinned) {
                if (range_before_page(range, pgstart))
                        break;
                if (page_range_in_range(range, pgstart, pgend)) {
                        ret = ASHMEM_IS_UNPINNED;
                        break;
                }
        }

        return ret;
}
static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
                            void __user *p)
{
        struct ashmem_pin pin;
        size_t pgstart, pgend;
        int ret = -EINVAL;

        if (unlikely(!asma->file))
                return -EINVAL;

        if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
                return -EFAULT;

        /* per custom, you can pass zero for len to mean "everything onward" */
        if (!pin.len)
                pin.len = PAGE_ALIGN(asma->size) - pin.offset;

        if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
                return -EINVAL;

        if (unlikely(((__u32) -1) - pin.offset < pin.len))
                return -EINVAL;

        if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
                return -EINVAL;

        pgstart = pin.offset / PAGE_SIZE;
        pgend = pgstart + (pin.len / PAGE_SIZE) - 1;

        mutex_lock(&ashmem_mutex);

        switch (cmd) {
        case ASHMEM_PIN:
                ret = ashmem_pin(asma, pgstart, pgend);
                break;
        case ASHMEM_UNPIN:
                ret = ashmem_unpin(asma, pgstart, pgend);
                break;
        case ASHMEM_GET_PIN_STATUS:
                ret = ashmem_get_pin_status(asma, pgstart, pgend);
                break;
        }

        mutex_unlock(&ashmem_mutex);

        return ret;
}
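
/*
 * Example of the offset/len arithmetic above, assuming 4096-byte pages
 * and asma->size == 16384: a struct ashmem_pin of { .offset = 8192,
 * .len = 0 } has its len defaulted to PAGE_ALIGN(16384) - 8192 = 8192,
 * so pgstart = 8192 / 4096 = 2 and pgend = 2 + (8192 / 4096) - 1 = 3,
 * i.e. the operation covers pages 2 and 3 (the second half of the
 * region).  Both offset and len must be page-aligned.
 */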
static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct ashmem_area *asma = file->private_data;
        long ret = -ENOTTY;

        switch (cmd) {
        case ASHMEM_SET_NAME:
                ret = set_name(asma, (void __user *) arg);
                break;
        case ASHMEM_GET_NAME:
                ret = get_name(asma, (void __user *) arg);
                break;
        case ASHMEM_SET_SIZE:
                ret = -EINVAL;
                if (!asma->file) {
                        ret = 0;
                        asma->size = (size_t) arg;
                }
                break;
        case ASHMEM_GET_SIZE:
                ret = asma->size;
                break;
        case ASHMEM_SET_PROT_MASK:
                ret = set_prot_mask(asma, arg);
                break;
        case ASHMEM_GET_PROT_MASK:
                ret = asma->prot_mask;
                break;
        case ASHMEM_PIN:
        case ASHMEM_UNPIN:
        case ASHMEM_GET_PIN_STATUS:
                ret = ashmem_pin_unpin(asma, cmd, (void __user *) arg);
                break;
        case ASHMEM_PURGE_ALL_CACHES:
                ret = -EPERM;
                if (capable(CAP_SYS_ADMIN)) {
                        struct shrink_control sc = {
                                .gfp_mask = GFP_KERNEL,
                                .nr_to_scan = 0,
                        };
                        ret = ashmem_shrink(&ashmem_shrinker, &sc);
                        sc.nr_to_scan = ret;
                        ashmem_shrink(&ashmem_shrinker, &sc);
                }
                break;
        }

        return ret;
}
static struct file_operations ashmem_fops = {
        .owner = THIS_MODULE,
        .open = ashmem_open,
        .release = ashmem_release,
        .read = ashmem_read,
        .llseek = ashmem_llseek,
        .mmap = ashmem_mmap,
        .unlocked_ioctl = ashmem_ioctl,
        .compat_ioctl = ashmem_ioctl,
};

static struct miscdevice ashmem_misc = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "ashmem",
        .fops = &ashmem_fops,
};
static int __init ashmem_init(void)
{
        int ret;

        ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
                                               sizeof(struct ashmem_area),
                                               0, 0, NULL);
        if (unlikely(!ashmem_area_cachep)) {
                printk(KERN_ERR "ashmem: failed to create slab cache\n");
                return -ENOMEM;
        }

        ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
                                                sizeof(struct ashmem_range),
                                                0, 0, NULL);
        if (unlikely(!ashmem_range_cachep)) {
                printk(KERN_ERR "ashmem: failed to create slab cache\n");
                ret = -ENOMEM;
                goto out_free_area_cache;
        }

        ret = misc_register(&ashmem_misc);
        if (unlikely(ret)) {
                printk(KERN_ERR "ashmem: failed to register misc device!\n");
                goto out_free_range_cache;
        }

        register_shrinker(&ashmem_shrinker);

        printk(KERN_INFO "ashmem: initialized\n");

        return 0;

out_free_range_cache:
        kmem_cache_destroy(ashmem_range_cachep);
out_free_area_cache:
        kmem_cache_destroy(ashmem_area_cachep);
        return ret;
}

static void __exit ashmem_exit(void)
{
        int ret;

        unregister_shrinker(&ashmem_shrinker);

        ret = misc_deregister(&ashmem_misc);
        if (unlikely(ret))
                printk(KERN_ERR "ashmem: failed to unregister misc device!\n");

        kmem_cache_destroy(ashmem_range_cachep);
        kmem_cache_destroy(ashmem_area_cachep);

        printk(KERN_INFO "ashmem: unloaded\n");
}

module_init(ashmem_init);
module_exit(ashmem_exit);
MODULE_LICENSE("GPL");