/*
 * linux/fs/ext4/crypto.c
 *
 * Copyright (C) 2015, Google, Inc.
 *
 * This contains encryption functions for ext4
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <crypto/hash.h>
#include <crypto/sha.h>
#include <keys/user-type.h>
#include <keys/encrypted-type.h>
#include <linux/crypto.h>
#include <linux/ecryptfs.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/key.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/spinlock_types.h>

#include "ext4_extents.h"
#include "xattr.h"

/* Encryption added and removed here! (L: */

static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		 "Number of crypto pages to preallocate");
module_param(num_prealloc_crypto_ctxs, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
		 "Number of crypto contexts to preallocate");
static mempool_t *ext4_bounce_page_pool;

static LIST_HEAD(ext4_free_crypto_ctxs);
static DEFINE_SPINLOCK(ext4_crypto_ctx_lock);

static struct kmem_cache *ext4_crypto_ctx_cachep;
struct kmem_cache *ext4_crypt_info_cachep;

/**
 * ext4_release_crypto_ctx() - Releases an encryption context
 * @ctx: The encryption context to release.
 *
 * If the encryption context was allocated from the pre-allocated pool,
 * returns it to that pool. Else, frees it.
 *
 * If there's a bounce page in the context, this frees it as well.
 */
void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx)
{
	unsigned long flags;

	if (ctx->flags & EXT4_WRITE_PATH_FL && ctx->w.bounce_page)
		mempool_free(ctx->w.bounce_page, ext4_bounce_page_pool);
	ctx->w.bounce_page = NULL;
	ctx->w.control_page = NULL;
	if (ctx->flags & EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL) {
		kmem_cache_free(ext4_crypto_ctx_cachep, ctx);
	} else {
		spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
		list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
		spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
	}
}

/**
 * ext4_get_crypto_ctx() - Gets an encryption context
 * @inode: The inode for which we are doing the crypto
 *
 * Allocates and initializes an encryption context.
 *
 * Return: An allocated and initialized encryption context on success; an
 * error pointer otherwise.
 */
struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode)
{
	struct ext4_crypto_ctx *ctx = NULL;
	int res = 0;
	unsigned long flags;
	struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;

	if (ci == NULL)
		return ERR_PTR(-ENOKEY);

	/*
	 * We first try getting the ctx from a free list because in
	 * the common case the ctx will have an allocated and
	 * initialized crypto tfm, so it's probably a worthwhile
	 * optimization. For the bounce page, we first try getting it
	 * from the kernel allocator because that's just about as fast
	 * as getting it from a list and because a cache of free pages
	 * should generally be a "last resort" option for a filesystem
	 * to be able to do its job.
	 */
	spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
	ctx = list_first_entry_or_null(&ext4_free_crypto_ctxs,
				       struct ext4_crypto_ctx, free_list);
	if (ctx)
		list_del(&ctx->free_list);
	spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
	if (!ctx) {
		ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS);
		if (!ctx) {
			res = -ENOMEM;
			goto out;
		}
		ctx->flags |= EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
	} else {
		ctx->flags &= ~EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
	}
	ctx->flags &= ~EXT4_WRITE_PATH_FL;

out:
	if (res) {
		if (!IS_ERR_OR_NULL(ctx))
			ext4_release_crypto_ctx(ctx);
		ctx = ERR_PTR(res);
	}
	return ctx;
}
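/*
 * Illustration only, not part of the original file: the intended pairing
 * of the two helpers above, as a hypothetical caller might use them
 * (ext4_decrypt_one() below follows exactly this pattern):
 *
 *	struct ext4_crypto_ctx *ctx = ext4_get_crypto_ctx(inode);
 *
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *	... do per-page crypto work with ctx ...
 *	ext4_release_crypto_ctx(ctx);
 */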
struct workqueue_struct *ext4_read_workqueue;
static DEFINE_MUTEX(crypto_init);

/**
 * ext4_exit_crypto() - Shutdown the ext4 encryption system
 */
void ext4_exit_crypto(void)
{
	struct ext4_crypto_ctx *pos, *n;

	list_for_each_entry_safe(pos, n, &ext4_free_crypto_ctxs, free_list)
		kmem_cache_free(ext4_crypto_ctx_cachep, pos);
	INIT_LIST_HEAD(&ext4_free_crypto_ctxs);
	if (ext4_bounce_page_pool)
		mempool_destroy(ext4_bounce_page_pool);
	ext4_bounce_page_pool = NULL;
	if (ext4_read_workqueue)
		destroy_workqueue(ext4_read_workqueue);
	ext4_read_workqueue = NULL;
	if (ext4_crypto_ctx_cachep)
		kmem_cache_destroy(ext4_crypto_ctx_cachep);
	ext4_crypto_ctx_cachep = NULL;
	if (ext4_crypt_info_cachep)
		kmem_cache_destroy(ext4_crypt_info_cachep);
	ext4_crypt_info_cachep = NULL;
}

/**
 * ext4_init_crypto() - Set up for ext4 encryption.
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int ext4_init_crypto(void)
{
	int i, res = -ENOMEM;

	mutex_lock(&crypto_init);
	if (ext4_read_workqueue)
		goto already_initialized;

	ext4_read_workqueue = alloc_workqueue("ext4_crypto", WQ_HIGHPRI, 0);
	if (!ext4_read_workqueue)
		goto fail;

	ext4_crypto_ctx_cachep = KMEM_CACHE(ext4_crypto_ctx,
					    SLAB_RECLAIM_ACCOUNT);
	if (!ext4_crypto_ctx_cachep)
		goto fail;

	ext4_crypt_info_cachep = KMEM_CACHE(ext4_crypt_info,
					    SLAB_RECLAIM_ACCOUNT);
	if (!ext4_crypt_info_cachep)
		goto fail;

	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
		struct ext4_crypto_ctx *ctx;

		ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS);
		if (!ctx) {
			res = -ENOMEM;
			goto fail;
		}
		list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
	}

	ext4_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!ext4_bounce_page_pool) {
		res = -ENOMEM;
		goto fail;
	}
already_initialized:
	mutex_unlock(&crypto_init);
	return 0;
fail:
	ext4_exit_crypto();
	mutex_unlock(&crypto_init);
	return res;
}
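/*
 * Illustration only, not part of the original file: ext4_init_crypto()
 * is serialized by crypto_init and returns early once the workqueue
 * exists, so a hypothetical caller on the encrypted-file open path can
 * simply invoke it unconditionally:
 *
 *	res = ext4_init_crypto();
 *	if (res)
 *		return res;
 *	... proceed to set up the inode's crypt info ...
 */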
void ext4_restore_control_page(struct page *data_page)
{
	struct ext4_crypto_ctx *ctx =
		(struct ext4_crypto_ctx *)page_private(data_page);

	set_page_private(data_page, (unsigned long)NULL);
	ClearPagePrivate(data_page);
	unlock_page(data_page);
	ext4_release_crypto_ctx(ctx);
}

/**
 * ext4_crypt_complete() - The completion callback for page encryption
 * @req: The asynchronous encryption request context
 * @res: The result of the encryption operation
 */
static void ext4_crypt_complete(struct crypto_async_request *req, int res)
{
	struct ext4_completion_result *ecr = req->data;

	if (res == -EINPROGRESS)
		return;
	ecr->res = res;
	complete(&ecr->completion);
}

typedef enum {
	EXT4_DECRYPT = 0,
	EXT4_ENCRYPT,
} ext4_direction_t;

static int ext4_page_crypto(struct ext4_crypto_ctx *ctx,
			    struct inode *inode,
			    ext4_direction_t rw,
			    pgoff_t index,
			    struct page *src_page,
			    struct page *dest_page)
{
	u8 xts_tweak[EXT4_XTS_TWEAK_SIZE];
	struct ablkcipher_request *req = NULL;
	DECLARE_EXT4_COMPLETION_RESULT(ecr);
	struct scatterlist dst, src;
	struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
	struct crypto_ablkcipher *tfm = ci->ci_ctfm;
	int res = 0;

	req = ablkcipher_request_alloc(tfm, GFP_NOFS);
	if (!req) {
		printk_ratelimited(KERN_ERR
				   "%s: ablkcipher_request_alloc() failed\n",
				   __func__);
		return -ENOMEM;
	}
	ablkcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		ext4_crypt_complete, &ecr);

	/* The XTS tweak is the page index, zero-padded to EXT4_XTS_TWEAK_SIZE. */
	BUILD_BUG_ON(EXT4_XTS_TWEAK_SIZE < sizeof(index));
	memcpy(xts_tweak, &index, sizeof(index));
	memset(&xts_tweak[sizeof(index)], 0,
	       EXT4_XTS_TWEAK_SIZE - sizeof(index));

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0);
	ablkcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
				     xts_tweak);
	if (rw == EXT4_DECRYPT)
		res = crypto_ablkcipher_decrypt(req);
	else
		res = crypto_ablkcipher_encrypt(req);
	if (res == -EINPROGRESS || res == -EBUSY) {
		BUG_ON(req->base.data != &ecr);
		wait_for_completion(&ecr.completion);
		res = ecr.res;
	}
	ablkcipher_request_free(req);
	if (res) {
		printk_ratelimited(
			KERN_ERR
			"%s: crypto_ablkcipher_%scrypt() returned %d\n",
			__func__, (rw == EXT4_DECRYPT) ? "de" : "en", res);
		return res;
	}
	return 0;
}
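/*
 * Illustration only, not part of the original file: with a 64-bit
 * pgoff_t and EXT4_XTS_TWEAK_SIZE == 16, the tweak built above for page
 * index 5 on a little-endian machine would be:
 *
 *	05 00 00 00 00 00 00 00  00 00 00 00 00 00 00 00
 *
 * Note that the raw memcpy() of the index makes the tweak CPU-endian,
 * so the resulting ciphertext is tied to the host's byte order.
 */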
static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx)
{
	ctx->w.bounce_page = mempool_alloc(ext4_bounce_page_pool, GFP_NOWAIT);
	if (ctx->w.bounce_page == NULL)
		return ERR_PTR(-ENOMEM);
	ctx->flags |= EXT4_WRITE_PATH_FL;
	return ctx->w.bounce_page;
}

/**
 * ext4_encrypt() - Encrypts a page
 * @inode: The inode for which the encryption should take place
 * @plaintext_page: The page to encrypt. Must be locked.
 *
 * Allocates a ciphertext page and encrypts plaintext_page into it using the
 * ctx encryption context.
 *
 * Called on the page write path. The caller must call
 * ext4_restore_control_page() on the returned ciphertext page to
 * release the bounce buffer and the encryption context.
 *
 * Return: An allocated page with the encrypted content on success. Else, an
 * error pointer.
 */
struct page *ext4_encrypt(struct inode *inode,
			  struct page *plaintext_page)
{
	struct ext4_crypto_ctx *ctx;
	struct page *ciphertext_page = NULL;
	int err;

	BUG_ON(!PageLocked(plaintext_page));

	ctx = ext4_get_crypto_ctx(inode);
	if (IS_ERR(ctx))
		return (struct page *)ctx;

	/* The encryption operation will require a bounce page. */
	ciphertext_page = alloc_bounce_page(ctx);
	if (IS_ERR(ciphertext_page))
		goto errout;
	ctx->w.control_page = plaintext_page;
	err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, plaintext_page->index,
			       plaintext_page, ciphertext_page);
	if (err) {
		ciphertext_page = ERR_PTR(err);
	errout:
		ext4_release_crypto_ctx(ctx);
		return ciphertext_page;
	}
	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)ctx);
	lock_page(ciphertext_page);
	return ciphertext_page;
}
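/*
 * Illustration only, not part of the original file: a hypothetical
 * writeback caller submits the returned bounce page for I/O instead of
 * the original, then hands it back as the doc comment above requires:
 *
 *	struct page *data_page = ext4_encrypt(inode, page);
 *
 *	if (IS_ERR(data_page))
 *		return PTR_ERR(data_page);
 *	... submit data_page for I/O and wait for completion ...
 *	ext4_restore_control_page(data_page);
 */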
/**
 * ext4_decrypt() - Decrypts a page in-place
 * @ctx: The encryption context.
 * @page: The page to decrypt. Must be locked.
 *
 * Decrypts page in-place using the ctx encryption context.
 *
 * Called from the read completion callback.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int ext4_decrypt(struct ext4_crypto_ctx *ctx, struct page *page)
{
	BUG_ON(!PageLocked(page));

	return ext4_page_crypto(ctx, page->mapping->host,
				EXT4_DECRYPT, page->index, page, page);
}

/*
 * Convenience function which takes care of allocating and
 * deallocating the encryption context
 */
int ext4_decrypt_one(struct inode *inode, struct page *page)
{
	int ret;
	struct ext4_crypto_ctx *ctx = ext4_get_crypto_ctx(inode);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);
	ret = ext4_decrypt(ctx, page);
	ext4_release_crypto_ctx(ctx);
	return ret;
}
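/*
 * Illustration only, not part of the original file: a hypothetical
 * read-completion handler, running off ext4_read_workqueue, would
 * decrypt each page in-place before marking it up to date:
 *
 *	if (ext4_decrypt_one(inode, page))
 *		SetPageError(page);
 *	else
 *		SetPageUptodate(page);
 *	unlock_page(page);
 */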
int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
{
	struct ext4_crypto_ctx *ctx;
	struct page *ciphertext_page = NULL;
	struct bio *bio;
	ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
	ext4_fsblk_t pblk = ext4_ext_pblock(ex);
	unsigned int len = ext4_ext_get_actual_len(ex);
	int ret, err = 0;

	BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE);

	ctx = ext4_get_crypto_ctx(inode);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ciphertext_page = alloc_bounce_page(ctx);
	if (IS_ERR(ciphertext_page)) {
		err = PTR_ERR(ciphertext_page);
		goto errout;
	}

	while (len--) {
		err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, lblk,
				       ZERO_PAGE(0), ciphertext_page);
		if (err)
			goto errout;

		bio = bio_alloc(GFP_KERNEL, 1);
		if (!bio) {
			err = -ENOMEM;
			goto errout;
		}
		bio->bi_bdev = inode->i_sb->s_bdev;
		/* bi_sector is in 512-byte units, not filesystem blocks */
		bio->bi_iter.bi_sector =
			pblk << (inode->i_sb->s_blocksize_bits - 9);
		ret = bio_add_page(bio, ciphertext_page,
				   inode->i_sb->s_blocksize, 0);
		if (ret != inode->i_sb->s_blocksize) {
			/* should never happen! */
			WARN_ON(1);
			bio_put(bio);
			err = -EIO;
			goto errout;
		}
		err = submit_bio_wait(WRITE, bio);
		bio_put(bio);
		if (err)
			goto errout;
		lblk++;
		pblk++;
	}
	err = 0;
errout:
	ext4_release_crypto_ctx(ctx);
	return err;
}

bool ext4_valid_contents_enc_mode(uint32_t mode)
{
	return (mode == EXT4_ENCRYPTION_MODE_AES_256_XTS);
}

/**
 * ext4_validate_encryption_key_size() - Validate the encryption key size
 * @mode: The key mode.
 * @size: The key size to validate.
 *
 * Return: The validated key size for @mode. Zero if invalid.
 */
uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size)
{
	if (size == ext4_encryption_key_size(mode))
		return size;
	return 0;
}
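/*
 * Illustration only, not part of the original file: AES-256-XTS takes a
 * 512-bit key (two 256-bit AES keys), so assuming
 * ext4_encryption_key_size(EXT4_ENCRYPTION_MODE_AES_256_XTS) == 64:
 *
 *	ext4_validate_encryption_key_size(EXT4_ENCRYPTION_MODE_AES_256_XTS, 64)
 *		returns 64, while
 *	ext4_validate_encryption_key_size(EXT4_ENCRYPTION_MODE_AES_256_XTS, 32)
 *		returns 0.
 */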