- /*
- * This contains encryption functions for per-file encryption.
- *
- * Copyright (C) 2015, Google, Inc.
- * Copyright (C) 2015, Motorola Mobility
- *
- * Written by Michael Halcrow, 2014.
- *
- * Filename encryption additions
- * Uday Savagaonkar, 2014
- * Encryption policy handling additions
- * Ildar Muslukhov, 2014
- * Add fscrypt_pullback_bio_page()
- * Jaegeuk Kim, 2015.
- *
- * This has not yet undergone a rigorous security audit.
- *
- * The usage of AES-XTS should conform to recommendations in NIST
- * Special Publication 800-38E and IEEE P1619/D16.
- */
- #include <linux/pagemap.h>
- #include <linux/mempool.h>
- #include <linux/module.h>
- #include <linux/scatterlist.h>
- #include <linux/ratelimit.h>
- #include <linux/bio.h>
- #include <linux/dcache.h>
- #include <linux/namei.h>
- #include <linux/fscrypto.h>
- static unsigned int num_prealloc_crypto_pages = 32;
- static unsigned int num_prealloc_crypto_ctxs = 128;
- module_param(num_prealloc_crypto_pages, uint, 0444);
- MODULE_PARM_DESC(num_prealloc_crypto_pages,
- "Number of crypto pages to preallocate");
- module_param(num_prealloc_crypto_ctxs, uint, 0444);
- MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
- "Number of crypto contexts to preallocate");
- static mempool_t *fscrypt_bounce_page_pool = NULL;
- static LIST_HEAD(fscrypt_free_ctxs);
- static DEFINE_SPINLOCK(fscrypt_ctx_lock);
- static struct workqueue_struct *fscrypt_read_workqueue;
- static DEFINE_MUTEX(fscrypt_init_mutex);
- static struct kmem_cache *fscrypt_ctx_cachep;
- struct kmem_cache *fscrypt_info_cachep;
- /**
- * fscrypt_release_ctx() - Releases an encryption context
- * @ctx: The encryption context to release.
- *
- * If the encryption context was allocated from the pre-allocated pool, returns
- * it to that pool. Else, frees it.
- *
- * If there is a bounce page in the context, it is released back to the
- * bounce page pool.
- */
- void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
- {
- unsigned long flags;
- if (ctx->flags & FS_WRITE_PATH_FL && ctx->w.bounce_page) {
- mempool_free(ctx->w.bounce_page, fscrypt_bounce_page_pool);
- ctx->w.bounce_page = NULL;
- }
- ctx->w.control_page = NULL;
- if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
- kmem_cache_free(fscrypt_ctx_cachep, ctx);
- } else {
- spin_lock_irqsave(&fscrypt_ctx_lock, flags);
- list_add(&ctx->free_list, &fscrypt_free_ctxs);
- spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
- }
- }
- EXPORT_SYMBOL(fscrypt_release_ctx);
- /**
- * fscrypt_get_ctx() - Gets an encryption context
- * @inode: The inode for which we are doing the crypto
- * @gfp_flags: The gfp flag for memory allocation
- *
- * Allocates and initializes an encryption context.
- *
- * Return: An allocated and initialized encryption context on success, or an
- * ERR_PTR() on failure.
- */
- struct fscrypt_ctx *fscrypt_get_ctx(struct inode *inode, gfp_t gfp_flags)
- {
- struct fscrypt_ctx *ctx = NULL;
- struct fscrypt_info *ci = inode->i_crypt_info;
- unsigned long flags;
- if (ci == NULL)
- return ERR_PTR(-ENOKEY);
- /*
- * We first try getting the ctx from a free list because in
- * the common case the ctx will have an allocated and
- * initialized crypto tfm, so it's probably a worthwhile
- * optimization. For the bounce page, we first try getting it
- * from the kernel allocator because that's just about as fast
- * as getting it from a list and because a cache of free pages
- * should generally be a "last resort" option for a filesystem
- * to be able to do its job.
- */
- spin_lock_irqsave(&fscrypt_ctx_lock, flags);
- ctx = list_first_entry_or_null(&fscrypt_free_ctxs,
- struct fscrypt_ctx, free_list);
- if (ctx)
- list_del(&ctx->free_list);
- spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
- if (!ctx) {
- ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, gfp_flags);
- if (!ctx)
- return ERR_PTR(-ENOMEM);
- ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
- } else {
- ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
- }
- ctx->flags &= ~FS_WRITE_PATH_FL;
- return ctx;
- }
- EXPORT_SYMBOL(fscrypt_get_ctx);
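- /*
- * Illustrative sketch, not part of the original file: the expected pairing of
- * fscrypt_get_ctx() and fscrypt_release_ctx() as seen from a filesystem
- * caller. The surrounding code is hypothetical.
- *
- *     struct fscrypt_ctx *ctx = fscrypt_get_ctx(inode, GFP_NOFS);
- *
- *     if (IS_ERR(ctx))
- *             return PTR_ERR(ctx);
- *     ... use ctx->w (write path) or ctx->r (read path) ...
- *     fscrypt_release_ctx(ctx);
- */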
- /**
- * page_crypt_complete() - completion callback for page crypto
- * @req: The asynchronous cipher request context
- * @res: The result of the cipher operation
- */
- static void page_crypt_complete(struct crypto_async_request *req, int res)
- {
- struct fscrypt_completion_result *ecr = req->data;
- if (res == -EINPROGRESS)
- return;
- ecr->res = res;
- complete(&ecr->completion);
- }
- typedef enum {
- FS_DECRYPT = 0,
- FS_ENCRYPT,
- } fscrypt_direction_t;
- static int do_page_crypto(struct inode *inode,
- fscrypt_direction_t rw, pgoff_t index,
- struct page *src_page, struct page *dest_page,
- gfp_t gfp_flags)
- {
- struct {
- __le64 index;
- u8 padding[FS_XTS_TWEAK_SIZE - sizeof(__le64)];
- } xts_tweak;
- struct skcipher_request *req = NULL;
- DECLARE_FS_COMPLETION_RESULT(ecr);
- struct scatterlist dst, src;
- struct fscrypt_info *ci = inode->i_crypt_info;
- struct crypto_skcipher *tfm = ci->ci_ctfm;
- int res = 0;
- req = skcipher_request_alloc(tfm, gfp_flags);
- if (!req) {
- printk_ratelimited(KERN_ERR
- "%s: crypto_request_alloc() failed\n",
- __func__);
- return -ENOMEM;
- }
- skcipher_request_set_callback(
- req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
- page_crypt_complete, &ecr);
- BUILD_BUG_ON(sizeof(xts_tweak) != FS_XTS_TWEAK_SIZE);
- xts_tweak.index = cpu_to_le64(index);
- memset(xts_tweak.padding, 0, sizeof(xts_tweak.padding));
- sg_init_table(&dst, 1);
- sg_set_page(&dst, dest_page, PAGE_SIZE, 0);
- sg_init_table(&src, 1);
- sg_set_page(&src, src_page, PAGE_SIZE, 0);
- skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE, &xts_tweak);
- if (rw == FS_DECRYPT)
- res = crypto_skcipher_decrypt(req);
- else
- res = crypto_skcipher_encrypt(req);
- if (res == -EINPROGRESS || res == -EBUSY) {
- BUG_ON(req->base.data != &ecr);
- wait_for_completion(&ecr.completion);
- res = ecr.res;
- }
- skcipher_request_free(req);
- if (res) {
- printk_ratelimited(KERN_ERR
- "%s: crypto_skcipher_encrypt() returned %d\n",
- __func__, res);
- return res;
- }
- return 0;
- }
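- /*
- * Worked example of the tweak construction above, added for clarity and not
- * present in the original file. Assuming FS_XTS_TWEAK_SIZE is 16 bytes (as in
- * the fscrypto header of this period), the tweak for page index 5 is the
- * little-endian index followed by zero padding:
- *
- *     05 00 00 00 00 00 00 00  00 00 00 00 00 00 00 00
- */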
- static struct page *alloc_bounce_page(struct fscrypt_ctx *ctx, gfp_t gfp_flags)
- {
- ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
- if (ctx->w.bounce_page == NULL)
- return ERR_PTR(-ENOMEM);
- ctx->flags |= FS_WRITE_PATH_FL;
- return ctx->w.bounce_page;
- }
- /**
- * fscrypt_encrypt_page() - Encrypts a page
- * @inode: The inode for which the encryption should take place
- * @plaintext_page: The page to encrypt. Must be locked.
- * @gfp_flags: The gfp flag for memory allocation
- *
- * Allocates a ciphertext (bounce) page and encrypts plaintext_page into it
- * using the inode's encryption key.
- *
- * Called on the page write path. The caller must call
- * fscrypt_restore_control_page() on the returned ciphertext page to
- * release the bounce buffer and the encryption context.
- *
- * Return: A page containing the encrypted content on success, or an
- * ERR_PTR() on failure.
- */
- struct page *fscrypt_encrypt_page(struct inode *inode,
- struct page *plaintext_page, gfp_t gfp_flags)
- {
- struct fscrypt_ctx *ctx;
- struct page *ciphertext_page = NULL;
- int err;
- BUG_ON(!PageLocked(plaintext_page));
- ctx = fscrypt_get_ctx(inode, gfp_flags);
- if (IS_ERR(ctx))
- return (struct page *)ctx;
- /* The encryption operation will require a bounce page. */
- ciphertext_page = alloc_bounce_page(ctx, gfp_flags);
- if (IS_ERR(ciphertext_page))
- goto errout;
- ctx->w.control_page = plaintext_page;
- err = do_page_crypto(inode, FS_ENCRYPT, plaintext_page->index,
- plaintext_page, ciphertext_page,
- gfp_flags);
- if (err) {
- ciphertext_page = ERR_PTR(err);
- goto errout;
- }
- SetPagePrivate(ciphertext_page);
- set_page_private(ciphertext_page, (unsigned long)ctx);
- lock_page(ciphertext_page);
- return ciphertext_page;
- errout:
- fscrypt_release_ctx(ctx);
- return ciphertext_page;
- }
- EXPORT_SYMBOL(fscrypt_encrypt_page);
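- /*
- * Illustrative sketch, not part of the original file: rough shape of a
- * filesystem write path around fscrypt_encrypt_page(). The synchronous
- * submission helper my_fs_write_page_sync() is hypothetical; asynchronous
- * callers instead restore the control page from their write-completion path
- * via fscrypt_pullback_bio_page() and fscrypt_restore_control_page().
- *
- *     struct page *cipher_page;
- *     int err;
- *
- *     cipher_page = fscrypt_encrypt_page(inode, page, GFP_NOFS);
- *     if (IS_ERR(cipher_page))
- *             return PTR_ERR(cipher_page);
- *     err = my_fs_write_page_sync(cipher_page);
- *     fscrypt_restore_control_page(cipher_page);
- *     return err;
- */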
- /**
- * fscrypt_decrypt_page() - Decrypts a page in-place
- * @page: The page to decrypt. Must be locked.
- *
- * Decrypts the page in-place using the encryption key of the page's inode.
- *
- * Called from the read completion callback.
- *
- * Return: Zero on success, non-zero otherwise.
- */
- int fscrypt_decrypt_page(struct page *page)
- {
- BUG_ON(!PageLocked(page));
- return do_page_crypto(page->mapping->host,
- FS_DECRYPT, page->index, page, page, GFP_NOFS);
- }
- EXPORT_SYMBOL(fscrypt_decrypt_page);
- int fscrypt_zeroout_range(struct inode *inode, pgoff_t lblk,
- sector_t pblk, unsigned int len)
- {
- struct fscrypt_ctx *ctx;
- struct page *ciphertext_page = NULL;
- struct bio *bio;
- int ret, err = 0;
- BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);
- ctx = fscrypt_get_ctx(inode, GFP_NOFS);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
- ciphertext_page = alloc_bounce_page(ctx, GFP_NOWAIT);
- if (IS_ERR(ciphertext_page)) {
- err = PTR_ERR(ciphertext_page);
- goto errout;
- }
- while (len--) {
- err = do_page_crypto(inode, FS_ENCRYPT, lblk,
- ZERO_PAGE(0), ciphertext_page,
- GFP_NOFS);
- if (err)
- goto errout;
- bio = bio_alloc(GFP_NOWAIT, 1);
- if (!bio) {
- err = -ENOMEM;
- goto errout;
- }
- bio->bi_bdev = inode->i_sb->s_bdev;
- bio->bi_iter.bi_sector =
- pblk << (inode->i_sb->s_blocksize_bits - 9);
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
- ret = bio_add_page(bio, ciphertext_page,
- inode->i_sb->s_blocksize, 0);
- if (ret != inode->i_sb->s_blocksize) {
- /* should never happen! */
- WARN_ON(1);
- bio_put(bio);
- err = -EIO;
- goto errout;
- }
- err = submit_bio_wait(bio);
- if ((err == 0) && bio->bi_error)
- err = -EIO;
- bio_put(bio);
- if (err)
- goto errout;
- lblk++;
- pblk++;
- }
- err = 0;
- errout:
- fscrypt_release_ctx(ctx);
- return err;
- }
- EXPORT_SYMBOL(fscrypt_zeroout_range);
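- /*
- * Illustrative sketch, not part of the original file: a filesystem zeroing an
- * unwritten extent of an encrypted file might call, with lblk, pblk and len
- * taken from its own extent mapping:
- *
- *     err = fscrypt_zeroout_range(inode, lblk, pblk, len);
- *     if (err)
- *             return err;
- */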
- /*
- * Validate dentries for encrypted directories to make sure we aren't
- * potentially caching stale data after a key has been added or
- * removed.
- */
- static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
- {
- struct dentry *dir;
- int dir_has_key, cached_with_key;
- if (flags & LOOKUP_RCU)
- return -ECHILD;
- dir = dget_parent(dentry);
- if (!d_inode(dir)->i_sb->s_cop->is_encrypted(d_inode(dir))) {
- dput(dir);
- return 0;
- }
- /* this should eventually be a flag in d_flags */
- spin_lock(&dentry->d_lock);
- cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
- spin_unlock(&dentry->d_lock);
- dir_has_key = (d_inode(dir)->i_crypt_info != NULL);
- dput(dir);
- /*
- * If the dentry was cached without the key, and it is a
- * negative dentry, it might be a valid name. We can't check
- * if the key has since been made available, due to locking
- * reasons, so we fail the validation and let the filesystem's
- * ->lookup() do this check instead.
- *
- * We also fail the validation if the dentry was created with
- * the key present, but we no longer have the key, or vice versa.
- */
- if ((!cached_with_key && d_is_negative(dentry)) ||
- (!cached_with_key && dir_has_key) ||
- (cached_with_key && !dir_has_key))
- return 0;
- return 1;
- }
- const struct dentry_operations fscrypt_d_ops = {
- .d_revalidate = fscrypt_d_revalidate,
- };
- EXPORT_SYMBOL(fscrypt_d_ops);
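- /*
- * Illustrative sketch, not part of the original file: a filesystem's
- * ->lookup() is expected to attach these dentry operations to names under an
- * encrypted directory, roughly as follows (simplified; dir is the directory
- * inode):
- *
- *     if (dir->i_sb->s_cop->is_encrypted(dir))
- *             d_set_d_op(dentry, &fscrypt_d_ops);
- */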
- /*
- * Call fscrypt_decrypt_page() on every page in the bio, reusing the one
- * encryption context for the whole bio.
- */
- static void completion_pages(struct work_struct *work)
- {
- struct fscrypt_ctx *ctx =
- container_of(work, struct fscrypt_ctx, r.work);
- struct bio *bio = ctx->r.bio;
- struct bio_vec *bv;
- int i;
- bio_for_each_segment_all(bv, bio, i) {
- struct page *page = bv->bv_page;
- int ret = fscrypt_decrypt_page(page);
- if (ret) {
- WARN_ON_ONCE(1);
- SetPageError(page);
- } else {
- SetPageUptodate(page);
- }
- unlock_page(page);
- }
- fscrypt_release_ctx(ctx);
- bio_put(bio);
- }
- void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx, struct bio *bio)
- {
- INIT_WORK(&ctx->r.work, completion_pages);
- ctx->r.bio = bio;
- queue_work(fscrypt_read_workqueue, &ctx->r.work);
- }
- EXPORT_SYMBOL(fscrypt_decrypt_bio_pages);
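- /*
- * Illustrative sketch, not part of the original file: a read bio completion
- * handler deferring decryption to the workqueue. The handler name and the use
- * of bi_private to carry the ctx are assumptions, not something this file
- * mandates.
- *
- *     static void my_fs_read_endio(struct bio *bio)
- *     {
- *             struct fscrypt_ctx *ctx = bio->bi_private;
- *
- *             if (bio->bi_error) {
- *                     fscrypt_release_ctx(ctx);
- *                     ... mark pages PageError, unlock them, bio_put(bio) ...
- *             } else {
- *                     fscrypt_decrypt_bio_pages(ctx, bio);
- *                     ... the workqueue unlocks the pages and puts the bio ...
- *             }
- *     }
- */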
- void fscrypt_pullback_bio_page(struct page **page, bool restore)
- {
- struct fscrypt_ctx *ctx;
- struct page *bounce_page;
- /* The bounce data pages are unmapped. */
- if ((*page)->mapping)
- return;
- /* The bounce data page is unmapped. */
- bounce_page = *page;
- ctx = (struct fscrypt_ctx *)page_private(bounce_page);
- /* restore control page */
- *page = ctx->w.control_page;
- if (restore)
- fscrypt_restore_control_page(bounce_page);
- }
- EXPORT_SYMBOL(fscrypt_pullback_bio_page);
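- /*
- * Illustrative sketch, not part of the original file: a write bio completion
- * path mapping bounce pages back to the pagecache pages they were encrypted
- * from, in the style of the f2fs callers this helper was added for
- * (simplified here).
- *
- *     bio_for_each_segment_all(bvec, bio, i) {
- *             struct page *page = bvec->bv_page;
- *
- *             fscrypt_pullback_bio_page(&page, true);
- *             end_page_writeback(page);
- *     }
- */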
- void fscrypt_restore_control_page(struct page *page)
- {
- struct fscrypt_ctx *ctx;
- ctx = (struct fscrypt_ctx *)page_private(page);
- set_page_private(page, (unsigned long)NULL);
- ClearPagePrivate(page);
- unlock_page(page);
- fscrypt_release_ctx(ctx);
- }
- EXPORT_SYMBOL(fscrypt_restore_control_page);
- static void fscrypt_destroy(void)
- {
- struct fscrypt_ctx *pos, *n;
- list_for_each_entry_safe(pos, n, &fscrypt_free_ctxs, free_list)
- kmem_cache_free(fscrypt_ctx_cachep, pos);
- INIT_LIST_HEAD(&fscrypt_free_ctxs);
- mempool_destroy(fscrypt_bounce_page_pool);
- fscrypt_bounce_page_pool = NULL;
- }
- /**
- * fscrypt_initialize() - allocate major buffers for fs encryption.
- *
- * We only call this when we start accessing encrypted files, since it
- * results in memory getting allocated that wouldn't otherwise be used.
- *
- * Return: Zero on success, non-zero otherwise.
- */
- int fscrypt_initialize(void)
- {
- int i, res = -ENOMEM;
- mutex_lock(&fscrypt_init_mutex);
- if (fscrypt_bounce_page_pool)
- goto already_initialized;
- for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
- struct fscrypt_ctx *ctx;
- ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
- if (!ctx)
- goto fail;
- list_add(&ctx->free_list, &fscrypt_free_ctxs);
- }
- fscrypt_bounce_page_pool =
- mempool_create_page_pool(num_prealloc_crypto_pages, 0);
- if (!fscrypt_bounce_page_pool)
- goto fail;
- already_initialized:
- mutex_unlock(&fscrypt_init_mutex);
- return 0;
- fail:
- fscrypt_destroy();
- mutex_unlock(&fscrypt_init_mutex);
- return res;
- }
- EXPORT_SYMBOL(fscrypt_initialize);
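- /*
- * Illustrative note, not part of the original file: per the comment above,
- * this is invoked lazily before the first encrypted file is accessed, e.g.
- * while setting up an inode's crypt info:
- *
- *     res = fscrypt_initialize();
- *     if (res)
- *             return res;
- */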
- /**
- * fscrypt_init() - Set up for fs encryption.
- */
- static int __init fscrypt_init(void)
- {
- /*
- * Use an unbound workqueue to allow bios to be decrypted in parallel
- * even when they happen to complete on the same CPU. This sacrifices
- * locality, but it's worthwhile since decryption is CPU-intensive.
- *
- * Also use a high-priority workqueue to prioritize decryption work,
- * which blocks reads from completing, over regular application tasks.
- */
- fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
- WQ_UNBOUND | WQ_HIGHPRI,
- num_online_cpus());
- if (!fscrypt_read_workqueue)
- goto fail;
- fscrypt_ctx_cachep = KMEM_CACHE(fscrypt_ctx, SLAB_RECLAIM_ACCOUNT);
- if (!fscrypt_ctx_cachep)
- goto fail_free_queue;
- fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
- if (!fscrypt_info_cachep)
- goto fail_free_ctx;
- return 0;
- fail_free_ctx:
- kmem_cache_destroy(fscrypt_ctx_cachep);
- fail_free_queue:
- destroy_workqueue(fscrypt_read_workqueue);
- fail:
- return -ENOMEM;
- }
- module_init(fscrypt_init)
- /**
- * fscrypt_exit() - Shut down the fs encryption system
- */
- static void __exit fscrypt_exit(void)
- {
- fscrypt_destroy();
- if (fscrypt_read_workqueue)
- destroy_workqueue(fscrypt_read_workqueue);
- kmem_cache_destroy(fscrypt_ctx_cachep);
- kmem_cache_destroy(fscrypt_info_cachep);
- }
- module_exit(fscrypt_exit);
- MODULE_LICENSE("GPL");