/**
 * AES CCM routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"
static int ccm_aes_nx_set_key(struct crypto_aead *tfm,
                              const u8 *in_key,
                              unsigned int key_len)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;

        nx_ctx_init(nx_ctx, HCOP_FC_AES);

        switch (key_len) {
        case AES_KEYSIZE_128:
                NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
                NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
                nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
                break;
        default:
                return -EINVAL;
        }

        csbcpb->cpb.hdr.mode = NX_MODE_AES_CCM;
        memcpy(csbcpb->cpb.aes_ccm.key, in_key, key_len);

        csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_CCA;
        memcpy(csbcpb_aead->cpb.aes_cca.key, in_key, key_len);

        return 0;
}
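
/*
 * For rfc4309, the material handed to setkey is the AES key with a 3-byte
 * nonce (salt) appended, per RFC 4309 section 7.1.  For example, a 19-byte
 * blob is a 16-byte AES-128 key followed by the salt that later fills
 * bytes 1-3 of the counter block.
 */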
static int ccm4309_aes_nx_set_key(struct crypto_aead *tfm,
                                  const u8 *in_key,
                                  unsigned int key_len)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);

        if (key_len < 3)
                return -EINVAL;

        key_len -= 3;

        memcpy(nx_ctx->priv.ccm.nonce, in_key + key_len, 3);

        return ccm_aes_nx_set_key(tfm, in_key, key_len);
}

static int ccm_aes_nx_setauthsize(struct crypto_aead *tfm,
                                  unsigned int authsize)
{
        /* valid CCM tag lengths per RFC 3610: even values from 4 to 16 */
        switch (authsize) {
        case 4:
        case 6:
        case 8:
        case 10:
        case 12:
        case 14:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        crypto_aead_crt(tfm)->authsize = authsize;

        return 0;
}

static int ccm4309_aes_nx_setauthsize(struct crypto_aead *tfm,
                                      unsigned int authsize)
{
        /* RFC 4309 restricts the ICV to 8, 12 or 16 bytes */
        switch (authsize) {
        case 8:
        case 12:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        crypto_aead_crt(tfm)->authsize = authsize;

        return 0;
}
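
/*
 * The final csize bytes of B0 carry the message length, most significant
 * byte first.  Worked example: msglen = 0x2f with csize = 3 yields the
 * bytes 00 00 2f.  A csize above 4 is clamped to 4, since msglen is a
 * 32-bit value; a smaller csize must still be wide enough to represent
 * msglen, otherwise -EOVERFLOW is returned.
 */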
/* taken from crypto/ccm.c */
static int set_msg_len(u8 *block, unsigned int msglen, int csize)
{
        __be32 data;

        memset(block, 0, csize);
        block += csize;

        if (csize >= 4)
                csize = 4;
        else if (msglen > (unsigned int)(1 << (8 * csize)))
                return -EOVERFLOW;

        data = cpu_to_be32(msglen);
        memcpy(block - csize, (u8 *)&data + 4 - csize, csize);

        return 0;
}
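
/*
 * Byte 0 of a CCM counter block is the flags octet; its low three bits
 * hold L' = L - 1, where L is the byte width of the length field.
 * RFC 3610 allows 2 <= L <= 8, so a value outside 1..7 is an invalid IV.
 * For example, iv[0] = 3 means L = 4, leaving room for an 11-byte nonce.
 */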
/* taken from crypto/ccm.c */
static inline int crypto_ccm_check_iv(const u8 *iv)
{
        /* 2 <= L <= 8, so 1 <= L' <= 7. */
        if (1 > iv[0] || iv[0] > 7)
                return -EINVAL;

        return 0;
}
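
/*
 * B0 is the first block run through CBC-MAC: flags octet, nonce, then the
 * message length.  The flags octet packs the tag length as (M - 2) / 2
 * into bits 3-5 and sets bit 6 when associated data is present.  Worked
 * example: authsize = 8 with AAD present and iv[0] = 3 gives a flags
 * octet of 0x40 | (3 << 3) | 3 = 0x5b.
 */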
/* based on code from crypto/ccm.c */
static int generate_b0(u8 *iv, unsigned int assoclen, unsigned int authsize,
                       unsigned int cryptlen, u8 *b0)
{
        unsigned int l, lp, m = authsize;
        int rc;

        memcpy(b0, iv, 16);

        lp = b0[0];
        l = lp + 1;

        /* set m, bits 3-5 */
        *b0 |= (8 * ((m - 2) / 2));

        /* set adata, bit 6, if associated data is used */
        if (assoclen)
                *b0 |= 64;

        rc = set_msg_len(b0 + 16 - l, cryptlen, l);

        return rc;
}
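
/*
 * generate_pat - compute the partial authentication tag over B0/B1 and
 * the associated data.  The strategy depends on the AAD length: with no
 * AAD only B0 is needed; up to 14 bytes of AAD fit inside B1 and are
 * authenticated in a single pass together with B0; anything larger is
 * streamed through the CCA co-processor in chunks, with B1 encoded per
 * RFC 3610 (a 2-byte AAD length below 2^16 - 2^8, or the 0xff 0xfe
 * marker followed by a 4-byte length above it).
 */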
static int generate_pat(u8 *iv,
                        struct aead_request *req,
                        struct nx_crypto_ctx *nx_ctx,
                        unsigned int authsize,
                        unsigned int nbytes,
                        u8 *out)
{
        struct nx_sg *nx_insg = nx_ctx->in_sg;
        struct nx_sg *nx_outsg = nx_ctx->out_sg;
        unsigned int iauth_len = 0;
        u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL;
        int rc;
        unsigned int max_sg_len;

        /* zero the ctr value */
        memset(iv + 15 - iv[0], 0, iv[0] + 1);

        /* From page 78 of nx_wb.pdf:
         * Note: RFC3610 allows the AAD data to be up to 2^64 - 1 bytes
         * in length.  If a full message is used, the AES CCA implementation
         * restricts the maximum AAD length to 2^32 - 1 bytes.
         * If partial messages are used, the implementation supports
         * 2^64 - 1 bytes maximum AAD length.
         *
         * However, in the cryptoapi's aead_request structure,
         * assoclen is an unsigned int, thus it cannot hold a length
         * value greater than 2^32 - 1.
         * Thus the AAD is further constrained by this and is never
         * greater than 2^32.
         */
        if (!req->assoclen) {
                b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
        } else if (req->assoclen <= 14) {
                /* if associated data is 14 bytes or less, we do one CCM
                 * operation on 2 AES blocks, B0 (stored in the csbcpb) and B1,
                 * which is fed in through the source buffers here */
                b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
                b1 = nx_ctx->priv.ccm.iauth_tag;
                iauth_len = req->assoclen;
        } else if (req->assoclen <= 65280) {
                /* if associated data is less than (2^16 - 2^8), we construct
                 * B1 differently and feed in the associated data to a CCA
                 * operation */
                b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
                b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
                iauth_len = 14;
        } else {
                b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
                b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
                iauth_len = 10;
        }

        /* generate B0 */
        rc = generate_b0(iv, req->assoclen, authsize, nbytes, b0);
        if (rc)
                return rc;

        /* generate B1:
         * add control info for associated data
         * RFC 3610 and NIST Special Publication 800-38C
         */
        if (b1) {
                memset(b1, 0, 16);
                if (req->assoclen <= 65280) {
                        *(u16 *)b1 = (u16)req->assoclen;
                        scatterwalk_map_and_copy(b1 + 2, req->assoc, 0,
                                                 iauth_len, SCATTERWALK_FROM_SG);
                } else {
                        *(u16 *)b1 = (u16)(0xfffe);
                        *(u32 *)&b1[2] = (u32)req->assoclen;
                        scatterwalk_map_and_copy(b1 + 6, req->assoc, 0,
                                                 iauth_len, SCATTERWALK_FROM_SG);
                }
        }

        /* now copy any remaining AAD to scatterlist and call nx... */
        if (!req->assoclen) {
                return rc;
        } else if (req->assoclen <= 14) {
                unsigned int len = 16;

                nx_insg = nx_build_sg_list(nx_insg, b1, &len, nx_ctx->ap->sglen);

                if (len != 16)
                        return -EINVAL;

                nx_outsg = nx_build_sg_list(nx_outsg, tmp, &len,
                                            nx_ctx->ap->sglen);

                if (len != 16)
                        return -EINVAL;

                /* inlen should be negative, indicating to phyp that it's a
                 * pointer to an sg list */
                nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) *
                                        sizeof(struct nx_sg);
                nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) *
                                        sizeof(struct nx_sg);

                NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_ENDE_ENCRYPT;
                NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_INTERMEDIATE;

                result = nx_ctx->csbcpb->cpb.aes_ccm.out_pat_or_mac;

                rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
                                   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
                if (rc)
                        return rc;

                atomic_inc(&(nx_ctx->stats->aes_ops));
                atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
        } else {
                unsigned int processed = 0, to_process;

                processed += iauth_len;

                /* page_limit: number of sg entries that fit on one page */
                max_sg_len = min_t(u64, nx_ctx->ap->sglen,
                                   nx_driver.of.max_sg_len/sizeof(struct nx_sg));
                max_sg_len = min_t(u64, max_sg_len,
                                   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

                do {
                        to_process = min_t(u32, req->assoclen - processed,
                                           nx_ctx->ap->databytelen);

                        nx_insg = nx_walk_and_build(nx_ctx->in_sg,
                                                    nx_ctx->ap->sglen,
                                                    req->assoc, processed,
                                                    &to_process);

                        if ((to_process + processed) < req->assoclen) {
                                NX_CPB_FDM(nx_ctx->csbcpb_aead) |=
                                        NX_FDM_INTERMEDIATE;
                        } else {
                                NX_CPB_FDM(nx_ctx->csbcpb_aead) &=
                                        ~NX_FDM_INTERMEDIATE;
                        }

                        nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) *
                                                sizeof(struct nx_sg);

                        result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;

                        rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
                                           req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
                        if (rc)
                                return rc;

                        /* chain: the next CCA pass continues from this B0 */
                        memcpy(nx_ctx->csbcpb_aead->cpb.aes_cca.b0,
                               nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0,
                               AES_BLOCK_SIZE);

                        NX_CPB_FDM(nx_ctx->csbcpb_aead) |= NX_FDM_CONTINUATION;

                        atomic_inc(&(nx_ctx->stats->aes_ops));
                        atomic64_add(req->assoclen,
                                     &(nx_ctx->stats->aes_bytes));

                        processed += to_process;
                } while (processed < req->assoclen);

                result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
        }

        memcpy(out, result, AES_BLOCK_SIZE);

        return rc;
}
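
/*
 * ccm_nx_decrypt - decrypt and verify in chunks.  The received tag is
 * copied out of the tail of req->src before any payload is processed,
 * the partial authentication tag is generated, and the ciphertext is
 * then fed to the co-processor in a loop; the computed MAC is compared
 * against the saved tag once the loop completes.
 */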
static int ccm_nx_decrypt(struct aead_request *req,
                          struct blkcipher_desc *desc)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        unsigned int nbytes = req->cryptlen;
        unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
        struct nx_ccm_priv *priv = &nx_ctx->priv.ccm;
        unsigned long irq_flags;
        unsigned int processed = 0, to_process;
        int rc = -1;

        spin_lock_irqsave(&nx_ctx->lock, irq_flags);

        nbytes -= authsize;

        /* copy out the auth tag to compare with later */
        scatterwalk_map_and_copy(priv->oauth_tag,
                                 req->src, nbytes, authsize,
                                 SCATTERWALK_FROM_SG);

        rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes,
                          csbcpb->cpb.aes_ccm.in_pat_or_b0);
        if (rc)
                goto out;

        do {
                /* to_process: the AES_BLOCK_SIZE data chunk to process in this
                 * update.  This value is bound by sg list limits.
                 */
                to_process = nbytes - processed;

                if ((to_process + processed) < nbytes)
                        NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
                else
                        NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

                NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

                rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
                                       &to_process, processed,
                                       csbcpb->cpb.aes_ccm.iv_or_ctr);
                if (rc)
                        goto out;

                rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
                                   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
                if (rc)
                        goto out;

                /* for partial completion, copy following for next
                 * entry into loop...
                 */
                memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
                memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
                       csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
                memcpy(csbcpb->cpb.aes_ccm.in_s0,
                       csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);

                NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

                /* update stats */
                atomic_inc(&(nx_ctx->stats->aes_ops));
                atomic64_add(csbcpb->csb.processed_byte_count,
                             &(nx_ctx->stats->aes_bytes));

                processed += to_process;
        } while (processed < nbytes);

        /* compare the computed MAC with the received tag in constant time */
        rc = crypto_memneq(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag,
                           authsize) ? -EBADMSG : 0;
out:
        spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
        return rc;
}
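
/*
 * ccm_nx_encrypt mirrors the decrypt path: generate the partial
 * authentication tag, run the payload through the co-processor with
 * NX_FDM_ENDE_ENCRYPT set, then append the computed MAC to the tail
 * of req->dst.
 */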
static int ccm_nx_encrypt(struct aead_request *req,
                          struct blkcipher_desc *desc)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        unsigned int nbytes = req->cryptlen;
        unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
        unsigned long irq_flags;
        unsigned int processed = 0, to_process;
        int rc = -1;

        spin_lock_irqsave(&nx_ctx->lock, irq_flags);

        rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes,
                          csbcpb->cpb.aes_ccm.in_pat_or_b0);
        if (rc)
                goto out;

        do {
                /* to_process: the AES_BLOCK_SIZE data chunk to process in this
                 * update.  This value is bound by sg list limits.
                 */
                to_process = nbytes - processed;

                if ((to_process + processed) < nbytes)
                        NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
                else
                        NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

                NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;

                rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
                                       &to_process, processed,
                                       csbcpb->cpb.aes_ccm.iv_or_ctr);
                if (rc)
                        goto out;

                rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
                                   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
                if (rc)
                        goto out;

                /* for partial completion, copy following for next
                 * entry into loop...
                 */
                memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
                memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
                       csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
                memcpy(csbcpb->cpb.aes_ccm.in_s0,
                       csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);

                NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

                /* update stats */
                atomic_inc(&(nx_ctx->stats->aes_ops));
                atomic64_add(csbcpb->csb.processed_byte_count,
                             &(nx_ctx->stats->aes_bytes));

                processed += to_process;
        } while (processed < nbytes);

        /* copy out the auth tag */
        scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac,
                                 req->dst, nbytes, authsize,
                                 SCATTERWALK_TO_SG);
out:
        spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
        return rc;
}
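
/*
 * rfc4309 counter-block layout, as assembled below: byte 0 is the flags
 * octet (3, i.e. L' = 3, so a 4-byte length field), bytes 1-3 are the
 * salt saved at setkey time, and bytes 4-11 are the 8-byte per-request
 * IV; the salt and IV together form the 11-byte CCM nonce.
 */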
static int ccm4309_aes_nx_encrypt(struct aead_request *req)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
        struct blkcipher_desc desc;
        u8 *iv = nx_ctx->priv.ccm.iv;

        iv[0] = 3;
        memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
        memcpy(iv + 4, req->iv, 8);

        desc.info = iv;
        desc.tfm = (struct crypto_blkcipher *)req->base.tfm;

        return ccm_nx_encrypt(req, &desc);
}

static int ccm_aes_nx_encrypt(struct aead_request *req)
{
        struct blkcipher_desc desc;
        int rc;

        desc.info = req->iv;
        desc.tfm = (struct crypto_blkcipher *)req->base.tfm;

        rc = crypto_ccm_check_iv(desc.info);
        if (rc)
                return rc;

        return ccm_nx_encrypt(req, &desc);
}

static int ccm4309_aes_nx_decrypt(struct aead_request *req)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
        struct blkcipher_desc desc;
        u8 *iv = nx_ctx->priv.ccm.iv;

        iv[0] = 3;
        memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
        memcpy(iv + 4, req->iv, 8);

        desc.info = iv;
        desc.tfm = (struct crypto_blkcipher *)req->base.tfm;

        return ccm_nx_decrypt(req, &desc);
}

static int ccm_aes_nx_decrypt(struct aead_request *req)
{
        struct blkcipher_desc desc;
        int rc;

        desc.info = req->iv;
        desc.tfm = (struct crypto_blkcipher *)req->base.tfm;

        rc = crypto_ccm_check_iv(desc.info);
        if (rc)
                return rc;

        return ccm_nx_decrypt(req, &desc);
}
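
/*
 * A minimal usage sketch against the legacy (pre-4.2) kernel AEAD API;
 * the buffer names below (key, buf, aad, iv and their lengths) are
 * hypothetical:
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("ccm(aes)", 0, 0);
 *	struct aead_request *req;
 *	struct scatterlist sg, asg;
 *
 *	crypto_aead_setkey(tfm, key, 16);	// NX accepts AES-128 only
 *	crypto_aead_setauthsize(tfm, 8);
 *	req = aead_request_alloc(tfm, GFP_KERNEL);
 *	sg_init_one(&sg, buf, buf_len);		// plaintext plus room for tag
 *	sg_init_one(&asg, aad, aad_len);
 *	aead_request_set_crypt(req, &sg, &sg, ptext_len, iv);
 *	aead_request_set_assoc(req, &asg, aad_len);
 *	rc = crypto_aead_encrypt(req);
 */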

/* tell the block cipher walk routines that this is a stream cipher by
 * setting cra_blocksize to 1.  Even using blkcipher_walk_virt_block
 * during encrypt/decrypt doesn't solve this problem, because it calls
 * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
 * but instead uses this tfm->blocksize. */
struct crypto_alg nx_ccm_aes_alg = {
        .cra_name        = "ccm(aes)",
        .cra_driver_name = "ccm-aes-nx",
        .cra_priority    = 300,
        .cra_flags       = CRYPTO_ALG_TYPE_AEAD |
                           CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize   = 1,
        .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
        .cra_type        = &crypto_aead_type,
        .cra_module      = THIS_MODULE,
        .cra_init        = nx_crypto_ctx_aes_ccm_init,
        .cra_exit        = nx_crypto_ctx_exit,
        .cra_aead = {
                .ivsize      = AES_BLOCK_SIZE,
                .maxauthsize = AES_BLOCK_SIZE,
                .setkey      = ccm_aes_nx_set_key,
                .setauthsize = ccm_aes_nx_setauthsize,
                .encrypt     = ccm_aes_nx_encrypt,
                .decrypt     = ccm_aes_nx_decrypt,
        }
};

struct crypto_alg nx_ccm4309_aes_alg = {
        .cra_name        = "rfc4309(ccm(aes))",
        .cra_driver_name = "rfc4309-ccm-aes-nx",
        .cra_priority    = 300,
        .cra_flags       = CRYPTO_ALG_TYPE_AEAD |
                           CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize   = 1,
        .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
        .cra_type        = &crypto_nivaead_type,
        .cra_module      = THIS_MODULE,
        .cra_init        = nx_crypto_ctx_aes_ccm_init,
        .cra_exit        = nx_crypto_ctx_exit,
        .cra_aead = {
                .ivsize      = 8,
                .maxauthsize = AES_BLOCK_SIZE,
                .setkey      = ccm4309_aes_nx_set_key,
                .setauthsize = ccm4309_aes_nx_setauthsize,
                .encrypt     = ccm4309_aes_nx_encrypt,
                .decrypt     = ccm4309_aes_nx_decrypt,
                .geniv       = "seqiv",
        }
};