/* crypto/scompress.c */
  1. /*
  2. * Synchronous Compression operations
  3. *
  4. * Copyright 2015 LG Electronics Inc.
  5. * Copyright (c) 2016, Intel Corporation
  6. * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
  7. *
  8. * This program is free software; you can redistribute it and/or modify it
  9. * under the terms of the GNU General Public License as published by the Free
  10. * Software Foundation; either version 2 of the License, or (at your option)
  11. * any later version.
  12. *
  13. */
  14. #include <linux/errno.h>
  15. #include <linux/kernel.h>
  16. #include <linux/module.h>
  17. #include <linux/seq_file.h>
  18. #include <linux/slab.h>
  19. #include <linux/string.h>
  20. #include <linux/crypto.h>
  21. #include <linux/compiler.h>
  22. #include <linux/vmalloc.h>
  23. #include <crypto/algapi.h>
  24. #include <linux/cryptouser.h>
  25. #include <net/netlink.h>
  26. #include <linux/scatterlist.h>
  27. #include <crypto/scatterwalk.h>
  28. #include <crypto/internal/acompress.h>
  29. #include <crypto/internal/scompress.h>
  30. #include "internal.h"
static const struct crypto_type crypto_scomp_type;
/*
 * Per-CPU scratch buffers shared by every scomp transform: source and
 * destination staging areas for linearizing scatterlist data.
 */
static void * __percpu *scomp_src_scratches;
static void * __percpu *scomp_dst_scratches;
/* Number of live tfms using the scratch buffers; protected by scomp_lock. */
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);
  36. #ifdef CONFIG_NET
  37. static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
  38. {
  39. struct crypto_report_comp rscomp;
  40. strncpy(rscomp.type, "scomp", sizeof(rscomp.type));
  41. if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
  42. sizeof(struct crypto_report_comp), &rscomp))
  43. goto nla_put_failure;
  44. return 0;
  45. nla_put_failure:
  46. return -EMSGSIZE;
  47. }
  48. #else
/* Stub for !CONFIG_NET builds: netlink reporting is unavailable. */
static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
  53. #endif
static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
/* Emit the /proc/crypto type line (only wired up under CONFIG_PROC_FS). */
static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_puts(m, "type : scomp\n");
}
  60. static void crypto_scomp_free_scratches(void * __percpu *scratches)
  61. {
  62. int i;
  63. if (!scratches)
  64. return;
  65. for_each_possible_cpu(i)
  66. vfree(*per_cpu_ptr(scratches, i));
  67. free_percpu(scratches);
  68. }
  69. static void * __percpu *crypto_scomp_alloc_scratches(void)
  70. {
  71. void * __percpu *scratches;
  72. int i;
  73. scratches = alloc_percpu(void *);
  74. if (!scratches)
  75. return NULL;
  76. for_each_possible_cpu(i) {
  77. void *scratch;
  78. scratch = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
  79. if (!scratch)
  80. goto error;
  81. *per_cpu_ptr(scratches, i) = scratch;
  82. }
  83. return scratches;
  84. error:
  85. crypto_scomp_free_scratches(scratches);
  86. return NULL;
  87. }
/*
 * Drop one reference on the shared scratch buffers and free them when the
 * last user goes away.  Callers hold scomp_lock around this (see
 * crypto_exit_scomp_ops_async).
 */
static void crypto_scomp_free_all_scratches(void)
{
	if (!--scomp_scratch_users) {
		crypto_scomp_free_scratches(scomp_src_scratches);
		crypto_scomp_free_scratches(scomp_dst_scratches);
		scomp_src_scratches = NULL;
		scomp_dst_scratches = NULL;
	}
}
  97. static int crypto_scomp_alloc_all_scratches(void)
  98. {
  99. if (!scomp_scratch_users++) {
  100. scomp_src_scratches = crypto_scomp_alloc_scratches();
  101. if (!scomp_src_scratches)
  102. return -ENOMEM;
  103. scomp_dst_scratches = crypto_scomp_alloc_scratches();
  104. if (!scomp_dst_scratches) {
  105. crypto_scomp_free_scratches(scomp_src_scratches);
  106. scomp_src_scratches = NULL;
  107. return -ENOMEM;
  108. }
  109. }
  110. return 0;
  111. }
  112. static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
  113. {
  114. int ret;
  115. mutex_lock(&scomp_lock);
  116. ret = crypto_scomp_alloc_all_scratches();
  117. mutex_unlock(&scomp_lock);
  118. return ret;
  119. }
  120. static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
  121. {
  122. struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
  123. void **tfm_ctx = acomp_tfm_ctx(tfm);
  124. struct crypto_scomp *scomp = *tfm_ctx;
  125. void **ctx = acomp_request_ctx(req);
  126. const int cpu = get_cpu();
  127. u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu);
  128. u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu);
  129. int ret;
  130. if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE) {
  131. ret = -EINVAL;
  132. goto out;
  133. }
  134. if (req->dst && !req->dlen) {
  135. ret = -EINVAL;
  136. goto out;
  137. }
  138. if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
  139. req->dlen = SCOMP_SCRATCH_SIZE;
  140. scatterwalk_map_and_copy(scratch_src, req->src, 0, req->slen, 0);
  141. if (dir)
  142. ret = crypto_scomp_compress(scomp, scratch_src, req->slen,
  143. scratch_dst, &req->dlen, *ctx);
  144. else
  145. ret = crypto_scomp_decompress(scomp, scratch_src, req->slen,
  146. scratch_dst, &req->dlen, *ctx);
  147. if (!ret) {
  148. if (!req->dst) {
  149. req->dst = sgl_alloc(req->dlen, GFP_ATOMIC, NULL);
  150. if (!req->dst)
  151. goto out;
  152. }
  153. scatterwalk_map_and_copy(scratch_dst, req->dst, 0, req->dlen,
  154. 1);
  155. }
  156. out:
  157. put_cpu();
  158. return ret;
  159. }
/* acomp ->compress hook: run the synchronous algorithm inline. */
static int scomp_acomp_compress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 1);
}
/* acomp ->decompress hook: run the synchronous algorithm inline. */
static int scomp_acomp_decompress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 0);
}
/*
 * Teardown for an acomp tfm backed by scomp: free the wrapped scomp
 * transform, then drop our reference on the shared scratch buffers
 * under scomp_lock.
 */
static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);

	crypto_free_scomp(*ctx);

	mutex_lock(&scomp_lock);
	crypto_scomp_free_all_scratches();
	mutex_unlock(&scomp_lock);
}
/*
 * Initialize an acomp tfm whose algorithm is actually an scomp: create
 * the underlying scomp transform, stash it in the acomp context, and
 * wire the acomp operations to the synchronous wrappers above.
 *
 * Returns 0 on success, -EAGAIN if the algorithm module is going away,
 * or the error from crypto_create_tfm().
 */
int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	scomp = crypto_create_tfm(calg, &crypto_scomp_type);
	if (IS_ERR(scomp)) {
		/* Release the module reference taken above. */
		crypto_mod_put(calg);
		return PTR_ERR(scomp);
	}

	*ctx = scomp;
	tfm->exit = crypto_exit_scomp_ops_async;

	crt->compress = scomp_acomp_compress;
	crt->decompress = scomp_acomp_decompress;
	crt->dst_free = sgl_free;
	/* Request context holds a single pointer to the per-request ctx. */
	crt->reqsize = sizeof(void *);

	return 0;
}
/*
 * Allocate the scomp per-request context for @req and store it in the
 * request's context area.
 *
 * NOTE: on failure this frees @req itself and returns NULL, so the
 * caller must not touch @req after a NULL return.
 */
struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req)
{
	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
	struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void *ctx;

	ctx = crypto_scomp_alloc_ctx(scomp);
	if (IS_ERR(ctx)) {
		kfree(req);
		return NULL;
	}

	*req->__ctx = ctx;

	return req;
}
/* Free the scomp per-request context stored in @req, if any. */
void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
{
	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
	struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void *ctx = *req->__ctx;

	if (ctx)
		crypto_scomp_free_ctx(scomp, ctx);
}
/* crypto_type glue binding scomp algorithms into the crypto core. */
static const struct crypto_type crypto_scomp_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_scomp_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_scomp_show,
#endif
	.report = crypto_scomp_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_SCOMPRESS,
	.tfmsize = offsetof(struct crypto_scomp, base),
};
  234. int crypto_register_scomp(struct scomp_alg *alg)
  235. {
  236. struct crypto_alg *base = &alg->base;
  237. base->cra_type = &crypto_scomp_type;
  238. base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
  239. base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;
  240. return crypto_register_alg(base);
  241. }
  242. EXPORT_SYMBOL_GPL(crypto_register_scomp);
/* Unregister a single synchronous compression algorithm. */
int crypto_unregister_scomp(struct scomp_alg *alg)
{
	return crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomp);
  248. int crypto_register_scomps(struct scomp_alg *algs, int count)
  249. {
  250. int i, ret;
  251. for (i = 0; i < count; i++) {
  252. ret = crypto_register_scomp(&algs[i]);
  253. if (ret)
  254. goto err;
  255. }
  256. return 0;
  257. err:
  258. for (--i; i >= 0; --i)
  259. crypto_unregister_scomp(&algs[i]);
  260. return ret;
  261. }
  262. EXPORT_SYMBOL_GPL(crypto_register_scomps);
  263. void crypto_unregister_scomps(struct scomp_alg *algs, int count)
  264. {
  265. int i;
  266. for (i = count - 1; i >= 0; --i)
  267. crypto_unregister_scomp(&algs[i]);
  268. }
  269. EXPORT_SYMBOL_GPL(crypto_unregister_scomps);
  270. MODULE_LICENSE("GPL");
  271. MODULE_DESCRIPTION("Synchronous compression type");