  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * OMAP Crypto driver common support routines.
  4. *
  5. * Copyright (c) 2017 Texas Instruments Incorporated
  6. * Tero Kristo <t-kristo@ti.com>
  7. */
  8. #include <linux/module.h>
  9. #include <linux/kernel.h>
  10. #include <linux/scatterlist.h>
  11. #include <crypto/scatterwalk.h>
  12. #include "omap-crypto.h"
  13. static int omap_crypto_copy_sg_lists(int total, int bs,
  14. struct scatterlist **sg,
  15. struct scatterlist *new_sg, u16 flags)
  16. {
  17. int n = sg_nents(*sg);
  18. struct scatterlist *tmp;
  19. if (!(flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY)) {
  20. new_sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
  21. if (!new_sg)
  22. return -ENOMEM;
  23. sg_init_table(new_sg, n);
  24. }
  25. tmp = new_sg;
  26. while (*sg && total) {
  27. int len = (*sg)->length;
  28. if (total < len)
  29. len = total;
  30. if (len > 0) {
  31. total -= len;
  32. sg_set_page(tmp, sg_page(*sg), len, (*sg)->offset);
  33. if (total <= 0)
  34. sg_mark_end(tmp);
  35. tmp = sg_next(tmp);
  36. }
  37. *sg = sg_next(*sg);
  38. }
  39. *sg = new_sg;
  40. return 0;
  41. }
  42. static int omap_crypto_copy_sgs(int total, int bs, struct scatterlist **sg,
  43. struct scatterlist *new_sg, u16 flags)
  44. {
  45. void *buf;
  46. int pages;
  47. int new_len;
  48. new_len = ALIGN(total, bs);
  49. pages = get_order(new_len);
  50. buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
  51. if (!buf) {
  52. pr_err("%s: Couldn't allocate pages for unaligned cases.\n",
  53. __func__);
  54. return -ENOMEM;
  55. }
  56. if (flags & OMAP_CRYPTO_COPY_DATA) {
  57. scatterwalk_map_and_copy(buf, *sg, 0, total, 0);
  58. if (flags & OMAP_CRYPTO_ZERO_BUF)
  59. memset(buf + total, 0, new_len - total);
  60. }
  61. if (!(flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY))
  62. sg_init_table(new_sg, 1);
  63. sg_set_buf(new_sg, buf, new_len);
  64. *sg = new_sg;
  65. return 0;
  66. }
  67. static int omap_crypto_check_sg(struct scatterlist *sg, int total, int bs,
  68. u16 flags)
  69. {
  70. int len = 0;
  71. int num_sg = 0;
  72. if (!IS_ALIGNED(total, bs))
  73. return OMAP_CRYPTO_NOT_ALIGNED;
  74. while (sg) {
  75. num_sg++;
  76. if (!IS_ALIGNED(sg->offset, 4))
  77. return OMAP_CRYPTO_NOT_ALIGNED;
  78. if (!IS_ALIGNED(sg->length, bs))
  79. return OMAP_CRYPTO_NOT_ALIGNED;
  80. #ifdef CONFIG_ZONE_DMA
  81. if (page_zonenum(sg_page(sg)) != ZONE_DMA)
  82. return OMAP_CRYPTO_NOT_ALIGNED;
  83. #endif
  84. len += sg->length;
  85. sg = sg_next(sg);
  86. if (len >= total)
  87. break;
  88. }
  89. if ((flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY) && num_sg > 1)
  90. return OMAP_CRYPTO_NOT_ALIGNED;
  91. if (len != total)
  92. return OMAP_CRYPTO_BAD_DATA_LENGTH;
  93. return 0;
  94. }
  95. int omap_crypto_align_sg(struct scatterlist **sg, int total, int bs,
  96. struct scatterlist *new_sg, u16 flags,
  97. u8 flags_shift, unsigned long *dd_flags)
  98. {
  99. int ret;
  100. *dd_flags &= ~(OMAP_CRYPTO_COPY_MASK << flags_shift);
  101. if (flags & OMAP_CRYPTO_FORCE_COPY)
  102. ret = OMAP_CRYPTO_NOT_ALIGNED;
  103. else
  104. ret = omap_crypto_check_sg(*sg, total, bs, flags);
  105. if (ret == OMAP_CRYPTO_NOT_ALIGNED) {
  106. ret = omap_crypto_copy_sgs(total, bs, sg, new_sg, flags);
  107. if (ret)
  108. return ret;
  109. *dd_flags |= OMAP_CRYPTO_DATA_COPIED << flags_shift;
  110. } else if (ret == OMAP_CRYPTO_BAD_DATA_LENGTH) {
  111. ret = omap_crypto_copy_sg_lists(total, bs, sg, new_sg, flags);
  112. if (ret)
  113. return ret;
  114. if (!(flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY))
  115. *dd_flags |= OMAP_CRYPTO_SG_COPIED << flags_shift;
  116. } else if (flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY) {
  117. sg_set_buf(new_sg, sg_virt(*sg), (*sg)->length);
  118. }
  119. return 0;
  120. }
  121. EXPORT_SYMBOL_GPL(omap_crypto_align_sg);
  122. void omap_crypto_cleanup(struct scatterlist *sg, struct scatterlist *orig,
  123. int offset, int len, u8 flags_shift,
  124. unsigned long flags)
  125. {
  126. void *buf;
  127. int pages;
  128. flags >>= flags_shift;
  129. flags &= OMAP_CRYPTO_COPY_MASK;
  130. if (!flags)
  131. return;
  132. buf = sg_virt(sg);
  133. pages = get_order(len);
  134. if (orig && (flags & OMAP_CRYPTO_COPY_MASK))
  135. scatterwalk_map_and_copy(buf, orig, offset, len, 1);
  136. if (flags & OMAP_CRYPTO_DATA_COPIED)
  137. free_pages((unsigned long)buf, pages);
  138. else if (flags & OMAP_CRYPTO_SG_COPIED)
  139. kfree(sg);
  140. }
  141. EXPORT_SYMBOL_GPL(omap_crypto_cleanup);
  142. MODULE_DESCRIPTION("OMAP crypto support library.");
  143. MODULE_LICENSE("GPL v2");
  144. MODULE_AUTHOR("Tero Kristo <t-kristo@ti.com>");