omap-crypto.c

/*
 * OMAP Crypto driver common support routines.
 *
 * Copyright (c) 2017 Texas Instruments Incorporated
 * Tero Kristo <t-kristo@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <crypto/scatterwalk.h>

#include "omap-crypto.h"
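
/*
 * Re-pack the source scatterlist into a new table that covers exactly
 * 'total' bytes: entries are copied over one by one, the final entry is
 * trimmed to the remaining length and marked as the end of the list.
 * Unless OMAP_CRYPTO_FORCE_SINGLE_ENTRY is set, a fresh table is
 * allocated here and *sg is pointed at it; omap_crypto_cleanup() frees
 * it again once the request has completed.
 */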
static int omap_crypto_copy_sg_lists(int total, int bs,
				     struct scatterlist **sg,
				     struct scatterlist *new_sg, u16 flags)
{
	int n = sg_nents(*sg);
	struct scatterlist *tmp;

	if (!(flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY)) {
		new_sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
		if (!new_sg)
			return -ENOMEM;

		sg_init_table(new_sg, n);
	}

	tmp = new_sg;

	while (*sg && total) {
		int len = (*sg)->length;

		if (total < len)
			len = total;

		if (len > 0) {
			total -= len;
			sg_set_page(tmp, sg_page(*sg), len, (*sg)->offset);
			if (total <= 0)
				sg_mark_end(tmp);
			tmp = sg_next(tmp);
		}

		*sg = sg_next(*sg);
	}

	*sg = new_sg;

	return 0;
}
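
/*
 * Bounce the data described by *sg into a freshly allocated, physically
 * contiguous buffer padded up to the block size 'bs', and wrap that
 * buffer in the single-entry scatterlist new_sg, which then replaces
 * *sg. The data itself is only copied in when OMAP_CRYPTO_COPY_DATA is
 * set, and the padding is zeroed when OMAP_CRYPTO_ZERO_BUF is also set.
 */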
static int omap_crypto_copy_sgs(int total, int bs, struct scatterlist **sg,
				struct scatterlist *new_sg, u16 flags)
{
	void *buf;
	int pages;
	int new_len;

	new_len = ALIGN(total, bs);
	pages = get_order(new_len);

	buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
	if (!buf) {
		pr_err("%s: Couldn't allocate pages for unaligned cases.\n",
		       __func__);
		return -ENOMEM;
	}

	if (flags & OMAP_CRYPTO_COPY_DATA) {
		scatterwalk_map_and_copy(buf, *sg, 0, total, 0);
		if (flags & OMAP_CRYPTO_ZERO_BUF)
			memset(buf + total, 0, new_len - total);
	}

	if (!(flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY))
		sg_init_table(new_sg, 1);

	sg_set_buf(new_sg, buf, new_len);

	*sg = new_sg;

	return 0;
}
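
/*
 * Check whether the scatterlist can be used as-is: the total length and
 * every entry length must be a multiple of the block size 'bs', every
 * entry offset must be 32-bit aligned and, with CONFIG_ZONE_DMA, every
 * page must live in ZONE_DMA. A list with more than one entry is also
 * rejected when OMAP_CRYPTO_FORCE_SINGLE_ENTRY is set. Returns
 * OMAP_CRYPTO_NOT_ALIGNED or OMAP_CRYPTO_BAD_DATA_LENGTH when a copy is
 * needed, 0 otherwise.
 */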
static int omap_crypto_check_sg(struct scatterlist *sg, int total, int bs,
				u16 flags)
{
	int len = 0;
	int num_sg = 0;

	if (!IS_ALIGNED(total, bs))
		return OMAP_CRYPTO_NOT_ALIGNED;

	while (sg) {
		num_sg++;

		if (!IS_ALIGNED(sg->offset, 4))
			return OMAP_CRYPTO_NOT_ALIGNED;
		if (!IS_ALIGNED(sg->length, bs))
			return OMAP_CRYPTO_NOT_ALIGNED;
#ifdef CONFIG_ZONE_DMA
		if (page_zonenum(sg_page(sg)) != ZONE_DMA)
			return OMAP_CRYPTO_NOT_ALIGNED;
#endif

		len += sg->length;
		sg = sg_next(sg);

		if (len >= total)
			break;
	}

	if ((flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY) && num_sg > 1)
		return OMAP_CRYPTO_NOT_ALIGNED;

	if (len != total)
		return OMAP_CRYPTO_BAD_DATA_LENGTH;

	return 0;
}
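
/*
 * Prepare the request data in *sg for the hardware, copying it only
 * when necessary: unaligned data (or OMAP_CRYPTO_FORCE_COPY) is bounced
 * into a padded contiguous buffer and OMAP_CRYPTO_DATA_COPIED is set,
 * while a list whose entries do not add up to exactly 'total' bytes is
 * re-packed and OMAP_CRYPTO_SG_COPIED is set. The chosen action is
 * recorded in *dd_flags at flags_shift so that omap_crypto_cleanup()
 * can undo it later.
 */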
int omap_crypto_align_sg(struct scatterlist **sg, int total, int bs,
			 struct scatterlist *new_sg, u16 flags,
			 u8 flags_shift, unsigned long *dd_flags)
{
	int ret;

	*dd_flags &= ~(OMAP_CRYPTO_COPY_MASK << flags_shift);

	if (flags & OMAP_CRYPTO_FORCE_COPY)
		ret = OMAP_CRYPTO_NOT_ALIGNED;
	else
		ret = omap_crypto_check_sg(*sg, total, bs, flags);

	if (ret == OMAP_CRYPTO_NOT_ALIGNED) {
		ret = omap_crypto_copy_sgs(total, bs, sg, new_sg, flags);
		if (ret)
			return ret;
		*dd_flags |= OMAP_CRYPTO_DATA_COPIED << flags_shift;
	} else if (ret == OMAP_CRYPTO_BAD_DATA_LENGTH) {
		ret = omap_crypto_copy_sg_lists(total, bs, sg, new_sg, flags);
		if (ret)
			return ret;
		if (!(flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY))
			*dd_flags |= OMAP_CRYPTO_SG_COPIED << flags_shift;
	} else if (flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY) {
		sg_set_buf(new_sg, sg_virt(*sg), (*sg)->length);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(omap_crypto_align_sg);
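
/*
 * Undo whatever omap_crypto_align_sg() set up for this request, based
 * on the OMAP_CRYPTO_DATA_COPIED / OMAP_CRYPTO_SG_COPIED bits stored at
 * flags_shift: the processed data is copied back into 'orig' (when one
 * is given) and the bounce buffer or the re-packed scatterlist table is
 * freed.
 */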
void omap_crypto_cleanup(struct scatterlist *sg, struct scatterlist *orig,
			 int offset, int len, u8 flags_shift,
			 unsigned long flags)
{
	void *buf;
	int pages;

	flags >>= flags_shift;
	flags &= OMAP_CRYPTO_COPY_MASK;

	if (!flags)
		return;

	buf = sg_virt(sg);
	pages = get_order(len);

	if (orig && (flags & OMAP_CRYPTO_COPY_MASK))
		scatterwalk_map_and_copy(buf, orig, offset, len, 1);

	if (flags & OMAP_CRYPTO_DATA_COPIED)
		free_pages((unsigned long)buf, pages);
	else if (flags & OMAP_CRYPTO_SG_COPIED)
		kfree(sg);
}
EXPORT_SYMBOL_GPL(omap_crypto_cleanup);
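
/*
 * Illustrative calling sequence only (the driver-side names below, such
 * as 'dd', 'req_sg' and 'shift', are placeholders, not part of this
 * file): a driver aligns its scatterlist before programming the DMA and
 * cleans up afterwards with the same shift value, e.g.
 *
 *	err = omap_crypto_align_sg(&req_sg, total, block_size, bounce_sg,
 *				   OMAP_CRYPTO_COPY_DATA, shift, &dd->flags);
 *	...
 *	omap_crypto_cleanup(req_sg, orig_sg, 0, total, shift, dd->flags);
 */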

MODULE_DESCRIPTION("OMAP crypto support library.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Tero Kristo <t-kristo@ti.com>");