/* t10-pi.c */
  1. /*
  2. * t10_pi.c - Functions for generating and verifying T10 Protection
  3. * Information.
  4. *
  5. * Copyright (C) 2007, 2008, 2014 Oracle Corporation
  6. * Written by: Martin K. Petersen <martin.petersen@oracle.com>
  7. *
  8. * This program is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU General Public License version
  10. * 2 as published by the Free Software Foundation.
  11. *
  12. * This program is distributed in the hope that it will be useful, but
  13. * WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU General Public License
  18. * along with this program; see the file COPYING. If not, write to
  19. * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
  20. * USA.
  21. *
  22. */
  23. #include <linux/t10-pi.h>
  24. #include <linux/blkdev.h>
  25. #include <linux/crc-t10dif.h>
  26. #include <net/checksum.h>
  27. typedef __be16 (csum_fn) (void *, unsigned int);
  28. static __be16 t10_pi_crc_fn(void *data, unsigned int len)
  29. {
  30. return cpu_to_be16(crc_t10dif(data, len));
  31. }
  32. static __be16 t10_pi_ip_fn(void *data, unsigned int len)
  33. {
  34. return (__force __be16)ip_compute_csum(data, len);
  35. }
  36. /*
  37. * Type 1 and Type 2 protection use the same format: 16 bit guard tag,
  38. * 16 bit app tag, 32 bit reference tag. Type 3 does not define the ref
  39. * tag.
  40. */
  41. static blk_status_t t10_pi_generate(struct blk_integrity_iter *iter,
  42. csum_fn *fn, unsigned int type)
  43. {
  44. unsigned int i;
  45. for (i = 0 ; i < iter->data_size ; i += iter->interval) {
  46. struct t10_pi_tuple *pi = iter->prot_buf;
  47. pi->guard_tag = fn(iter->data_buf, iter->interval);
  48. pi->app_tag = 0;
  49. if (type == 1)
  50. pi->ref_tag = cpu_to_be32(lower_32_bits(iter->seed));
  51. else
  52. pi->ref_tag = 0;
  53. iter->data_buf += iter->interval;
  54. iter->prot_buf += sizeof(struct t10_pi_tuple);
  55. iter->seed++;
  56. }
  57. return BLK_STS_OK;
  58. }
  59. static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter,
  60. csum_fn *fn, unsigned int type)
  61. {
  62. unsigned int i;
  63. for (i = 0 ; i < iter->data_size ; i += iter->interval) {
  64. struct t10_pi_tuple *pi = iter->prot_buf;
  65. __be16 csum;
  66. switch (type) {
  67. case 1:
  68. case 2:
  69. if (pi->app_tag == T10_PI_APP_ESCAPE)
  70. goto next;
  71. if (be32_to_cpu(pi->ref_tag) !=
  72. lower_32_bits(iter->seed)) {
  73. pr_err("%s: ref tag error at location %llu " \
  74. "(rcvd %u)\n", iter->disk_name,
  75. (unsigned long long)
  76. iter->seed, be32_to_cpu(pi->ref_tag));
  77. return BLK_STS_PROTECTION;
  78. }
  79. break;
  80. case 3:
  81. if (pi->app_tag == T10_PI_APP_ESCAPE &&
  82. pi->ref_tag == T10_PI_REF_ESCAPE)
  83. goto next;
  84. break;
  85. }
  86. csum = fn(iter->data_buf, iter->interval);
  87. if (pi->guard_tag != csum) {
  88. pr_err("%s: guard tag error at sector %llu " \
  89. "(rcvd %04x, want %04x)\n", iter->disk_name,
  90. (unsigned long long)iter->seed,
  91. be16_to_cpu(pi->guard_tag), be16_to_cpu(csum));
  92. return BLK_STS_PROTECTION;
  93. }
  94. next:
  95. iter->data_buf += iter->interval;
  96. iter->prot_buf += sizeof(struct t10_pi_tuple);
  97. iter->seed++;
  98. }
  99. return BLK_STS_OK;
  100. }
/*
 * Thin adapters that bind a checksum implementation (CRC or IP) and a
 * protection type (1 or 3) to the generic generate/verify routines, so
 * each blk_integrity_profile below can point at a plain function.
 */
static blk_status_t t10_pi_type1_generate_crc(struct blk_integrity_iter *iter)
{
	return t10_pi_generate(iter, t10_pi_crc_fn, 1);
}

static blk_status_t t10_pi_type1_generate_ip(struct blk_integrity_iter *iter)
{
	return t10_pi_generate(iter, t10_pi_ip_fn, 1);
}

static blk_status_t t10_pi_type1_verify_crc(struct blk_integrity_iter *iter)
{
	return t10_pi_verify(iter, t10_pi_crc_fn, 1);
}

static blk_status_t t10_pi_type1_verify_ip(struct blk_integrity_iter *iter)
{
	return t10_pi_verify(iter, t10_pi_ip_fn, 1);
}

static blk_status_t t10_pi_type3_generate_crc(struct blk_integrity_iter *iter)
{
	return t10_pi_generate(iter, t10_pi_crc_fn, 3);
}

static blk_status_t t10_pi_type3_generate_ip(struct blk_integrity_iter *iter)
{
	return t10_pi_generate(iter, t10_pi_ip_fn, 3);
}

static blk_status_t t10_pi_type3_verify_crc(struct blk_integrity_iter *iter)
{
	return t10_pi_verify(iter, t10_pi_crc_fn, 3);
}

static blk_status_t t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
{
	return t10_pi_verify(iter, t10_pi_ip_fn, 3);
}
/*
 * Exported integrity profiles: one for each supported combination of
 * protection type (1 or 3) and guard tag checksum (CRC or IP).
 */
const struct blk_integrity_profile t10_pi_type1_crc = {
	.name			= "T10-DIF-TYPE1-CRC",
	.generate_fn		= t10_pi_type1_generate_crc,
	.verify_fn		= t10_pi_type1_verify_crc,
};
EXPORT_SYMBOL(t10_pi_type1_crc);

const struct blk_integrity_profile t10_pi_type1_ip = {
	.name			= "T10-DIF-TYPE1-IP",
	.generate_fn		= t10_pi_type1_generate_ip,
	.verify_fn		= t10_pi_type1_verify_ip,
};
EXPORT_SYMBOL(t10_pi_type1_ip);

const struct blk_integrity_profile t10_pi_type3_crc = {
	.name			= "T10-DIF-TYPE3-CRC",
	.generate_fn		= t10_pi_type3_generate_crc,
	.verify_fn		= t10_pi_type3_verify_crc,
};
EXPORT_SYMBOL(t10_pi_type3_crc);

const struct blk_integrity_profile t10_pi_type3_ip = {
	.name			= "T10-DIF-TYPE3-IP",
	.generate_fn		= t10_pi_type3_generate_ip,
	.verify_fn		= t10_pi_type3_verify_ip,
};
EXPORT_SYMBOL(t10_pi_type3_ip);
  157. /**
  158. * t10_pi_prepare - prepare PI prior submitting request to device
  159. * @rq: request with PI that should be prepared
  160. * @protection_type: PI type (Type 1/Type 2/Type 3)
  161. *
  162. * For Type 1/Type 2, the virtual start sector is the one that was
  163. * originally submitted by the block layer for the ref_tag usage. Due to
  164. * partitioning, MD/DM cloning, etc. the actual physical start sector is
  165. * likely to be different. Remap protection information to match the
  166. * physical LBA.
  167. *
  168. * Type 3 does not have a reference tag so no remapping is required.
  169. */
  170. void t10_pi_prepare(struct request *rq, u8 protection_type)
  171. {
  172. const int tuple_sz = rq->q->integrity.tuple_size;
  173. u32 ref_tag = t10_pi_ref_tag(rq);
  174. struct bio *bio;
  175. if (protection_type == T10_PI_TYPE3_PROTECTION)
  176. return;
  177. __rq_for_each_bio(bio, rq) {
  178. struct bio_integrity_payload *bip = bio_integrity(bio);
  179. u32 virt = bip_get_seed(bip) & 0xffffffff;
  180. struct bio_vec iv;
  181. struct bvec_iter iter;
  182. /* Already remapped? */
  183. if (bip->bip_flags & BIP_MAPPED_INTEGRITY)
  184. break;
  185. bip_for_each_vec(iv, bip, iter) {
  186. void *p, *pmap;
  187. unsigned int j;
  188. pmap = kmap_atomic(iv.bv_page);
  189. p = pmap + iv.bv_offset;
  190. for (j = 0; j < iv.bv_len; j += tuple_sz) {
  191. struct t10_pi_tuple *pi = p;
  192. if (be32_to_cpu(pi->ref_tag) == virt)
  193. pi->ref_tag = cpu_to_be32(ref_tag);
  194. virt++;
  195. ref_tag++;
  196. p += tuple_sz;
  197. }
  198. kunmap_atomic(pmap);
  199. }
  200. bip->bip_flags |= BIP_MAPPED_INTEGRITY;
  201. }
  202. }
  203. EXPORT_SYMBOL(t10_pi_prepare);
  204. /**
  205. * t10_pi_complete - prepare PI prior returning request to the block layer
  206. * @rq: request with PI that should be prepared
  207. * @protection_type: PI type (Type 1/Type 2/Type 3)
  208. * @intervals: total elements to prepare
  209. *
  210. * For Type 1/Type 2, the virtual start sector is the one that was
  211. * originally submitted by the block layer for the ref_tag usage. Due to
  212. * partitioning, MD/DM cloning, etc. the actual physical start sector is
  213. * likely to be different. Since the physical start sector was submitted
  214. * to the device, we should remap it back to virtual values expected by the
  215. * block layer.
  216. *
  217. * Type 3 does not have a reference tag so no remapping is required.
  218. */
  219. void t10_pi_complete(struct request *rq, u8 protection_type,
  220. unsigned int intervals)
  221. {
  222. const int tuple_sz = rq->q->integrity.tuple_size;
  223. u32 ref_tag = t10_pi_ref_tag(rq);
  224. struct bio *bio;
  225. if (protection_type == T10_PI_TYPE3_PROTECTION)
  226. return;
  227. __rq_for_each_bio(bio, rq) {
  228. struct bio_integrity_payload *bip = bio_integrity(bio);
  229. u32 virt = bip_get_seed(bip) & 0xffffffff;
  230. struct bio_vec iv;
  231. struct bvec_iter iter;
  232. bip_for_each_vec(iv, bip, iter) {
  233. void *p, *pmap;
  234. unsigned int j;
  235. pmap = kmap_atomic(iv.bv_page);
  236. p = pmap + iv.bv_offset;
  237. for (j = 0; j < iv.bv_len && intervals; j += tuple_sz) {
  238. struct t10_pi_tuple *pi = p;
  239. if (be32_to_cpu(pi->ref_tag) == ref_tag)
  240. pi->ref_tag = cpu_to_be32(virt);
  241. virt++;
  242. ref_tag++;
  243. intervals--;
  244. p += tuple_sz;
  245. }
  246. kunmap_atomic(pmap);
  247. }
  248. }
  249. }
  250. EXPORT_SYMBOL(t10_pi_complete);