badrange.c

/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/libnvdimm.h>
#include <linux/badblocks.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/ndctl.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/io.h>
#include "nd-core.h"
#include "nd.h"

void badrange_init(struct badrange *badrange)
{
	INIT_LIST_HEAD(&badrange->list);
	spin_lock_init(&badrange->lock);
}
EXPORT_SYMBOL_GPL(badrange_init);

static void append_badrange_entry(struct badrange *badrange,
		struct badrange_entry *bre, u64 addr, u64 length)
{
	lockdep_assert_held(&badrange->lock);
	bre->start = addr;
	bre->length = length;
	list_add_tail(&bre->list, &badrange->list);
}

static int alloc_and_append_badrange_entry(struct badrange *badrange,
		u64 addr, u64 length, gfp_t flags)
{
	struct badrange_entry *bre;

	bre = kzalloc(sizeof(*bre), flags);
	if (!bre)
		return -ENOMEM;

	append_badrange_entry(badrange, bre, addr, length);
	return 0;
}

static int add_badrange(struct badrange *badrange, u64 addr, u64 length)
{
	struct badrange_entry *bre, *bre_new;
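
	/*
	 * Speculatively allocate outside the lock so a GFP_KERNEL
	 * allocation can sleep; the list is re-checked once the lock is
	 * re-taken, and the allocation is freed below if it turns out to
	 * be unneeded.
	 */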
	spin_unlock(&badrange->lock);
	bre_new = kzalloc(sizeof(*bre_new), GFP_KERNEL);
	spin_lock(&badrange->lock);

	if (list_empty(&badrange->list)) {
		if (!bre_new)
			return -ENOMEM;
		append_badrange_entry(badrange, bre_new, addr, length);
		return 0;
	}

	/*
	 * There is a chance this is a duplicate, check for those first.
	 * This will be the common case as ARS_STATUS returns all known
	 * errors in the SPA space, and we can't query it per region
	 */
	list_for_each_entry(bre, &badrange->list, list)
		if (bre->start == addr) {
			/* If length has changed, update this list entry */
			if (bre->length != length)
				bre->length = length;
			kfree(bre_new);
			return 0;
		}

	/*
	 * If not a duplicate or a simple length update, add the entry as is,
	 * as any overlapping ranges will get resolved when the list is consumed
	 * and converted to badblocks
	 */
	if (!bre_new)
		return -ENOMEM;

	append_badrange_entry(badrange, bre_new, addr, length);
	return 0;
}

int badrange_add(struct badrange *badrange, u64 addr, u64 length)
{
	int rc;

	spin_lock(&badrange->lock);
	rc = add_badrange(badrange, addr, length);
	spin_unlock(&badrange->lock);

	return rc;
}
EXPORT_SYMBOL_GPL(badrange_add);

void badrange_forget(struct badrange *badrange, phys_addr_t start,
		unsigned int len)
{
	struct list_head *badrange_list = &badrange->list;
	u64 clr_end = start + len - 1;
	struct badrange_entry *bre, *next;

	spin_lock(&badrange->lock);

	/*
	 * [start, clr_end] is the badrange interval being cleared.
	 * [bre->start, bre_end] is the badrange_list entry we're comparing
	 * the above interval against. The badrange list entry may need
	 * to be modified (update either start or length), deleted, or
	 * split into two based on the overlap characteristics
	 */
	list_for_each_entry_safe(bre, next, badrange_list, list) {
		u64 bre_end = bre->start + bre->length - 1;

		/* Skip intervals with no intersection */
		if (bre_end < start)
			continue;
		if (bre->start > clr_end)
			continue;
		/* Delete completely overlapped badrange entries */
		if ((bre->start >= start) && (bre_end <= clr_end)) {
			list_del(&bre->list);
			kfree(bre);
			continue;
		}
		/* Adjust start point of partially cleared entries */
		if ((start <= bre->start) && (clr_end > bre->start)) {
			bre->length -= clr_end - bre->start + 1;
			bre->start = clr_end + 1;
			continue;
		}
		/* Adjust bre->length for partial clearing at the tail end */
		if ((bre->start < start) && (bre_end <= clr_end)) {
			/* bre->start remains the same */
			bre->length = start - bre->start;
			continue;
		}
		/*
		 * If clearing in the middle of an entry, we split it into
		 * two by modifying the current entry to represent one half of
		 * the split, and adding a new entry for the second half.
		 */
		if ((bre->start < start) && (bre_end > clr_end)) {
			u64 new_start = clr_end + 1;
			u64 new_len = bre_end - new_start + 1;

			/* Add new entry covering the right half */
			alloc_and_append_badrange_entry(badrange, new_start,
					new_len, GFP_NOWAIT);

			/* Adjust this entry to cover the left half */
			bre->length = start - bre->start;
			continue;
		}
	}
	spin_unlock(&badrange->lock);
}
EXPORT_SYMBOL_GPL(badrange_forget);
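
/*
 * Worked example of the overlap handling in badrange_forget() above
 * (illustrative only, not from the original source): given a list entry
 * covering [0x1000, 0x2fff] and a clear request for [0x1800, 0x27ff],
 * the "middle" case splits the entry into [0x1000, 0x17ff] (the current
 * entry, shortened) and [0x2800, 0x2fff] (a new entry added with
 * GFP_NOWAIT).
 */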

static void set_badblock(struct badblocks *bb, sector_t s, int num)
{
	dev_dbg(bb->dev, "Found a bad range (0x%llx, 0x%llx)\n",
			(u64) s * 512, (u64) num * 512);
	/* this isn't an error as the hardware will still throw an exception */
	if (badblocks_set(bb, s, num, 1))
		dev_info_once(bb->dev, "%s: failed for sector %llx\n",
				__func__, (u64) s);
}

/**
 * __add_badblock_range() - Convert a physical address range to bad sectors
 * @bb:		badblocks instance to populate
 * @ns_offset:	namespace offset where the error range begins (in bytes)
 * @len:	number of bytes of badrange to be added
 *
 * This assumes that the range provided with (ns_offset, len) is within
 * the bounds of physical addresses for this namespace, i.e. lies in the
 * interval [ns_start, ns_start + ns_size)
 */
static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len)
{
	const unsigned int sector_size = 512;
	sector_t start_sector, end_sector;
	u64 num_sectors;
	u32 rem;

	start_sector = div_u64(ns_offset, sector_size);
	end_sector = div_u64_rem(ns_offset + len, sector_size, &rem);
	if (rem)
		end_sector++;
	num_sectors = end_sector - start_sector;

	if (unlikely(num_sectors > (u64)INT_MAX)) {
		u64 remaining = num_sectors;
		sector_t s = start_sector;

		while (remaining) {
			int done = min_t(u64, remaining, INT_MAX);

			set_badblock(bb, s, done);
			remaining -= done;
			s += done;
		}
	} else
		set_badblock(bb, start_sector, num_sectors);
}
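
/*
 * Worked example for __add_badblock_range() (illustrative only, not from
 * the original source): ns_offset = 1000 and len = 30 give
 * start_sector = 1, and the end byte 1030 is sector 2 with remainder 6,
 * so end_sector is rounded up to 3 and sectors 1-2 (bytes 512-1535) are
 * marked bad.
 */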

static void badblocks_populate(struct badrange *badrange,
		struct badblocks *bb, const struct resource *res)
{
	struct badrange_entry *bre;

	if (list_empty(&badrange->list))
		return;

	list_for_each_entry(bre, &badrange->list, list) {
		u64 bre_end = bre->start + bre->length - 1;

		/* Discard intervals with no intersection */
		if (bre_end < res->start)
			continue;
		if (bre->start > res->end)
			continue;
		/* Deal with any overlap after start of the namespace */
		if (bre->start >= res->start) {
			u64 start = bre->start;
			u64 len;

			if (bre_end <= res->end)
				len = bre->length;
			else
				len = res->start + resource_size(res)
					- bre->start;
			__add_badblock_range(bb, start - res->start, len);
			continue;
		}
		/*
		 * Deal with overlap for badrange starting before
		 * the namespace.
		 */
		if (bre->start < res->start) {
			u64 len;

			if (bre_end < res->end)
				len = bre->start + bre->length - res->start;
			else
				len = resource_size(res);
			__add_badblock_range(bb, 0, len);
		}
	}
}

/**
 * nvdimm_badblocks_populate() - Convert a list of badranges to badblocks
 * @nd_region: parent region of the range to interrogate
 * @bb: badblocks instance to populate
 * @res: resource range to consider
 *
 * The badrange list generated during bus initialization may contain
 * multiple, possibly overlapping physical address ranges. Compare each
 * of these ranges to the resource range currently being initialized,
 * and add badblocks entries for all matching sub-ranges.
 */
void nvdimm_badblocks_populate(struct nd_region *nd_region,
		struct badblocks *bb, const struct resource *res)
{
	struct nvdimm_bus *nvdimm_bus;

	if (!is_memory(&nd_region->dev)) {
		dev_WARN_ONCE(&nd_region->dev, 1,
				"%s only valid for pmem regions\n", __func__);
		return;
	}
	nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);

	nvdimm_bus_lock(&nvdimm_bus->dev);
	badblocks_populate(&nvdimm_bus->badrange, bb, res);
	nvdimm_bus_unlock(&nvdimm_bus->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);
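
/*
 * Illustrative call flow (a sketch for readers, not part of the original
 * file; the addresses and lengths are made up):
 *
 *	struct badrange br;
 *
 *	badrange_init(&br);
 *	badrange_add(&br, 0x20000, 0x1000);    [media error reported]
 *	badrange_forget(&br, 0x20000, 0x200);  [first 512 bytes cleared]
 *
 * On a real bus the struct badrange is embedded in the nvdimm_bus, and a
 * pmem region driver calls nvdimm_badblocks_populate() with its namespace
 * resource so the remaining [0x20200, 0x20fff] range ends up in the block
 * device's badblocks list.
 */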