- /*
- * Block Translation Table
- * Copyright (c) 2014-2015, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
- #include <linux/highmem.h>
- #include <linux/debugfs.h>
- #include <linux/blkdev.h>
- #include <linux/module.h>
- #include <linux/device.h>
- #include <linux/mutex.h>
- #include <linux/hdreg.h>
- #include <linux/genhd.h>
- #include <linux/sizes.h>
- #include <linux/ndctl.h>
- #include <linux/fs.h>
- #include <linux/nd.h>
- #include "btt.h"
- #include "nd.h"
- enum log_ent_request {
- LOG_NEW_ENT = 0,
- LOG_OLD_ENT
- };
- static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
- void *buf, size_t n)
- {
- struct nd_btt *nd_btt = arena->nd_btt;
- struct nd_namespace_common *ndns = nd_btt->ndns;
- /* arena offsets are 4K from the base of the device */
- offset += SZ_4K;
- return nvdimm_read_bytes(ndns, offset, buf, n);
- }
- static int arena_write_bytes(struct arena_info *arena, resource_size_t offset,
- void *buf, size_t n)
- {
- struct nd_btt *nd_btt = arena->nd_btt;
- struct nd_namespace_common *ndns = nd_btt->ndns;
- /* arena offsets are 4K from the base of the device */
- offset += SZ_4K;
- return nvdimm_write_bytes(ndns, offset, buf, n);
- }
- static int btt_info_write(struct arena_info *arena, struct btt_sb *super)
- {
- int ret;
- ret = arena_write_bytes(arena, arena->info2off, super,
- sizeof(struct btt_sb));
- if (ret)
- return ret;
- return arena_write_bytes(arena, arena->infooff, super,
- sizeof(struct btt_sb));
- }
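- /*
- * Note: btt_info_write() updates the backup info block at info2off
- * before the primary at infooff, so an interrupted update should
- * leave at least one intact copy of the superblock on media.
- */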
- static int btt_info_read(struct arena_info *arena, struct btt_sb *super)
- {
- WARN_ON(!super);
- return arena_read_bytes(arena, arena->infooff, super,
- sizeof(struct btt_sb));
- }
- /*
- * 'raw' version of btt_map write
- * Assumptions:
- * mapping is in little-endian
- * mapping contains 'E' and 'Z' flags as desired
- */
- static int __btt_map_write(struct arena_info *arena, u32 lba, __le32 mapping)
- {
- u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);
- WARN_ON(lba >= arena->external_nlba);
- return arena_write_bytes(arena, ns_off, &mapping, MAP_ENT_SIZE);
- }
- static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping,
- u32 z_flag, u32 e_flag)
- {
- u32 ze;
- __le32 mapping_le;
- /*
- * This 'mapping' is supposed to be just the LBA mapping, without
- * any flags set, so strip the flag bits.
- */
- mapping &= MAP_LBA_MASK;
- ze = (z_flag << 1) + e_flag;
- switch (ze) {
- case 0:
- /*
- * We want to set neither of the Z or E flags, and
- * in the actual layout, this means setting the bit
- * positions of both to '1' to indicate a 'normal'
- * map entry
- */
- mapping |= MAP_ENT_NORMAL;
- break;
- case 1:
- mapping |= (1 << MAP_ERR_SHIFT);
- break;
- case 2:
- mapping |= (1 << MAP_TRIM_SHIFT);
- break;
- default:
- /*
- * The case where Z and E are both sent in as '1' could be
- * construed as a valid 'normal' case, but we decide not to,
- * to avoid confusion
- */
- WARN_ONCE(1, "Invalid use of Z and E flags\n");
- return -EIO;
- }
- mapping_le = cpu_to_le32(mapping);
- return __btt_map_write(arena, lba, mapping_le);
- }
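- /*
- * For reference (per the MAP_* definitions in btt.h): a map entry is a
- * 32-bit little-endian word with the trim/zero flag in bit 31, the
- * error flag in bit 30, and the postmap block in bits 29:0. The flag
- * combinations written by btt_map_write() above and decoded by
- * btt_map_read() below are:
- * trim=0 err=0: never written; identity mapping (postmap == premap)
- * trim=0 err=1: media error recorded for the mapped block
- * trim=1 err=0: block was discarded/zeroed
- * trim=1 err=1: normal mapped entry (MAP_ENT_NORMAL sets both bits)
- */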
- static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
- int *trim, int *error)
- {
- int ret;
- __le32 in;
- u32 raw_mapping, postmap, ze, z_flag, e_flag;
- u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);
- WARN_ON(lba >= arena->external_nlba);
- ret = arena_read_bytes(arena, ns_off, &in, MAP_ENT_SIZE);
- if (ret)
- return ret;
- raw_mapping = le32_to_cpu(in);
- z_flag = (raw_mapping & MAP_TRIM_MASK) >> MAP_TRIM_SHIFT;
- e_flag = (raw_mapping & MAP_ERR_MASK) >> MAP_ERR_SHIFT;
- ze = (z_flag << 1) + e_flag;
- postmap = raw_mapping & MAP_LBA_MASK;
- /* Reuse the {z,e}_flag variables for *trim and *error */
- z_flag = 0;
- e_flag = 0;
- switch (ze) {
- case 0:
- /* Initial state. Return postmap = premap */
- *mapping = lba;
- break;
- case 1:
- *mapping = postmap;
- e_flag = 1;
- break;
- case 2:
- *mapping = postmap;
- z_flag = 1;
- break;
- case 3:
- *mapping = postmap;
- break;
- default:
- return -EIO;
- }
- if (trim)
- *trim = z_flag;
- if (error)
- *error = e_flag;
- return ret;
- }
- static int btt_log_group_read(struct arena_info *arena, u32 lane,
- struct log_group *log)
- {
- WARN_ON(!log);
- return arena_read_bytes(arena,
- arena->logoff + (lane * LOG_GRP_SIZE), log,
- LOG_GRP_SIZE);
- }
- static struct dentry *debugfs_root;
- static void arena_debugfs_init(struct arena_info *a, struct dentry *parent,
- int idx)
- {
- char dirname[32];
- struct dentry *d;
- /* If, for some reason, the parent bttN dir was not created, exit */
- if (!parent)
- return;
- snprintf(dirname, 32, "arena%d", idx);
- d = debugfs_create_dir(dirname, parent);
- if (IS_ERR_OR_NULL(d))
- return;
- a->debugfs_dir = d;
- debugfs_create_x64("size", S_IRUGO, d, &a->size);
- debugfs_create_x64("external_lba_start", S_IRUGO, d,
- &a->external_lba_start);
- debugfs_create_x32("internal_nlba", S_IRUGO, d, &a->internal_nlba);
- debugfs_create_u32("internal_lbasize", S_IRUGO, d,
- &a->internal_lbasize);
- debugfs_create_x32("external_nlba", S_IRUGO, d, &a->external_nlba);
- debugfs_create_u32("external_lbasize", S_IRUGO, d,
- &a->external_lbasize);
- debugfs_create_u32("nfree", S_IRUGO, d, &a->nfree);
- debugfs_create_u16("version_major", S_IRUGO, d, &a->version_major);
- debugfs_create_u16("version_minor", S_IRUGO, d, &a->version_minor);
- debugfs_create_x64("nextoff", S_IRUGO, d, &a->nextoff);
- debugfs_create_x64("infooff", S_IRUGO, d, &a->infooff);
- debugfs_create_x64("dataoff", S_IRUGO, d, &a->dataoff);
- debugfs_create_x64("mapoff", S_IRUGO, d, &a->mapoff);
- debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff);
- debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off);
- debugfs_create_x32("flags", S_IRUGO, d, &a->flags);
- debugfs_create_u32("log_index_0", S_IRUGO, d, &a->log_index[0]);
- debugfs_create_u32("log_index_1", S_IRUGO, d, &a->log_index[1]);
- }
- static void btt_debugfs_init(struct btt *btt)
- {
- int i = 0;
- struct arena_info *arena;
- btt->debugfs_dir = debugfs_create_dir(dev_name(&btt->nd_btt->dev),
- debugfs_root);
- if (IS_ERR_OR_NULL(btt->debugfs_dir))
- return;
- list_for_each_entry(arena, &btt->arena_list, list) {
- arena_debugfs_init(arena, btt->debugfs_dir, i);
- i++;
- }
- }
- static u32 log_seq(struct log_group *log, int log_idx)
- {
- return le32_to_cpu(log->ent[log_idx].seq);
- }
- /*
- * This function accepts two log entries, and uses the
- * sequence number to find the 'older' entry.
- * If the first slot has never been written (seq == 0), it is
- * stamped (in the in-memory copy) with the initial sequence number.
- * Finally, it returns which of the entries was the older one.
- *
- * TODO: The logic feels a bit kludgey; make it better.
- */
- static int btt_log_get_old(struct arena_info *a, struct log_group *log)
- {
- int idx0 = a->log_index[0];
- int idx1 = a->log_index[1];
- int old;
- /*
- * the first ever time this is seen, the entry goes into [0]
- * the next time, the following logic works out to put this
- * (next) entry into [1]
- */
- if (log_seq(log, idx0) == 0) {
- log->ent[idx0].seq = cpu_to_le32(1);
- return 0;
- }
- if (log_seq(log, idx0) == log_seq(log, idx1))
- return -EINVAL;
- if (log_seq(log, idx0) + log_seq(log, idx1) > 5)
- return -EINVAL;
- if (log_seq(log, idx0) < log_seq(log, idx1)) {
- if ((log_seq(log, idx1) - log_seq(log, idx0)) == 1)
- old = 0;
- else
- old = 1;
- } else {
- if ((log_seq(log, idx0) - log_seq(log, idx1)) == 1)
- old = 1;
- else
- old = 0;
- }
- return old;
- }
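- /*
- * Worked example for the above: sequence numbers cycle 1 -> 2 -> 3 -> 1
- * (0 only appears in a never-written slot), so for a pair of slots the
- * valid combinations and their 'old' member are:
- * (1,2) (2,3) (3,1) -> slot 0 is old
- * (2,1) (3,2) (1,3) -> slot 1 is old
- * Equal values, or a pair that cannot occur in the cycle, is reported
- * as -EINVAL (log corruption).
- */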
- static struct device *to_dev(struct arena_info *arena)
- {
- return &arena->nd_btt->dev;
- }
- /*
- * This function copies the desired (old/new) log entry into ent if
- * it is not NULL. It returns the sub-slot number (0 or 1)
- * where the desired log entry was found. Negative return values
- * indicate errors.
- */
- static int btt_log_read(struct arena_info *arena, u32 lane,
- struct log_entry *ent, int old_flag)
- {
- int ret;
- int old_ent, ret_ent;
- struct log_group log;
- ret = btt_log_group_read(arena, lane, &log);
- if (ret)
- return -EIO;
- old_ent = btt_log_get_old(arena, &log);
- if (old_ent < 0 || old_ent > 1) {
- dev_info(to_dev(arena),
- "log corruption (%d): lane %d seq [%d, %d]\n",
- old_ent, lane, log.ent[arena->log_index[0]].seq,
- log.ent[arena->log_index[1]].seq);
- /* TODO set error state? */
- return -EIO;
- }
- ret_ent = (old_flag ? old_ent : (1 - old_ent));
- if (ent != NULL)
- memcpy(ent, &log.ent[arena->log_index[ret_ent]], LOG_ENT_SIZE);
- return ret_ent;
- }
- /*
- * This function commits a log entry to media
- * It does _not_ prepare the freelist entry for the next write
- * btt_flog_write is the wrapper for updating the freelist elements
- */
- static int __btt_log_write(struct arena_info *arena, u32 lane,
- u32 sub, struct log_entry *ent)
- {
- int ret;
- u32 group_slot = arena->log_index[sub];
- unsigned int log_half = LOG_ENT_SIZE / 2;
- void *src = ent;
- u64 ns_off;
- ns_off = arena->logoff + (lane * LOG_GRP_SIZE) +
- (group_slot * LOG_ENT_SIZE);
- /* split the 16B write into atomic, durable halves */
- ret = arena_write_bytes(arena, ns_off, src, log_half);
- if (ret)
- return ret;
- ns_off += log_half;
- src += log_half;
- return arena_write_bytes(arena, ns_off, src, log_half);
- }
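- /*
- * With the log_entry layout in btt.h (lba, old_map, new_map, seq; 16
- * bytes total), the first 8-byte write above covers lba/old_map and
- * the second covers new_map/seq. Since the sequence number that makes
- * this slot the 'newest' lands only with the final write, a power
- * failure between the two halves leaves the previous slot valid.
- */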
- static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub,
- struct log_entry *ent)
- {
- int ret;
- ret = __btt_log_write(arena, lane, sub, ent);
- if (ret)
- return ret;
- /* prepare the next free entry */
- arena->freelist[lane].sub = 1 - arena->freelist[lane].sub;
- if (++(arena->freelist[lane].seq) == 4)
- arena->freelist[lane].seq = 1;
- arena->freelist[lane].block = le32_to_cpu(ent->old_map);
- return ret;
- }
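- /*
- * Once the flog entry is durable, the lane's in-memory free list is
- * advanced: 'sub' flips to the slot that was just superseded, 'seq'
- * steps through the 1->2->3 cycle, and 'block' becomes ent->old_map,
- * i.e. the block that is released for reuse once the subsequent map
- * update points the premap LBA at new_map.
- */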
- /*
- * This function initializes the BTT map to the initial state, which is
- * all-zeroes, and indicates an identity mapping
- */
- static int btt_map_init(struct arena_info *arena)
- {
- int ret = -EINVAL;
- void *zerobuf;
- size_t offset = 0;
- size_t chunk_size = SZ_2M;
- size_t mapsize = arena->logoff - arena->mapoff;
- zerobuf = kzalloc(chunk_size, GFP_KERNEL);
- if (!zerobuf)
- return -ENOMEM;
- while (mapsize) {
- size_t size = min(mapsize, chunk_size);
- ret = arena_write_bytes(arena, arena->mapoff + offset, zerobuf,
- size);
- if (ret)
- goto free;
- offset += size;
- mapsize -= size;
- cond_resched();
- }
- free:
- kfree(zerobuf);
- return ret;
- }
- /*
- * This function initializes the BTT log with 'fake' entries pointing
- * to the initial reserved set of blocks as being free
- */
- static int btt_log_init(struct arena_info *arena)
- {
- int ret;
- u32 i;
- struct log_entry ent, zerolog;
- memset(&zerolog, 0, sizeof(zerolog));
- for (i = 0; i < arena->nfree; i++) {
- ent.lba = cpu_to_le32(i);
- ent.old_map = cpu_to_le32(arena->external_nlba + i);
- ent.new_map = cpu_to_le32(arena->external_nlba + i);
- ent.seq = cpu_to_le32(LOG_SEQ_INIT);
- ret = __btt_log_write(arena, i, 0, &ent);
- if (ret)
- return ret;
- ret = __btt_log_write(arena, i, 1, &zerolog);
- if (ret)
- return ret;
- }
- return 0;
- }
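- /*
- * The nfree blocks between external_nlba and internal_nlba (see
- * alloc_arena()) start out as each lane's free block: old_map ==
- * new_map == external_nlba + i, which btt_freelist_init() recognizes
- * as an untouched flog entry that needs no map recovery.
- */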
- static int btt_freelist_init(struct arena_info *arena)
- {
- int old, new, ret;
- u32 i, map_entry;
- struct log_entry log_new, log_old;
- arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry),
- GFP_KERNEL);
- if (!arena->freelist)
- return -ENOMEM;
- for (i = 0; i < arena->nfree; i++) {
- old = btt_log_read(arena, i, &log_old, LOG_OLD_ENT);
- if (old < 0)
- return old;
- new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT);
- if (new < 0)
- return new;
- /* sub points to the next one to be overwritten */
- arena->freelist[i].sub = 1 - new;
- arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq));
- arena->freelist[i].block = le32_to_cpu(log_new.old_map);
- /* This implies a newly created or untouched flog entry */
- if (log_new.old_map == log_new.new_map)
- continue;
- /* Check if map recovery is needed */
- ret = btt_map_read(arena, le32_to_cpu(log_new.lba), &map_entry,
- NULL, NULL);
- if (ret)
- return ret;
- if ((le32_to_cpu(log_new.new_map) != map_entry) &&
- (le32_to_cpu(log_new.old_map) == map_entry)) {
- /*
- * Last transaction wrote the flog, but wasn't able
- * to complete the map write. So fix up the map.
- */
- ret = btt_map_write(arena, le32_to_cpu(log_new.lba),
- le32_to_cpu(log_new.new_map), 0, 0);
- if (ret)
- return ret;
- }
- }
- return 0;
- }
- static bool ent_is_padding(struct log_entry *ent)
- {
- return (ent->lba == 0) && (ent->old_map == 0) && (ent->new_map == 0)
- && (ent->seq == 0);
- }
- /*
- * Detecting valid log indices: We read a log group (see the comments in btt.h
- * for a description of a 'log_group' and its 'slots'), and iterate over its
- * four slots. We expect that a padding slot will be all-zeroes, and use this
- * to detect a padding slot vs. an actual entry.
- *
- * If a log_group is in the initial state, i.e. hasn't been used since the
- * creation of this BTT layout, it will have three of the four slots with
- * zeroes. We skip over these log_groups for the detection of log_index. If
- * all log_groups are in the initial state (i.e. the BTT has never been
- * written to), it is safe to assume the 'new format' of log entries in slots
- * (0, 1).
- */
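- /*
- * Example: the current layout stores the two entries of a group in
- * slots 0 and 1 ({ent, ent, pad, pad}), giving indices (0, 1). The
- * original layout padded each entry out to 32 bytes, which reads back
- * as {ent, pad, ent, pad} and gives indices (0, 2). These are the only
- * two permutations accepted below.
- */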
- static int log_set_indices(struct arena_info *arena)
- {
- bool idx_set = false, initial_state = true;
- int ret, log_index[2] = {-1, -1};
- u32 i, j, next_idx = 0;
- struct log_group log;
- u32 pad_count = 0;
- for (i = 0; i < arena->nfree; i++) {
- ret = btt_log_group_read(arena, i, &log);
- if (ret < 0)
- return ret;
- for (j = 0; j < 4; j++) {
- if (!idx_set) {
- if (ent_is_padding(&log.ent[j])) {
- pad_count++;
- continue;
- } else {
- /* Skip if index has been recorded */
- if ((next_idx == 1) &&
- (j == log_index[0]))
- continue;
- /* valid entry, record index */
- log_index[next_idx] = j;
- next_idx++;
- }
- if (next_idx == 2) {
- /* two valid entries found */
- idx_set = true;
- } else if (next_idx > 2) {
- /* too many valid indices */
- return -ENXIO;
- }
- } else {
- /*
- * once the indices have been set, just verify
- * that all subsequent log groups are either in
- * their initial state or follow the same
- * indices.
- */
- if (j == log_index[0]) {
- /* entry must be 'valid' */
- if (ent_is_padding(&log.ent[j]))
- return -ENXIO;
- } else if (j == log_index[1]) {
- ;
- /*
- * log_index[1] can be padding if the
- * lane never got used and it is still
- * in the initial state (three 'padding'
- * entries)
- */
- } else {
- /* entry must be invalid (padding) */
- if (!ent_is_padding(&log.ent[j]))
- return -ENXIO;
- }
- }
- }
- /*
- * If any of the log_groups have more than one valid,
- * non-padding entry, then we are no longer in the
- * initial_state
- */
- if (pad_count < 3)
- initial_state = false;
- pad_count = 0;
- }
- if (!initial_state && !idx_set)
- return -ENXIO;
- /*
- * If all the entries in the log were in the initial state,
- * assume new padding scheme
- */
- if (initial_state)
- log_index[1] = 1;
- /*
- * Only allow the known permutations of log/padding indices,
- * i.e. (0, 1), and (0, 2)
- */
- if ((log_index[0] == 0) && ((log_index[1] == 1) || (log_index[1] == 2)))
- ; /* known index possibilities */
- else {
- dev_err(to_dev(arena), "Found an unknown padding scheme\n");
- return -ENXIO;
- }
- arena->log_index[0] = log_index[0];
- arena->log_index[1] = log_index[1];
- dev_dbg(to_dev(arena), "log_index_0 = %d\n", log_index[0]);
- dev_dbg(to_dev(arena), "log_index_1 = %d\n", log_index[1]);
- return 0;
- }
- static int btt_rtt_init(struct arena_info *arena)
- {
- arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL);
- if (arena->rtt == NULL)
- return -ENOMEM;
- return 0;
- }
- static int btt_maplocks_init(struct arena_info *arena)
- {
- u32 i;
- arena->map_locks = kcalloc(arena->nfree, sizeof(struct aligned_lock),
- GFP_KERNEL);
- if (!arena->map_locks)
- return -ENOMEM;
- for (i = 0; i < arena->nfree; i++)
- spin_lock_init(&arena->map_locks[i].lock);
- return 0;
- }
- static struct arena_info *alloc_arena(struct btt *btt, size_t size,
- size_t start, size_t arena_off)
- {
- struct arena_info *arena;
- u64 logsize, mapsize, datasize;
- u64 available = size;
- arena = kzalloc(sizeof(struct arena_info), GFP_KERNEL);
- if (!arena)
- return NULL;
- arena->nd_btt = btt->nd_btt;
- if (!size)
- return arena;
- arena->size = size;
- arena->external_lba_start = start;
- arena->external_lbasize = btt->lbasize;
- arena->internal_lbasize = roundup(arena->external_lbasize,
- INT_LBASIZE_ALIGNMENT);
- arena->nfree = BTT_DEFAULT_NFREE;
- arena->version_major = 1;
- arena->version_minor = 1;
- if (available % BTT_PG_SIZE)
- available -= (available % BTT_PG_SIZE);
- /* Two pages are reserved for the super block and its copy */
- available -= 2 * BTT_PG_SIZE;
- /* The log takes a fixed amount of space based on nfree */
- logsize = roundup(arena->nfree * LOG_GRP_SIZE, BTT_PG_SIZE);
- available -= logsize;
- /* Calculate optimal split between map and data area */
- arena->internal_nlba = div_u64(available - BTT_PG_SIZE,
- arena->internal_lbasize + MAP_ENT_SIZE);
- arena->external_nlba = arena->internal_nlba - arena->nfree;
- mapsize = roundup((arena->external_nlba * MAP_ENT_SIZE), BTT_PG_SIZE);
- datasize = available - mapsize;
- /* 'Absolute' values, relative to start of storage space */
- arena->infooff = arena_off;
- arena->dataoff = arena->infooff + BTT_PG_SIZE;
- arena->mapoff = arena->dataoff + datasize;
- arena->logoff = arena->mapoff + mapsize;
- arena->info2off = arena->logoff + logsize;
- /* Default log indices are (0,1) */
- arena->log_index[0] = 0;
- arena->log_index[1] = 1;
- return arena;
- }
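- /*
- * Resulting on-media layout of an arena, each region aligned to
- * BTT_PG_SIZE:
- * infooff: info block (one page)
- * dataoff: data area sized to hold internal_nlba blocks of internal_lbasize
- * mapoff: map (external_nlba entries of MAP_ENT_SIZE)
- * logoff: log (nfree groups of LOG_GRP_SIZE)
- * info2off: backup info block (one page)
- */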
- static void free_arenas(struct btt *btt)
- {
- struct arena_info *arena, *next;
- list_for_each_entry_safe(arena, next, &btt->arena_list, list) {
- list_del(&arena->list);
- kfree(arena->rtt);
- kfree(arena->map_locks);
- kfree(arena->freelist);
- debugfs_remove_recursive(arena->debugfs_dir);
- kfree(arena);
- }
- }
- /*
- * This function parses an existing valid btt superblock and
- * populates the corresponding arena_info struct
- */
- static void parse_arena_meta(struct arena_info *arena, struct btt_sb *super,
- u64 arena_off)
- {
- arena->internal_nlba = le32_to_cpu(super->internal_nlba);
- arena->internal_lbasize = le32_to_cpu(super->internal_lbasize);
- arena->external_nlba = le32_to_cpu(super->external_nlba);
- arena->external_lbasize = le32_to_cpu(super->external_lbasize);
- arena->nfree = le32_to_cpu(super->nfree);
- arena->version_major = le16_to_cpu(super->version_major);
- arena->version_minor = le16_to_cpu(super->version_minor);
- arena->nextoff = (super->nextoff == 0) ? 0 : (arena_off +
- le64_to_cpu(super->nextoff));
- arena->infooff = arena_off;
- arena->dataoff = arena_off + le64_to_cpu(super->dataoff);
- arena->mapoff = arena_off + le64_to_cpu(super->mapoff);
- arena->logoff = arena_off + le64_to_cpu(super->logoff);
- arena->info2off = arena_off + le64_to_cpu(super->info2off);
- arena->size = (le64_to_cpu(super->nextoff) > 0)
- ? (le64_to_cpu(super->nextoff))
- : (arena->info2off - arena->infooff + BTT_PG_SIZE);
- arena->flags = le32_to_cpu(super->flags);
- }
- static int discover_arenas(struct btt *btt)
- {
- int ret = 0;
- struct arena_info *arena;
- struct btt_sb *super;
- size_t remaining = btt->rawsize;
- u64 cur_nlba = 0;
- size_t cur_off = 0;
- int num_arenas = 0;
- super = kzalloc(sizeof(*super), GFP_KERNEL);
- if (!super)
- return -ENOMEM;
- while (remaining) {
- /* Alloc memory for arena */
- arena = alloc_arena(btt, 0, 0, 0);
- if (!arena) {
- ret = -ENOMEM;
- goto out_super;
- }
- arena->infooff = cur_off;
- ret = btt_info_read(arena, super);
- if (ret)
- goto out;
- if (!nd_btt_arena_is_valid(btt->nd_btt, super)) {
- if (remaining == btt->rawsize) {
- btt->init_state = INIT_NOTFOUND;
- dev_info(to_dev(arena), "No existing arenas\n");
- goto out;
- } else {
- dev_info(to_dev(arena),
- "Found corrupted metadata!\n");
- ret = -ENODEV;
- goto out;
- }
- }
- arena->external_lba_start = cur_nlba;
- parse_arena_meta(arena, super, cur_off);
- ret = log_set_indices(arena);
- if (ret) {
- dev_err(to_dev(arena),
- "Unable to deduce log/padding indices\n");
- goto out;
- }
- ret = btt_freelist_init(arena);
- if (ret)
- goto out;
- ret = btt_rtt_init(arena);
- if (ret)
- goto out;
- ret = btt_maplocks_init(arena);
- if (ret)
- goto out;
- list_add_tail(&arena->list, &btt->arena_list);
- remaining -= arena->size;
- cur_off += arena->size;
- cur_nlba += arena->external_nlba;
- num_arenas++;
- if (arena->nextoff == 0)
- break;
- }
- btt->num_arenas = num_arenas;
- btt->nlba = cur_nlba;
- btt->init_state = INIT_READY;
- kfree(super);
- return ret;
- out:
- kfree(arena);
- free_arenas(btt);
- out_super:
- kfree(super);
- return ret;
- }
- static int create_arenas(struct btt *btt)
- {
- size_t remaining = btt->rawsize;
- size_t cur_off = 0;
- while (remaining) {
- struct arena_info *arena;
- size_t arena_size = min_t(u64, ARENA_MAX_SIZE, remaining);
- remaining -= arena_size;
- if (arena_size < ARENA_MIN_SIZE)
- break;
- arena = alloc_arena(btt, arena_size, btt->nlba, cur_off);
- if (!arena) {
- free_arenas(btt);
- return -ENOMEM;
- }
- btt->nlba += arena->external_nlba;
- if (remaining >= ARENA_MIN_SIZE)
- arena->nextoff = arena->size;
- else
- arena->nextoff = 0;
- cur_off += arena_size;
- list_add_tail(&arena->list, &btt->arena_list);
- }
- return 0;
- }
- /*
- * This function completes arena initialization by writing
- * all the metadata.
- * It is only called for an uninitialized arena when a write
- * to that arena occurs for the first time.
- */
- static int btt_arena_write_layout(struct arena_info *arena)
- {
- int ret;
- u64 sum;
- struct btt_sb *super;
- struct nd_btt *nd_btt = arena->nd_btt;
- const u8 *parent_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev);
- ret = btt_map_init(arena);
- if (ret)
- return ret;
- ret = btt_log_init(arena);
- if (ret)
- return ret;
- super = kzalloc(sizeof(struct btt_sb), GFP_NOIO);
- if (!super)
- return -ENOMEM;
- strncpy(super->signature, BTT_SIG, BTT_SIG_LEN);
- memcpy(super->uuid, nd_btt->uuid, 16);
- memcpy(super->parent_uuid, parent_uuid, 16);
- super->flags = cpu_to_le32(arena->flags);
- super->version_major = cpu_to_le16(arena->version_major);
- super->version_minor = cpu_to_le16(arena->version_minor);
- super->external_lbasize = cpu_to_le32(arena->external_lbasize);
- super->external_nlba = cpu_to_le32(arena->external_nlba);
- super->internal_lbasize = cpu_to_le32(arena->internal_lbasize);
- super->internal_nlba = cpu_to_le32(arena->internal_nlba);
- super->nfree = cpu_to_le32(arena->nfree);
- super->infosize = cpu_to_le32(sizeof(struct btt_sb));
- super->nextoff = cpu_to_le64(arena->nextoff);
- /*
- * Subtract arena->infooff (arena start) so numbers are relative
- * to 'this' arena
- */
- super->dataoff = cpu_to_le64(arena->dataoff - arena->infooff);
- super->mapoff = cpu_to_le64(arena->mapoff - arena->infooff);
- super->logoff = cpu_to_le64(arena->logoff - arena->infooff);
- super->info2off = cpu_to_le64(arena->info2off - arena->infooff);
- super->flags = 0;
- sum = nd_sb_checksum((struct nd_gen_sb *) super);
- super->checksum = cpu_to_le64(sum);
- ret = btt_info_write(arena, super);
- kfree(super);
- return ret;
- }
- /*
- * This function completes the initialization for the BTT namespace
- * such that it is ready to accept IOs
- */
- static int btt_meta_init(struct btt *btt)
- {
- int ret = 0;
- struct arena_info *arena;
- mutex_lock(&btt->init_lock);
- list_for_each_entry(arena, &btt->arena_list, list) {
- ret = btt_arena_write_layout(arena);
- if (ret)
- goto unlock;
- ret = btt_freelist_init(arena);
- if (ret)
- goto unlock;
- ret = btt_rtt_init(arena);
- if (ret)
- goto unlock;
- ret = btt_maplocks_init(arena);
- if (ret)
- goto unlock;
- }
- btt->init_state = INIT_READY;
- unlock:
- mutex_unlock(&btt->init_lock);
- return ret;
- }
- static u32 btt_meta_size(struct btt *btt)
- {
- return btt->lbasize - btt->sector_size;
- }
- /*
- * This function calculates the arena in which the given LBA lies
- * by doing a linear walk. This is acceptable since we expect only
- * a few arenas. If we have backing devices that get much larger,
- * we can construct a balanced binary tree of arenas at init time
- * so that this range search becomes faster.
- */
- static int lba_to_arena(struct btt *btt, sector_t sector, __u32 *premap,
- struct arena_info **arena)
- {
- struct arena_info *arena_list;
- __u64 lba = div_u64(sector << SECTOR_SHIFT, btt->sector_size);
- list_for_each_entry(arena_list, &btt->arena_list, list) {
- if (lba < arena_list->external_nlba) {
- *arena = arena_list;
- *premap = lba;
- return 0;
- }
- lba -= arena_list->external_nlba;
- }
- return -EIO;
- }
- /*
- * The following (lock_map, unlock_map) are mostly just to improve
- * readability, since they index into an array of locks
- */
- static void lock_map(struct arena_info *arena, u32 premap)
- __acquires(&arena->map_locks[idx].lock)
- {
- u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;
- spin_lock(&arena->map_locks[idx].lock);
- }
- static void unlock_map(struct arena_info *arena, u32 premap)
- __releases(&arena->map_locks[idx].lock)
- {
- u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;
- spin_unlock(&arena->map_locks[idx].lock);
- }
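- /*
- * The lock index is derived from the cache line the map entry occupies
- * ((premap * MAP_ENT_SIZE) / L1_CACHE_BYTES, modulo nfree), so writers
- * updating map entries that share a cache line always contend on the
- * same lock.
- */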
- static u64 to_namespace_offset(struct arena_info *arena, u64 lba)
- {
- return arena->dataoff + ((u64)lba * arena->internal_lbasize);
- }
- static int btt_data_read(struct arena_info *arena, struct page *page,
- unsigned int off, u32 lba, u32 len)
- {
- int ret;
- u64 nsoff = to_namespace_offset(arena, lba);
- void *mem = kmap_atomic(page);
- ret = arena_read_bytes(arena, nsoff, mem + off, len);
- kunmap_atomic(mem);
- return ret;
- }
- static int btt_data_write(struct arena_info *arena, u32 lba,
- struct page *page, unsigned int off, u32 len)
- {
- int ret;
- u64 nsoff = to_namespace_offset(arena, lba);
- void *mem = kmap_atomic(page);
- ret = arena_write_bytes(arena, nsoff, mem + off, len);
- kunmap_atomic(mem);
- return ret;
- }
- static void zero_fill_data(struct page *page, unsigned int off, u32 len)
- {
- void *mem = kmap_atomic(page);
- memset(mem + off, 0, len);
- kunmap_atomic(mem);
- }
- #ifdef CONFIG_BLK_DEV_INTEGRITY
- static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
- struct arena_info *arena, u32 postmap, int rw)
- {
- unsigned int len = btt_meta_size(btt);
- u64 meta_nsoff;
- int ret = 0;
- if (bip == NULL)
- return 0;
- meta_nsoff = to_namespace_offset(arena, postmap) + btt->sector_size;
- while (len) {
- unsigned int cur_len;
- struct bio_vec bv;
- void *mem;
- bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
- /*
- * The 'bv' obtained from bvec_iter_bvec has its .bv_len and
- * .bv_offset already adjusted for iter->bi_bvec_done, and we
- * can use those directly
- */
- cur_len = min(len, bv.bv_len);
- mem = kmap_atomic(bv.bv_page);
- if (rw)
- ret = arena_write_bytes(arena, meta_nsoff,
- mem + bv.bv_offset, cur_len);
- else
- ret = arena_read_bytes(arena, meta_nsoff,
- mem + bv.bv_offset, cur_len);
- kunmap_atomic(mem);
- if (ret)
- return ret;
- len -= cur_len;
- meta_nsoff += cur_len;
- bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len);
- }
- return ret;
- }
- #else /* CONFIG_BLK_DEV_INTEGRITY */
- static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
- struct arena_info *arena, u32 postmap, int rw)
- {
- return 0;
- }
- #endif
- static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
- struct page *page, unsigned int off, sector_t sector,
- unsigned int len)
- {
- int ret = 0;
- int t_flag, e_flag;
- struct arena_info *arena = NULL;
- u32 lane = 0, premap, postmap;
- while (len) {
- u32 cur_len;
- lane = nd_region_acquire_lane(btt->nd_region);
- ret = lba_to_arena(btt, sector, &premap, &arena);
- if (ret)
- goto out_lane;
- cur_len = min(btt->sector_size, len);
- ret = btt_map_read(arena, premap, &postmap, &t_flag, &e_flag);
- if (ret)
- goto out_lane;
- /*
- * We loop to make sure that the post map LBA didn't change
- * from under us between writing the RTT and doing the actual
- * read.
- */
- while (1) {
- u32 new_map;
- if (t_flag) {
- zero_fill_data(page, off, cur_len);
- goto out_lane;
- }
- if (e_flag) {
- ret = -EIO;
- goto out_lane;
- }
- arena->rtt[lane] = RTT_VALID | postmap;
- /*
- * Barrier to make sure this write is not reordered
- * to do the verification map_read before the RTT store
- */
- barrier();
- ret = btt_map_read(arena, premap, &new_map, &t_flag,
- &e_flag);
- if (ret)
- goto out_rtt;
- if (postmap == new_map)
- break;
- postmap = new_map;
- }
- ret = btt_data_read(arena, page, off, postmap, cur_len);
- if (ret)
- goto out_rtt;
- if (bip) {
- ret = btt_rw_integrity(btt, bip, arena, postmap, READ);
- if (ret)
- goto out_rtt;
- }
- arena->rtt[lane] = RTT_INVALID;
- nd_region_release_lane(btt->nd_region, lane);
- len -= cur_len;
- off += cur_len;
- sector += btt->sector_size >> SECTOR_SHIFT;
- }
- return 0;
- out_rtt:
- arena->rtt[lane] = RTT_INVALID;
- out_lane:
- nd_region_release_lane(btt->nd_region, lane);
- return ret;
- }
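- /*
- * The rtt[lane] entry published during the read above is what
- * btt_write_pg() below polls before reusing a freed block: a writer
- * spins while any lane's slot equals (RTT_VALID | block), so a block
- * is never overwritten while a reader is still using it.
- */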
- static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
- sector_t sector, struct page *page, unsigned int off,
- unsigned int len)
- {
- int ret = 0;
- struct arena_info *arena = NULL;
- u32 premap = 0, old_postmap, new_postmap, lane = 0, i;
- struct log_entry log;
- int sub;
- while (len) {
- u32 cur_len;
- lane = nd_region_acquire_lane(btt->nd_region);
- ret = lba_to_arena(btt, sector, &premap, &arena);
- if (ret)
- goto out_lane;
- cur_len = min(btt->sector_size, len);
- if ((arena->flags & IB_FLAG_ERROR_MASK) != 0) {
- ret = -EIO;
- goto out_lane;
- }
- new_postmap = arena->freelist[lane].block;
- /* Wait if the new block is being read from */
- for (i = 0; i < arena->nfree; i++)
- while (arena->rtt[i] == (RTT_VALID | new_postmap))
- cpu_relax();
- if (new_postmap >= arena->internal_nlba) {
- ret = -EIO;
- goto out_lane;
- }
- ret = btt_data_write(arena, new_postmap, page, off, cur_len);
- if (ret)
- goto out_lane;
- if (bip) {
- ret = btt_rw_integrity(btt, bip, arena, new_postmap,
- WRITE);
- if (ret)
- goto out_lane;
- }
- lock_map(arena, premap);
- ret = btt_map_read(arena, premap, &old_postmap, NULL, NULL);
- if (ret)
- goto out_map;
- if (old_postmap >= arena->internal_nlba) {
- ret = -EIO;
- goto out_map;
- }
- log.lba = cpu_to_le32(premap);
- log.old_map = cpu_to_le32(old_postmap);
- log.new_map = cpu_to_le32(new_postmap);
- log.seq = cpu_to_le32(arena->freelist[lane].seq);
- sub = arena->freelist[lane].sub;
- ret = btt_flog_write(arena, lane, sub, &log);
- if (ret)
- goto out_map;
- ret = btt_map_write(arena, premap, new_postmap, 0, 0);
- if (ret)
- goto out_map;
- unlock_map(arena, premap);
- nd_region_release_lane(btt->nd_region, lane);
- len -= cur_len;
- off += cur_len;
- sector += btt->sector_size >> SECTOR_SHIFT;
- }
- return 0;
- out_map:
- unlock_map(arena, premap);
- out_lane:
- nd_region_release_lane(btt->nd_region, lane);
- return ret;
- }
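- /*
- * Ordering of the write path above, per sector: (1) data is written to
- * the lane's current free block, (2) the flog records {premap,
- * old postmap, new postmap, seq}, (3) the map entry is switched to the
- * new postmap, (4) the map lock and lane are released. A crash between
- * (2) and (3) is repaired by btt_freelist_init() at the next startup,
- * which replays the newer flog entry into the map.
- */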
- static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
- struct page *page, unsigned int len, unsigned int off,
- bool is_write, sector_t sector)
- {
- int ret;
- if (!is_write) {
- ret = btt_read_pg(btt, bip, page, off, sector, len);
- flush_dcache_page(page);
- } else {
- flush_dcache_page(page);
- ret = btt_write_pg(btt, bip, sector, page, off, len);
- }
- return ret;
- }
- static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
- {
- struct bio_integrity_payload *bip = bio_integrity(bio);
- struct btt *btt = q->queuedata;
- struct bvec_iter iter;
- unsigned long start;
- struct bio_vec bvec;
- int err = 0;
- bool do_acct;
- /*
- * bio_integrity_enabled also checks if the bio already has an
- * integrity payload attached. If it does, we *don't* do a
- * bio_integrity_prep here - the payload has been generated by
- * another kernel subsystem, and we just pass it through.
- */
- if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
- bio->bi_error = -EIO;
- goto out;
- }
- do_acct = nd_iostat_start(bio, &start);
- bio_for_each_segment(bvec, bio, iter) {
- unsigned int len = bvec.bv_len;
- BUG_ON(len > PAGE_SIZE);
- /* Make sure len is in multiples of sector size. */
- /* XXX is this right? */
- BUG_ON(len < btt->sector_size);
- BUG_ON(len % btt->sector_size);
- err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
- op_is_write(bio_op(bio)), iter.bi_sector);
- if (err) {
- dev_info(&btt->nd_btt->dev,
- "io error in %s sector %lld, len %d,\n",
- (op_is_write(bio_op(bio))) ? "WRITE" :
- "READ",
- (unsigned long long) iter.bi_sector, len);
- bio->bi_error = err;
- break;
- }
- }
- if (do_acct)
- nd_iostat_end(bio, start);
- out:
- bio_endio(bio);
- return BLK_QC_T_NONE;
- }
- static int btt_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, bool is_write)
- {
- struct btt *btt = bdev->bd_disk->private_data;
- int rc;
- rc = btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, is_write, sector);
- if (rc == 0)
- page_endio(page, is_write, 0);
- return rc;
- }
- static int btt_getgeo(struct block_device *bd, struct hd_geometry *geo)
- {
- /* some standard values */
- geo->heads = 1 << 6;
- geo->sectors = 1 << 5;
- geo->cylinders = get_capacity(bd->bd_disk) >> 11;
- return 0;
- }
- static const struct block_device_operations btt_fops = {
- .owner = THIS_MODULE,
- .rw_page = btt_rw_page,
- .getgeo = btt_getgeo,
- .revalidate_disk = nvdimm_revalidate_disk,
- };
- static int btt_blk_init(struct btt *btt)
- {
- struct nd_btt *nd_btt = btt->nd_btt;
- struct nd_namespace_common *ndns = nd_btt->ndns;
- /* create a new disk and request queue for btt */
- btt->btt_queue = blk_alloc_queue(GFP_KERNEL);
- if (!btt->btt_queue)
- return -ENOMEM;
- btt->btt_disk = alloc_disk(0);
- if (!btt->btt_disk) {
- blk_cleanup_queue(btt->btt_queue);
- return -ENOMEM;
- }
- nvdimm_namespace_disk_name(ndns, btt->btt_disk->disk_name);
- btt->btt_disk->first_minor = 0;
- btt->btt_disk->fops = &btt_fops;
- btt->btt_disk->private_data = btt;
- btt->btt_disk->queue = btt->btt_queue;
- btt->btt_disk->flags = GENHD_FL_EXT_DEVT;
- blk_queue_make_request(btt->btt_queue, btt_make_request);
- blk_queue_logical_block_size(btt->btt_queue, btt->sector_size);
- blk_queue_max_hw_sectors(btt->btt_queue, UINT_MAX);
- blk_queue_bounce_limit(btt->btt_queue, BLK_BOUNCE_ANY);
- queue_flag_set_unlocked(QUEUE_FLAG_NONROT, btt->btt_queue);
- btt->btt_queue->queuedata = btt;
- if (btt_meta_size(btt)) {
- int rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));
- if (rc) {
- del_gendisk(btt->btt_disk);
- put_disk(btt->btt_disk);
- blk_cleanup_queue(btt->btt_queue);
- return rc;
- }
- }
- set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
- device_add_disk(&btt->nd_btt->dev, btt->btt_disk);
- btt->nd_btt->size = btt->nlba * (u64)btt->sector_size;
- revalidate_disk(btt->btt_disk);
- return 0;
- }
- static void btt_blk_cleanup(struct btt *btt)
- {
- del_gendisk(btt->btt_disk);
- put_disk(btt->btt_disk);
- blk_cleanup_queue(btt->btt_queue);
- }
- /**
- * btt_init - initialize a block translation table for the given device
- * @nd_btt: device with BTT geometry and backing device info
- * @rawsize: raw size in bytes of the backing device
- * @lbasize: lba size of the backing device
- * @uuid: A uuid for the backing device - this is stored on media
- * @maxlane: maximum number of parallel requests the device can handle
- *
- * Initialize a Block Translation Table on a backing device to provide
- * single sector power fail atomicity.
- *
- * Context:
- * Might sleep.
- *
- * Returns:
- * Pointer to a new struct btt on success, NULL on failure.
- */
- static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
- u32 lbasize, u8 *uuid, struct nd_region *nd_region)
- {
- int ret;
- struct btt *btt;
- struct device *dev = &nd_btt->dev;
- btt = devm_kzalloc(dev, sizeof(struct btt), GFP_KERNEL);
- if (!btt)
- return NULL;
- btt->nd_btt = nd_btt;
- btt->rawsize = rawsize;
- btt->lbasize = lbasize;
- btt->sector_size = ((lbasize >= 4096) ? 4096 : 512);
- INIT_LIST_HEAD(&btt->arena_list);
- mutex_init(&btt->init_lock);
- btt->nd_region = nd_region;
- ret = discover_arenas(btt);
- if (ret) {
- dev_err(dev, "init: error in arena_discover: %d\n", ret);
- return NULL;
- }
- if (btt->init_state != INIT_READY && nd_region->ro) {
- dev_info(dev, "%s is read-only, unable to init btt metadata\n",
- dev_name(&nd_region->dev));
- return NULL;
- } else if (btt->init_state != INIT_READY) {
- btt->num_arenas = (rawsize / ARENA_MAX_SIZE) +
- ((rawsize % ARENA_MAX_SIZE) ? 1 : 0);
- dev_dbg(dev, "init: %d arenas for %llu rawsize\n",
- btt->num_arenas, rawsize);
- ret = create_arenas(btt);
- if (ret) {
- dev_info(dev, "init: create_arenas: %d\n", ret);
- return NULL;
- }
- ret = btt_meta_init(btt);
- if (ret) {
- dev_err(dev, "init: error in meta_init: %d\n", ret);
- return NULL;
- }
- }
- ret = btt_blk_init(btt);
- if (ret) {
- dev_err(dev, "init: error in blk_init: %d\n", ret);
- return NULL;
- }
- btt_debugfs_init(btt);
- return btt;
- }
- /**
- * btt_fini - de-initialize a BTT
- * @btt: the BTT handle that was generated by btt_init
- *
- * De-initialize a Block Translation Table on device removal
- *
- * Context:
- * Might sleep.
- */
- static void btt_fini(struct btt *btt)
- {
- if (btt) {
- btt_blk_cleanup(btt);
- free_arenas(btt);
- debugfs_remove_recursive(btt->debugfs_dir);
- }
- }
- int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
- {
- struct nd_btt *nd_btt = to_nd_btt(ndns->claim);
- struct nd_region *nd_region;
- struct btt *btt;
- size_t rawsize;
- if (!nd_btt->uuid || !nd_btt->ndns || !nd_btt->lbasize) {
- dev_dbg(&nd_btt->dev, "incomplete btt configuration\n");
- return -ENODEV;
- }
- rawsize = nvdimm_namespace_capacity(ndns) - SZ_4K;
- if (rawsize < ARENA_MIN_SIZE) {
- dev_dbg(&nd_btt->dev, "%s must be at least %ld bytes\n",
- dev_name(&ndns->dev), ARENA_MIN_SIZE + SZ_4K);
- return -ENXIO;
- }
- nd_region = to_nd_region(nd_btt->dev.parent);
- btt = btt_init(nd_btt, rawsize, nd_btt->lbasize, nd_btt->uuid,
- nd_region);
- if (!btt)
- return -ENOMEM;
- nd_btt->btt = btt;
- return 0;
- }
- EXPORT_SYMBOL(nvdimm_namespace_attach_btt);
- int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt)
- {
- struct btt *btt = nd_btt->btt;
- btt_fini(btt);
- nd_btt->btt = NULL;
- return 0;
- }
- EXPORT_SYMBOL(nvdimm_namespace_detach_btt);
- static int __init nd_btt_init(void)
- {
- int rc = 0;
- debugfs_root = debugfs_create_dir("btt", NULL);
- if (IS_ERR_OR_NULL(debugfs_root))
- rc = -ENXIO;
- return rc;
- }
- static void __exit nd_btt_exit(void)
- {
- debugfs_remove_recursive(debugfs_root);
- }
- MODULE_ALIAS_ND_DEVICE(ND_DEVICE_BTT);
- MODULE_AUTHOR("Vishal Verma <vishal.l.verma@linux.intel.com>");
- MODULE_LICENSE("GPL v2");
- module_init(nd_btt_init);
- module_exit(nd_btt_exit);