- /*
- * linux/fs/block_dev.c
- *
- * Copyright (C) 1991, 1992 Linus Torvalds
- * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
- */
- #include <linux/init.h>
- #include <linux/mm.h>
- #include <linux/fcntl.h>
- #include <linux/slab.h>
- #include <linux/kmod.h>
- #include <linux/major.h>
- #include <linux/device_cgroup.h>
- #include <linux/highmem.h>
- #include <linux/blkdev.h>
- #include <linux/backing-dev.h>
- #include <linux/module.h>
- #include <linux/blkpg.h>
- #include <linux/magic.h>
- #include <linux/dax.h>
- #include <linux/buffer_head.h>
- #include <linux/swap.h>
- #include <linux/pagevec.h>
- #include <linux/writeback.h>
- #include <linux/mpage.h>
- #include <linux/mount.h>
- #include <linux/uio.h>
- #include <linux/namei.h>
- #include <linux/log2.h>
- #include <linux/cleancache.h>
- #include <linux/dax.h>
- #include <linux/badblocks.h>
- #include <linux/task_io_accounting_ops.h>
- #include <linux/falloc.h>
- #include <linux/uaccess.h>
- #include "internal.h"
- struct bdev_inode {
- struct block_device bdev;
- struct inode vfs_inode;
- };
- static const struct address_space_operations def_blk_aops;
- static inline struct bdev_inode *BDEV_I(struct inode *inode)
- {
- return container_of(inode, struct bdev_inode, vfs_inode);
- }
- struct block_device *I_BDEV(struct inode *inode)
- {
- return &BDEV_I(inode)->bdev;
- }
- EXPORT_SYMBOL(I_BDEV);
- static void bdev_write_inode(struct block_device *bdev)
- {
- struct inode *inode = bdev->bd_inode;
- int ret;
- spin_lock(&inode->i_lock);
- while (inode->i_state & I_DIRTY) {
- spin_unlock(&inode->i_lock);
- ret = write_inode_now(inode, true);
- if (ret) {
- char name[BDEVNAME_SIZE];
- pr_warn_ratelimited("VFS: Dirty inode writeback failed "
- "for block device %s (err=%d).\n",
- bdevname(bdev, name), ret);
- }
- spin_lock(&inode->i_lock);
- }
- spin_unlock(&inode->i_lock);
- }
- /* Kill _all_ buffers and pagecache, dirty or not... */
- void kill_bdev(struct block_device *bdev)
- {
- struct address_space *mapping = bdev->bd_inode->i_mapping;
- if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
- return;
- invalidate_bh_lrus();
- truncate_inode_pages(mapping, 0);
- }
- EXPORT_SYMBOL(kill_bdev);
- /* Invalidate clean unused buffers and pagecache. */
- void invalidate_bdev(struct block_device *bdev)
- {
- struct address_space *mapping = bdev->bd_inode->i_mapping;
- if (mapping->nrpages) {
- invalidate_bh_lrus();
- lru_add_drain_all(); /* make sure all lru add caches are flushed */
- invalidate_mapping_pages(mapping, 0, -1);
- }
- /* 99% of the time, we don't need to flush the cleancache on the bdev.
- * But, for the strange corners, let's be cautious.
- */
- cleancache_invalidate_inode(mapping);
- }
- EXPORT_SYMBOL(invalidate_bdev);
- static void set_init_blocksize(struct block_device *bdev)
- {
- unsigned bsize = bdev_logical_block_size(bdev);
- loff_t size = i_size_read(bdev->bd_inode);
- while (bsize < PAGE_SIZE) {
- if (size & bsize)
- break;
- bsize <<= 1;
- }
- bdev->bd_block_size = bsize;
- bdev->bd_inode->i_blkbits = blksize_bits(bsize);
- }
- int set_blocksize(struct block_device *bdev, int size)
- {
- /* Size must be a power of two, and between 512 and PAGE_SIZE */
- if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
- return -EINVAL;
- /* Size cannot be smaller than the size supported by the device */
- if (size < bdev_logical_block_size(bdev))
- return -EINVAL;
- /* Don't change the size if it is same as current */
- if (bdev->bd_block_size != size) {
- sync_blockdev(bdev);
- bdev->bd_block_size = size;
- bdev->bd_inode->i_blkbits = blksize_bits(size);
- kill_bdev(bdev);
- }
- return 0;
- }
- EXPORT_SYMBOL(set_blocksize);
- int sb_set_blocksize(struct super_block *sb, int size)
- {
- if (set_blocksize(sb->s_bdev, size))
- return 0;
- /* If we get here, we know size is a power of two
- * and its value is between 512 and PAGE_SIZE */
- sb->s_blocksize = size;
- sb->s_blocksize_bits = blksize_bits(size);
- return sb->s_blocksize;
- }
- EXPORT_SYMBOL(sb_set_blocksize);
- int sb_min_blocksize(struct super_block *sb, int size)
- {
- int minsize = bdev_logical_block_size(sb->s_bdev);
- if (size < minsize)
- size = minsize;
- return sb_set_blocksize(sb, size);
- }
- EXPORT_SYMBOL(sb_min_blocksize);
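- /*
- * Usage sketch (illustrative, not part of the original file): a
- * filesystem's fill_super() typically picks its block size with the
- * helpers above.  Both sb_min_blocksize() and sb_set_blocksize() return
- * 0 on failure, so the result must be checked:
- *
- *	blocksize = sb_min_blocksize(sb, 1024);
- *	if (!blocksize)
- *		return -EINVAL;
- *	(read the on-disk superblock at that size, then switch to the
- *	 block size it specifies with sb_set_blocksize(sb, on_disk_bs);
- *	 "on_disk_bs" is a hypothetical variable)
- */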
- static int
- blkdev_get_block(struct inode *inode, sector_t iblock,
- struct buffer_head *bh, int create)
- {
- bh->b_bdev = I_BDEV(inode);
- bh->b_blocknr = iblock;
- set_buffer_mapped(bh);
- return 0;
- }
- static struct inode *bdev_file_inode(struct file *file)
- {
- return file->f_mapping->host;
- }
- static unsigned int dio_bio_write_op(struct kiocb *iocb)
- {
- unsigned int op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
- /* avoid the need for an I/O completion work item */
- if (iocb->ki_flags & IOCB_DSYNC)
- op |= REQ_FUA;
- return op;
- }
- #define DIO_INLINE_BIO_VECS 4
- static void blkdev_bio_end_io_simple(struct bio *bio)
- {
- struct task_struct *waiter = bio->bi_private;
- WRITE_ONCE(bio->bi_private, NULL);
- wake_up_process(waiter);
- }
- static ssize_t
- __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
- int nr_pages)
- {
- struct file *file = iocb->ki_filp;
- struct block_device *bdev = I_BDEV(bdev_file_inode(file));
- struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs, *bvec;
- loff_t pos = iocb->ki_pos;
- bool should_dirty = false;
- struct bio bio;
- ssize_t ret;
- blk_qc_t qc;
- int i;
- if ((pos | iov_iter_alignment(iter)) &
- (bdev_logical_block_size(bdev) - 1))
- return -EINVAL;
- if (nr_pages <= DIO_INLINE_BIO_VECS)
- vecs = inline_vecs;
- else {
- vecs = kmalloc_array(nr_pages, sizeof(struct bio_vec),
- GFP_KERNEL);
- if (!vecs)
- return -ENOMEM;
- }
- bio_init(&bio, vecs, nr_pages);
- bio_set_dev(&bio, bdev);
- bio.bi_iter.bi_sector = pos >> 9;
- bio.bi_write_hint = iocb->ki_hint;
- bio.bi_private = current;
- bio.bi_end_io = blkdev_bio_end_io_simple;
- bio.bi_ioprio = iocb->ki_ioprio;
- ret = bio_iov_iter_get_pages(&bio, iter);
- if (unlikely(ret))
- goto out;
- ret = bio.bi_iter.bi_size;
- if (iov_iter_rw(iter) == READ) {
- bio.bi_opf = REQ_OP_READ;
- if (iter_is_iovec(iter))
- should_dirty = true;
- } else {
- bio.bi_opf = dio_bio_write_op(iocb);
- task_io_account_write(ret);
- }
- qc = submit_bio(&bio);
- for (;;) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- if (!READ_ONCE(bio.bi_private))
- break;
- if (!(iocb->ki_flags & IOCB_HIPRI) ||
- !blk_poll(bdev_get_queue(bdev), qc))
- io_schedule();
- }
- __set_current_state(TASK_RUNNING);
- bio_for_each_segment_all(bvec, &bio, i) {
- if (should_dirty && !PageCompound(bvec->bv_page))
- set_page_dirty_lock(bvec->bv_page);
- put_page(bvec->bv_page);
- }
- if (unlikely(bio.bi_status))
- ret = blk_status_to_errno(bio.bi_status);
- out:
- if (vecs != inline_vecs)
- kfree(vecs);
- bio_uninit(&bio);
- return ret;
- }
- struct blkdev_dio {
- union {
- struct kiocb *iocb;
- struct task_struct *waiter;
- };
- size_t size;
- atomic_t ref;
- bool multi_bio : 1;
- bool should_dirty : 1;
- bool is_sync : 1;
- struct bio bio;
- };
- static struct bio_set blkdev_dio_pool;
- static void blkdev_bio_end_io(struct bio *bio)
- {
- struct blkdev_dio *dio = bio->bi_private;
- bool should_dirty = dio->should_dirty;
- if (bio->bi_status && !dio->bio.bi_status)
- dio->bio.bi_status = bio->bi_status;
- if (!dio->multi_bio || atomic_dec_and_test(&dio->ref)) {
- if (!dio->is_sync) {
- struct kiocb *iocb = dio->iocb;
- ssize_t ret;
- if (likely(!dio->bio.bi_status)) {
- ret = dio->size;
- iocb->ki_pos += ret;
- } else {
- ret = blk_status_to_errno(dio->bio.bi_status);
- }
- dio->iocb->ki_complete(iocb, ret, 0);
- bio_put(&dio->bio);
- } else {
- struct task_struct *waiter = dio->waiter;
- WRITE_ONCE(dio->waiter, NULL);
- wake_up_process(waiter);
- }
- }
- if (should_dirty) {
- bio_check_pages_dirty(bio);
- } else {
- struct bio_vec *bvec;
- int i;
- bio_for_each_segment_all(bvec, bio, i)
- put_page(bvec->bv_page);
- bio_put(bio);
- }
- }
- static ssize_t
- __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
- {
- struct file *file = iocb->ki_filp;
- struct inode *inode = bdev_file_inode(file);
- struct block_device *bdev = I_BDEV(inode);
- struct blk_plug plug;
- struct blkdev_dio *dio;
- struct bio *bio;
- bool is_read = (iov_iter_rw(iter) == READ), is_sync;
- loff_t pos = iocb->ki_pos;
- blk_qc_t qc = BLK_QC_T_NONE;
- int ret = 0;
- if ((pos | iov_iter_alignment(iter)) &
- (bdev_logical_block_size(bdev) - 1))
- return -EINVAL;
- bio = bio_alloc_bioset(GFP_KERNEL, nr_pages, &blkdev_dio_pool);
- bio_get(bio); /* extra ref for the completion handler */
- dio = container_of(bio, struct blkdev_dio, bio);
- dio->is_sync = is_sync = is_sync_kiocb(iocb);
- if (dio->is_sync)
- dio->waiter = current;
- else
- dio->iocb = iocb;
- dio->size = 0;
- dio->multi_bio = false;
- dio->should_dirty = is_read && (iter->type == ITER_IOVEC);
- blk_start_plug(&plug);
- for (;;) {
- bio_set_dev(bio, bdev);
- bio->bi_iter.bi_sector = pos >> 9;
- bio->bi_write_hint = iocb->ki_hint;
- bio->bi_private = dio;
- bio->bi_end_io = blkdev_bio_end_io;
- bio->bi_ioprio = iocb->ki_ioprio;
- ret = bio_iov_iter_get_pages(bio, iter);
- if (unlikely(ret)) {
- bio->bi_status = BLK_STS_IOERR;
- bio_endio(bio);
- break;
- }
- if (is_read) {
- bio->bi_opf = REQ_OP_READ;
- if (dio->should_dirty)
- bio_set_pages_dirty(bio);
- } else {
- bio->bi_opf = dio_bio_write_op(iocb);
- task_io_account_write(bio->bi_iter.bi_size);
- }
- dio->size += bio->bi_iter.bi_size;
- pos += bio->bi_iter.bi_size;
- nr_pages = iov_iter_npages(iter, BIO_MAX_PAGES);
- if (!nr_pages) {
- qc = submit_bio(bio);
- break;
- }
- if (!dio->multi_bio) {
- dio->multi_bio = true;
- atomic_set(&dio->ref, 2);
- } else {
- atomic_inc(&dio->ref);
- }
- submit_bio(bio);
- bio = bio_alloc(GFP_KERNEL, nr_pages);
- }
- blk_finish_plug(&plug);
- if (!is_sync)
- return -EIOCBQUEUED;
- for (;;) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- if (!READ_ONCE(dio->waiter))
- break;
- if (!(iocb->ki_flags & IOCB_HIPRI) ||
- !blk_poll(bdev_get_queue(bdev), qc))
- io_schedule();
- }
- __set_current_state(TASK_RUNNING);
- if (!ret)
- ret = blk_status_to_errno(dio->bio.bi_status);
- if (likely(!ret))
- ret = dio->size;
- bio_put(&dio->bio);
- return ret;
- }
- static ssize_t
- blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
- {
- int nr_pages;
- nr_pages = iov_iter_npages(iter, BIO_MAX_PAGES + 1);
- if (!nr_pages)
- return 0;
- if (is_sync_kiocb(iocb) && nr_pages <= BIO_MAX_PAGES)
- return __blkdev_direct_IO_simple(iocb, iter, nr_pages);
- return __blkdev_direct_IO(iocb, iter, min(nr_pages, BIO_MAX_PAGES));
- }
- static __init int blkdev_init(void)
- {
- return bioset_init(&blkdev_dio_pool, 4, offsetof(struct blkdev_dio, bio), BIOSET_NEED_BVECS);
- }
- module_init(blkdev_init);
- int __sync_blockdev(struct block_device *bdev, int wait)
- {
- if (!bdev)
- return 0;
- if (!wait)
- return filemap_flush(bdev->bd_inode->i_mapping);
- return filemap_write_and_wait(bdev->bd_inode->i_mapping);
- }
- /*
- * Write out and wait upon all the dirty data associated with a block
- * device via its mapping. Does not take the superblock lock.
- */
- int sync_blockdev(struct block_device *bdev)
- {
- return __sync_blockdev(bdev, 1);
- }
- EXPORT_SYMBOL(sync_blockdev);
- /*
- * Write out and wait upon all dirty data associated with this
- * device. Filesystem data as well as the underlying block
- * device. Takes the superblock lock.
- */
- int fsync_bdev(struct block_device *bdev)
- {
- struct super_block *sb = get_super(bdev);
- if (sb) {
- int res = sync_filesystem(sb);
- drop_super(sb);
- return res;
- }
- return sync_blockdev(bdev);
- }
- EXPORT_SYMBOL(fsync_bdev);
- /**
- * freeze_bdev -- lock a filesystem and force it into a consistent state
- * @bdev: blockdevice to lock
- *
- * If a superblock is found on this device, we take the s_umount semaphore
- * on it to make sure nobody unmounts until the snapshot creation is done.
- * The reference counter (bd_fsfreeze_count) guarantees that only the last
- * unfreeze process can actually unfreeze the frozen filesystem when
- * multiple freeze requests arrive simultaneously. It counts up in
- * freeze_bdev() and counts down in thaw_bdev(). When it becomes 0,
- * thaw_bdev() actually unfreezes the filesystem.
- */
- struct super_block *freeze_bdev(struct block_device *bdev)
- {
- struct super_block *sb;
- int error = 0;
- mutex_lock(&bdev->bd_fsfreeze_mutex);
- if (++bdev->bd_fsfreeze_count > 1) {
- /*
- * We don't even need to grab a reference - the first call
- * to freeze_bdev grabs an active reference and only the last
- * thaw_bdev drops it.
- */
- sb = get_super(bdev);
- if (sb)
- drop_super(sb);
- mutex_unlock(&bdev->bd_fsfreeze_mutex);
- return sb;
- }
- sb = get_active_super(bdev);
- if (!sb)
- goto out;
- if (sb->s_op->freeze_super)
- error = sb->s_op->freeze_super(sb);
- else
- error = freeze_super(sb);
- if (error) {
- deactivate_super(sb);
- bdev->bd_fsfreeze_count--;
- mutex_unlock(&bdev->bd_fsfreeze_mutex);
- return ERR_PTR(error);
- }
- deactivate_super(sb);
- out:
- sync_blockdev(bdev);
- mutex_unlock(&bdev->bd_fsfreeze_mutex);
- return sb; /* thaw_bdev releases s->s_umount */
- }
- EXPORT_SYMBOL(freeze_bdev);
- /**
- * thaw_bdev -- unlock filesystem
- * @bdev: blockdevice to unlock
- * @sb: associated superblock
- *
- * Unlocks the filesystem and marks it writeable again after freeze_bdev().
- */
- int thaw_bdev(struct block_device *bdev, struct super_block *sb)
- {
- int error = -EINVAL;
- mutex_lock(&bdev->bd_fsfreeze_mutex);
- if (!bdev->bd_fsfreeze_count)
- goto out;
- error = 0;
- if (--bdev->bd_fsfreeze_count > 0)
- goto out;
- if (!sb)
- goto out;
- if (sb->s_op->thaw_super)
- error = sb->s_op->thaw_super(sb);
- else
- error = thaw_super(sb);
- if (error)
- bdev->bd_fsfreeze_count++;
- out:
- mutex_unlock(&bdev->bd_fsfreeze_mutex);
- return error;
- }
- EXPORT_SYMBOL(thaw_bdev);
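- /*
- * Usage sketch (illustrative, not part of the original file): a snapshot
- * or backup path pairs the two helpers above so the device is frozen only
- * for the duration of the snapshot:
- *
- *	struct super_block *sb = freeze_bdev(bdev);
- *	if (IS_ERR(sb))
- *		return PTR_ERR(sb);
- *	(take the snapshot of the now-consistent device here)
- *	thaw_bdev(bdev, sb);
- *
- * freeze_bdev() returns NULL when no filesystem is mounted on the device,
- * which is not an error: thaw_bdev() accepts a NULL @sb and just drops the
- * freeze count.
- */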
- static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
- {
- return block_write_full_page(page, blkdev_get_block, wbc);
- }
- static int blkdev_readpage(struct file * file, struct page * page)
- {
- return block_read_full_page(page, blkdev_get_block);
- }
- static int blkdev_readpages(struct file *file, struct address_space *mapping,
- struct list_head *pages, unsigned nr_pages)
- {
- return mpage_readpages(mapping, pages, nr_pages, blkdev_get_block);
- }
- static int blkdev_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
- {
- return block_write_begin(mapping, pos, len, flags, pagep,
- blkdev_get_block);
- }
- static int blkdev_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
- {
- int ret;
- ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);
- unlock_page(page);
- put_page(page);
- return ret;
- }
- /*
- * private llseek:
- * for a block special file file_inode(file)->i_size is zero
- * so we compute the size by hand (just as in block_read/write above)
- */
- static loff_t block_llseek(struct file *file, loff_t offset, int whence)
- {
- struct inode *bd_inode = bdev_file_inode(file);
- loff_t retval;
- inode_lock(bd_inode);
- retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
- inode_unlock(bd_inode);
- return retval;
- }
-
- int blkdev_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
- {
- struct inode *bd_inode = bdev_file_inode(filp);
- struct block_device *bdev = I_BDEV(bd_inode);
- int error;
-
- error = file_write_and_wait_range(filp, start, end);
- if (error)
- return error;
- /*
- * There is no need to serialise calls to blkdev_issue_flush with
- * i_mutex and doing so causes performance issues with concurrent
- * O_SYNC writers to a block device.
- */
- error = blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
- if (error == -EOPNOTSUPP)
- error = 0;
- return error;
- }
- EXPORT_SYMBOL(blkdev_fsync);
- /**
- * bdev_read_page() - Start reading a page from a block device
- * @bdev: The device to read the page from
- * @sector: The offset on the device to read the page from (need not be aligned)
- * @page: The page to read
- *
- * On entry, the page should be locked. It will be unlocked when the page
- * has been read. If the block driver implements rw_page synchronously,
- * that will be true on exit from this function, but it need not be.
- *
- * Errors returned by this function are usually "soft", eg out of memory, or
- * queue full; callers should try a different route to read this page rather
- * than propagate an error back up the stack.
- *
- * Return: negative errno if an error occurs, 0 if submission was successful.
- */
- int bdev_read_page(struct block_device *bdev, sector_t sector,
- struct page *page)
- {
- const struct block_device_operations *ops = bdev->bd_disk->fops;
- int result = -EOPNOTSUPP;
- if (!ops->rw_page || bdev_get_integrity(bdev))
- return result;
- result = blk_queue_enter(bdev->bd_queue, 0);
- if (result)
- return result;
- result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
- REQ_OP_READ);
- blk_queue_exit(bdev->bd_queue);
- return result;
- }
- EXPORT_SYMBOL_GPL(bdev_read_page);
- /**
- * bdev_write_page() - Start writing a page to a block device
- * @bdev: The device to write the page to
- * @sector: The offset on the device to write the page to (need not be aligned)
- * @page: The page to write
- * @wbc: The writeback_control for the write
- *
- * On entry, the page should be locked and not currently under writeback.
- * On exit, if the write started successfully, the page will be unlocked and
- * under writeback. If the write failed already (eg the driver failed to
- * queue the page to the device), the page will still be locked. If the
- * caller is a ->writepage implementation, it will need to unlock the page.
- *
- * Errors returned by this function are usually "soft", eg out of memory, or
- * queue full; callers should try a different route to write this page rather
- * than propagate an error back up the stack.
- *
- * Return: negative errno if an error occurs, 0 if submission was successful.
- */
- int bdev_write_page(struct block_device *bdev, sector_t sector,
- struct page *page, struct writeback_control *wbc)
- {
- int result;
- const struct block_device_operations *ops = bdev->bd_disk->fops;
- if (!ops->rw_page || bdev_get_integrity(bdev))
- return -EOPNOTSUPP;
- result = blk_queue_enter(bdev->bd_queue, 0);
- if (result)
- return result;
- set_page_writeback(page);
- result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
- REQ_OP_WRITE);
- if (result) {
- end_page_writeback(page);
- } else {
- clean_page_buffers(page);
- unlock_page(page);
- }
- blk_queue_exit(bdev->bd_queue);
- return result;
- }
- EXPORT_SYMBOL_GPL(bdev_write_page);
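- /*
- * Usage sketch (illustrative, not part of the original file): callers such
- * as the page cache read path try this rw_page fast path first and fall
- * back to a normal bio-based read when it fails, since all errors from
- * these helpers are "soft":
- *
- *	result = bdev_read_page(bdev, sector, page);
- *	if (result)
- *		(build and submit a struct bio for the locked page instead)
- */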
- /*
- * pseudo-fs
- */
- static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
- static struct kmem_cache * bdev_cachep __read_mostly;
- static struct inode *bdev_alloc_inode(struct super_block *sb)
- {
- struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);
- if (!ei)
- return NULL;
- return &ei->vfs_inode;
- }
- static void bdev_i_callback(struct rcu_head *head)
- {
- struct inode *inode = container_of(head, struct inode, i_rcu);
- struct bdev_inode *bdi = BDEV_I(inode);
- kmem_cache_free(bdev_cachep, bdi);
- }
- static void bdev_destroy_inode(struct inode *inode)
- {
- call_rcu(&inode->i_rcu, bdev_i_callback);
- }
- static void init_once(void *foo)
- {
- struct bdev_inode *ei = (struct bdev_inode *) foo;
- struct block_device *bdev = &ei->bdev;
- memset(bdev, 0, sizeof(*bdev));
- mutex_init(&bdev->bd_mutex);
- INIT_LIST_HEAD(&bdev->bd_list);
- #ifdef CONFIG_SYSFS
- INIT_LIST_HEAD(&bdev->bd_holder_disks);
- #endif
- bdev->bd_bdi = &noop_backing_dev_info;
- inode_init_once(&ei->vfs_inode);
- /* Initialize mutex for freeze. */
- mutex_init(&bdev->bd_fsfreeze_mutex);
- }
- static void bdev_evict_inode(struct inode *inode)
- {
- struct block_device *bdev = &BDEV_I(inode)->bdev;
- truncate_inode_pages_final(&inode->i_data);
- invalidate_inode_buffers(inode); /* is it needed here? */
- clear_inode(inode);
- spin_lock(&bdev_lock);
- list_del_init(&bdev->bd_list);
- spin_unlock(&bdev_lock);
- /* Detach inode from wb early as bdi_put() may free bdi->wb */
- inode_detach_wb(inode);
- if (bdev->bd_bdi != &noop_backing_dev_info) {
- bdi_put(bdev->bd_bdi);
- bdev->bd_bdi = &noop_backing_dev_info;
- }
- }
- static const struct super_operations bdev_sops = {
- .statfs = simple_statfs,
- .alloc_inode = bdev_alloc_inode,
- .destroy_inode = bdev_destroy_inode,
- .drop_inode = generic_delete_inode,
- .evict_inode = bdev_evict_inode,
- };
- static struct dentry *bd_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
- {
- struct dentry *dent;
- dent = mount_pseudo(fs_type, "bdev:", &bdev_sops, NULL, BDEVFS_MAGIC);
- if (!IS_ERR(dent))
- dent->d_sb->s_iflags |= SB_I_CGROUPWB;
- return dent;
- }
- static struct file_system_type bd_type = {
- .name = "bdev",
- .mount = bd_mount,
- .kill_sb = kill_anon_super,
- };
- struct super_block *blockdev_superblock __read_mostly;
- EXPORT_SYMBOL_GPL(blockdev_superblock);
- void __init bdev_cache_init(void)
- {
- int err;
- static struct vfsmount *bd_mnt;
- bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
- 0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
- SLAB_MEM_SPREAD|SLAB_ACCOUNT|SLAB_PANIC),
- init_once);
- err = register_filesystem(&bd_type);
- if (err)
- panic("Cannot register bdev pseudo-fs");
- bd_mnt = kern_mount(&bd_type);
- if (IS_ERR(bd_mnt))
- panic("Cannot create bdev pseudo-fs");
- blockdev_superblock = bd_mnt->mnt_sb; /* For writeback */
- }
- /*
- * Most likely a _very_ bad one - but then it's hardly critical for a small
- * /dev and can be fixed when somebody needs a really large one.
- * Keep in mind that it will be fed through the icache hash function too.
- */
- static inline unsigned long hash(dev_t dev)
- {
- return MAJOR(dev)+MINOR(dev);
- }
- static int bdev_test(struct inode *inode, void *data)
- {
- return BDEV_I(inode)->bdev.bd_dev == *(dev_t *)data;
- }
- static int bdev_set(struct inode *inode, void *data)
- {
- BDEV_I(inode)->bdev.bd_dev = *(dev_t *)data;
- return 0;
- }
- static LIST_HEAD(all_bdevs);
- /*
- * If there is a bdev inode for this device, unhash it so that it gets evicted
- * as soon as the last inode reference is dropped.
- */
- void bdev_unhash_inode(dev_t dev)
- {
- struct inode *inode;
- inode = ilookup5(blockdev_superblock, hash(dev), bdev_test, &dev);
- if (inode) {
- remove_inode_hash(inode);
- iput(inode);
- }
- }
- struct block_device *bdget(dev_t dev)
- {
- struct block_device *bdev;
- struct inode *inode;
- inode = iget5_locked(blockdev_superblock, hash(dev),
- bdev_test, bdev_set, &dev);
- if (!inode)
- return NULL;
- bdev = &BDEV_I(inode)->bdev;
- if (inode->i_state & I_NEW) {
- bdev->bd_contains = NULL;
- bdev->bd_super = NULL;
- bdev->bd_inode = inode;
- bdev->bd_block_size = i_blocksize(inode);
- bdev->bd_part_count = 0;
- bdev->bd_invalidated = 0;
- inode->i_mode = S_IFBLK;
- inode->i_rdev = dev;
- inode->i_bdev = bdev;
- inode->i_data.a_ops = &def_blk_aops;
- mapping_set_gfp_mask(&inode->i_data, GFP_USER);
- spin_lock(&bdev_lock);
- list_add(&bdev->bd_list, &all_bdevs);
- spin_unlock(&bdev_lock);
- unlock_new_inode(inode);
- }
- return bdev;
- }
- EXPORT_SYMBOL(bdget);
- /**
- * bdgrab -- Grab a reference to an already referenced block device
- * @bdev: Block device to grab a reference to.
- */
- struct block_device *bdgrab(struct block_device *bdev)
- {
- ihold(bdev->bd_inode);
- return bdev;
- }
- EXPORT_SYMBOL(bdgrab);
- long nr_blockdev_pages(void)
- {
- struct block_device *bdev;
- long ret = 0;
- spin_lock(&bdev_lock);
- list_for_each_entry(bdev, &all_bdevs, bd_list) {
- ret += bdev->bd_inode->i_mapping->nrpages;
- }
- spin_unlock(&bdev_lock);
- return ret;
- }
- void bdput(struct block_device *bdev)
- {
- iput(bdev->bd_inode);
- }
- EXPORT_SYMBOL(bdput);
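- /*
- * Reference-counting sketch (illustrative, not part of the original file):
- * bdget(), bdgrab() and bdput() all operate on the reference count of the
- * backing bdev inode, so every bdget()/bdgrab() must be balanced by a
- * bdput():
- *
- *	struct block_device *bdev = bdget(dev);
- *	if (!bdev)
- *		return -ENOMEM;
- *	(inspect bdev->bd_inode, bdev->bd_openers, ...)
- *	bdput(bdev);
- */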
-
- static struct block_device *bd_acquire(struct inode *inode)
- {
- struct block_device *bdev;
- spin_lock(&bdev_lock);
- bdev = inode->i_bdev;
- if (bdev && !inode_unhashed(bdev->bd_inode)) {
- bdgrab(bdev);
- spin_unlock(&bdev_lock);
- return bdev;
- }
- spin_unlock(&bdev_lock);
- /*
- * i_bdev references a block device inode that was already shut down
- * (the corresponding device got removed). Remove the reference and look
- * up the block device inode again in case a new device got
- * reestablished under the same device number.
- */
- if (bdev)
- bd_forget(inode);
- bdev = bdget(inode->i_rdev);
- if (bdev) {
- spin_lock(&bdev_lock);
- if (!inode->i_bdev) {
- /*
- * We take an additional reference to bd_inode,
- * and it's released in clear_inode() of inode.
- * So, we can access it via ->i_mapping always
- * without igrab().
- */
- bdgrab(bdev);
- inode->i_bdev = bdev;
- inode->i_mapping = bdev->bd_inode->i_mapping;
- }
- spin_unlock(&bdev_lock);
- }
- return bdev;
- }
- /* Call when you free inode */
- void bd_forget(struct inode *inode)
- {
- struct block_device *bdev = NULL;
- spin_lock(&bdev_lock);
- if (!sb_is_blkdev_sb(inode->i_sb))
- bdev = inode->i_bdev;
- inode->i_bdev = NULL;
- inode->i_mapping = &inode->i_data;
- spin_unlock(&bdev_lock);
- if (bdev)
- bdput(bdev);
- }
- /**
- * bd_may_claim - test whether a block device can be claimed
- * @bdev: block device of interest
- * @whole: whole block device containing @bdev, may equal @bdev
- * @holder: holder trying to claim @bdev
- *
- * Test whether @bdev can be claimed by @holder.
- *
- * CONTEXT:
- * spin_lock(&bdev_lock).
- *
- * RETURNS:
- * %true if @bdev can be claimed, %false otherwise.
- */
- static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
- void *holder)
- {
- if (bdev->bd_holder == holder)
- return true; /* already a holder */
- else if (bdev->bd_holder != NULL)
- return false; /* held by someone else */
- else if (whole == bdev)
- return true; /* is a whole device which isn't held */
- else if (whole->bd_holder == bd_may_claim)
- return true; /* is a partition of a device that is being partitioned */
- else if (whole->bd_holder != NULL)
- return false; /* is a partition of a held device */
- else
- return true; /* is a partition of an un-held device */
- }
- /**
- * bd_prepare_to_claim - prepare to claim a block device
- * @bdev: block device of interest
- * @whole: the whole device containing @bdev, may equal @bdev
- * @holder: holder trying to claim @bdev
- *
- * Prepare to claim @bdev. This function fails if @bdev is already
- * claimed by another holder and waits if another claiming is in
- * progress. This function doesn't actually claim. On successful
- * return, the caller has ownership of bd_claiming and bd_holder[s].
- *
- * CONTEXT:
- * spin_lock(&bdev_lock). Might release bdev_lock, sleep and regrab
- * it multiple times.
- *
- * RETURNS:
- * 0 if @bdev can be claimed, -EBUSY otherwise.
- */
- static int bd_prepare_to_claim(struct block_device *bdev,
- struct block_device *whole, void *holder)
- {
- retry:
- /* if someone else claimed, fail */
- if (!bd_may_claim(bdev, whole, holder))
- return -EBUSY;
- /* if claiming is already in progress, wait for it to finish */
- if (whole->bd_claiming) {
- wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0);
- DEFINE_WAIT(wait);
- prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
- spin_unlock(&bdev_lock);
- schedule();
- finish_wait(wq, &wait);
- spin_lock(&bdev_lock);
- goto retry;
- }
- /* yay, all mine */
- return 0;
- }
- static struct gendisk *bdev_get_gendisk(struct block_device *bdev, int *partno)
- {
- struct gendisk *disk = get_gendisk(bdev->bd_dev, partno);
- if (!disk)
- return NULL;
- /*
- * Now that we hold a gendisk reference, make sure the bdev we looked up
- * is not stale. If it is, the device got removed and recreated before we
- * looked up the gendisk, and we fail the open in that case. Associating
- * an unhashed bdev with a newly created gendisk could lead to two bdevs
- * (and thus two independent caches) being associated with one device,
- * which is bad.
- */
- if (inode_unhashed(bdev->bd_inode)) {
- put_disk_and_module(disk);
- return NULL;
- }
- return disk;
- }
- /**
- * bd_start_claiming - start claiming a block device
- * @bdev: block device of interest
- * @holder: holder trying to claim @bdev
- *
- * @bdev is about to be opened exclusively. Check @bdev can be opened
- * exclusively and mark that an exclusive open is in progress. Each
- * successful call to this function must be matched with a call to
- * either bd_finish_claiming() or bd_abort_claiming() (which do not
- * fail).
- *
- * This function is used to gain exclusive access to the block device
- * without actually causing other exclusive open attempts to fail. It
- * should be used when the open sequence itself requires exclusive
- * access but may subsequently fail.
- *
- * CONTEXT:
- * Might sleep.
- *
- * RETURNS:
- * Pointer to the block device containing @bdev on success, ERR_PTR()
- * value on failure.
- */
- static struct block_device *bd_start_claiming(struct block_device *bdev,
- void *holder)
- {
- struct gendisk *disk;
- struct block_device *whole;
- int partno, err;
- might_sleep();
- /*
- * @bdev might not have been initialized properly yet, look up
- * and grab the outer block device the hard way.
- */
- disk = bdev_get_gendisk(bdev, &partno);
- if (!disk)
- return ERR_PTR(-ENXIO);
- /*
- * Normally, @bdev should equal what's returned from bdget_disk()
- * if partno is 0; however, some drivers (floppy) use multiple
- * bdev's for the same physical device and @bdev may be one of the
- * aliases. Keep @bdev if partno is 0. This means claimer
- * tracking is broken for those devices but it has always been that
- * way.
- */
- if (partno)
- whole = bdget_disk(disk, 0);
- else
- whole = bdgrab(bdev);
- put_disk_and_module(disk);
- if (!whole)
- return ERR_PTR(-ENOMEM);
- /* prepare to claim, if successful, mark claiming in progress */
- spin_lock(&bdev_lock);
- err = bd_prepare_to_claim(bdev, whole, holder);
- if (err == 0) {
- whole->bd_claiming = holder;
- spin_unlock(&bdev_lock);
- return whole;
- } else {
- spin_unlock(&bdev_lock);
- bdput(whole);
- return ERR_PTR(err);
- }
- }
- #ifdef CONFIG_SYSFS
- struct bd_holder_disk {
- struct list_head list;
- struct gendisk *disk;
- int refcnt;
- };
- static struct bd_holder_disk *bd_find_holder_disk(struct block_device *bdev,
- struct gendisk *disk)
- {
- struct bd_holder_disk *holder;
- list_for_each_entry(holder, &bdev->bd_holder_disks, list)
- if (holder->disk == disk)
- return holder;
- return NULL;
- }
- static int add_symlink(struct kobject *from, struct kobject *to)
- {
- return sysfs_create_link(from, to, kobject_name(to));
- }
- static void del_symlink(struct kobject *from, struct kobject *to)
- {
- sysfs_remove_link(from, kobject_name(to));
- }
- /**
- * bd_link_disk_holder - create symlinks between holding disk and slave bdev
- * @bdev: the claimed slave bdev
- * @disk: the holding disk
- *
- * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
- *
- * This function creates the following sysfs symlinks.
- *
- * - from "slaves" directory of the holder @disk to the claimed @bdev
- * - from "holders" directory of the @bdev to the holder @disk
- *
- * For example, if /dev/dm-0 maps to /dev/sda and disk for dm-0 is
- * passed to bd_link_disk_holder(), then:
- *
- * /sys/block/dm-0/slaves/sda --> /sys/block/sda
- * /sys/block/sda/holders/dm-0 --> /sys/block/dm-0
- *
- * The caller must have claimed @bdev before calling this function and
- * ensure that both @bdev and @disk are valid during the creation and
- * lifetime of these symlinks.
- *
- * CONTEXT:
- * Might sleep.
- *
- * RETURNS:
- * 0 on success, -errno on failure.
- */
- int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
- {
- struct bd_holder_disk *holder;
- int ret = 0;
- mutex_lock(&bdev->bd_mutex);
- WARN_ON_ONCE(!bdev->bd_holder);
- /* FIXME: remove the following once add_disk() handles errors */
- if (WARN_ON(!disk->slave_dir || !bdev->bd_part->holder_dir))
- goto out_unlock;
- holder = bd_find_holder_disk(bdev, disk);
- if (holder) {
- holder->refcnt++;
- goto out_unlock;
- }
- holder = kzalloc(sizeof(*holder), GFP_KERNEL);
- if (!holder) {
- ret = -ENOMEM;
- goto out_unlock;
- }
- INIT_LIST_HEAD(&holder->list);
- holder->disk = disk;
- holder->refcnt = 1;
- ret = add_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
- if (ret)
- goto out_free;
- ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj);
- if (ret)
- goto out_del;
- /*
- * bdev could be deleted beneath us which would implicitly destroy
- * the holder directory. Hold on to it.
- */
- kobject_get(bdev->bd_part->holder_dir);
- list_add(&holder->list, &bdev->bd_holder_disks);
- goto out_unlock;
- out_del:
- del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
- out_free:
- kfree(holder);
- out_unlock:
- mutex_unlock(&bdev->bd_mutex);
- return ret;
- }
- EXPORT_SYMBOL_GPL(bd_link_disk_holder);
- /**
- * bd_unlink_disk_holder - destroy symlinks created by bd_link_disk_holder()
- * @bdev: the claimed slave bdev
- * @disk: the holding disk
- *
- * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
- *
- * CONTEXT:
- * Might sleep.
- */
- void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk)
- {
- struct bd_holder_disk *holder;
- mutex_lock(&bdev->bd_mutex);
- holder = bd_find_holder_disk(bdev, disk);
- if (!WARN_ON_ONCE(holder == NULL) && !--holder->refcnt) {
- del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
- del_symlink(bdev->bd_part->holder_dir,
- &disk_to_dev(disk)->kobj);
- kobject_put(bdev->bd_part->holder_dir);
- list_del_init(&holder->list);
- kfree(holder);
- }
- mutex_unlock(&bdev->bd_mutex);
- }
- EXPORT_SYMBOL_GPL(bd_unlink_disk_holder);
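- /*
- * Usage sketch (illustrative, not part of the original file): a stacking
- * driver such as dm links each claimed component device when it is added
- * to a table and unlinks it again on removal; "component_bdev" and
- * "stacked_disk" are hypothetical names for the claimed slave bdev and
- * the holding gendisk:
- *
- *	err = bd_link_disk_holder(component_bdev, stacked_disk);
- *	if (err)
- *		(undo the claim and fail table construction)
- *	...
- *	bd_unlink_disk_holder(component_bdev, stacked_disk);
- */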
- #endif
- /**
- * flush_disk - invalidates all buffer-cache entries on a disk
- *
- * @bdev: struct block device to be flushed
- * @kill_dirty: flag to guide handling of dirty inodes
- *
- * Invalidates all buffer-cache entries on a disk. It should be called
- * when a disk has been changed -- either by a media change or online
- * resize.
- */
- static void flush_disk(struct block_device *bdev, bool kill_dirty)
- {
- if (__invalidate_device(bdev, kill_dirty)) {
- printk(KERN_WARNING "VFS: busy inodes on changed media or "
- "resized disk %s\n",
- bdev->bd_disk ? bdev->bd_disk->disk_name : "");
- }
- bdev->bd_invalidated = 1;
- }
- /**
- * check_disk_size_change - checks for disk size change and adjusts bdev size.
- * @disk: struct gendisk to check
- * @bdev: struct bdev to adjust.
- * @verbose: if %true log a message about a size change if there is any
- *
- * This routine checks whether the bdev size matches the disk size and
- * adjusts it if they differ. When shrinking the bdev size, all of its
- * caches are freed.
- */
- void check_disk_size_change(struct gendisk *disk, struct block_device *bdev,
- bool verbose)
- {
- loff_t disk_size, bdev_size;
- disk_size = (loff_t)get_capacity(disk) << 9;
- bdev_size = i_size_read(bdev->bd_inode);
- if (disk_size != bdev_size) {
- if (verbose) {
- printk(KERN_INFO
- "%s: detected capacity change from %lld to %lld\n",
- disk->disk_name, bdev_size, disk_size);
- }
- i_size_write(bdev->bd_inode, disk_size);
- if (bdev_size > disk_size)
- flush_disk(bdev, false);
- }
- }
- /**
- * revalidate_disk - wrapper for lower-level driver's revalidate_disk call-back
- * @disk: struct gendisk to be revalidated
- *
- * This routine is a wrapper for lower-level driver's revalidate_disk
- * call-backs. It is used to do common pre and post operations needed
- * for all revalidate_disk operations.
- */
- int revalidate_disk(struct gendisk *disk)
- {
- struct block_device *bdev;
- int ret = 0;
- if (disk->fops->revalidate_disk)
- ret = disk->fops->revalidate_disk(disk);
- bdev = bdget_disk(disk, 0);
- if (!bdev)
- return ret;
- mutex_lock(&bdev->bd_mutex);
- check_disk_size_change(disk, bdev, ret == 0);
- bdev->bd_invalidated = 0;
- mutex_unlock(&bdev->bd_mutex);
- bdput(bdev);
- return ret;
- }
- EXPORT_SYMBOL(revalidate_disk);
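- /*
- * Usage sketch (illustrative, not part of the original file): a driver
- * that learns about a capacity change typically updates the gendisk and
- * then lets revalidate_disk() resize the bdev and drop stale caches;
- * "new_nr_sectors" is a hypothetical variable holding the new capacity
- * in 512-byte sectors:
- *
- *	set_capacity(disk, new_nr_sectors);
- *	revalidate_disk(disk);
- */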
- /*
- * This routine checks whether a removable medium has been changed,
- * and invalidates all buffer-cache entries in that case. This
- * is a relatively slow routine, so we have to try to minimize using
- * it. Thus it is called only upon a 'mount' or 'open'. This
- * is the best way of combining speed and utility, I think.
- * People changing diskettes in the middle of an operation deserve
- * to lose :-)
- */
- int check_disk_change(struct block_device *bdev)
- {
- struct gendisk *disk = bdev->bd_disk;
- const struct block_device_operations *bdops = disk->fops;
- unsigned int events;
- events = disk_clear_events(disk, DISK_EVENT_MEDIA_CHANGE |
- DISK_EVENT_EJECT_REQUEST);
- if (!(events & DISK_EVENT_MEDIA_CHANGE))
- return 0;
- flush_disk(bdev, true);
- if (bdops->revalidate_disk)
- bdops->revalidate_disk(bdev->bd_disk);
- return 1;
- }
- EXPORT_SYMBOL(check_disk_change);
- void bd_set_size(struct block_device *bdev, loff_t size)
- {
- inode_lock(bdev->bd_inode);
- i_size_write(bdev->bd_inode, size);
- inode_unlock(bdev->bd_inode);
- }
- EXPORT_SYMBOL(bd_set_size);
- static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part);
- static void bdev_disk_changed(struct block_device *bdev, bool invalidate)
- {
- if (disk_part_scan_enabled(bdev->bd_disk)) {
- if (invalidate)
- invalidate_partitions(bdev->bd_disk, bdev);
- else
- rescan_partitions(bdev->bd_disk, bdev);
- } else {
- check_disk_size_change(bdev->bd_disk, bdev, !invalidate);
- bdev->bd_invalidated = 0;
- }
- }
- /*
- * bd_mutex locking:
- *
- * mutex_lock(part->bd_mutex)
- * mutex_lock_nested(whole->bd_mutex, 1)
- */
- static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
- {
- struct gendisk *disk;
- int ret;
- int partno;
- int perm = 0;
- bool first_open = false;
- if (mode & FMODE_READ)
- perm |= MAY_READ;
- if (mode & FMODE_WRITE)
- perm |= MAY_WRITE;
- /*
- * hooks: /n/, see "layering violations".
- */
- if (!for_part) {
- ret = devcgroup_inode_permission(bdev->bd_inode, perm);
- if (ret != 0) {
- bdput(bdev);
- return ret;
- }
- }
- restart:
- ret = -ENXIO;
- disk = bdev_get_gendisk(bdev, &partno);
- if (!disk)
- goto out;
- disk_block_events(disk);
- mutex_lock_nested(&bdev->bd_mutex, for_part);
- if (!bdev->bd_openers) {
- first_open = true;
- bdev->bd_disk = disk;
- bdev->bd_queue = disk->queue;
- bdev->bd_contains = bdev;
- bdev->bd_partno = partno;
- if (!partno) {
- ret = -ENXIO;
- bdev->bd_part = disk_get_part(disk, partno);
- if (!bdev->bd_part)
- goto out_clear;
- ret = 0;
- if (disk->fops->open) {
- ret = disk->fops->open(bdev, mode);
- if (ret == -ERESTARTSYS) {
- /* Lost a race with 'disk' being
- * deleted, try again.
- * See md.c
- */
- disk_put_part(bdev->bd_part);
- bdev->bd_part = NULL;
- bdev->bd_disk = NULL;
- bdev->bd_queue = NULL;
- mutex_unlock(&bdev->bd_mutex);
- disk_unblock_events(disk);
- put_disk_and_module(disk);
- goto restart;
- }
- }
- if (!ret) {
- bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
- set_init_blocksize(bdev);
- }
- /*
- * If the device is invalidated, rescan the partitions
- * if the open succeeded or failed with -ENOMEDIUM.
- * The latter is necessary to prevent ghost
- * partitions on a removed medium.
- */
- if (bdev->bd_invalidated &&
- (!ret || ret == -ENOMEDIUM))
- bdev_disk_changed(bdev, ret == -ENOMEDIUM);
- if (ret)
- goto out_clear;
- } else {
- struct block_device *whole;
- whole = bdget_disk(disk, 0);
- ret = -ENOMEM;
- if (!whole)
- goto out_clear;
- BUG_ON(for_part);
- ret = __blkdev_get(whole, mode, 1);
- if (ret)
- goto out_clear;
- bdev->bd_contains = whole;
- bdev->bd_part = disk_get_part(disk, partno);
- if (!(disk->flags & GENHD_FL_UP) ||
- !bdev->bd_part || !bdev->bd_part->nr_sects) {
- ret = -ENXIO;
- goto out_clear;
- }
- bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
- set_init_blocksize(bdev);
- }
- if (bdev->bd_bdi == &noop_backing_dev_info)
- bdev->bd_bdi = bdi_get(disk->queue->backing_dev_info);
- } else {
- if (bdev->bd_contains == bdev) {
- ret = 0;
- if (bdev->bd_disk->fops->open)
- ret = bdev->bd_disk->fops->open(bdev, mode);
- /* the same as first opener case, read comment there */
- if (bdev->bd_invalidated &&
- (!ret || ret == -ENOMEDIUM))
- bdev_disk_changed(bdev, ret == -ENOMEDIUM);
- if (ret)
- goto out_unlock_bdev;
- }
- }
- bdev->bd_openers++;
- if (for_part)
- bdev->bd_part_count++;
- mutex_unlock(&bdev->bd_mutex);
- disk_unblock_events(disk);
- /* only one opener holds refs to the module and disk */
- if (!first_open)
- put_disk_and_module(disk);
- return 0;
- out_clear:
- disk_put_part(bdev->bd_part);
- bdev->bd_disk = NULL;
- bdev->bd_part = NULL;
- bdev->bd_queue = NULL;
- if (bdev != bdev->bd_contains)
- __blkdev_put(bdev->bd_contains, mode, 1);
- bdev->bd_contains = NULL;
- out_unlock_bdev:
- mutex_unlock(&bdev->bd_mutex);
- disk_unblock_events(disk);
- put_disk_and_module(disk);
- out:
- bdput(bdev);
- return ret;
- }
- /**
- * blkdev_get - open a block device
- * @bdev: block_device to open
- * @mode: FMODE_* mask
- * @holder: exclusive holder identifier
- *
- * Open @bdev with @mode. If @mode includes %FMODE_EXCL, @bdev is
- * open with exclusive access. Specifying %FMODE_EXCL with %NULL
- * @holder is invalid. Exclusive opens may nest for the same @holder.
- *
- * On success, the reference count of @bdev is unchanged. On failure,
- * @bdev is put.
- *
- * CONTEXT:
- * Might sleep.
- *
- * RETURNS:
- * 0 on success, -errno on failure.
- */
- int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
- {
- struct block_device *whole = NULL;
- int res;
- WARN_ON_ONCE((mode & FMODE_EXCL) && !holder);
- if ((mode & FMODE_EXCL) && holder) {
- whole = bd_start_claiming(bdev, holder);
- if (IS_ERR(whole)) {
- bdput(bdev);
- return PTR_ERR(whole);
- }
- }
- res = __blkdev_get(bdev, mode, 0);
- if (whole) {
- struct gendisk *disk = whole->bd_disk;
- /* finish claiming */
- mutex_lock(&bdev->bd_mutex);
- spin_lock(&bdev_lock);
- if (!res) {
- BUG_ON(!bd_may_claim(bdev, whole, holder));
- /*
- * Note that for a whole device bd_holders
- * will be incremented twice, and bd_holder
- * will be set to bd_may_claim before being
- * set to holder.
- */
- whole->bd_holders++;
- whole->bd_holder = bd_may_claim;
- bdev->bd_holders++;
- bdev->bd_holder = holder;
- }
- /* tell others that we're done */
- BUG_ON(whole->bd_claiming != holder);
- whole->bd_claiming = NULL;
- wake_up_bit(&whole->bd_claiming, 0);
- spin_unlock(&bdev_lock);
- /*
- * Block event polling for write claims if requested. Any
- * write holder makes the write_holder state stick until
- * all are released. This is good enough and tracking
- * individual writeable references is too fragile given the
- * way @mode is used in blkdev_get/put().
- */
- if (!res && (mode & FMODE_WRITE) && !bdev->bd_write_holder &&
- (disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE)) {
- bdev->bd_write_holder = true;
- disk_block_events(disk);
- }
- mutex_unlock(&bdev->bd_mutex);
- bdput(whole);
- }
- return res;
- }
- EXPORT_SYMBOL(blkdev_get);
- /**
- * blkdev_get_by_path - open a block device by name
- * @path: path to the block device to open
- * @mode: FMODE_* mask
- * @holder: exclusive holder identifier
- *
- * Open the blockdevice described by the device file at @path. @mode
- * and @holder are identical to blkdev_get().
- *
- * On success, the returned block_device has reference count of one.
- *
- * CONTEXT:
- * Might sleep.
- *
- * RETURNS:
- * Pointer to block_device on success, ERR_PTR(-errno) on failure.
- */
- struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
- void *holder)
- {
- struct block_device *bdev;
- int err;
- bdev = lookup_bdev(path);
- if (IS_ERR(bdev))
- return bdev;
- err = blkdev_get(bdev, mode, holder);
- if (err)
- return ERR_PTR(err);
- if ((mode & FMODE_WRITE) && bdev_read_only(bdev)) {
- blkdev_put(bdev, mode);
- return ERR_PTR(-EACCES);
- }
- return bdev;
- }
- EXPORT_SYMBOL(blkdev_get_by_path);
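- /*
- * Usage sketch (illustrative, not part of the original file): mount code
- * and stacking drivers open a device exclusively through this helper and
- * release it with blkdev_put() using the same mode; "holder" is any stable
- * cookie (commonly the caller's private structure) identifying the
- * exclusive owner, and nested exclusive opens must pass the same value:
- *
- *	bdev = blkdev_get_by_path("/dev/sda1",
- *				  FMODE_READ | FMODE_WRITE | FMODE_EXCL,
- *				  holder);
- *	if (IS_ERR(bdev))
- *		return PTR_ERR(bdev);
- *	...
- *	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
- */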
- /**
- * blkdev_get_by_dev - open a block device by device number
- * @dev: device number of block device to open
- * @mode: FMODE_* mask
- * @holder: exclusive holder identifier
- *
- * Open the blockdevice described by device number @dev. @mode and
- * @holder are identical to blkdev_get().
- *
- * Use it ONLY if you really do not have anything better - i.e. when
- * you are behind a truly sucky interface and all you are given is a
- * device number. _Never_ to be used for internal purposes. If you
- * ever need it - reconsider your API.
- *
- * On success, the returned block_device has reference count of one.
- *
- * CONTEXT:
- * Might sleep.
- *
- * RETURNS:
- * Pointer to block_device on success, ERR_PTR(-errno) on failure.
- */
- struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
- {
- struct block_device *bdev;
- int err;
- bdev = bdget(dev);
- if (!bdev)
- return ERR_PTR(-ENOMEM);
- err = blkdev_get(bdev, mode, holder);
- if (err)
- return ERR_PTR(err);
- return bdev;
- }
- EXPORT_SYMBOL(blkdev_get_by_dev);
- static int blkdev_open(struct inode * inode, struct file * filp)
- {
- struct block_device *bdev;
- /*
- * Preserve backwards compatibility and allow large file access
- * even if userspace doesn't ask for it explicitly. Some mkfs
- * binary needs it. We might want to drop this workaround
- * during an unstable branch.
- */
- filp->f_flags |= O_LARGEFILE;
- filp->f_mode |= FMODE_NOWAIT;
- if (filp->f_flags & O_NDELAY)
- filp->f_mode |= FMODE_NDELAY;
- if (filp->f_flags & O_EXCL)
- filp->f_mode |= FMODE_EXCL;
- if ((filp->f_flags & O_ACCMODE) == 3)
- filp->f_mode |= FMODE_WRITE_IOCTL;
- bdev = bd_acquire(inode);
- if (bdev == NULL)
- return -ENOMEM;
- filp->f_mapping = bdev->bd_inode->i_mapping;
- filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
- return blkdev_get(bdev, filp->f_mode, filp);
- }
- static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
- {
- struct gendisk *disk = bdev->bd_disk;
- struct block_device *victim = NULL;
- mutex_lock_nested(&bdev->bd_mutex, for_part);
- if (for_part)
- bdev->bd_part_count--;
- if (!--bdev->bd_openers) {
- WARN_ON_ONCE(bdev->bd_holders);
- sync_blockdev(bdev);
- kill_bdev(bdev);
- bdev_write_inode(bdev);
- }
- if (bdev->bd_contains == bdev) {
- if (disk->fops->release)
- disk->fops->release(disk, mode);
- }
- if (!bdev->bd_openers) {
- disk_put_part(bdev->bd_part);
- bdev->bd_part = NULL;
- bdev->bd_disk = NULL;
- if (bdev != bdev->bd_contains)
- victim = bdev->bd_contains;
- bdev->bd_contains = NULL;
- put_disk_and_module(disk);
- }
- mutex_unlock(&bdev->bd_mutex);
- bdput(bdev);
- if (victim)
- __blkdev_put(victim, mode, 1);
- }
- void blkdev_put(struct block_device *bdev, fmode_t mode)
- {
- mutex_lock(&bdev->bd_mutex);
- if (mode & FMODE_EXCL) {
- bool bdev_free;
- /*
- * Release a claim on the device. The holder fields
- * are protected with bdev_lock. bd_mutex is to
- * synchronize disk_holder unlinking.
- */
- spin_lock(&bdev_lock);
- WARN_ON_ONCE(--bdev->bd_holders < 0);
- WARN_ON_ONCE(--bdev->bd_contains->bd_holders < 0);
- /* bd_contains might point to self, check in a separate step */
- if ((bdev_free = !bdev->bd_holders))
- bdev->bd_holder = NULL;
- if (!bdev->bd_contains->bd_holders)
- bdev->bd_contains->bd_holder = NULL;
- spin_unlock(&bdev_lock);
- /*
- * If this was the last claim, remove the holder link and
- * unblock event polling if it was a write holder.
- */
- if (bdev_free && bdev->bd_write_holder) {
- disk_unblock_events(bdev->bd_disk);
- bdev->bd_write_holder = false;
- }
- }
- /*
- * Trigger event checking and tell drivers to flush MEDIA_CHANGE
- * event. This is to ensure detection of media removal commanded
- * from userland - e.g. eject(1).
- */
- disk_flush_events(bdev->bd_disk, DISK_EVENT_MEDIA_CHANGE);
- mutex_unlock(&bdev->bd_mutex);
- __blkdev_put(bdev, mode, 0);
- }
- EXPORT_SYMBOL(blkdev_put);
- static int blkdev_close(struct inode * inode, struct file * filp)
- {
- struct block_device *bdev = I_BDEV(bdev_file_inode(filp));
- blkdev_put(bdev, filp->f_mode);
- return 0;
- }
- static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
- {
- struct block_device *bdev = I_BDEV(bdev_file_inode(file));
- fmode_t mode = file->f_mode;
- /*
- * O_NDELAY can be altered using fcntl(.., F_SETFL, ..), so we have
- * to update it before every ioctl.
- */
- if (file->f_flags & O_NDELAY)
- mode |= FMODE_NDELAY;
- else
- mode &= ~FMODE_NDELAY;
- return blkdev_ioctl(bdev, mode, cmd, arg);
- }
- /*
- * Write data to the block device. Only intended for the block device itself
- * and the raw driver which basically is a fake block device.
- *
- * Does not take i_mutex for the write and thus is not for general purpose
- * use.
- */
- ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
- {
- struct file *file = iocb->ki_filp;
- struct inode *bd_inode = bdev_file_inode(file);
- loff_t size = i_size_read(bd_inode);
- struct blk_plug plug;
- ssize_t ret;
- if (bdev_read_only(I_BDEV(bd_inode)))
- return -EPERM;
- if (!iov_iter_count(from))
- return 0;
- if (iocb->ki_pos >= size)
- return -ENOSPC;
- if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
- return -EOPNOTSUPP;
- iov_iter_truncate(from, size - iocb->ki_pos);
- blk_start_plug(&plug);
- ret = __generic_file_write_iter(iocb, from);
- if (ret > 0)
- ret = generic_write_sync(iocb, ret);
- blk_finish_plug(&plug);
- return ret;
- }
- EXPORT_SYMBOL_GPL(blkdev_write_iter);
- ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
- {
- struct file *file = iocb->ki_filp;
- struct inode *bd_inode = bdev_file_inode(file);
- loff_t size = i_size_read(bd_inode);
- loff_t pos = iocb->ki_pos;
- if (pos >= size)
- return 0;
- size -= pos;
- iov_iter_truncate(to, size);
- return generic_file_read_iter(iocb, to);
- }
- EXPORT_SYMBOL_GPL(blkdev_read_iter);
- /*
- * Try to release a page associated with block device when the system
- * is under memory pressure.
- */
- static int blkdev_releasepage(struct page *page, gfp_t wait)
- {
- struct super_block *super = BDEV_I(page->mapping->host)->bdev.bd_super;
- if (super && super->s_op->bdev_try_to_free_page)
- return super->s_op->bdev_try_to_free_page(super, page, wait);
- return try_to_free_buffers(page);
- }
- static int blkdev_writepages(struct address_space *mapping,
- struct writeback_control *wbc)
- {
- return generic_writepages(mapping, wbc);
- }
- static const struct address_space_operations def_blk_aops = {
- .readpage = blkdev_readpage,
- .readpages = blkdev_readpages,
- .writepage = blkdev_writepage,
- .write_begin = blkdev_write_begin,
- .write_end = blkdev_write_end,
- .writepages = blkdev_writepages,
- .releasepage = blkdev_releasepage,
- .direct_IO = blkdev_direct_IO,
- .is_dirty_writeback = buffer_check_dirty_writeback,
- };
- #define BLKDEV_FALLOC_FL_SUPPORTED \
- (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | \
- FALLOC_FL_ZERO_RANGE | FALLOC_FL_NO_HIDE_STALE)
- static long blkdev_fallocate(struct file *file, int mode, loff_t start,
- loff_t len)
- {
- struct block_device *bdev = I_BDEV(bdev_file_inode(file));
- struct address_space *mapping;
- loff_t end = start + len - 1;
- loff_t isize;
- int error;
- /* Fail if we don't recognize the flags. */
- if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
- return -EOPNOTSUPP;
- /* Don't go off the end of the device. */
- isize = i_size_read(bdev->bd_inode);
- if (start >= isize)
- return -EINVAL;
- if (end >= isize) {
- if (mode & FALLOC_FL_KEEP_SIZE) {
- len = isize - start;
- end = start + len - 1;
- } else
- return -EINVAL;
- }
- /*
- * Don't allow IO that isn't aligned to logical block size.
- */
- if ((start | len) & (bdev_logical_block_size(bdev) - 1))
- return -EINVAL;
- /* Invalidate the page cache, including dirty pages. */
- mapping = bdev->bd_inode->i_mapping;
- truncate_inode_pages_range(mapping, start, end);
- switch (mode) {
- case FALLOC_FL_ZERO_RANGE:
- case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
- error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9,
- GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
- break;
- case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
- error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9,
- GFP_KERNEL, BLKDEV_ZERO_NOFALLBACK);
- break;
- case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE:
- error = blkdev_issue_discard(bdev, start >> 9, len >> 9,
- GFP_KERNEL, 0);
- break;
- default:
- return -EOPNOTSUPP;
- }
- if (error)
- return error;
- /*
- * Invalidate again; if someone wandered in and dirtied a page,
- * the caller will be given -EBUSY. The third argument is
- * inclusive, so the rounding here is safe.
- */
- return invalidate_inode_pages2_range(mapping,
- start >> PAGE_SHIFT,
- end >> PAGE_SHIFT);
- }
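- /*
- * Userspace-side sketch (illustrative, not part of the original file):
- * the mode bits handled above correspond to fallocate(2) calls on an open
- * block device, e.g. discarding a byte range:
- *
- *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE |
- *		      FALLOC_FL_NO_HIDE_STALE, offset, length);
- *
- * Both offset and length must be multiples of the device's logical block
- * size, otherwise the call fails with -EINVAL as enforced above.
- */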
- const struct file_operations def_blk_fops = {
- .open = blkdev_open,
- .release = blkdev_close,
- .llseek = block_llseek,
- .read_iter = blkdev_read_iter,
- .write_iter = blkdev_write_iter,
- .mmap = generic_file_mmap,
- .fsync = blkdev_fsync,
- .unlocked_ioctl = block_ioctl,
- #ifdef CONFIG_COMPAT
- .compat_ioctl = compat_blkdev_ioctl,
- #endif
- .splice_read = generic_file_splice_read,
- .splice_write = iter_file_splice_write,
- .fallocate = blkdev_fallocate,
- };
- int ioctl_by_bdev(struct block_device *bdev, unsigned cmd, unsigned long arg)
- {
- int res;
- mm_segment_t old_fs = get_fs();
- set_fs(KERNEL_DS);
- res = blkdev_ioctl(bdev, 0, cmd, arg);
- set_fs(old_fs);
- return res;
- }
- EXPORT_SYMBOL(ioctl_by_bdev);
- /**
- * lookup_bdev - lookup a struct block_device by name
- * @pathname: special file representing the block device
- *
- * Get a reference to the blockdevice at @pathname in the current
- * namespace if possible and return it. Return ERR_PTR(error)
- * otherwise.
- */
- struct block_device *lookup_bdev(const char *pathname)
- {
- struct block_device *bdev;
- struct inode *inode;
- struct path path;
- int error;
- if (!pathname || !*pathname)
- return ERR_PTR(-EINVAL);
- error = kern_path(pathname, LOOKUP_FOLLOW, &path);
- if (error)
- return ERR_PTR(error);
- inode = d_backing_inode(path.dentry);
- error = -ENOTBLK;
- if (!S_ISBLK(inode->i_mode))
- goto fail;
- error = -EACCES;
- if (!may_open_dev(&path))
- goto fail;
- error = -ENOMEM;
- bdev = bd_acquire(inode);
- if (!bdev)
- goto fail;
- out:
- path_put(&path);
- return bdev;
- fail:
- bdev = ERR_PTR(error);
- goto out;
- }
- EXPORT_SYMBOL(lookup_bdev);
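- /*
- * Usage sketch (illustrative, not part of the original file): callers that
- * only need to resolve a path to a block device, without opening it, pair
- * this helper with bdput():
- *
- *	bdev = lookup_bdev("/dev/loop0");
- *	if (IS_ERR(bdev))
- *		return PTR_ERR(bdev);
- *	dev = bdev->bd_dev;
- *	bdput(bdev);
- */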
- int __invalidate_device(struct block_device *bdev, bool kill_dirty)
- {
- struct super_block *sb = get_super(bdev);
- int res = 0;
- if (sb) {
- /*
- * no need to lock the super, get_super holds the
- * read mutex so the filesystem cannot go away
- * under us (->put_super runs with the write lock
- * held).
- */
- shrink_dcache_sb(sb);
- res = invalidate_inodes(sb, kill_dirty);
- drop_super(sb);
- }
- invalidate_bdev(bdev);
- return res;
- }
- EXPORT_SYMBOL(__invalidate_device);
- void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg)
- {
- struct inode *inode, *old_inode = NULL;
- spin_lock(&blockdev_superblock->s_inode_list_lock);
- list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) {
- struct address_space *mapping = inode->i_mapping;
- struct block_device *bdev;
- spin_lock(&inode->i_lock);
- if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) ||
- mapping->nrpages == 0) {
- spin_unlock(&inode->i_lock);
- continue;
- }
- __iget(inode);
- spin_unlock(&inode->i_lock);
- spin_unlock(&blockdev_superblock->s_inode_list_lock);
- /*
- * We hold a reference to 'inode' so it couldn't have been
- * removed from the s_inodes list while we dropped the
- * s_inode_list_lock. We cannot iput the inode now as we can
- * be holding the last reference and we cannot iput it under
- * s_inode_list_lock. So we keep the reference and iput it
- * later.
- */
- iput(old_inode);
- old_inode = inode;
- bdev = I_BDEV(inode);
- mutex_lock(&bdev->bd_mutex);
- if (bdev->bd_openers)
- func(bdev, arg);
- mutex_unlock(&bdev->bd_mutex);
- spin_lock(&blockdev_superblock->s_inode_list_lock);
- }
- spin_unlock(&blockdev_superblock->s_inode_list_lock);
- iput(old_inode);
- }