- // SPDX-License-Identifier: GPL-2.0
- /*
- * linux/drivers/staging/erofs/utils.c
- *
- * Copyright (C) 2018 HUAWEI, Inc.
- * http://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of the Linux
- * distribution for more details.
- */
- #include "internal.h"
- #include <linux/pagevec.h>
- struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp)
- {
- struct page *page;
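- /* reuse a pooled page if one is left over; __GFP_NOFAIL makes the fallback allocation never fail */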
- if (!list_empty(pool)) {
- page = lru_to_page(pool);
- list_del(&page->lru);
- } else {
- page = alloc_pages(gfp | __GFP_NOFAIL, 0);
- }
- return page;
- }
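- /*
- * A hedged usage sketch (the caller shown is illustrative, not from this
- * file): callers keep a local free-page pool alive across an operation and
- * recycle pages through page->lru rather than freeing them immediately:
- *
- *	LIST_HEAD(pagepool);
- *	struct page *page = erofs_allocpage(&pagepool, GFP_NOFS);
- *	...
- *	list_add(&page->lru, &pagepool);  (recycle rather than free)
- */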
- /* global shrink count (for all mounted EROFS instances) */
- static atomic_long_t erofs_global_shrink_cnt;
- #ifdef CONFIG_EROFS_FS_ZIP
- /* neither radix_tree nor the upcoming XArray uses tagptr_t yet */
- struct erofs_workgroup *erofs_find_workgroup(
- struct super_block *sb, pgoff_t index, bool *tag)
- {
- struct erofs_sb_info *sbi = EROFS_SB(sb);
- struct erofs_workgroup *grp;
- int oldcount;
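- /*
- * The lookup runs under the RCU read lock; if grabbing a reference
- * fails because the workgroup is currently frozen or being freed,
- * drop the read lock and retry from scratch.
- */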
- repeat:
- rcu_read_lock();
- grp = radix_tree_lookup(&sbi->workstn_tree, index);
- if (grp != NULL) {
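- /* the boolean tag is encoded in the exceptional entry bit; strip it to recover the real pointer */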
- *tag = radix_tree_exceptional_entry(grp);
- grp = (void *)((unsigned long)grp &
- ~RADIX_TREE_EXCEPTIONAL_ENTRY);
- if (erofs_workgroup_get(grp, &oldcount)) {
- /* prefer to relax the RCU read side rather than spinning here */
- rcu_read_unlock();
- goto repeat;
- }
- /* decrease the shrink count added by erofs_workgroup_put */
- if (unlikely(oldcount == 1))
- atomic_long_dec(&erofs_global_shrink_cnt);
- DBG_BUGON(index != grp->index);
- }
- rcu_read_unlock();
- return grp;
- }
- int erofs_register_workgroup(struct super_block *sb,
- struct erofs_workgroup *grp,
- bool tag)
- {
- struct erofs_sb_info *sbi;
- int err;
- /* grp shouldn't have been broken or used before registration */
- if (unlikely(atomic_read(&grp->refcount) != 1)) {
- DBG_BUGON(1);
- return -EINVAL;
- }
- err = radix_tree_preload(GFP_NOFS);
- if (err)
- return err;
- sbi = EROFS_SB(sb);
- erofs_workstn_lock(sbi);
- if (tag)
- grp = (void *)((unsigned long)grp |
- 1UL << RADIX_TREE_EXCEPTIONAL_SHIFT);
- /*
- * Bump up the reference count before making this workgroup
- * visible to other users in order to avoid a potential UAF
- * for lookups that are not serialized by erofs_workstn_lock.
- */
- __erofs_workgroup_get(grp);
- err = radix_tree_insert(&sbi->workstn_tree,
- grp->index, grp);
- if (unlikely(err))
- /*
- * it's safe to decrease here since the workgroup isn't visible
- * and its refcount is >= 2 (so it cannot be frozen).
- */
- __erofs_workgroup_put(grp);
- erofs_workstn_unlock(sbi);
- radix_tree_preload_end();
- return err;
- }
- extern void erofs_workgroup_free_rcu(struct erofs_workgroup *grp);
- static void __erofs_workgroup_free(struct erofs_workgroup *grp)
- {
- atomic_long_dec(&erofs_global_shrink_cnt);
- erofs_workgroup_free_rcu(grp);
- }
- int erofs_workgroup_put(struct erofs_workgroup *grp)
- {
- int count = atomic_dec_return(&grp->refcount);
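- /* a refcount of 1 means only the workstation tree still holds the workgroup, making it a shrink candidate */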
- if (count == 1)
- atomic_long_inc(&erofs_global_shrink_cnt);
- else if (!count)
- __erofs_workgroup_free(grp);
- return count;
- }
- #ifdef EROFS_FS_HAS_MANAGED_CACHE
- /* with managed cache enabled, customized reclaim paths exist */
- static void erofs_workgroup_unfreeze_final(struct erofs_workgroup *grp)
- {
- erofs_workgroup_unfreeze(grp, 0);
- __erofs_workgroup_free(grp);
- }
- bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
- struct erofs_workgroup *grp,
- bool cleanup)
- {
- void *entry;
- /*
- * with managed cache enabled, the refcount of a workgroup
- * itself can be negative (frozen), so there is no guarantee
- * that every refcount is > 0.
- */
- if (!erofs_workgroup_try_to_freeze(grp, 1))
- return false;
- /*
- * note that all cached pages should be unlinked before the
- * workgroup is deleted from the radix tree; otherwise some
- * cached pages of an orphaned old workgroup could still be
- * linked after a new workgroup with the same index is set up.
- */
- if (erofs_try_to_free_all_cached_pages(sbi, grp)) {
- erofs_workgroup_unfreeze(grp, 1);
- return false;
- }
- /*
- * deletion cannot fail once the workgroup is frozen;
- * however, to catch unexpected race conditions early,
- * verify the returned entry with a DBG_BUGON.
- */
- entry = radix_tree_delete(&sbi->workstn_tree, grp->index);
- DBG_BUGON((void *)((unsigned long)entry &
- ~RADIX_TREE_EXCEPTIONAL_ENTRY) != grp);
- /*
- * if managed cache is enabled, the last refcount
- * is held by the workstation tree itself.
- */
- erofs_workgroup_unfreeze_final(grp);
- return true;
- }
- #else
- /* for the nocache case, there is no customized reclaim path at all */
- bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
- struct erofs_workgroup *grp,
- bool cleanup)
- {
- int cnt = atomic_read(&grp->refcount);
- void *entry;
- DBG_BUGON(cnt <= 0);
- DBG_BUGON(cleanup && cnt != 1);
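- /* a refcount above 1 means the workgroup is still in use; skip it */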
- if (cnt > 1)
- return false;
- entry = radix_tree_delete(&sbi->workstn_tree, grp->index);
- DBG_BUGON((void *)((unsigned long)entry &
- ~RADIX_TREE_EXCEPTIONAL_ENTRY) != grp);
- /* (rarely) could be grabbed again when freeing */
- erofs_workgroup_put(grp);
- return true;
- }
- #endif
- unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
- unsigned long nr_shrink,
- bool cleanup)
- {
- pgoff_t first_index = 0;
- void *batch[PAGEVEC_SIZE];
- unsigned int freed = 0;
- int i, found;
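- /*
- * walk the workstation radix tree in batches of up to
- * PAGEVEC_SIZE entries, resuming after the last index seen.
- */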
- repeat:
- erofs_workstn_lock(sbi);
- found = radix_tree_gang_lookup(&sbi->workstn_tree,
- batch, first_index, PAGEVEC_SIZE);
- for (i = 0; i < found; ++i) {
- struct erofs_workgroup *grp = (void *)
- ((unsigned long)batch[i] &
- ~RADIX_TREE_EXCEPTIONAL_ENTRY);
- first_index = grp->index + 1;
- /* try to shrink each valid workgroup */
- if (!erofs_try_to_release_workgroup(sbi, grp, cleanup))
- continue;
- ++freed;
- if (unlikely(!--nr_shrink))
- break;
- }
- erofs_workstn_unlock(sbi);
- if (i && nr_shrink)
- goto repeat;
- return freed;
- }
- #endif
- /* protected by 'erofs_sb_list_lock' */
- static unsigned int shrinker_run_no;
- /* protects the mounted 'erofs_sb_list' */
- static DEFINE_SPINLOCK(erofs_sb_list_lock);
- static LIST_HEAD(erofs_sb_list);
- void erofs_register_super(struct super_block *sb)
- {
- struct erofs_sb_info *sbi = EROFS_SB(sb);
- mutex_init(&sbi->umount_mutex);
- spin_lock(&erofs_sb_list_lock);
- list_add(&sbi->list, &erofs_sb_list);
- spin_unlock(&erofs_sb_list_lock);
- }
- void erofs_unregister_super(struct super_block *sb)
- {
- spin_lock(&erofs_sb_list_lock);
- list_del(&EROFS_SB(sb)->list);
- spin_unlock(&erofs_sb_list_lock);
- }
- unsigned long erofs_shrink_count(struct shrinker *shrink,
- struct shrink_control *sc)
- {
- return atomic_long_read(&erofs_global_shrink_cnt);
- }
- unsigned long erofs_shrink_scan(struct shrinker *shrink,
- struct shrink_control *sc)
- {
- struct erofs_sb_info *sbi;
- struct list_head *p;
- unsigned long nr = sc->nr_to_scan;
- unsigned int run_no;
- unsigned long freed = 0;
- spin_lock(&erofs_sb_list_lock);
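- /* run number 0 is reserved: it matches freshly-registered superblocks, so skip it on wrap-around */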
- do
- run_no = ++shrinker_run_no;
- while (run_no == 0);
- /* Iterate over all mounted superblocks and try to shrink them */
- p = erofs_sb_list.next;
- while (p != &erofs_sb_list) {
- sbi = list_entry(p, struct erofs_sb_info, list);
- /*
- * We move the ones we do to the end of the list, so we stop
- * when we see one we have already done.
- */
- if (sbi->shrinker_run_no == run_no)
- break;
- if (!mutex_trylock(&sbi->umount_mutex)) {
- p = p->next;
- continue;
- }
- spin_unlock(&erofs_sb_list_lock);
- sbi->shrinker_run_no = run_no;
- #ifdef CONFIG_EROFS_FS_ZIP
- freed += erofs_shrink_workstation(sbi, nr, false);
- #endif
- spin_lock(&erofs_sb_list_lock);
- /* Get the next list element before we move this one */
- p = p->next;
- /*
- * Move this one to the end of the list to provide some
- * fairness.
- */
- list_move_tail(&sbi->list, &erofs_sb_list);
- mutex_unlock(&sbi->umount_mutex);
- if (freed >= nr)
- break;
- }
- spin_unlock(&erofs_sb_list_lock);
- return freed;
- }
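- /*
- * A hedged sketch of how the two callbacks above are typically wired up
- * elsewhere (e.g. in super.c; the exact placement is an assumption):
- *
- *	static struct shrinker erofs_shrinker_info = {
- *		.scan_objects = erofs_shrink_scan,
- *		.count_objects = erofs_shrink_count,
- *		.seeks = DEFAULT_SEEKS,
- *	};
- *
- *	err = register_shrinker(&erofs_shrinker_info);
- */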