/*
 * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/llist.h>

#include "rds_single_path.h"
#include "ib_mr.h"

struct workqueue_struct *rds_ib_mr_wq;

static DEFINE_PER_CPU(unsigned long, clean_list_grace);
#define CLEAN_LIST_BUSY_BIT 0

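/* Find the RDS IB device that has @ipaddr bound to it.  Walks the
 * global rds_ib_devices list under RCU and takes a reference on the
 * match; the caller must drop it with rds_ib_dev_put().
 */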
static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
{
        struct rds_ib_device *rds_ibdev;
        struct rds_ib_ipaddr *i_ipaddr;

        rcu_read_lock();
        list_for_each_entry_rcu(rds_ibdev, &rds_ib_devices, list) {
                list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
                        if (i_ipaddr->ipaddr == ipaddr) {
                                refcount_inc(&rds_ibdev->refcount);
                                rcu_read_unlock();
                                return rds_ibdev;
                        }
                }
        }
        rcu_read_unlock();

        return NULL;
}

static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
        struct rds_ib_ipaddr *i_ipaddr;

        i_ipaddr = kmalloc(sizeof *i_ipaddr, GFP_KERNEL);
        if (!i_ipaddr)
                return -ENOMEM;

        i_ipaddr->ipaddr = ipaddr;

        spin_lock_irq(&rds_ibdev->spinlock);
        list_add_tail_rcu(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
        spin_unlock_irq(&rds_ibdev->spinlock);

        return 0;
}

static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
        struct rds_ib_ipaddr *i_ipaddr;
        struct rds_ib_ipaddr *to_free = NULL;

        spin_lock_irq(&rds_ibdev->spinlock);
        list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
                if (i_ipaddr->ipaddr == ipaddr) {
                        list_del_rcu(&i_ipaddr->list);
                        to_free = i_ipaddr;
                        break;
                }
        }
        spin_unlock_irq(&rds_ibdev->spinlock);

        if (to_free)
                kfree_rcu(to_free, rcu);
}

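/* Bind @ipaddr to @rds_ibdev, first unbinding it from any other device
 * it may currently be mapped to.  Only the low 32 bits of the address
 * are used here: IPv4-mapped IPv6 addresses carry the IPv4 address in
 * s6_addr32[3].
 */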
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev,
                         struct in6_addr *ipaddr)
{
        struct rds_ib_device *rds_ibdev_old;

        rds_ibdev_old = rds_ib_get_device(ipaddr->s6_addr32[3]);
        if (!rds_ibdev_old)
                return rds_ib_add_ipaddr(rds_ibdev, ipaddr->s6_addr32[3]);

        if (rds_ibdev_old != rds_ibdev) {
                rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr->s6_addr32[3]);
                rds_ib_dev_put(rds_ibdev_old);
                return rds_ib_add_ipaddr(rds_ibdev, ipaddr->s6_addr32[3]);
        }
        rds_ib_dev_put(rds_ibdev_old);

        return 0;
}

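/* The connection has been bound to an IB device: move it off the
 * global ib_nodev_conns list and onto the device's conn_list, taking
 * a device reference that rds_ib_remove_conn() will drop.
 */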
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
        struct rds_ib_connection *ic = conn->c_transport_data;

        /* conn was previously on the nodev_conns_list */
        spin_lock_irq(&ib_nodev_conns_lock);
        BUG_ON(list_empty(&ib_nodev_conns));
        BUG_ON(list_empty(&ic->ib_node));
        list_del(&ic->ib_node);

        spin_lock(&rds_ibdev->spinlock);
        list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
        spin_unlock(&rds_ibdev->spinlock);
        spin_unlock_irq(&ib_nodev_conns_lock);

        ic->rds_ibdev = rds_ibdev;
        refcount_inc(&rds_ibdev->refcount);
}

void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
        struct rds_ib_connection *ic = conn->c_transport_data;

        /* place conn on nodev_conns_list */
        spin_lock(&ib_nodev_conns_lock);

        spin_lock_irq(&rds_ibdev->spinlock);
        BUG_ON(list_empty(&ic->ib_node));
        list_del(&ic->ib_node);
        spin_unlock_irq(&rds_ibdev->spinlock);

        list_add_tail(&ic->ib_node, &ib_nodev_conns);

        spin_unlock(&ib_nodev_conns_lock);

        ic->rds_ibdev = NULL;
        rds_ib_dev_put(rds_ibdev);
}

void rds_ib_destroy_nodev_conns(void)
{
        struct rds_ib_connection *ic, *_ic;
        LIST_HEAD(tmp_list);

        /* avoid calling conn_destroy with irqs off */
        spin_lock_irq(&ib_nodev_conns_lock);
        list_splice(&ib_nodev_conns, &tmp_list);
        spin_unlock_irq(&ib_nodev_conns_lock);

        list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
                rds_conn_destroy(ic->conn);
}

void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo)
{
        struct rds_ib_mr_pool *pool_1m = rds_ibdev->mr_1m_pool;

        iinfo->rdma_mr_max = pool_1m->max_items;
        iinfo->rdma_mr_size = pool_1m->fmr_attr.max_pages;
}

#if IS_ENABLED(CONFIG_IPV6)
void rds6_ib_get_mr_info(struct rds_ib_device *rds_ibdev,
                         struct rds6_info_rdma_connection *iinfo6)
{
        struct rds_ib_mr_pool *pool_1m = rds_ibdev->mr_1m_pool;

        iinfo6->rdma_mr_max = pool_1m->max_items;
        iinfo6->rdma_mr_size = pool_1m->fmr_attr.max_pages;
}
#endif

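/* Pop one MR off the pool's lock-free clean list, or return NULL if it
 * is empty.  The per-CPU CLEAN_LIST_BUSY_BIT marks this CPU as being
 * inside llist_del_first() so that the flush path can wait out the
 * critical section (see wait_clean_list_grace()) before re-adding
 * nodes to the list.
 */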
struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *pool)
{
        struct rds_ib_mr *ibmr = NULL;
        struct llist_node *ret;
        unsigned long *flag;

        preempt_disable();
        flag = this_cpu_ptr(&clean_list_grace);
        set_bit(CLEAN_LIST_BUSY_BIT, flag);
        ret = llist_del_first(&pool->clean_list);
        if (ret) {
                ibmr = llist_entry(ret, struct rds_ib_mr, llnode);
                if (pool->pool_type == RDS_IB_MR_8K_POOL)
                        rds_ib_stats_inc(s_ib_rdma_mr_8k_reused);
                else
                        rds_ib_stats_inc(s_ib_rdma_mr_1m_reused);
        }

        clear_bit(CLEAN_LIST_BUSY_BIT, flag);
        preempt_enable();
        return ibmr;
}

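/* Spin until no CPU holds CLEAN_LIST_BUSY_BIT, i.e. until nobody is
 * inside llist_del_first() on a clean list.  This is what makes it
 * safe for the flush path to splice unmapped MRs back onto the clean
 * list without racing a concurrent llist_del_first().
 */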
static inline void wait_clean_list_grace(void)
{
        int cpu;
        unsigned long *flag;

        for_each_online_cpu(cpu) {
                flag = &per_cpu(clean_list_grace, cpu);
                while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
                        cpu_relax();
        }
}

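/* DMA-sync the MR's pages for the CPU or for the device, depending on
 * the direction of the RDMA transfer the caller is about to perform
 * or has just completed.
 */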
void rds_ib_sync_mr(void *trans_private, int direction)
{
        struct rds_ib_mr *ibmr = trans_private;
        struct rds_ib_device *rds_ibdev = ibmr->device;

        switch (direction) {
        case DMA_FROM_DEVICE:
                ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg,
                                       ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
                break;
        case DMA_TO_DEVICE:
                ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg,
                                          ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
                break;
        }
}

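/* Unmap and unpin everything attached to @ibmr.  Pages are marked
 * dirty before release because the peer may have written to them via
 * RDMA (see the FIXME below about telling r/w MRs from r/o ones).
 */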
void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
        struct rds_ib_device *rds_ibdev = ibmr->device;

        if (ibmr->sg_dma_len) {
                ib_dma_unmap_sg(rds_ibdev->dev,
                                ibmr->sg, ibmr->sg_len,
                                DMA_BIDIRECTIONAL);
                ibmr->sg_dma_len = 0;
        }

        /* Release the s/g list */
        if (ibmr->sg_len) {
                unsigned int i;

                for (i = 0; i < ibmr->sg_len; ++i) {
                        struct page *page = sg_page(&ibmr->sg[i]);

                        /* FIXME we need a way to tell a r/w MR
                         * from a r/o MR */
                        WARN_ON(!page->mapping && irqs_disabled());
                        set_page_dirty(page);
                        put_page(page);
                }
                kfree(ibmr->sg);

                ibmr->sg = NULL;
                ibmr->sg_len = 0;
        }
}

void rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
        unsigned int pinned = ibmr->sg_len;

        __rds_ib_teardown_mr(ibmr);
        if (pinned) {
                struct rds_ib_mr_pool *pool = ibmr->pool;

                atomic_sub(pinned, &pool->free_pinned);
        }
}

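/* How many MRs a flush should actually destroy: all of them for a
 * full (free_all) flush, none otherwise - a normal flush only unmaps
 * dirty MRs and moves them to the clean list for reuse.
 */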
static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
{
        unsigned int item_count;

        item_count = atomic_read(&pool->item_count);
        if (free_all)
                return item_count;

        return 0;
}

/*
 * given an llist of mrs, put them all into the list_head for more processing
 */
static unsigned int llist_append_to_list(struct llist_head *llist,
                                         struct list_head *list)
{
        struct rds_ib_mr *ibmr;
        struct llist_node *node;
        struct llist_node *next;
        unsigned int count = 0;

        node = llist_del_all(llist);
        while (node) {
                next = node->next;
                ibmr = llist_entry(node, struct rds_ib_mr, llnode);
                list_add_tail(&ibmr->unmap_list, list);
                node = next;
                count++;
        }
        return count;
}

/*
 * this takes a list head of mrs and turns it into a single linked
 * chain of llist nodes, returning the head and tail of the chain so
 * the caller can splice the whole batch onto the clean list with one
 * llist_add_batch() call.
 */
static void list_to_llist_nodes(struct rds_ib_mr_pool *pool,
                                struct list_head *list,
                                struct llist_node **nodes_head,
                                struct llist_node **nodes_tail)
{
        struct rds_ib_mr *ibmr;
        struct llist_node *cur = NULL;
        struct llist_node **next = nodes_head;

        list_for_each_entry(ibmr, list, unmap_list) {
                cur = &ibmr->llnode;
                *next = cur;
                next = &cur->next;
        }
        *next = NULL;
        *nodes_tail = cur;
}

/*
 * Flush our pool of MRs.
 * At a minimum, all currently unused MRs are unmapped.
 * If the number of MRs allocated exceeds the limit, we also try
 * to free as many MRs as needed to get back to this limit.
 */
int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
                         int free_all, struct rds_ib_mr **ibmr_ret)
{
        struct rds_ib_mr *ibmr;
        struct llist_node *clean_nodes;
        struct llist_node *clean_tail;
        LIST_HEAD(unmap_list);
        unsigned long unpinned = 0;
        unsigned int nfreed = 0, dirty_to_clean = 0, free_goal;

        if (pool->pool_type == RDS_IB_MR_8K_POOL)
                rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_flush);
        else
                rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_flush);

        if (ibmr_ret) {
                DEFINE_WAIT(wait);
                while (!mutex_trylock(&pool->flush_lock)) {
                        ibmr = rds_ib_reuse_mr(pool);
                        if (ibmr) {
                                *ibmr_ret = ibmr;
                                finish_wait(&pool->flush_wait, &wait);
                                goto out_nolock;
                        }

                        prepare_to_wait(&pool->flush_wait, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (llist_empty(&pool->clean_list))
                                schedule();

                        ibmr = rds_ib_reuse_mr(pool);
                        if (ibmr) {
                                *ibmr_ret = ibmr;
                                finish_wait(&pool->flush_wait, &wait);
                                goto out_nolock;
                        }
                }
                finish_wait(&pool->flush_wait, &wait);
        } else
                mutex_lock(&pool->flush_lock);

        if (ibmr_ret) {
                ibmr = rds_ib_reuse_mr(pool);
                if (ibmr) {
                        *ibmr_ret = ibmr;
                        goto out;
                }
        }

        /* Get the list of all MRs to be dropped. Ordering matters -
         * we want to put drop_list ahead of free_list.
         */
        dirty_to_clean = llist_append_to_list(&pool->drop_list, &unmap_list);
        dirty_to_clean += llist_append_to_list(&pool->free_list, &unmap_list);
        if (free_all)
                llist_append_to_list(&pool->clean_list, &unmap_list);

        free_goal = rds_ib_flush_goal(pool, free_all);

        if (list_empty(&unmap_list))
                goto out;

        if (pool->use_fastreg)
                rds_ib_unreg_frmr(&unmap_list, &nfreed, &unpinned, free_goal);
        else
                rds_ib_unreg_fmr(&unmap_list, &nfreed, &unpinned, free_goal);

        if (!list_empty(&unmap_list)) {
                /* we have to make sure that none of the things we're about
                 * to put on the clean list would race with other cpus trying
                 * to pull items off.  The llist would explode if we managed to
                 * remove something from the clean list and then add it back again
                 * while another CPU was spinning on that same item in llist_del_first.
                 *
                 * This is pretty unlikely, but just in case wait for an llist grace period
                 * here before adding anything back into the clean list.
                 */
                wait_clean_list_grace();

                list_to_llist_nodes(pool, &unmap_list, &clean_nodes, &clean_tail);
                if (ibmr_ret) {
                        *ibmr_ret = llist_entry(clean_nodes, struct rds_ib_mr, llnode);
                        clean_nodes = clean_nodes->next;
                }
                /* more than one entry in llist nodes */
                if (clean_nodes)
                        llist_add_batch(clean_nodes, clean_tail,
                                        &pool->clean_list);
        }

        atomic_sub(unpinned, &pool->free_pinned);
        atomic_sub(dirty_to_clean, &pool->dirty_count);
        atomic_sub(nfreed, &pool->item_count);

out:
        mutex_unlock(&pool->flush_lock);
        if (waitqueue_active(&pool->flush_wait))
                wake_up(&pool->flush_wait);
out_nolock:
        return 0;
}

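/* Get an MR for a new registration: reuse a clean one when available,
 * otherwise claim a new slot under pool->max_items.  When the pool is
 * exhausted, flush synchronously and retry, giving up with -EAGAIN
 * (and counting the pool as depleted) once too many attempts fail.
 */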
struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool)
{
        struct rds_ib_mr *ibmr = NULL;
        int iter = 0;

        while (1) {
                ibmr = rds_ib_reuse_mr(pool);
                if (ibmr)
                        return ibmr;

                if (atomic_inc_return(&pool->item_count) <= pool->max_items)
                        break;

                atomic_dec(&pool->item_count);

                if (++iter > 2) {
                        if (pool->pool_type == RDS_IB_MR_8K_POOL)
                                rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_depleted);
                        else
                                rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_depleted);
                        return ERR_PTR(-EAGAIN);
                }

                /* We do have some empty MRs. Flush them out. */
                if (pool->pool_type == RDS_IB_MR_8K_POOL)
                        rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_wait);
                else
                        rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_wait);

                rds_ib_flush_mr_pool(pool, 0, &ibmr);
                if (ibmr)
                        return ibmr;
        }

        return ibmr;
}

static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
{
        struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);

        rds_ib_flush_mr_pool(pool, 0, NULL);
}

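/* Transport hook invoked when a socket releases an MR.  The MR is
 * queued on the pool's free or drop list rather than destroyed, and a
 * pool flush is kicked off if too many pages are pinned or too many
 * MRs are dirty.
 */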
void rds_ib_free_mr(void *trans_private, int invalidate)
{
        struct rds_ib_mr *ibmr = trans_private;
        struct rds_ib_mr_pool *pool = ibmr->pool;
        struct rds_ib_device *rds_ibdev = ibmr->device;

        rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);

        /* Return it to the pool's free list */
        if (rds_ibdev->use_fastreg)
                rds_ib_free_frmr_list(ibmr);
        else
                rds_ib_free_fmr_list(ibmr);

        atomic_add(ibmr->sg_len, &pool->free_pinned);
        atomic_inc(&pool->dirty_count);

        /* If we've pinned too many pages, request a flush */
        if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
            atomic_read(&pool->dirty_count) >= pool->max_items / 5)
                queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);

        if (invalidate) {
                if (likely(!in_interrupt())) {
                        rds_ib_flush_mr_pool(pool, 0, NULL);
                } else {
                        /* We get here if the user created a MR marked
                         * as use_once and invalidate at the same time.
                         */
                        queue_delayed_work(rds_ib_mr_wq,
                                           &pool->flush_worker, 10);
                }
        }

        rds_ib_dev_put(rds_ibdev);
}

void rds_ib_flush_mrs(void)
{
        struct rds_ib_device *rds_ibdev;

        down_read(&rds_ib_devices_lock);
        list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
                if (rds_ibdev->mr_8k_pool)
                        rds_ib_flush_mr_pool(rds_ibdev->mr_8k_pool, 0, NULL);

                if (rds_ibdev->mr_1m_pool)
                        rds_ib_flush_mr_pool(rds_ibdev->mr_1m_pool, 0, NULL);
        }
        up_read(&rds_ib_devices_lock);
}

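/* Transport hook for registering a scatterlist as an RDMA MR on the
 * device bound to @rs.  Returns the MR handle and writes the R_Key to
 * @key_ret, or returns an ERR_PTR on failure.
 */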
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
                    struct rds_sock *rs, u32 *key_ret,
                    struct rds_connection *conn)
{
        struct rds_ib_device *rds_ibdev;
        struct rds_ib_mr *ibmr = NULL;
        struct rds_ib_connection *ic = NULL;
        int ret;

        rds_ibdev = rds_ib_get_device(rs->rs_bound_addr.s6_addr32[3]);
        if (!rds_ibdev) {
                ret = -ENODEV;
                goto out;
        }

        if (conn)
                ic = conn->c_transport_data;

        if (!rds_ibdev->mr_8k_pool || !rds_ibdev->mr_1m_pool) {
                ret = -ENODEV;
                goto out;
        }

        if (rds_ibdev->use_fastreg)
                ibmr = rds_ib_reg_frmr(rds_ibdev, ic, sg, nents, key_ret);
        else
                ibmr = rds_ib_reg_fmr(rds_ibdev, sg, nents, key_ret);
        if (IS_ERR(ibmr)) {
                ret = PTR_ERR(ibmr);
                pr_warn("RDS/IB: rds_ib_get_mr failed (errno=%d)\n", ret);
        } else {
                return ibmr;
        }

 out:
        if (rds_ibdev)
                rds_ib_dev_put(rds_ibdev);

        return ERR_PTR(ret);
}

void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
{
        cancel_delayed_work_sync(&pool->flush_worker);
        rds_ib_flush_mr_pool(pool, 1, NULL);
        WARN_ON(atomic_read(&pool->item_count));
        WARN_ON(atomic_read(&pool->free_pinned));
        kfree(pool);
}

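/* Allocate and initialize one MR pool.  Each device gets two: an 8K
 * pool for small registrations and a 1M pool for large ones, sized
 * from the device's MR limits.
 */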
struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev,
                                             int pool_type)
{
        struct rds_ib_mr_pool *pool;

        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
        if (!pool)
                return ERR_PTR(-ENOMEM);

        pool->pool_type = pool_type;
        init_llist_head(&pool->free_list);
        init_llist_head(&pool->drop_list);
        init_llist_head(&pool->clean_list);
        mutex_init(&pool->flush_lock);
        init_waitqueue_head(&pool->flush_wait);
        INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);

        if (pool_type == RDS_IB_MR_1M_POOL) {
                /* +1 allows for unaligned MRs */
                pool->fmr_attr.max_pages = RDS_MR_1M_MSG_SIZE + 1;
                pool->max_items = rds_ibdev->max_1m_mrs;
        } else {
                /* pool_type == RDS_IB_MR_8K_POOL */
                pool->fmr_attr.max_pages = RDS_MR_8K_MSG_SIZE + 1;
                pool->max_items = rds_ibdev->max_8k_mrs;
        }

        pool->max_free_pinned = pool->max_items * pool->fmr_attr.max_pages / 4;
        pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
        pool->fmr_attr.page_shift = PAGE_SHIFT;
        pool->max_items_soft = rds_ibdev->max_mrs * 3 / 4;
        pool->use_fastreg = rds_ibdev->use_fastreg;

        return pool;
}

int rds_ib_mr_init(void)
{
        rds_ib_mr_wq = alloc_workqueue("rds_mr_flushd", WQ_MEM_RECLAIM, 0);
        if (!rds_ib_mr_wq)
                return -ENOMEM;

        return 0;
}

/* By the time this is called all the IB devices should have been torn down and
 * had their pools freed.  As each pool is freed its work struct is waited on,
 * so the pool flushing work queue should be idle by the time we get here.
 */
void rds_ib_mr_exit(void)
{
        destroy_workqueue(rds_ib_mr_wq);
}