- /*
- * Common code for the NVMe target.
- * Copyright (c) 2015-2016 HGST, a Western Digital Company.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
- #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
- #include <linux/module.h>
- #include <linux/random.h>
- #include <linux/rculist.h>
- #include "nvmet.h"
- static struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
- static DEFINE_IDA(cntlid_ida);
- /*
- * This read/write semaphore is used to synchronize access to configuration
- * information on a target system that will result in discovery log page
- * information change for at least one host.
- * The full list of resources protected by this semaphore is:
- *
- * - subsystems list
- * - per-subsystem allowed hosts list
- * - allow_any_host subsystem attribute
- * - nvmet_genctr
- * - the nvmet_transports array
- *
- * When updating any of those lists/structures the write lock should be
- * obtained, while when reading (populating the discovery log page or
- * checking a host-subsystem link) the read lock is taken to allow
- * concurrent readers.
- */
- DECLARE_RWSEM(nvmet_config_sem);
- static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
- const char *subsysnqn);
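- /*
- * Copy a response payload into the request's scatterlist (and, in the
- * helper below, out of it).  A short copy means the host supplied an
- * SGL too small for the data, so fail the command with an SGL status
- * marked Do Not Retry.
- */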
- u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
- size_t len)
- {
- if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
- return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
- return 0;
- }
- u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
- {
- if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
- return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
- return 0;
- }
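- /*
- * Build completion queue entry Dword 0 for an Async Event Request:
- * event type in byte 0, event information in byte 1 and the log page
- * identifier in byte 2, per the NVMe specification.
- */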
- static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
- {
- return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
- }
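- /*
- * Fail every AER command still outstanding on this controller so the
- * admin queue can drain; each command is popped under ctrl->lock but
- * completed with the lock dropped.
- */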
- static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
- {
- struct nvmet_req *req;
- while (1) {
- mutex_lock(&ctrl->lock);
- if (!ctrl->nr_async_event_cmds) {
- mutex_unlock(&ctrl->lock);
- return;
- }
- req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
- mutex_unlock(&ctrl->lock);
- nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
- }
- }
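- /*
- * Pair queued async events with outstanding AER commands: complete one
- * AER per pending event until we run out of either.  Both the event
- * list and the AER command array are protected by ctrl->lock.
- */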
- static void nvmet_async_event_work(struct work_struct *work)
- {
- struct nvmet_ctrl *ctrl =
- container_of(work, struct nvmet_ctrl, async_event_work);
- struct nvmet_async_event *aen;
- struct nvmet_req *req;
- while (1) {
- mutex_lock(&ctrl->lock);
- aen = list_first_entry_or_null(&ctrl->async_events,
- struct nvmet_async_event, entry);
- if (!aen || !ctrl->nr_async_event_cmds) {
- mutex_unlock(&ctrl->lock);
- return;
- }
- req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
- nvmet_set_result(req, nvmet_async_event_result(aen));
- list_del(&aen->entry);
- kfree(aen);
- mutex_unlock(&ctrl->lock);
- nvmet_req_complete(req, 0);
- }
- }
- static void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
- u8 event_info, u8 log_page)
- {
- struct nvmet_async_event *aen;
- aen = kmalloc(sizeof(*aen), GFP_KERNEL);
- if (!aen)
- return;
- aen->event_type = event_type;
- aen->event_info = event_info;
- aen->log_page = log_page;
- mutex_lock(&ctrl->lock);
- list_add_tail(&aen->entry, &ctrl->async_events);
- mutex_unlock(&ctrl->lock);
- schedule_work(&ctrl->async_event_work);
- }
- int nvmet_register_transport(struct nvmet_fabrics_ops *ops)
- {
- int ret = 0;
- down_write(&nvmet_config_sem);
- if (nvmet_transports[ops->type])
- ret = -EINVAL;
- else
- nvmet_transports[ops->type] = ops;
- up_write(&nvmet_config_sem);
- return ret;
- }
- EXPORT_SYMBOL_GPL(nvmet_register_transport);
- void nvmet_unregister_transport(struct nvmet_fabrics_ops *ops)
- {
- down_write(&nvmet_config_sem);
- nvmet_transports[ops->type] = NULL;
- up_write(&nvmet_config_sem);
- }
- EXPORT_SYMBOL_GPL(nvmet_unregister_transport);
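- /*
- * A minimal sketch of how a fabrics transport driver uses the two
- * exports above from its module init/exit paths; the nvmet_foo names
- * are hypothetical, real callers being transports such as loop and
- * RDMA:
- *
- *	static struct nvmet_fabrics_ops nvmet_foo_ops = {
- *		.owner		= THIS_MODULE,
- *		.type		= NVMF_TRTYPE_RDMA,
- *		.add_port	= nvmet_foo_add_port,
- *		.remove_port	= nvmet_foo_remove_port,
- *		.queue_response	= nvmet_foo_queue_response,
- *		.delete_ctrl	= nvmet_foo_delete_ctrl,
- *	};
- *
- * Registration from module init fails with -EINVAL if another driver
- * already claimed the transport type:
- *
- *	ret = nvmet_register_transport(&nvmet_foo_ops);
- *
- * and nvmet_unregister_transport(&nvmet_foo_ops) undoes it on exit.
- */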
- int nvmet_enable_port(struct nvmet_port *port)
- {
- struct nvmet_fabrics_ops *ops;
- int ret;
- lockdep_assert_held(&nvmet_config_sem);
- ops = nvmet_transports[port->disc_addr.trtype];
- if (!ops) {
- up_write(&nvmet_config_sem);
- request_module("nvmet-transport-%d", port->disc_addr.trtype);
- down_write(&nvmet_config_sem);
- ops = nvmet_transports[port->disc_addr.trtype];
- if (!ops) {
- pr_err("transport type %d not supported\n",
- port->disc_addr.trtype);
- return -EINVAL;
- }
- }
- if (!try_module_get(ops->owner))
- return -EINVAL;
- ret = ops->add_port(port);
- if (ret) {
- module_put(ops->owner);
- return ret;
- }
- port->enabled = true;
- return 0;
- }
- void nvmet_disable_port(struct nvmet_port *port)
- {
- struct nvmet_fabrics_ops *ops;
- lockdep_assert_held(&nvmet_config_sem);
- port->enabled = false;
- ops = nvmet_transports[port->disc_addr.trtype];
- ops->remove_port(port);
- module_put(ops->owner);
- }
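- /*
- * KATO expired without the host sending a Keep Alive command: treat it
- * as a dead host and raise a fatal error, which schedules controller
- * teardown.
- */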
- static void nvmet_keep_alive_timer(struct work_struct *work)
- {
- struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
- struct nvmet_ctrl, ka_work);
- pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
- ctrl->cntlid, ctrl->kato);
- nvmet_ctrl_fatal_error(ctrl);
- }
- static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
- {
- if (unlikely(ctrl->kato == 0))
- return;
- pr_debug("ctrl %d start keep-alive timer for %d secs\n",
- ctrl->cntlid, ctrl->kato);
- INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
- schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
- }
- static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
- {
- if (unlikely(ctrl->kato == 0))
- return;
- pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);
- cancel_delayed_work_sync(&ctrl->ka_work);
- }
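- /*
- * Namespace lookup walks the subsystem's nsid-sorted list under RCU;
- * callers must hold rcu_read_lock(), as nvmet_find_namespace below
- * does before taking a reference.
- */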
- static struct nvmet_ns *__nvmet_find_namespace(struct nvmet_ctrl *ctrl,
- __le32 nsid)
- {
- struct nvmet_ns *ns;
- list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
- if (ns->nsid == le32_to_cpu(nsid))
- return ns;
- }
- return NULL;
- }
- struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
- {
- struct nvmet_ns *ns;
- rcu_read_lock();
- ns = __nvmet_find_namespace(ctrl, nsid);
- if (ns)
- percpu_ref_get(&ns->ref);
- rcu_read_unlock();
- return ns;
- }
- static void nvmet_destroy_namespace(struct percpu_ref *ref)
- {
- struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);
- complete(&ns->disable_done);
- }
- void nvmet_put_namespace(struct nvmet_ns *ns)
- {
- percpu_ref_put(&ns->ref);
- }
- int nvmet_ns_enable(struct nvmet_ns *ns)
- {
- struct nvmet_subsys *subsys = ns->subsys;
- struct nvmet_ctrl *ctrl;
- int ret = 0;
- mutex_lock(&subsys->lock);
- if (ns->enabled)
- goto out_unlock;
- ns->bdev = blkdev_get_by_path(ns->device_path, FMODE_READ | FMODE_WRITE,
- NULL);
- if (IS_ERR(ns->bdev)) {
- pr_err("failed to open block device %s: (%ld)\n",
- ns->device_path, PTR_ERR(ns->bdev));
- ret = PTR_ERR(ns->bdev);
- ns->bdev = NULL;
- goto out_unlock;
- }
- ns->size = i_size_read(ns->bdev->bd_inode);
- ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));
- ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
- 0, GFP_KERNEL);
- if (ret)
- goto out_blkdev_put;
- if (ns->nsid > subsys->max_nsid)
- subsys->max_nsid = ns->nsid;
- /*
- * The namespaces list needs to be sorted to simplify the implementation
- * of the Identify Namespace List subcommand.
- */
- if (list_empty(&subsys->namespaces)) {
- list_add_tail_rcu(&ns->dev_link, &subsys->namespaces);
- } else {
- struct nvmet_ns *old;
- list_for_each_entry_rcu(old, &subsys->namespaces, dev_link) {
- BUG_ON(ns->nsid == old->nsid);
- if (ns->nsid < old->nsid)
- break;
- }
- list_add_tail_rcu(&ns->dev_link, &old->dev_link);
- }
- list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
- nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);
- ns->enabled = true;
- ret = 0;
- out_unlock:
- mutex_unlock(&subsys->lock);
- return ret;
- out_blkdev_put:
- blkdev_put(ns->bdev, FMODE_WRITE|FMODE_READ);
- ns->bdev = NULL;
- goto out_unlock;
- }
- void nvmet_ns_disable(struct nvmet_ns *ns)
- {
- struct nvmet_subsys *subsys = ns->subsys;
- struct nvmet_ctrl *ctrl;
- mutex_lock(&subsys->lock);
- if (!ns->enabled)
- goto out_unlock;
- ns->enabled = false;
- list_del_rcu(&ns->dev_link);
- mutex_unlock(&subsys->lock);
- /*
- * Now that we removed the namespace from the lookup list, we
- * can kill the percpu ref and wait for any remaining references
- * to be dropped, as well as an RCU grace period for anyone only
- * using the namespace under rcu_read_lock(). Note that we can't
- * use call_rcu here as we need to ensure the namespace has
- * been fully destroyed before unloading the module.
- */
- percpu_ref_kill(&ns->ref);
- synchronize_rcu();
- wait_for_completion(&ns->disable_done);
- percpu_ref_exit(&ns->ref);
- mutex_lock(&subsys->lock);
- list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
- nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);
- if (ns->bdev)
- blkdev_put(ns->bdev, FMODE_WRITE|FMODE_READ);
- out_unlock:
- mutex_unlock(&subsys->lock);
- }
- void nvmet_ns_free(struct nvmet_ns *ns)
- {
- nvmet_ns_disable(ns);
- kfree(ns->device_path);
- kfree(ns);
- }
- struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
- {
- struct nvmet_ns *ns;
- ns = kzalloc(sizeof(*ns), GFP_KERNEL);
- if (!ns)
- return NULL;
- INIT_LIST_HEAD(&ns->dev_link);
- init_completion(&ns->disable_done);
- ns->nsid = nsid;
- ns->subsys = subsys;
- uuid_gen(&ns->uuid);
- return ns;
- }
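- /*
- * Advance the submission queue head seen by the host.  Completions may
- * run concurrently, so the wrap-around increment is done with a
- * cmpxchg() loop rather than a lock; the updated head is reported back
- * in the CQE so the host can reclaim SQ slots.
- */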
- static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
- {
- u32 old_sqhd, new_sqhd;
- u16 sqhd;
- if (status)
- nvmet_set_status(req, status);
- if (req->sq->size) {
- do {
- old_sqhd = req->sq->sqhd;
- new_sqhd = (old_sqhd + 1) % req->sq->size;
- } while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=
- old_sqhd);
- }
- sqhd = req->sq->sqhd & 0x0000FFFF;
- req->rsp->sq_head = cpu_to_le16(sqhd);
- req->rsp->sq_id = cpu_to_le16(req->sq->qid);
- req->rsp->command_id = req->cmd->common.command_id;
- if (req->ns)
- nvmet_put_namespace(req->ns);
- req->ops->queue_response(req);
- }
- void nvmet_req_complete(struct nvmet_req *req, u16 status)
- {
- __nvmet_req_complete(req, status);
- percpu_ref_put(&req->sq->ref);
- }
- EXPORT_SYMBOL_GPL(nvmet_req_complete);
- void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
- u16 qid, u16 size)
- {
- cq->qid = qid;
- cq->size = size;
- ctrl->cqs[qid] = cq;
- }
- void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
- u16 qid, u16 size)
- {
- sq->sqhd = 0;
- sq->qid = qid;
- sq->size = size;
- ctrl->sqs[qid] = sq;
- }
- static void nvmet_confirm_sq(struct percpu_ref *ref)
- {
- struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
- complete(&sq->confirm_done);
- }
- void nvmet_sq_destroy(struct nvmet_sq *sq)
- {
- /*
- * If this is the admin queue, complete all AERs so that our
- * queue doesn't have outstanding requests on it.
- */
- if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
- nvmet_async_events_free(sq->ctrl);
- percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
- wait_for_completion(&sq->confirm_done);
- wait_for_completion(&sq->free_done);
- percpu_ref_exit(&sq->ref);
- if (sq->ctrl) {
- nvmet_ctrl_put(sq->ctrl);
- sq->ctrl = NULL; /* allows reusing the queue later */
- }
- }
- EXPORT_SYMBOL_GPL(nvmet_sq_destroy);
- static void nvmet_sq_free(struct percpu_ref *ref)
- {
- struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
- complete(&sq->free_done);
- }
- int nvmet_sq_init(struct nvmet_sq *sq)
- {
- int ret;
- ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
- if (ret) {
- pr_err("percpu_ref init failed!\n");
- return ret;
- }
- init_completion(&sq->free_done);
- init_completion(&sq->confirm_done);
- return 0;
- }
- EXPORT_SYMBOL_GPL(nvmet_sq_init);
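- /*
- * Common entry point for every command a transport receives.  After
- * rejecting fused commands and non-METABUF SGL descriptors, dispatch
- * on queue state: only Connect is accepted until a controller is
- * associated, qid != 0 means I/O, and admin-queue commands are split
- * between fabrics, discovery and regular admin parsing.
- */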
- bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
- struct nvmet_sq *sq, struct nvmet_fabrics_ops *ops)
- {
- u8 flags = req->cmd->common.flags;
- u16 status;
- req->cq = cq;
- req->sq = sq;
- req->ops = ops;
- req->sg = NULL;
- req->sg_cnt = 0;
- req->rsp->status = 0;
- /* no support for fused commands yet */
- if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
- goto fail;
- }
- /*
- * For fabrics, PSDT field shall describe metadata pointer (MPTR) that
- * contains an address of a single contiguous physical buffer that is
- * byte aligned.
- */
- if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
- goto fail;
- }
- if (unlikely(!req->sq->ctrl))
- /* will return an error for any non-Connect command: */
- status = nvmet_parse_connect_cmd(req);
- else if (likely(req->sq->qid != 0))
- status = nvmet_parse_io_cmd(req);
- else if (req->cmd->common.opcode == nvme_fabrics_command)
- status = nvmet_parse_fabrics_cmd(req);
- else if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
- status = nvmet_parse_discovery_cmd(req);
- else
- status = nvmet_parse_admin_cmd(req);
- if (status)
- goto fail;
- if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
- goto fail;
- }
- return true;
- fail:
- __nvmet_req_complete(req, status);
- return false;
- }
- EXPORT_SYMBOL_GPL(nvmet_req_init);
- void nvmet_req_uninit(struct nvmet_req *req)
- {
- percpu_ref_put(&req->sq->ref);
- }
- EXPORT_SYMBOL_GPL(nvmet_req_uninit);
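- /*
- * Field accessors for the Controller Configuration register, using the
- * layout from the NVMe spec: EN is bit 0, CSS bits 6:4, MPS bits 10:7,
- * AMS bits 13:11, SHN bits 15:14, IOSQES bits 19:16 and IOCQES
- * bits 23:20.
- */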
- static inline bool nvmet_cc_en(u32 cc)
- {
- return (cc >> NVME_CC_EN_SHIFT) & 0x1;
- }
- static inline u8 nvmet_cc_css(u32 cc)
- {
- return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
- }
- static inline u8 nvmet_cc_mps(u32 cc)
- {
- return (cc >> NVME_CC_MPS_SHIFT) & 0xf;
- }
- static inline u8 nvmet_cc_ams(u32 cc)
- {
- return (cc >> NVME_CC_AMS_SHIFT) & 0x7;
- }
- static inline u8 nvmet_cc_shn(u32 cc)
- {
- return (cc >> NVME_CC_SHN_SHIFT) & 0x3;
- }
- static inline u8 nvmet_cc_iosqes(u32 cc)
- {
- return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;
- }
- static inline u8 nvmet_cc_iocqes(u32 cc)
- {
- return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
- }
- static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
- {
- lockdep_assert_held(&ctrl->lock);
- /*
- * Only I/O controllers should verify iosqes, iocqes.
- * Strictly speaking, the spec says a discovery controller
- * should verify that iosqes, iocqes are zeroed, however that
- * would break backwards compatibility, so don't enforce it.
- */
- if (ctrl->subsys->type != NVME_NQN_DISC &&
- (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
- nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES)) {
- ctrl->csts = NVME_CSTS_CFS;
- return;
- }
- if (nvmet_cc_mps(ctrl->cc) != 0 ||
- nvmet_cc_ams(ctrl->cc) != 0 ||
- nvmet_cc_css(ctrl->cc) != 0) {
- ctrl->csts = NVME_CSTS_CFS;
- return;
- }
- ctrl->csts = NVME_CSTS_RDY;
- /*
- * Controllers that are not yet enabled should not really enforce the
- * keep alive timeout, but we still want to track a timeout and clean up
- * in case a host died before it enabled the controller. Hence, simply
- * reset the keep alive timer when the controller is enabled.
- */
- if (ctrl->kato)
- mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
- }
- static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
- {
- lockdep_assert_held(&ctrl->lock);
- /* XXX: tear down queues? */
- ctrl->csts &= ~NVME_CSTS_RDY;
- ctrl->cc = 0;
- }
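- /*
- * Apply a host write to CC: a 0->1 transition of CC.EN starts the
- * controller, 1->0 resets it, and setting CC.SHN performs shutdown
- * processing and latches CSTS.SHST to "shutdown complete".
- */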
- void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
- {
- u32 old;
- mutex_lock(&ctrl->lock);
- old = ctrl->cc;
- ctrl->cc = new;
- if (nvmet_cc_en(new) && !nvmet_cc_en(old))
- nvmet_start_ctrl(ctrl);
- if (!nvmet_cc_en(new) && nvmet_cc_en(old))
- nvmet_clear_ctrl(ctrl);
- if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
- nvmet_clear_ctrl(ctrl);
- ctrl->csts |= NVME_CSTS_SHST_CMPLT;
- }
- if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
- ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
- mutex_unlock(&ctrl->lock);
- }
- static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
- {
- /* command sets supported: NVM command set: */
- ctrl->cap = (1ULL << 37);
- /* CC.EN timeout in 500msec units: */
- ctrl->cap |= (15ULL << 24);
- /* maximum queue entries supported (CAP.MQES, a 0's based value): */
- ctrl->cap |= NVMET_QUEUE_SIZE - 1;
- }
- u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
- struct nvmet_req *req, struct nvmet_ctrl **ret)
- {
- struct nvmet_subsys *subsys;
- struct nvmet_ctrl *ctrl;
- u16 status = 0;
- subsys = nvmet_find_get_subsys(req->port, subsysnqn);
- if (!subsys) {
- pr_warn("connect request for invalid subsystem %s!\n",
- subsysnqn);
- req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
- return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
- }
- mutex_lock(&subsys->lock);
- list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
- if (ctrl->cntlid == cntlid) {
- if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
- pr_warn("hostnqn mismatch.\n");
- continue;
- }
- if (!kref_get_unless_zero(&ctrl->ref))
- continue;
- *ret = ctrl;
- goto out;
- }
- }
- pr_warn("could not find controller %d for subsys %s / host %s\n",
- cntlid, subsysnqn, hostnqn);
- req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
- status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
- out:
- mutex_unlock(&subsys->lock);
- nvmet_subsys_put(subsys);
- return status;
- }
- u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd)
- {
- if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
- pr_err("got io cmd %d while CC.EN == 0 on qid = %d\n",
- cmd->common.opcode, req->sq->qid);
- return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
- }
- if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
- pr_err("got io cmd %d while CSTS.RDY == 0 on qid = %d\n",
- cmd->common.opcode, req->sq->qid);
- req->ns = NULL;
- return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
- }
- return 0;
- }
- static bool __nvmet_host_allowed(struct nvmet_subsys *subsys,
- const char *hostnqn)
- {
- struct nvmet_host_link *p;
- if (subsys->allow_any_host)
- return true;
- list_for_each_entry(p, &subsys->hosts, entry) {
- if (!strcmp(nvmet_host_name(p->host), hostnqn))
- return true;
- }
- return false;
- }
- static bool nvmet_host_discovery_allowed(struct nvmet_req *req,
- const char *hostnqn)
- {
- struct nvmet_subsys_link *s;
- list_for_each_entry(s, &req->port->subsystems, entry) {
- if (__nvmet_host_allowed(s->subsys, hostnqn))
- return true;
- }
- return false;
- }
- bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
- const char *hostnqn)
- {
- lockdep_assert_held(&nvmet_config_sem);
- if (subsys->type == NVME_NQN_DISC)
- return nvmet_host_discovery_allowed(req, hostnqn);
- else
- return __nvmet_host_allowed(subsys, hostnqn);
- }
- static void nvmet_fatal_error_handler(struct work_struct *work)
- {
- struct nvmet_ctrl *ctrl =
- container_of(work, struct nvmet_ctrl, fatal_err_work);
- pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
- ctrl->ops->delete_ctrl(ctrl);
- }
- u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
- struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
- {
- struct nvmet_subsys *subsys;
- struct nvmet_ctrl *ctrl;
- int ret;
- u16 status;
- status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
- subsys = nvmet_find_get_subsys(req->port, subsysnqn);
- if (!subsys) {
- pr_warn("connect request for invalid subsystem %s!\n",
- subsysnqn);
- req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
- goto out;
- }
- status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
- down_read(&nvmet_config_sem);
- if (!nvmet_host_allowed(req, subsys, hostnqn)) {
- pr_info("connect by host %s for subsystem %s not allowed\n",
- hostnqn, subsysnqn);
- req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
- up_read(&nvmet_config_sem);
- status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
- goto out_put_subsystem;
- }
- up_read(&nvmet_config_sem);
- status = NVME_SC_INTERNAL;
- ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
- if (!ctrl)
- goto out_put_subsystem;
- mutex_init(&ctrl->lock);
- nvmet_init_cap(ctrl);
- INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
- INIT_LIST_HEAD(&ctrl->async_events);
- INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
- memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
- memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
- kref_init(&ctrl->ref);
- ctrl->subsys = subsys;
- ctrl->cqs = kcalloc(subsys->max_qid + 1,
- sizeof(struct nvmet_cq *),
- GFP_KERNEL);
- if (!ctrl->cqs)
- goto out_free_ctrl;
- ctrl->sqs = kcalloc(subsys->max_qid + 1,
- sizeof(struct nvmet_sq *),
- GFP_KERNEL);
- if (!ctrl->sqs)
- goto out_free_cqs;
- ret = ida_simple_get(&cntlid_ida,
- NVME_CNTLID_MIN, NVME_CNTLID_MAX,
- GFP_KERNEL);
- if (ret < 0) {
- status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
- goto out_free_sqs;
- }
- ctrl->cntlid = ret;
- ctrl->ops = req->ops;
- if (ctrl->subsys->type == NVME_NQN_DISC) {
- /* Don't accept keep-alive timeout for discovery controllers */
- if (kato) {
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
- goto out_free_sqs;
- }
- /*
- * Discovery controllers use some arbitrary high value in order
- * to clean up stale discovery sessions
- *
- * From a recent draft of the NVMe base specification:
- * "The Keep Alive command is not supported by
- * Discovery controllers. A transport may specify a
- * fixed Discovery controller activity timeout value
- * (e.g., 2 minutes). If no commands are received
- * by a Discovery controller within that time
- * period, the controller may perform the
- * actions for Keep Alive Timer expiration".
- */
- ctrl->kato = NVMET_DISC_KATO;
- } else {
- /* keep-alive timeout in seconds */
- ctrl->kato = DIV_ROUND_UP(kato, 1000);
- }
- nvmet_start_keep_alive_timer(ctrl);
- mutex_lock(&subsys->lock);
- list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
- mutex_unlock(&subsys->lock);
- *ctrlp = ctrl;
- return 0;
- out_free_sqs:
- kfree(ctrl->sqs);
- out_free_cqs:
- kfree(ctrl->cqs);
- out_free_ctrl:
- kfree(ctrl);
- out_put_subsystem:
- nvmet_subsys_put(subsys);
- out:
- return status;
- }
- static void nvmet_ctrl_free(struct kref *ref)
- {
- struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
- struct nvmet_subsys *subsys = ctrl->subsys;
- nvmet_stop_keep_alive_timer(ctrl);
- mutex_lock(&subsys->lock);
- list_del(&ctrl->subsys_entry);
- mutex_unlock(&subsys->lock);
- flush_work(&ctrl->async_event_work);
- cancel_work_sync(&ctrl->fatal_err_work);
- ida_simple_remove(&cntlid_ida, ctrl->cntlid);
- nvmet_subsys_put(subsys);
- kfree(ctrl->sqs);
- kfree(ctrl->cqs);
- kfree(ctrl);
- }
- void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
- {
- kref_put(&ctrl->ref, nvmet_ctrl_free);
- }
- void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
- {
- mutex_lock(&ctrl->lock);
- if (!(ctrl->csts & NVME_CSTS_CFS)) {
- ctrl->csts |= NVME_CSTS_CFS;
- schedule_work(&ctrl->fatal_err_work);
- }
- mutex_unlock(&ctrl->lock);
- }
- EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);
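- /*
- * Resolve a subsystem NQN to a referenced subsystem: the well-known
- * discovery NQN maps straight to the discovery subsystem, anything
- * else must be linked to the port the connection arrived on.
- */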
- static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
- const char *subsysnqn)
- {
- struct nvmet_subsys_link *p;
- if (!port)
- return NULL;
- if (!strncmp(NVME_DISC_SUBSYS_NAME, subsysnqn,
- NVMF_NQN_SIZE)) {
- if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
- return NULL;
- return nvmet_disc_subsys;
- }
- down_read(&nvmet_config_sem);
- list_for_each_entry(p, &port->subsystems, entry) {
- if (!strncmp(p->subsys->subsysnqn, subsysnqn,
- NVMF_NQN_SIZE)) {
- if (!kref_get_unless_zero(&p->subsys->ref))
- break;
- up_read(&nvmet_config_sem);
- return p->subsys;
- }
- }
- up_read(&nvmet_config_sem);
- return NULL;
- }
- struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
- enum nvme_subsys_type type)
- {
- struct nvmet_subsys *subsys;
- subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
- if (!subsys)
- return NULL;
- subsys->ver = NVME_VS(1, 3, 0); /* NVMe 1.3.0 */
- /* generate a random serial number as our controllers are ephemeral: */
- get_random_bytes(&subsys->serial, sizeof(subsys->serial));
- switch (type) {
- case NVME_NQN_NVME:
- subsys->max_qid = NVMET_NR_QUEUES;
- break;
- case NVME_NQN_DISC:
- subsys->max_qid = 0;
- break;
- default:
- pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
- kfree(subsys);
- return NULL;
- }
- subsys->type = type;
- subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
- GFP_KERNEL);
- if (!subsys->subsysnqn) {
- kfree(subsys);
- return NULL;
- }
- kref_init(&subsys->ref);
- mutex_init(&subsys->lock);
- INIT_LIST_HEAD(&subsys->namespaces);
- INIT_LIST_HEAD(&subsys->ctrls);
- INIT_LIST_HEAD(&subsys->hosts);
- return subsys;
- }
- static void nvmet_subsys_free(struct kref *ref)
- {
- struct nvmet_subsys *subsys =
- container_of(ref, struct nvmet_subsys, ref);
- WARN_ON_ONCE(!list_empty(&subsys->namespaces));
- kfree(subsys->subsysnqn);
- kfree(subsys);
- }
- void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
- {
- struct nvmet_ctrl *ctrl;
- mutex_lock(&subsys->lock);
- list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
- ctrl->ops->delete_ctrl(ctrl);
- mutex_unlock(&subsys->lock);
- }
- void nvmet_subsys_put(struct nvmet_subsys *subsys)
- {
- kref_put(&subsys->ref, nvmet_subsys_free);
- }
- static int __init nvmet_init(void)
- {
- int error;
- error = nvmet_init_discovery();
- if (error)
- goto out;
- error = nvmet_init_configfs();
- if (error)
- goto out_exit_discovery;
- return 0;
- out_exit_discovery:
- nvmet_exit_discovery();
- out:
- return error;
- }
- static void __exit nvmet_exit(void)
- {
- nvmet_exit_configfs();
- nvmet_exit_discovery();
- ida_destroy(&cntlid_ida);
- BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
- BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
- }
- module_init(nvmet_init);
- module_exit(nvmet_exit);
- MODULE_LICENSE("GPL v2");