Commit 0d738971 authored by Jens Axboe

Merge tag 'nvme-5.12-2021-02-02' of git://git.infradead.org/nvme into for-5.12/drivers

Pull NVMe updates from Christoph:

"nvme updates for 5.12:

 - failed reconnect fixes (Chao Leng)
 - various tracing improvements (Michal Krakowiak, Johannes Thumshirn)
 - switch the nvmet-fc assoc_list to use RCU protection (Leonid Ravich)
 - resync the status codes with the latest spec (Max Gurtovoy)
 - minor nvme-tcp improvements (Sagi Grimberg)
 - various cleanups (Rikard Falkeborn, Minwoo Im, Chaitanya Kulkarni,
   Israel Rukshin)"

* tag 'nvme-5.12-2021-02-02' of git://git.infradead.org/nvme: (22 commits)
  nvme-tcp: use cancel tagset helper for tear down
  nvme-rdma: use cancel tagset helper for tear down
  nvme-tcp: add clean action for failed reconnection
  nvme-rdma: add clean action for failed reconnection
  nvme-core: add cancel tagset helpers
  nvme-core: get rid of the extra space
  nvme: add tracing of zns commands
  nvme: parse format nvm command details when tracing
  nvme: update enumerations for status codes
  nvmet: add lba to sect conversion helpers
  nvmet: remove extra variable in identify ns
  nvmet: remove extra variable in id-desclist
  nvmet: remove extra variable in smart log nsid
  nvme: refactor ns->ctrl by request
  nvme-tcp: pass multipage bvec to request iov_iter
  nvme-tcp: get rid of unused helper function
  nvme-tcp: fix wrong setting of request iov_iter
  nvme: support command retry delay for admin command
  nvme: constify static attribute_group structs
  nvmet-fc: use RCU protection for assoc_list
  ...
parents e8628013 563c8158
@@ -279,14 +279,13 @@ static blk_status_t nvme_error_status(u16 status)
 static void nvme_retry_req(struct request *req)
 {
-	struct nvme_ns *ns = req->q->queuedata;
 	unsigned long delay = 0;
 	u16 crd;
 
 	/* The mask and shift result must be <= 3 */
 	crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11;
-	if (ns && crd)
-		delay = ns->ctrl->crdt[crd - 1] * 100;
+	if (crd)
+		delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100;
 
 	nvme_req(req)->retries++;
 	blk_mq_requeue_request(req, false);
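
The hunk above makes the Command Retry Delay lookup namespace-independent: the CRD field (bits 12:11 of the CQE status word) selects one of the controller's three CRDT values, each in units of 100 ms. A minimal user-space sketch of that decode, with hypothetical status and CRDT values:

#include <stdint.h>
#include <stdio.h>

#define NVME_SC_CRD 0x1800	/* CRD field, bits 12:11 of the status word */

int main(void)
{
	uint16_t status = 2 << 11;		/* hypothetical CQE status: CRD = 2 */
	uint16_t crdt[3] = { 1, 5, 10 };	/* hypothetical CRDT1..3, 100 ms units */
	uint16_t crd = (status & NVME_SC_CRD) >> 11;
	unsigned long delay_ms = crd ? crdt[crd - 1] * 100 : 0;

	printf("CRD=%u -> requeue delay %lu ms\n", crd, delay_ms);	/* prints 500 ms */
	return 0;
}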
@@ -371,6 +370,26 @@ bool nvme_cancel_request(struct request *req, void *data, bool reserved)
 }
 EXPORT_SYMBOL_GPL(nvme_cancel_request);
 
+void nvme_cancel_tagset(struct nvme_ctrl *ctrl)
+{
+	if (ctrl->tagset) {
+		blk_mq_tagset_busy_iter(ctrl->tagset,
+				nvme_cancel_request, ctrl);
+		blk_mq_tagset_wait_completed_request(ctrl->tagset);
+	}
+}
+EXPORT_SYMBOL_GPL(nvme_cancel_tagset);
+
+void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl)
+{
+	if (ctrl->admin_tagset) {
+		blk_mq_tagset_busy_iter(ctrl->admin_tagset,
+				nvme_cancel_request, ctrl);
+		blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
+	}
+}
+EXPORT_SYMBOL_GPL(nvme_cancel_admin_tagset);
+
 bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
 		enum nvme_ctrl_state new_state)
 {
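
The two helpers centralize a pattern each transport previously open-coded: iterate the busy tags, cancel every request, then wait until all of them have completed. A sketch of the teardown ordering the rdma and tcp hunks below converge on (the wrapper function here is hypothetical; the callees are the ones visible in this diff):

/* Sketch only: a transport's I/O-queue teardown, in the order used below. */
static void example_teardown_io_queues(struct nvme_ctrl *ctrl)
{
	nvme_stop_queues(ctrl);		/* quiesce the I/O request queues */
	nvme_sync_io_queues(ctrl);	/* wait for running timeout handlers */
	/* transport-specific queue stop goes here, e.g. nvme_tcp_stop_io_queues() */
	nvme_cancel_tagset(ctrl);	/* fail every outstanding request */
}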
@@ -842,11 +861,11 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
 void nvme_cleanup_cmd(struct request *req)
 {
 	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
-		struct nvme_ns *ns = req->rq_disk->private_data;
+		struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
 		struct page *page = req->special_vec.bv_page;
 
-		if (page == ns->ctrl->discard_page)
-			clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
+		if (page == ctrl->discard_page)
+			clear_bit_unlock(0, &ctrl->discard_page_busy);
 		else
 			kfree(page_address(page) + req->special_vec.bv_offset);
 	}
@@ -2859,7 +2878,7 @@ static struct attribute *nvme_subsys_attrs[] = {
 	NULL,
 };
 
-static struct attribute_group nvme_subsys_attrs_group = {
+static const struct attribute_group nvme_subsys_attrs_group = {
 	.attrs = nvme_subsys_attrs,
 };
@@ -3694,7 +3713,7 @@ static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
 	return a->mode;
 }
 
-static struct attribute_group nvme_dev_attrs_group = {
+static const struct attribute_group nvme_dev_attrs_group = {
 	.attrs = nvme_dev_attrs,
 	.is_visible = nvme_dev_attrs_are_visible,
 };
@@ -4449,7 +4468,7 @@ static void nvme_free_cels(struct nvme_ctrl *ctrl)
 	struct nvme_effects_log *cel;
 	unsigned long i;
 
-	xa_for_each (&ctrl->cels, i, cel) {
+	xa_for_each(&ctrl->cels, i, cel) {
 		xa_erase(&ctrl->cels, i);
 		kfree(cel);
 	}
...
@@ -3789,7 +3789,7 @@ static struct attribute *nvme_fc_attrs[] = {
 	NULL
 };
 
-static struct attribute_group nvme_fc_attr_group = {
+static const struct attribute_group nvme_fc_attr_group = {
 	.attrs = nvme_fc_attrs,
 };
...
@@ -576,6 +576,8 @@ static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
 
 void nvme_complete_rq(struct request *req);
 bool nvme_cancel_request(struct request *req, void *data, bool reserved);
+void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
+void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
 bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
 		enum nvme_ctrl_state new_state);
 bool nvme_wait_reset(struct nvme_ctrl *ctrl);
...
@@ -919,12 +919,16 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
 
 	error = nvme_init_identify(&ctrl->ctrl);
 	if (error)
-		goto out_stop_queue;
+		goto out_quiesce_queue;
 
 	return 0;
 
+out_quiesce_queue:
+	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+	blk_sync_queue(ctrl->ctrl.admin_q);
 out_stop_queue:
 	nvme_rdma_stop_queue(&ctrl->queues[0]);
+	nvme_cancel_admin_tagset(&ctrl->ctrl);
 out_cleanup_queue:
 	if (new)
 		blk_cleanup_queue(ctrl->ctrl.admin_q);
@@ -1001,8 +1005,10 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
 
 out_wait_freeze_timed_out:
 	nvme_stop_queues(&ctrl->ctrl);
+	nvme_sync_io_queues(&ctrl->ctrl);
 	nvme_rdma_stop_io_queues(ctrl);
 out_cleanup_connect_q:
+	nvme_cancel_tagset(&ctrl->ctrl);
 	if (new)
 		blk_cleanup_queue(ctrl->ctrl.connect_q);
 out_free_tag_set:
@@ -1019,11 +1025,7 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
 	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
 	blk_sync_queue(ctrl->ctrl.admin_q);
 	nvme_rdma_stop_queue(&ctrl->queues[0]);
-	if (ctrl->ctrl.admin_tagset) {
-		blk_mq_tagset_busy_iter(ctrl->ctrl.admin_tagset,
-			nvme_cancel_request, &ctrl->ctrl);
-		blk_mq_tagset_wait_completed_request(ctrl->ctrl.admin_tagset);
-	}
+	nvme_cancel_admin_tagset(&ctrl->ctrl);
 	if (remove)
 		blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
 	nvme_rdma_destroy_admin_queue(ctrl, remove);
@@ -1037,11 +1039,7 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
 		nvme_stop_queues(&ctrl->ctrl);
 		nvme_sync_io_queues(&ctrl->ctrl);
 		nvme_rdma_stop_io_queues(ctrl);
-		if (ctrl->ctrl.tagset) {
-			blk_mq_tagset_busy_iter(ctrl->ctrl.tagset,
-				nvme_cancel_request, &ctrl->ctrl);
-			blk_mq_tagset_wait_completed_request(ctrl->ctrl.tagset);
-		}
+		nvme_cancel_tagset(&ctrl->ctrl);
 		if (remove)
 			nvme_start_queues(&ctrl->ctrl);
 		nvme_rdma_destroy_io_queues(ctrl, remove);
@@ -1144,10 +1142,18 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
 	return 0;
 
 destroy_io:
-	if (ctrl->ctrl.queue_count > 1)
+	if (ctrl->ctrl.queue_count > 1) {
+		nvme_stop_queues(&ctrl->ctrl);
+		nvme_sync_io_queues(&ctrl->ctrl);
+		nvme_rdma_stop_io_queues(ctrl);
+		nvme_cancel_tagset(&ctrl->ctrl);
 		nvme_rdma_destroy_io_queues(ctrl, new);
+	}
 destroy_admin:
+	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+	blk_sync_queue(ctrl->ctrl.admin_q);
 	nvme_rdma_stop_queue(&ctrl->queues[0]);
+	nvme_cancel_admin_tagset(&ctrl->ctrl);
 	nvme_rdma_destroy_admin_queue(ctrl, new);
 	return ret;
 }
...
@@ -206,11 +206,6 @@ static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
 			req->pdu_len - req->pdu_sent);
 }
 
-static inline size_t nvme_tcp_req_offset(struct nvme_tcp_request *req)
-{
-	return req->iter.iov_offset;
-}
-
 static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
 {
 	return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
@@ -229,24 +224,29 @@ static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
 	struct request *rq = blk_mq_rq_from_pdu(req);
 	struct bio_vec *vec;
 	unsigned int size;
-	int nsegs;
+	int nr_bvec;
 	size_t offset;
 
 	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
 		vec = &rq->special_vec;
-		nsegs = 1;
+		nr_bvec = 1;
 		size = blk_rq_payload_bytes(rq);
 		offset = 0;
 	} else {
 		struct bio *bio = req->curr_bio;
+		struct bvec_iter bi;
+		struct bio_vec bv;
 
 		vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
-		nsegs = bio_segments(bio);
+		nr_bvec = 0;
+		bio_for_each_bvec(bv, bio, bi) {
+			nr_bvec++;
+		}
 		size = bio->bi_iter.bi_size;
 		offset = bio->bi_iter.bi_bvec_done;
 	}
 
-	iov_iter_bvec(&req->iter, dir, vec, nsegs, size);
+	iov_iter_bvec(&req->iter, dir, vec, nr_bvec, size);
 	req->iter.iov_offset = offset;
 }
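
Since iov_iter_bvec() now walks the bio's bvec array directly, the count passed in must be the number of (possibly multipage) bvec entries, not the single-page segment count that bio_segments() returns; hence the bio_for_each_bvec() loop above. A sketch of the distinction (kernel context assumed; the helper name is hypothetical):

/* Sketch: the two counts diverge once a bvec spans more than one page. */
static int example_count_bvecs(struct bio *bio)
{
	struct bvec_iter bi;
	struct bio_vec bv;
	int nr_bvec = 0;

	bio_for_each_bvec(bv, bio, bi)	/* one step per (multipage) bvec */
		nr_bvec++;
	return nr_bvec;			/* <= bio_segments(bio) */
}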
@@ -983,7 +983,6 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
 		req->state = NVME_TCP_SEND_DATA;
 		if (queue->data_digest)
 			crypto_ahash_init(queue->snd_hash);
-		nvme_tcp_init_iter(req, WRITE);
 	} else {
 		nvme_tcp_done_send_req(queue);
 	}
@@ -1016,8 +1015,6 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
 		req->state = NVME_TCP_SEND_DATA;
 		if (queue->data_digest)
 			crypto_ahash_init(queue->snd_hash);
-		if (!req->data_sent)
-			nvme_tcp_init_iter(req, WRITE);
 		return 1;
 	}
 	req->offset += ret;
@@ -1815,8 +1812,10 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
 
 out_wait_freeze_timed_out:
 	nvme_stop_queues(ctrl);
+	nvme_sync_io_queues(ctrl);
 	nvme_tcp_stop_io_queues(ctrl);
 out_cleanup_connect_q:
+	nvme_cancel_tagset(ctrl);
 	if (new)
 		blk_cleanup_queue(ctrl->connect_q);
 out_free_tag_set:
@@ -1878,12 +1877,16 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
 
 	error = nvme_init_identify(ctrl);
 	if (error)
-		goto out_stop_queue;
+		goto out_quiesce_queue;
 
 	return 0;
 
+out_quiesce_queue:
+	blk_mq_quiesce_queue(ctrl->admin_q);
+	blk_sync_queue(ctrl->admin_q);
 out_stop_queue:
 	nvme_tcp_stop_queue(ctrl, 0);
+	nvme_cancel_admin_tagset(ctrl);
 out_cleanup_queue:
 	if (new)
 		blk_cleanup_queue(ctrl->admin_q);
@@ -1904,11 +1907,7 @@ static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
 	blk_mq_quiesce_queue(ctrl->admin_q);
 	blk_sync_queue(ctrl->admin_q);
 	nvme_tcp_stop_queue(ctrl, 0);
-	if (ctrl->admin_tagset) {
-		blk_mq_tagset_busy_iter(ctrl->admin_tagset,
-			nvme_cancel_request, ctrl);
-		blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
-	}
+	nvme_cancel_admin_tagset(ctrl);
 	if (remove)
 		blk_mq_unquiesce_queue(ctrl->admin_q);
 	nvme_tcp_destroy_admin_queue(ctrl, remove);
@@ -1924,11 +1923,7 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
 	nvme_stop_queues(ctrl);
 	nvme_sync_io_queues(ctrl);
 	nvme_tcp_stop_io_queues(ctrl);
-	if (ctrl->tagset) {
-		blk_mq_tagset_busy_iter(ctrl->tagset,
-			nvme_cancel_request, ctrl);
-		blk_mq_tagset_wait_completed_request(ctrl->tagset);
-	}
+	nvme_cancel_tagset(ctrl);
 	if (remove)
 		nvme_start_queues(ctrl);
 	nvme_tcp_destroy_io_queues(ctrl, remove);
@@ -2003,10 +1998,18 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
 	return 0;
 
 destroy_io:
-	if (ctrl->queue_count > 1)
+	if (ctrl->queue_count > 1) {
+		nvme_stop_queues(ctrl);
+		nvme_sync_io_queues(ctrl);
+		nvme_tcp_stop_io_queues(ctrl);
+		nvme_cancel_tagset(ctrl);
 		nvme_tcp_destroy_io_queues(ctrl, new);
+	}
 destroy_admin:
+	blk_mq_quiesce_queue(ctrl->admin_q);
+	blk_sync_queue(ctrl->admin_q);
 	nvme_tcp_stop_queue(ctrl, 0);
+	nvme_cancel_admin_tagset(ctrl);
 	nvme_tcp_destroy_admin_queue(ctrl, new);
 	return ret;
 }
@@ -2268,12 +2271,12 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
 	req->data_len = blk_rq_nr_phys_segments(rq) ?
 				blk_rq_payload_bytes(rq) : 0;
 	req->curr_bio = rq->bio;
-	if (req->curr_bio)
-		nvme_tcp_init_iter(req, rq_data_dir(rq));
 
 	if (rq_data_dir(rq) == WRITE &&
 	    req->data_len <= nvme_tcp_inline_data_size(queue))
 		req->pdu_len = req->data_len;
+	else if (req->curr_bio)
+		nvme_tcp_init_iter(req, READ);
 
 	pdu->hdr.type = nvme_tcp_cmd;
 	pdu->hdr.flags = 0;
...
@@ -102,6 +102,23 @@ static const char *nvme_trace_get_lba_status(struct trace_seq *p,
 	return ret;
 }
 
+static const char *nvme_trace_admin_format_nvm(struct trace_seq *p, u8 *cdw10)
+{
+	const char *ret = trace_seq_buffer_ptr(p);
+	u8 lbaf = cdw10[0] & 0xF;
+	u8 mset = (cdw10[0] >> 4) & 0x1;
+	u8 pi = (cdw10[0] >> 5) & 0x7;
+	u8 pil = cdw10[1] & 0x1;
+	u8 ses = (cdw10[1] >> 1) & 0x7;
+
+	trace_seq_printf(p, "lbaf=%u, mset=%u, pi=%u, pil=%u, ses=%u",
+		lbaf, mset, pi, pil, ses);
+	trace_seq_putc(p, 0);
+
+	return ret;
+}
+
 static const char *nvme_trace_read_write(struct trace_seq *p, u8 *cdw10)
 {
 	const char *ret = trace_seq_buffer_ptr(p);
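
The new tracer decodes Format NVM's cdw10 per the NVMe base spec: LBAF in bits 3:0, MSET in bit 4, PI in bits 7:5, PIL in bit 8, and SES in bits 11:9, which is why PIL and SES come out of the second byte. A user-space decode of a hypothetical cdw10 value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t cdw10[4] = { 0x52, 0x03, 0, 0 };	/* hypothetical Format NVM cdw10 */
	uint8_t lbaf = cdw10[0] & 0xF;			/* LBA format index */
	uint8_t mset = (cdw10[0] >> 4) & 0x1;		/* extended metadata */
	uint8_t pi   = (cdw10[0] >> 5) & 0x7;		/* protection info type */
	uint8_t pil  = cdw10[1] & 0x1;			/* PI location */
	uint8_t ses  = (cdw10[1] >> 1) & 0x7;		/* secure erase setting */

	printf("lbaf=%u, mset=%u, pi=%u, pil=%u, ses=%u\n",
	       lbaf, mset, pi, pil, ses);		/* lbaf=2, mset=1, pi=2, pil=1, ses=1 */
	return 0;
}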
@@ -131,6 +148,35 @@ static const char *nvme_trace_dsm(struct trace_seq *p, u8 *cdw10)
 	return ret;
 }
 
+static const char *nvme_trace_zone_mgmt_send(struct trace_seq *p, u8 *cdw10)
+{
+	const char *ret = trace_seq_buffer_ptr(p);
+	u64 slba = get_unaligned_le64(cdw10);
+	u8 zsa = cdw10[12];
+	u8 all = cdw10[13];
+
+	trace_seq_printf(p, "slba=%llu, zsa=%u, all=%u", slba, zsa, all);
+	trace_seq_putc(p, 0);
+
+	return ret;
+}
+
+static const char *nvme_trace_zone_mgmt_recv(struct trace_seq *p, u8 *cdw10)
+{
+	const char *ret = trace_seq_buffer_ptr(p);
+	u64 slba = get_unaligned_le64(cdw10);
+	u32 numd = get_unaligned_le32(cdw10 + 8);
+	u8 zra = cdw10[12];
+	u8 zrasf = cdw10[13];
+	u8 pr = cdw10[14];
+
+	trace_seq_printf(p, "slba=%llu, numd=%u, zra=%u, zrasf=%u, pr=%u",
+		slba, numd, zra, zrasf, pr);
+	trace_seq_putc(p, 0);
+
+	return ret;
+}
+
 static const char *nvme_trace_common(struct trace_seq *p, u8 *cdw10)
 {
 	const char *ret = trace_seq_buffer_ptr(p);
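
Both zone helpers read the starting LBA as eight unaligned little-endian bytes spanning cdw10/cdw11, then pick the action fields out of cdw13 (offsets 12 and 13 of the cdw10-relative array). A user-space equivalent of the Send decode, with hypothetical field values:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t cdw10[16] = { 0 };		/* hypothetical cdw10..cdw13 area */
	uint64_t slba = 4096;

	memcpy(cdw10, &slba, sizeof(slba));	/* little-endian host assumed */
	cdw10[12] = 0x01;			/* ZSA, e.g. Close Zone */
	cdw10[13] = 0x01;			/* Select All */

	printf("slba=%llu, zsa=%u, all=%u\n",
	       (unsigned long long)slba, cdw10[12], cdw10[13]);
	return 0;
}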
@@ -159,6 +205,8 @@ const char *nvme_trace_parse_admin_cmd(struct trace_seq *p,
 		return nvme_trace_admin_get_features(p, cdw10);
 	case nvme_admin_get_lba_status:
 		return nvme_trace_get_lba_status(p, cdw10);
+	case nvme_admin_format_nvm:
+		return nvme_trace_admin_format_nvm(p, cdw10);
 	default:
 		return nvme_trace_common(p, cdw10);
 	}
@@ -171,9 +219,14 @@ const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p,
 	case nvme_cmd_read:
 	case nvme_cmd_write:
 	case nvme_cmd_write_zeroes:
+	case nvme_cmd_zone_append:
 		return nvme_trace_read_write(p, cdw10);
 	case nvme_cmd_dsm:
 		return nvme_trace_dsm(p, cdw10);
+	case nvme_cmd_zone_mgmt_send:
+		return nvme_trace_zone_mgmt_send(p, cdw10);
+	case nvme_cmd_zone_mgmt_recv:
+		return nvme_trace_zone_mgmt_recv(p, cdw10);
 	default:
 		return nvme_trace_common(p, cdw10);
 	}
...
@@ -74,11 +74,11 @@ static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
 static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
 		struct nvme_smart_log *slog)
 {
-	struct nvmet_ns *ns;
 	u64 host_reads, host_writes, data_units_read, data_units_written;
 
-	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
-	if (!ns) {
+	req->ns = nvmet_find_namespace(req->sq->ctrl,
+				       req->cmd->get_log_page.nsid);
+	if (!req->ns) {
 		pr_err("Could not find namespace id : %d\n",
 		       le32_to_cpu(req->cmd->get_log_page.nsid));
 		req->error_loc = offsetof(struct nvme_rw_command, nsid);
@@ -86,22 +86,20 @@ static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
 	}
 
 	/* we don't have the right data for file backed ns */
-	if (!ns->bdev)
-		goto out;
+	if (!req->ns->bdev)
+		return NVME_SC_SUCCESS;
 
-	host_reads = part_stat_read(ns->bdev, ios[READ]);