Diffstat (limited to 'drivers/nvme/host/pci.c')
-rw-r--r--	drivers/nvme/host/pci.c	32
1 file changed, 11 insertions(+), 21 deletions(-)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 3d21a154dce7..3faefabf339c 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -306,11 +306,11 @@ static __le64 **iod_list(struct request *req)
return (__le64 **)(iod->sg + blk_rq_nr_phys_segments(req));
}
-static int nvme_init_iod(struct request *rq, unsigned size,
- struct nvme_dev *dev)
+static int nvme_init_iod(struct request *rq, struct nvme_dev *dev)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
int nseg = blk_rq_nr_phys_segments(rq);
+ unsigned int size = blk_rq_payload_bytes(rq);
if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
iod->sg = kmalloc(nvme_iod_alloc_size(dev, size, nseg), GFP_ATOMIC);
@@ -420,12 +420,11 @@ static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
}
#endif
-static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req,
- int total_len)
+static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct dma_pool *pool;
- int length = total_len;
+ int length = blk_rq_payload_bytes(req);
struct scatterlist *sg = iod->sg;
int dma_len = sg_dma_len(sg);
u64 dma_addr = sg_dma_address(sg);
@@ -501,7 +500,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req,
}
static int nvme_map_data(struct nvme_dev *dev, struct request *req,
- unsigned size, struct nvme_command *cmnd)
+ struct nvme_command *cmnd)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct request_queue *q = req->q;
@@ -519,7 +518,7 @@ static int nvme_map_data(struct nvme_dev *dev, struct request *req,
DMA_ATTR_NO_WARN))
goto out;
- if (!nvme_setup_prps(dev, req, size))
+ if (!nvme_setup_prps(dev, req))
goto out_unmap;
ret = BLK_MQ_RQ_QUEUE_ERROR;
@@ -580,7 +579,6 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
struct nvme_dev *dev = nvmeq->dev;
struct request *req = bd->rq;
struct nvme_command cmnd;
- unsigned map_len;
int ret = BLK_MQ_RQ_QUEUE_OK;
/*
@@ -600,13 +598,12 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
if (ret != BLK_MQ_RQ_QUEUE_OK)
return ret;
- map_len = nvme_map_len(req);
- ret = nvme_init_iod(req, map_len, dev);
+ ret = nvme_init_iod(req, dev);
if (ret != BLK_MQ_RQ_QUEUE_OK)
goto out_free_cmd;
if (blk_rq_nr_phys_segments(req))
- ret = nvme_map_data(dev, req, map_len, &cmnd);
+ ret = nvme_map_data(dev, req, &cmnd);
if (ret != BLK_MQ_RQ_QUEUE_OK)
goto out_cleanup_iod;
@@ -712,15 +709,8 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
req = blk_mq_tag_to_rq(*nvmeq->tags, cqe.command_id);
nvme_req(req)->result = cqe.result;
blk_mq_complete_request(req, le16_to_cpu(cqe.status) >> 1);
-
}
- /* If the controller ignores the cq head doorbell and continuously
- * writes to the queue, it is theoretically possible to wrap around
- * the queue twice and mistakenly return IRQ_NONE. Linux only
- * requires that 0.1% of your interrupts are handled, so this isn't
- * a big problem.
- */
if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
return;
@@ -1909,10 +1899,10 @@ static int nvme_dev_map(struct nvme_dev *dev)
if (!dev->bar)
goto release;
- return 0;
+ return 0;
release:
- pci_release_mem_regions(pdev);
- return -ENODEV;
+ pci_release_mem_regions(pdev);
+ return -ENODEV;
}
static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
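For reference, the size/total_len/map_len values dropped above are all recomputed at the point of use via blk_rq_payload_bytes(). A minimal sketch of the two helpers involved, assuming the definitions from this era of the tree (the nvme_map_len() removed from pci.c and blk_rq_payload_bytes() from include/linux/blkdev.h); treat the exact bodies as illustrative:

	/* Removed by this change: the per-opcode payload hack in pci.c. */
	static inline unsigned nvme_map_len(struct request *rq)
	{
		if (req_op(rq) == REQ_OP_DISCARD)
			return sizeof(struct nvme_dsm_range);
		else
			return blk_rq_bytes(rq);
	}

	/*
	 * Its generic replacement in include/linux/blkdev.h: a request
	 * carrying a driver-built special payload reports that payload's
	 * length instead of the logical request size.
	 */
	static inline unsigned int blk_rq_payload_bytes(struct request *rq)
	{
		if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
			return rq->special_vec.bv_len;
		return blk_rq_bytes(rq);
	}

The practical difference: for a request with a special payload (such as the single struct nvme_dsm_range built for a discard), the block layer now reports the payload length itself, so callers like nvme_init_iod() and nvme_setup_prps() no longer need the size threaded through as an extra parameter.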