Diffstat (limited to 'drivers/target')
-rw-r--r--   drivers/target/iscsi/iscsi_target.c       11
-rw-r--r--   drivers/target/loopback/tcm_loop.c        18
-rw-r--r--   drivers/target/target_core_alua.c         20
-rw-r--r--   drivers/target/target_core_tmr.c         209
-rw-r--r--   drivers/target/target_core_transport.c    45
5 files changed, 207 insertions, 96 deletions
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 6a4ea29c2f3..26a5d8b94ba 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1079,7 +1079,9 @@ attach_cmd:
 	 */
 	if (!cmd->immediate_data) {
 		cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
-		if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+		if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
+			return 0;
+		else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
 			return iscsit_add_reject_from_cmd(
 				ISCSI_REASON_PROTOCOL_ERROR,
 				1, 0, buf, cmd);
@@ -1819,17 +1821,16 @@ attach:
 		int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
 		if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP)
 			out_of_order_cmdsn = 1;
-		else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
+		else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
 			return 0;
-		} else { /* (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) */
+		else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
 			return iscsit_add_reject_from_cmd(
 				ISCSI_REASON_PROTOCOL_ERROR,
 				1, 0, buf, cmd);
-		}
 	}
 	iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
 
-	if (out_of_order_cmdsn)
+	if (out_of_order_cmdsn || !(hdr->opcode & ISCSI_OP_IMMEDIATE))
 		return 0;
 	/*
 	 * Found the referenced task, send to transport for processing.
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index aa2d6799723..fb85b35bd2f 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -174,6 +174,24 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
 		sgl_bidi = sdb->table.sgl;
 		sgl_bidi_count = sdb->table.nents;
 	}
+	/*
+	 * Because some userspace code via scsi-generic do not memset their
+	 * associated read buffers, go ahead and do that here for type
+	 * SCF_SCSI_CONTROL_SG_IO_CDB.  Also note that this is currently
+	 * guaranteed to be a single SGL for SCF_SCSI_CONTROL_SG_IO_CDB
+	 * by target core in transport_generic_allocate_tasks() ->
+	 * transport_generic_cmd_sequencer().
+	 */
+	if (se_cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB &&
+	    se_cmd->data_direction == DMA_FROM_DEVICE) {
+		struct scatterlist *sg = scsi_sglist(sc);
+		unsigned char *buf = kmap(sg_page(sg)) + sg->offset;
+
+		if (buf != NULL) {
+			memset(buf, 0, sg->length);
+			kunmap(sg_page(sg));
+		}
+	}
 
 	/* Tell the core about our preallocated memory */
 	ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 98c98a3a025..8badcb46943 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -68,6 +68,15 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)
 	unsigned char *buf;
 	u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first
				    Target port group descriptor */
+	/*
+	 * Need at least 4 bytes of response data or else we can't
+	 * even fit the return data length.
+	 */
+	if (cmd->data_length < 4) {
+		pr_warn("REPORT TARGET PORT GROUPS allocation length %u"
+			" too small\n", cmd->data_length);
+		return -EINVAL;
+	}
 
 	buf = transport_kmap_first_data_page(cmd);
 
@@ -75,6 +84,17 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)
 	list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
			tg_pt_gp_list) {
 		/*
+		 * Check if the Target port group and Target port descriptor list
+		 * based on tg_pt_gp_members count will fit into the response payload.
+		 * Otherwise, bump rd_len to let the initiator know we have exceeded
+		 * the allocation length and the response is truncated.
+		 */
+		if ((off + 8 + (tg_pt_gp->tg_pt_gp_members * 4)) >
+		     cmd->data_length) {
+			rd_len += 8 + (tg_pt_gp->tg_pt_gp_members * 4);
+			continue;
+		}
+		/*
		 * PREF: Preferred target port bit, determine if this
		 * bit should be set for port group.
		 */
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 27d4925e51c..5c1b8c599f6 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -67,15 +67,16 @@ void core_tmr_release_req(
 	struct se_tmr_req *tmr)
 {
 	struct se_device *dev = tmr->tmr_dev;
+	unsigned long flags;
 
 	if (!dev) {
 		kmem_cache_free(se_tmr_req_cache, tmr);
 		return;
 	}
 
-	spin_lock_irq(&dev->se_tmr_lock);
+	spin_lock_irqsave(&dev->se_tmr_lock, flags);
 	list_del(&tmr->tmr_list);
-	spin_unlock_irq(&dev->se_tmr_lock);
+	spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
 
 	kmem_cache_free(se_tmr_req_cache, tmr);
 }
@@ -100,54 +101,20 @@ static void core_tmr_handle_tas_abort(
 		transport_cmd_finish_abort(cmd, 0);
 }
 
-int core_tmr_lun_reset(
+static void core_tmr_drain_tmr_list(
 	struct se_device *dev,
 	struct se_tmr_req *tmr,
-	struct list_head *preempt_and_abort_list,
-	struct se_cmd *prout_cmd)
+	struct list_head *preempt_and_abort_list)
 {
-	struct se_cmd *cmd, *tcmd;
-	struct se_node_acl *tmr_nacl = NULL;
-	struct se_portal_group *tmr_tpg = NULL;
-	struct se_queue_obj *qobj = &dev->dev_queue_obj;
+	LIST_HEAD(drain_tmr_list);
 	struct se_tmr_req *tmr_p, *tmr_pp;
-	struct se_task *task, *task_tmp;
+	struct se_cmd *cmd;
 	unsigned long flags;
-	int fe_count, tas;
-	/*
-	 * TASK_ABORTED status bit, this is configurable via ConfigFS
-	 * struct se_device attributes. spc4r17 section 7.4.6 Control mode page
-	 *
-	 * A task aborted status (TAS) bit set to zero specifies that aborted
-	 * tasks shall be terminated by the device server without any response
-	 * to the application client. A TAS bit set to one specifies that tasks
-	 * aborted by the actions of an I_T nexus other than the I_T nexus on
-	 * which the command was received shall be completed with TASK ABORTED
-	 * status (see SAM-4).
-	 */
-	tas = dev->se_sub_dev->se_dev_attrib.emulate_tas;
-	/*
-	 * Determine if this se_tmr is coming from a $FABRIC_MOD
-	 * or struct se_device passthrough..
-	 */
-	if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
-		tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
-		tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
-		if (tmr_nacl && tmr_tpg) {
-			pr_debug("LUN_RESET: TMR caller fabric: %s"
-				" initiator port %s\n",
-				tmr_tpg->se_tpg_tfo->get_fabric_name(),
-				tmr_nacl->initiatorname);
-		}
-	}
-	pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n",
-		(preempt_and_abort_list) ? "Preempt" : "TMR",
-		dev->transport->name, tas);
 	/*
 	 * Release all pending and outgoing TMRs aside from the received
 	 * LUN_RESET tmr..
 	 */
-	spin_lock_irq(&dev->se_tmr_lock);
+	spin_lock_irqsave(&dev->se_tmr_lock, flags);
 	list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
 		/*
 		 * Allow the received TMR to return with FUNCTION_COMPLETE.
@@ -169,29 +136,48 @@ int core_tmr_lun_reset(
 			(core_scsi3_check_cdb_abort_and_preempt(
					preempt_and_abort_list, cmd) != 0))
 			continue;
-		spin_unlock_irq(&dev->se_tmr_lock);
 
-		spin_lock_irqsave(&cmd->t_state_lock, flags);
+		spin_lock(&cmd->t_state_lock);
 		if (!atomic_read(&cmd->t_transport_active)) {
-			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-			spin_lock_irq(&dev->se_tmr_lock);
+			spin_unlock(&cmd->t_state_lock);
 			continue;
 		}
 		if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
-			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-			spin_lock_irq(&dev->se_tmr_lock);
+			spin_unlock(&cmd->t_state_lock);
 			continue;
 		}
+		spin_unlock(&cmd->t_state_lock);
+
+		list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
+	}
+	spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
+
+	while (!list_empty(&drain_tmr_list)) {
+		tmr = list_entry(drain_tmr_list.next, struct se_tmr_req, tmr_list);
+		list_del(&tmr->tmr_list);
+		cmd = tmr->task_cmd;
+
 		pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
 			" Response: 0x%02x, t_state: %d\n",
-			(preempt_and_abort_list) ? "Preempt" : "", tmr_p,
-			tmr_p->function, tmr_p->response, cmd->t_state);
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+			(preempt_and_abort_list) ? "Preempt" : "", tmr,
+			tmr->function, tmr->response, cmd->t_state);
+
 		transport_cmd_finish_abort_tmr(cmd);
-		spin_lock_irq(&dev->se_tmr_lock);
 	}
-	spin_unlock_irq(&dev->se_tmr_lock);
+}
+
+static void core_tmr_drain_task_list(
+	struct se_device *dev,
+	struct se_cmd *prout_cmd,
+	struct se_node_acl *tmr_nacl,
+	int tas,
+	struct list_head *preempt_and_abort_list)
+{
+	LIST_HEAD(drain_task_list);
+	struct se_cmd *cmd;
+	struct se_task *task, *task_tmp;
+	unsigned long flags;
+	int fe_count;
 	/*
 	 * Complete outstanding struct se_task CDBs with TASK_ABORTED SAM status.
 	 * This is following sam4r17, section 5.6 Aborting commands, Table 38
@@ -236,9 +222,23 @@ int core_tmr_lun_reset(
 		if (prout_cmd == cmd)
 			continue;
 
-		list_del(&task->t_state_list);
+		list_move_tail(&task->t_state_list, &drain_task_list);
 		atomic_set(&task->task_state_active, 0);
-		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+		/*
+		 * Remove from task execute list before processing drain_task_list
+		 */
+		if (atomic_read(&task->task_execute_queue) != 0) {
+			list_del(&task->t_execute_list);
+			atomic_set(&task->task_execute_queue, 0);
+			atomic_dec(&dev->execute_tasks);
+		}
+	}
+	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+
+	while (!list_empty(&drain_task_list)) {
+		task = list_entry(drain_task_list.next, struct se_task, t_state_list);
+		list_del(&task->t_state_list);
+		cmd = task->task_se_cmd;
 
 		spin_lock_irqsave(&cmd->t_state_lock, flags);
 		pr_debug("LUN_RESET: %s cmd: %p task: %p"
@@ -275,20 +275,14 @@ int core_tmr_lun_reset(
 
 			atomic_set(&task->task_active, 0);
 			atomic_set(&task->task_stop, 0);
-		} else {
-			if (atomic_read(&task->task_execute_queue) != 0)
-				transport_remove_task_from_execute_queue(task, dev);
 		}
 		__transport_stop_task_timer(task, &flags);
 
 		if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) {
-			spin_unlock_irqrestore(
-					&cmd->t_state_lock, flags);
+			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 			pr_debug("LUN_RESET: Skipping task: %p, dev: %p for"
 				" t_task_cdbs_ex_left: %d\n", task, dev,
 				atomic_read(&cmd->t_task_cdbs_ex_left));
-
-			spin_lock_irqsave(&dev->execute_task_lock, flags);
 			continue;
 		}
 		fe_count = atomic_read(&cmd->t_fe_count);
@@ -298,22 +292,31 @@ int core_tmr_lun_reset(
 				" task: %p, t_fe_count: %d dev: %p\n", task,
 				fe_count, dev);
 			atomic_set(&cmd->t_transport_aborted, 1);
-			spin_unlock_irqrestore(&cmd->t_state_lock,
-						flags);
-			core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
+			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
-			spin_lock_irqsave(&dev->execute_task_lock, flags);
+			core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
 			continue;
 		}
 		pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
 			" t_fe_count: %d dev: %p\n", task, fe_count, dev);
 		atomic_set(&cmd->t_transport_aborted, 1);
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
 
-		spin_lock_irqsave(&dev->execute_task_lock, flags);
+		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
 	}
-	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+}
+
+static void core_tmr_drain_cmd_list(
+	struct se_device *dev,
+	struct se_cmd *prout_cmd,
+	struct se_node_acl *tmr_nacl,
+	int tas,
+	struct list_head *preempt_and_abort_list)
+{
+	LIST_HEAD(drain_cmd_list);
+	struct se_queue_obj *qobj = &dev->dev_queue_obj;
+	struct se_cmd *cmd, *tcmd;
+	unsigned long flags;
 	/*
 	 * Release all commands remaining in the struct se_device cmd queue.
 	 *
@@ -337,11 +340,26 @@ int core_tmr_lun_reset(
 		 */
 		if (prout_cmd == cmd)
 			continue;
+		/*
+		 * Skip direct processing of TRANSPORT_FREE_CMD_INTR for
+		 * HW target mode fabrics.
+		 */
+		spin_lock(&cmd->t_state_lock);
+		if (cmd->t_state == TRANSPORT_FREE_CMD_INTR) {
+			spin_unlock(&cmd->t_state_lock);
+			continue;
+		}
+		spin_unlock(&cmd->t_state_lock);
 
-		atomic_dec(&cmd->t_transport_queue_active);
+		atomic_set(&cmd->t_transport_queue_active, 0);
 		atomic_dec(&qobj->queue_cnt);
-		list_del(&cmd->se_queue_node);
-		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+		list_move_tail(&cmd->se_queue_node, &drain_cmd_list);
+	}
+	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+
+	while (!list_empty(&drain_cmd_list)) {
+		cmd = list_entry(drain_cmd_list.next, struct se_cmd, se_queue_node);
+		list_del_init(&cmd->se_queue_node);
 
 		pr_debug("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
 			" %d t_fe_count: %d\n", (preempt_and_abort_list) ?
@@ -354,9 +372,53 @@ int core_tmr_lun_reset(
 
 		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
 				atomic_read(&cmd->t_fe_count));
-
-		spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
 	}
-	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+}
+
+int core_tmr_lun_reset(
+	struct se_device *dev,
+	struct se_tmr_req *tmr,
+	struct list_head *preempt_and_abort_list,
+	struct se_cmd *prout_cmd)
+{
+	struct se_node_acl *tmr_nacl = NULL;
+	struct se_portal_group *tmr_tpg = NULL;
+	int tas;
+	/*
+	 * TASK_ABORTED status bit, this is configurable via ConfigFS
+	 * struct se_device attributes. spc4r17 section 7.4.6 Control mode page
+	 *
+	 * A task aborted status (TAS) bit set to zero specifies that aborted
+	 * tasks shall be terminated by the device server without any response
+	 * to the application client. A TAS bit set to one specifies that tasks
+	 * aborted by the actions of an I_T nexus other than the I_T nexus on
+	 * which the command was received shall be completed with TASK ABORTED
+	 * status (see SAM-4).
+	 */
+	tas = dev->se_sub_dev->se_dev_attrib.emulate_tas;
+	/*
+	 * Determine if this se_tmr is coming from a $FABRIC_MOD
+	 * or struct se_device passthrough..
+	 */
+	if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
+		tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
+		tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
+		if (tmr_nacl && tmr_tpg) {
+			pr_debug("LUN_RESET: TMR caller fabric: %s"
+				" initiator port %s\n",
+				tmr_tpg->se_tpg_tfo->get_fabric_name(),
+				tmr_nacl->initiatorname);
+		}
+	}
+	pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n",
+		(preempt_and_abort_list) ? "Preempt" : "TMR",
+		dev->transport->name, tas);
+
+	core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
+	core_tmr_drain_task_list(dev, prout_cmd, tmr_nacl, tas,
+				preempt_and_abort_list);
+	core_tmr_drain_cmd_list(dev, prout_cmd, tmr_nacl, tas,
+				preempt_and_abort_list);
 	/*
 	 * Clear any legacy SPC-2 reservation when called during
 	 * LOGICAL UNIT RESET
@@ -379,3 +441,4 @@ int core_tmr_lun_reset(
 		dev->transport->name);
 	return 0;
 }
+
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index a4b0a8d27f2..013c1006adf 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -594,13 +594,14 @@ check_lun:
 
 void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
 {
-	transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);
 	transport_lun_remove_cmd(cmd);
 
 	if (transport_cmd_check_stop_to_fabric(cmd))
 		return;
-	if (remove)
+	if (remove) {
+		transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);
 		transport_generic_remove(cmd, 0);
+	}
 }
 
 void transport_cmd_finish_abort_tmr(struct se_cmd *cmd)
@@ -621,8 +622,6 @@ static void transport_add_cmd_to_queue(
 	struct se_queue_obj *qobj = &dev->dev_queue_obj;
 	unsigned long flags;
 
-	INIT_LIST_HEAD(&cmd->se_queue_node);
-
 	if (t_state) {
 		spin_lock_irqsave(&cmd->t_state_lock, flags);
 		cmd->t_state = t_state;
@@ -631,15 +630,21 @@ static void transport_add_cmd_to_queue(
 	}
 
 	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
+
+	/* If the cmd is already on the list, remove it before we add it */
+	if (!list_empty(&cmd->se_queue_node))
+		list_del(&cmd->se_queue_node);
+	else
+		atomic_inc(&qobj->queue_cnt);
+
 	if (cmd->se_cmd_flags & SCF_EMULATE_QUEUE_FULL) {
 		cmd->se_cmd_flags &= ~SCF_EMULATE_QUEUE_FULL;
 		list_add(&cmd->se_queue_node, &qobj->qobj_list);
 	} else
 		list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
-	atomic_inc(&cmd->t_transport_queue_active);
+	atomic_set(&cmd->t_transport_queue_active, 1);
 	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 
-	atomic_inc(&qobj->queue_cnt);
 	wake_up_interruptible(&qobj->thread_wq);
 }
 
@@ -656,9 +661,9 @@ transport_get_cmd_from_queue(struct se_queue_obj *qobj)
 	}
 	cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);
 
-	atomic_dec(&cmd->t_transport_queue_active);
+	atomic_set(&cmd->t_transport_queue_active, 0);
 
-	list_del(&cmd->se_queue_node);
+	list_del_init(&cmd->se_queue_node);
 	atomic_dec(&qobj->queue_cnt);
 	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 
@@ -668,7 +673,6 @@ transport_get_cmd_from_queue(struct se_queue_obj *qobj)
 static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
 		struct se_queue_obj *qobj)
 {
-	struct se_cmd *t;
 	unsigned long flags;
 
 	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
@@ -676,14 +680,9 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
 		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 		return;
 	}
-
-	list_for_each_entry(t, &qobj->qobj_list, se_queue_node)
-		if (t == cmd) {
-			atomic_dec(&cmd->t_transport_queue_active);
-			atomic_dec(&qobj->queue_cnt);
-			list_del(&cmd->se_queue_node);
-			break;
-		}
+	atomic_set(&cmd->t_transport_queue_active, 0);
+	atomic_dec(&qobj->queue_cnt);
+	list_del_init(&cmd->se_queue_node);
 	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 
 	if (atomic_read(&cmd->t_transport_queue_active)) {
@@ -1067,7 +1066,7 @@ static void transport_release_all_cmds(struct se_device *dev)
 	list_for_each_entry_safe(cmd, tcmd, &dev->dev_queue_obj.qobj_list,
				se_queue_node) {
 		t_state = cmd->t_state;
-		list_del(&cmd->se_queue_node);
+		list_del_init(&cmd->se_queue_node);
 		spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock,
				flags);
 
@@ -1598,6 +1597,7 @@ void transport_init_se_cmd(
 	INIT_LIST_HEAD(&cmd->se_delayed_node);
 	INIT_LIST_HEAD(&cmd->se_ordered_node);
 	INIT_LIST_HEAD(&cmd->se_qf_node);
+	INIT_LIST_HEAD(&cmd->se_queue_node);
 	INIT_LIST_HEAD(&cmd->t_task_list);
 	init_completion(&cmd->transport_lun_fe_stop_comp);
 
@@ -4920,6 +4920,15 @@ EXPORT_SYMBOL(transport_check_aborted_status);
 
 void transport_send_task_abort(struct se_cmd *cmd)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
 	/*
 	 * If there are still expected incoming fabric WRITEs, we wait
 	 * until until they have completed before sending a TASK_ABORTED
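
The target_core_tmr.c refactoring above applies one locking idiom three times: while the spinlock is held, matching entries are only moved onto a function-local list (LIST_HEAD(drain_tmr_list) and friends, via list_move_tail()), and the actual per-entry teardown runs afterwards with the lock dropped. That is what eliminates the unlock/relock dance inside the old loops. Below is a minimal userspace sketch of that drain-list pattern, assuming simplified stand-ins for the kernel's <linux/list.h> helpers and a pthread mutex in place of the spinlock; struct request, pending, and drain_pending() are hypothetical names, not target-core code.

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the kernel's <linux/list.h> primitives. */
struct list_head {
        struct list_head *next, *prev;
};

#define LIST_HEAD(name) struct list_head name = { &(name), &(name) }

static int list_empty(const struct list_head *head)
{
        return head->next == head;
}

static void list_del(struct list_head *entry)
{
        entry->prev->next = entry->next;
        entry->next->prev = entry->prev;
}

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
        entry->prev = head->prev;
        entry->next = head;
        head->prev->next = entry;
        head->prev = entry;
}

/* Unlink from one list and append to another, as the drain loops do. */
static void list_move_tail(struct list_head *entry, struct list_head *head)
{
        list_del(entry);
        list_add_tail(entry, head);
}

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Hypothetical request; stands in for se_tmr_req/se_task/se_cmd. */
struct request {
        int id;
        int abortable;          /* like the preempt_and_abort filtering */
        struct list_head node;
};

static LIST_HEAD(pending);
static pthread_mutex_t pending_lock = PTHREAD_MUTEX_INITIALIZER;

static void drain_pending(void)
{
        LIST_HEAD(drain);       /* function-local, so it needs no locking */
        struct list_head *p, *n;

        /* Phase 1: select entries under the lock, but do no real work. */
        pthread_mutex_lock(&pending_lock);
        for (p = pending.next, n = p->next; p != &pending; p = n, n = p->next) {
                struct request *req = container_of(p, struct request, node);

                if (!req->abortable)    /* e.g. the LUN_RESET TMR itself */
                        continue;
                list_move_tail(&req->node, &drain);
        }
        pthread_mutex_unlock(&pending_lock);

        /* Phase 2: tear entries down with the lock dropped, so this part
         * may block or take other locks without risking deadlock. */
        while (!list_empty(&drain)) {
                struct request *req =
                        container_of(drain.next, struct request, node);

                list_del(&req->node);
                printf("aborting request %d\n", req->id);
                free(req);
        }
}

int main(void)
{
        int i;

        for (i = 0; i < 3; i++) {
                struct request *req = malloc(sizeof(*req));

                req->id = i;
                req->abortable = (i != 0);      /* keep request 0 queued */
                pthread_mutex_lock(&pending_lock);
                list_add_tail(&req->node, &pending);
                pthread_mutex_unlock(&pending_lock);
        }
        drain_pending();
        return 0;
}

Compiled with cc -pthread, this prints "aborting request 1" and "aborting request 2"; request 0 stays on the pending list, playing the role of the received TMR that must complete normally.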
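The target_core_transport.c side of the diff establishes a list-membership invariant: se_queue_node is initialized once in transport_init_se_cmd(), every removal uses list_del_init() rather than list_del(), and transport_add_cmd_to_queue() can then use list_empty() on the node itself as an "already queued?" test before re-linking it, replacing the old list_for_each_entry() scan in transport_remove_cmd_from_queue(). A small sketch of why list_del_init() is what makes that test valid, again with simplified stand-ins for the kernel list primitives:

#include <assert.h>

struct list_head {
        struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *head)
{
        head->next = head->prev = head;
}

static int list_empty(const struct list_head *head)
{
        return head->next == head;
}

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
        entry->prev = head->prev;
        entry->next = head;
        head->prev->next = entry;
        head->prev = entry;
}

/* Plain list_del() leaves the node pointing at its old neighbours. */
static void list_del(struct list_head *entry)
{
        entry->prev->next = entry->next;
        entry->next->prev = entry->prev;
}

/* list_del_init() additionally re-points the node at itself, so that
 * list_empty(node) is true again after removal. */
static void list_del_init(struct list_head *entry)
{
        list_del(entry);
        INIT_LIST_HEAD(entry);
}

int main(void)
{
        struct list_head queue, node;

        INIT_LIST_HEAD(&queue);
        INIT_LIST_HEAD(&node);          /* as transport_init_se_cmd() now does */
        assert(list_empty(&node));      /* never queued: provably off-list */

        list_add_tail(&node, &queue);
        assert(!list_empty(&node));     /* on a list: points at the queue */

        list_del_init(&node);           /* as the queue-removal paths now do */
        assert(list_empty(&node));      /* off the list, and testable again */

        /* This is the guard transport_add_cmd_to_queue() relies on: a node
         * removed with plain list_del() would still look non-empty here and
         * could corrupt the list if blindly re-added. */
        if (list_empty(&node))
                list_add_tail(&node, &queue);
        return 0;
}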
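The target_core_alua.c hunks handle a too-small ALLOCATION LENGTH in two steps: fail outright if even the 4-byte header cannot fit, and otherwise keep accumulating rd_len for descriptors that are skipped, so the RETURN DATA LENGTH field still reports the full size and the initiator can detect truncation. The sketch below walks the same arithmetic in plain C; the groups[] array and report_tpgs_len() are hypothetical stand-ins for the tg_pt_gp list and the kernel function, and only the length accounting (8 bytes per descriptor plus 4 per member port) is taken from the diff.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for the ALUA group list: one entry per target
 * port group, holding its member-port count (tg_pt_gp_members). */
static const uint32_t groups[] = { 2, 4, 1 };

static int report_tpgs_len(unsigned char *buf, uint32_t data_length)
{
        uint32_t rd_len = 0, off = 4;   /* 4-byte RETURN DATA LENGTH header */
        size_t i;

        /* Need at least 4 bytes or even the header cannot fit. */
        if (data_length < 4)
                return -1;

        memset(buf, 0, data_length);
        for (i = 0; i < sizeof(groups) / sizeof(groups[0]); i++) {
                /* 8 descriptor bytes plus 4 bytes per member port. */
                uint32_t desc_len = 8 + groups[i] * 4;

                rd_len += desc_len;
                if (off + desc_len > data_length)
                        continue;       /* counted but not written: truncated */
                /* ...descriptor bytes would be written at buf[off] here... */
                off += desc_len;
        }
        /* Report the full length, which may exceed what was written. */
        buf[0] = (rd_len >> 24) & 0xff;
        buf[1] = (rd_len >> 16) & 0xff;
        buf[2] = (rd_len >> 8) & 0xff;
        buf[3] = rd_len & 0xff;
        return 0;
}

int main(void)
{
        unsigned char buf[32];

        /* With 32 bytes available, the 24-byte middle descriptor is
         * skipped, yet RETURN DATA LENGTH still reports all 52 bytes. */
        if (report_tpgs_len(buf, sizeof(buf)) == 0)
                printf("RETURN DATA LENGTH: %u\n",
                       (unsigned)((buf[0] << 24) | (buf[1] << 16) |
                                  (buf[2] << 8) | buf[3]));
        return 0;
}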