Diffstat (limited to 'sys/cam/ctl/ctl.c')
-rw-r--r--  sys/cam/ctl/ctl.c  4198
1 file changed, 2401 insertions, 1797 deletions
diff --git a/sys/cam/ctl/ctl.c b/sys/cam/ctl/ctl.c
index 1e9a952eaedb1..f3dbc183bf50d 100644
--- a/sys/cam/ctl/ctl.c
+++ b/sys/cam/ctl/ctl.c
@@ -83,20 +83,6 @@ __FBSDID("$FreeBSD$");
struct ctl_softc *control_softc = NULL;
/*
- * The default is to run with CTL_DONE_THREAD turned on. Completed
- * transactions are queued for processing by the CTL work thread. When
- * CTL_DONE_THREAD is not defined, completed transactions are processed in
- * the caller's context.
- */
-#define CTL_DONE_THREAD
-
-/*
- * Use the serial number and device ID provided by the backend, rather than
- * making up our own.
- */
-#define CTL_USE_BACKEND_SN
-
-/*
* Size and alignment macros needed for Copan-specific HA hardware. These
* can go away when the HA code is re-written, and uses busdma for any
* hardware.
@@ -296,8 +282,10 @@ static struct scsi_control_page control_page_default = {
/*rlec*/0,
/*queue_flags*/0,
/*eca_and_aen*/0,
- /*reserved*/0,
- /*aen_holdoff_period*/{0, 0}
+ /*flags4*/SCP_TAS,
+ /*aen_holdoff_period*/{0, 0},
+ /*busy_timeout_period*/{0, 0},
+ /*extended_selftest_completion_time*/{0, 0}
};
static struct scsi_control_page control_page_changeable = {
@@ -306,8 +294,10 @@ static struct scsi_control_page control_page_changeable = {
/*rlec*/SCP_DSENSE,
/*queue_flags*/0,
/*eca_and_aen*/0,
- /*reserved*/0,
- /*aen_holdoff_period*/{0, 0}
+ /*flags4*/0,
+ /*aen_holdoff_period*/{0, 0},
+ /*busy_timeout_period*/{0, 0},
+ /*extended_selftest_completion_time*/{0, 0}
};
@@ -321,20 +311,19 @@ static int ctl_is_single = 1;
static int index_to_aps_page;
SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer");
-static int worker_threads = 1;
-TUNABLE_INT("kern.cam.ctl.worker_threads", &worker_threads);
+static int worker_threads = -1;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN,
&worker_threads, 1, "Number of worker threads");
static int verbose = 0;
-TUNABLE_INT("kern.cam.ctl.verbose", &verbose);
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, verbose, CTLFLAG_RWTUN,
&verbose, 0, "Show SCSI errors returned to initiator");
/*
- * Serial number (0x80), device id (0x83), supported pages (0x00),
- * Block limits (0xB0) and Logical Block Provisioning (0xB2)
+ * Supported pages (0x00), Serial number (0x80), Device ID (0x83),
+ * SCSI Ports (0x88), Third-party Copy (0x8F), Block limits (0xB0) and
+ * Logical Block Provisioning (0xB2)
*/
-#define SCSI_EVPD_NUM_SUPPORTED_PAGES 5
+#define SCSI_EVPD_NUM_SUPPORTED_PAGES 7
static void ctl_isc_event_handler(ctl_ha_channel chanel, ctl_ha_event event,
int param);
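/*
 * Editorial illustration, not part of the diff: the seven page codes now
 * counted by SCSI_EVPD_NUM_SUPPORTED_PAGES, as enumerated in the comment
 * above.  The array name is hypothetical; the real list is emitted by
 * ctl_inquiry_evpd_supported(), whose body is outside this excerpt.
 */
static const uint8_t ctl_evpd_pages_sketch[SCSI_EVPD_NUM_SUPPORTED_PAGES] = {
	0x00,	/* Supported VPD Pages */
	0x80,	/* Unit Serial Number */
	0x83,	/* Device Identification */
	0x88,	/* SCSI Ports */
	0x8F,	/* Third-party Copy */
	0xB0,	/* Block Limits */
	0xB2,	/* Logical Block Provisioning */
};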
@@ -345,12 +334,10 @@ static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td);
static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td);
static void ctl_ioctl_online(void *arg);
static void ctl_ioctl_offline(void *arg);
-static int ctl_ioctl_targ_enable(void *arg, struct ctl_id targ_id);
-static int ctl_ioctl_targ_disable(void *arg, struct ctl_id targ_id);
static int ctl_ioctl_lun_enable(void *arg, struct ctl_id targ_id, int lun_id);
static int ctl_ioctl_lun_disable(void *arg, struct ctl_id targ_id, int lun_id);
static int ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio);
-static int ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio, int have_lock);
+static int ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio);
static int ctl_ioctl_submit_wait(union ctl_io *io);
static void ctl_ioctl_datamove(union ctl_io *io);
static void ctl_ioctl_done(union ctl_io *io);
@@ -362,8 +349,8 @@ static int ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
struct ctl_ooa_entry *kern_entries);
static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
struct thread *td);
-uint32_t ctl_get_resindex(struct ctl_nexus *nexus);
-uint32_t ctl_port_idx(int port_num);
+static uint32_t ctl_map_lun(int port_num, uint32_t lun);
+static uint32_t ctl_map_lun_back(int port_num, uint32_t lun);
#ifdef unused
static union ctl_io *ctl_malloc_io(ctl_io_type io_type, uint32_t targ_port,
uint32_t targ_target, uint32_t targ_lun,
@@ -392,6 +379,8 @@ static void ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg);
static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len);
+static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio,
+ int alloc_len);
static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio,
int alloc_len);
static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len);
@@ -406,7 +395,7 @@ static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
static int ctl_check_blocked(struct ctl_lun *lun);
static int ctl_scsiio_lun_check(struct ctl_softc *ctl_softc,
struct ctl_lun *lun,
- struct ctl_cmd_entry *entry,
+ const struct ctl_cmd_entry *entry,
struct ctl_scsiio *ctsio);
//static int ctl_check_rtr(union ctl_io *pending_io, struct ctl_softc *softc);
static void ctl_failover(void);
@@ -420,7 +409,9 @@ static int ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io,
static int ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io,
ctl_ua_type ua_type);
static int ctl_abort_task(union ctl_io *io);
-static void ctl_run_task_queue(struct ctl_softc *ctl_softc);
+static int ctl_abort_task_set(union ctl_io *io);
+static int ctl_i_t_nexus_reset(union ctl_io *io);
+static void ctl_run_task(union ctl_io *io);
#ifdef CTL_IO_DELAY
static void ctl_datamove_timer_wakeup(void *arg);
static void ctl_done_timer_wakeup(void *arg);
@@ -437,8 +428,19 @@ static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
ctl_ha_dt_cb callback);
static void ctl_datamove_remote_read(union ctl_io *io);
static void ctl_datamove_remote(union ctl_io *io);
-static int ctl_process_done(union ctl_io *io, int have_lock);
+static int ctl_process_done(union ctl_io *io);
+static void ctl_lun_thread(void *arg);
static void ctl_work_thread(void *arg);
+static void ctl_enqueue_incoming(union ctl_io *io);
+static void ctl_enqueue_rtr(union ctl_io *io);
+static void ctl_enqueue_done(union ctl_io *io);
+static void ctl_enqueue_isc(union ctl_io *io);
+static const struct ctl_cmd_entry *
+ ctl_get_cmd_entry(struct ctl_scsiio *ctsio);
+static const struct ctl_cmd_entry *
+ ctl_validate_command(struct ctl_scsiio *ctsio);
+static int ctl_cmd_applicable(uint8_t lun_type,
+ const struct ctl_cmd_entry *entry);
/*
* Load the serialization table. This isn't very pretty, but is probably
@@ -460,6 +462,7 @@ static struct cdevsw ctl_cdevsw = {
MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL");
+MALLOC_DEFINE(M_CTLIO, "ctlio", "Memory used for CTL requests");
static int ctl_module_event_handler(module_t, int /*modeventtype_t*/, void *);
@@ -472,6 +475,11 @@ static moduledata_t ctl_moduledata = {
DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD);
MODULE_VERSION(ctl, 1);
+static struct ctl_frontend ioctl_frontend =
+{
+ .name = "ioctl",
+};
+
static void
ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc,
union ctl_ha_msg *msg_info)
@@ -496,8 +504,7 @@ ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc,
sizeof(ctsio->sense_data));
memcpy(&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
&msg_info->scsi.lbalen, sizeof(msg_info->scsi.lbalen));
- STAILQ_INSERT_TAIL(&ctl_softc->isc_queue, &ctsio->io_hdr, links);
- ctl_wakeup_thread();
+ ctl_enqueue_isc((union ctl_io *)ctsio);
}
static void
@@ -543,8 +550,7 @@ ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc,
}
#endif
ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
- STAILQ_INSERT_TAIL(&ctl_softc->isc_queue, &ctsio->io_hdr, links);
- ctl_wakeup_thread();
+ ctl_enqueue_isc((union ctl_io *)ctsio);
}
/*
@@ -579,7 +585,6 @@ ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
isc_status);
return;
}
- mtx_lock(&ctl_softc->ctl_lock);
switch (msg_info.hdr.msg_type) {
case CTL_MSG_SERIALIZE:
@@ -592,7 +597,6 @@ ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
"ctl_io!\n");
/* Bad Juju */
/* Need to set busy and send msg back */
- mtx_unlock(&ctl_softc->ctl_lock);
msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
msg_info.hdr.status = CTL_SCSI_ERROR;
msg_info.scsi.scsi_status = SCSI_STATUS_BUSY;
@@ -634,18 +638,14 @@ ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
memcpy(io->scsiio.cdb, msg_info.scsi.cdb,
CTL_MAX_CDBLEN);
if (ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
- struct ctl_cmd_entry *entry;
- uint8_t opcode;
+ const struct ctl_cmd_entry *entry;
- opcode = io->scsiio.cdb[0];
- entry = &ctl_cmd_table[opcode];
+ entry = ctl_get_cmd_entry(&io->scsiio);
io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
io->io_hdr.flags |=
entry->flags & CTL_FLAG_DATA_MASK;
}
- STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
- &io->io_hdr, links);
- ctl_wakeup_thread();
+ ctl_enqueue_isc(io);
break;
/* Performed on the Originating SC, XFER mode only */
@@ -749,11 +749,8 @@ ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
* the full S/G list. Queue processing in the thread.
* Otherwise wait for the next piece.
*/
- if (msg_info.dt.sg_last != 0) {
- STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
- &io->io_hdr, links);
- ctl_wakeup_thread();
- }
+ if (msg_info.dt.sg_last != 0)
+ ctl_enqueue_isc(io);
break;
}
/* Performed on the Serializing (primary) SC, XFER mode only */
@@ -779,10 +776,7 @@ ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
io->scsiio.residual = msg_info.scsi.residual;
memcpy(&io->scsiio.sense_data,&msg_info.scsi.sense_data,
sizeof(io->scsiio.sense_data));
-
- STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
- &io->io_hdr, links);
- ctl_wakeup_thread();
+ ctl_enqueue_isc(io);
break;
}
@@ -791,7 +785,6 @@ ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
io = msg_info.hdr.original_sc;
if (io == NULL) {
printf("%s: Major Bummer\n", __func__);
- mtx_unlock(&ctl_softc->ctl_lock);
return;
} else {
#if 0
@@ -800,9 +793,7 @@ ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
}
io->io_hdr.msg_type = CTL_MSG_R2R;
io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc;
- STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
- &io->io_hdr, links);
- ctl_wakeup_thread();
+ ctl_enqueue_isc(io);
break;
/*
@@ -839,9 +830,7 @@ ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
/* io = msg_info.hdr.serializing_sc; */
io->io_hdr.msg_type = CTL_MSG_BAD_JUJU;
- STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
- &io->io_hdr, links);
- ctl_wakeup_thread();
+ ctl_enqueue_isc(io);
break;
/* Handle resets sent from the other side */
@@ -855,7 +844,6 @@ ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
/* Bad Juju */
/* should I just call the proper reset func
here??? */
- mtx_unlock(&ctl_softc->ctl_lock);
goto bailout;
}
ctl_zero_io((union ctl_io *)taskio);
@@ -872,10 +860,7 @@ ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
cs_prof_gettime(&taskio->io_hdr.start_ticks);
#endif
#endif /* CTL_TIME_IO */
- STAILQ_INSERT_TAIL(&ctl_softc->task_queue,
- &taskio->io_hdr, links);
- ctl_softc->flags |= CTL_FLAG_TASK_PENDING;
- ctl_wakeup_thread();
+ ctl_run_task((union ctl_io *)taskio);
break;
}
/* Persistent Reserve action which needs attention */
@@ -887,15 +872,12 @@ ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
"ctl_io!\n");
/* Bad Juju */
/* Need to set busy and send msg back */
- mtx_unlock(&ctl_softc->ctl_lock);
goto bailout;
}
ctl_zero_io((union ctl_io *)presio);
presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION;
presio->pr_msg = msg_info.pr;
- STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
- &presio->io_hdr, links);
- ctl_wakeup_thread();
+ ctl_enqueue_isc((union ctl_io *)presio);
break;
case CTL_MSG_SYNC_FE:
rcv_sync_msg = 1;
@@ -908,23 +890,21 @@ ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
struct copan_aps_subpage *current_sp;
uint32_t targ_lun;
- targ_lun = msg_info.hdr.nexus.targ_lun;
- if (msg_info.hdr.nexus.lun_map_fn != NULL)
- targ_lun = msg_info.hdr.nexus.lun_map_fn(msg_info.hdr.nexus.lun_map_arg, targ_lun);
-
+ targ_lun = msg_info.hdr.nexus.targ_mapped_lun;
lun = ctl_softc->ctl_luns[targ_lun];
+ mtx_lock(&lun->lun_lock);
page_index = &lun->mode_pages.index[index_to_aps_page];
current_sp = (struct copan_aps_subpage *)
(page_index->page_data +
(page_index->page_len * CTL_PAGE_CURRENT));
current_sp->lock_active = msg_info.aps.lock_flag;
+ mtx_unlock(&lun->lun_lock);
break;
}
default:
printf("How did I get here?\n");
}
- mtx_unlock(&ctl_softc->ctl_lock);
} else if (event == CTL_HA_EVT_MSG_SENT) {
if (param != CTL_HA_STATUS_SUCCESS) {
printf("Bad status from ctl_ha_msg_send status %d\n",
@@ -960,8 +940,7 @@ ctl_init(void)
{
struct ctl_softc *softc;
struct ctl_io_pool *internal_pool, *emergency_pool, *other_pool;
- struct ctl_frontend *fe;
- struct ctl_lun *lun;
+ struct ctl_port *port;
uint8_t sc_id =0;
int i, error, retval;
//int isc_retval;
@@ -1040,22 +1019,11 @@ ctl_init(void)
softc->target.wwid[1] = 0x87654321;
STAILQ_INIT(&softc->lun_list);
STAILQ_INIT(&softc->pending_lun_queue);
- STAILQ_INIT(&softc->task_queue);
- STAILQ_INIT(&softc->incoming_queue);
- STAILQ_INIT(&softc->rtr_queue);
- STAILQ_INIT(&softc->done_queue);
- STAILQ_INIT(&softc->isc_queue);
STAILQ_INIT(&softc->fe_list);
+ STAILQ_INIT(&softc->port_list);
STAILQ_INIT(&softc->be_list);
STAILQ_INIT(&softc->io_pools);
- lun = &softc->lun;
-
- /*
- * We don't bother calling these with ctl_lock held here, because,
- * in theory, no one else can try to do anything while we're in our
- * module init routine.
- */
if (ctl_pool_create(softc, CTL_POOL_INTERNAL, CTL_POOL_ENTRIES_INTERNAL,
&internal_pool)!= 0){
printf("ctl: can't allocate %d entry internal pool, "
@@ -1085,76 +1053,65 @@ ctl_init(void)
softc->emergency_pool = emergency_pool;
softc->othersc_pool = other_pool;
- /*
- * We used to allocate a processor LUN here. The new scheme is to
- * just let the user allocate LUNs as he sees fit.
- */
-#if 0
- mtx_lock(&softc->ctl_lock);
- ctl_alloc_lun(softc, lun, /*be_lun*/NULL, /*target*/softc->target);
- mtx_unlock(&softc->ctl_lock);
-#endif
-
- if (worker_threads > MAXCPU || worker_threads == 0) {
- printf("invalid kern.cam.ctl.worker_threads value; "
- "setting to 1");
- worker_threads = 1;
- } else if (worker_threads < 0) {
- if (mp_ncpus > 2) {
- /*
- * Using more than two worker threads actually hurts
- * performance due to lock contention.
- */
- worker_threads = 2;
- } else {
- worker_threads = 1;
- }
- }
+ if (worker_threads <= 0)
+ worker_threads = max(1, mp_ncpus / 4);
+ if (worker_threads > CTL_MAX_THREADS)
+ worker_threads = CTL_MAX_THREADS;
for (i = 0; i < worker_threads; i++) {
- error = kproc_kthread_add(ctl_work_thread, softc,
- &softc->work_thread, NULL, 0, 0, "ctl", "work%d", i);
+ struct ctl_thread *thr = &softc->threads[i];
+
+ mtx_init(&thr->queue_lock, "CTL queue mutex", NULL, MTX_DEF);
+ thr->ctl_softc = softc;
+ STAILQ_INIT(&thr->incoming_queue);
+ STAILQ_INIT(&thr->rtr_queue);
+ STAILQ_INIT(&thr->done_queue);
+ STAILQ_INIT(&thr->isc_queue);
+
+ error = kproc_kthread_add(ctl_work_thread, thr,
+ &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i);
if (error != 0) {
printf("error creating CTL work thread!\n");
- mtx_lock(&softc->ctl_lock);
- ctl_free_lun(lun);
- mtx_unlock(&softc->ctl_lock);
ctl_pool_free(internal_pool);
ctl_pool_free(emergency_pool);
ctl_pool_free(other_pool);
return (error);
}
}
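/*
 * Editorial sketch, not part of this change: the bodies of the new
 * ctl_enqueue_*() helpers are not shown in this excerpt.  Roughly, each one
 * is expected to pick a worker from softc->threads[] and queue the I/O under
 * that worker's own queue_lock (initialized in the loop above) instead of
 * the old global queues.  The hash used to choose a thread and the wakeup()
 * call below are assumptions, as is the function name.
 */
static void
example_enqueue_incoming(union ctl_io *io)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_thread *thr;
	unsigned int idx;

	idx = (io->io_hdr.nexus.targ_port * 127 +
	    io->io_hdr.nexus.targ_lun) % worker_threads;
	thr = &softc->threads[idx];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}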
+ error = kproc_kthread_add(ctl_lun_thread, softc,
+ &softc->ctl_proc, NULL, 0, 0, "ctl", "lun");
+ if (error != 0) {
+ printf("error creating CTL lun thread!\n");
+ ctl_pool_free(internal_pool);
+ ctl_pool_free(emergency_pool);
+ ctl_pool_free(other_pool);
+ return (error);
+ }
if (bootverbose)
printf("ctl: CAM Target Layer loaded\n");
/*
- * Initialize the initiator and portname mappings
- */
- memset(softc->wwpn_iid, 0, sizeof(softc->wwpn_iid));
-
- /*
* Initialize the ioctl front end.
*/
- fe = &softc->ioctl_info.fe;
- sprintf(softc->ioctl_info.port_name, "CTL ioctl");
- fe->port_type = CTL_PORT_IOCTL;
- fe->num_requested_ctl_io = 100;
- fe->port_name = softc->ioctl_info.port_name;
- fe->port_online = ctl_ioctl_online;
- fe->port_offline = ctl_ioctl_offline;
- fe->onoff_arg = &softc->ioctl_info;
- fe->targ_enable = ctl_ioctl_targ_enable;
- fe->targ_disable = ctl_ioctl_targ_disable;
- fe->lun_enable = ctl_ioctl_lun_enable;
- fe->lun_disable = ctl_ioctl_lun_disable;
- fe->targ_lun_arg = &softc->ioctl_info;
- fe->fe_datamove = ctl_ioctl_datamove;
- fe->fe_done = ctl_ioctl_done;
- fe->max_targets = 15;
- fe->max_target_id = 15;
+ ctl_frontend_register(&ioctl_frontend);
+ port = &softc->ioctl_info.port;
+ port->frontend = &ioctl_frontend;
+ sprintf(softc->ioctl_info.port_name, "ioctl");
+ port->port_type = CTL_PORT_IOCTL;
+ port->num_requested_ctl_io = 100;
+ port->port_name = softc->ioctl_info.port_name;
+ port->port_online = ctl_ioctl_online;
+ port->port_offline = ctl_ioctl_offline;
+ port->onoff_arg = &softc->ioctl_info;
+ port->lun_enable = ctl_ioctl_lun_enable;
+ port->lun_disable = ctl_ioctl_lun_disable;
+ port->targ_lun_arg = &softc->ioctl_info;
+ port->fe_datamove = ctl_ioctl_datamove;
+ port->fe_done = ctl_ioctl_done;
+ port->max_targets = 15;
+ port->max_target_id = 15;
- if (ctl_frontend_register(&softc->ioctl_info.fe,
+ if (ctl_port_register(&softc->ioctl_info.port,
(softc->flags & CTL_FLAG_MASTER_SHELF)) != 0) {
printf("ctl: ioctl front end registration failed, will "
"continue anyway\n");
@@ -1180,7 +1137,7 @@ ctl_shutdown(void)
softc = (struct ctl_softc *)control_softc;
- if (ctl_frontend_deregister(&softc->ioctl_info.fe) != 0)
+ if (ctl_port_deregister(&softc->ioctl_info.port) != 0)
printf("ctl: ioctl front end deregistration failed\n");
mtx_lock(&softc->ctl_lock);
@@ -1195,6 +1152,8 @@ ctl_shutdown(void)
mtx_unlock(&softc->ctl_lock);
+ ctl_frontend_deregister(&ioctl_frontend);
+
/*
* This will rip the rug out from under any FETDs or anyone else
* that has a pool allocated. Since we increment our module
@@ -1208,6 +1167,7 @@ ctl_shutdown(void)
#if 0
ctl_shutdown_thread(softc->work_thread);
+ mtx_destroy(&softc->queue_lock);
#endif
mtx_destroy(&softc->pool_lock);
@@ -1258,7 +1218,7 @@ int
ctl_port_enable(ctl_port_type port_type)
{
struct ctl_softc *softc;
- struct ctl_frontend *fe;
+ struct ctl_port *port;
if (ctl_is_single == 0) {
union ctl_ha_msg msg_info;
@@ -1287,13 +1247,13 @@ ctl_port_enable(ctl_port_type port_type)
softc = control_softc;
- STAILQ_FOREACH(fe, &softc->fe_list, links) {
- if (port_type & fe->port_type)
+ STAILQ_FOREACH(port, &softc->port_list, links) {
+ if (port_type & port->port_type)
{
#if 0
- printf("port %d\n", fe->targ_port);
+ printf("port %d\n", port->targ_port);
#endif
- ctl_frontend_online(fe);
+ ctl_port_online(port);
}
}
@@ -1304,13 +1264,13 @@ int
ctl_port_disable(ctl_port_type port_type)
{
struct ctl_softc *softc;
- struct ctl_frontend *fe;
+ struct ctl_port *port;
softc = control_softc;
- STAILQ_FOREACH(fe, &softc->fe_list, links) {
- if (port_type & fe->port_type)
- ctl_frontend_offline(fe);
+ STAILQ_FOREACH(port, &softc->port_list, links) {
+ if (port_type & port->port_type)
+ ctl_port_offline(port);
}
return (0);
@@ -1328,7 +1288,7 @@ ctl_port_list(struct ctl_port_entry *entries, int num_entries_alloced,
ctl_port_type port_type, int no_virtual)
{
struct ctl_softc *softc;
- struct ctl_frontend *fe;
+ struct ctl_port *port;
int entries_dropped, entries_filled;
int retval;
int i;
@@ -1341,14 +1301,14 @@ ctl_port_list(struct ctl_port_entry *entries, int num_entries_alloced,
i = 0;
mtx_lock(&softc->ctl_lock);
- STAILQ_FOREACH(fe, &softc->fe_list, links) {
+ STAILQ_FOREACH(port, &softc->port_list, links) {
struct ctl_port_entry *entry;
- if ((fe->port_type & port_type) == 0)
+ if ((port->port_type & port_type) == 0)
continue;
if ((no_virtual != 0)
- && (fe->virtual_port != 0))
+ && (port->virtual_port != 0))
continue;
if (entries_filled >= num_entries_alloced) {
@@ -1357,13 +1317,13 @@ ctl_port_list(struct ctl_port_entry *entries, int num_entries_alloced,
}
entry = &entries[i];
- entry->port_type = fe->port_type;
- strlcpy(entry->port_name, fe->port_name,
+ entry->port_type = port->port_type;
+ strlcpy(entry->port_name, port->port_name,
sizeof(entry->port_name));
- entry->physical_port = fe->physical_port;
- entry->virtual_port = fe->virtual_port;
- entry->wwnn = fe->wwnn;
- entry->wwpn = fe->wwpn;
+ entry->physical_port = port->physical_port;
+ entry->virtual_port = port->virtual_port;
+ entry->wwnn = port->wwnn;
+ entry->wwpn = port->wwpn;
i++;
entries_filled++;
@@ -1402,32 +1362,24 @@ ctl_ioctl_offline(void *arg)
/*
* Remove an initiator by port number and initiator ID.
- * Returns 0 for success, 1 for failure.
+ * Returns 0 for success, -1 for failure.
*/
int
-ctl_remove_initiator(int32_t targ_port, uint32_t iid)
+ctl_remove_initiator(struct ctl_port *port, int iid)
{
- struct ctl_softc *softc;
-
- softc = control_softc;
+ struct ctl_softc *softc = control_softc;
mtx_assert(&softc->ctl_lock, MA_NOTOWNED);
- if ((targ_port < 0)
- || (targ_port > CTL_MAX_PORTS)) {
- printf("%s: invalid port number %d\n", __func__, targ_port);
- return (1);
- }
if (iid > CTL_MAX_INIT_PER_PORT) {
printf("%s: initiator ID %u > maximun %u!\n",
__func__, iid, CTL_MAX_INIT_PER_PORT);
- return (1);
+ return (-1);
}
mtx_lock(&softc->ctl_lock);
-
- softc->wwpn_iid[targ_port][iid].in_use = 0;
-
+ port->wwpn_iid[iid].in_use--;
+ port->wwpn_iid[iid].last_use = time_uptime;
mtx_unlock(&softc->ctl_lock);
return (0);
@@ -1435,41 +1387,91 @@ ctl_remove_initiator(int32_t targ_port, uint32_t iid)
/*
* Add an initiator to the initiator map.
- * Returns 0 for success, 1 for failure.
+ * Returns iid for success, < 0 for failure.
*/
int
-ctl_add_initiator(uint64_t wwpn, int32_t targ_port, uint32_t iid)
+ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name)
{
- struct ctl_softc *softc;
- int retval;
-
- softc = control_softc;
+ struct ctl_softc *softc = control_softc;
+ time_t best_time;
+ int i, best;
mtx_assert(&softc->ctl_lock, MA_NOTOWNED);
- retval = 0;
-
- if ((targ_port < 0)
- || (targ_port > CTL_MAX_PORTS)) {
- printf("%s: invalid port number %d\n", __func__, targ_port);
- return (1);
- }
- if (iid > CTL_MAX_INIT_PER_PORT) {
- printf("%s: WWPN %#jx initiator ID %u > maximun %u!\n",
+ if (iid >= CTL_MAX_INIT_PER_PORT) {
+ printf("%s: WWPN %#jx initiator ID %u > maximum %u!\n",
__func__, wwpn, iid, CTL_MAX_INIT_PER_PORT);
- return (1);
+ free(name, M_CTL);
+ return (-1);
}
mtx_lock(&softc->ctl_lock);
- if (softc->wwpn_iid[targ_port][iid].in_use != 0) {
+ if (iid < 0 && (wwpn != 0 || name != NULL)) {
+ for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
+ if (wwpn != 0 && wwpn == port->wwpn_iid[i].wwpn) {
+ iid = i;
+ break;
+ }
+ if (name != NULL && port->wwpn_iid[i].name != NULL &&
+ strcmp(name, port->wwpn_iid[i].name) == 0) {
+ iid = i;
+ break;
+ }
+ }
+ }
+
+ if (iid < 0) {
+ for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
+ if (port->wwpn_iid[i].in_use == 0 &&
+ port->wwpn_iid[i].wwpn == 0 &&
+ port->wwpn_iid[i].name == NULL) {
+ iid = i;
+ break;
+ }
+ }
+ }
+
+ if (iid < 0) {
+ best = -1;
+ best_time = INT32_MAX;
+ for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
+ if (port->wwpn_iid[i].in_use == 0) {
+ if (port->wwpn_iid[i].last_use < best_time) {
+ best = i;
+ best_time = port->wwpn_iid[i].last_use;
+ }
+ }
+ }
+ iid = best;
+ }
+
+ if (iid < 0) {
+ mtx_unlock(&softc->ctl_lock);
+ free(name, M_CTL);
+ return (-2);
+ }
+
+ if (port->wwpn_iid[iid].in_use > 0 && (wwpn != 0 || name != NULL)) {
/*
- * We don't treat this as an error.
+ * This is not an error yet.
*/
- if (softc->wwpn_iid[targ_port][iid].wwpn == wwpn) {
- printf("%s: port %d iid %u WWPN %#jx arrived again?\n",
- __func__, targ_port, iid, (uintmax_t)wwpn);
- goto bailout;
+ if (wwpn != 0 && wwpn == port->wwpn_iid[iid].wwpn) {
+#if 0
+ printf("%s: port %d iid %u WWPN %#jx arrived"
+ " again\n", __func__, port->targ_port,
+ iid, (uintmax_t)wwpn);
+#endif
+ goto take;
+ }
+ if (name != NULL && port->wwpn_iid[iid].name != NULL &&
+ strcmp(name, port->wwpn_iid[iid].name) == 0) {
+#if 0
+ printf("%s: port %d iid %u name '%s' arrived"
+ " again\n", __func__, port->targ_port,
+ iid, name);
+#endif
+ goto take;
}
/*
@@ -1477,42 +1479,80 @@ ctl_add_initiator(uint64_t wwpn, int32_t targ_port, uint32_t iid)
* driver is telling us we have a new WWPN for this
* initiator ID, so we pretty much need to use it.
*/
- printf("%s: port %d iid %u WWPN %#jx arrived, WWPN %#jx is "
- "still at that address\n", __func__, targ_port, iid,
- (uintmax_t)wwpn,
- (uintmax_t)softc->wwpn_iid[targ_port][iid].wwpn);
+ printf("%s: port %d iid %u WWPN %#jx '%s' arrived,"
+ " but WWPN %#jx '%s' is still at that address\n",
+ __func__, port->targ_port, iid, wwpn, name,
+ (uintmax_t)port->wwpn_iid[iid].wwpn,
+ port->wwpn_iid[iid].name);
/*
* XXX KDM clear have_ca and ua_pending on each LUN for
* this initiator.
*/
}
- softc->wwpn_iid[targ_port][iid].in_use = 1;
- softc->wwpn_iid[targ_port][iid].iid = iid;
- softc->wwpn_iid[targ_port][iid].wwpn = wwpn;
- softc->wwpn_iid[targ_port][iid].port = targ_port;
-
-bailout:
-
+take:
+ free(port->wwpn_iid[iid].name, M_CTL);
+ port->wwpn_iid[iid].name = name;
+ port->wwpn_iid[iid].wwpn = wwpn;
+ port->wwpn_iid[iid].in_use++;
mtx_unlock(&softc->ctl_lock);
- return (retval);
+ return (iid);
}
-/*
- * XXX KDM should we pretend to do something in the target/lun
- * enable/disable functions?
- */
static int
-ctl_ioctl_targ_enable(void *arg, struct ctl_id targ_id)
+ctl_create_iid(struct ctl_port *port, int iid, uint8_t *buf)
{
- return (0);
-}
+ int len;
-static int
-ctl_ioctl_targ_disable(void *arg, struct ctl_id targ_id)
-{
- return (0);
+ switch (port->port_type) {
+ case CTL_PORT_FC:
+ {
+ struct scsi_transportid_fcp *id =
+ (struct scsi_transportid_fcp *)buf;
+ if (port->wwpn_iid[iid].wwpn == 0)
+ return (0);
+ memset(id, 0, sizeof(*id));
+ id->format_protocol = SCSI_PROTO_FC;
+ scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->n_port_name);
+ return (sizeof(*id));
+ }
+ case CTL_PORT_ISCSI:
+ {
+ struct scsi_transportid_iscsi_port *id =
+ (struct scsi_transportid_iscsi_port *)buf;
+ if (port->wwpn_iid[iid].name == NULL)
+ return (0);
+ memset(id, 0, 256);
+ id->format_protocol = SCSI_TRN_ISCSI_FORMAT_PORT |
+ SCSI_PROTO_ISCSI;
+ len = strlcpy(id->iscsi_name, port->wwpn_iid[iid].name, 252) + 1;
+ len = roundup2(min(len, 252), 4);
+ scsi_ulto2b(len, id->additional_length);
+ return (sizeof(*id) + len);
+ }
+ case CTL_PORT_SAS:
+ {
+ struct scsi_transportid_sas *id =
+ (struct scsi_transportid_sas *)buf;
+ if (port->wwpn_iid[iid].wwpn == 0)
+ return (0);
+ memset(id, 0, sizeof(*id));
+ id->format_protocol = SCSI_PROTO_SAS;
+ scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->sas_address);
+ return (sizeof(*id));
+ }
+ default:
+ {
+ struct scsi_transportid_spi *id =
+ (struct scsi_transportid_spi *)buf;
+ memset(id, 0, sizeof(*id));
+ id->format_protocol = SCSI_PROTO_SPI;
+ scsi_ulto2b(iid, id->scsi_addr);
+ scsi_ulto2b(port->targ_port, id->rel_trgt_port_id);
+ return (sizeof(*id));
+ }
+ }
}
static int
@@ -1699,7 +1739,7 @@ bailout:
* (SER_ONLY mode).
*/
static int
-ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio, int have_lock)
+ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio)
{
struct ctl_softc *ctl_softc;
union ctl_ha_msg msg_info;
@@ -1708,12 +1748,8 @@ ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio, int have_lock)
uint32_t targ_lun;
ctl_softc = control_softc;
- if (have_lock == 0)
- mtx_lock(&ctl_softc->ctl_lock);
- targ_lun = ctsio->io_hdr.nexus.targ_lun;
- if (ctsio->io_hdr.nexus.lun_map_fn != NULL)
- targ_lun = ctsio->io_hdr.nexus.lun_map_fn(ctsio->io_hdr.nexus.lun_map_arg, targ_lun);
+ targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
lun = ctl_softc->ctl_luns[targ_lun];
if (lun==NULL)
{
@@ -1742,12 +1778,11 @@ ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio, int have_lock)
if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
}
- if (have_lock == 0)
- mtx_unlock(&ctl_softc->ctl_lock);
return(1);
}
+ mtx_lock(&lun->lun_lock);
TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
switch (ctl_check_ooa(lun, (union ctl_io *)ctsio,
@@ -1762,8 +1797,7 @@ ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio, int have_lock)
case CTL_ACTION_SKIP:
if (ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
- STAILQ_INSERT_TAIL(&ctl_softc->rtr_queue,
- &ctsio->io_hdr, links);
+ ctl_enqueue_rtr((union ctl_io *)ctsio);
} else {
/* send msg back to other side */
@@ -1858,8 +1892,7 @@ ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio, int have_lock)
}
break;
}
- if (have_lock == 0)
- mtx_unlock(&ctl_softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
return (retval);
}
@@ -2018,8 +2051,7 @@ ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
retval = 0;
- mtx_assert(&control_softc->ctl_lock, MA_OWNED);
-
+ mtx_lock(&lun->lun_lock);
for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); (io != NULL);
(*cur_fill_num)++, io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr,
ooa_links)) {
@@ -2056,6 +2088,7 @@ ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED)
entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED;
}
+ mtx_unlock(&lun->lun_lock);
return (retval);
}
@@ -2080,40 +2113,40 @@ ctl_copyin_alloc(void *user_addr, int len, char *error_str,
}
static void
-ctl_free_args(int num_be_args, struct ctl_be_arg *be_args)
+ctl_free_args(int num_args, struct ctl_be_arg *args)
{
int i;
- if (be_args == NULL)
+ if (args == NULL)
return;
- for (i = 0; i < num_be_args; i++) {
- free(be_args[i].kname, M_CTL);
- free(be_args[i].kvalue, M_CTL);
+ for (i = 0; i < num_args; i++) {
+ free(args[i].kname, M_CTL);
+ free(args[i].kvalue, M_CTL);
}
- free(be_args, M_CTL);
+ free(args, M_CTL);
}
static struct ctl_be_arg *
-ctl_copyin_args(int num_be_args, struct ctl_be_arg *be_args,
+ctl_copyin_args(int num_args, struct ctl_be_arg *uargs,
char *error_str, size_t error_str_len)
{
struct ctl_be_arg *args;
int i;
- args = ctl_copyin_alloc(be_args, num_be_args * sizeof(*be_args),
+ args = ctl_copyin_alloc(uargs, num_args * sizeof(*args),
error_str, error_str_len);
if (args == NULL)
goto bailout;
- for (i = 0; i < num_be_args; i++) {
+ for (i = 0; i < num_args; i++) {
args[i].kname = NULL;
args[i].kvalue = NULL;
}
- for (i = 0; i < num_be_args; i++) {
+ for (i = 0; i < num_args; i++) {
uint8_t *tmpptr;
args[i].kname = ctl_copyin_alloc(args[i].name,
@@ -2127,31 +2160,43 @@ ctl_copyin_args(int num_be_args, struct ctl_be_arg *be_args,
goto bailout;
}
- args[i].kvalue = NULL;
-
- tmpptr = ctl_copyin_alloc(args[i].value,
- args[i].vallen, error_str, error_str_len);
- if (tmpptr == NULL)
- goto bailout;
-
- args[i].kvalue = tmpptr;
-
- if ((args[i].flags & CTL_BEARG_ASCII)
- && (tmpptr[args[i].vallen - 1] != '\0')) {
- snprintf(error_str, error_str_len, "Argument %d "
- "value is not NUL-terminated", i);
- goto bailout;
+ if (args[i].flags & CTL_BEARG_RD) {
+ tmpptr = ctl_copyin_alloc(args[i].value,
+ args[i].vallen, error_str, error_str_len);
+ if (tmpptr == NULL)
+ goto bailout;
+ if ((args[i].flags & CTL_BEARG_ASCII)
+ && (tmpptr[args[i].vallen - 1] != '\0')) {
+ snprintf(error_str, error_str_len, "Argument "
+ "%d value is not NUL-terminated", i);
+ goto bailout;
+ }
+ args[i].kvalue = tmpptr;
+ } else {
+ args[i].kvalue = malloc(args[i].vallen,
+ M_CTL, M_WAITOK | M_ZERO);
}
}
return (args);
bailout:
- ctl_free_args(num_be_args, args);
+ ctl_free_args(num_args, args);
return (NULL);
}
+static void
+ctl_copyout_args(int num_args, struct ctl_be_arg *args)
+{
+ int i;
+
+ for (i = 0; i < num_args; i++) {
+ if (args[i].flags & CTL_BEARG_WR)
+ copyout(args[i].kvalue, args[i].value, args[i].vallen);
+ }
+}
+
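/*
 * Editorial example, not part of this change: how one backend argument might
 * be described by a userland caller so that ctl_copyin_args() and
 * ctl_copyout_args() above handle it.  A CTL_BEARG_RD value is copied into
 * the kernel (and, with CTL_BEARG_ASCII, checked for NUL termination); a
 * CTL_BEARG_WR value gets a zeroed kernel buffer that is copied back out
 * after the backend ioctl returns.  Field names beyond those visible in this
 * diff are omitted, and the helper name is hypothetical.
 */
static void
example_fill_be_arg(struct ctl_be_arg *arg, const char *name, char *value,
    int vallen)
{
	arg->name = (char *)name;	/* user-space option name */
	arg->value = value;		/* user-space value buffer */
	arg->vallen = vallen;
	arg->flags = CTL_BEARG_ASCII | CTL_BEARG_RD;
}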
/*
* Escape characters that are illegal or not recommended in XML.
*/
@@ -2207,14 +2252,14 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
* to this FETD.
*/
if ((softc->ioctl_info.flags & CTL_IOCTL_FLAG_ENABLED) == 0) {
- retval = -EPERM;
+ retval = EPERM;
break;
}
- io = ctl_alloc_io(softc->ioctl_info.fe.ctl_pool_ref);
+ io = ctl_alloc_io(softc->ioctl_info.port.ctl_pool_ref);
if (io == NULL) {
printf("ctl_ioctl: can't allocate ctl_io!\n");
- retval = -ENOSPC;
+ retval = ENOSPC;
break;
}
@@ -2235,7 +2280,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
/*
* The user sets the initiator ID, target and LUN IDs.
*/
- io->io_hdr.nexus.targ_port = softc->ioctl_info.fe.targ_port;
+ io->io_hdr.nexus.targ_port = softc->ioctl_info.port.targ_port;
io->io_hdr.flags |= CTL_FLAG_USER_REQ;
if ((io->io_hdr.io_type == CTL_IO_SCSI)
&& (io->scsiio.tag_type != CTL_TAG_UNTAGGED))
@@ -2258,20 +2303,20 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
case CTL_ENABLE_PORT:
case CTL_DISABLE_PORT:
case CTL_SET_PORT_WWNS: {
- struct ctl_frontend *fe;
+ struct ctl_port *port;
struct ctl_port_entry *entry;
entry = (struct ctl_port_entry *)addr;
mtx_lock(&softc->ctl_lock);
- STAILQ_FOREACH(fe, &softc->fe_list, links) {
+ STAILQ_FOREACH(port, &softc->port_list, links) {
int action, done;
action = 0;
done = 0;
if ((entry->port_type == CTL_PORT_NONE)
- && (entry->targ_port == fe->targ_port)) {
+ && (entry->targ_port == port->targ_port)) {
/*
* If the user only wants to enable or
* disable or set WWNs on a specific port,
@@ -2279,7 +2324,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
*/
action = 1;
done = 1;
- } else if (entry->port_type & fe->port_type) {
+ } else if (entry->port_type & port->port_type) {
/*
* Compare the user's type mask with the
* particular frontend type to see if we
@@ -2314,21 +2359,21 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
STAILQ_FOREACH(lun, &softc->lun_list,
links) {
- fe->lun_enable(fe->targ_lun_arg,
+ port->lun_enable(port->targ_lun_arg,
lun->target,
lun->lun);
}
- ctl_frontend_online(fe);
+ ctl_port_online(port);
} else if (cmd == CTL_DISABLE_PORT) {
struct ctl_lun *lun;
- ctl_frontend_offline(fe);
+ ctl_port_offline(port);
STAILQ_FOREACH(lun, &softc->lun_list,
links) {
- fe->lun_disable(
- fe->targ_lun_arg,
+ port->lun_disable(
+ port->targ_lun_arg,
lun->target,
lun->lun);
}
@@ -2337,7 +2382,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
mtx_lock(&softc->ctl_lock);
if (cmd == CTL_SET_PORT_WWNS)
- ctl_frontend_set_wwns(fe,
+ ctl_port_set_wwns(port,
(entry->flags & CTL_PORT_WWNN_VALID) ?
1 : 0, entry->wwnn,
(entry->flags & CTL_PORT_WWPN_VALID) ?
@@ -2350,7 +2395,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
break;
}
case CTL_GET_PORT_LIST: {
- struct ctl_frontend *fe;
+ struct ctl_port *port;
struct ctl_port_list *list;
int i;
@@ -2370,7 +2415,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
list->dropped_num = 0;
i = 0;
mtx_lock(&softc->ctl_lock);
- STAILQ_FOREACH(fe, &softc->fe_list, links) {
+ STAILQ_FOREACH(port, &softc->port_list, links) {
struct ctl_port_entry entry, *list_entry;
if (list->fill_num >= list->alloc_num) {
@@ -2378,15 +2423,15 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
continue;
}
- entry.port_type = fe->port_type;
- strlcpy(entry.port_name, fe->port_name,
+ entry.port_type = port->port_type;
+ strlcpy(entry.port_name, port->port_name,
sizeof(entry.port_name));
- entry.targ_port = fe->targ_port;
- entry.physical_port = fe->physical_port;
- entry.virtual_port = fe->virtual_port;
- entry.wwnn = fe->wwnn;
- entry.wwpn = fe->wwpn;
- if (fe->status & CTL_PORT_STATUS_ONLINE)
+ entry.targ_port = port->targ_port;
+ entry.physical_port = port->physical_port;
+ entry.virtual_port = port->virtual_port;
+ entry.wwnn = port->wwnn;
+ entry.wwpn = port->wwpn;
+ if (port->status & CTL_PORT_STATUS_ONLINE)
entry.online = 1;
else
entry.online = 0;
@@ -2428,6 +2473,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
mtx_lock(&softc->ctl_lock);
printf("Dumping OOA queues:\n");
STAILQ_FOREACH(lun, &softc->lun_list, links) {
+ mtx_lock(&lun->lun_lock);
for (io = (union ctl_io *)TAILQ_FIRST(
&lun->ooa_queue); io != NULL;
io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr,
@@ -2449,6 +2495,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
sbuf_finish(&sb);
printf("%s\n", sbuf_data(&sb));
}
+ mtx_unlock(&lun->lun_lock);
}
printf("OOA queues dump done\n");
mtx_unlock(&softc->ctl_lock);
@@ -2564,15 +2611,16 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
ooa_info->status = CTL_OOA_INVALID_LUN;
break;
}
-
+ mtx_lock(&lun->lun_lock);
+ mtx_unlock(&softc->ctl_lock);
ooa_info->num_entries = 0;
for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue);
io != NULL; io = (union ctl_io *)TAILQ_NEXT(
&io->io_hdr, ooa_links)) {
ooa_info->num_entries++;
}
+ mtx_unlock(&lun->lun_lock);
- mtx_unlock(&softc->ctl_lock);
ooa_info->status = CTL_OOA_SUCCESS;
break;
@@ -2690,6 +2738,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
delay_info->status = CTL_DELAY_STATUS_INVALID_LUN;
} else {
lun = softc->ctl_luns[delay_info->lun_id];
+ mtx_lock(&lun->lun_lock);
delay_info->status = CTL_DELAY_STATUS_OK;
@@ -2722,6 +2771,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
CTL_DELAY_STATUS_INVALID_LOC;
break;
}
+ mtx_unlock(&lun->lun_lock);
}
mtx_unlock(&softc->ctl_lock);
@@ -2744,7 +2794,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
softc->flags |= CTL_FLAG_REAL_SYNC;
break;
default:
- retval = -EINVAL;
+ retval = EINVAL;
break;
}
mtx_unlock(&softc->ctl_lock);
@@ -2782,12 +2832,13 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
* in the set case, hopefully the user won't do something
* silly.
*/
+ mtx_lock(&lun->lun_lock);
+ mtx_unlock(&softc->ctl_lock);
if (cmd == CTL_GETSYNC)
sync_info->sync_interval = lun->sync_interval;
else
lun->sync_interval = sync_info->sync_interval;
-
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
sync_info->status = CTL_GS_SYNC_OK;
@@ -2848,6 +2899,8 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
retval = EINVAL;
break;
}
+ mtx_lock(&lun->lun_lock);
+ mtx_unlock(&softc->ctl_lock);
/*
* We could do some checking here to verify the validity
@@ -2870,7 +2923,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
err_desc->serial = lun->error_serial;
lun->error_serial++;
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
break;
}
case CTL_ERROR_INJECT_DELETE: {
@@ -2890,6 +2943,8 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
retval = EINVAL;
break;
}
+ mtx_lock(&lun->lun_lock);
+ mtx_unlock(&softc->ctl_lock);
STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) {
if (desc->serial != delete_desc->serial)
continue;
@@ -2899,7 +2954,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
free(desc, M_CTL);
delete_done = 1;
}
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
if (delete_done == 0) {
printf("%s: CTL_ERROR_INJECT_DELETE: can't find "
"error serial %ju on LUN %u\n", __func__,
@@ -2910,22 +2965,11 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
break;
}
case CTL_DUMP_STRUCTS: {
- int i, j, k;
+ int i, j, k, idx;
+ struct ctl_port *port;
struct ctl_frontend *fe;
- printf("CTL IID to WWPN map start:\n");
- for (i = 0; i < CTL_MAX_PORTS; i++) {
- for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
- if (softc->wwpn_iid[i][j].in_use == 0)
- continue;
-
- printf("port %d iid %u WWPN %#jx\n",
- softc->wwpn_iid[i][j].port,
- softc->wwpn_iid[i][j].iid,
- (uintmax_t)softc->wwpn_iid[i][j].wwpn);
- }
- }
- printf("CTL IID to WWPN map end\n");
+ mtx_lock(&softc->ctl_lock);
printf("CTL Persistent Reservation information start:\n");
for (i = 0; i < CTL_MAX_LUNS; i++) {
struct ctl_lun *lun;
@@ -2938,36 +2982,48 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
for (j = 0; j < (CTL_MAX_PORTS * 2); j++) {
for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){
- if (lun->per_res[j+k].registered == 0)
+ idx = j * CTL_MAX_INIT_PER_PORT + k;
+ if (lun->per_res[idx].registered == 0)
continue;
- printf("LUN %d port %d iid %d key "
+ printf(" LUN %d port %d iid %d key "
"%#jx\n", i, j, k,
(uintmax_t)scsi_8btou64(
- lun->per_res[j+k].res_key.key));
+ lun->per_res[idx].res_key.key));
}
}
}
printf("CTL Persistent Reservation information end\n");
- printf("CTL Frontends:\n");
+ printf("CTL Ports:\n");
+ STAILQ_FOREACH(port, &softc->port_list, links) {
+ printf(" Port %d '%s' Frontend '%s' Type %u pp %d vp %d WWNN "
+ "%#jx WWPN %#jx\n", port->targ_port, port->port_name,
+ port->frontend->name, port->port_type,
+ port->physical_port, port->virtual_port,
+ (uintmax_t)port->wwnn, (uintmax_t)port->wwpn);
+ for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
+ if (port->wwpn_iid[j].in_use == 0 &&
+ port->wwpn_iid[j].wwpn == 0 &&
+ port->wwpn_iid[j].name == NULL)
+ continue;
+
+ printf(" iid %u use %d WWPN %#jx '%s'\n",
+ j, port->wwpn_iid[j].in_use,
+ (uintmax_t)port->wwpn_iid[j].wwpn,
+ port->wwpn_iid[j].name);
+ }
+ }
+ printf("CTL Port information end\n");
+ mtx_unlock(&softc->ctl_lock);
/*
* XXX KDM calling this without a lock. We'd likely want
* to drop the lock before calling the frontend's dump
* routine anyway.
*/
+ printf("CTL Frontends:\n");
STAILQ_FOREACH(fe, &softc->fe_list, links) {
- printf("Frontend %s Type %u pport %d vport %d WWNN "
- "%#jx WWPN %#jx\n", fe->port_name, fe->port_type,
- fe->physical_port, fe->virtual_port,
- (uintmax_t)fe->wwnn, (uintmax_t)fe->wwpn);
-
- /*
- * Frontends are not required to support the dump
- * routine.
- */
- if (fe->fe_dump == NULL)
- continue;
-
- fe->fe_dump();
+ printf(" Frontend '%s'\n", fe->name);
+ if (fe->fe_dump != NULL)
+ fe->fe_dump();
}
printf("CTL Frontend information end\n");
break;
@@ -3002,6 +3058,8 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
retval = backend->ioctl(dev, cmd, addr, flag, td);
if (lun_req->num_be_args > 0) {
+ ctl_copyout_args(lun_req->num_be_args,
+ lun_req->kern_be_args);
ctl_free_args(lun_req->num_be_args,
lun_req->kern_be_args);
}
@@ -3011,7 +3069,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
struct sbuf *sb;
struct ctl_lun *lun;
struct ctl_lun_list *list;
- struct ctl_be_lun_option *opt;
+ struct ctl_option *opt;
list = (struct ctl_lun_list *)addr;
@@ -3052,8 +3110,8 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
sbuf_printf(sb, "<ctllunlist>\n");
mtx_lock(&softc->ctl_lock);
-
STAILQ_FOREACH(lun, &softc->lun_list, links) {
+ mtx_lock(&lun->lun_lock);
retval = sbuf_printf(sb, "<lun id=\"%ju\">\n",
(uintmax_t)lun->lun);
@@ -3064,7 +3122,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
if (retval != 0)
break;
- retval = sbuf_printf(sb, "<backend_type>%s"
+ retval = sbuf_printf(sb, "\t<backend_type>%s"
"</backend_type>\n",
(lun->backend == NULL) ? "none" :
lun->backend->name);
@@ -3072,7 +3130,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
if (retval != 0)
break;
- retval = sbuf_printf(sb, "<lun_type>%d</lun_type>\n",
+ retval = sbuf_printf(sb, "\t<lun_type>%d</lun_type>\n",
lun->be_lun->lun_type);
if (retval != 0)
@@ -3085,20 +3143,20 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
continue;
}
- retval = sbuf_printf(sb, "<size>%ju</size>\n",
+ retval = sbuf_printf(sb, "\t<size>%ju</size>\n",
(lun->be_lun->maxlba > 0) ?
lun->be_lun->maxlba + 1 : 0);
if (retval != 0)
break;
- retval = sbuf_printf(sb, "<blocksize>%u</blocksize>\n",
+ retval = sbuf_printf(sb, "\t<blocksize>%u</blocksize>\n",
lun->be_lun->blocksize);
if (retval != 0)
break;
- retval = sbuf_printf(sb, "<serial_number>");
+ retval = sbuf_printf(sb, "\t<serial_number>");
if (retval != 0)
break;
@@ -3114,7 +3172,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
if (retval != 0)
break;
- retval = sbuf_printf(sb, "<device_id>");
+ retval = sbuf_printf(sb, "\t<device_id>");
if (retval != 0)
break;
@@ -3135,7 +3193,8 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
break;
}
STAILQ_FOREACH(opt, &lun->be_lun->options, links) {
- retval = sbuf_printf(sb, "<%s>%s</%s>", opt->name, opt->value, opt->name);
+ retval = sbuf_printf(sb, "\t<%s>%s</%s>\n",
+ opt->name, opt->value, opt->name);
if (retval != 0)
break;
}
@@ -3144,7 +3203,10 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
if (retval != 0)
break;
+ mtx_unlock(&lun->lun_lock);
}
+ if (lun != NULL)
+ mtx_unlock(&lun->lun_lock);
mtx_unlock(&softc->ctl_lock);
if ((retval != 0)
@@ -3174,20 +3236,155 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
ci = (struct ctl_iscsi *)addr;
+ fe = ctl_frontend_find("iscsi");
+ if (fe == NULL) {
+ ci->status = CTL_ISCSI_ERROR;
+ snprintf(ci->error_str, sizeof(ci->error_str),
+ "Frontend \"iscsi\" not found.");
+ break;
+ }
+
+ retval = fe->ioctl(dev, cmd, addr, flag, td);
+ break;
+ }
+ case CTL_PORT_REQ: {
+ struct ctl_req *req;
+ struct ctl_frontend *fe;
+
+ req = (struct ctl_req *)addr;
+
+ fe = ctl_frontend_find(req->driver);
+ if (fe == NULL) {
+ req->status = CTL_LUN_ERROR;
+ snprintf(req->error_str, sizeof(req->error_str),
+ "Frontend \"%s\" not found.", req->driver);
+ break;
+ }
+ if (req->num_args > 0) {
+ req->kern_args = ctl_copyin_args(req->num_args,
+ req->args, req->error_str, sizeof(req->error_str));
+ if (req->kern_args == NULL) {
+ req->status = CTL_LUN_ERROR;
+ break;
+ }
+ }
+
+ retval = fe->ioctl(dev, cmd, addr, flag, td);
+
+ if (req->num_args > 0) {
+ ctl_copyout_args(req->num_args, req->kern_args);
+ ctl_free_args(req->num_args, req->kern_args);
+ }
+ break;
+ }
+ case CTL_PORT_LIST: {
+ struct sbuf *sb;
+ struct ctl_port *port;
+ struct ctl_lun_list *list;
+ struct ctl_option *opt;
+
+ list = (struct ctl_lun_list *)addr;
+
+ sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN);
+ if (sb == NULL) {
+ list->status = CTL_LUN_LIST_ERROR;
+ snprintf(list->error_str, sizeof(list->error_str),
+ "Unable to allocate %d bytes for LUN list",
+ list->alloc_len);
+ break;
+ }
+
+ sbuf_printf(sb, "<ctlportlist>\n");
+
mtx_lock(&softc->ctl_lock);
- STAILQ_FOREACH(fe, &softc->fe_list, links) {
- if (strcmp(fe->port_name, "iscsi") == 0)
+ STAILQ_FOREACH(port, &softc->port_list, links) {
+ retval = sbuf_printf(sb, "<targ_port id=\"%ju\">\n",
+ (uintmax_t)port->targ_port);
+
+ /*
+ * Bail out as soon as we see that we've overfilled
+ * the buffer.
+ */
+ if (retval != 0)
+ break;
+
+ retval = sbuf_printf(sb, "\t<frontend_type>%s"
+ "</frontend_type>\n", port->frontend->name);
+ if (retval != 0)
+ break;
+
+ retval = sbuf_printf(sb, "\t<port_type>%d</port_type>\n",
+ port->port_type);
+ if (retval != 0)
+ break;
+
+ retval = sbuf_printf(sb, "\t<online>%s</online>\n",
+ (port->status & CTL_PORT_STATUS_ONLINE) ? "YES" : "NO");
+ if (retval != 0)
+ break;
+
+ retval = sbuf_printf(sb, "\t<port_name>%s</port_name>\n",
+ port->port_name);
+ if (retval != 0)
+ break;
+
+ retval = sbuf_printf(sb, "\t<physical_port>%d</physical_port>\n",
+ port->physical_port);
+ if (retval != 0)
+ break;
+
+ retval = sbuf_printf(sb, "\t<virtual_port>%d</virtual_port>\n",
+ port->virtual_port);
+ if (retval != 0)
+ break;
+
+ retval = sbuf_printf(sb, "\t<wwnn>%#jx</wwnn>\n",
+ (uintmax_t)port->wwnn);
+ if (retval != 0)
+ break;
+
+ retval = sbuf_printf(sb, "\t<wwpn>%#jx</wwpn>\n",
+ (uintmax_t)port->wwpn);
+ if (retval != 0)
+ break;
+
+ if (port->port_info != NULL) {
+ retval = port->port_info(port->onoff_arg, sb);
+ if (retval != 0)
+ break;
+ }
+ STAILQ_FOREACH(opt, &port->options, links) {
+ retval = sbuf_printf(sb, "\t<%s>%s</%s>\n",
+ opt->name, opt->value, opt->name);
+ if (retval != 0)
+ break;
+ }
+
+ retval = sbuf_printf(sb, "</targ_port>\n");
+ if (retval != 0)
break;
}
mtx_unlock(&softc->ctl_lock);
- if (fe == NULL) {
- ci->status = CTL_ISCSI_ERROR;
- snprintf(ci->error_str, sizeof(ci->error_str), "Backend \"iscsi\" not found.");
+ if ((retval != 0)
+ || ((retval = sbuf_printf(sb, "</ctlportlist>\n")) != 0)) {
+ retval = 0;
+ sbuf_delete(sb);
+ list->status = CTL_LUN_LIST_NEED_MORE_SPACE;
+ snprintf(list->error_str, sizeof(list->error_str),
+ "Out of space, %d bytes is too small",
+ list->alloc_len);
break;
}
- retval = fe->ioctl(dev, cmd, addr, flag, td);
+ sbuf_finish(sb);
+
+ retval = copyout(sbuf_data(sb), list->lun_xml,
+ sbuf_len(sb) + 1);
+
+ list->fill_len = sbuf_len(sb) + 1;
+ list->status = CTL_LUN_LIST_OK;
+ sbuf_delete(sb);
break;
}
default: {
@@ -3215,7 +3412,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
if (found == 0) {
printf("ctl: unknown ioctl command %#lx or backend "
"%d\n", cmd, type);
- retval = -EINVAL;
+ retval = EINVAL;
break;
}
retval = backend->ioctl(dev, cmd, addr, flag, td);
@@ -3254,6 +3451,35 @@ ctl_port_idx(int port_num)
return(port_num - CTL_MAX_PORTS);
}
+static uint32_t
+ctl_map_lun(int port_num, uint32_t lun_id)
+{
+ struct ctl_port *port;
+
+ port = control_softc->ctl_ports[ctl_port_idx(port_num)];
+ if (port == NULL)
+ return (UINT32_MAX);
+ if (port->lun_map == NULL)
+ return (lun_id);
+ return (port->lun_map(port->targ_lun_arg, lun_id));
+}
+
+static uint32_t
+ctl_map_lun_back(int port_num, uint32_t lun_id)
+{
+ struct ctl_port *port;
+ uint32_t i;
+
+ port = control_softc->ctl_ports[ctl_port_idx(port_num)];
+ if (port->lun_map == NULL)
+ return (lun_id);
+ for (i = 0; i < CTL_MAX_LUNS; i++) {
+ if (port->lun_map(port->targ_lun_arg, i) == lun_id)
+ return (i);
+ }
+ return (UINT32_MAX);
+}
+
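/*
 * Editorial sketch, not part of this change: a hypothetical lun_map callback
 * of the shape ctl_map_lun()/ctl_map_lun_back() above expect.  Only the
 * signature and the targ_lun_arg cookie are taken from the call sites; the
 * fixed-offset scheme and the UINT32_MAX-for-unmapped convention are
 * illustrative assumptions.
 */
static uint32_t
example_port_lun_map(void *arg, uint32_t lun_id)
{
	uint32_t base = *(uint32_t *)arg;	/* e.g. stored in targ_lun_arg */

	if (lun_id >= CTL_MAX_LUNS - base)
		return (UINT32_MAX);		/* no backing LUN */
	return (base + lun_id);
}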
/*
* Note: This only works for bitmask sizes that are at least 32 bits, and
* that are a power of 2.
@@ -3380,7 +3606,7 @@ ctl_pool_create(struct ctl_softc *ctl_softc, ctl_pool_type pool_type,
pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL,
M_NOWAIT | M_ZERO);
if (pool == NULL) {
- retval = -ENOMEM;
+ retval = ENOMEM;
goto bailout;
}
@@ -3403,7 +3629,7 @@ ctl_pool_create(struct ctl_softc *ctl_softc, ctl_pool_type pool_type,
* tracking.
*/
for (i = 0; i < total_ctl_io; i++) {
- cur_io = (union ctl_io *)malloc(sizeof(*cur_io), M_CTL,
+ cur_io = (union ctl_io *)malloc(sizeof(*cur_io), M_CTLIO,
M_NOWAIT);
if (cur_io == NULL) {
retval = ENOMEM;
@@ -3422,7 +3648,7 @@ ctl_pool_create(struct ctl_softc *ctl_softc, ctl_pool_type pool_type,
links);
STAILQ_REMOVE(&pool->free_queue, &cur_io->io_hdr,
ctl_io_hdr, links);
- free(cur_io, M_CTL);
+ free(cur_io, M_CTLIO);
}
free(pool, M_CTL);
@@ -3442,7 +3668,6 @@ ctl_pool_create(struct ctl_softc *ctl_softc, ctl_pool_type pool_type,
#if 0
if ((pool_type != CTL_POOL_EMERGENCY)
&& (pool_type != CTL_POOL_INTERNAL)
- && (pool_type != CTL_POOL_IOCTL)
&& (pool_type != CTL_POOL_4OTHERSC))
MOD_INC_USE_COUNT;
#endif
@@ -3463,7 +3688,7 @@ ctl_pool_acquire(struct ctl_io_pool *pool)
mtx_assert(&pool->ctl_softc->pool_lock, MA_OWNED);
if (pool->flags & CTL_POOL_FLAG_INVALID)
- return (-EINVAL);
+ return (EINVAL);
pool->refcount++;
@@ -3484,7 +3709,7 @@ ctl_pool_release(struct ctl_io_pool *pool)
while ((io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue)) != NULL) {
STAILQ_REMOVE(&pool->free_queue, &io->io_hdr, ctl_io_hdr,
links);
- free(io, M_CTL);
+ free(io, M_CTLIO);
}
STAILQ_REMOVE(&ctl_softc->io_pools, pool, ctl_io_pool, links);
@@ -3496,7 +3721,7 @@ ctl_pool_release(struct ctl_io_pool *pool)
#if 0
if ((pool->type != CTL_POOL_EMERGENCY)
&& (pool->type != CTL_POOL_INTERNAL)
- && (pool->type != CTL_POOL_IOCTL))
+ && (pool->type != CTL_POOL_4OTHERSC))
MOD_DEC_USE_COUNT;
#endif
@@ -3588,7 +3813,7 @@ ctl_alloc_io(void *pool_ref)
* The emergency pool (if it exists) didn't have one, so try an
* atomic (i.e. nonblocking) malloc and see if we get lucky.
*/
- io = (union ctl_io *)malloc(sizeof(*io), M_CTL, M_NOWAIT);
+ io = (union ctl_io *)malloc(sizeof(*io), M_CTLIO, M_NOWAIT);
if (io != NULL) {
/*
* If the emergency pool exists but is empty, add this
@@ -3626,49 +3851,9 @@ ctl_free_io(union ctl_io *io)
*/
if (io->io_hdr.pool != NULL) {
struct ctl_io_pool *pool;
-#if 0
- struct ctl_softc *ctl_softc;
- union ctl_io *tmp_io;
- unsigned long xflags;
- int i;
-
- ctl_softc = control_softc;
-#endif
pool = (struct ctl_io_pool *)io->io_hdr.pool;
-
mtx_lock(&pool->ctl_softc->pool_lock);
-#if 0
- save_flags(xflags);
-
- for (i = 0, tmp_io = (union ctl_io *)STAILQ_FIRST(
- &ctl_softc->task_queue); tmp_io != NULL; i++,
- tmp_io = (union ctl_io *)STAILQ_NEXT(&tmp_io->io_hdr,
- links)) {
- if (tmp_io == io) {
- printf("%s: %p is still on the task queue!\n",
- __func__, tmp_io);
- printf("%s: (%d): type %d "
- "msg %d cdb %x iptl: "
- "%d:%d:%d:%d tag 0x%04x "
- "flg %#lx\n",
- __func__, i,
- tmp_io->io_hdr.io_type,
- tmp_io->io_hdr.msg_type,
- tmp_io->scsiio.cdb[0],
- tmp_io->io_hdr.nexus.initid.id,
- tmp_io->io_hdr.nexus.targ_port,
- tmp_io->io_hdr.nexus.targ_target.id,
- tmp_io->io_hdr.nexus.targ_lun,
- (tmp_io->io_hdr.io_type ==
- CTL_IO_TASK) ?
- tmp_io->taskio.tag_num :
- tmp_io->scsiio.tag_num,
- xflags);
- panic("I/O still on the task queue!");
- }
- }
-#endif
io->io_hdr.io_type = 0xff;
STAILQ_INSERT_TAIL(&pool->free_queue, &io->io_hdr, links);
pool->total_freed++;
@@ -3680,7 +3865,7 @@ ctl_free_io(union ctl_io *io)
* Otherwise, just free it. We probably malloced it and
* the emergency pool wasn't available.
*/
- free(io, M_CTL);
+ free(io, M_CTLIO);
}
}
@@ -4242,8 +4427,12 @@ ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
struct ctl_be_lun *const be_lun, struct ctl_id target_id)
{
struct ctl_lun *nlun, *lun;
- struct ctl_frontend *fe;
+ struct ctl_port *port;
+ struct scsi_vpd_id_descriptor *desc;
+ struct scsi_vpd_id_t10 *t10id;
+ const char *eui, *naa, *scsiname, *vendor;
int lun_number, i, lun_malloced;
+ int devidlen, idlen1, idlen2 = 0, len;
if (be_lun == NULL)
return (EINVAL);
@@ -4275,6 +4464,69 @@ ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
if (lun_malloced)
lun->flags = CTL_LUN_MALLOCED;
+ /* Generate LUN ID. */
+ devidlen = max(CTL_DEVID_MIN_LEN,
+ strnlen(be_lun->device_id, CTL_DEVID_LEN));
+ idlen1 = sizeof(*t10id) + devidlen;
+ len = sizeof(struct scsi_vpd_id_descriptor) + idlen1;
+ scsiname = ctl_get_opt(&be_lun->options, "scsiname");
+ if (scsiname != NULL) {
+ idlen2 = roundup2(strlen(scsiname) + 1, 4);
+ len += sizeof(struct scsi_vpd_id_descriptor) + idlen2;
+ }
+ eui = ctl_get_opt(&be_lun->options, "eui");
+ if (eui != NULL) {
+ len += sizeof(struct scsi_vpd_id_descriptor) + 8;
+ }
+ naa = ctl_get_opt(&be_lun->options, "naa");
+ if (naa != NULL) {
+ len += sizeof(struct scsi_vpd_id_descriptor) + 8;
+ }
+ lun->lun_devid = malloc(sizeof(struct ctl_devid) + len,
+ M_CTL, M_WAITOK | M_ZERO);
+ lun->lun_devid->len = len;
+ desc = (struct scsi_vpd_id_descriptor *)lun->lun_devid->data;
+ desc->proto_codeset = SVPD_ID_CODESET_ASCII;
+ desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10;
+ desc->length = idlen1;
+ t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0];
+ memset(t10id->vendor, ' ', sizeof(t10id->vendor));
+ if ((vendor = ctl_get_opt(&be_lun->options, "vendor")) == NULL) {
+ strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor));
+ } else {
+ strncpy(t10id->vendor, vendor,
+ min(sizeof(t10id->vendor), strlen(vendor)));
+ }
+ strncpy((char *)t10id->vendor_spec_id,
+ (char *)be_lun->device_id, devidlen);
+ if (scsiname != NULL) {
+ desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
+ desc->length);
+ desc->proto_codeset = SVPD_ID_CODESET_UTF8;
+ desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN |
+ SVPD_ID_TYPE_SCSI_NAME;
+ desc->length = idlen2;
+ strlcpy(desc->identifier, scsiname, idlen2);
+ }
+ if (eui != NULL) {
+ desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
+ desc->length);
+ desc->proto_codeset = SVPD_ID_CODESET_BINARY;
+ desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN |
+ SVPD_ID_TYPE_EUI64;
+ desc->length = 8;
+ scsi_u64to8b(strtouq(eui, NULL, 0), desc->identifier);
+ }
+ if (naa != NULL) {
+ desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
+ desc->length);
+ desc->proto_codeset = SVPD_ID_CODESET_BINARY;
+ desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN |
+ SVPD_ID_TYPE_NAA;
+ desc->length = 8;
+ scsi_u64to8b(strtouq(naa, NULL, 0), desc->identifier);
+ }
+
mtx_lock(&ctl_softc->ctl_lock);
/*
* See if the caller requested a particular LUN number. If so, see
@@ -4318,6 +4570,7 @@ ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
}
ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number);
+ mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF);
lun->target = target_id;
lun->lun = lun_number;
lun->be_lun = be_lun;
@@ -4343,6 +4596,7 @@ ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
TAILQ_INIT(&lun->ooa_queue);
TAILQ_INIT(&lun->blocked_queue);
STAILQ_INIT(&lun->error_list);
+ ctl_tpc_init(lun);
/*
* Initialize the mode page index.
@@ -4353,7 +4607,7 @@ ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
* Set the poweron UA for all initiators on this LUN only.
*/
for (i = 0; i < CTL_MAX_INITIATORS; i++)
- lun->pending_sense[i].ua_pending = CTL_UA_POWERON;
+ lun->pending_ua[i] = CTL_UA_POWERON;
/*
* Now, before we insert this lun on the lun list, set the lun
@@ -4361,7 +4615,7 @@ ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
*/
STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) {
for (i = 0; i < CTL_MAX_INITIATORS; i++) {
- nlun->pending_sense[i].ua_pending |= CTL_UA_LUN_CHANGE;
+ nlun->pending_ua[i] |= CTL_UA_LUN_CHANGE;
}
}
@@ -4390,35 +4644,17 @@ ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
* already. Enable the target ID if it hasn't been enabled, and
* enable this particular LUN.
*/
- STAILQ_FOREACH(fe, &ctl_softc->fe_list, links) {
+ STAILQ_FOREACH(port, &ctl_softc->port_list, links) {
int retval;
- /*
- * XXX KDM this only works for ONE TARGET ID. We'll need
- * to do things differently if we go to a multiple target
- * ID scheme.
- */
- if ((fe->status & CTL_PORT_STATUS_TARG_ONLINE) == 0) {
-
- retval = fe->targ_enable(fe->targ_lun_arg, target_id);
- if (retval != 0) {
- printf("ctl_alloc_lun: FETD %s port %d "
- "returned error %d for targ_enable on "
- "target %ju\n", fe->port_name,
- fe->targ_port, retval,
- (uintmax_t)target_id.id);
- } else
- fe->status |= CTL_PORT_STATUS_TARG_ONLINE;
- }
-
- retval = fe->lun_enable(fe->targ_lun_arg, target_id,lun_number);
+ retval = port->lun_enable(port->targ_lun_arg, target_id,lun_number);
if (retval != 0) {
printf("ctl_alloc_lun: FETD %s port %d returned error "
"%d for lun_enable on target %ju lun %d\n",
- fe->port_name, fe->targ_port, retval,
+ port->port_name, port->targ_port, retval,
(uintmax_t)target_id.id, lun_number);
} else
- fe->status |= CTL_PORT_STATUS_LUN_ONLINE;
+ port->status |= CTL_PORT_STATUS_LUN_ONLINE;
}
return (0);
}
@@ -4434,10 +4670,9 @@ ctl_free_lun(struct ctl_lun *lun)
{
struct ctl_softc *softc;
#if 0
- struct ctl_frontend *fe;
+ struct ctl_port *port;
#endif
struct ctl_lun *nlun;
- union ctl_io *io, *next_io;
int i;
softc = lun->ctl_softc;
@@ -4450,49 +4685,8 @@ ctl_free_lun(struct ctl_lun *lun)
softc->ctl_luns[lun->lun] = NULL;
- if (TAILQ_FIRST(&lun->ooa_queue) != NULL) {
- printf("ctl_free_lun: aieee!! freeing a LUN with "
- "outstanding I/O!!\n");
- }
-
- /*
- * If we have anything pending on the RtR queue, remove it.
- */
- for (io = (union ctl_io *)STAILQ_FIRST(&softc->rtr_queue); io != NULL;
- io = next_io) {
- uint32_t targ_lun;
-
- next_io = (union ctl_io *)STAILQ_NEXT(&io->io_hdr, links);
- targ_lun = io->io_hdr.nexus.targ_lun;
- if (io->io_hdr.nexus.lun_map_fn != NULL)
- targ_lun = io->io_hdr.nexus.lun_map_fn(io->io_hdr.nexus.lun_map_arg, targ_lun);
- if ((io->io_hdr.nexus.targ_target.id == lun->target.id)
- && (targ_lun == lun->lun))
- STAILQ_REMOVE(&softc->rtr_queue, &io->io_hdr,
- ctl_io_hdr, links);
- }
-
- /*
- * Then remove everything from the blocked queue.
- */
- for (io = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue); io != NULL;
- io = next_io) {
- next_io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr,blocked_links);
- TAILQ_REMOVE(&lun->blocked_queue, &io->io_hdr, blocked_links);
- io->io_hdr.flags &= ~CTL_FLAG_BLOCKED;
- }
-
- /*
- * Now clear out the OOA queue, and free all the I/O.
- * XXX KDM should we notify the FETD here? We probably need to
- * quiesce the LUN before deleting it.
- */
- for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); io != NULL;
- io = next_io) {
- next_io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, ooa_links);
- TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
- ctl_free_io(io);
- }
+ if (!TAILQ_EMPTY(&lun->ooa_queue))
+ panic("Freeing a LUN %p with outstanding I/O!!\n", lun);
softc->num_luns--;
@@ -4500,7 +4694,7 @@ ctl_free_lun(struct ctl_lun *lun)
* XXX KDM this scheme only works for a single target/multiple LUN
* setup. It needs to be revamped for a multiple target scheme.
*
- * XXX KDM this results in fe->lun_disable() getting called twice,
+ * XXX KDM this results in port->lun_disable() getting called twice,
* once when ctl_disable_lun() is called, and a second time here.
* We really need to re-think the LUN disable semantics. There
* should probably be several steps/levels to LUN removal:
@@ -4512,37 +4706,37 @@ ctl_free_lun(struct ctl_lun *lun)
* the front end ports, at least for individual LUNs.
*/
#if 0
- STAILQ_FOREACH(fe, &softc->fe_list, links) {
+ STAILQ_FOREACH(port, &softc->port_list, links) {
int retval;
- retval = fe->lun_disable(fe->targ_lun_arg, lun->target,
+ retval = port->lun_disable(port->targ_lun_arg, lun->target,
lun->lun);
if (retval != 0) {
printf("ctl_free_lun: FETD %s port %d returned error "
"%d for lun_disable on target %ju lun %jd\n",
- fe->port_name, fe->targ_port, retval,
+ port->port_name, port->targ_port, retval,
(uintmax_t)lun->target.id, (intmax_t)lun->lun);
}
if (STAILQ_FIRST(&softc->lun_list) == NULL) {
- fe->status &= ~CTL_PORT_STATUS_LUN_ONLINE;
+ port->status &= ~CTL_PORT_STATUS_LUN_ONLINE;
- retval = fe->targ_disable(fe->targ_lun_arg,lun->target);
+ retval = port->targ_disable(port->targ_lun_arg,lun->target);
if (retval != 0) {
printf("ctl_free_lun: FETD %s port %d "
"returned error %d for targ_disable on "
- "target %ju\n", fe->port_name,
- fe->targ_port, retval,
+ "target %ju\n", port->port_name,
+ port->targ_port, retval,
(uintmax_t)lun->target.id);
} else
- fe->status &= ~CTL_PORT_STATUS_TARG_ONLINE;
+ port->status &= ~CTL_PORT_STATUS_TARG_ONLINE;
- if ((fe->status & CTL_PORT_STATUS_TARG_ONLINE) != 0)
+ if ((port->status & CTL_PORT_STATUS_TARG_ONLINE) != 0)
continue;
#if 0
- fe->port_offline(fe->onoff_arg);
- fe->status &= ~CTL_PORT_STATUS_ONLINE;
+ port->port_offline(port->onoff_arg);
+ port->status &= ~CTL_PORT_STATUS_ONLINE;
#endif
}
}
@@ -4554,12 +4748,15 @@ ctl_free_lun(struct ctl_lun *lun)
atomic_subtract_int(&lun->be_lun->be->num_luns, 1);
lun->be_lun->lun_shutdown(lun->be_lun->be_lun);
+ ctl_tpc_shutdown(lun);
+ mtx_destroy(&lun->lun_lock);
+ free(lun->lun_devid, M_CTL);
if (lun->flags & CTL_LUN_MALLOCED)
free(lun, M_CTL);
STAILQ_FOREACH(nlun, &softc->lun_list, links) {
for (i = 0; i < CTL_MAX_INITIATORS; i++) {
- nlun->pending_sense[i].ua_pending |= CTL_UA_LUN_CHANGE;
+ nlun->pending_ua[i] |= CTL_UA_LUN_CHANGE;
}
}
@@ -4582,15 +4779,12 @@ ctl_create_lun(struct ctl_be_lun *be_lun)
int
ctl_add_lun(struct ctl_be_lun *be_lun)
{
- struct ctl_softc *ctl_softc;
-
- ctl_softc = control_softc;
+ struct ctl_softc *ctl_softc = control_softc;
mtx_lock(&ctl_softc->ctl_lock);
STAILQ_INSERT_TAIL(&ctl_softc->pending_lun_queue, be_lun, links);
mtx_unlock(&ctl_softc->ctl_lock);
-
- ctl_wakeup_thread();
+ wakeup(&ctl_softc->pending_lun_queue);
return (0);
}
@@ -4599,7 +4793,7 @@ int
ctl_enable_lun(struct ctl_be_lun *be_lun)
{
struct ctl_softc *ctl_softc;
- struct ctl_frontend *fe, *nfe;
+ struct ctl_port *port, *nport;
struct ctl_lun *lun;
int retval;
@@ -4608,18 +4802,21 @@ ctl_enable_lun(struct ctl_be_lun *be_lun)
lun = (struct ctl_lun *)be_lun->ctl_lun;
mtx_lock(&ctl_softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
if ((lun->flags & CTL_LUN_DISABLED) == 0) {
/*
* eh? Why did we get called if the LUN is already
* enabled?
*/
+ mtx_unlock(&lun->lun_lock);
mtx_unlock(&ctl_softc->ctl_lock);
return (0);
}
lun->flags &= ~CTL_LUN_DISABLED;
+ mtx_unlock(&lun->lun_lock);
- for (fe = STAILQ_FIRST(&ctl_softc->fe_list); fe != NULL; fe = nfe) {
- nfe = STAILQ_NEXT(fe, links);
+ for (port = STAILQ_FIRST(&ctl_softc->port_list); port != NULL; port = nport) {
+ nport = STAILQ_NEXT(port, links);
/*
* Drop the lock while we call the FETD's enable routine.
@@ -4627,18 +4824,18 @@ ctl_enable_lun(struct ctl_be_lun *be_lun)
* case of the internal initiator frontend.
*/
mtx_unlock(&ctl_softc->ctl_lock);
- retval = fe->lun_enable(fe->targ_lun_arg, lun->target,lun->lun);
+ retval = port->lun_enable(port->targ_lun_arg, lun->target,lun->lun);
mtx_lock(&ctl_softc->ctl_lock);
if (retval != 0) {
printf("%s: FETD %s port %d returned error "
"%d for lun_enable on target %ju lun %jd\n",
- __func__, fe->port_name, fe->targ_port, retval,
+ __func__, port->port_name, port->targ_port, retval,
(uintmax_t)lun->target.id, (intmax_t)lun->lun);
}
#if 0
else {
/* NOTE: TODO: why does lun enable affect port status? */
- fe->status |= CTL_PORT_STATUS_LUN_ONLINE;
+ port->status |= CTL_PORT_STATUS_LUN_ONLINE;
}
#endif
}
@@ -4652,7 +4849,7 @@ int
ctl_disable_lun(struct ctl_be_lun *be_lun)
{
struct ctl_softc *ctl_softc;
- struct ctl_frontend *fe;
+ struct ctl_port *port;
struct ctl_lun *lun;
int retval;
@@ -4661,14 +4858,16 @@ ctl_disable_lun(struct ctl_be_lun *be_lun)
lun = (struct ctl_lun *)be_lun->ctl_lun;
mtx_lock(&ctl_softc->ctl_lock);
-
+ mtx_lock(&lun->lun_lock);
if (lun->flags & CTL_LUN_DISABLED) {
+ mtx_unlock(&lun->lun_lock);
mtx_unlock(&ctl_softc->ctl_lock);
return (0);
}
lun->flags |= CTL_LUN_DISABLED;
+ mtx_unlock(&lun->lun_lock);
- STAILQ_FOREACH(fe, &ctl_softc->fe_list, links) {
+ STAILQ_FOREACH(port, &ctl_softc->port_list, links) {
mtx_unlock(&ctl_softc->ctl_lock);
/*
* Drop the lock before we call the frontend's disable
@@ -4677,13 +4876,13 @@ ctl_disable_lun(struct ctl_be_lun *be_lun)
* XXX KDM what happens if the frontend list changes while
* we're traversing it? It's unlikely, but should be handled.
*/
- retval = fe->lun_disable(fe->targ_lun_arg, lun->target,
+ retval = port->lun_disable(port->targ_lun_arg, lun->target,
lun->lun);
mtx_lock(&ctl_softc->ctl_lock);
if (retval != 0) {
printf("ctl_alloc_lun: FETD %s port %d returned error "
"%d for lun_disable on target %ju lun %jd\n",
- fe->port_name, fe->targ_port, retval,
+ port->port_name, port->targ_port, retval,
(uintmax_t)lun->target.id, (intmax_t)lun->lun);
}
}
@@ -4703,9 +4902,9 @@ ctl_start_lun(struct ctl_be_lun *be_lun)
lun = (struct ctl_lun *)be_lun->ctl_lun;
- mtx_lock(&ctl_softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
lun->flags &= ~CTL_LUN_STOPPED;
- mtx_unlock(&ctl_softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
return (0);
}
@@ -4720,9 +4919,9 @@ ctl_stop_lun(struct ctl_be_lun *be_lun)
lun = (struct ctl_lun *)be_lun->ctl_lun;
- mtx_lock(&ctl_softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
lun->flags |= CTL_LUN_STOPPED;
- mtx_unlock(&ctl_softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
return (0);
}
@@ -4737,9 +4936,9 @@ ctl_lun_offline(struct ctl_be_lun *be_lun)
lun = (struct ctl_lun *)be_lun->ctl_lun;
- mtx_lock(&ctl_softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
lun->flags |= CTL_LUN_OFFLINE;
- mtx_unlock(&ctl_softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
return (0);
}
@@ -4754,9 +4953,9 @@ ctl_lun_online(struct ctl_be_lun *be_lun)
lun = (struct ctl_lun *)be_lun->ctl_lun;
- mtx_lock(&ctl_softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
lun->flags &= ~CTL_LUN_OFFLINE;
- mtx_unlock(&ctl_softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
return (0);
}
@@ -4771,13 +4970,13 @@ ctl_invalidate_lun(struct ctl_be_lun *be_lun)
lun = (struct ctl_lun *)be_lun->ctl_lun;
- mtx_lock(&ctl_softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
/*
* The LUN needs to be disabled before it can be marked invalid.
*/
if ((lun->flags & CTL_LUN_DISABLED) == 0) {
- mtx_unlock(&ctl_softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
return (-1);
}
/*
@@ -4790,9 +4989,13 @@ ctl_invalidate_lun(struct ctl_be_lun *be_lun)
* If we have something in the OOA queue, we'll free it when the
* last I/O completes.
*/
- if (TAILQ_FIRST(&lun->ooa_queue) == NULL)
+ if (TAILQ_EMPTY(&lun->ooa_queue)) {
+ mtx_unlock(&lun->lun_lock);
+ mtx_lock(&ctl_softc->ctl_lock);
ctl_free_lun(lun);
- mtx_unlock(&ctl_softc->ctl_lock);
+ mtx_unlock(&ctl_softc->ctl_lock);
+ } else
+ mtx_unlock(&lun->lun_lock);
return (0);
}
@@ -4806,9 +5009,9 @@ ctl_lun_inoperable(struct ctl_be_lun *be_lun)
ctl_softc = control_softc;
lun = (struct ctl_lun *)be_lun->ctl_lun;
- mtx_lock(&ctl_softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
lun->flags |= CTL_LUN_INOPERABLE;
- mtx_unlock(&ctl_softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
return (0);
}
@@ -4822,9 +5025,9 @@ ctl_lun_operable(struct ctl_be_lun *be_lun)
ctl_softc = control_softc;
lun = (struct ctl_lun *)be_lun->ctl_lun;
- mtx_lock(&ctl_softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
lun->flags &= ~CTL_LUN_INOPERABLE;
- mtx_unlock(&ctl_softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
return (0);
}
@@ -4844,6 +5047,7 @@ ctl_lun_power_lock(struct ctl_be_lun *be_lun, struct ctl_nexus *nexus,
mtx_lock(&softc->ctl_lock);
lun = (struct ctl_lun *)be_lun->ctl_lun;
+ mtx_lock(&lun->lun_lock);
page_index = NULL;
for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
@@ -4857,6 +5061,7 @@ ctl_lun_power_lock(struct ctl_be_lun *be_lun, struct ctl_nexus *nexus,
}
if (page_index == NULL) {
+ mtx_unlock(&lun->lun_lock);
mtx_unlock(&softc->ctl_lock);
printf("%s: APS subpage not found for lun %ju!\n", __func__,
(uintmax_t)lun->lun);
@@ -4867,6 +5072,7 @@ ctl_lun_power_lock(struct ctl_be_lun *be_lun, struct ctl_nexus *nexus,
&& (softc->aps_locked_lun != lun->lun)) {
printf("%s: attempt to lock LUN %llu when %llu is already "
"locked\n");
+ mtx_unlock(&lun->lun_lock);
mtx_unlock(&softc->ctl_lock);
return (1);
}
@@ -4903,11 +5109,13 @@ ctl_lun_power_lock(struct ctl_be_lun *be_lun, struct ctl_nexus *nexus,
if (isc_retval > CTL_HA_STATUS_SUCCESS) {
printf("%s: APS (lock=%d) error returned from "
"ctl_ha_msg_send: %d\n", __func__, lock, isc_retval);
+ mtx_unlock(&lun->lun_lock);
mtx_unlock(&softc->ctl_lock);
return (1);
}
}
+ mtx_unlock(&lun->lun_lock);
mtx_unlock(&softc->ctl_lock);
return (0);
@@ -4922,14 +5130,14 @@ ctl_lun_capacity_changed(struct ctl_be_lun *be_lun)
softc = control_softc;
- mtx_lock(&softc->ctl_lock);
-
lun = (struct ctl_lun *)be_lun->ctl_lun;
+ mtx_lock(&lun->lun_lock);
+
for (i = 0; i < CTL_MAX_INITIATORS; i++)
- lun->pending_sense[i].ua_pending |= CTL_UA_CAPACITY_CHANGED;
+ lun->pending_ua[i] |= CTL_UA_CAPACITY_CHANGED;
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
}
/*
@@ -4970,7 +5178,8 @@ ctl_config_move_done(union ctl_io *io)
/*sks_valid*/ 1,
/*retry_count*/
io->io_hdr.port_status);
- free(io->scsiio.kern_data_ptr, M_CTL);
+ if (io->io_hdr.flags & CTL_FLAG_ALLOCATED)
+ free(io->scsiio.kern_data_ptr, M_CTL);
ctl_done(io);
goto bailout;
}
@@ -4983,7 +5192,8 @@ ctl_config_move_done(union ctl_io *io)
* S/G list. If we start using S/G lists for config data,
* we'll need to know how to clean them up here as well.
*/
- free(io->scsiio.kern_data_ptr, M_CTL);
+ if (io->io_hdr.flags & CTL_FLAG_ALLOCATED)
+ free(io->scsiio.kern_data_ptr, M_CTL);
/* Hopefully the user has already set the status... */
ctl_done(io);
} else {
@@ -5012,6 +5222,31 @@ bailout:
/*
* This gets called by a backend driver when it is done with a
+ * data_submit method.
+ */
+void
+ctl_data_submit_done(union ctl_io *io)
+{
+ /*
+ * If the IO_CONT flag is set, we need to call the supplied
+ * function to continue processing the I/O, instead of completing
+ * the I/O just yet.
+ *
+ * If there is an error, though, we don't want to keep processing.
+ * Instead, just send status back to the initiator.
+ */
+ if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) &&
+ (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 &&
+ ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
+ (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
+ io->scsiio.io_cont(io);
+ return;
+ }
+ ctl_done(io);
+}
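A hedged sketch of how a backend's two-phase command could use this hook (the names below are illustrative, not from this change): the backend points io_cont at its continuation routine and sets CTL_FLAG_IO_CONT before starting the data phase; when the data phase ends, ctl_data_submit_done() invokes the continuation instead of completing the I/O, unless the command was aborted or already carries an error status.

static void
example_continue(union ctl_io *io)
{
	/* second pass: data phase done, complete the command */
	ctl_set_success(&io->scsiio);
	ctl_done(io);
}

static int
example_data_submit(union ctl_io *io)
{
	/* first pass: request a callback via ctl_data_submit_done() */
	io->scsiio.io_cont = example_continue;
	io->io_hdr.flags |= CTL_FLAG_IO_CONT;
	/* ... start the data movement here; its completion path ends in
	 * ctl_data_submit_done(), which then calls example_continue() */
	return (CTL_RETVAL_COMPLETE);
}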
+
+/*
+ * This gets called by a backend driver when it is done with a
* configuration write.
*/
void
@@ -5060,50 +5295,11 @@ ctl_scsi_release(struct ctl_scsiio *ctsio)
ctl_softc = control_softc;
switch (ctsio->cdb[0]) {
- case RELEASE: {
- struct scsi_release *cdb;
-
- cdb = (struct scsi_release *)ctsio->cdb;
- if ((cdb->byte2 & 0x1f) != 0) {
- ctl_set_invalid_field(ctsio,
- /*sks_valid*/ 1,
- /*command*/ 1,
- /*field*/ 1,
- /*bit_valid*/ 0,
- /*bit*/ 0);
- ctl_done((union ctl_io *)ctsio);
- return (CTL_RETVAL_COMPLETE);
- }
- break;
- }
case RELEASE_10: {
struct scsi_release_10 *cdb;
cdb = (struct scsi_release_10 *)ctsio->cdb;
- if ((cdb->byte2 & SR10_EXTENT) != 0) {
- ctl_set_invalid_field(ctsio,
- /*sks_valid*/ 1,
- /*command*/ 1,
- /*field*/ 1,
- /*bit_valid*/ 1,
- /*bit*/ 0);
- ctl_done((union ctl_io *)ctsio);
- return (CTL_RETVAL_COMPLETE);
-
- }
-
- if ((cdb->byte2 & SR10_3RDPTY) != 0) {
- ctl_set_invalid_field(ctsio,
- /*sks_valid*/ 1,
- /*command*/ 1,
- /*field*/ 1,
- /*bit_valid*/ 1,
- /*bit*/ 4);
- ctl_done((union ctl_io *)ctsio);
- return (CTL_RETVAL_COMPLETE);
- }
-
if (cdb->byte2 & SR10_LONGID)
longid = 1;
else
@@ -5143,7 +5339,7 @@ ctl_scsi_release(struct ctl_scsiio *ctsio)
if (length > 0)
thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr);
- mtx_lock(&ctl_softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
/*
 	 * According to SPC, it is not an error for an initiator to attempt
@@ -5161,6 +5357,8 @@ ctl_scsi_release(struct ctl_scsiio *ctsio)
}
}
+ mtx_unlock(&lun->lun_lock);
+
ctsio->scsi_status = SCSI_STATUS_OK;
ctsio->io_hdr.status = CTL_SUCCESS;
@@ -5169,8 +5367,6 @@ ctl_scsi_release(struct ctl_scsiio *ctsio)
ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
}
- mtx_unlock(&ctl_softc->ctl_lock);
-
ctl_done((union ctl_io *)ctsio);
return (CTL_RETVAL_COMPLETE);
}
@@ -5197,49 +5393,11 @@ ctl_scsi_reserve(struct ctl_scsiio *ctsio)
ctl_softc = control_softc;
switch (ctsio->cdb[0]) {
- case RESERVE: {
- struct scsi_reserve *cdb;
-
- cdb = (struct scsi_reserve *)ctsio->cdb;
- if ((cdb->byte2 & 0x1f) != 0) {
- ctl_set_invalid_field(ctsio,
- /*sks_valid*/ 1,
- /*command*/ 1,
- /*field*/ 1,
- /*bit_valid*/ 0,
- /*bit*/ 0);
- ctl_done((union ctl_io *)ctsio);
- return (CTL_RETVAL_COMPLETE);
- }
- resv_id = cdb->resv_id;
- length = scsi_2btoul(cdb->length);
- break;
- }
case RESERVE_10: {
struct scsi_reserve_10 *cdb;
cdb = (struct scsi_reserve_10 *)ctsio->cdb;
- if ((cdb->byte2 & SR10_EXTENT) != 0) {
- ctl_set_invalid_field(ctsio,
- /*sks_valid*/ 1,
- /*command*/ 1,
- /*field*/ 1,
- /*bit_valid*/ 1,
- /*bit*/ 0);
- ctl_done((union ctl_io *)ctsio);
- return (CTL_RETVAL_COMPLETE);
- }
- if ((cdb->byte2 & SR10_3RDPTY) != 0) {
- ctl_set_invalid_field(ctsio,
- /*sks_valid*/ 1,
- /*command*/ 1,
- /*field*/ 1,
- /*bit_valid*/ 1,
- /*bit*/ 4);
- ctl_done((union ctl_io *)ctsio);
- return (CTL_RETVAL_COMPLETE);
- }
if (cdb->byte2 & SR10_LONGID)
longid = 1;
else
@@ -5278,7 +5436,7 @@ ctl_scsi_reserve(struct ctl_scsiio *ctsio)
if (length > 0)
thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr);
- mtx_lock(&ctl_softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
if (lun->flags & CTL_LUN_RESERVED) {
if ((ctsio->io_hdr.nexus.initid.id != lun->rsv_nexus.initid.id)
|| (ctsio->io_hdr.nexus.targ_port != lun->rsv_nexus.targ_port)
@@ -5297,13 +5455,13 @@ ctl_scsi_reserve(struct ctl_scsiio *ctsio)
ctsio->io_hdr.status = CTL_SUCCESS;
bailout:
+ mtx_unlock(&lun->lun_lock);
+
if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
free(ctsio->kern_data_ptr, M_CTL);
ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
}
- mtx_unlock(&ctl_softc->ctl_lock);
-
ctl_done((union ctl_io *)ctsio);
return (CTL_RETVAL_COMPLETE);
}
@@ -5352,35 +5510,6 @@ ctl_start_stop(struct ctl_scsiio *ctsio)
return (CTL_RETVAL_COMPLETE);
}
- /*
- * We don't support the power conditions field. We need to check
- * this prior to checking the load/eject and start/stop bits.
- */
- if ((cdb->how & SSS_PC_MASK) != SSS_PC_START_VALID) {
- ctl_set_invalid_field(ctsio,
- /*sks_valid*/ 1,
- /*command*/ 1,
- /*field*/ 4,
- /*bit_valid*/ 1,
- /*bit*/ 4);
- ctl_done((union ctl_io *)ctsio);
- return (CTL_RETVAL_COMPLETE);
- }
-
- /*
- * Media isn't removable, so we can't load or eject it.
- */
- if ((cdb->how & SSS_LOEJ) != 0) {
- ctl_set_invalid_field(ctsio,
- /*sks_valid*/ 1,
- /*command*/ 1,
- /*field*/ 4,
- /*bit_valid*/ 1,
- /*bit*/ 1);
- ctl_done((union ctl_io *)ctsio);
- return (CTL_RETVAL_COMPLETE);
- }
-
if ((lun->flags & CTL_LUN_PR_RESERVED)
&& ((cdb->how & SSS_START)==0)) {
uint32_t residx;
@@ -5412,7 +5541,7 @@ ctl_start_stop(struct ctl_scsiio *ctsio)
* Figure out a reasonable way to port this?
*/
#ifdef NEEDTOPORT
- mtx_lock(&ctl_softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
if (((cdb->byte2 & SSS_ONOFFLINE) == 0)
&& (lun->flags & CTL_LUN_OFFLINE)) {
@@ -5420,11 +5549,11 @@ ctl_start_stop(struct ctl_scsiio *ctsio)
* If the LUN is offline, and the on/offline bit isn't set,
* reject the start or stop. Otherwise, let it through.
*/
- mtx_unlock(&ctl_softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
ctl_set_lun_not_ready(ctsio);
ctl_done((union ctl_io *)ctsio);
} else {
- mtx_unlock(&ctl_softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
#endif /* NEEDTOPORT */
/*
* This could be a start or a stop when we're online,
@@ -5478,7 +5607,6 @@ ctl_sync_cache(struct ctl_scsiio *ctsio)
struct ctl_softc *ctl_softc;
uint64_t starting_lba;
uint32_t block_count;
- int reladr, immed;
int retval;
CTL_DEBUG_PRINT(("ctl_sync_cache\n"));
@@ -5486,20 +5614,12 @@ ctl_sync_cache(struct ctl_scsiio *ctsio)
lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
ctl_softc = control_softc;
retval = 0;
- reladr = 0;
- immed = 0;
switch (ctsio->cdb[0]) {
case SYNCHRONIZE_CACHE: {
struct scsi_sync_cache *cdb;
cdb = (struct scsi_sync_cache *)ctsio->cdb;
- if (cdb->byte2 & SSC_RELADR)
- reladr = 1;
-
- if (cdb->byte2 & SSC_IMMED)
- immed = 1;
-
starting_lba = scsi_4btoul(cdb->begin_lba);
block_count = scsi_2btoul(cdb->lb_count);
break;
@@ -5508,12 +5628,6 @@ ctl_sync_cache(struct ctl_scsiio *ctsio)
struct scsi_sync_cache_16 *cdb;
cdb = (struct scsi_sync_cache_16 *)ctsio->cdb;
- if (cdb->byte2 & SSC_RELADR)
- reladr = 1;
-
- if (cdb->byte2 & SSC_IMMED)
- immed = 1;
-
starting_lba = scsi_8btou64(cdb->begin_lba);
block_count = scsi_4btoul(cdb->lb_count);
break;
@@ -5525,41 +5639,6 @@ ctl_sync_cache(struct ctl_scsiio *ctsio)
break; /* NOTREACHED */
}
- if (immed) {
- /*
- * We don't support the immediate bit. Since it's in the
- * same place for the 10 and 16 byte SYNCHRONIZE CACHE
- * commands, we can just return the same error in either
- * case.
- */
- ctl_set_invalid_field(ctsio,
- /*sks_valid*/ 1,
- /*command*/ 1,
- /*field*/ 1,
- /*bit_valid*/ 1,
- /*bit*/ 1);
- ctl_done((union ctl_io *)ctsio);
- goto bailout;
- }
-
- if (reladr) {
- /*
- * We don't support the reladr bit either. It can only be
- * used with linked commands, and we don't support linked
- * commands. Since the bit is in the same place for the
- * 10 and 16 byte SYNCHRONIZE CACHE * commands, we can
- * just return the same error in either case.
- */
- ctl_set_invalid_field(ctsio,
- /*sks_valid*/ 1,
- /*command*/ 1,
- /*field*/ 1,
- /*bit_valid*/ 1,
- /*bit*/ 0);
- ctl_done((union ctl_io *)ctsio);
- goto bailout;
- }
-
/*
* We check the LBA and length, but don't do anything with them.
* A SYNCHRONIZE CACHE will cause the entire cache for this lun to
@@ -5585,14 +5664,14 @@ ctl_sync_cache(struct ctl_scsiio *ctsio)
* Check to see whether we're configured to send the SYNCHRONIZE
* CACHE command directly to the back end.
*/
- mtx_lock(&ctl_softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
if ((ctl_softc->flags & CTL_FLAG_REAL_SYNC)
&& (++(lun->sync_count) >= lun->sync_interval)) {
lun->sync_count = 0;
- mtx_unlock(&ctl_softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
retval = lun->backend->config_write((union ctl_io *)ctsio);
} else {
- mtx_unlock(&ctl_softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
ctl_set_success(ctsio);
ctl_done((union ctl_io *)ctsio);
}
@@ -5685,9 +5764,9 @@ ctl_format(struct ctl_scsiio *ctsio)
* get them to issue a command that will basically make them think
* they're blowing away the media.
*/
- mtx_lock(&ctl_softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
lun->flags &= ~CTL_LUN_INOPERABLE;
- mtx_unlock(&ctl_softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
ctsio->scsi_status = SCSI_STATUS_OK;
ctsio->io_hdr.status = CTL_SUCCESS;
@@ -5703,26 +5782,40 @@ bailout:
}
int
-ctl_write_buffer(struct ctl_scsiio *ctsio)
+ctl_read_buffer(struct ctl_scsiio *ctsio)
{
- struct scsi_write_buffer *cdb;
- struct copan_page_header *header;
+ struct scsi_read_buffer *cdb;
struct ctl_lun *lun;
- struct ctl_softc *ctl_softc;
int buffer_offset, len;
- int retval;
+ static uint8_t descr[4];
+ static uint8_t echo_descr[4] = { 0 };
- header = NULL;
+ CTL_DEBUG_PRINT(("ctl_read_buffer\n"));
- retval = CTL_RETVAL_COMPLETE;
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+ cdb = (struct scsi_read_buffer *)ctsio->cdb;
- CTL_DEBUG_PRINT(("ctl_write_buffer\n"));
+ if (lun->flags & CTL_LUN_PR_RESERVED) {
+ uint32_t residx;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
- ctl_softc = control_softc;
- cdb = (struct scsi_write_buffer *)ctsio->cdb;
+ /*
+ * XXX KDM need a lock here.
+ */
+ residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
+ if ((lun->res_type == SPR_TYPE_EX_AC
+ && residx != lun->pr_res_idx)
+ || ((lun->res_type == SPR_TYPE_EX_AC_RO
+ || lun->res_type == SPR_TYPE_EX_AC_AR)
+ && !lun->per_res[residx].registered)) {
+ ctl_set_reservation_conflict(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+ }
- if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA) {
+ if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA &&
+ (cdb->byte2 & RWB_MODE) != RWB_MODE_ECHO_DESCR &&
+ (cdb->byte2 & RWB_MODE) != RWB_MODE_DESCR) {
ctl_set_invalid_field(ctsio,
/*sks_valid*/ 1,
/*command*/ 1,
@@ -5732,36 +5825,73 @@ ctl_write_buffer(struct ctl_scsiio *ctsio)
ctl_done((union ctl_io *)ctsio);
return (CTL_RETVAL_COMPLETE);
}
- if (cdb->buffer_id != 0) {
+
+ len = scsi_3btoul(cdb->length);
+ buffer_offset = scsi_3btoul(cdb->offset);
+
+ if (buffer_offset + len > sizeof(lun->write_buffer)) {
ctl_set_invalid_field(ctsio,
/*sks_valid*/ 1,
/*command*/ 1,
- /*field*/ 2,
+ /*field*/ 6,
/*bit_valid*/ 0,
/*bit*/ 0);
ctl_done((union ctl_io *)ctsio);
return (CTL_RETVAL_COMPLETE);
}
- len = scsi_3btoul(cdb->length);
- buffer_offset = scsi_3btoul(cdb->offset);
+ if ((cdb->byte2 & RWB_MODE) == RWB_MODE_DESCR) {
+ descr[0] = 0;
+ scsi_ulto3b(sizeof(lun->write_buffer), &descr[1]);
+ ctsio->kern_data_ptr = descr;
+ len = min(len, sizeof(descr));
+ } else if ((cdb->byte2 & RWB_MODE) == RWB_MODE_ECHO_DESCR) {
+ ctsio->kern_data_ptr = echo_descr;
+ len = min(len, sizeof(echo_descr));
+ } else
+ ctsio->kern_data_ptr = lun->write_buffer + buffer_offset;
+ ctsio->kern_data_len = len;
+ ctsio->kern_total_len = len;
+ ctsio->kern_data_resid = 0;
+ ctsio->kern_rel_offset = 0;
+ ctsio->kern_sg_entries = 0;
+ ctsio->be_move_done = ctl_config_move_done;
+ ctl_datamove((union ctl_io *)ctsio);
+
+ return (CTL_RETVAL_COMPLETE);
+}
+
+int
+ctl_write_buffer(struct ctl_scsiio *ctsio)
+{
+ struct scsi_write_buffer *cdb;
+ struct ctl_lun *lun;
+ int buffer_offset, len;
- if (len > sizeof(lun->write_buffer)) {
+ CTL_DEBUG_PRINT(("ctl_write_buffer\n"));
+
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+ cdb = (struct scsi_write_buffer *)ctsio->cdb;
+
+ if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA) {
ctl_set_invalid_field(ctsio,
/*sks_valid*/ 1,
/*command*/ 1,
- /*field*/ 6,
- /*bit_valid*/ 0,
- /*bit*/ 0);
+ /*field*/ 1,
+ /*bit_valid*/ 1,
+ /*bit*/ 4);
ctl_done((union ctl_io *)ctsio);
return (CTL_RETVAL_COMPLETE);
}
- if (buffer_offset != 0) {
+ len = scsi_3btoul(cdb->length);
+ buffer_offset = scsi_3btoul(cdb->offset);
+
+ if (buffer_offset + len > sizeof(lun->write_buffer)) {
ctl_set_invalid_field(ctsio,
/*sks_valid*/ 1,
/*command*/ 1,
- /*field*/ 3,
+ /*field*/ 6,
/*bit_valid*/ 0,
/*bit*/ 0);
ctl_done((union ctl_io *)ctsio);
@@ -5773,7 +5903,7 @@ ctl_write_buffer(struct ctl_scsiio *ctsio)
* malloc it and tell the caller the data buffer is here.
*/
if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
- ctsio->kern_data_ptr = lun->write_buffer;
+ ctsio->kern_data_ptr = lun->write_buffer + buffer_offset;
ctsio->kern_data_len = len;
ctsio->kern_total_len = len;
ctsio->kern_data_resid = 0;
@@ -5795,7 +5925,7 @@ int
ctl_write_same(struct ctl_scsiio *ctsio)
{
struct ctl_lun *lun;
- struct ctl_lba_len_flags lbalen;
+ struct ctl_lba_len_flags *lbalen;
uint64_t lba;
uint32_t num_blocks;
int len, retval;
@@ -5888,11 +6018,10 @@ ctl_write_same(struct ctl_scsiio *ctsio)
return (CTL_RETVAL_COMPLETE);
}
- lbalen.lba = lba;
- lbalen.len = num_blocks;
- lbalen.flags = byte2;
- memcpy(ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, &lbalen,
- sizeof(lbalen));
+ lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
+ lbalen->lba = lba;
+ lbalen->len = num_blocks;
+ lbalen->flags = byte2;
retval = lun->backend->config_write((union ctl_io *)ctsio);
return (retval);
@@ -5903,7 +6032,7 @@ ctl_unmap(struct ctl_scsiio *ctsio)
{
struct ctl_lun *lun;
struct scsi_unmap *cdb;
- struct ctl_ptr_len_flags ptrlen;
+ struct ctl_ptr_len_flags *ptrlen;
struct scsi_unmap_header *hdr;
struct scsi_unmap_desc *buf, *end;
uint64_t lba;
@@ -5958,11 +6087,10 @@ ctl_unmap(struct ctl_scsiio *ctsio)
buf = (struct scsi_unmap_desc *)(hdr + 1);
end = buf + len / sizeof(*buf);
- ptrlen.ptr = (void *)buf;
- ptrlen.len = len;
- ptrlen.flags = byte2;
- memcpy(ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, &ptrlen,
- sizeof(ptrlen));
+ ptrlen = (struct ctl_ptr_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
+ ptrlen->ptr = (void *)buf;
+ ptrlen->len = len;
+ ptrlen->flags = byte2;
for (; buf < end; buf++) {
lba = scsi_8btou64(buf->lba);
@@ -6017,7 +6145,7 @@ ctl_control_page_handler(struct ctl_scsiio *ctsio,
softc = control_softc;
- mtx_lock(&softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
if (((current_cp->rlec & SCP_DSENSE) == 0)
&& ((user_cp->rlec & SCP_DSENSE) != 0)) {
/*
@@ -6109,11 +6237,10 @@ ctl_control_page_handler(struct ctl_scsiio *ctsio,
if (i == initidx)
continue;
- lun->pending_sense[i].ua_pending |=
- CTL_UA_MODE_CHANGE;
+ lun->pending_ua[i] |= CTL_UA_MODE_CHANGE;
}
}
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
return (0);
}
@@ -6692,6 +6819,24 @@ ctl_mode_sense(struct ctl_scsiio *ctsio)
else
control_dev = 0;
+ if (lun->flags & CTL_LUN_PR_RESERVED) {
+ uint32_t residx;
+
+ /*
+ * XXX KDM need a lock here.
+ */
+ residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
+ if ((lun->res_type == SPR_TYPE_EX_AC
+ && residx != lun->pr_res_idx)
+ || ((lun->res_type == SPR_TYPE_EX_AC_RO
+ || lun->res_type == SPR_TYPE_EX_AC_AR)
+ && !lun->per_res[residx].registered)) {
+ ctl_set_reservation_conflict(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+ }
+
switch (ctsio->cdb[0]) {
case MODE_SENSE_6: {
struct scsi_mode_sense_6 *cdb;
@@ -6986,6 +7131,7 @@ ctl_mode_sense(struct ctl_scsiio *ctsio)
ctsio->scsi_status = SCSI_STATUS_OK;
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
ctsio->be_move_done = ctl_config_move_done;
ctl_datamove((union ctl_io *)ctsio);
@@ -7045,13 +7191,14 @@ ctl_read_capacity(struct ctl_scsiio *ctsio)
ctsio->scsi_status = SCSI_STATUS_OK;
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
ctsio->be_move_done = ctl_config_move_done;
ctl_datamove((union ctl_io *)ctsio);
return (CTL_RETVAL_COMPLETE);
}
-static int
+int
ctl_read_capacity_16(struct ctl_scsiio *ctsio)
{
struct scsi_read_capacity_16 *cdb;
@@ -7107,6 +7254,7 @@ ctl_read_capacity_16(struct ctl_scsiio *ctsio)
ctsio->scsi_status = SCSI_STATUS_OK;
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
ctsio->be_move_done = ctl_config_move_done;
ctl_datamove((union ctl_io *)ctsio);
@@ -7114,83 +7262,330 @@ ctl_read_capacity_16(struct ctl_scsiio *ctsio)
}
int
-ctl_service_action_in(struct ctl_scsiio *ctsio)
+ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio)
{
- struct scsi_service_action_in *cdb;
+ struct scsi_maintenance_in *cdb;
int retval;
+ int alloc_len, ext, total_len = 0, g, p, pc, pg;
+ int num_target_port_groups, num_target_ports, single;
+ struct ctl_lun *lun;
+ struct ctl_softc *softc;
+ struct ctl_port *port;
+ struct scsi_target_group_data *rtg_ptr;
+ struct scsi_target_group_data_extended *rtg_ext_ptr;
+ struct scsi_target_port_group_descriptor *tpg_desc;
- CTL_DEBUG_PRINT(("ctl_service_action_in\n"));
+ CTL_DEBUG_PRINT(("ctl_report_tagret_port_groups\n"));
- cdb = (struct scsi_service_action_in *)ctsio->cdb;
+ cdb = (struct scsi_maintenance_in *)ctsio->cdb;
+ softc = control_softc;
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
retval = CTL_RETVAL_COMPLETE;
- switch (cdb->service_action) {
- case SRC16_SERVICE_ACTION:
- retval = ctl_read_capacity_16(ctsio);
+ switch (cdb->byte2 & STG_PDF_MASK) {
+ case STG_PDF_LENGTH:
+ ext = 0;
+ break;
+ case STG_PDF_EXTENDED:
+ ext = 1;
break;
default:
ctl_set_invalid_field(/*ctsio*/ ctsio,
/*sks_valid*/ 1,
/*command*/ 1,
- /*field*/ 1,
+ /*field*/ 2,
/*bit_valid*/ 1,
- /*bit*/ 4);
+ /*bit*/ 5);
ctl_done((union ctl_io *)ctsio);
- break;
+ return(retval);
}
- return (retval);
+ single = ctl_is_single;
+ if (single)
+ num_target_port_groups = 1;
+ else
+ num_target_port_groups = NUM_TARGET_PORT_GROUPS;
+ num_target_ports = 0;
+ mtx_lock(&softc->ctl_lock);
+ STAILQ_FOREACH(port, &softc->port_list, links) {
+ if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
+ continue;
+ if (ctl_map_lun_back(port->targ_port, lun->lun) >= CTL_MAX_LUNS)
+ continue;
+ num_target_ports++;
+ }
+ mtx_unlock(&softc->ctl_lock);
+
+ if (ext)
+ total_len = sizeof(struct scsi_target_group_data_extended);
+ else
+ total_len = sizeof(struct scsi_target_group_data);
+ total_len += sizeof(struct scsi_target_port_group_descriptor) *
+ num_target_port_groups +
+ sizeof(struct scsi_target_port_descriptor) *
+ num_target_ports * num_target_port_groups;
+
+ alloc_len = scsi_4btoul(cdb->length);
+
+ ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
+
+ ctsio->kern_sg_entries = 0;
+
+ if (total_len < alloc_len) {
+ ctsio->residual = alloc_len - total_len;
+ ctsio->kern_data_len = total_len;
+ ctsio->kern_total_len = total_len;
+ } else {
+ ctsio->residual = 0;
+ ctsio->kern_data_len = alloc_len;
+ ctsio->kern_total_len = alloc_len;
+ }
+ ctsio->kern_data_resid = 0;
+ ctsio->kern_rel_offset = 0;
+
+ if (ext) {
+ rtg_ext_ptr = (struct scsi_target_group_data_extended *)
+ ctsio->kern_data_ptr;
+ scsi_ulto4b(total_len - 4, rtg_ext_ptr->length);
+ rtg_ext_ptr->format_type = 0x10;
+ rtg_ext_ptr->implicit_transition_time = 0;
+ tpg_desc = &rtg_ext_ptr->groups[0];
+ } else {
+ rtg_ptr = (struct scsi_target_group_data *)
+ ctsio->kern_data_ptr;
+ scsi_ulto4b(total_len - 4, rtg_ptr->length);
+ tpg_desc = &rtg_ptr->groups[0];
+ }
+
+ pg = ctsio->io_hdr.nexus.targ_port / CTL_MAX_PORTS;
+ mtx_lock(&softc->ctl_lock);
+ for (g = 0; g < num_target_port_groups; g++) {
+ if (g == pg)
+ tpg_desc->pref_state = TPG_PRIMARY |
+ TPG_ASYMMETRIC_ACCESS_OPTIMIZED;
+ else
+ tpg_desc->pref_state =
+ TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
+ tpg_desc->support = TPG_AO_SUP;
+ if (!single)
+ tpg_desc->support |= TPG_AN_SUP;
+ scsi_ulto2b(g + 1, tpg_desc->target_port_group);
+ tpg_desc->status = TPG_IMPLICIT;
+ pc = 0;
+ STAILQ_FOREACH(port, &softc->port_list, links) {
+ if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
+ continue;
+ if (ctl_map_lun_back(port->targ_port, lun->lun) >=
+ CTL_MAX_LUNS)
+ continue;
+ p = port->targ_port % CTL_MAX_PORTS + g * CTL_MAX_PORTS;
+ scsi_ulto2b(p, tpg_desc->descriptors[pc].
+ relative_target_port_identifier);
+ pc++;
+ }
+ tpg_desc->target_port_count = pc;
+ tpg_desc = (struct scsi_target_port_group_descriptor *)
+ &tpg_desc->descriptors[pc];
+ }
+ mtx_unlock(&softc->ctl_lock);
+
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+ ctsio->be_move_done = ctl_config_move_done;
+
+ CTL_DEBUG_PRINT(("buf = %x %x %x %x %x %x %x %x\n",
+ ctsio->kern_data_ptr[0], ctsio->kern_data_ptr[1],
+ ctsio->kern_data_ptr[2], ctsio->kern_data_ptr[3],
+ ctsio->kern_data_ptr[4], ctsio->kern_data_ptr[5],
+ ctsio->kern_data_ptr[6], ctsio->kern_data_ptr[7]));
+
+ ctl_datamove((union ctl_io *)ctsio);
+ return(retval);
}
int
-ctl_maintenance_in(struct ctl_scsiio *ctsio)
+ctl_report_supported_opcodes(struct ctl_scsiio *ctsio)
{
- struct scsi_maintenance_in *cdb;
- int retval;
- int alloc_len, total_len = 0;
- int num_target_port_groups, single;
struct ctl_lun *lun;
- struct ctl_softc *softc;
- struct scsi_target_group_data *rtg_ptr;
- struct scsi_target_port_group_descriptor *tpg_desc_ptr1, *tpg_desc_ptr2;
- struct scsi_target_port_descriptor *tp_desc_ptr1_1, *tp_desc_ptr1_2,
- *tp_desc_ptr2_1, *tp_desc_ptr2_2;
+ struct scsi_report_supported_opcodes *cdb;
+ const struct ctl_cmd_entry *entry, *sentry;
+ struct scsi_report_supported_opcodes_all *all;
+ struct scsi_report_supported_opcodes_descr *descr;
+ struct scsi_report_supported_opcodes_one *one;
+ int retval;
+ int alloc_len, total_len;
+ int opcode, service_action, i, j, num;
- CTL_DEBUG_PRINT(("ctl_maintenance_in\n"));
+ CTL_DEBUG_PRINT(("ctl_report_supported_opcodes\n"));
- cdb = (struct scsi_maintenance_in *)ctsio->cdb;
- softc = control_softc;
+ cdb = (struct scsi_report_supported_opcodes *)ctsio->cdb;
lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
retval = CTL_RETVAL_COMPLETE;
- if ((cdb->byte2 & SERVICE_ACTION_MASK) != SA_RPRT_TRGT_GRP) {
+ opcode = cdb->requested_opcode;
+ service_action = scsi_2btoul(cdb->requested_service_action);
+ switch (cdb->options & RSO_OPTIONS_MASK) {
+ case RSO_OPTIONS_ALL:
+ num = 0;
+ for (i = 0; i < 256; i++) {
+ entry = &ctl_cmd_table[i];
+ if (entry->flags & CTL_CMD_FLAG_SA5) {
+ for (j = 0; j < 32; j++) {
+ sentry = &((const struct ctl_cmd_entry *)
+ entry->execute)[j];
+ if (ctl_cmd_applicable(
+ lun->be_lun->lun_type, sentry))
+ num++;
+ }
+ } else {
+ if (ctl_cmd_applicable(lun->be_lun->lun_type,
+ entry))
+ num++;
+ }
+ }
+ total_len = sizeof(struct scsi_report_supported_opcodes_all) +
+ num * sizeof(struct scsi_report_supported_opcodes_descr);
+ break;
+ case RSO_OPTIONS_OC:
+ if (ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) {
+ ctl_set_invalid_field(/*ctsio*/ ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 1,
+ /*field*/ 2,
+ /*bit_valid*/ 1,
+ /*bit*/ 2);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+ total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32;
+ break;
+ case RSO_OPTIONS_OC_SA:
+ if ((ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) == 0 ||
+ service_action >= 32) {
+ ctl_set_invalid_field(/*ctsio*/ ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 1,
+ /*field*/ 2,
+ /*bit_valid*/ 1,
+ /*bit*/ 2);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+ total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32;
+ break;
+ default:
ctl_set_invalid_field(/*ctsio*/ ctsio,
/*sks_valid*/ 1,
/*command*/ 1,
- /*field*/ 1,
+ /*field*/ 2,
/*bit_valid*/ 1,
- /*bit*/ 4);
+ /*bit*/ 2);
ctl_done((union ctl_io *)ctsio);
- return(retval);
+ return (CTL_RETVAL_COMPLETE);
}
- mtx_lock(&softc->ctl_lock);
- single = ctl_is_single;
- mtx_unlock(&softc->ctl_lock);
+ alloc_len = scsi_4btoul(cdb->length);
- if (single)
- num_target_port_groups = NUM_TARGET_PORT_GROUPS - 1;
- else
- num_target_port_groups = NUM_TARGET_PORT_GROUPS;
+ ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
- total_len = sizeof(struct scsi_target_group_data) +
- sizeof(struct scsi_target_port_group_descriptor) *
- num_target_port_groups +
- sizeof(struct scsi_target_port_descriptor) *
- NUM_PORTS_PER_GRP * num_target_port_groups;
+ ctsio->kern_sg_entries = 0;
+
+ if (total_len < alloc_len) {
+ ctsio->residual = alloc_len - total_len;
+ ctsio->kern_data_len = total_len;
+ ctsio->kern_total_len = total_len;
+ } else {
+ ctsio->residual = 0;
+ ctsio->kern_data_len = alloc_len;
+ ctsio->kern_total_len = alloc_len;
+ }
+ ctsio->kern_data_resid = 0;
+ ctsio->kern_rel_offset = 0;
+
+ switch (cdb->options & RSO_OPTIONS_MASK) {
+ case RSO_OPTIONS_ALL:
+ all = (struct scsi_report_supported_opcodes_all *)
+ ctsio->kern_data_ptr;
+ num = 0;
+ for (i = 0; i < 256; i++) {
+ entry = &ctl_cmd_table[i];
+ if (entry->flags & CTL_CMD_FLAG_SA5) {
+ for (j = 0; j < 32; j++) {
+ sentry = &((const struct ctl_cmd_entry *)
+ entry->execute)[j];
+ if (!ctl_cmd_applicable(
+ lun->be_lun->lun_type, sentry))
+ continue;
+ descr = &all->descr[num++];
+ descr->opcode = i;
+ scsi_ulto2b(j, descr->service_action);
+ descr->flags = RSO_SERVACTV;
+ scsi_ulto2b(sentry->length,
+ descr->cdb_length);
+ }
+ } else {
+ if (!ctl_cmd_applicable(lun->be_lun->lun_type,
+ entry))
+ continue;
+ descr = &all->descr[num++];
+ descr->opcode = i;
+ scsi_ulto2b(0, descr->service_action);
+ descr->flags = 0;
+ scsi_ulto2b(entry->length, descr->cdb_length);
+ }
+ }
+ scsi_ulto4b(
+ num * sizeof(struct scsi_report_supported_opcodes_descr),
+ all->length);
+ break;
+ case RSO_OPTIONS_OC:
+ one = (struct scsi_report_supported_opcodes_one *)
+ ctsio->kern_data_ptr;
+ entry = &ctl_cmd_table[opcode];
+ goto fill_one;
+ case RSO_OPTIONS_OC_SA:
+ one = (struct scsi_report_supported_opcodes_one *)
+ ctsio->kern_data_ptr;
+ entry = &ctl_cmd_table[opcode];
+ entry = &((const struct ctl_cmd_entry *)
+ entry->execute)[service_action];
+fill_one:
+ if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) {
+ one->support = 3;
+ scsi_ulto2b(entry->length, one->cdb_length);
+ one->cdb_usage[0] = opcode;
+ memcpy(&one->cdb_usage[1], entry->usage,
+ entry->length - 1);
+ } else
+ one->support = 1;
+ break;
+ }
+
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+ ctsio->be_move_done = ctl_config_move_done;
+
+ ctl_datamove((union ctl_io *)ctsio);
+ return(retval);
+}
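Each of these REPORT-style handlers uses the same allocation-length convention: compute the full payload size, cap the transfer at the CDB's allocation length, and record any shortfall as residual. A minimal sketch of that convention (the helper name is invented for illustration):

static void
example_set_transfer_len(struct ctl_scsiio *ctsio, int total_len, int alloc_len)
{
	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
}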
+
+int
+ctl_report_supported_tmf(struct ctl_scsiio *ctsio)
+{
+ struct ctl_lun *lun;
+ struct scsi_report_supported_tmf *cdb;
+ struct scsi_report_supported_tmf_data *data;
+ int retval;
+ int alloc_len, total_len;
+ CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n"));
+
+ cdb = (struct scsi_report_supported_tmf *)ctsio->cdb;
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+
+ retval = CTL_RETVAL_COMPLETE;
+
+ total_len = sizeof(struct scsi_report_supported_tmf_data);
alloc_len = scsi_4btoul(cdb->length);
ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
@@ -7209,87 +7604,67 @@ ctl_maintenance_in(struct ctl_scsiio *ctsio)
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
- rtg_ptr = (struct scsi_target_group_data *)ctsio->kern_data_ptr;
+ data = (struct scsi_report_supported_tmf_data *)ctsio->kern_data_ptr;
+ data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_TRS;
+ data->byte2 |= RST_ITNRS;
- tpg_desc_ptr1 = &rtg_ptr->groups[0];
- tp_desc_ptr1_1 = &tpg_desc_ptr1->descriptors[0];
- tp_desc_ptr1_2 = (struct scsi_target_port_descriptor *)
- &tp_desc_ptr1_1->desc_list[0];
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+ ctsio->be_move_done = ctl_config_move_done;
- if (single == 0) {
- tpg_desc_ptr2 = (struct scsi_target_port_group_descriptor *)
- &tp_desc_ptr1_2->desc_list[0];
- tp_desc_ptr2_1 = &tpg_desc_ptr2->descriptors[0];
- tp_desc_ptr2_2 = (struct scsi_target_port_descriptor *)
- &tp_desc_ptr2_1->desc_list[0];
- } else {
- tpg_desc_ptr2 = NULL;
- tp_desc_ptr2_1 = NULL;
- tp_desc_ptr2_2 = NULL;
- }
+ ctl_datamove((union ctl_io *)ctsio);
+ return (retval);
+}
- scsi_ulto4b(total_len - 4, rtg_ptr->length);
- if (single == 0) {
- if (ctsio->io_hdr.nexus.targ_port < CTL_MAX_PORTS) {
- if (lun->flags & CTL_LUN_PRIMARY_SC) {
- tpg_desc_ptr1->pref_state = TPG_PRIMARY;
- tpg_desc_ptr2->pref_state =
- TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
- } else {
- tpg_desc_ptr1->pref_state =
- TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
- tpg_desc_ptr2->pref_state = TPG_PRIMARY;
- }
- } else {
- if (lun->flags & CTL_LUN_PRIMARY_SC) {
- tpg_desc_ptr1->pref_state =
- TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
- tpg_desc_ptr2->pref_state = TPG_PRIMARY;
- } else {
- tpg_desc_ptr1->pref_state = TPG_PRIMARY;
- tpg_desc_ptr2->pref_state =
- TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
- }
- }
- } else {
- tpg_desc_ptr1->pref_state = TPG_PRIMARY;
- }
- tpg_desc_ptr1->support = 0;
- tpg_desc_ptr1->target_port_group[1] = 1;
- tpg_desc_ptr1->status = TPG_IMPLICIT;
- tpg_desc_ptr1->target_port_count= NUM_PORTS_PER_GRP;
+int
+ctl_report_timestamp(struct ctl_scsiio *ctsio)
+{
+ struct ctl_lun *lun;
+ struct scsi_report_timestamp *cdb;
+ struct scsi_report_timestamp_data *data;
+ struct timeval tv;
+ int64_t timestamp;
+ int retval;
+ int alloc_len, total_len;
+
+ CTL_DEBUG_PRINT(("ctl_report_timestamp\n"));
+
+ cdb = (struct scsi_report_timestamp *)ctsio->cdb;
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
- if (single == 0) {
- tpg_desc_ptr2->support = 0;
- tpg_desc_ptr2->target_port_group[1] = 2;
- tpg_desc_ptr2->status = TPG_IMPLICIT;
- tpg_desc_ptr2->target_port_count = NUM_PORTS_PER_GRP;
+ retval = CTL_RETVAL_COMPLETE;
+
+ total_len = sizeof(struct scsi_report_timestamp_data);
+ alloc_len = scsi_4btoul(cdb->length);
- tp_desc_ptr1_1->relative_target_port_identifier[1] = 1;
- tp_desc_ptr1_2->relative_target_port_identifier[1] = 2;
+ ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
+
+ ctsio->kern_sg_entries = 0;
- tp_desc_ptr2_1->relative_target_port_identifier[1] = 9;
- tp_desc_ptr2_2->relative_target_port_identifier[1] = 10;
+ if (total_len < alloc_len) {
+ ctsio->residual = alloc_len - total_len;
+ ctsio->kern_data_len = total_len;
+ ctsio->kern_total_len = total_len;
} else {
- if (ctsio->io_hdr.nexus.targ_port < CTL_MAX_PORTS) {
- tp_desc_ptr1_1->relative_target_port_identifier[1] = 1;
- tp_desc_ptr1_2->relative_target_port_identifier[1] = 2;
- } else {
- tp_desc_ptr1_1->relative_target_port_identifier[1] = 9;
- tp_desc_ptr1_2->relative_target_port_identifier[1] = 10;
- }
+ ctsio->residual = 0;
+ ctsio->kern_data_len = alloc_len;
+ ctsio->kern_total_len = alloc_len;
}
+ ctsio->kern_data_resid = 0;
+ ctsio->kern_rel_offset = 0;
- ctsio->be_move_done = ctl_config_move_done;
+ data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr;
+ scsi_ulto2b(sizeof(*data) - 2, data->length);
+ data->origin = RTS_ORIG_OUTSIDE;
+ getmicrotime(&tv);
+ timestamp = (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000;
+ scsi_ulto4b(timestamp >> 16, data->timestamp);
+ scsi_ulto2b(timestamp & 0xffff, &data->timestamp[4]);
- CTL_DEBUG_PRINT(("buf = %x %x %x %x %x %x %x %x\n",
- ctsio->kern_data_ptr[0], ctsio->kern_data_ptr[1],
- ctsio->kern_data_ptr[2], ctsio->kern_data_ptr[3],
- ctsio->kern_data_ptr[4], ctsio->kern_data_ptr[5],
- ctsio->kern_data_ptr[6], ctsio->kern_data_ptr[7]));
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+ ctsio->be_move_done = ctl_config_move_done;
ctl_datamove((union ctl_io *)ctsio);
- return(retval);
+ return (retval);
}
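The REPORT TIMESTAMP payload carries the time as a 48-bit millisecond count stored big-endian, which is why the value is split into its upper 32 and lower 16 bits above. A small sketch of the same encoding (illustrative helper name):

static void
example_encode_timestamp_ms(int64_t ms, uint8_t ts[6])
{
	scsi_ulto4b(ms >> 16, &ts[0]);		/* bits 47..16 */
	scsi_ulto2b(ms & 0xffff, &ts[4]);	/* bits 15..0  */
}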
int
@@ -7312,7 +7687,7 @@ ctl_persistent_reserve_in(struct ctl_scsiio *ctsio)
lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
retry:
- mtx_lock(&softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
switch (cdb->action) {
case SPRI_RK: /* read keys */
total_len = sizeof(struct scsi_per_res_in_keys) +
@@ -7329,19 +7704,14 @@ retry:
total_len = sizeof(struct scsi_per_res_cap);
break;
case SPRI_RS: /* read full status */
+ total_len = sizeof(struct scsi_per_res_in_header) +
+ (sizeof(struct scsi_per_res_in_full_desc) + 256) *
+ lun->pr_key_count;
+ break;
default:
- mtx_unlock(&softc->ctl_lock);
- ctl_set_invalid_field(ctsio,
- /*sks_valid*/ 1,
- /*command*/ 1,
- /*field*/ 1,
- /*bit_valid*/ 1,
- /*bit*/ 0);
- ctl_done((union ctl_io *)ctsio);
- return (CTL_RETVAL_COMPLETE);
- break; /* NOTREACHED */
+ panic("Invalid PR type %x", cdb->action);
}
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
@@ -7359,7 +7729,7 @@ retry:
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
- mtx_lock(&softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
switch (cdb->action) {
case SPRI_RK: { // read keys
struct scsi_per_res_in_keys *res_keys;
@@ -7377,7 +7747,7 @@ retry:
if (total_len != (sizeof(struct scsi_per_res_in_keys) +
(lun->pr_key_count *
sizeof(struct scsi_per_res_key)))){
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
free(ctsio->kern_data_ptr, M_CTL);
printf("%s: reservation length changed, retrying\n",
__func__);
@@ -7452,7 +7822,7 @@ retry:
* command active right now.)
*/
if (tmp_len != total_len) {
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
free(ctsio->kern_data_ptr, M_CTL);
printf("%s: reservation status changed, retrying\n",
__func__);
@@ -7484,7 +7854,7 @@ retry:
res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr;
scsi_ulto2b(sizeof(*res_cap), res_cap->length);
- res_cap->flags2 |= SPRI_TMV;
+ res_cap->flags2 |= SPRI_TMV | SPRI_ALLOW_3;
type_mask = SPRI_TM_WR_EX_AR |
SPRI_TM_EX_AC_RO |
SPRI_TM_WR_EX_RO |
@@ -7494,7 +7864,62 @@ retry:
scsi_ulto2b(type_mask, res_cap->type_mask);
break;
}
- case SPRI_RS: //read full status
+ case SPRI_RS: { // read full status
+ struct scsi_per_res_in_full *res_status;
+ struct scsi_per_res_in_full_desc *res_desc;
+ struct ctl_port *port;
+ int i, len;
+
+ res_status = (struct scsi_per_res_in_full*)ctsio->kern_data_ptr;
+
+ /*
+ * We had to drop the lock to allocate our buffer, which
+ * leaves time for someone to come in with another
+ * persistent reservation. (That is unlikely, though,
+ * since this should be the only persistent reservation
+ * command active right now.)
+ */
+ if (total_len < (sizeof(struct scsi_per_res_in_header) +
+ (sizeof(struct scsi_per_res_in_full_desc) + 256) *
+ lun->pr_key_count)){
+ mtx_unlock(&lun->lun_lock);
+ free(ctsio->kern_data_ptr, M_CTL);
+ printf("%s: reservation length changed, retrying\n",
+ __func__);
+ goto retry;
+ }
+
+ scsi_ulto4b(lun->PRGeneration, res_status->header.generation);
+
+ res_desc = &res_status->desc[0];
+ for (i = 0; i < 2*CTL_MAX_INITIATORS; i++) {
+ if (!lun->per_res[i].registered)
+ continue;
+
+ memcpy(&res_desc->res_key, &lun->per_res[i].res_key.key,
+ sizeof(res_desc->res_key));
+ if ((lun->flags & CTL_LUN_PR_RESERVED) &&
+ (lun->pr_res_idx == i ||
+ lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS)) {
+ res_desc->flags = SPRI_FULL_R_HOLDER;
+ res_desc->scopetype = lun->res_type;
+ }
+ scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT,
+ res_desc->rel_trgt_port_id);
+ len = 0;
+ port = softc->ctl_ports[i / CTL_MAX_INIT_PER_PORT];
+ if (port != NULL)
+ len = ctl_create_iid(port,
+ i % CTL_MAX_INIT_PER_PORT,
+ res_desc->transport_id);
+ scsi_ulto4b(len, res_desc->additional_length);
+ res_desc = (struct scsi_per_res_in_full_desc *)
+ &res_desc->transport_id[len];
+ }
+ scsi_ulto4b((uint8_t *)res_desc - (uint8_t *)&res_status->desc[0],
+ res_status->header.length);
+ break;
+ }
default:
/*
* This is a bug, because we just checked for this above,
@@ -7503,8 +7928,9 @@ retry:
panic("Invalid PR type %x", cdb->action);
break; /* NOTREACHED */
}
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
ctsio->be_move_done = ctl_config_move_done;
CTL_DEBUG_PRINT(("buf = %x %x %x %x %x %x %x %x\n",
@@ -7534,13 +7960,13 @@ ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
retval = 0;
+ mtx_lock(&lun->lun_lock);
if (sa_res_key == 0) {
- mtx_lock(&softc->ctl_lock);
if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) {
/* validate scope and type */
if ((cdb->scope_type & SPR_SCOPE_MASK) !=
SPR_LU_SCOPE) {
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
ctl_set_invalid_field(/*ctsio*/ ctsio,
/*sks_valid*/ 1,
/*command*/ 1,
@@ -7552,7 +7978,7 @@ ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
}
if (type>8 || type==2 || type==4 || type==0) {
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
ctl_set_invalid_field(/*ctsio*/ ctsio,
/*sks_valid*/ 1,
/*command*/ 1,
@@ -7576,12 +8002,11 @@ ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
if (!persis_offset
&& i <CTL_MAX_INITIATORS)
- lun->pending_sense[i].ua_pending |=
+ lun->pending_ua[i] |=
CTL_UA_REG_PREEMPT;
else if (persis_offset
&& i >= persis_offset)
- lun->pending_sense[i-persis_offset
- ].ua_pending |=
+ lun->pending_ua[i-persis_offset] |=
CTL_UA_REG_PREEMPT;
lun->per_res[i].registered = 0;
memset(&lun->per_res[i].res_key, 0,
@@ -7594,7 +8019,6 @@ ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
&& lun->res_type != SPR_TYPE_EX_AC_AR)
lun->pr_res_idx = residx;
- mtx_unlock(&softc->ctl_lock);
/* send msg to other side */
persis_io.hdr.nexus = ctsio->io_hdr.nexus;
persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
@@ -7613,7 +8037,7 @@ ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
}
} else {
/* not all registrants */
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
free(ctsio->kern_data_ptr, M_CTL);
ctl_set_invalid_field(ctsio,
/*sks_valid*/ 1,
@@ -7628,7 +8052,6 @@ ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
|| !(lun->flags & CTL_LUN_PR_RESERVED)) {
int found = 0;
- mtx_lock(&softc->ctl_lock);
if (res_key == sa_res_key) {
/* special case */
/*
@@ -7640,7 +8063,7 @@ ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
* zero I'll take that approach since this has
* to do with the sa_res_key.
*/
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
free(ctsio->kern_data_ptr, M_CTL);
ctl_set_invalid_field(ctsio,
/*sks_valid*/ 1,
@@ -7665,17 +8088,14 @@ ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
sizeof(struct scsi_per_res_key));
lun->pr_key_count--;
- if (!persis_offset
- && i < CTL_MAX_INITIATORS)
- lun->pending_sense[i].ua_pending |=
- CTL_UA_REG_PREEMPT;
- else if (persis_offset
- && i >= persis_offset)
- lun->pending_sense[i-persis_offset].ua_pending|=
+ if (!persis_offset && i < CTL_MAX_INITIATORS)
+ lun->pending_ua[i] |= CTL_UA_REG_PREEMPT;
+ else if (persis_offset && i >= persis_offset)
+ lun->pending_ua[i-persis_offset] |=
CTL_UA_REG_PREEMPT;
}
- mtx_unlock(&softc->ctl_lock);
if (!found) {
+ mtx_unlock(&lun->lun_lock);
free(ctsio->kern_data_ptr, M_CTL);
ctl_set_reservation_conflict(ctsio);
ctl_done((union ctl_io *)ctsio);
@@ -7705,6 +8125,7 @@ ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
/* validate scope and type */
if ((cdb->scope_type & SPR_SCOPE_MASK) !=
SPR_LU_SCOPE) {
+ mtx_unlock(&lun->lun_lock);
ctl_set_invalid_field(/*ctsio*/ ctsio,
/*sks_valid*/ 1,
/*command*/ 1,
@@ -7716,6 +8137,7 @@ ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
}
if (type>8 || type==2 || type==4 || type==0) {
+ mtx_unlock(&lun->lun_lock);
ctl_set_invalid_field(/*ctsio*/ ctsio,
/*sks_valid*/ 1,
/*command*/ 1,
@@ -7760,27 +8182,23 @@ ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
if (!persis_offset
&& i < CTL_MAX_INITIATORS)
- lun->pending_sense[i
- ].ua_pending |=
+ lun->pending_ua[i] |=
CTL_UA_REG_PREEMPT;
else if (persis_offset
&& i >= persis_offset)
- lun->pending_sense[
- i-persis_offset].ua_pending |=
+ lun->pending_ua[i-persis_offset] |=
CTL_UA_REG_PREEMPT;
} else if (type != lun->res_type
&& (lun->res_type == SPR_TYPE_WR_EX_RO
|| lun->res_type ==SPR_TYPE_EX_AC_RO)){
if (!persis_offset
&& i < CTL_MAX_INITIATORS)
- lun->pending_sense[i
- ].ua_pending |=
+ lun->pending_ua[i] |=
CTL_UA_RES_RELEASE;
else if (persis_offset
&& i >= persis_offset)
- lun->pending_sense[
- i-persis_offset
- ].ua_pending |=
+ lun->pending_ua[
+ i-persis_offset] |=
CTL_UA_RES_RELEASE;
}
}
@@ -7790,8 +8208,7 @@ ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
&& lun->res_type != SPR_TYPE_EX_AC_AR)
lun->pr_res_idx = residx;
else
- lun->pr_res_idx =
- CTL_PR_ALL_REGISTRANTS;
+ lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS;
persis_io.hdr.nexus = ctsio->io_hdr.nexus;
persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
@@ -7814,7 +8231,6 @@ ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
* remove registrants
*/
int found=0;
- mtx_lock(&softc->ctl_lock);
for (i=0; i < 2*CTL_MAX_INITIATORS; i++) {
if (memcmp(param->serv_act_res_key,
@@ -7830,23 +8246,21 @@ ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
if (!persis_offset
&& i < CTL_MAX_INITIATORS)
- lun->pending_sense[i].ua_pending |=
+ lun->pending_ua[i] |=
CTL_UA_REG_PREEMPT;
else if (persis_offset
&& i >= persis_offset)
- lun->pending_sense[
- i-persis_offset].ua_pending |=
+ lun->pending_ua[i-persis_offset] |=
CTL_UA_REG_PREEMPT;
}
if (!found) {
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
free(ctsio->kern_data_ptr, M_CTL);
ctl_set_reservation_conflict(ctsio);
ctl_done((union ctl_io *)ctsio);
return (1);
}
- mtx_unlock(&softc->ctl_lock);
persis_io.hdr.nexus = ctsio->io_hdr.nexus;
persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
@@ -7866,6 +8280,7 @@ ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
}
lun->PRGeneration++;
+ mtx_unlock(&lun->lun_lock);
return (retval);
}
@@ -7897,11 +8312,10 @@ ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg)
if (!persis_offset
&& i < CTL_MAX_INITIATORS)
- lun->pending_sense[i].ua_pending |=
+ lun->pending_ua[i] |=
CTL_UA_REG_PREEMPT;
else if (persis_offset && i >= persis_offset)
- lun->pending_sense[i -
- persis_offset].ua_pending |=
+ lun->pending_ua[i - persis_offset] |=
CTL_UA_REG_PREEMPT;
lun->per_res[i].registered = 0;
memset(&lun->per_res[i].res_key, 0,
@@ -7928,12 +8342,11 @@ ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg)
if (!persis_offset
&& i < persis_offset)
- lun->pending_sense[i].ua_pending |=
+ lun->pending_ua[i] |=
CTL_UA_REG_PREEMPT;
else if (persis_offset
&& i >= persis_offset)
- lun->pending_sense[i -
- persis_offset].ua_pending |=
+ lun->pending_ua[i - persis_offset] |=
CTL_UA_REG_PREEMPT;
}
}
@@ -7956,25 +8369,22 @@ ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg)
lun->pr_key_count--;
if (!persis_offset
&& i < CTL_MAX_INITIATORS)
- lun->pending_sense[i].ua_pending |=
+ lun->pending_ua[i] |=
CTL_UA_REG_PREEMPT;
else if (persis_offset
&& i >= persis_offset)
- lun->pending_sense[i -
- persis_offset].ua_pending |=
+ lun->pending_ua[i - persis_offset] |=
CTL_UA_REG_PREEMPT;
} else if (msg->pr.pr_info.res_type != lun->res_type
&& (lun->res_type == SPR_TYPE_WR_EX_RO
|| lun->res_type == SPR_TYPE_EX_AC_RO)) {
if (!persis_offset
&& i < persis_offset)
- lun->pending_sense[i
- ].ua_pending |=
+ lun->pending_ua[i] |=
CTL_UA_RES_RELEASE;
else if (persis_offset
&& i >= persis_offset)
- lun->pending_sense[i -
- persis_offset].ua_pending |=
+ lun->pending_ua[i - persis_offset] |=
CTL_UA_RES_RELEASE;
}
}
@@ -8048,28 +8458,6 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
}
}
- switch (cdb->action & SPRO_ACTION_MASK) {
- case SPRO_REGISTER:
- case SPRO_RESERVE:
- case SPRO_RELEASE:
- case SPRO_CLEAR:
- case SPRO_PREEMPT:
- case SPRO_REG_IGNO:
- break;
- case SPRO_REG_MOVE:
- case SPRO_PRE_ABO:
- default:
- ctl_set_invalid_field(/*ctsio*/ ctsio,
- /*sks_valid*/ 1,
- /*command*/ 1,
- /*field*/ 1,
- /*bit_valid*/ 1,
- /*bit*/ 0);
- ctl_done((union ctl_io *)ctsio);
- return (CTL_RETVAL_COMPLETE);
- break; /* NOTREACHED */
- }
-
param_len = scsi_4btoul(cdb->length);
if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
@@ -8097,7 +8485,7 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
* This must be done for all other service actions
*/
if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) {
- mtx_lock(&softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
if (lun->per_res[residx].registered) {
if (memcmp(param->res_key.key,
lun->per_res[residx].res_key.key,
@@ -8108,7 +8496,7 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
* the one the initiator previously
* registered.
*/
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
free(ctsio->kern_data_ptr, M_CTL);
ctl_set_reservation_conflict(ctsio);
ctl_done((union ctl_io *)ctsio);
@@ -8118,7 +8506,7 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
/*
* We are not registered
*/
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
free(ctsio->kern_data_ptr, M_CTL);
ctl_set_reservation_conflict(ctsio);
ctl_done((union ctl_io *)ctsio);
@@ -8128,13 +8516,13 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
* We are not registered and trying to register but
* the register key isn't zero.
*/
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
free(ctsio->kern_data_ptr, M_CTL);
ctl_set_reservation_conflict(ctsio);
ctl_done((union ctl_io *)ctsio);
return (CTL_RETVAL_COMPLETE);
}
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
}
switch (cdb->action & SPRO_ACTION_MASK) {
@@ -8173,7 +8561,7 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
return (CTL_RETVAL_COMPLETE);
}
- mtx_lock(&softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
/*
* The initiator wants to clear the
@@ -8184,7 +8572,7 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
&& (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER)
|| ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO
&& !lun->per_res[residx].registered)) {
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
goto done;
}
@@ -8213,8 +8601,7 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
i+persis_offset].registered
== 0)
continue;
- lun->pending_sense[i
- ].ua_pending |=
+ lun->pending_ua[i] |=
CTL_UA_RES_RELEASE;
}
}
@@ -8236,7 +8623,6 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
printf("CTL:Persis Out error returned from "
"ctl_ha_msg_send %d\n", isc_retval);
}
- mtx_unlock(&softc->ctl_lock);
} else /* sa_res_key != 0 */ {
/*
@@ -8260,7 +8646,6 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
memcpy(persis_io.pr.pr_info.sa_res_key,
param->serv_act_res_key,
sizeof(param->serv_act_res_key));
- mtx_unlock(&softc->ctl_lock);
if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
&persis_io, sizeof(persis_io), 0)) >
CTL_HA_STATUS_SUCCESS) {
@@ -8269,6 +8654,7 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
}
}
lun->PRGeneration++;
+ mtx_unlock(&lun->lun_lock);
break;
}
@@ -8276,7 +8662,7 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
#if 0
printf("Reserve executed type %d\n", type);
#endif
- mtx_lock(&softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
if (lun->flags & CTL_LUN_PR_RESERVED) {
/*
* if this isn't the reservation holder and it's
@@ -8286,13 +8672,13 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
if ((lun->pr_res_idx != residx
&& lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS)
|| lun->res_type != type) {
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
free(ctsio->kern_data_ptr, M_CTL);
ctl_set_reservation_conflict(ctsio);
ctl_done((union ctl_io *)ctsio);
return (CTL_RETVAL_COMPLETE);
}
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
} else /* create a reservation */ {
/*
* If it's not an "all registrants" type record
@@ -8307,7 +8693,7 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
lun->flags |= CTL_LUN_PR_RESERVED;
lun->res_type = type;
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
/* send msg to other side */
persis_io.hdr.nexus = ctsio->io_hdr.nexus;
@@ -8325,10 +8711,10 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
break;
case SPRO_RELEASE:
- mtx_lock(&softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) {
/* No reservation exists return good status */
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
goto done;
}
/*
@@ -8340,12 +8726,12 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
* not a res holder return good status but
* do nothing
*/
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
goto done;
}
if (lun->res_type != type) {
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
free(ctsio->kern_data_ptr, M_CTL);
ctl_set_illegal_pr_release(ctsio);
ctl_done((union ctl_io *)ctsio);
@@ -8373,13 +8759,13 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
if (lun->per_res[i+persis_offset].registered
== 0)
continue;
- lun->pending_sense[i].ua_pending |=
+ lun->pending_ua[i] |=
CTL_UA_RES_RELEASE;
}
lun->per_res[residx].registered = 1;
}
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
/* Send msg to other side */
persis_io.hdr.nexus = ctsio->io_hdr.nexus;
persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
@@ -8394,7 +8780,7 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
case SPRO_CLEAR:
/* send msg to other side */
- mtx_lock(&softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
lun->flags &= ~CTL_LUN_PR_RESERVED;
lun->res_type = 0;
lun->pr_key_count = 0;
@@ -8408,18 +8794,18 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
for (i=0; i < 2*CTL_MAX_INITIATORS; i++)
if (lun->per_res[i].registered) {
if (!persis_offset && i < CTL_MAX_INITIATORS)
- lun->pending_sense[i].ua_pending |=
+ lun->pending_ua[i] |=
CTL_UA_RES_PREEMPT;
else if (persis_offset && i >= persis_offset)
- lun->pending_sense[i-persis_offset
- ].ua_pending |= CTL_UA_RES_PREEMPT;
+ lun->pending_ua[i-persis_offset] |=
+ CTL_UA_RES_PREEMPT;
memset(&lun->per_res[i].res_key,
0, sizeof(struct scsi_per_res_key));
lun->per_res[i].registered = 0;
}
lun->PRGeneration++;
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
persis_io.hdr.nexus = ctsio->io_hdr.nexus;
persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
persis_io.pr.pr_info.action = CTL_PR_CLEAR;
@@ -8439,19 +8825,8 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
return (CTL_RETVAL_COMPLETE);
break;
}
- case SPRO_REG_MOVE:
- case SPRO_PRE_ABO:
default:
- free(ctsio->kern_data_ptr, M_CTL);
- ctl_set_invalid_field(/*ctsio*/ ctsio,
- /*sks_valid*/ 1,
- /*command*/ 1,
- /*field*/ 1,
- /*bit_valid*/ 1,
- /*bit*/ 0);
- ctl_done((union ctl_io *)ctsio);
- return (CTL_RETVAL_COMPLETE);
- break; /* NOTREACHED */
+ panic("Invalid PR type %x", cdb->action);
}
done:
@@ -8478,12 +8853,9 @@ ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg)
softc = control_softc;
- mtx_lock(&softc->ctl_lock);
-
- targ_lun = msg->hdr.nexus.targ_lun;
- if (msg->hdr.nexus.lun_map_fn != NULL)
- targ_lun = msg->hdr.nexus.lun_map_fn(msg->hdr.nexus.lun_map_arg, targ_lun);
+ targ_lun = msg->hdr.nexus.targ_mapped_lun;
lun = softc->ctl_luns[targ_lun];
+ mtx_lock(&lun->lun_lock);
switch(msg->pr.pr_info.action) {
case CTL_PR_REG_KEY:
if (!lun->per_res[msg->pr.pr_info.residx].registered) {
@@ -8524,8 +8896,7 @@ ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg)
persis_offset].registered == 0)
continue;
- lun->pending_sense[i
- ].ua_pending |=
+ lun->pending_ua[i] |=
CTL_UA_RES_RELEASE;
}
}
@@ -8556,7 +8927,7 @@ ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg)
&& lun->res_type != SPR_TYPE_WR_EX) {
for (i = 0; i < CTL_MAX_INITIATORS; i++)
if (lun->per_res[i+persis_offset].registered)
- lun->pending_sense[i].ua_pending |=
+ lun->pending_ua[i] |=
CTL_UA_RES_RELEASE;
}
@@ -8579,11 +8950,10 @@ ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg)
continue;
if (!persis_offset
&& i < CTL_MAX_INITIATORS)
- lun->pending_sense[i].ua_pending |=
- CTL_UA_RES_PREEMPT;
+ lun->pending_ua[i] |= CTL_UA_RES_PREEMPT;
else if (persis_offset
&& i >= persis_offset)
- lun->pending_sense[i-persis_offset].ua_pending|=
+ lun->pending_ua[i-persis_offset] |=
CTL_UA_RES_PREEMPT;
memset(&lun->per_res[i].res_key, 0,
sizeof(struct scsi_per_res_key));
@@ -8593,17 +8963,17 @@ ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg)
break;
}
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
}
int
ctl_read_write(struct ctl_scsiio *ctsio)
{
struct ctl_lun *lun;
- struct ctl_lba_len lbalen;
+ struct ctl_lba_len_flags *lbalen;
uint64_t lba;
uint32_t num_blocks;
- int reladdr, fua, dpo, ebp;
+ int fua, dpo;
int retval;
int isread;
@@ -8611,10 +8981,8 @@ ctl_read_write(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0]));
- reladdr = 0;
fua = 0;
dpo = 0;
- ebp = 0;
retval = CTL_RETVAL_COMPLETE;
@@ -8662,17 +9030,11 @@ ctl_read_write(struct ctl_scsiio *ctsio)
cdb = (struct scsi_rw_10 *)ctsio->cdb;
- if (cdb->byte2 & SRW10_RELADDR)
- reladdr = 1;
if (cdb->byte2 & SRW10_FUA)
fua = 1;
if (cdb->byte2 & SRW10_DPO)
dpo = 1;
- if ((cdb->opcode == WRITE_10)
- && (cdb->byte2 & SRW10_EBP))
- ebp = 1;
-
lba = scsi_4btoul(cdb->addr);
num_blocks = scsi_2btoul(cdb->length);
break;
@@ -8702,8 +9064,6 @@ ctl_read_write(struct ctl_scsiio *ctsio)
cdb = (struct scsi_rw_12 *)ctsio->cdb;
- if (cdb->byte2 & SRW12_RELADDR)
- reladdr = 1;
if (cdb->byte2 & SRW12_FUA)
fua = 1;
if (cdb->byte2 & SRW12_DPO)
@@ -8731,8 +9091,6 @@ ctl_read_write(struct ctl_scsiio *ctsio)
cdb = (struct scsi_rw_16 *)ctsio->cdb;
- if (cdb->byte2 & SRW12_RELADDR)
- reladdr = 1;
if (cdb->byte2 & SRW12_FUA)
fua = 1;
if (cdb->byte2 & SRW12_DPO)
@@ -8772,22 +9130,121 @@ ctl_read_write(struct ctl_scsiio *ctsio)
* getting it to do write-through for a particular transaction may
* not be possible.
*/
+
/*
- * We don't support relative addressing. That also requires
- * supporting linked commands, which we don't do.
+ * The first check is to make sure we're in bounds, the second
+ * check is to catch wrap-around problems. If the lba + num blocks
+ * is less than the lba, then we've wrapped around and the block
+ * range is invalid anyway.
*/
- if (reladdr != 0) {
- ctl_set_invalid_field(ctsio,
- /*sks_valid*/ 1,
- /*command*/ 1,
- /*field*/ 1,
- /*bit_valid*/ 1,
- /*bit*/ 0);
+ if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
+ || ((lba + num_blocks) < lba)) {
+ ctl_set_lba_out_of_range(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ /*
+ * According to SBC-3, a transfer length of 0 is not an error.
+ * Note that this cannot happen with WRITE(6) or READ(6), since 0
+ * translates to 256 blocks for those commands.
+ */
+ if (num_blocks == 0) {
+ ctl_set_success(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ lbalen = (struct ctl_lba_len_flags *)
+ &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
+ lbalen->lba = lba;
+ lbalen->len = num_blocks;
+ lbalen->flags = isread ? CTL_LLF_READ : CTL_LLF_WRITE;
+
+ ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize;
+ ctsio->kern_rel_offset = 0;
+
+ CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n"));
+
+ retval = lun->backend->data_submit((union ctl_io *)ctsio);
+
+ return (retval);
+}
+
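The in-bounds and wrap-around checks above reappear in ctl_cnw() and ctl_verify() below. A minimal standalone sketch of that logic, assuming 64-bit LBAs and with maxlba standing in for lun->be_lun->maxlba:

    #include <stdint.h>

    /* Returns 1 when [lba, lba + num_blocks) fits on a device whose last block is maxlba. */
    static int
    lba_range_ok(uint64_t lba, uint32_t num_blocks, uint64_t maxlba)
    {
    	/* First check: the request must not run past the end of the device. */
    	if ((lba + num_blocks) > (maxlba + 1))
    		return (0);
    	/* Second check: lba + num_blocks wrapped around 64 bits. */
    	if ((lba + num_blocks) < lba)
    		return (0);
    	return (1);
    }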
+static int
+ctl_cnw_cont(union ctl_io *io)
+{
+ struct ctl_scsiio *ctsio;
+ struct ctl_lun *lun;
+ struct ctl_lba_len_flags *lbalen;
+ int retval;
+
+ ctsio = &io->scsiio;
+ ctsio->io_hdr.status = CTL_STATUS_NONE;
+ ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT;
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+ lbalen = (struct ctl_lba_len_flags *)
+ &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
+ lbalen->flags = CTL_LLF_WRITE;
+
+ CTL_DEBUG_PRINT(("ctl_cnw_cont: calling data_submit()\n"));
+ retval = lun->backend->data_submit((union ctl_io *)ctsio);
+ return (retval);
+}
+
+int
+ctl_cnw(struct ctl_scsiio *ctsio)
+{
+ struct ctl_lun *lun;
+ struct ctl_lba_len_flags *lbalen;
+ uint64_t lba;
+ uint32_t num_blocks;
+ int fua, dpo;
+ int retval;
+
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+
+ CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0]));
+
+ fua = 0;
+ dpo = 0;
+
+ retval = CTL_RETVAL_COMPLETE;
+
+ switch (ctsio->cdb[0]) {
+ case COMPARE_AND_WRITE: {
+ struct scsi_compare_and_write *cdb;
+
+ cdb = (struct scsi_compare_and_write *)ctsio->cdb;
+
+ if (cdb->byte2 & SRW10_FUA)
+ fua = 1;
+ if (cdb->byte2 & SRW10_DPO)
+ dpo = 1;
+ lba = scsi_8btou64(cdb->addr);
+ num_blocks = cdb->length;
+ break;
+ }
+ default:
+ /*
+ * We got a command we don't support. This shouldn't
+		 * happen; commands should be filtered out above us.
+ */
+ ctl_set_invalid_opcode(ctsio);
ctl_done((union ctl_io *)ctsio);
+
return (CTL_RETVAL_COMPLETE);
+ break; /* NOTREACHED */
}
/*
+ * XXX KDM what do we do with the DPO and FUA bits? FUA might be
+ * interesting for us, but if RAIDCore is in write-back mode,
+ * getting it to do write-through for a particular transaction may
+ * not be possible.
+ */
+
+ /*
* The first check is to make sure we're in bounds, the second
* check is to catch wrap-around problems. If the lba + num blocks
* is less than the lba, then we've wrapped around and the block
@@ -8802,8 +9259,6 @@ ctl_read_write(struct ctl_scsiio *ctsio)
/*
* According to SBC-3, a transfer length of 0 is not an error.
- * Note that this cannot happen with WRITE(6) or READ(6), since 0
- * translates to 256 blocks for those commands.
*/
if (num_blocks == 0) {
ctl_set_success(ctsio);
@@ -8811,15 +9266,130 @@ ctl_read_write(struct ctl_scsiio *ctsio)
return (CTL_RETVAL_COMPLETE);
}
- lbalen.lba = lba;
- lbalen.len = num_blocks;
- memcpy(ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, &lbalen,
- sizeof(lbalen));
+ ctsio->kern_total_len = 2 * num_blocks * lun->be_lun->blocksize;
+ ctsio->kern_rel_offset = 0;
- CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n"));
+ /*
+ * Set the IO_CONT flag, so that if this I/O gets passed to
+ * ctl_data_submit_done(), it'll get passed back to
+	 * ctl_cnw_cont() for further processing.
+ */
+ ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT;
+ ctsio->io_cont = ctl_cnw_cont;
+ lbalen = (struct ctl_lba_len_flags *)
+ &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
+ lbalen->lba = lba;
+ lbalen->len = num_blocks;
+ lbalen->flags = CTL_LLF_COMPARE;
+
+ CTL_DEBUG_PRINT(("ctl_cnw: calling data_submit()\n"));
retval = lun->backend->data_submit((union ctl_io *)ctsio);
+ return (retval);
+}
+
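A rough sketch of the two-pass flow that ctl_cnw() sets up; the hand-off assumes the backend completion path invokes the io_cont callback once the first data_submit() pass finishes:

    /*
     * COMPARE AND WRITE: two passes over one 2 * num_blocks buffer.
     *
     *   ctl_cnw():      lbalen->flags = CTL_LLF_COMPARE
     *                   first pass fetches the verify data and compares it
     *                   against the medium
     *   ctl_cnw_cont(): lbalen->flags = CTL_LLF_WRITE
     *                   second pass writes the remaining num_blocks
     */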
+int
+ctl_verify(struct ctl_scsiio *ctsio)
+{
+ struct ctl_lun *lun;
+ struct ctl_lba_len_flags *lbalen;
+ uint64_t lba;
+ uint32_t num_blocks;
+ int bytchk, dpo;
+ int retval;
+
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+
+ CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0]));
+
+ bytchk = 0;
+ dpo = 0;
+ retval = CTL_RETVAL_COMPLETE;
+
+ switch (ctsio->cdb[0]) {
+ case VERIFY_10: {
+ struct scsi_verify_10 *cdb;
+
+ cdb = (struct scsi_verify_10 *)ctsio->cdb;
+ if (cdb->byte2 & SVFY_BYTCHK)
+ bytchk = 1;
+ if (cdb->byte2 & SVFY_DPO)
+ dpo = 1;
+ lba = scsi_4btoul(cdb->addr);
+ num_blocks = scsi_2btoul(cdb->length);
+ break;
+ }
+ case VERIFY_12: {
+ struct scsi_verify_12 *cdb;
+
+ cdb = (struct scsi_verify_12 *)ctsio->cdb;
+ if (cdb->byte2 & SVFY_BYTCHK)
+ bytchk = 1;
+ if (cdb->byte2 & SVFY_DPO)
+ dpo = 1;
+ lba = scsi_4btoul(cdb->addr);
+ num_blocks = scsi_4btoul(cdb->length);
+ break;
+ }
+ case VERIFY_16: {
+ struct scsi_rw_16 *cdb;
+
+ cdb = (struct scsi_rw_16 *)ctsio->cdb;
+ if (cdb->byte2 & SVFY_BYTCHK)
+ bytchk = 1;
+ if (cdb->byte2 & SVFY_DPO)
+ dpo = 1;
+ lba = scsi_8btou64(cdb->addr);
+ num_blocks = scsi_4btoul(cdb->length);
+ break;
+ }
+ default:
+ /*
+ * We got a command we don't support. This shouldn't
+	 * happen; commands should be filtered out above us.
+ */
+ ctl_set_invalid_opcode(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+ /*
+ * The first check is to make sure we're in bounds, the second
+ * check is to catch wrap-around problems. If the lba + num blocks
+ * is less than the lba, then we've wrapped around and the block
+ * range is invalid anyway.
+ */
+ if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
+ || ((lba + num_blocks) < lba)) {
+ ctl_set_lba_out_of_range(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ /*
+ * According to SBC-3, a transfer length of 0 is not an error.
+ */
+ if (num_blocks == 0) {
+ ctl_set_success(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ lbalen = (struct ctl_lba_len_flags *)
+ &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
+ lbalen->lba = lba;
+ lbalen->len = num_blocks;
+ if (bytchk) {
+ lbalen->flags = CTL_LLF_COMPARE;
+ ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize;
+ } else {
+ lbalen->flags = CTL_LLF_VERIFY;
+ ctsio->kern_total_len = 0;
+ }
+ ctsio->kern_rel_offset = 0;
+
+ CTL_DEBUG_PRINT(("ctl_verify: calling data_submit()\n"));
+ retval = lun->backend->data_submit((union ctl_io *)ctsio);
return (retval);
}
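The BYTCHK handling above reduces to two cases; a summary, assuming standard SBC-3 BYTCHK semantics:

    /*
     *   BYTCHK = 0: lbalen->flags = CTL_LLF_VERIFY,  kern_total_len = 0
     *               (medium verification only, no data-out phase)
     *   BYTCHK = 1: lbalen->flags = CTL_LLF_COMPARE, kern_total_len =
     *               num_blocks * blocksize (initiator data is compared
     *               against the medium)
     */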
@@ -8897,9 +9467,7 @@ ctl_report_luns(struct ctl_scsiio *ctsio)
mtx_lock(&control_softc->ctl_lock);
for (targ_lun_id = 0, num_filled = 0; targ_lun_id < CTL_MAX_LUNS && num_filled < num_luns; targ_lun_id++) {
- lun_id = targ_lun_id;
- if (ctsio->io_hdr.nexus.lun_map_fn != NULL)
- lun_id = ctsio->io_hdr.nexus.lun_map_fn(ctsio->io_hdr.nexus.lun_map_arg, lun_id);
+ lun_id = ctl_map_lun(ctsio->io_hdr.nexus.targ_port, targ_lun_id);
if (lun_id >= CTL_MAX_LUNS)
continue;
lun = control_softc->ctl_luns[lun_id];
@@ -8951,9 +9519,11 @@ ctl_report_luns(struct ctl_scsiio *ctsio)
* case, we shouldn't clear any pending lun change unit
* attention.
*/
- if (request_lun != NULL)
- lun->pending_sense[initidx].ua_pending &=
- ~CTL_UA_LUN_CHANGE;
+ if (request_lun != NULL) {
+ mtx_lock(&lun->lun_lock);
+ lun->pending_ua[initidx] &= ~CTL_UA_LUN_CHANGE;
+ mtx_unlock(&lun->lun_lock);
+ }
}
mtx_unlock(&control_softc->ctl_lock);
@@ -8992,6 +9562,7 @@ ctl_report_luns(struct ctl_scsiio *ctsio)
*/
ctsio->scsi_status = SCSI_STATUS_OK;
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
ctsio->be_move_done = ctl_config_move_done;
ctl_datamove((union ctl_io *)ctsio);
@@ -9051,7 +9622,8 @@ ctl_request_sense(struct ctl_scsiio *ctsio)
* Check for pending sense, and then for pending unit attentions.
* Pending sense gets returned first, then pending unit attentions.
*/
- mtx_lock(&lun->ctl_softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
+#ifdef CTL_WITH_CA
if (ctl_is_set(lun->have_ca, initidx)) {
scsi_sense_data_type stored_format;
@@ -9059,8 +9631,7 @@ ctl_request_sense(struct ctl_scsiio *ctsio)
* Check to see which sense format was used for the stored
* sense data.
*/
- stored_format = scsi_sense_type(
- &lun->pending_sense[initidx].sense);
+ stored_format = scsi_sense_type(&lun->pending_sense[initidx]);
/*
* If the user requested a different sense format than the
@@ -9075,32 +9646,34 @@ ctl_request_sense(struct ctl_scsiio *ctsio)
if ((stored_format == SSD_TYPE_FIXED)
&& (sense_format == SSD_TYPE_DESC))
ctl_sense_to_desc((struct scsi_sense_data_fixed *)
- &lun->pending_sense[initidx].sense,
+ &lun->pending_sense[initidx],
(struct scsi_sense_data_desc *)sense_ptr);
else if ((stored_format == SSD_TYPE_DESC)
&& (sense_format == SSD_TYPE_FIXED))
ctl_sense_to_fixed((struct scsi_sense_data_desc *)
- &lun->pending_sense[initidx].sense,
+ &lun->pending_sense[initidx],
(struct scsi_sense_data_fixed *)sense_ptr);
else
- memcpy(sense_ptr, &lun->pending_sense[initidx].sense,
+ memcpy(sense_ptr, &lun->pending_sense[initidx],
ctl_min(sizeof(*sense_ptr),
- sizeof(lun->pending_sense[initidx].sense)));
+ sizeof(lun->pending_sense[initidx])));
ctl_clear_mask(lun->have_ca, initidx);
have_error = 1;
- } else if (lun->pending_sense[initidx].ua_pending != CTL_UA_NONE) {
+ } else
+#endif
+ if (lun->pending_ua[initidx] != CTL_UA_NONE) {
ctl_ua_type ua_type;
- ua_type = ctl_build_ua(lun->pending_sense[initidx].ua_pending,
+ ua_type = ctl_build_ua(lun->pending_ua[initidx],
sense_ptr, sense_format);
if (ua_type != CTL_UA_NONE) {
have_error = 1;
/* We're reporting this UA, so clear it */
- lun->pending_sense[initidx].ua_pending &= ~ua_type;
+ lun->pending_ua[initidx] &= ~ua_type;
}
}
- mtx_unlock(&lun->ctl_softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
/*
* We already have a pending error, return it.
@@ -9118,7 +9691,7 @@ ctl_request_sense(struct ctl_scsiio *ctsio)
* parameter data.
*/
ctsio->sense_len = 0;
-
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
ctsio->be_move_done = ctl_config_move_done;
ctl_datamove((union ctl_io *)ctsio);
@@ -9147,6 +9720,7 @@ no_sense:
* autosense in this case. We're reporting sense as parameter data.
*/
ctsio->sense_len = 0;
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
ctsio->be_move_done = ctl_config_move_done;
ctl_datamove((union ctl_io *)ctsio);
@@ -9163,7 +9737,7 @@ ctl_tur(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_tur\n"));
if (lun == NULL)
- return (-EINVAL);
+ return (EINVAL);
ctsio->scsi_status = SCSI_STATUS_OK;
ctsio->io_hdr.status = CTL_SUCCESS;
@@ -9227,13 +9801,18 @@ ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len)
pages->page_list[1] = SVPD_UNIT_SERIAL_NUMBER;
/* Device Identification */
pages->page_list[2] = SVPD_DEVICE_ID;
+ /* SCSI Ports */
+ pages->page_list[3] = SVPD_SCSI_PORTS;
+ /* Third-party Copy */
+ pages->page_list[4] = SVPD_SCSI_TPC;
/* Block limits */
- pages->page_list[3] = SVPD_BLOCK_LIMITS;
+ pages->page_list[5] = SVPD_BLOCK_LIMITS;
/* Logical Block Provisioning */
- pages->page_list[4] = SVPD_LBP;
+ pages->page_list[6] = SVPD_LBP;
ctsio->scsi_status = SCSI_STATUS_OK;
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
ctsio->be_move_done = ctl_config_move_done;
ctl_datamove((union ctl_io *)ctsio);
@@ -9245,9 +9824,6 @@ ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len)
{
struct scsi_vpd_unit_serial_number *sn_ptr;
struct ctl_lun *lun;
-#ifndef CTL_USE_BACKEND_SN
- char tmpstr[32];
-#endif
lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
@@ -9281,7 +9857,6 @@ ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len)
sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER;
sn_ptr->length = ctl_min(sizeof(*sn_ptr) - 4, CTL_SN_LEN);
-#ifdef CTL_USE_BACKEND_SN
/*
* If we don't have a LUN, we just leave the serial number as
* all spaces.
@@ -9291,17 +9866,9 @@ ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len)
strncpy((char *)sn_ptr->serial_num,
(char *)lun->be_lun->serial_num, CTL_SN_LEN);
}
-#else
- /*
- * Note that we're using a non-unique serial number here,
- */
- snprintf(tmpstr, sizeof(tmpstr), "MYSERIALNUMIS000");
- memset(sn_ptr->serial_num, 0x20, sizeof(sn_ptr->serial_num));
- strncpy(sn_ptr->serial_num, tmpstr, ctl_min(CTL_SN_LEN,
- ctl_min(sizeof(tmpstr), sizeof(*sn_ptr) - 4)));
-#endif
ctsio->scsi_status = SCSI_STATUS_OK;
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
ctsio->be_move_done = ctl_config_move_done;
ctl_datamove((union ctl_io *)ctsio);
@@ -9313,45 +9880,38 @@ static int
ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len)
{
struct scsi_vpd_device_id *devid_ptr;
- struct scsi_vpd_id_descriptor *desc, *desc1;
- struct scsi_vpd_id_descriptor *desc2, *desc3; /* for types 4h and 5h */
- struct scsi_vpd_id_t10 *t10id;
+ struct scsi_vpd_id_descriptor *desc;
struct ctl_softc *ctl_softc;
struct ctl_lun *lun;
- struct ctl_frontend *fe;
-#ifndef CTL_USE_BACKEND_SN
- char tmpstr[32];
-#endif /* CTL_USE_BACKEND_SN */
- int devid_len;
+ struct ctl_port *port;
+ int data_len;
+ uint8_t proto;
ctl_softc = control_softc;
- mtx_lock(&ctl_softc->ctl_lock);
- fe = ctl_softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)];
- mtx_unlock(&ctl_softc->ctl_lock);
-
- if (fe->devid != NULL)
- return ((fe->devid)(ctsio, alloc_len));
-
+ port = ctl_softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)];
lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
- devid_len = sizeof(struct scsi_vpd_device_id) +
- sizeof(struct scsi_vpd_id_descriptor) +
- sizeof(struct scsi_vpd_id_t10) + CTL_DEVID_LEN +
- sizeof(struct scsi_vpd_id_descriptor) + CTL_WWPN_LEN +
- sizeof(struct scsi_vpd_id_descriptor) +
+ data_len = sizeof(struct scsi_vpd_device_id) +
+ sizeof(struct scsi_vpd_id_descriptor) +
sizeof(struct scsi_vpd_id_rel_trgt_port_id) +
- sizeof(struct scsi_vpd_id_descriptor) +
+ sizeof(struct scsi_vpd_id_descriptor) +
sizeof(struct scsi_vpd_id_trgt_port_grp_id);
+ if (lun && lun->lun_devid)
+ data_len += lun->lun_devid->len;
+ if (port->port_devid)
+ data_len += port->port_devid->len;
+ if (port->target_devid)
+ data_len += port->target_devid->len;
- ctsio->kern_data_ptr = malloc(devid_len, M_CTL, M_WAITOK | M_ZERO);
+ ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr;
ctsio->kern_sg_entries = 0;
- if (devid_len < alloc_len) {
- ctsio->residual = alloc_len - devid_len;
- ctsio->kern_data_len = devid_len;
- ctsio->kern_total_len = devid_len;
+ if (data_len < alloc_len) {
+ ctsio->residual = alloc_len - data_len;
+ ctsio->kern_data_len = data_len;
+ ctsio->kern_total_len = data_len;
} else {
ctsio->residual = 0;
ctsio->kern_data_len = alloc_len;
@@ -9361,15 +9921,6 @@ ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len)
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
- desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list;
- t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0];
- desc1 = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
- sizeof(struct scsi_vpd_id_t10) + CTL_DEVID_LEN);
- desc2 = (struct scsi_vpd_id_descriptor *)(&desc1->identifier[0] +
- CTL_WWPN_LEN);
- desc3 = (struct scsi_vpd_id_descriptor *)(&desc2->identifier[0] +
- sizeof(struct scsi_vpd_id_rel_trgt_port_id));
-
/*
* The control device is always connected. The disk device, on the
* other hand, may not be online all the time.
@@ -9379,115 +9930,187 @@ ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len)
lun->be_lun->lun_type;
else
devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
-
devid_ptr->page_code = SVPD_DEVICE_ID;
+ scsi_ulto2b(data_len - 4, devid_ptr->length);
- scsi_ulto2b(devid_len - 4, devid_ptr->length);
-
- mtx_lock(&ctl_softc->ctl_lock);
-
- /*
- * For Fibre channel,
- */
- if (fe->port_type == CTL_PORT_FC)
- {
- desc->proto_codeset = (SCSI_PROTO_FC << 4) |
- SVPD_ID_CODESET_ASCII;
- desc1->proto_codeset = (SCSI_PROTO_FC << 4) |
- SVPD_ID_CODESET_BINARY;
- }
+ if (port->port_type == CTL_PORT_FC)
+ proto = SCSI_PROTO_FC << 4;
+ else if (port->port_type == CTL_PORT_ISCSI)
+ proto = SCSI_PROTO_ISCSI << 4;
else
- {
- desc->proto_codeset = (SCSI_PROTO_SPI << 4) |
- SVPD_ID_CODESET_ASCII;
- desc1->proto_codeset = (SCSI_PROTO_SPI << 4) |
- SVPD_ID_CODESET_BINARY;
- }
- desc2->proto_codeset = desc3->proto_codeset = desc1->proto_codeset;
- mtx_unlock(&ctl_softc->ctl_lock);
+ proto = SCSI_PROTO_SPI << 4;
+ desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list;
/*
* We're using a LUN association here. i.e., this device ID is a
* per-LUN identifier.
*/
- desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10;
- desc->length = sizeof(*t10id) + CTL_DEVID_LEN;
- strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor));
+ if (lun && lun->lun_devid) {
+ memcpy(desc, lun->lun_devid->data, lun->lun_devid->len);
+ desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc +
+ lun->lun_devid->len);
+ }
/*
- * desc1 is for the WWPN which is a port asscociation.
+ * This is for the WWPN which is a port association.
*/
- desc1->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | SVPD_ID_TYPE_NAA;
- desc1->length = CTL_WWPN_LEN;
- /* XXX Call Reggie's get_WWNN func here then add port # to the end */
- /* For testing just create the WWPN */
-#if 0
- ddb_GetWWNN((char *)desc1->identifier);
-
- /* NOTE: if the port is 0 or 8 we don't want to subtract 1 */
- /* This is so Copancontrol will return something sane */
- if (ctsio->io_hdr.nexus.targ_port!=0 &&
- ctsio->io_hdr.nexus.targ_port!=8)
- desc1->identifier[7] += ctsio->io_hdr.nexus.targ_port-1;
- else
- desc1->identifier[7] += ctsio->io_hdr.nexus.targ_port;
-#endif
+ if (port->port_devid) {
+ memcpy(desc, port->port_devid->data, port->port_devid->len);
+ desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc +
+ port->port_devid->len);
+ }
- be64enc(desc1->identifier, fe->wwpn);
+ /*
+ * This is for the Relative Target Port(type 4h) identifier
+ */
+ desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY;
+ desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT |
+ SVPD_ID_TYPE_RELTARG;
+ desc->length = 4;
+ scsi_ulto2b(ctsio->io_hdr.nexus.targ_port, &desc->identifier[2]);
+ desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
+ sizeof(struct scsi_vpd_id_rel_trgt_port_id));
/*
- * desc2 is for the Relative Target Port(type 4h) identifier
+ * This is for the Target Port Group(type 5h) identifier
*/
- desc2->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT
- | SVPD_ID_TYPE_RELTARG;
- desc2->length = 4;
-//#if 0
- /* NOTE: if the port is 0 or 8 we don't want to subtract 1 */
- /* This is so Copancontrol will return something sane */
- if (ctsio->io_hdr.nexus.targ_port!=0 &&
- ctsio->io_hdr.nexus.targ_port!=8)
- desc2->identifier[3] = ctsio->io_hdr.nexus.targ_port - 1;
- else
- desc2->identifier[3] = ctsio->io_hdr.nexus.targ_port;
-//#endif
+ desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY;
+ desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT |
+ SVPD_ID_TYPE_TPORTGRP;
+ desc->length = 4;
+ scsi_ulto2b(ctsio->io_hdr.nexus.targ_port / CTL_MAX_PORTS + 1,
+ &desc->identifier[2]);
+ desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
+ sizeof(struct scsi_vpd_id_trgt_port_grp_id));
/*
- * desc3 is for the Target Port Group(type 5h) identifier
+ * This is for the Target identifier
*/
- desc3->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT
- | SVPD_ID_TYPE_TPORTGRP;
- desc3->length = 4;
- if (ctsio->io_hdr.nexus.targ_port < CTL_MAX_PORTS || ctl_is_single)
- desc3->identifier[3] = 1;
+ if (port->target_devid) {
+ memcpy(desc, port->target_devid->data, port->target_devid->len);
+ }
+
+ ctsio->scsi_status = SCSI_STATUS_OK;
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+ ctsio->be_move_done = ctl_config_move_done;
+ ctl_datamove((union ctl_io *)ctsio);
+
+ return (CTL_RETVAL_COMPLETE);
+}
+
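The device-identification page assembled above is a concatenation of pre-built descriptors plus two descriptors constructed in place:

    /*
     * VPD 0x83 descriptor order, as built above:
     *   1. lun->lun_devid     - LUN association, copied verbatim if present
     *   2. port->port_devid   - port association, copied verbatim if present
     *   3. Relative Target Port (type 4h), built here
     *   4. Target Port Group   (type 5h), built here
     *   5. port->target_devid - target identifier, copied verbatim if present
     */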
+static int
+ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len)
+{
+ struct ctl_softc *softc = control_softc;
+ struct scsi_vpd_scsi_ports *sp;
+ struct scsi_vpd_port_designation *pd;
+ struct scsi_vpd_port_designation_cont *pdc;
+ struct ctl_lun *lun;
+ struct ctl_port *port;
+ int data_len, num_target_ports, iid_len, id_len, g, pg, p;
+ int num_target_port_groups, single;
+
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+
+ single = ctl_is_single;
+ if (single)
+ num_target_port_groups = 1;
else
- desc3->identifier[3] = 2;
+ num_target_port_groups = NUM_TARGET_PORT_GROUPS;
+ num_target_ports = 0;
+ iid_len = 0;
+ id_len = 0;
+ mtx_lock(&softc->ctl_lock);
+ STAILQ_FOREACH(port, &softc->port_list, links) {
+ if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
+ continue;
+ if (lun != NULL &&
+ ctl_map_lun_back(port->targ_port, lun->lun) >=
+ CTL_MAX_LUNS)
+ continue;
+ num_target_ports++;
+ if (port->init_devid)
+ iid_len += port->init_devid->len;
+ if (port->port_devid)
+ id_len += port->port_devid->len;
+ }
+ mtx_unlock(&softc->ctl_lock);
+
+ data_len = sizeof(struct scsi_vpd_scsi_ports) + num_target_port_groups *
+ num_target_ports * (sizeof(struct scsi_vpd_port_designation) +
+ sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len;
+ ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
+ sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr;
+ ctsio->kern_sg_entries = 0;
+
+ if (data_len < alloc_len) {
+ ctsio->residual = alloc_len - data_len;
+ ctsio->kern_data_len = data_len;
+ ctsio->kern_total_len = data_len;
+ } else {
+ ctsio->residual = 0;
+ ctsio->kern_data_len = alloc_len;
+ ctsio->kern_total_len = alloc_len;
+ }
+ ctsio->kern_data_resid = 0;
+ ctsio->kern_rel_offset = 0;
+ ctsio->kern_sg_entries = 0;
-#ifdef CTL_USE_BACKEND_SN
/*
- * If we've actually got a backend, copy the device id from the
- * per-LUN data. Otherwise, set it to all spaces.
+ * The control device is always connected. The disk device, on the
+ * other hand, may not be online all the time. Need to change this
+ * to figure out whether the disk device is actually online or not.
*/
- if (lun != NULL) {
- /*
- * Copy the backend's LUN ID.
- */
- strncpy((char *)t10id->vendor_spec_id,
- (char *)lun->be_lun->device_id, CTL_DEVID_LEN);
- } else {
- /*
- * No backend, set this to spaces.
- */
- memset(t10id->vendor_spec_id, 0x20, CTL_DEVID_LEN);
+ if (lun != NULL)
+ sp->device = (SID_QUAL_LU_CONNECTED << 5) |
+ lun->be_lun->lun_type;
+ else
+ sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
+
+ sp->page_code = SVPD_SCSI_PORTS;
+ scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports),
+ sp->page_length);
+ pd = &sp->design[0];
+
+ mtx_lock(&softc->ctl_lock);
+ if (softc->flags & CTL_FLAG_MASTER_SHELF)
+ pg = 0;
+ else
+ pg = 1;
+ for (g = 0; g < num_target_port_groups; g++) {
+ STAILQ_FOREACH(port, &softc->port_list, links) {
+ if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
+ continue;
+ if (lun != NULL &&
+ ctl_map_lun_back(port->targ_port, lun->lun) >=
+ CTL_MAX_LUNS)
+ continue;
+ p = port->targ_port % CTL_MAX_PORTS + g * CTL_MAX_PORTS;
+ scsi_ulto2b(p, pd->relative_port_id);
+ if (port->init_devid && g == pg) {
+ iid_len = port->init_devid->len;
+ memcpy(pd->initiator_transportid,
+ port->init_devid->data, port->init_devid->len);
+ } else
+ iid_len = 0;
+ scsi_ulto2b(iid_len, pd->initiator_transportid_length);
+ pdc = (struct scsi_vpd_port_designation_cont *)
+ (&pd->initiator_transportid[iid_len]);
+ if (port->port_devid && g == pg) {
+ id_len = port->port_devid->len;
+ memcpy(pdc->target_port_descriptors,
+ port->port_devid->data, port->port_devid->len);
+ } else
+ id_len = 0;
+ scsi_ulto2b(id_len, pdc->target_port_descriptors_length);
+ pd = (struct scsi_vpd_port_designation *)
+ ((uint8_t *)pdc->target_port_descriptors + id_len);
+ }
}
-#else
- snprintf(tmpstr, sizeof(tmpstr), "MYDEVICEIDIS%4d",
- (lun != NULL) ? (int)lun->lun : 0);
- strncpy(t10id->vendor_spec_id, tmpstr, ctl_min(CTL_DEVID_LEN,
- sizeof(tmpstr)));
-#endif
+ mtx_unlock(&softc->ctl_lock);
ctsio->scsi_status = SCSI_STATUS_OK;
-
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
ctsio->be_move_done = ctl_config_move_done;
ctl_datamove((union ctl_io *)ctsio);
@@ -9502,7 +10125,6 @@ ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len)
int bs;
lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
- bs = lun->be_lun->blocksize;
ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO);
bl_ptr = (struct scsi_vpd_block_limits *)ctsio->kern_data_ptr;
@@ -9534,15 +10156,20 @@ ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len)
bl_ptr->page_code = SVPD_BLOCK_LIMITS;
scsi_ulto2b(sizeof(*bl_ptr), bl_ptr->page_length);
+ bl_ptr->max_cmp_write_len = 0xff;
scsi_ulto4b(0xffffffff, bl_ptr->max_txfer_len);
- scsi_ulto4b(MAXPHYS / bs, bl_ptr->opt_txfer_len);
- if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) {
- scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_lba_cnt);
- scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_blk_cnt);
+ if (lun != NULL) {
+ bs = lun->be_lun->blocksize;
+ scsi_ulto4b(MAXPHYS / bs, bl_ptr->opt_txfer_len);
+ if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) {
+ scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_lba_cnt);
+ scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_blk_cnt);
+ }
}
scsi_u64to8b(UINT64_MAX, bl_ptr->max_write_same_length);
ctsio->scsi_status = SCSI_STATUS_OK;
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
ctsio->be_move_done = ctl_config_move_done;
ctl_datamove((union ctl_io *)ctsio);
@@ -9554,10 +10181,8 @@ ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len)
{
struct scsi_vpd_logical_block_prov *lbp_ptr;
struct ctl_lun *lun;
- int bs;
lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
- bs = lun->be_lun->blocksize;
ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO);
lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr;
@@ -9588,10 +10213,11 @@ ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len)
lbp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
lbp_ptr->page_code = SVPD_LBP;
- if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP)
+ if (lun != NULL && lun->be_lun->flags & CTL_LUN_FLAG_UNMAP)
lbp_ptr->flags = SVPD_LBP_UNMAP | SVPD_LBP_WS16 | SVPD_LBP_WS10;
ctsio->scsi_status = SCSI_STATUS_OK;
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
ctsio->be_move_done = ctl_config_move_done;
ctl_datamove((union ctl_io *)ctsio);
@@ -9622,6 +10248,12 @@ ctl_inquiry_evpd(struct ctl_scsiio *ctsio)
case SVPD_DEVICE_ID:
retval = ctl_inquiry_evpd_devid(ctsio, alloc_len);
break;
+ case SVPD_SCSI_PORTS:
+ retval = ctl_inquiry_evpd_scsi_ports(ctsio, alloc_len);
+ break;
+ case SVPD_SCSI_TPC:
+ retval = ctl_inquiry_evpd_tpc(ctsio, alloc_len);
+ break;
case SVPD_BLOCK_LIMITS:
retval = ctl_inquiry_evpd_block_limits(ctsio, alloc_len);
break;
@@ -9650,8 +10282,9 @@ ctl_inquiry_std(struct ctl_scsiio *ctsio)
struct scsi_inquiry *cdb;
struct ctl_softc *ctl_softc;
struct ctl_lun *lun;
+ char *val;
uint32_t alloc_len;
- int is_fc;
+ ctl_port_type port_type;
ctl_softc = control_softc;
@@ -9660,13 +10293,10 @@ ctl_inquiry_std(struct ctl_scsiio *ctsio)
* We treat the ioctl front end, and any SCSI adapters, as packetized
* SCSI front ends.
*/
- mtx_lock(&ctl_softc->ctl_lock);
- if (ctl_softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)]->port_type !=
- CTL_PORT_FC)
- is_fc = 0;
- else
- is_fc = 1;
- mtx_unlock(&ctl_softc->ctl_lock);
+ port_type = ctl_softc->ctl_ports[
+ ctl_port_idx(ctsio->io_hdr.nexus.targ_port)]->port_type;
+ if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL)
+ port_type = CTL_PORT_SCSI;
lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
cdb = (struct scsi_inquiry *)ctsio->cdb;
@@ -9745,7 +10375,7 @@ ctl_inquiry_std(struct ctl_scsiio *ctsio)
inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE;
/* RMB in byte 2 is 0 */
- inq_ptr->version = SCSI_REV_SPC3;
+ inq_ptr->version = SCSI_REV_SPC4;
/*
* According to SAM-3, even if a device only supports a single
@@ -9770,17 +10400,18 @@ ctl_inquiry_std(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("additional_length = %d\n",
inq_ptr->additional_length));
- inq_ptr->spc3_flags = SPC3_SID_TPGS_IMPLICIT;
+ inq_ptr->spc3_flags = SPC3_SID_3PC;
+ if (!ctl_is_single)
+ inq_ptr->spc3_flags |= SPC3_SID_TPGS_IMPLICIT;
/* 16 bit addressing */
- if (is_fc == 0)
+ if (port_type == CTL_PORT_SCSI)
inq_ptr->spc2_flags = SPC2_SID_ADDR16;
/* XXX set the SID_MultiP bit here if we're actually going to
respond on multiple ports */
inq_ptr->spc2_flags |= SPC2_SID_MultiP;
/* 16 bit data bus, synchronous transfers */
- /* XXX these flags don't apply for FC */
- if (is_fc == 0)
+ if (port_type == CTL_PORT_SCSI)
inq_ptr->flags = SID_WBus16 | SID_Sync;
/*
* XXX KDM do we want to support tagged queueing on the control
@@ -9794,10 +10425,17 @@ ctl_inquiry_std(struct ctl_scsiio *ctsio)
* We have 8 bytes for the vendor name, and 16 bytes for the device
* name and 4 bytes for the revision.
*/
- strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor));
+ if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options,
+ "vendor")) == NULL) {
+ strcpy(inq_ptr->vendor, CTL_VENDOR);
+ } else {
+ memset(inq_ptr->vendor, ' ', sizeof(inq_ptr->vendor));
+ strncpy(inq_ptr->vendor, val,
+ min(sizeof(inq_ptr->vendor), strlen(val)));
+ }
if (lun == NULL) {
strcpy(inq_ptr->product, CTL_DIRECT_PRODUCT);
- } else {
+ } else if ((val = ctl_get_opt(&lun->be_lun->options, "product")) == NULL) {
switch (lun->be_lun->lun_type) {
case T_DIRECT:
strcpy(inq_ptr->product, CTL_DIRECT_PRODUCT);
@@ -9809,13 +10447,24 @@ ctl_inquiry_std(struct ctl_scsiio *ctsio)
strcpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT);
break;
}
+ } else {
+ memset(inq_ptr->product, ' ', sizeof(inq_ptr->product));
+ strncpy(inq_ptr->product, val,
+ min(sizeof(inq_ptr->product), strlen(val)));
}
/*
* XXX make this a macro somewhere so it automatically gets
* incremented when we make changes.
*/
- strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision));
+ if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options,
+ "revision")) == NULL) {
+ strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision));
+ } else {
+ memset(inq_ptr->revision, ' ', sizeof(inq_ptr->revision));
+ strncpy(inq_ptr->revision, val,
+ min(sizeof(inq_ptr->revision), strlen(val)));
+ }
/*
* For parallel SCSI, we support double transition and single
@@ -9823,33 +10472,36 @@ ctl_inquiry_std(struct ctl_scsiio *ctsio)
* and Selection) and Information Unit transfers on both the
* control and array devices.
*/
- if (is_fc == 0)
+ if (port_type == CTL_PORT_SCSI)
inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS |
SID_SPI_IUS;
- /* SAM-3 */
- scsi_ulto2b(0x0060, inq_ptr->version1);
- /* SPC-3 (no version claimed) XXX should we claim a version? */
- scsi_ulto2b(0x0300, inq_ptr->version2);
- if (is_fc) {
+ /* SAM-5 (no version claimed) */
+ scsi_ulto2b(0x00A0, inq_ptr->version1);
+ /* SPC-4 (no version claimed) */
+ scsi_ulto2b(0x0460, inq_ptr->version2);
+ if (port_type == CTL_PORT_FC) {
/* FCP-2 ANSI INCITS.350:2003 */
scsi_ulto2b(0x0917, inq_ptr->version3);
- } else {
+ } else if (port_type == CTL_PORT_SCSI) {
/* SPI-4 ANSI INCITS.362:200x */
scsi_ulto2b(0x0B56, inq_ptr->version3);
+ } else if (port_type == CTL_PORT_ISCSI) {
+ /* iSCSI (no version claimed) */
+ scsi_ulto2b(0x0960, inq_ptr->version3);
+ } else if (port_type == CTL_PORT_SAS) {
+ /* SAS (no version claimed) */
+ scsi_ulto2b(0x0BE0, inq_ptr->version3);
}
if (lun == NULL) {
- /* SBC-2 (no version claimed) XXX should we claim a version? */
- scsi_ulto2b(0x0320, inq_ptr->version4);
+ /* SBC-3 (no version claimed) */
+ scsi_ulto2b(0x04C0, inq_ptr->version4);
} else {
switch (lun->be_lun->lun_type) {
case T_DIRECT:
- /*
- * SBC-2 (no version claimed) XXX should we claim a
- * version?
- */
- scsi_ulto2b(0x0320, inq_ptr->version4);
+ /* SBC-3 (no version claimed) */
+ scsi_ulto2b(0x04C0, inq_ptr->version4);
break;
case T_PROCESSOR:
default:
@@ -9859,6 +10511,7 @@ ctl_inquiry_std(struct ctl_scsiio *ctsio)
ctsio->scsi_status = SCSI_STATUS_OK;
if (ctsio->kern_data_len > 0) {
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
ctsio->be_move_done = ctl_config_move_done;
ctl_datamove((union ctl_io *)ctsio);
} else {
@@ -9928,6 +10581,15 @@ ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint32_t *len)
return (1);
switch (io->scsiio.cdb[0]) {
+ case COMPARE_AND_WRITE: {
+ struct scsi_compare_and_write *cdb;
+
+ cdb = (struct scsi_compare_and_write *)io->scsiio.cdb;
+
+ *lba = scsi_8btou64(cdb->addr);
+ *len = cdb->length;
+ break;
+ }
case READ_6:
case WRITE_6: {
struct scsi_rw_6 *cdb;
@@ -10016,6 +10678,33 @@ ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint32_t *len)
*len = scsi_4btoul(cdb->length);
break;
}
+ case VERIFY_10: {
+ struct scsi_verify_10 *cdb;
+
+ cdb = (struct scsi_verify_10 *)io->scsiio.cdb;
+
+ *lba = scsi_4btoul(cdb->addr);
+ *len = scsi_2btoul(cdb->length);
+ break;
+ }
+ case VERIFY_12: {
+ struct scsi_verify_12 *cdb;
+
+ cdb = (struct scsi_verify_12 *)io->scsiio.cdb;
+
+ *lba = scsi_4btoul(cdb->addr);
+ *len = scsi_4btoul(cdb->length);
+ break;
+ }
+ case VERIFY_16: {
+ struct scsi_verify_16 *cdb;
+
+ cdb = (struct scsi_verify_16 *)io->scsiio.cdb;
+
+ *lba = scsi_8btou64(cdb->addr);
+ *len = scsi_4btoul(cdb->length);
+ break;
+ }
default:
return (1);
break; /* NOTREACHED */
@@ -10060,7 +10749,7 @@ ctl_extent_check(union ctl_io *io1, union ctl_io *io2)
static ctl_action
ctl_check_for_blockage(union ctl_io *pending_io, union ctl_io *ooa_io)
{
- struct ctl_cmd_entry *pending_entry, *ooa_entry;
+ const struct ctl_cmd_entry *pending_entry, *ooa_entry;
ctl_serialize_action *serialize_row;
/*
@@ -10133,8 +10822,8 @@ ctl_check_for_blockage(union ctl_io *pending_io, union ctl_io *ooa_io)
|| (ooa_io->scsiio.tag_type == CTL_TAG_ORDERED)))
return (CTL_ACTION_BLOCK);
- pending_entry = &ctl_cmd_table[pending_io->scsiio.cdb[0]];
- ooa_entry = &ctl_cmd_table[ooa_io->scsiio.cdb[0]];
+ pending_entry = ctl_get_cmd_entry(&pending_io->scsiio);
+ ooa_entry = ctl_get_cmd_entry(&ooa_io->scsiio);
serialize_row = ctl_serialize_table[ooa_entry->seridx];
@@ -10173,7 +10862,7 @@ ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
union ctl_io *ooa_io;
ctl_action action;
- mtx_assert(&control_softc->ctl_lock, MA_OWNED);
+ mtx_assert(&lun->lun_lock, MA_OWNED);
/*
* Run back along the OOA queue, starting with the current
@@ -10221,7 +10910,7 @@ ctl_check_blocked(struct ctl_lun *lun)
{
union ctl_io *cur_blocked, *next_blocked;
- mtx_assert(&control_softc->ctl_lock, MA_OWNED);
+ mtx_assert(&lun->lun_lock, MA_OWNED);
/*
* Run forward from the head of the blocked queue, checking each
@@ -10264,9 +10953,8 @@ ctl_check_blocked(struct ctl_lun *lun)
case CTL_ACTION_PASS:
case CTL_ACTION_SKIP: {
struct ctl_softc *softc;
- struct ctl_cmd_entry *entry;
+ const struct ctl_cmd_entry *entry;
uint32_t initidx;
- uint8_t opcode;
int isc_retval;
/*
@@ -10303,8 +10991,7 @@ ctl_check_blocked(struct ctl_lun *lun)
}
break;
}
- opcode = cur_blocked->scsiio.cdb[0];
- entry = &ctl_cmd_table[opcode];
+ entry = ctl_get_cmd_entry(&cur_blocked->scsiio);
softc = control_softc;
initidx = ctl_get_initindex(&cur_blocked->io_hdr.nexus);
@@ -10321,28 +11008,9 @@ ctl_check_blocked(struct ctl_lun *lun)
&cur_blocked->scsiio) == 0) {
cur_blocked->io_hdr.flags |=
CTL_FLAG_IS_WAS_ON_RTR;
- STAILQ_INSERT_TAIL(&lun->ctl_softc->rtr_queue,
- &cur_blocked->io_hdr, links);
- /*
- * In the non CTL_DONE_THREAD case, we need
- * to wake up the work thread here. When
- * we're processing completed requests from
- * the work thread context, we'll pop back
- * around and end up pulling things off the
- * RtR queue. When we aren't processing
- * things from the work thread context,
- * though, we won't ever check the RtR queue.
- * So we need to wake up the thread to clear
- * things off the queue. Otherwise this
- * transaction will just sit on the RtR queue
- * until a new I/O comes in. (Which may or
- * may not happen...)
- */
-#ifndef CTL_DONE_THREAD
- ctl_wakeup_thread();
-#endif
+ ctl_enqueue_rtr(cur_blocked);
} else
- ctl_done_lock(cur_blocked, /*have_lock*/ 1);
+ ctl_done(cur_blocked);
break;
}
default:
@@ -10371,12 +11039,14 @@ ctl_check_blocked(struct ctl_lun *lun)
*/
static int
ctl_scsiio_lun_check(struct ctl_softc *ctl_softc, struct ctl_lun *lun,
- struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio)
+ const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio)
{
int retval;
retval = 0;
+ mtx_assert(&lun->lun_lock, MA_OWNED);
+
/*
* If this shelf is a secondary shelf controller, we have to reject
* any media access commands.
@@ -10468,7 +11138,7 @@ static void
ctl_failover_io(union ctl_io *io, int have_lock)
{
ctl_set_busy(&io->scsiio);
- ctl_done_lock(io, have_lock);
+ ctl_done(io);
}
static void
@@ -10492,6 +11162,8 @@ ctl_failover(void)
* We'll either abort them or delete them below, depending on
* which HA mode we're in.
*/
+#ifdef notyet
+ mtx_lock(&ctl_softc->queue_lock);
for (io = (union ctl_io *)STAILQ_FIRST(&ctl_softc->rtr_queue);
io != NULL; io = next_io) {
next_io = (union ctl_io *)STAILQ_NEXT(&io->io_hdr, links);
@@ -10499,6 +11171,8 @@ ctl_failover(void)
STAILQ_REMOVE(&ctl_softc->rtr_queue, &io->io_hdr,
ctl_io_hdr, links);
}
+ mtx_unlock(&ctl_softc->queue_lock);
+#endif
for (lun_idx=0; lun_idx < ctl_softc->num_luns; lun_idx++) {
lun = ctl_softc->ctl_luns[lun_idx];
@@ -10606,8 +11280,7 @@ ctl_failover(void)
CTL_FLAG_FAILOVER;
} else {
ctl_set_busy(&pending_io->scsiio);
- ctl_done_lock(pending_io,
- /*have_lock*/1);
+ ctl_done(pending_io);
}
}
@@ -10615,7 +11288,7 @@ ctl_failover(void)
* Build Unit Attention
*/
for (i = 0; i < CTL_MAX_INITIATORS; i++) {
- lun->pending_sense[i].ua_pending |=
+ lun->pending_ua[i] |=
CTL_UA_ASYM_ACC_CHANGE;
}
} else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0)
@@ -10639,8 +11312,7 @@ ctl_failover(void)
CTL_FLAG_IS_WAS_ON_RTR) == 0) {
pending_io->io_hdr.flags |=
CTL_FLAG_IS_WAS_ON_RTR;
- STAILQ_INSERT_TAIL(&ctl_softc->rtr_queue,
- &pending_io->io_hdr, links);
+ ctl_enqueue_rtr(pending_io);
}
#if 0
else
@@ -10683,22 +11355,18 @@ ctl_failover(void)
case CTL_ACTION_SKIP:
pending_io->io_hdr.flags |=
CTL_FLAG_IS_WAS_ON_RTR;
- STAILQ_INSERT_TAIL(
- &ctl_softc->rtr_queue,
- &pending_io->io_hdr, links);
+ ctl_enqueue_rtr(pending_io);
break;
case CTL_ACTION_OVERLAP:
ctl_set_overlapped_cmd(
(struct ctl_scsiio *)pending_io);
- ctl_done_lock(pending_io,
- /*have_lock*/ 1);
+ ctl_done(pending_io);
break;
case CTL_ACTION_OVERLAP_TAG:
ctl_set_overlapped_tag(
(struct ctl_scsiio *)pending_io,
pending_io->scsiio.tag_num & 0xff);
- ctl_done_lock(pending_io,
- /*have_lock*/ 1);
+ ctl_done(pending_io);
break;
case CTL_ACTION_ERROR:
default:
@@ -10706,8 +11374,7 @@ ctl_failover(void)
(struct ctl_scsiio *)pending_io,
0, // sks_valid
0); //retry count
- ctl_done_lock(pending_io,
- /*have_lock*/ 1);
+ ctl_done(pending_io);
break;
}
}
@@ -10716,7 +11383,7 @@ ctl_failover(void)
* Build Unit Attention
*/
for (i = 0; i < CTL_MAX_INITIATORS; i++) {
- lun->pending_sense[i].ua_pending |=
+ lun->pending_ua[i] |=
CTL_UA_ASYM_ACC_CHANGE;
}
} else {
@@ -10732,8 +11399,7 @@ static int
ctl_scsiio_precheck(struct ctl_softc *ctl_softc, struct ctl_scsiio *ctsio)
{
struct ctl_lun *lun;
- struct ctl_cmd_entry *entry;
- uint8_t opcode;
+ const struct ctl_cmd_entry *entry;
uint32_t initidx, targ_lun;
int retval;
@@ -10741,13 +11407,7 @@ ctl_scsiio_precheck(struct ctl_softc *ctl_softc, struct ctl_scsiio *ctsio)
lun = NULL;
- opcode = ctsio->cdb[0];
-
- mtx_lock(&ctl_softc->ctl_lock);
-
- targ_lun = ctsio->io_hdr.nexus.targ_lun;
- if (ctsio->io_hdr.nexus.lun_map_fn != NULL)
- targ_lun = ctsio->io_hdr.nexus.lun_map_fn(ctsio->io_hdr.nexus.lun_map_arg, targ_lun);
+ targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
if ((targ_lun < CTL_MAX_LUNS)
&& (ctl_softc->ctl_luns[targ_lun] != NULL)) {
lun = ctl_softc->ctl_luns[targ_lun];
@@ -10765,13 +11425,27 @@ ctl_scsiio_precheck(struct ctl_softc *ctl_softc, struct ctl_scsiio *ctsio)
if (lun->be_lun->lun_type == T_PROCESSOR) {
ctsio->io_hdr.flags |= CTL_FLAG_CONTROL_DEV;
}
+
+ /*
+ * Every I/O goes into the OOA queue for a
+ * particular LUN, and stays there until completion.
+ */
+ mtx_lock(&lun->lun_lock);
+ TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr,
+ ooa_links);
}
} else {
ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL;
ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL;
}
- entry = &ctl_cmd_table[opcode];
+	/* Get command entry and return error if it is unsupported. */
+ entry = ctl_validate_command(ctsio);
+ if (entry == NULL) {
+ if (lun)
+ mtx_unlock(&lun->lun_lock);
+ return (retval);
+ }
ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK;
@@ -10784,66 +11458,42 @@ ctl_scsiio_precheck(struct ctl_softc *ctl_softc, struct ctl_scsiio *ctsio)
* it on the rtr queue.
*/
if (lun == NULL) {
- if (entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS)
- goto queue_rtr;
+ if (entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) {
+ ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
+ ctl_enqueue_rtr((union ctl_io *)ctsio);
+ return (retval);
+ }
ctl_set_unsupported_lun(ctsio);
- mtx_unlock(&ctl_softc->ctl_lock);
ctl_done((union ctl_io *)ctsio);
CTL_DEBUG_PRINT(("ctl_scsiio_precheck: bailing out due to invalid LUN\n"));
- goto bailout;
+ return (retval);
} else {
/*
- * Every I/O goes into the OOA queue for a particular LUN, and
- * stays there until completion.
- */
- TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
-
- /*
* Make sure we support this particular command on this LUN.
* e.g., we don't support writes to the control LUN.
*/
- switch (lun->be_lun->lun_type) {
- case T_PROCESSOR:
- if (((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0)
- && ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS)
- == 0)) {
- ctl_set_invalid_opcode(ctsio);
- mtx_unlock(&ctl_softc->ctl_lock);
- ctl_done((union ctl_io *)ctsio);
- goto bailout;
- }
- break;
- case T_DIRECT:
- if (((entry->flags & CTL_CMD_FLAG_OK_ON_SLUN) == 0)
- && ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS)
- == 0)){
- ctl_set_invalid_opcode(ctsio);
- mtx_unlock(&ctl_softc->ctl_lock);
- ctl_done((union ctl_io *)ctsio);
- goto bailout;
- }
- break;
- default:
- printf("Unsupported CTL LUN type %d\n",
- lun->be_lun->lun_type);
- panic("Unsupported CTL LUN type %d\n",
- lun->be_lun->lun_type);
- break; /* NOTREACHED */
+ if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) {
+ mtx_unlock(&lun->lun_lock);
+ ctl_set_invalid_opcode(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (retval);
}
}
initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
+#ifdef CTL_WITH_CA
/*
* If we've got a request sense, it'll clear the contingent
* allegiance condition. Otherwise, if we have a CA condition for
* this initiator, clear it, because it sent down a command other
* than request sense.
*/
- if ((opcode != REQUEST_SENSE)
+ if ((ctsio->cdb[0] != REQUEST_SENSE)
&& (ctl_is_set(lun->have_ca, initidx)))
ctl_clear_mask(lun->have_ca, initidx);
+#endif
/*
* If the command has this flag set, it handles its own unit
@@ -10870,7 +11520,7 @@ ctl_scsiio_precheck(struct ctl_softc *ctl_softc, struct ctl_scsiio *ctsio)
if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) {
ctl_ua_type ua_type;
- ua_type = lun->pending_sense[initidx].ua_pending;
+ ua_type = lun->pending_ua[initidx];
if (ua_type != CTL_UA_NONE) {
scsi_sense_data_type sense_format;
@@ -10888,20 +11538,19 @@ ctl_scsiio_precheck(struct ctl_softc *ctl_softc, struct ctl_scsiio *ctsio)
ctsio->io_hdr.status = CTL_SCSI_ERROR |
CTL_AUTOSENSE;
ctsio->sense_len = SSD_FULL_SIZE;
- lun->pending_sense[initidx].ua_pending &=
- ~ua_type;
- mtx_unlock(&ctl_softc->ctl_lock);
+ lun->pending_ua[initidx] &= ~ua_type;
+ mtx_unlock(&lun->lun_lock);
ctl_done((union ctl_io *)ctsio);
- goto bailout;
+ return (retval);
}
}
}
if (ctl_scsiio_lun_check(ctl_softc, lun, entry, ctsio) != 0) {
- mtx_unlock(&ctl_softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
ctl_done((union ctl_io *)ctsio);
- goto bailout;
+ return (retval);
}
/*
@@ -10937,7 +11586,7 @@ ctl_scsiio_precheck(struct ctl_softc *ctl_softc, struct ctl_scsiio *ctsio)
CTL_HA_STATUS_SUCCESS) {
printf("CTL:precheck, ctl_ha_msg_send returned %d\n",
isc_retval);
- printf("CTL:opcode is %x\n",opcode);
+ printf("CTL:opcode is %x\n", ctsio->cdb[0]);
} else {
#if 0
printf("CTL:Precheck sent msg, opcode is %x\n",opcode);
@@ -10951,7 +11600,8 @@ ctl_scsiio_precheck(struct ctl_softc *ctl_softc, struct ctl_scsiio *ctsio)
* so that we have an idea of what we're waiting for from
* the other side.
*/
- goto bailout_unlock;
+ mtx_unlock(&lun->lun_lock);
+ return (retval);
}
switch (ctl_check_ooa(lun, (union ctl_io *)ctsio,
@@ -10961,59 +11611,115 @@ ctl_scsiio_precheck(struct ctl_softc *ctl_softc, struct ctl_scsiio *ctsio)
ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED;
TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr,
blocked_links);
- goto bailout_unlock;
- break; /* NOTREACHED */
+ mtx_unlock(&lun->lun_lock);
+ return (retval);
case CTL_ACTION_PASS:
case CTL_ACTION_SKIP:
- goto queue_rtr;
- break; /* NOTREACHED */
+ ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
+ mtx_unlock(&lun->lun_lock);
+ ctl_enqueue_rtr((union ctl_io *)ctsio);
+ break;
case CTL_ACTION_OVERLAP:
+ mtx_unlock(&lun->lun_lock);
ctl_set_overlapped_cmd(ctsio);
- mtx_unlock(&ctl_softc->ctl_lock);
ctl_done((union ctl_io *)ctsio);
- goto bailout;
- break; /* NOTREACHED */
+ break;
case CTL_ACTION_OVERLAP_TAG:
+ mtx_unlock(&lun->lun_lock);
ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff);
- mtx_unlock(&ctl_softc->ctl_lock);
ctl_done((union ctl_io *)ctsio);
- goto bailout;
- break; /* NOTREACHED */
+ break;
case CTL_ACTION_ERROR:
default:
+ mtx_unlock(&lun->lun_lock);
ctl_set_internal_failure(ctsio,
/*sks_valid*/ 0,
/*retry_count*/ 0);
- mtx_unlock(&ctl_softc->ctl_lock);
ctl_done((union ctl_io *)ctsio);
- goto bailout;
- break; /* NOTREACHED */
+ break;
+ }
+ return (retval);
+}
+
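+/*
+ * Look up the command table entry for the CDB in this ctsio.  For opcodes
+ * flagged CTL_CMD_FLAG_SA5 the entry's execute pointer is reused as a
+ * sub-table indexed by the CDB's service action.
+ */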
+const struct ctl_cmd_entry *
+ctl_get_cmd_entry(struct ctl_scsiio *ctsio)
+{
+ const struct ctl_cmd_entry *entry;
+ int service_action;
+
+ entry = &ctl_cmd_table[ctsio->cdb[0]];
+ if (entry->flags & CTL_CMD_FLAG_SA5) {
+ service_action = ctsio->cdb[1] & SERVICE_ACTION_MASK;
+ entry = &((const struct ctl_cmd_entry *)
+ entry->execute)[service_action];
}
+ return (entry);
+}
- goto bailout_unlock;
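+/*
+ * Validate the CDB against its command table entry: fail the I/O with
+ * INVALID OPCODE if the command is not implemented, or with INVALID FIELD
+ * IN CDB (pointing at the offending byte and bit) if any CDB bit outside
+ * the entry's usage mask is set.  Returns NULL once the I/O has been
+ * completed with an error, otherwise returns the entry.
+ */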
+const struct ctl_cmd_entry *
+ctl_validate_command(struct ctl_scsiio *ctsio)
+{
+ const struct ctl_cmd_entry *entry;
+ int i;
+ uint8_t diff;
-queue_rtr:
- ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
- STAILQ_INSERT_TAIL(&ctl_softc->rtr_queue, &ctsio->io_hdr, links);
+ entry = ctl_get_cmd_entry(ctsio);
+ if (entry->execute == NULL) {
+ ctl_set_invalid_opcode(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (NULL);
+ }
+ KASSERT(entry->length > 0,
+ ("Not defined length for command 0x%02x/0x%02x",
+ ctsio->cdb[0], ctsio->cdb[1]));
+ for (i = 1; i < entry->length; i++) {
+ diff = ctsio->cdb[i] & ~entry->usage[i - 1];
+ if (diff == 0)
+ continue;
+ ctl_set_invalid_field(ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 1,
+ /*field*/ i,
+ /*bit_valid*/ 1,
+ /*bit*/ fls(diff) - 1);
+ ctl_done((union ctl_io *)ctsio);
+ return (NULL);
+ }
+ return (entry);
+}
-bailout_unlock:
- mtx_unlock(&ctl_softc->ctl_lock);
+static int
+ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry)
+{
-bailout:
- return (retval);
+ switch (lun_type) {
+ case T_PROCESSOR:
+ if (((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0) &&
+ ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) == 0))
+ return (0);
+ break;
+ case T_DIRECT:
+ if (((entry->flags & CTL_CMD_FLAG_OK_ON_SLUN) == 0) &&
+ ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) == 0))
+ return (0);
+ break;
+ default:
+ return (0);
+ }
+ return (1);
}
static int
ctl_scsiio(struct ctl_scsiio *ctsio)
{
int retval;
- struct ctl_cmd_entry *entry;
+ const struct ctl_cmd_entry *entry;
retval = CTL_RETVAL_COMPLETE;
CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0]));
- entry = &ctl_cmd_table[ctsio->cdb[0]];
+ entry = ctl_get_cmd_entry(ctsio);
/*
* If this I/O has been aborted, just send it straight to
@@ -11069,8 +11775,10 @@ ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io,
}
retval = 0;
+ mtx_lock(&ctl_softc->ctl_lock);
STAILQ_FOREACH(lun, &ctl_softc->lun_list, links)
retval += ctl_lun_reset(lun, io, ua_type);
+ mtx_unlock(&ctl_softc->ctl_lock);
return (retval);
}
@@ -11105,6 +11813,7 @@ ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type)
#endif
int i;
+ mtx_lock(&lun->lun_lock);
/*
* Run through the OOA queue and abort each I/O.
*/
@@ -11113,7 +11822,7 @@ ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type)
#endif
for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
- xio->io_hdr.flags |= CTL_FLAG_ABORT;
+ xio->io_hdr.flags |= CTL_FLAG_ABORT | CTL_FLAG_ABORT_STATUS;
}
/*
@@ -11124,7 +11833,7 @@ ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type)
for (i = 0; i < CTL_MAX_INITIATORS; i++) {
if (initindex == i)
continue;
- lun->pending_sense[i].ua_pending |= ua_type;
+ lun->pending_ua[i] |= ua_type;
}
#endif
@@ -11139,10 +11848,116 @@ ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type)
lun->flags &= ~CTL_LUN_RESERVED;
for (i = 0; i < CTL_MAX_INITIATORS; i++) {
+#ifdef CTL_WITH_CA
ctl_clear_mask(lun->have_ca, i);
- lun->pending_sense[i].ua_pending |= ua_type;
+#endif
+ lun->pending_ua[i] |= ua_type;
+ }
+ mtx_unlock(&lun->lun_lock);
+
+ return (0);
+}
+
+static int
+ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id,
+ int other_sc)
+{
+ union ctl_io *xio;
+ int found = 0;
+
+ mtx_assert(&lun->lun_lock, MA_OWNED);
+
+ /*
+ * Run through the OOA queue and abort every I/O whose target port
+ * and initiator ID match the given values; UINT32_MAX acts as a
+ * wildcard for either field.  I/Os matched through a wildcard also
+ * get CTL_FLAG_ABORT_STATUS set.
+ */
+ for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
+ xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
+
+ if ((targ_port == UINT32_MAX ||
+ targ_port == xio->io_hdr.nexus.targ_port) &&
+ (init_id == UINT32_MAX ||
+ init_id == xio->io_hdr.nexus.initid.id)) {
+ if (targ_port != xio->io_hdr.nexus.targ_port ||
+ init_id != xio->io_hdr.nexus.initid.id)
+ xio->io_hdr.flags |= CTL_FLAG_ABORT_STATUS;
+ xio->io_hdr.flags |= CTL_FLAG_ABORT;
+ found = 1;
+ if (!other_sc && !(lun->flags & CTL_LUN_PRIMARY_SC)) {
+ union ctl_ha_msg msg_info;
+
+ msg_info.hdr.nexus = xio->io_hdr.nexus;
+ msg_info.task.task_action = CTL_TASK_ABORT_TASK;
+ msg_info.task.tag_num = xio->scsiio.tag_num;
+ msg_info.task.tag_type = xio->scsiio.tag_type;
+ msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
+ msg_info.hdr.original_sc = NULL;
+ msg_info.hdr.serializing_sc = NULL;
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL,
+ (void *)&msg_info, sizeof(msg_info), 0);
+ }
+ }
}
+ return (found);
+}
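+
+/*
+ * Handle the ABORT TASK SET and CLEAR TASK SET task management functions:
+ * abort every command on the LUN's OOA queue for the requesting nexus
+ * (ABORT TASK SET) or for all nexuses (CLEAR TASK SET).
+ */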
+static int
+ctl_abort_task_set(union ctl_io *io)
+{
+ struct ctl_softc *softc = control_softc;
+ struct ctl_lun *lun;
+ uint32_t targ_lun;
+
+ /*
+ * Look up the LUN.
+ */
+ targ_lun = io->io_hdr.nexus.targ_mapped_lun;
+ mtx_lock(&softc->ctl_lock);
+ if ((targ_lun < CTL_MAX_LUNS) && (softc->ctl_luns[targ_lun] != NULL))
+ lun = softc->ctl_luns[targ_lun];
+ else {
+ mtx_unlock(&softc->ctl_lock);
+ return (1);
+ }
+
+ mtx_lock(&lun->lun_lock);
+ mtx_unlock(&softc->ctl_lock);
+ if (io->taskio.task_action == CTL_TASK_ABORT_TASK_SET) {
+ ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port,
+ io->io_hdr.nexus.initid.id,
+ (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0);
+ } else { /* CTL_TASK_CLEAR_TASK_SET */
+ ctl_abort_tasks_lun(lun, UINT32_MAX, UINT32_MAX,
+ (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0);
+ }
+ mtx_unlock(&lun->lun_lock);
+ return (0);
+}
+
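+/*
+ * Handle the I_T NEXUS RESET task management function: abort all tasks
+ * for the nexus on every LUN and establish an I_T NEXUS LOSS unit
+ * attention for the initiator.
+ */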
+static int
+ctl_i_t_nexus_reset(union ctl_io *io)
+{
+ struct ctl_softc *softc = control_softc;
+ struct ctl_lun *lun;
+ uint32_t initindex;
+
+ initindex = ctl_get_initindex(&io->io_hdr.nexus);
+ mtx_lock(&softc->ctl_lock);
+ STAILQ_FOREACH(lun, &softc->lun_list, links) {
+ mtx_lock(&lun->lun_lock);
+ ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port,
+ io->io_hdr.nexus.initid.id,
+ (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0);
+#ifdef CTL_WITH_CA
+ ctl_clear_mask(lun->have_ca, initindex);
+#endif
+ lun->pending_ua[initindex] |= CTL_UA_I_T_NEXUS_LOSS;
+ mtx_unlock(&lun->lun_lock);
+ }
+ mtx_unlock(&softc->ctl_lock);
return (0);
}
@@ -11165,20 +11980,23 @@ ctl_abort_task(union ctl_io *io)
/*
* Look up the LUN.
*/
- targ_lun = io->io_hdr.nexus.targ_lun;
- if (io->io_hdr.nexus.lun_map_fn != NULL)
- targ_lun = io->io_hdr.nexus.lun_map_fn(io->io_hdr.nexus.lun_map_arg, targ_lun);
+ targ_lun = io->io_hdr.nexus.targ_mapped_lun;
+ mtx_lock(&ctl_softc->ctl_lock);
if ((targ_lun < CTL_MAX_LUNS)
&& (ctl_softc->ctl_luns[targ_lun] != NULL))
lun = ctl_softc->ctl_luns[targ_lun];
- else
- goto bailout;
+ else {
+ mtx_unlock(&ctl_softc->ctl_lock);
+ return (1);
+ }
#if 0
printf("ctl_abort_task: called for lun %lld, tag %d type %d\n",
lun->lun, io->taskio.tag_num, io->taskio.tag_type);
#endif
+ mtx_lock(&lun->lun_lock);
+ mtx_unlock(&ctl_softc->ctl_lock);
/*
* Run through the OOA queue and attempt to find the given I/O.
* The target port, initiator ID, tag type and tag number have to
@@ -11270,8 +12088,7 @@ ctl_abort_task(union ctl_io *io)
}
}
}
-
-bailout:
+ mtx_unlock(&lun->lun_lock);
if (found == 0) {
/*
@@ -11288,166 +12105,123 @@ bailout:
io->io_hdr.nexus.targ_lun, io->taskio.tag_num,
io->taskio.tag_type);
#endif
- return (1);
- } else
- return (0);
+ }
+ return (0);
}
-/*
- * This routine cannot block! It must be callable from an interrupt
- * handler as well as from the work thread.
- */
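+/*
+ * Execute a single task management command (abort, reset, etc.) and
+ * complete it with CTL_SUCCESS or CTL_ERROR via ctl_done().
+ */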
static void
-ctl_run_task_queue(struct ctl_softc *ctl_softc)
+ctl_run_task(union ctl_io *io)
{
- union ctl_io *io, *next_io;
-
- mtx_assert(&ctl_softc->ctl_lock, MA_OWNED);
-
- CTL_DEBUG_PRINT(("ctl_run_task_queue\n"));
-
- for (io = (union ctl_io *)STAILQ_FIRST(&ctl_softc->task_queue);
- io != NULL; io = next_io) {
- int retval;
- const char *task_desc;
+ struct ctl_softc *ctl_softc = control_softc;
+ int retval = 1;
+ const char *task_desc;
- next_io = (union ctl_io *)STAILQ_NEXT(&io->io_hdr, links);
+ CTL_DEBUG_PRINT(("ctl_run_task\n"));
- retval = 0;
+ KASSERT(io->io_hdr.io_type == CTL_IO_TASK,
+ ("ctl_run_task: Unextected io_type %d\n",
+ io->io_hdr.io_type));
- switch (io->io_hdr.io_type) {
- case CTL_IO_TASK: {
- task_desc = ctl_scsi_task_string(&io->taskio);
- if (task_desc != NULL) {
+ task_desc = ctl_scsi_task_string(&io->taskio);
+ if (task_desc != NULL) {
#ifdef NEEDTOPORT
- csevent_log(CSC_CTL | CSC_SHELF_SW |
- CTL_TASK_REPORT,
- csevent_LogType_Trace,
- csevent_Severity_Information,
- csevent_AlertLevel_Green,
- csevent_FRU_Firmware,
- csevent_FRU_Unknown,
- "CTL: received task: %s",task_desc);
+ csevent_log(CSC_CTL | CSC_SHELF_SW |
+ CTL_TASK_REPORT,
+ csevent_LogType_Trace,
+ csevent_Severity_Information,
+ csevent_AlertLevel_Green,
+ csevent_FRU_Firmware,
+ csevent_FRU_Unknown,
+ "CTL: received task: %s",task_desc);
#endif
- } else {
+ } else {
#ifdef NEEDTOPORT
- csevent_log(CSC_CTL | CSC_SHELF_SW |
- CTL_TASK_REPORT,
- csevent_LogType_Trace,
- csevent_Severity_Information,
- csevent_AlertLevel_Green,
- csevent_FRU_Firmware,
- csevent_FRU_Unknown,
- "CTL: received unknown task "
- "type: %d (%#x)",
- io->taskio.task_action,
- io->taskio.task_action);
+ csevent_log(CSC_CTL | CSC_SHELF_SW |
+ CTL_TASK_REPORT,
+ csevent_LogType_Trace,
+ csevent_Severity_Information,
+ csevent_AlertLevel_Green,
+ csevent_FRU_Firmware,
+ csevent_FRU_Unknown,
+ "CTL: received unknown task "
+ "type: %d (%#x)",
+ io->taskio.task_action,
+ io->taskio.task_action);
#endif
- }
- switch (io->taskio.task_action) {
- case CTL_TASK_ABORT_TASK:
- retval = ctl_abort_task(io);
- break;
- case CTL_TASK_ABORT_TASK_SET:
- break;
- case CTL_TASK_CLEAR_ACA:
- break;
- case CTL_TASK_CLEAR_TASK_SET:
- break;
- case CTL_TASK_LUN_RESET: {
- struct ctl_lun *lun;
- uint32_t targ_lun;
- int retval;
-
- targ_lun = io->io_hdr.nexus.targ_lun;
- if (io->io_hdr.nexus.lun_map_fn != NULL)
- targ_lun = io->io_hdr.nexus.lun_map_fn(io->io_hdr.nexus.lun_map_arg, targ_lun);
-
- if ((targ_lun < CTL_MAX_LUNS)
- && (ctl_softc->ctl_luns[targ_lun] != NULL))
- lun = ctl_softc->ctl_luns[targ_lun];
- else {
- retval = 1;
- break;
- }
+ }
+ switch (io->taskio.task_action) {
+ case CTL_TASK_ABORT_TASK:
+ retval = ctl_abort_task(io);
+ break;
+ case CTL_TASK_ABORT_TASK_SET:
+ case CTL_TASK_CLEAR_TASK_SET:
+ retval = ctl_abort_task_set(io);
+ break;
+ case CTL_TASK_CLEAR_ACA:
+ break;
+ case CTL_TASK_I_T_NEXUS_RESET:
+ retval = ctl_i_t_nexus_reset(io);
+ break;
+ case CTL_TASK_LUN_RESET: {
+ struct ctl_lun *lun;
+ uint32_t targ_lun;
- if (!(io->io_hdr.flags &
- CTL_FLAG_FROM_OTHER_SC)) {
- union ctl_ha_msg msg_info;
+ targ_lun = io->io_hdr.nexus.targ_mapped_lun;
+ mtx_lock(&ctl_softc->ctl_lock);
+ if ((targ_lun < CTL_MAX_LUNS)
+ && (ctl_softc->ctl_luns[targ_lun] != NULL))
+ lun = ctl_softc->ctl_luns[targ_lun];
+ else {
+ mtx_unlock(&ctl_softc->ctl_lock);
+ retval = 1;
+ break;
+ }
- io->io_hdr.flags |=
- CTL_FLAG_SENT_2OTHER_SC;
- msg_info.hdr.msg_type =
- CTL_MSG_MANAGE_TASKS;
- msg_info.hdr.nexus = io->io_hdr.nexus;
- msg_info.task.task_action =
- CTL_TASK_LUN_RESET;
- msg_info.hdr.original_sc = NULL;
- msg_info.hdr.serializing_sc = NULL;
- if (CTL_HA_STATUS_SUCCESS !=
- ctl_ha_msg_send(CTL_HA_CHAN_CTL,
- (void *)&msg_info,
- sizeof(msg_info), 0)) {
- }
- }
+ if (!(io->io_hdr.flags &
+ CTL_FLAG_FROM_OTHER_SC)) {
+ union ctl_ha_msg msg_info;
- retval = ctl_lun_reset(lun, io,
- CTL_UA_LUN_RESET);
- break;
+ io->io_hdr.flags |=
+ CTL_FLAG_SENT_2OTHER_SC;
+ msg_info.hdr.msg_type =
+ CTL_MSG_MANAGE_TASKS;
+ msg_info.hdr.nexus = io->io_hdr.nexus;
+ msg_info.task.task_action =
+ CTL_TASK_LUN_RESET;
+ msg_info.hdr.original_sc = NULL;
+ msg_info.hdr.serializing_sc = NULL;
+ if (CTL_HA_STATUS_SUCCESS !=
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL,
+ (void *)&msg_info,
+ sizeof(msg_info), 0)) {
}
- case CTL_TASK_TARGET_RESET:
- retval = ctl_target_reset(ctl_softc, io,
- CTL_UA_TARG_RESET);
- break;
- case CTL_TASK_BUS_RESET:
- retval = ctl_bus_reset(ctl_softc, io);
- break;
- case CTL_TASK_PORT_LOGIN:
- break;
- case CTL_TASK_PORT_LOGOUT:
- break;
- default:
- printf("ctl_run_task_queue: got unknown task "
- "management event %d\n",
- io->taskio.task_action);
- break;
- }
- if (retval == 0)
- io->io_hdr.status = CTL_SUCCESS;
- else
- io->io_hdr.status = CTL_ERROR;
-
- STAILQ_REMOVE(&ctl_softc->task_queue, &io->io_hdr,
- ctl_io_hdr, links);
- /*
- * This will queue this I/O to the done queue, but the
- * work thread won't be able to process it until we
- * return and the lock is released.
- */
- ctl_done_lock(io, /*have_lock*/ 1);
- break;
}
- default: {
- printf("%s: invalid I/O type %d msg %d cdb %x"
- " iptl: %ju:%d:%ju:%d tag 0x%04x\n",
- __func__, io->io_hdr.io_type,
- io->io_hdr.msg_type, io->scsiio.cdb[0],
- (uintmax_t)io->io_hdr.nexus.initid.id,
- io->io_hdr.nexus.targ_port,
- (uintmax_t)io->io_hdr.nexus.targ_target.id,
- io->io_hdr.nexus.targ_lun /* XXX */,
- (io->io_hdr.io_type == CTL_IO_TASK) ?
- io->taskio.tag_num : io->scsiio.tag_num);
- STAILQ_REMOVE(&ctl_softc->task_queue, &io->io_hdr,
- ctl_io_hdr, links);
- ctl_free_io(io);
- break;
- }
- }
+ retval = ctl_lun_reset(lun, io,
+ CTL_UA_LUN_RESET);
+ mtx_unlock(&ctl_softc->ctl_lock);
+ break;
}
-
- ctl_softc->flags &= ~CTL_FLAG_TASK_PENDING;
+ case CTL_TASK_TARGET_RESET:
+ retval = ctl_target_reset(ctl_softc, io, CTL_UA_TARG_RESET);
+ break;
+ case CTL_TASK_BUS_RESET:
+ retval = ctl_bus_reset(ctl_softc, io);
+ break;
+ case CTL_TASK_PORT_LOGIN:
+ break;
+ case CTL_TASK_PORT_LOGOUT:
+ break;
+ default:
+ printf("ctl_run_task: got unknown task management event %d\n",
+ io->taskio.task_action);
+ break;
+ }
+ if (retval == 0)
+ io->io_hdr.status = CTL_SUCCESS;
+ else
+ io->io_hdr.status = CTL_ERROR;
+ ctl_done(io);
}
/*
@@ -11464,52 +12238,44 @@ ctl_handle_isc(union ctl_io *io)
ctl_softc = control_softc;
- targ_lun = io->io_hdr.nexus.targ_lun;
- if (io->io_hdr.nexus.lun_map_fn != NULL)
- targ_lun = io->io_hdr.nexus.lun_map_fn(io->io_hdr.nexus.lun_map_arg, targ_lun);
+ targ_lun = io->io_hdr.nexus.targ_mapped_lun;
lun = ctl_softc->ctl_luns[targ_lun];
switch (io->io_hdr.msg_type) {
case CTL_MSG_SERIALIZE:
- free_io = ctl_serialize_other_sc_cmd(&io->scsiio,
- /*have_lock*/ 0);
+ free_io = ctl_serialize_other_sc_cmd(&io->scsiio);
break;
case CTL_MSG_R2R: {
- uint8_t opcode;
- struct ctl_cmd_entry *entry;
+ const struct ctl_cmd_entry *entry;
/*
* This is only used in SER_ONLY mode.
*/
free_io = 0;
- opcode = io->scsiio.cdb[0];
- entry = &ctl_cmd_table[opcode];
- mtx_lock(&ctl_softc->ctl_lock);
+ entry = ctl_get_cmd_entry(&io->scsiio);
+ mtx_lock(&lun->lun_lock);
if (ctl_scsiio_lun_check(ctl_softc, lun,
entry, (struct ctl_scsiio *)io) != 0) {
- ctl_done_lock(io, /*have_lock*/ 1);
- mtx_unlock(&ctl_softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
+ ctl_done(io);
break;
}
io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
- STAILQ_INSERT_TAIL(&ctl_softc->rtr_queue,
- &io->io_hdr, links);
- mtx_unlock(&ctl_softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
+ ctl_enqueue_rtr(io);
break;
}
case CTL_MSG_FINISH_IO:
if (ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
free_io = 0;
- ctl_done_lock(io, /*have_lock*/ 0);
+ ctl_done(io);
} else {
free_io = 1;
- mtx_lock(&ctl_softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr,
ooa_links);
- STAILQ_REMOVE(&ctl_softc->task_queue,
- &io->io_hdr, ctl_io_hdr, links);
ctl_check_blocked(lun);
- mtx_unlock(&ctl_softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
}
break;
case CTL_MSG_PERS_ACTION:
@@ -11519,7 +12285,7 @@ ctl_handle_isc(union ctl_io *io)
break;
case CTL_MSG_BAD_JUJU:
free_io = 0;
- ctl_done_lock(io, /*have_lock*/ 0);
+ ctl_done(io);
break;
case CTL_MSG_DATAMOVE:
/* Only used in XFER mode */
@@ -11550,9 +12316,8 @@ ctl_handle_isc(union ctl_io *io)
static ctl_lun_error_pattern
ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc)
{
- struct ctl_cmd_entry *entry;
+ const struct ctl_cmd_entry *entry;
ctl_lun_error_pattern filtered_pattern, pattern;
- uint8_t opcode;
pattern = desc->error_pattern;
@@ -11567,8 +12332,7 @@ ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc)
if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY)
return (CTL_LUN_PAT_ANY);
- opcode = ctsio->cdb[0];
- entry = &ctl_cmd_table[opcode];
+ entry = ctl_get_cmd_entry(ctsio);
filtered_pattern = entry->pattern & pattern;
@@ -11616,7 +12380,7 @@ ctl_inject_error(struct ctl_lun *lun, union ctl_io *io)
{
struct ctl_error_desc *desc, *desc2;
- mtx_assert(&control_softc->ctl_lock, MA_OWNED);
+ mtx_assert(&lun->lun_lock, MA_OWNED);
STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) {
ctl_lun_error_pattern pattern;
@@ -11732,7 +12496,6 @@ ctl_datamove(union ctl_io *io)
}
#endif /* CTL_TIME_IO */
- mtx_lock(&control_softc->ctl_lock);
#ifdef CTL_IO_DELAY
if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
struct ctl_lun *lun;
@@ -11757,31 +12520,10 @@ ctl_datamove(union ctl_io *io)
if (lun->delay_info.datamove_type ==
CTL_DELAY_TYPE_ONESHOT)
lun->delay_info.datamove_delay = 0;
- mtx_unlock(&control_softc->ctl_lock);
return;
}
}
#endif
- /*
- * If we have any pending task management commands, process them
- * first. This is necessary to eliminate a race condition with the
- * FETD:
- *
- * - FETD submits a task management command, like an abort.
- * - Back end calls fe_datamove() to move the data for the aborted
- * command. The FETD can't really accept it, but if it did, it
- * would end up transmitting data for a command that the initiator
- * told us to abort.
- *
- * We close the race by processing all pending task management
- * commands here (we can't block!), and then check this I/O to see
- * if it has been aborted. If so, return it to the back end with
- * bad status, so the back end can say return an error to the back end
- * and then when the back end returns an error, we can return the
- * aborted command to the FETD, so it can clean up its resources.
- */
- if (control_softc->flags & CTL_FLAG_TASK_PENDING)
- ctl_run_task_queue(control_softc);
/*
* This command has been aborted. Set the port status, so we fail
@@ -11793,9 +12535,7 @@ ctl_datamove(union ctl_io *io)
io->io_hdr.nexus.targ_port,
(uintmax_t)io->io_hdr.nexus.targ_target.id,
io->io_hdr.nexus.targ_lun);
- io->io_hdr.status = CTL_CMD_ABORTED;
io->io_hdr.port_status = 31337;
- mtx_unlock(&control_softc->ctl_lock);
/*
* Note that the backend, in this case, will get the
* callback in its context. In other cases it may get
@@ -11961,7 +12701,7 @@ ctl_datamove(union ctl_io *io)
}
io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
if (io->io_hdr.flags & CTL_FLAG_FAILOVER)
- ctl_failover_io(io, /*have_lock*/ 1);
+ ctl_failover_io(io, /*have_lock*/ 0);
} else {
@@ -11971,7 +12711,6 @@ ctl_datamove(union ctl_io *io)
*/
fe_datamove =
control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove;
- mtx_unlock(&control_softc->ctl_lock);
fe_datamove(io);
}
@@ -12551,36 +13290,24 @@ ctl_datamove_remote(union ctl_io *io)
/*
* Note that we look for an aborted I/O here, but don't do some of
- * the other checks that ctl_datamove() normally does. We don't
- * need to run the task queue, because this I/O is on the ISC
- * queue, which is executed by the work thread after the task queue.
+ * the other checks that ctl_datamove() normally does.
* We don't need to run the datamove delay code, since that should
* have been done if need be on the other controller.
*/
- mtx_lock(&softc->ctl_lock);
-
if (io->io_hdr.flags & CTL_FLAG_ABORT) {
-
printf("%s: tag 0x%04x on (%d:%d:%d:%d) aborted\n", __func__,
io->scsiio.tag_num, io->io_hdr.nexus.initid.id,
io->io_hdr.nexus.targ_port,
io->io_hdr.nexus.targ_target.id,
io->io_hdr.nexus.targ_lun);
- io->io_hdr.status = CTL_CMD_ABORTED;
io->io_hdr.port_status = 31338;
-
- mtx_unlock(&softc->ctl_lock);
-
ctl_send_datamove_done(io, /*have_lock*/ 0);
-
return;
}
if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) {
- mtx_unlock(&softc->ctl_lock);
ctl_datamove_remote_write(io);
} else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN){
- mtx_unlock(&softc->ctl_lock);
ctl_datamove_remote_read(io);
} else {
union ctl_ha_msg msg;
@@ -12616,12 +13343,9 @@ ctl_datamove_remote(union ctl_io *io)
io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
ctl_failover_io(io, /*have_lock*/ 1);
- mtx_unlock(&softc->ctl_lock);
return;
}
- mtx_unlock(&softc->ctl_lock);
-
if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0) >
CTL_HA_STATUS_SUCCESS) {
/* XXX KDM what to do if this fails? */
@@ -12632,7 +13356,7 @@ ctl_datamove_remote(union ctl_io *io)
}
static int
-ctl_process_done(union ctl_io *io, int have_lock)
+ctl_process_done(union ctl_io *io)
{
struct ctl_lun *lun;
struct ctl_softc *ctl_softc;
@@ -12703,17 +13427,13 @@ ctl_process_done(union ctl_io *io, int have_lock)
lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
if (lun == NULL) {
CTL_DEBUG_PRINT(("NULL LUN for lun %d\n",
- io->io_hdr.nexus.targ_lun));
+ io->io_hdr.nexus.targ_mapped_lun));
fe_done(io);
goto bailout;
}
ctl_softc = lun->ctl_softc;
- /*
- * Remove this from the OOA queue.
- */
- if (have_lock == 0)
- mtx_lock(&ctl_softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
/*
* Check to see if we have any errors to inject here. We only
@@ -12729,134 +13449,39 @@ ctl_process_done(union ctl_io *io, int have_lock)
*
* XXX KDM should we also track I/O latency?
*/
- if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) {
- uint32_t blocksize;
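+ /*
+ * Update per-port LUN statistics: classify the command by data
+ * direction (read, write, or no data) and accumulate bytes, operation
+ * counts and, under CTL_TIME_IO, DMA and total times.
+ */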
+ if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS &&
+ io->io_hdr.io_type == CTL_IO_SCSI) {
#ifdef CTL_TIME_IO
struct bintime cur_bt;
#endif
+ int type;
- if ((lun->be_lun != NULL)
- && (lun->be_lun->blocksize != 0))
- blocksize = lun->be_lun->blocksize;
+ if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
+ CTL_FLAG_DATA_IN)
+ type = CTL_STATS_READ;
+ else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
+ CTL_FLAG_DATA_OUT)
+ type = CTL_STATS_WRITE;
else
- blocksize = 512;
-
- switch (io->io_hdr.io_type) {
- case CTL_IO_SCSI: {
- int isread;
- struct ctl_lba_len lbalen;
-
- isread = 0;
- switch (io->scsiio.cdb[0]) {
- case READ_6:
- case READ_10:
- case READ_12:
- case READ_16:
- isread = 1;
- /* FALLTHROUGH */
- case WRITE_6:
- case WRITE_10:
- case WRITE_12:
- case WRITE_16:
- case WRITE_VERIFY_10:
- case WRITE_VERIFY_12:
- case WRITE_VERIFY_16:
- memcpy(&lbalen, io->io_hdr.ctl_private[
- CTL_PRIV_LBA_LEN].bytes, sizeof(lbalen));
-
- if (isread) {
- lun->stats.ports[targ_port].bytes[CTL_STATS_READ] +=
- lbalen.len * blocksize;
- lun->stats.ports[targ_port].operations[CTL_STATS_READ]++;
+ type = CTL_STATS_NO_IO;
+ lun->stats.ports[targ_port].bytes[type] +=
+ io->scsiio.kern_total_len;
+ lun->stats.ports[targ_port].operations[type]++;
#ifdef CTL_TIME_IO
- bintime_add(
- &lun->stats.ports[targ_port].dma_time[CTL_STATS_READ],
- &io->io_hdr.dma_bt);
- lun->stats.ports[targ_port].num_dmas[CTL_STATS_READ] +=
- io->io_hdr.num_dmas;
- getbintime(&cur_bt);
- bintime_sub(&cur_bt,
- &io->io_hdr.start_bt);
-
- bintime_add(
- &lun->stats.ports[targ_port].time[CTL_STATS_READ],
- &cur_bt);
-
-#if 0
- cs_prof_gettime(&cur_ticks);
- lun->stats.time[CTL_STATS_READ] +=
- cur_ticks -
- io->io_hdr.start_ticks;
-#endif
-#if 0
- lun->stats.time[CTL_STATS_READ] +=
- jiffies - io->io_hdr.start_time;
-#endif
-#endif /* CTL_TIME_IO */
- } else {
- lun->stats.ports[targ_port].bytes[CTL_STATS_WRITE] +=
- lbalen.len * blocksize;
- lun->stats.ports[targ_port].operations[
- CTL_STATS_WRITE]++;
-
-#ifdef CTL_TIME_IO
- bintime_add(
- &lun->stats.ports[targ_port].dma_time[CTL_STATS_WRITE],
- &io->io_hdr.dma_bt);
- lun->stats.ports[targ_port].num_dmas[CTL_STATS_WRITE] +=
- io->io_hdr.num_dmas;
- getbintime(&cur_bt);
- bintime_sub(&cur_bt,
- &io->io_hdr.start_bt);
-
- bintime_add(
- &lun->stats.ports[targ_port].time[CTL_STATS_WRITE],
- &cur_bt);
-#if 0
- cs_prof_gettime(&cur_ticks);
- lun->stats.ports[targ_port].time[CTL_STATS_WRITE] +=
- cur_ticks -
- io->io_hdr.start_ticks;
- lun->stats.ports[targ_port].time[CTL_STATS_WRITE] +=
- jiffies - io->io_hdr.start_time;
+ bintime_add(&lun->stats.ports[targ_port].dma_time[type],
+ &io->io_hdr.dma_bt);
+ lun->stats.ports[targ_port].num_dmas[type] +=
+ io->io_hdr.num_dmas;
+ getbintime(&cur_bt);
+ bintime_sub(&cur_bt, &io->io_hdr.start_bt);
+ bintime_add(&lun->stats.ports[targ_port].time[type], &cur_bt);
#endif
-#endif /* CTL_TIME_IO */
- }
- break;
- default:
- lun->stats.ports[targ_port].operations[CTL_STATS_NO_IO]++;
-
-#ifdef CTL_TIME_IO
- bintime_add(
- &lun->stats.ports[targ_port].dma_time[CTL_STATS_NO_IO],
- &io->io_hdr.dma_bt);
- lun->stats.ports[targ_port].num_dmas[CTL_STATS_NO_IO] +=
- io->io_hdr.num_dmas;
- getbintime(&cur_bt);
- bintime_sub(&cur_bt, &io->io_hdr.start_bt);
-
- bintime_add(&lun->stats.ports[targ_port].time[CTL_STATS_NO_IO],
- &cur_bt);
-
-#if 0
- cs_prof_gettime(&cur_ticks);
- lun->stats.ports[targ_port].time[CTL_STATS_NO_IO] +=
- cur_ticks -
- io->io_hdr.start_ticks;
- lun->stats.ports[targ_port].time[CTL_STATS_NO_IO] +=
- jiffies - io->io_hdr.start_time;
-#endif
-#endif /* CTL_TIME_IO */
- break;
- }
- break;
- }
- default:
- break;
- }
}
+ /*
+ * Remove this from the OOA queue.
+ */
TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
/*
@@ -12870,8 +13495,13 @@ ctl_process_done(union ctl_io *io, int have_lock)
* left on its OOA queue.
*/
if ((lun->flags & CTL_LUN_INVALID)
- && (TAILQ_FIRST(&lun->ooa_queue) == NULL))
+ && TAILQ_EMPTY(&lun->ooa_queue)) {
+ mtx_unlock(&lun->lun_lock);
+ mtx_lock(&ctl_softc->ctl_lock);
ctl_free_lun(lun);
+ mtx_unlock(&ctl_softc->ctl_lock);
+ } else
+ mtx_unlock(&lun->lun_lock);
/*
* If this command has been aborted, make sure we set the status
@@ -12879,7 +13509,7 @@ ctl_process_done(union ctl_io *io, int have_lock)
* whatever it needs to do to clean up its state.
*/
if (io->io_hdr.flags & CTL_FLAG_ABORT)
- io->io_hdr.status = CTL_CMD_ABORTED;
+ ctl_set_task_aborted(&io->scsiio);
/*
* We print out status for every task management command. For SCSI
@@ -12916,8 +13546,6 @@ ctl_process_done(union ctl_io *io, int have_lock)
if ((time_uptime - ctl_softc->last_print_jiffies) <= 0){
ctl_softc->skipped_prints++;
- if (have_lock == 0)
- mtx_unlock(&ctl_softc->ctl_lock);
} else {
uint32_t skipped_prints;
@@ -12926,8 +13554,6 @@ ctl_process_done(union ctl_io *io, int have_lock)
ctl_softc->skipped_prints = 0;
ctl_softc->last_print_jiffies = time_uptime;
- if (have_lock == 0)
- mtx_unlock(&ctl_softc->ctl_lock);
if (skipped_prints > 0) {
#ifdef NEEDTOPORT
csevent_log(CSC_CTL | CSC_SHELF_SW |
@@ -12944,21 +13570,14 @@ ctl_process_done(union ctl_io *io, int have_lock)
if (bootverbose || verbose > 0)
ctl_io_error_print(io, NULL);
}
- } else {
- if (have_lock == 0)
- mtx_unlock(&ctl_softc->ctl_lock);
}
break;
}
case CTL_IO_TASK:
- if (have_lock == 0)
- mtx_unlock(&ctl_softc->ctl_lock);
if (bootverbose || verbose > 0)
ctl_io_error_print(io, NULL);
break;
default:
- if (have_lock == 0)
- mtx_unlock(&ctl_softc->ctl_lock);
break;
}
@@ -13017,6 +13636,7 @@ bailout:
return (CTL_RETVAL_COMPLETE);
}
+#ifdef CTL_WITH_CA
/*
* Front end should call this if it doesn't do autosense. When the request
* sense comes back in from the initiator, we'll dequeue this and send it.
@@ -13046,8 +13666,7 @@ ctl_queue_sense(union ctl_io *io)
* information.
*/
targ_lun = io->io_hdr.nexus.targ_lun;
- if (io->io_hdr.nexus.lun_map_fn != NULL)
- targ_lun = io->io_hdr.nexus.lun_map_fn(io->io_hdr.nexus.lun_map_arg, targ_lun);
+ targ_lun = ctl_map_lun(io->io_hdr.nexus.targ_port, targ_lun);
if ((targ_lun < CTL_MAX_LUNS)
&& (ctl_softc->ctl_luns[targ_lun] != NULL))
lun = ctl_softc->ctl_luns[targ_lun];
@@ -13056,16 +13675,20 @@ ctl_queue_sense(union ctl_io *io)
initidx = ctl_get_initindex(&io->io_hdr.nexus);
+ mtx_lock(&lun->lun_lock);
/*
* Already have CA set for this LUN...toss the sense information.
*/
- if (ctl_is_set(lun->have_ca, initidx))
+ if (ctl_is_set(lun->have_ca, initidx)) {
+ mtx_unlock(&lun->lun_lock);
goto bailout;
+ }
- memcpy(&lun->pending_sense[initidx].sense, &io->scsiio.sense_data,
- ctl_min(sizeof(lun->pending_sense[initidx].sense),
+ memcpy(&lun->pending_sense[initidx], &io->scsiio.sense_data,
+ ctl_min(sizeof(lun->pending_sense[initidx]),
sizeof(io->scsiio.sense_data)));
ctl_set_mask(lun->have_ca, initidx);
+ mtx_unlock(&lun->lun_lock);
bailout:
mtx_unlock(&ctl_softc->ctl_lock);
@@ -13074,6 +13697,7 @@ bailout:
return (CTL_RETVAL_COMPLETE);
}
+#endif
/*
* Primary command inlet from frontend ports. All SCSI and task I/O
@@ -13093,42 +13717,19 @@ ctl_queue(union ctl_io *io)
getbintime(&io->io_hdr.start_bt);
#endif /* CTL_TIME_IO */
- mtx_lock(&ctl_softc->ctl_lock);
+ /* Map FE-specific LUN ID into global one. */
+ io->io_hdr.nexus.targ_mapped_lun =
+ ctl_map_lun(io->io_hdr.nexus.targ_port, io->io_hdr.nexus.targ_lun);
switch (io->io_hdr.io_type) {
case CTL_IO_SCSI:
- STAILQ_INSERT_TAIL(&ctl_softc->incoming_queue, &io->io_hdr,
- links);
- break;
case CTL_IO_TASK:
- STAILQ_INSERT_TAIL(&ctl_softc->task_queue, &io->io_hdr, links);
- /*
- * Set the task pending flag. This is necessary to close a
- * race condition with the FETD:
- *
- * - FETD submits a task management command, like an abort.
- * - Back end calls fe_datamove() to move the data for the
- * aborted command. The FETD can't really accept it, but
- * if it did, it would end up transmitting data for a
- * command that the initiator told us to abort.
- *
- * We close the race condition by setting the flag here,
- * and checking it in ctl_datamove(), before calling the
- * FETD's fe_datamove routine. If we've got a task
- * pending, we run the task queue and then check to see
- * whether our particular I/O has been aborted.
- */
- ctl_softc->flags |= CTL_FLAG_TASK_PENDING;
+ ctl_enqueue_incoming(io);
break;
default:
- mtx_unlock(&ctl_softc->ctl_lock);
printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type);
- return (-EINVAL);
- break; /* NOTREACHED */
+ return (EINVAL);
}
- mtx_unlock(&ctl_softc->ctl_lock);
-
- ctl_wakeup_thread();
return (CTL_RETVAL_COMPLETE);
}
@@ -13140,23 +13741,17 @@ ctl_done_timer_wakeup(void *arg)
union ctl_io *io;
io = (union ctl_io *)arg;
- ctl_done_lock(io, /*have_lock*/ 0);
+ ctl_done(io);
}
#endif /* CTL_IO_DELAY */
void
-ctl_done_lock(union ctl_io *io, int have_lock)
+ctl_done(union ctl_io *io)
{
struct ctl_softc *ctl_softc;
-#ifndef CTL_DONE_THREAD
- union ctl_io *xio;
-#endif /* !CTL_DONE_THREAD */
ctl_softc = control_softc;
- if (have_lock == 0)
- mtx_lock(&ctl_softc->ctl_lock);
-
/*
* Enable this to catch duplicate completion issues.
*/
@@ -13187,11 +13782,8 @@ ctl_done_lock(union ctl_io *io, int have_lock)
* This is an internal copy of an I/O, and should not go through
* the normal done processing logic.
*/
- if (io->io_hdr.flags & CTL_FLAG_INT_COPY) {
- if (have_lock == 0)
- mtx_unlock(&ctl_softc->ctl_lock);
+ if (io->io_hdr.flags & CTL_FLAG_INT_COPY)
return;
- }
/*
* We need to send a msg to the serializing shelf to finish the IO
@@ -13236,38 +13828,12 @@ ctl_done_lock(union ctl_io *io, int have_lock)
ctl_done_timer_wakeup, io);
if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT)
lun->delay_info.done_delay = 0;
- if (have_lock == 0)
- mtx_unlock(&ctl_softc->ctl_lock);
return;
}
}
#endif /* CTL_IO_DELAY */
- STAILQ_INSERT_TAIL(&ctl_softc->done_queue, &io->io_hdr, links);
-
-#ifdef CTL_DONE_THREAD
- if (have_lock == 0)
- mtx_unlock(&ctl_softc->ctl_lock);
-
- ctl_wakeup_thread();
-#else /* CTL_DONE_THREAD */
- for (xio = (union ctl_io *)STAILQ_FIRST(&ctl_softc->done_queue);
- xio != NULL;
- xio =(union ctl_io *)STAILQ_FIRST(&ctl_softc->done_queue)) {
-
- STAILQ_REMOVE_HEAD(&ctl_softc->done_queue, links);
-
- ctl_process_done(xio, /*have_lock*/ 1);
- }
- if (have_lock == 0)
- mtx_unlock(&ctl_softc->ctl_lock);
-#endif /* CTL_DONE_THREAD */
-}
-
-void
-ctl_done(union ctl_io *io)
-{
- ctl_done_lock(io, /*have_lock*/ 0);
+ ctl_enqueue_done(io);
}
int
@@ -13291,24 +13857,18 @@ ctl_isc(struct ctl_scsiio *ctsio)
static void
ctl_work_thread(void *arg)
{
- struct ctl_softc *softc;
+ struct ctl_thread *thr = (struct ctl_thread *)arg;
+ struct ctl_softc *softc = thr->ctl_softc;
union ctl_io *io;
- struct ctl_be_lun *be_lun;
int retval;
CTL_DEBUG_PRINT(("ctl_work_thread starting\n"));
- softc = (struct ctl_softc *)arg;
- if (softc == NULL)
- return;
-
- mtx_lock(&softc->ctl_lock);
for (;;) {
retval = 0;
/*
* We handle the queues in this order:
- * - task management
* - ISC
* - done queue (to free up resources, unblock other commands)
* - RtR queue
@@ -13317,84 +13877,128 @@ ctl_work_thread(void *arg)
* If those queues are empty, we break out of the loop and
* go to sleep.
*/
- io = (union ctl_io *)STAILQ_FIRST(&softc->task_queue);
+ mtx_lock(&thr->queue_lock);
+ io = (union ctl_io *)STAILQ_FIRST(&thr->isc_queue);
if (io != NULL) {
- ctl_run_task_queue(softc);
+ STAILQ_REMOVE_HEAD(&thr->isc_queue, links);
+ mtx_unlock(&thr->queue_lock);
+ ctl_handle_isc(io);
continue;
}
- io = (union ctl_io *)STAILQ_FIRST(&softc->isc_queue);
+ io = (union ctl_io *)STAILQ_FIRST(&thr->done_queue);
if (io != NULL) {
- STAILQ_REMOVE_HEAD(&softc->isc_queue, links);
- ctl_handle_isc(io);
+ STAILQ_REMOVE_HEAD(&thr->done_queue, links);
+ /* clear any blocked commands, call fe_done */
+ mtx_unlock(&thr->queue_lock);
+ retval = ctl_process_done(io);
continue;
}
- io = (union ctl_io *)STAILQ_FIRST(&softc->done_queue);
+ io = (union ctl_io *)STAILQ_FIRST(&thr->incoming_queue);
if (io != NULL) {
- STAILQ_REMOVE_HEAD(&softc->done_queue, links);
- /* clear any blocked commands, call fe_done */
- mtx_unlock(&softc->ctl_lock);
- /*
- * XXX KDM
- * Call this without a lock for now. This will
- * depend on whether there is any way the FETD can
- * sleep or deadlock if called with the CTL lock
- * held.
- */
- retval = ctl_process_done(io, /*have_lock*/ 0);
- mtx_lock(&softc->ctl_lock);
+ STAILQ_REMOVE_HEAD(&thr->incoming_queue, links);
+ mtx_unlock(&thr->queue_lock);
+ if (io->io_hdr.io_type == CTL_IO_TASK)
+ ctl_run_task(io);
+ else
+ ctl_scsiio_precheck(softc, &io->scsiio);
continue;
}
if (!ctl_pause_rtr) {
- io = (union ctl_io *)STAILQ_FIRST(&softc->rtr_queue);
+ io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue);
if (io != NULL) {
- STAILQ_REMOVE_HEAD(&softc->rtr_queue, links);
- mtx_unlock(&softc->ctl_lock);
+ STAILQ_REMOVE_HEAD(&thr->rtr_queue, links);
+ mtx_unlock(&thr->queue_lock);
retval = ctl_scsiio(&io->scsiio);
if (retval != CTL_RETVAL_COMPLETE)
CTL_DEBUG_PRINT(("ctl_scsiio failed\n"));
- mtx_lock(&softc->ctl_lock);
continue;
}
}
- io = (union ctl_io *)STAILQ_FIRST(&softc->incoming_queue);
- if (io != NULL) {
- STAILQ_REMOVE_HEAD(&softc->incoming_queue, links);
- mtx_unlock(&softc->ctl_lock);
- ctl_scsiio_precheck(softc, &io->scsiio);
- mtx_lock(&softc->ctl_lock);
- continue;
- }
- /*
- * We might want to move this to a separate thread, so that
- * configuration requests (in this case LUN creations)
- * won't impact the I/O path.
- */
+
+ /* Sleep until we have something to do. */
+ mtx_sleep(thr, &thr->queue_lock, PDROP | PRIBIO, "-", 0);
+ }
+}
+
+static void
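+/*
+ * Dedicated thread that instantiates LUNs from the pending_lun_queue, so
+ * that configuration requests (LUN creations) do not impact the I/O path.
+ */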
+ctl_lun_thread(void *arg)
+{
+ struct ctl_softc *softc = (struct ctl_softc *)arg;
+ struct ctl_be_lun *be_lun;
+ int retval;
+
+ CTL_DEBUG_PRINT(("ctl_lun_thread starting\n"));
+
+ for (;;) {
+ retval = 0;
+ mtx_lock(&softc->ctl_lock);
be_lun = STAILQ_FIRST(&softc->pending_lun_queue);
if (be_lun != NULL) {
STAILQ_REMOVE_HEAD(&softc->pending_lun_queue, links);
mtx_unlock(&softc->ctl_lock);
ctl_create_lun(be_lun);
- mtx_lock(&softc->ctl_lock);
continue;
}
- /* XXX KDM use the PDROP flag?? */
/* Sleep until we have something to do. */
- mtx_sleep(softc, &softc->ctl_lock, PRIBIO, "-", 0);
-
- /* Back to the top of the loop to see what woke us up. */
- continue;
+ mtx_sleep(&softc->pending_lun_queue, &softc->ctl_lock,
+ PDROP | PRIBIO, "-", 0);
}
}
-void
-ctl_wakeup_thread()
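+/*
+ * Queue a newly arrived I/O on a worker thread's incoming queue and wake
+ * that thread.  The thread is chosen by hashing the nexus (target port
+ * and initiator ID), so commands from one nexus always land on the same
+ * thread.
+ */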
+static void
+ctl_enqueue_incoming(union ctl_io *io)
{
- struct ctl_softc *softc;
+ struct ctl_softc *softc = control_softc;
+ struct ctl_thread *thr;
+ u_int idx;
- softc = control_softc;
+ idx = (io->io_hdr.nexus.targ_port * 127 +
+ io->io_hdr.nexus.initid.id) % worker_threads;
+ thr = &softc->threads[idx];
+ mtx_lock(&thr->queue_lock);
+ STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links);
+ mtx_unlock(&thr->queue_lock);
+ wakeup(thr);
+}
+
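+/*
+ * The RtR, done and ISC queues below are keyed by the mapped LUN, so all
+ * I/O for a given LUN is serviced by a single worker thread.
+ */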
+static void
+ctl_enqueue_rtr(union ctl_io *io)
+{
+ struct ctl_softc *softc = control_softc;
+ struct ctl_thread *thr;
+
+ thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
+ mtx_lock(&thr->queue_lock);
+ STAILQ_INSERT_TAIL(&thr->rtr_queue, &io->io_hdr, links);
+ mtx_unlock(&thr->queue_lock);
+ wakeup(thr);
+}
+
+static void
+ctl_enqueue_done(union ctl_io *io)
+{
+ struct ctl_softc *softc = control_softc;
+ struct ctl_thread *thr;
+
+ thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
+ mtx_lock(&thr->queue_lock);
+ STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links);
+ mtx_unlock(&thr->queue_lock);
+ wakeup(thr);
+}
+
+static void
+ctl_enqueue_isc(union ctl_io *io)
+{
+ struct ctl_softc *softc = control_softc;
+ struct ctl_thread *thr;
- wakeup_one(softc);
+ thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
+ mtx_lock(&thr->queue_lock);
+ STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links);
+ mtx_unlock(&thr->queue_lock);
+ wakeup(thr);
}
/* Initialization and failover */