aboutsummaryrefslogtreecommitdiff
path: root/sys/cam
diff options
context:
space:
mode:
Diffstat (limited to 'sys/cam')
-rw-r--r--sys/cam/ata/ata_da.c222
-rw-r--r--sys/cam/ata/ata_pmp.c9
-rw-r--r--sys/cam/cam.c1
-rw-r--r--sys/cam/cam_periph.c6
-rw-r--r--sys/cam/cam_xpt.c8
-rw-r--r--sys/cam/ctl/ctl.c4198
-rw-r--r--sys/cam/ctl/ctl.h71
-rw-r--r--sys/cam/ctl/ctl_backend.c49
-rw-r--r--sys/cam/ctl/ctl_backend.h8
-rw-r--r--sys/cam/ctl/ctl_backend_block.c599
-rw-r--r--sys/cam/ctl/ctl_backend_ramdisk.c77
-rw-r--r--sys/cam/ctl/ctl_cmd_table.c636
-rw-r--r--sys/cam/ctl/ctl_error.c17
-rw-r--r--sys/cam/ctl/ctl_error.h1
-rw-r--r--sys/cam/ctl/ctl_frontend.c206
-rw-r--r--sys/cam/ctl/ctl_frontend.h130
-rw-r--r--sys/cam/ctl/ctl_frontend_cam_sim.c136
-rw-r--r--sys/cam/ctl/ctl_frontend_internal.c97
-rw-r--r--sys/cam/ctl/ctl_frontend_iscsi.c781
-rw-r--r--sys/cam/ctl/ctl_frontend_iscsi.h17
-rw-r--r--sys/cam/ctl/ctl_io.h11
-rw-r--r--sys/cam/ctl/ctl_ioctl.h44
-rw-r--r--sys/cam/ctl/ctl_private.h99
-rw-r--r--sys/cam/ctl/ctl_ser_table.c32
-rw-r--r--sys/cam/ctl/ctl_tpc.c1370
-rw-r--r--sys/cam/ctl/ctl_tpc.h38
-rw-r--r--sys/cam/ctl/ctl_tpc_local.c387
-rw-r--r--sys/cam/ctl/ctl_util.c1
-rw-r--r--sys/cam/ctl/scsi_ctl.c172
-rw-r--r--sys/cam/scsi/scsi_all.c1045
-rw-r--r--sys/cam/scsi/scsi_all.h825
-rw-r--r--sys/cam/scsi/scsi_cd.c9
-rw-r--r--sys/cam/scsi/scsi_da.c30
-rw-r--r--sys/cam/scsi/scsi_da.h43
-rw-r--r--sys/cam/scsi/scsi_enc_safte.c3
-rw-r--r--sys/cam/scsi/scsi_sa.c5
-rw-r--r--sys/cam/scsi/scsi_sg.c149
-rw-r--r--sys/cam/scsi/scsi_sg.h67
-rw-r--r--sys/cam/scsi/scsi_xpt.c3
39 files changed, 8238 insertions, 3364 deletions
diff --git a/sys/cam/ata/ata_da.c b/sys/cam/ata/ata_da.c
index c30be2d81132f..9c99e88140be8 100644
--- a/sys/cam/ata/ata_da.c
+++ b/sys/cam/ata/ata_da.c
@@ -546,30 +546,22 @@ static int ada_write_cache = ADA_DEFAULT_WRITE_CACHE;
static SYSCTL_NODE(_kern_cam, OID_AUTO, ada, CTLFLAG_RD, 0,
"CAM Direct Access Disk driver");
-SYSCTL_INT(_kern_cam_ada, OID_AUTO, legacy_aliases, CTLFLAG_RW,
+SYSCTL_INT(_kern_cam_ada, OID_AUTO, legacy_aliases, CTLFLAG_RWTUN,
&ada_legacy_aliases, 0, "Create legacy-like device aliases");
-TUNABLE_INT("kern.cam.ada.legacy_aliases", &ada_legacy_aliases);
-SYSCTL_INT(_kern_cam_ada, OID_AUTO, retry_count, CTLFLAG_RW,
+SYSCTL_INT(_kern_cam_ada, OID_AUTO, retry_count, CTLFLAG_RWTUN,
&ada_retry_count, 0, "Normal I/O retry count");
-TUNABLE_INT("kern.cam.ada.retry_count", &ada_retry_count);
-SYSCTL_INT(_kern_cam_ada, OID_AUTO, default_timeout, CTLFLAG_RW,
+SYSCTL_INT(_kern_cam_ada, OID_AUTO, default_timeout, CTLFLAG_RWTUN,
&ada_default_timeout, 0, "Normal I/O timeout (in seconds)");
-TUNABLE_INT("kern.cam.ada.default_timeout", &ada_default_timeout);
-SYSCTL_INT(_kern_cam_ada, OID_AUTO, send_ordered, CTLFLAG_RW,
+SYSCTL_INT(_kern_cam_ada, OID_AUTO, send_ordered, CTLFLAG_RWTUN,
&ada_send_ordered, 0, "Send Ordered Tags");
-TUNABLE_INT("kern.cam.ada.send_ordered", &ada_send_ordered);
-SYSCTL_INT(_kern_cam_ada, OID_AUTO, spindown_shutdown, CTLFLAG_RW,
+SYSCTL_INT(_kern_cam_ada, OID_AUTO, spindown_shutdown, CTLFLAG_RWTUN,
&ada_spindown_shutdown, 0, "Spin down upon shutdown");
-TUNABLE_INT("kern.cam.ada.spindown_shutdown", &ada_spindown_shutdown);
-SYSCTL_INT(_kern_cam_ada, OID_AUTO, spindown_suspend, CTLFLAG_RW,
+SYSCTL_INT(_kern_cam_ada, OID_AUTO, spindown_suspend, CTLFLAG_RWTUN,
&ada_spindown_suspend, 0, "Spin down upon suspend");
-TUNABLE_INT("kern.cam.ada.spindown_suspend", &ada_spindown_suspend);
-SYSCTL_INT(_kern_cam_ada, OID_AUTO, read_ahead, CTLFLAG_RW,
+SYSCTL_INT(_kern_cam_ada, OID_AUTO, read_ahead, CTLFLAG_RWTUN,
&ada_read_ahead, 0, "Enable disk read-ahead");
-TUNABLE_INT("kern.cam.ada.read_ahead", &ada_read_ahead);
-SYSCTL_INT(_kern_cam_ada, OID_AUTO, write_cache, CTLFLAG_RW,
+SYSCTL_INT(_kern_cam_ada, OID_AUTO, write_cache, CTLFLAG_RWTUN,
&ada_write_cache, 0, "Enable disk write cache");
-TUNABLE_INT("kern.cam.ada.write_cache", &ada_write_cache);
/*
* ADA_ORDEREDTAG_INTERVAL determines how often, relative
@@ -727,12 +719,12 @@ adastrategy(struct bio *bp)
/*
* Place it in the queue of disk activities for this disk
*/
- if (bp->bio_cmd == BIO_DELETE &&
- (softc->flags & ADA_FLAG_CAN_TRIM)) {
- if (ADA_SIO)
- bioq_disksort(&softc->trim_queue, bp);
- else
- bioq_insert_tail(&softc->trim_queue, bp);
+ if (bp->bio_cmd == BIO_DELETE) {
+ KASSERT((softc->flags & ADA_FLAG_CAN_TRIM) ||
+ ((softc->flags & ADA_FLAG_CAN_CFA) &&
+ !(softc->flags & ADA_FLAG_CAN_48BIT)),
+ ("BIO_DELETE but no supported TRIM method."));
+ bioq_disksort(&softc->trim_queue, bp);
} else {
if (ADA_SIO)
bioq_disksort(&softc->bio_queue, bp);
@@ -1387,6 +1379,96 @@ adaregister(struct cam_periph *periph, void *arg)
}
static void
+ada_dsmtrim(struct ada_softc *softc, struct bio *bp, struct ccb_ataio *ataio)
+{
+ struct trim_request *req = &softc->trim_req;
+ uint64_t lastlba = (uint64_t)-1;
+ int c, lastcount = 0, off, ranges = 0;
+
+ bzero(req, sizeof(*req));
+ TAILQ_INIT(&req->bps);
+ do {
+ uint64_t lba = bp->bio_pblkno;
+ int count = bp->bio_bcount / softc->params.secsize;
+
+ bioq_remove(&softc->trim_queue, bp);
+
+ /* Try to extend the previous range. */
+ if (lba == lastlba) {
+ c = min(count, ATA_DSM_RANGE_MAX - lastcount);
+ lastcount += c;
+ off = (ranges - 1) * ATA_DSM_RANGE_SIZE;
+ req->data[off + 6] = lastcount & 0xff;
+ req->data[off + 7] =
+ (lastcount >> 8) & 0xff;
+ count -= c;
+ lba += c;
+ }
+
+ while (count > 0) {
+ c = min(count, ATA_DSM_RANGE_MAX);
+ off = ranges * ATA_DSM_RANGE_SIZE;
+ req->data[off + 0] = lba & 0xff;
+ req->data[off + 1] = (lba >> 8) & 0xff;
+ req->data[off + 2] = (lba >> 16) & 0xff;
+ req->data[off + 3] = (lba >> 24) & 0xff;
+ req->data[off + 4] = (lba >> 32) & 0xff;
+ req->data[off + 5] = (lba >> 40) & 0xff;
+ req->data[off + 6] = c & 0xff;
+ req->data[off + 7] = (c >> 8) & 0xff;
+ lba += c;
+ count -= c;
+ lastcount = c;
+ ranges++;
+ /*
+				 * It's the caller's responsibility to ensure the
+ * request will fit so we don't need to check for
+ * overrun here
+ */
+ }
+ lastlba = lba;
+ TAILQ_INSERT_TAIL(&req->bps, bp, bio_queue);
+ bp = bioq_first(&softc->trim_queue);
+ if (bp == NULL ||
+ bp->bio_bcount / softc->params.secsize >
+ (softc->trim_max_ranges - ranges) * ATA_DSM_RANGE_MAX)
+ break;
+ } while (1);
+ cam_fill_ataio(ataio,
+ ada_retry_count,
+ adadone,
+ CAM_DIR_OUT,
+ 0,
+ req->data,
+ ((ranges + ATA_DSM_BLK_RANGES - 1) /
+ ATA_DSM_BLK_RANGES) * ATA_DSM_BLK_SIZE,
+ ada_default_timeout * 1000);
+ ata_48bit_cmd(ataio, ATA_DATA_SET_MANAGEMENT,
+ ATA_DSM_TRIM, 0, (ranges + ATA_DSM_BLK_RANGES -
+ 1) / ATA_DSM_BLK_RANGES);
+}
+
+static void
+ada_cfaerase(struct ada_softc *softc, struct bio *bp, struct ccb_ataio *ataio)
+{
+ uint64_t lba = bp->bio_pblkno;
+ uint16_t count = bp->bio_bcount / softc->params.secsize;
+
+ cam_fill_ataio(ataio,
+ ada_retry_count,
+ adadone,
+ CAM_DIR_NONE,
+ 0,
+ NULL,
+ 0,
+ ada_default_timeout*1000);
+
+ if (count >= 256)
+ count = 0;
+ ata_28bit_cmd(ataio, ATA_CFA_ERASE, 0, lba, count);
+}
+
+static void
adastart(struct cam_periph *periph, union ccb *start_ccb)
{
struct ada_softc *softc = (struct ada_softc *)periph->softc;
@@ -1403,76 +1485,15 @@ adastart(struct cam_periph *periph, union ccb *start_ccb)
/* Run TRIM if not running yet. */
if (!softc->trim_running &&
(bp = bioq_first(&softc->trim_queue)) != 0) {
- struct trim_request *req = &softc->trim_req;
- struct bio *bp1;
- uint64_t lastlba = (uint64_t)-1;
- int c, lastcount = 0, off, ranges = 0;
-
+ if (softc->flags & ADA_FLAG_CAN_TRIM) {
+ ada_dsmtrim(softc, bp, ataio);
+ } else if ((softc->flags & ADA_FLAG_CAN_CFA) &&
+ !(softc->flags & ADA_FLAG_CAN_48BIT)) {
+ ada_cfaerase(softc, bp, ataio);
+ } else {
+ panic("adastart: BIO_DELETE without method, not possible.");
+ }
softc->trim_running = 1;
- bzero(req, sizeof(*req));
- TAILQ_INIT(&req->bps);
- bp1 = bp;
- do {
- uint64_t lba = bp1->bio_pblkno;
- int count = bp1->bio_bcount /
- softc->params.secsize;
-
- bioq_remove(&softc->trim_queue, bp1);
-
- /* Try to extend the previous range. */
- if (lba == lastlba) {
- c = min(count, ATA_DSM_RANGE_MAX - lastcount);
- lastcount += c;
- off = (ranges - 1) * ATA_DSM_RANGE_SIZE;
- req->data[off + 6] = lastcount & 0xff;
- req->data[off + 7] =
- (lastcount >> 8) & 0xff;
- count -= c;
- lba += c;
- }
-
- while (count > 0) {
- c = min(count, ATA_DSM_RANGE_MAX);
- off = ranges * ATA_DSM_RANGE_SIZE;
- req->data[off + 0] = lba & 0xff;
- req->data[off + 1] = (lba >> 8) & 0xff;
- req->data[off + 2] = (lba >> 16) & 0xff;
- req->data[off + 3] = (lba >> 24) & 0xff;
- req->data[off + 4] = (lba >> 32) & 0xff;
- req->data[off + 5] = (lba >> 40) & 0xff;
- req->data[off + 6] = c & 0xff;
- req->data[off + 7] = (c >> 8) & 0xff;
- lba += c;
- count -= c;
- lastcount = c;
- ranges++;
- /*
- * Its the caller's responsibility to ensure the
- * request will fit so we don't need to check for
- * overrun here
- */
- }
- lastlba = lba;
- TAILQ_INSERT_TAIL(&req->bps, bp1, bio_queue);
- bp1 = bioq_first(&softc->trim_queue);
- if (bp1 == NULL ||
- bp1->bio_bcount / softc->params.secsize >
- (softc->trim_max_ranges - ranges) *
- ATA_DSM_RANGE_MAX)
- break;
- } while (1);
- cam_fill_ataio(ataio,
- ada_retry_count,
- adadone,
- CAM_DIR_OUT,
- 0,
- req->data,
- ((ranges + ATA_DSM_BLK_RANGES - 1) /
- ATA_DSM_BLK_RANGES) * ATA_DSM_BLK_SIZE,
- ada_default_timeout * 1000);
- ata_48bit_cmd(ataio, ATA_DATA_SET_MANAGEMENT,
- ATA_DSM_TRIM, 0, (ranges + ATA_DSM_BLK_RANGES -
- 1) / ATA_DSM_BLK_RANGES);
start_ccb->ccb_h.ccb_state = ADA_CCB_TRIM;
start_ccb->ccb_h.flags |= CAM_UNLOCKED;
goto out;
@@ -1607,25 +1628,6 @@ adastart(struct cam_periph *periph, union ccb *start_ccb)
}
break;
}
- case BIO_DELETE:
- {
- uint64_t lba = bp->bio_pblkno;
- uint16_t count = bp->bio_bcount / softc->params.secsize;
-
- cam_fill_ataio(ataio,
- ada_retry_count,
- adadone,
- CAM_DIR_NONE,
- 0,
- NULL,
- 0,
- ada_default_timeout*1000);
-
- if (count >= 256)
- count = 0;
- ata_28bit_cmd(ataio, ATA_CFA_ERASE, 0, lba, count);
- break;
- }
case BIO_FLUSH:
cam_fill_ataio(ataio,
1,
diff --git a/sys/cam/ata/ata_pmp.c b/sys/cam/ata/ata_pmp.c
index bd4d25a67defd..fab0e6f1f5743 100644
--- a/sys/cam/ata/ata_pmp.c
+++ b/sys/cam/ata/ata_pmp.c
@@ -139,15 +139,12 @@ static int pmp_hide_special = PMP_DEFAULT_HIDE_SPECIAL;
static SYSCTL_NODE(_kern_cam, OID_AUTO, pmp, CTLFLAG_RD, 0,
"CAM Direct Access Disk driver");
-SYSCTL_INT(_kern_cam_pmp, OID_AUTO, retry_count, CTLFLAG_RW,
+SYSCTL_INT(_kern_cam_pmp, OID_AUTO, retry_count, CTLFLAG_RWTUN,
&pmp_retry_count, 0, "Normal I/O retry count");
-TUNABLE_INT("kern.cam.pmp.retry_count", &pmp_retry_count);
-SYSCTL_INT(_kern_cam_pmp, OID_AUTO, default_timeout, CTLFLAG_RW,
+SYSCTL_INT(_kern_cam_pmp, OID_AUTO, default_timeout, CTLFLAG_RWTUN,
&pmp_default_timeout, 0, "Normal I/O timeout (in seconds)");
-TUNABLE_INT("kern.cam.pmp.default_timeout", &pmp_default_timeout);
-SYSCTL_INT(_kern_cam_pmp, OID_AUTO, hide_special, CTLFLAG_RW,
+SYSCTL_INT(_kern_cam_pmp, OID_AUTO, hide_special, CTLFLAG_RWTUN,
&pmp_hide_special, 0, "Hide extra ports");
-TUNABLE_INT("kern.cam.pmp.hide_special", &pmp_hide_special);
static struct periph_driver pmpdriver =
{
diff --git a/sys/cam/cam.c b/sys/cam/cam.c
index f608d6f887290..939dd76c30689 100644
--- a/sys/cam/cam.c
+++ b/sys/cam/cam.c
@@ -116,7 +116,6 @@ SYSCTL_NODE(_kern, OID_AUTO, cam, CTLFLAG_RD, 0, "CAM Subsystem");
#endif
int cam_sort_io_queues = CAM_DEFAULT_SORT_IO_QUEUES;
-TUNABLE_INT("kern.cam.sort_io_queues", &cam_sort_io_queues);
SYSCTL_INT(_kern_cam, OID_AUTO, sort_io_queues, CTLFLAG_RWTUN,
&cam_sort_io_queues, 0, "Sort IO queues to try and optimise disk access patterns");
#endif
diff --git a/sys/cam/cam_periph.c b/sys/cam/cam_periph.c
index 1628ab0d1f5bb..2e23996966a83 100644
--- a/sys/cam/cam_periph.c
+++ b/sys/cam/cam_periph.c
@@ -597,7 +597,7 @@ cam_periph_invalidate(struct cam_periph *periph)
return;
CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph invalidated\n"));
- if (periph->flags & CAM_PERIPH_ANNOUNCED)
+ if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting)
xpt_denounce_periph(periph);
periph->flags |= CAM_PERIPH_INVALID;
periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;
@@ -663,9 +663,9 @@ camperiphfree(struct cam_periph *periph)
xpt_remove_periph(periph);
xpt_unlock_buses();
- if (periph->flags & CAM_PERIPH_ANNOUNCED) {
+ if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting)
xpt_print(periph->path, "Periph destroyed\n");
- } else
+ else
CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n"));
if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
diff --git a/sys/cam/cam_xpt.c b/sys/cam/cam_xpt.c
index bfad6dd95cbd6..4a8f14b0f7324 100644
--- a/sys/cam/cam_xpt.c
+++ b/sys/cam/cam_xpt.c
@@ -149,7 +149,6 @@ typedef int xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
/* Transport layer configuration information */
static struct xpt_softc xsoftc;
-TUNABLE_INT("kern.cam.boot_delay", &xsoftc.boot_delay);
SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN,
&xsoftc.boot_delay, 0, "Bus registration wait time");
@@ -163,7 +162,6 @@ static struct cam_doneq cam_doneqs[MAXCPU];
static int cam_num_doneqs;
static struct proc *cam_proc;
-TUNABLE_INT("kern.cam.num_doneqs", &cam_num_doneqs);
SYSCTL_INT(_kern_cam, OID_AUTO, num_doneqs, CTLFLAG_RDTUN,
&cam_num_doneqs, 0, "Number of completion queues/threads");
@@ -197,12 +195,10 @@ static struct cdevsw xpt_cdevsw = {
/* Storage for debugging datastructures */
struct cam_path *cam_dpath;
u_int32_t cam_dflags = CAM_DEBUG_FLAGS;
-TUNABLE_INT("kern.cam.dflags", &cam_dflags);
-SYSCTL_UINT(_kern_cam, OID_AUTO, dflags, CTLFLAG_RW,
+SYSCTL_UINT(_kern_cam, OID_AUTO, dflags, CTLFLAG_RWTUN,
&cam_dflags, 0, "Enabled debug flags");
u_int32_t cam_debug_delay = CAM_DEBUG_DELAY;
-TUNABLE_INT("kern.cam.debug_delay", &cam_debug_delay);
-SYSCTL_UINT(_kern_cam, OID_AUTO, debug_delay, CTLFLAG_RW,
+SYSCTL_UINT(_kern_cam, OID_AUTO, debug_delay, CTLFLAG_RWTUN,
&cam_debug_delay, 0, "Delay in us after each debug message");
/* Our boot-time initialization hook */
diff --git a/sys/cam/ctl/ctl.c b/sys/cam/ctl/ctl.c
index 1e9a952eaedb1..f3dbc183bf50d 100644
--- a/sys/cam/ctl/ctl.c
+++ b/sys/cam/ctl/ctl.c
@@ -83,20 +83,6 @@ __FBSDID("$FreeBSD$");
struct ctl_softc *control_softc = NULL;
/*
- * The default is to run with CTL_DONE_THREAD turned on. Completed
- * transactions are queued for processing by the CTL work thread. When
- * CTL_DONE_THREAD is not defined, completed transactions are processed in
- * the caller's context.
- */
-#define CTL_DONE_THREAD
-
-/*
- * Use the serial number and device ID provided by the backend, rather than
- * making up our own.
- */
-#define CTL_USE_BACKEND_SN
-
-/*
* Size and alignment macros needed for Copan-specific HA hardware. These
* can go away when the HA code is re-written, and uses busdma for any
* hardware.
@@ -296,8 +282,10 @@ static struct scsi_control_page control_page_default = {
/*rlec*/0,
/*queue_flags*/0,
/*eca_and_aen*/0,
- /*reserved*/0,
- /*aen_holdoff_period*/{0, 0}
+ /*flags4*/SCP_TAS,
+ /*aen_holdoff_period*/{0, 0},
+ /*busy_timeout_period*/{0, 0},
+ /*extended_selftest_completion_time*/{0, 0}
};
static struct scsi_control_page control_page_changeable = {
@@ -306,8 +294,10 @@ static struct scsi_control_page control_page_changeable = {
/*rlec*/SCP_DSENSE,
/*queue_flags*/0,
/*eca_and_aen*/0,
- /*reserved*/0,
- /*aen_holdoff_period*/{0, 0}
+ /*flags4*/0,
+ /*aen_holdoff_period*/{0, 0},
+ /*busy_timeout_period*/{0, 0},
+ /*extended_selftest_completion_time*/{0, 0}
};
@@ -321,20 +311,19 @@ static int ctl_is_single = 1;
static int index_to_aps_page;
SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer");
-static int worker_threads = 1;
-TUNABLE_INT("kern.cam.ctl.worker_threads", &worker_threads);
+static int worker_threads = -1;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN,
&worker_threads, 1, "Number of worker threads");
static int verbose = 0;
-TUNABLE_INT("kern.cam.ctl.verbose", &verbose);
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, verbose, CTLFLAG_RWTUN,
&verbose, 0, "Show SCSI errors returned to initiator");
/*
- * Serial number (0x80), device id (0x83), supported pages (0x00),
- * Block limits (0xB0) and Logical Block Provisioning (0xB2)
+ * Supported pages (0x00), Serial number (0x80), Device ID (0x83),
+ * SCSI Ports (0x88), Third-party Copy (0x8F), Block limits (0xB0) and
+ * Logical Block Provisioning (0xB2)
*/
-#define SCSI_EVPD_NUM_SUPPORTED_PAGES 5
+#define SCSI_EVPD_NUM_SUPPORTED_PAGES 7
static void ctl_isc_event_handler(ctl_ha_channel chanel, ctl_ha_event event,
int param);
@@ -345,12 +334,10 @@ static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td);
static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td);
static void ctl_ioctl_online(void *arg);
static void ctl_ioctl_offline(void *arg);
-static int ctl_ioctl_targ_enable(void *arg, struct ctl_id targ_id);
-static int ctl_ioctl_targ_disable(void *arg, struct ctl_id targ_id);
static int ctl_ioctl_lun_enable(void *arg, struct ctl_id targ_id, int lun_id);
static int ctl_ioctl_lun_disable(void *arg, struct ctl_id targ_id, int lun_id);
static int ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio);
-static int ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio, int have_lock);
+static int ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio);
static int ctl_ioctl_submit_wait(union ctl_io *io);
static void ctl_ioctl_datamove(union ctl_io *io);
static void ctl_ioctl_done(union ctl_io *io);
@@ -362,8 +349,8 @@ static int ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
struct ctl_ooa_entry *kern_entries);
static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
struct thread *td);
-uint32_t ctl_get_resindex(struct ctl_nexus *nexus);
-uint32_t ctl_port_idx(int port_num);
+static uint32_t ctl_map_lun(int port_num, uint32_t lun);
+static uint32_t ctl_map_lun_back(int port_num, uint32_t lun);
#ifdef unused
static union ctl_io *ctl_malloc_io(ctl_io_type io_type, uint32_t targ_port,
uint32_t targ_target, uint32_t targ_lun,
@@ -392,6 +379,8 @@ static void ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg);
static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len);
+static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio,
+ int alloc_len);
static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio,
int alloc_len);
static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len);
@@ -406,7 +395,7 @@ static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
static int ctl_check_blocked(struct ctl_lun *lun);
static int ctl_scsiio_lun_check(struct ctl_softc *ctl_softc,
struct ctl_lun *lun,
- struct ctl_cmd_entry *entry,
+ const struct ctl_cmd_entry *entry,
struct ctl_scsiio *ctsio);
//static int ctl_check_rtr(union ctl_io *pending_io, struct ctl_softc *softc);
static void ctl_failover(void);
@@ -420,7 +409,9 @@ static int ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io,
static int ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io,
ctl_ua_type ua_type);
static int ctl_abort_task(union ctl_io *io);
-static void ctl_run_task_queue(struct ctl_softc *ctl_softc);
+static int ctl_abort_task_set(union ctl_io *io);
+static int ctl_i_t_nexus_reset(union ctl_io *io);
+static void ctl_run_task(union ctl_io *io);
#ifdef CTL_IO_DELAY
static void ctl_datamove_timer_wakeup(void *arg);
static void ctl_done_timer_wakeup(void *arg);
@@ -437,8 +428,19 @@ static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
ctl_ha_dt_cb callback);
static void ctl_datamove_remote_read(union ctl_io *io);
static void ctl_datamove_remote(union ctl_io *io);
-static int ctl_process_done(union ctl_io *io, int have_lock);
+static int ctl_process_done(union ctl_io *io);
+static void ctl_lun_thread(void *arg);
static void ctl_work_thread(void *arg);
+static void ctl_enqueue_incoming(union ctl_io *io);
+static void ctl_enqueue_rtr(union ctl_io *io);
+static void ctl_enqueue_done(union ctl_io *io);
+static void ctl_enqueue_isc(union ctl_io *io);
+static const struct ctl_cmd_entry *
+ ctl_get_cmd_entry(struct ctl_scsiio *ctsio);
+static const struct ctl_cmd_entry *
+ ctl_validate_command(struct ctl_scsiio *ctsio);
+static int ctl_cmd_applicable(uint8_t lun_type,
+ const struct ctl_cmd_entry *entry);
/*
* Load the serialization table. This isn't very pretty, but is probably
@@ -460,6 +462,7 @@ static struct cdevsw ctl_cdevsw = {
MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL");
+MALLOC_DEFINE(M_CTLIO, "ctlio", "Memory used for CTL requests");
static int ctl_module_event_handler(module_t, int /*modeventtype_t*/, void *);
@@ -472,6 +475,11 @@ static moduledata_t ctl_moduledata = {
DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD);
MODULE_VERSION(ctl, 1);
+static struct ctl_frontend ioctl_frontend =
+{
+ .name = "ioctl",
+};
+
static void
ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc,
union ctl_ha_msg *msg_info)
@@ -496,8 +504,7 @@ ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc,
sizeof(ctsio->sense_data));
memcpy(&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
&msg_info->scsi.lbalen, sizeof(msg_info->scsi.lbalen));
- STAILQ_INSERT_TAIL(&ctl_softc->isc_queue, &ctsio->io_hdr, links);
- ctl_wakeup_thread();
+ ctl_enqueue_isc((union ctl_io *)ctsio);
}
static void
@@ -543,8 +550,7 @@ ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc,
}
#endif
ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
- STAILQ_INSERT_TAIL(&ctl_softc->isc_queue, &ctsio->io_hdr, links);
- ctl_wakeup_thread();
+ ctl_enqueue_isc((union ctl_io *)ctsio);
}
/*
@@ -579,7 +585,6 @@ ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
isc_status);
return;
}
- mtx_lock(&ctl_softc->ctl_lock);
switch (msg_info.hdr.msg_type) {
case CTL_MSG_SERIALIZE:
@@ -592,7 +597,6 @@ ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
"ctl_io!\n");
/* Bad Juju */
/* Need to set busy and send msg back */
- mtx_unlock(&ctl_softc->ctl_lock);
msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
msg_info.hdr.status = CTL_SCSI_ERROR;
msg_info.scsi.scsi_status = SCSI_STATUS_BUSY;
@@ -634,18 +638,14 @@ ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
memcpy(io->scsiio.cdb, msg_info.scsi.cdb,
CTL_MAX_CDBLEN);
if (ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
- struct ctl_cmd_entry *entry;
- uint8_t opcode;
+ const struct ctl_cmd_entry *entry;
- opcode = io->scsiio.cdb[0];
- entry = &ctl_cmd_table[opcode];
+ entry = ctl_get_cmd_entry(&io->scsiio);
io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
io->io_hdr.flags |=
entry->flags & CTL_FLAG_DATA_MASK;
}
- STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
- &io->io_hdr, links);
- ctl_wakeup_thread();
+ ctl_enqueue_isc(io);
break;
/* Performed on the Originating SC, XFER mode only */
@@ -749,11 +749,8 @@ ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
* the full S/G list. Queue processing in the thread.
* Otherwise wait for the next piece.
*/
- if (msg_info.dt.sg_last != 0) {
- STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
- &io->io_hdr, links);
- ctl_wakeup_thread();
- }
+ if (msg_info.dt.sg_last != 0)
+ ctl_enqueue_isc(io);
break;
}
/* Performed on the Serializing (primary) SC, XFER mode only */
@@ -779,10 +776,7 @@ ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
io->scsiio.residual = msg_info.scsi.residual;
memcpy(&io->scsiio.sense_data,&msg_info.scsi.sense_data,
sizeof(io->scsiio.sense_data));
-
- STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
- &io->io_hdr, links);
- ctl_wakeup_thread();
+ ctl_enqueue_isc(io);
break;
}
@@ -791,7 +785,6 @@ ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
io = msg_info.hdr.original_sc;
if (io == NULL) {
printf("%s: Major Bummer\n", __func__);
- mtx_unlock(&ctl_softc->ctl_lock);
return;
} else {
#if 0
@@ -800,9 +793,7 @@ ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
}
io->io_hdr.msg_type = CTL_MSG_R2R;
io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc;
- STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
- &io->io_hdr, links);
- ctl_wakeup_thread();
+ ctl_enqueue_isc(io);
break;
/*
@@ -839,9 +830,7 @@ ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
/* io = msg_info.hdr.serializing_sc; */
io->io_hdr.msg_type = CTL_MSG_BAD_JUJU;
- STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
- &io->io_hdr, links);
- ctl_wakeup_thread();
+ ctl_enqueue_isc(io);
break;
/* Handle resets sent from the other side */
@@ -855,7 +844,6 @@ ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
/* Bad Juju */
/* should I just call the proper reset func
here??? */
- mtx_unlock(&ctl_softc->ctl_lock);
goto bailout;
}
ctl_zero_io((union ctl_io *)taskio);
@@ -872,10 +860,7 @@ ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
cs_prof_gettime(&taskio->io_hdr.start_ticks);
#endif
#endif /* CTL_TIME_IO */
- STAILQ_INSERT_TAIL(&ctl_softc->task_queue,
- &taskio->io_hdr, links);
- ctl_softc->flags |= CTL_FLAG_TASK_PENDING;
- ctl_wakeup_thread();
+ ctl_run_task((union ctl_io *)taskio);
break;
}
/* Persistent Reserve action which needs attention */
@@ -887,15 +872,12 @@ ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
"ctl_io!\n");
/* Bad Juju */
/* Need to set busy and send msg back */
- mtx_unlock(&ctl_softc->ctl_lock);
goto bailout;
}
ctl_zero_io((union ctl_io *)presio);
presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION;
presio->pr_msg = msg_info.pr;
- STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
- &presio->io_hdr, links);
- ctl_wakeup_thread();
+ ctl_enqueue_isc((union ctl_io *)presio);
break;
case CTL_MSG_SYNC_FE:
rcv_sync_msg = 1;
@@ -908,23 +890,21 @@ ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
struct copan_aps_subpage *current_sp;
uint32_t targ_lun;
- targ_lun = msg_info.hdr.nexus.targ_lun;
- if (msg_info.hdr.nexus.lun_map_fn != NULL)
- targ_lun = msg_info.hdr.nexus.lun_map_fn(msg_info.hdr.nexus.lun_map_arg, targ_lun);
-
+ targ_lun = msg_info.hdr.nexus.targ_mapped_lun;
lun = ctl_softc->ctl_luns[targ_lun];
+ mtx_lock(&lun->lun_lock);
page_index = &lun->mode_pages.index[index_to_aps_page];
current_sp = (struct copan_aps_subpage *)
(page_index->page_data +
(page_index->page_len * CTL_PAGE_CURRENT));
current_sp->lock_active = msg_info.aps.lock_flag;
+ mtx_unlock(&lun->lun_lock);
break;
}
default:
printf("How did I get here?\n");
}
- mtx_unlock(&ctl_softc->ctl_lock);
} else if (event == CTL_HA_EVT_MSG_SENT) {
if (param != CTL_HA_STATUS_SUCCESS) {
printf("Bad status from ctl_ha_msg_send status %d\n",
@@ -960,8 +940,7 @@ ctl_init(void)
{
struct ctl_softc *softc;
struct ctl_io_pool *internal_pool, *emergency_pool, *other_pool;
- struct ctl_frontend *fe;
- struct ctl_lun *lun;
+ struct ctl_port *port;
uint8_t sc_id =0;
int i, error, retval;
//int isc_retval;
@@ -1040,22 +1019,11 @@ ctl_init(void)
softc->target.wwid[1] = 0x87654321;
STAILQ_INIT(&softc->lun_list);
STAILQ_INIT(&softc->pending_lun_queue);
- STAILQ_INIT(&softc->task_queue);
- STAILQ_INIT(&softc->incoming_queue);
- STAILQ_INIT(&softc->rtr_queue);
- STAILQ_INIT(&softc->done_queue);
- STAILQ_INIT(&softc->isc_queue);
STAILQ_INIT(&softc->fe_list);
+ STAILQ_INIT(&softc->port_list);
STAILQ_INIT(&softc->be_list);
STAILQ_INIT(&softc->io_pools);
- lun = &softc->lun;
-
- /*
- * We don't bother calling these with ctl_lock held here, because,
- * in theory, no one else can try to do anything while we're in our
- * module init routine.
- */
if (ctl_pool_create(softc, CTL_POOL_INTERNAL, CTL_POOL_ENTRIES_INTERNAL,
&internal_pool)!= 0){
printf("ctl: can't allocate %d entry internal pool, "
@@ -1085,76 +1053,65 @@ ctl_init(void)
softc->emergency_pool = emergency_pool;
softc->othersc_pool = other_pool;
- /*
- * We used to allocate a processor LUN here. The new scheme is to
- * just let the user allocate LUNs as he sees fit.
- */
-#if 0
- mtx_lock(&softc->ctl_lock);
- ctl_alloc_lun(softc, lun, /*be_lun*/NULL, /*target*/softc->target);
- mtx_unlock(&softc->ctl_lock);
-#endif
-
- if (worker_threads > MAXCPU || worker_threads == 0) {
- printf("invalid kern.cam.ctl.worker_threads value; "
- "setting to 1");
- worker_threads = 1;
- } else if (worker_threads < 0) {
- if (mp_ncpus > 2) {
- /*
- * Using more than two worker threads actually hurts
- * performance due to lock contention.
- */
- worker_threads = 2;
- } else {
- worker_threads = 1;
- }
- }
+ if (worker_threads <= 0)
+ worker_threads = max(1, mp_ncpus / 4);
+ if (worker_threads > CTL_MAX_THREADS)
+ worker_threads = CTL_MAX_THREADS;
for (i = 0; i < worker_threads; i++) {
- error = kproc_kthread_add(ctl_work_thread, softc,
- &softc->work_thread, NULL, 0, 0, "ctl", "work%d", i);
+ struct ctl_thread *thr = &softc->threads[i];
+
+ mtx_init(&thr->queue_lock, "CTL queue mutex", NULL, MTX_DEF);
+ thr->ctl_softc = softc;
+ STAILQ_INIT(&thr->incoming_queue);
+ STAILQ_INIT(&thr->rtr_queue);
+ STAILQ_INIT(&thr->done_queue);
+ STAILQ_INIT(&thr->isc_queue);
+
+ error = kproc_kthread_add(ctl_work_thread, thr,
+ &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i);
if (error != 0) {
printf("error creating CTL work thread!\n");
- mtx_lock(&softc->ctl_lock);
- ctl_free_lun(lun);
- mtx_unlock(&softc->ctl_lock);
ctl_pool_free(internal_pool);
ctl_pool_free(emergency_pool);
ctl_pool_free(other_pool);
return (error);
}
}
+ error = kproc_kthread_add(ctl_lun_thread, softc,
+ &softc->ctl_proc, NULL, 0, 0, "ctl", "lun");
+ if (error != 0) {
+ printf("error creating CTL lun thread!\n");
+ ctl_pool_free(internal_pool);
+ ctl_pool_free(emergency_pool);
+ ctl_pool_free(other_pool);
+ return (error);
+ }
if (bootverbose)
printf("ctl: CAM Target Layer loaded\n");
/*
- * Initialize the initiator and portname mappings
- */
- memset(softc->wwpn_iid, 0, sizeof(softc->wwpn_iid));
-
- /*
* Initialize the ioctl front end.
*/
- fe = &softc->ioctl_info.fe;
- sprintf(softc->ioctl_info.port_name, "CTL ioctl");
- fe->port_type = CTL_PORT_IOCTL;
- fe->num_requested_ctl_io = 100;
- fe->port_name = softc->ioctl_info.port_name;
- fe->port_online = ctl_ioctl_online;
- fe->port_offline = ctl_ioctl_offline;
- fe->onoff_arg = &softc->ioctl_info;
- fe->targ_enable = ctl_ioctl_targ_enable;
- fe->targ_disable = ctl_ioctl_targ_disable;
- fe->lun_enable = ctl_ioctl_lun_enable;
- fe->lun_disable = ctl_ioctl_lun_disable;
- fe->targ_lun_arg = &softc->ioctl_info;
- fe->fe_datamove = ctl_ioctl_datamove;
- fe->fe_done = ctl_ioctl_done;
- fe->max_targets = 15;
- fe->max_target_id = 15;
+ ctl_frontend_register(&ioctl_frontend);
+ port = &softc->ioctl_info.port;
+ port->frontend = &ioctl_frontend;
+ sprintf(softc->ioctl_info.port_name, "ioctl");
+ port->port_type = CTL_PORT_IOCTL;
+ port->num_requested_ctl_io = 100;
+ port->port_name = softc->ioctl_info.port_name;
+ port->port_online = ctl_ioctl_online;
+ port->port_offline = ctl_ioctl_offline;
+ port->onoff_arg = &softc->ioctl_info;
+ port->lun_enable = ctl_ioctl_lun_enable;
+ port->lun_disable = ctl_ioctl_lun_disable;
+ port->targ_lun_arg = &softc->ioctl_info;
+ port->fe_datamove = ctl_ioctl_datamove;
+ port->fe_done = ctl_ioctl_done;
+ port->max_targets = 15;
+ port->max_target_id = 15;
- if (ctl_frontend_register(&softc->ioctl_info.fe,
+ if (ctl_port_register(&softc->ioctl_info.port,
(softc->flags & CTL_FLAG_MASTER_SHELF)) != 0) {
printf("ctl: ioctl front end registration failed, will "
"continue anyway\n");
@@ -1180,7 +1137,7 @@ ctl_shutdown(void)
softc = (struct ctl_softc *)control_softc;
- if (ctl_frontend_deregister(&softc->ioctl_info.fe) != 0)
+ if (ctl_port_deregister(&softc->ioctl_info.port) != 0)
printf("ctl: ioctl front end deregistration failed\n");
mtx_lock(&softc->ctl_lock);
@@ -1195,6 +1152,8 @@ ctl_shutdown(void)
mtx_unlock(&softc->ctl_lock);
+ ctl_frontend_deregister(&ioctl_frontend);
+
/*
* This will rip the rug out from under any FETDs or anyone else
* that has a pool allocated. Since we increment our module
@@ -1208,6 +1167,7 @@ ctl_shutdown(void)
#if 0
ctl_shutdown_thread(softc->work_thread);
+ mtx_destroy(&softc->queue_lock);
#endif
mtx_destroy(&softc->pool_lock);
@@ -1258,7 +1218,7 @@ int
ctl_port_enable(ctl_port_type port_type)
{
struct ctl_softc *softc;
- struct ctl_frontend *fe;
+ struct ctl_port *port;
if (ctl_is_single == 0) {
union ctl_ha_msg msg_info;
@@ -1287,13 +1247,13 @@ ctl_port_enable(ctl_port_type port_type)
softc = control_softc;
- STAILQ_FOREACH(fe, &softc->fe_list, links) {
- if (port_type & fe->port_type)
+ STAILQ_FOREACH(port, &softc->port_list, links) {
+ if (port_type & port->port_type)
{
#if 0
- printf("port %d\n", fe->targ_port);
+ printf("port %d\n", port->targ_port);
#endif
- ctl_frontend_online(fe);
+ ctl_port_online(port);
}
}
@@ -1304,13 +1264,13 @@ int
ctl_port_disable(ctl_port_type port_type)
{
struct ctl_softc *softc;
- struct ctl_frontend *fe;
+ struct ctl_port *port;
softc = control_softc;
- STAILQ_FOREACH(fe, &softc->fe_list, links) {
- if (port_type & fe->port_type)
- ctl_frontend_offline(fe);
+ STAILQ_FOREACH(port, &softc->port_list, links) {
+ if (port_type & port->port_type)
+ ctl_port_offline(port);
}
return (0);
@@ -1328,7 +1288,7 @@ ctl_port_list(struct ctl_port_entry *entries, int num_entries_alloced,
ctl_port_type port_type, int no_virtual)
{
struct ctl_softc *softc;
- struct ctl_frontend *fe;
+ struct ctl_port *port;
int entries_dropped, entries_filled;
int retval;
int i;
@@ -1341,14 +1301,14 @@ ctl_port_list(struct ctl_port_entry *entries, int num_entries_alloced,
i = 0;
mtx_lock(&softc->ctl_lock);
- STAILQ_FOREACH(fe, &softc->fe_list, links) {
+ STAILQ_FOREACH(port, &softc->port_list, links) {
struct ctl_port_entry *entry;
- if ((fe->port_type & port_type) == 0)
+ if ((port->port_type & port_type) == 0)
continue;
if ((no_virtual != 0)
- && (fe->virtual_port != 0))
+ && (port->virtual_port != 0))
continue;
if (entries_filled >= num_entries_alloced) {
@@ -1357,13 +1317,13 @@ ctl_port_list(struct ctl_port_entry *entries, int num_entries_alloced,
}
entry = &entries[i];
- entry->port_type = fe->port_type;
- strlcpy(entry->port_name, fe->port_name,
+ entry->port_type = port->port_type;
+ strlcpy(entry->port_name, port->port_name,
sizeof(entry->port_name));
- entry->physical_port = fe->physical_port;
- entry->virtual_port = fe->virtual_port;
- entry->wwnn = fe->wwnn;
- entry->wwpn = fe->wwpn;
+ entry->physical_port = port->physical_port;
+ entry->virtual_port = port->virtual_port;
+ entry->wwnn = port->wwnn;
+ entry->wwpn = port->wwpn;
i++;
entries_filled++;
@@ -1402,32 +1362,24 @@ ctl_ioctl_offline(void *arg)
/*
* Remove an initiator by port number and initiator ID.
- * Returns 0 for success, 1 for failure.
+ * Returns 0 for success, -1 for failure.
*/
int
-ctl_remove_initiator(int32_t targ_port, uint32_t iid)
+ctl_remove_initiator(struct ctl_port *port, int iid)
{
- struct ctl_softc *softc;
-
- softc = control_softc;
+ struct ctl_softc *softc = control_softc;
mtx_assert(&softc->ctl_lock, MA_NOTOWNED);
- if ((targ_port < 0)
- || (targ_port > CTL_MAX_PORTS)) {
- printf("%s: invalid port number %d\n", __func__, targ_port);
- return (1);
- }
if (iid > CTL_MAX_INIT_PER_PORT) {
printf("%s: initiator ID %u > maximun %u!\n",
__func__, iid, CTL_MAX_INIT_PER_PORT);
- return (1);
+ return (-1);
}
mtx_lock(&softc->ctl_lock);
-
- softc->wwpn_iid[targ_port][iid].in_use = 0;
-
+ port->wwpn_iid[iid].in_use--;
+ port->wwpn_iid[iid].last_use = time_uptime;
mtx_unlock(&softc->ctl_lock);
return (0);
@@ -1435,41 +1387,91 @@ ctl_remove_initiator(int32_t targ_port, uint32_t iid)
/*
* Add an initiator to the initiator map.
- * Returns 0 for success, 1 for failure.
+ * Returns iid for success, < 0 for failure.
*/
int
-ctl_add_initiator(uint64_t wwpn, int32_t targ_port, uint32_t iid)
+ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name)
{
- struct ctl_softc *softc;
- int retval;
-
- softc = control_softc;
+ struct ctl_softc *softc = control_softc;
+ time_t best_time;
+ int i, best;
mtx_assert(&softc->ctl_lock, MA_NOTOWNED);
- retval = 0;
-
- if ((targ_port < 0)
- || (targ_port > CTL_MAX_PORTS)) {
- printf("%s: invalid port number %d\n", __func__, targ_port);
- return (1);
- }
- if (iid > CTL_MAX_INIT_PER_PORT) {
- printf("%s: WWPN %#jx initiator ID %u > maximun %u!\n",
+ if (iid >= CTL_MAX_INIT_PER_PORT) {
+ printf("%s: WWPN %#jx initiator ID %u > maximum %u!\n",
__func__, wwpn, iid, CTL_MAX_INIT_PER_PORT);
- return (1);
+ free(name, M_CTL);
+ return (-1);
}
mtx_lock(&softc->ctl_lock);
- if (softc->wwpn_iid[targ_port][iid].in_use != 0) {
+ if (iid < 0 && (wwpn != 0 || name != NULL)) {
+ for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
+ if (wwpn != 0 && wwpn == port->wwpn_iid[i].wwpn) {
+ iid = i;
+ break;
+ }
+ if (name != NULL && port->wwpn_iid[i].name != NULL &&
+ strcmp(name, port->wwpn_iid[i].name) == 0) {
+ iid = i;
+ break;
+ }
+ }
+ }
+
+ if (iid < 0) {
+ for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
+ if (port->wwpn_iid[i].in_use == 0 &&
+ port->wwpn_iid[i].wwpn == 0 &&
+ port->wwpn_iid[i].name == NULL) {
+ iid = i;
+ break;
+ }
+ }
+ }
+
+ if (iid < 0) {
+ best = -1;
+ best_time = INT32_MAX;
+ for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
+ if (port->wwpn_iid[i].in_use == 0) {
+ if (port->wwpn_iid[i].last_use < best_time) {
+ best = i;
+ best_time = port->wwpn_iid[i].last_use;
+ }
+ }
+ }
+ iid = best;
+ }
+
+ if (iid < 0) {
+ mtx_unlock(&softc->ctl_lock);
+ free(name, M_CTL);
+ return (-2);
+ }
+
+ if (port->wwpn_iid[iid].in_use > 0 && (wwpn != 0 || name != NULL)) {
/*
- * We don't treat this as an error.
+ * This is not an error yet.
*/
- if (softc->wwpn_iid[targ_port][iid].wwpn == wwpn) {
- printf("%s: port %d iid %u WWPN %#jx arrived again?\n",
- __func__, targ_port, iid, (uintmax_t)wwpn);
- goto bailout;
+ if (wwpn != 0 && wwpn == port->wwpn_iid[iid].wwpn) {
+#if 0
+ printf("%s: port %d iid %u WWPN %#jx arrived"
+ " again\n", __func__, port->targ_port,
+ iid, (uintmax_t)wwpn);
+#endif
+ goto take;
+ }
+ if (name != NULL && port->wwpn_iid[iid].name != NULL &&
+ strcmp(name, port->wwpn_iid[iid].name) == 0) {
+#if 0
+ printf("%s: port %d iid %u name '%s' arrived"
+ " again\n", __func__, port->targ_port,
+ iid, name);
+#endif
+ goto take;
}
/*
@@ -1477,42 +1479,80 @@ ctl_add_initiator(uint64_t wwpn, int32_t targ_port, uint32_t iid)
* driver is telling us we have a new WWPN for this
* initiator ID, so we pretty much need to use it.
*/
- printf("%s: port %d iid %u WWPN %#jx arrived, WWPN %#jx is "
- "still at that address\n", __func__, targ_port, iid,
- (uintmax_t)wwpn,
- (uintmax_t)softc->wwpn_iid[targ_port][iid].wwpn);
+ printf("%s: port %d iid %u WWPN %#jx '%s' arrived,"
+ " but WWPN %#jx '%s' is still at that address\n",
+ __func__, port->targ_port, iid, wwpn, name,
+ (uintmax_t)port->wwpn_iid[iid].wwpn,
+ port->wwpn_iid[iid].name);
/*
* XXX KDM clear have_ca and ua_pending on each LUN for
* this initiator.
*/
}
- softc->wwpn_iid[targ_port][iid].in_use = 1;
- softc->wwpn_iid[targ_port][iid].iid = iid;
- softc->wwpn_iid[targ_port][iid].wwpn = wwpn;
- softc->wwpn_iid[targ_port][iid].port = targ_port;
-
-bailout:
-
+take:
+ free(port->wwpn_iid[iid].name, M_CTL);
+ port->wwpn_iid[iid].name = name;
+ port->wwpn_iid[iid].wwpn = wwpn;
+ port->wwpn_iid[iid].in_use++;
mtx_unlock(&softc->ctl_lock);
- return (retval);
+ return (iid);
}
-/*
- * XXX KDM should we pretend to do something in the target/lun
- * enable/disable functions?
- */
static int
-ctl_ioctl_targ_enable(void *arg, struct ctl_id targ_id)
+ctl_create_iid(struct ctl_port *port, int iid, uint8_t *buf)
{
- return (0);
-}
+ int len;
-static int
-ctl_ioctl_targ_disable(void *arg, struct ctl_id targ_id)
-{
- return (0);
+ switch (port->port_type) {
+ case CTL_PORT_FC:
+ {
+ struct scsi_transportid_fcp *id =
+ (struct scsi_transportid_fcp *)buf;
+ if (port->wwpn_iid[iid].wwpn == 0)
+ return (0);
+ memset(id, 0, sizeof(*id));
+ id->format_protocol = SCSI_PROTO_FC;
+ scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->n_port_name);
+ return (sizeof(*id));
+ }
+ case CTL_PORT_ISCSI:
+ {
+ struct scsi_transportid_iscsi_port *id =
+ (struct scsi_transportid_iscsi_port *)buf;
+ if (port->wwpn_iid[iid].name == NULL)
+ return (0);
+ memset(id, 0, 256);
+ id->format_protocol = SCSI_TRN_ISCSI_FORMAT_PORT |
+ SCSI_PROTO_ISCSI;
+ len = strlcpy(id->iscsi_name, port->wwpn_iid[iid].name, 252) + 1;
+ len = roundup2(min(len, 252), 4);
+ scsi_ulto2b(len, id->additional_length);
+ return (sizeof(*id) + len);
+ }
+ case CTL_PORT_SAS:
+ {
+ struct scsi_transportid_sas *id =
+ (struct scsi_transportid_sas *)buf;
+ if (port->wwpn_iid[iid].wwpn == 0)
+ return (0);
+ memset(id, 0, sizeof(*id));
+ id->format_protocol = SCSI_PROTO_SAS;
+ scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->sas_address);
+ return (sizeof(*id));
+ }
+ default:
+ {
+ struct scsi_transportid_spi *id =
+ (struct scsi_transportid_spi *)buf;
+ memset(id, 0, sizeof(*id));
+ id->format_protocol = SCSI_PROTO_SPI;
+ scsi_ulto2b(iid, id->scsi_addr);
+ scsi_ulto2b(port->targ_port, id->rel_trgt_port_id);
+ return (sizeof(*id));
+ }
+ }
}
static int
@@ -1699,7 +1739,7 @@ bailout:
* (SER_ONLY mode).
*/
static int
-ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio, int have_lock)
+ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio)
{
struct ctl_softc *ctl_softc;
union ctl_ha_msg msg_info;
@@ -1708,12 +1748,8 @@ ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio, int have_lock)
uint32_t targ_lun;
ctl_softc = control_softc;
- if (have_lock == 0)
- mtx_lock(&ctl_softc->ctl_lock);
- targ_lun = ctsio->io_hdr.nexus.targ_lun;
- if (ctsio->io_hdr.nexus.lun_map_fn != NULL)
- targ_lun = ctsio->io_hdr.nexus.lun_map_fn(ctsio->io_hdr.nexus.lun_map_arg, targ_lun);
+ targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
lun = ctl_softc->ctl_luns[targ_lun];
if (lun==NULL)
{
@@ -1742,12 +1778,11 @@ ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio, int have_lock)
if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
}
- if (have_lock == 0)
- mtx_unlock(&ctl_softc->ctl_lock);
return(1);
}
+ mtx_lock(&lun->lun_lock);
TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
switch (ctl_check_ooa(lun, (union ctl_io *)ctsio,
@@ -1762,8 +1797,7 @@ ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio, int have_lock)
case CTL_ACTION_SKIP:
if (ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
- STAILQ_INSERT_TAIL(&ctl_softc->rtr_queue,
- &ctsio->io_hdr, links);
+ ctl_enqueue_rtr((union ctl_io *)ctsio);
} else {
/* send msg back to other side */
@@ -1858,8 +1892,7 @@ ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio, int have_lock)
}
break;
}
- if (have_lock == 0)
- mtx_unlock(&ctl_softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
return (retval);
}
@@ -2018,8 +2051,7 @@ ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
retval = 0;
- mtx_assert(&control_softc->ctl_lock, MA_OWNED);
-
+ mtx_lock(&lun->lun_lock);
for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); (io != NULL);
(*cur_fill_num)++, io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr,
ooa_links)) {
@@ -2056,6 +2088,7 @@ ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED)
entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED;
}
+ mtx_unlock(&lun->lun_lock);
return (retval);
}
@@ -2080,40 +2113,40 @@ ctl_copyin_alloc(void *user_addr, int len, char *error_str,
}
static void
-ctl_free_args(int num_be_args, struct ctl_be_arg *be_args)
+ctl_free_args(int num_args, struct ctl_be_arg *args)
{
int i;
- if (be_args == NULL)
+ if (args == NULL)
return;
- for (i = 0; i < num_be_args; i++) {
- free(be_args[i].kname, M_CTL);
- free(be_args[i].kvalue, M_CTL);
+ for (i = 0; i < num_args; i++) {
+ free(args[i].kname, M_CTL);
+ free(args[i].kvalue, M_CTL);
}
- free(be_args, M_CTL);
+ free(args, M_CTL);
}
static struct ctl_be_arg *
-ctl_copyin_args(int num_be_args, struct ctl_be_arg *be_args,
+ctl_copyin_args(int num_args, struct ctl_be_arg *uargs,
char *error_str, size_t error_str_len)
{
struct ctl_be_arg *args;
int i;
- args = ctl_copyin_alloc(be_args, num_be_args * sizeof(*be_args),
+ args = ctl_copyin_alloc(uargs, num_args * sizeof(*args),
error_str, error_str_len);
if (args == NULL)
goto bailout;
- for (i = 0; i < num_be_args; i++) {
+ for (i = 0; i < num_args; i++) {
args[i].kname = NULL;
args[i].kvalue = NULL;
}
- for (i = 0; i < num_be_args; i++) {
+ for (i = 0; i < num_args; i++) {
uint8_t *tmpptr;
args[i].kname = ctl_copyin_alloc(args[i].name,
@@ -2127,31 +2160,43 @@ ctl_copyin_args(int num_be_args, struct ctl_be_arg *be_args,
goto bailout;
}
- args[i].kvalue = NULL;
-
- tmpptr = ctl_copyin_alloc(args[i].value,
- args[i].vallen, error_str, error_str_len);
- if (tmpptr == NULL)
- goto bailout;
-
- args[i].kvalue = tmpptr;
-
- if ((args[i].flags & CTL_BEARG_ASCII)
- && (tmpptr[args[i].vallen - 1] != '\0')) {
- snprintf(error_str, error_str_len, "Argument %d "
- "value is not NUL-terminated", i);
- goto bailout;
+ if (args[i].flags & CTL_BEARG_RD) {
+ tmpptr = ctl_copyin_alloc(args[i].value,
+ args[i].vallen, error_str, error_str_len);
+ if (tmpptr == NULL)
+ goto bailout;
+ if ((args[i].flags & CTL_BEARG_ASCII)
+ && (tmpptr[args[i].vallen - 1] != '\0')) {
+ snprintf(error_str, error_str_len, "Argument "
+ "%d value is not NUL-terminated", i);
+ goto bailout;
+ }
+ args[i].kvalue = tmpptr;
+ } else {
+ args[i].kvalue = malloc(args[i].vallen,
+ M_CTL, M_WAITOK | M_ZERO);
}
}
return (args);
bailout:
- ctl_free_args(num_be_args, args);
+ ctl_free_args(num_args, args);
return (NULL);
}
+static void
+ctl_copyout_args(int num_args, struct ctl_be_arg *args)
+{
+ int i;
+
+ for (i = 0; i < num_args; i++) {
+ if (args[i].flags & CTL_BEARG_WR)
+ copyout(args[i].kvalue, args[i].value, args[i].vallen);
+ }
+}
+
/*
* Escape characters that are illegal or not recommended in XML.
*/
@@ -2207,14 +2252,14 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
* to this FETD.
*/
if ((softc->ioctl_info.flags & CTL_IOCTL_FLAG_ENABLED) == 0) {
- retval = -EPERM;
+ retval = EPERM;
break;
}
- io = ctl_alloc_io(softc->ioctl_info.fe.ctl_pool_ref);
+ io = ctl_alloc_io(softc->ioctl_info.port.ctl_pool_ref);
if (io == NULL) {
printf("ctl_ioctl: can't allocate ctl_io!\n");
- retval = -ENOSPC;
+ retval = ENOSPC;
break;
}
@@ -2235,7 +2280,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
/*
* The user sets the initiator ID, target and LUN IDs.
*/
- io->io_hdr.nexus.targ_port = softc->ioctl_info.fe.targ_port;
+ io->io_hdr.nexus.targ_port = softc->ioctl_info.port.targ_port;
io->io_hdr.flags |= CTL_FLAG_USER_REQ;
if ((io->io_hdr.io_type == CTL_IO_SCSI)
&& (io->scsiio.tag_type != CTL_TAG_UNTAGGED))
@@ -2258,20 +2303,20 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
case CTL_ENABLE_PORT:
case CTL_DISABLE_PORT:
case CTL_SET_PORT_WWNS: {
- struct ctl_frontend *fe;
+ struct ctl_port *port;
struct ctl_port_entry *entry;
entry = (struct ctl_port_entry *)addr;
mtx_lock(&softc->ctl_lock);
- STAILQ_FOREACH(fe, &softc->fe_list, links) {
+ STAILQ_FOREACH(port, &softc->port_list, links) {
int action, done;
action = 0;
done = 0;
if ((entry->port_type == CTL_PORT_NONE)
- && (entry->targ_port == fe->targ_port)) {
+ && (entry->targ_port == port->targ_port)) {
/*
* If the user only wants to enable or
* disable or set WWNs on a specific port,
@@ -2279,7 +2324,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
*/
action = 1;
done = 1;
- } else if (entry->port_type & fe->port_type) {
+ } else if (entry->port_type & port->port_type) {
/*
* Compare the user's type mask with the
* particular frontend type to see if we
@@ -2314,21 +2359,21 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
STAILQ_FOREACH(lun, &softc->lun_list,
links) {
- fe->lun_enable(fe->targ_lun_arg,
+ port->lun_enable(port->targ_lun_arg,
lun->target,
lun->lun);
}
- ctl_frontend_online(fe);
+ ctl_port_online(port);
} else if (cmd == CTL_DISABLE_PORT) {
struct ctl_lun *lun;
- ctl_frontend_offline(fe);
+ ctl_port_offline(port);
STAILQ_FOREACH(lun, &softc->lun_list,
links) {
- fe->lun_disable(
- fe->targ_lun_arg,
+ port->lun_disable(
+ port->targ_lun_arg,
lun->target,
lun->lun);
}
@@ -2337,7 +2382,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
mtx_lock(&softc->ctl_lock);
if (cmd == CTL_SET_PORT_WWNS)
- ctl_frontend_set_wwns(fe,
+ ctl_port_set_wwns(port,
(entry->flags & CTL_PORT_WWNN_VALID) ?
1 : 0, entry->wwnn,
(entry->flags & CTL_PORT_WWPN_VALID) ?
@@ -2350,7 +2395,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
break;
}
case CTL_GET_PORT_LIST: {
- struct ctl_frontend *fe;
+ struct ctl_port *port;
struct ctl_port_list *list;
int i;
@@ -2370,7 +2415,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
list->dropped_num = 0;
i = 0;
mtx_lock(&softc->ctl_lock);
- STAILQ_FOREACH(fe, &softc->fe_list, links) {
+ STAILQ_FOREACH(port, &softc->port_list, links) {
struct ctl_port_entry entry, *list_entry;
if (list->fill_num >= list->alloc_num) {
@@ -2378,15 +2423,15 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
continue;
}
- entry.port_type = fe->port_type;
- strlcpy(entry.port_name, fe->port_name,
+ entry.port_type = port->port_type;
+ strlcpy(entry.port_name, port->port_name,
sizeof(entry.port_name));
- entry.targ_port = fe->targ_port;
- entry.physical_port = fe->physical_port;
- entry.virtual_port = fe->virtual_port;
- entry.wwnn = fe->wwnn;
- entry.wwpn = fe->wwpn;
- if (fe->status & CTL_PORT_STATUS_ONLINE)
+ entry.targ_port = port->targ_port;
+ entry.physical_port = port->physical_port;
+ entry.virtual_port = port->virtual_port;
+ entry.wwnn = port->wwnn;
+ entry.wwpn = port->wwpn;
+ if (port->status & CTL_PORT_STATUS_ONLINE)
entry.online = 1;
else
entry.online = 0;
@@ -2428,6 +2473,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
mtx_lock(&softc->ctl_lock);
printf("Dumping OOA queues:\n");
STAILQ_FOREACH(lun, &softc->lun_list, links) {
+ mtx_lock(&lun->lun_lock);
for (io = (union ctl_io *)TAILQ_FIRST(
&lun->ooa_queue); io != NULL;
io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr,
@@ -2449,6 +2495,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
sbuf_finish(&sb);
printf("%s\n", sbuf_data(&sb));
}
+ mtx_unlock(&lun->lun_lock);
}
printf("OOA queues dump done\n");
mtx_unlock(&softc->ctl_lock);
@@ -2564,15 +2611,16 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
ooa_info->status = CTL_OOA_INVALID_LUN;
break;
}
-
+ mtx_lock(&lun->lun_lock);
+ mtx_unlock(&softc->ctl_lock);
ooa_info->num_entries = 0;
for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue);
io != NULL; io = (union ctl_io *)TAILQ_NEXT(
&io->io_hdr, ooa_links)) {
ooa_info->num_entries++;
}
+ mtx_unlock(&lun->lun_lock);
- mtx_unlock(&softc->ctl_lock);
ooa_info->status = CTL_OOA_SUCCESS;
break;
@@ -2690,6 +2738,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
delay_info->status = CTL_DELAY_STATUS_INVALID_LUN;
} else {
lun = softc->ctl_luns[delay_info->lun_id];
+ mtx_lock(&lun->lun_lock);
delay_info->status = CTL_DELAY_STATUS_OK;
@@ -2722,6 +2771,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
CTL_DELAY_STATUS_INVALID_LOC;
break;
}
+ mtx_unlock(&lun->lun_lock);
}
mtx_unlock(&softc->ctl_lock);
@@ -2744,7 +2794,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
softc->flags |= CTL_FLAG_REAL_SYNC;
break;
default:
- retval = -EINVAL;
+ retval = EINVAL;
break;
}
mtx_unlock(&softc->ctl_lock);
@@ -2782,12 +2832,13 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
* in the set case, hopefully the user won't do something
* silly.
*/
+ mtx_lock(&lun->lun_lock);
+ mtx_unlock(&softc->ctl_lock);
if (cmd == CTL_GETSYNC)
sync_info->sync_interval = lun->sync_interval;
else
lun->sync_interval = sync_info->sync_interval;
-
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
sync_info->status = CTL_GS_SYNC_OK;
@@ -2848,6 +2899,8 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
retval = EINVAL;
break;
}
+ mtx_lock(&lun->lun_lock);
+ mtx_unlock(&softc->ctl_lock);
/*
* We could do some checking here to verify the validity
@@ -2870,7 +2923,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
err_desc->serial = lun->error_serial;
lun->error_serial++;
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
break;
}
case CTL_ERROR_INJECT_DELETE: {
@@ -2890,6 +2943,8 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
retval = EINVAL;
break;
}
+ mtx_lock(&lun->lun_lock);
+ mtx_unlock(&softc->ctl_lock);
STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) {
if (desc->serial != delete_desc->serial)
continue;
@@ -2899,7 +2954,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
free(desc, M_CTL);
delete_done = 1;
}
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
if (delete_done == 0) {
printf("%s: CTL_ERROR_INJECT_DELETE: can't find "
"error serial %ju on LUN %u\n", __func__,
@@ -2910,22 +2965,11 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
break;
}
case CTL_DUMP_STRUCTS: {
- int i, j, k;
+ int i, j, k, idx;
+ struct ctl_port *port;
struct ctl_frontend *fe;
- printf("CTL IID to WWPN map start:\n");
- for (i = 0; i < CTL_MAX_PORTS; i++) {
- for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
- if (softc->wwpn_iid[i][j].in_use == 0)
- continue;
-
- printf("port %d iid %u WWPN %#jx\n",
- softc->wwpn_iid[i][j].port,
- softc->wwpn_iid[i][j].iid,
- (uintmax_t)softc->wwpn_iid[i][j].wwpn);
- }
- }
- printf("CTL IID to WWPN map end\n");
+ mtx_lock(&softc->ctl_lock);
printf("CTL Persistent Reservation information start:\n");
for (i = 0; i < CTL_MAX_LUNS; i++) {
struct ctl_lun *lun;
@@ -2938,36 +2982,48 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
for (j = 0; j < (CTL_MAX_PORTS * 2); j++) {
for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){
- if (lun->per_res[j+k].registered == 0)
+ idx = j * CTL_MAX_INIT_PER_PORT + k;
+ if (lun->per_res[idx].registered == 0)
continue;
- printf("LUN %d port %d iid %d key "
+ printf(" LUN %d port %d iid %d key "
"%#jx\n", i, j, k,
(uintmax_t)scsi_8btou64(
- lun->per_res[j+k].res_key.key));
+ lun->per_res[idx].res_key.key));
}
}
}
printf("CTL Persistent Reservation information end\n");
- printf("CTL Frontends:\n");
+ printf("CTL Ports:\n");
+ STAILQ_FOREACH(port, &softc->port_list, links) {
+ printf(" Port %d '%s' Frontend '%s' Type %u pp %d vp %d WWNN "
+ "%#jx WWPN %#jx\n", port->targ_port, port->port_name,
+ port->frontend->name, port->port_type,
+ port->physical_port, port->virtual_port,
+ (uintmax_t)port->wwnn, (uintmax_t)port->wwpn);
+ for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
+ if (port->wwpn_iid[j].in_use == 0 &&
+ port->wwpn_iid[j].wwpn == 0 &&
+ port->wwpn_iid[j].name == NULL)
+ continue;
+
+ printf(" iid %u use %d WWPN %#jx '%s'\n",
+ j, port->wwpn_iid[j].in_use,
+ (uintmax_t)port->wwpn_iid[j].wwpn,
+ port->wwpn_iid[j].name);
+ }
+ }
+ printf("CTL Port information end\n");
+ mtx_unlock(&softc->ctl_lock);
/*
* XXX KDM calling this without a lock. We'd likely want
* to drop the lock before calling the frontend's dump
* routine anyway.
*/
+ printf("CTL Frontends:\n");
STAILQ_FOREACH(fe, &softc->fe_list, links) {
- printf("Frontend %s Type %u pport %d vport %d WWNN "
- "%#jx WWPN %#jx\n", fe->port_name, fe->port_type,
- fe->physical_port, fe->virtual_port,
- (uintmax_t)fe->wwnn, (uintmax_t)fe->wwpn);
-
- /*
- * Frontends are not required to support the dump
- * routine.
- */
- if (fe->fe_dump == NULL)
- continue;
-
- fe->fe_dump();
+ printf(" Frontend '%s'\n", fe->name);
+ if (fe->fe_dump != NULL)
+ fe->fe_dump();
}
printf("CTL Frontend information end\n");
break;
@@ -3002,6 +3058,8 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
retval = backend->ioctl(dev, cmd, addr, flag, td);
if (lun_req->num_be_args > 0) {
+ ctl_copyout_args(lun_req->num_be_args,
+ lun_req->kern_be_args);
ctl_free_args(lun_req->num_be_args,
lun_req->kern_be_args);
}
@@ -3011,7 +3069,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
struct sbuf *sb;
struct ctl_lun *lun;
struct ctl_lun_list *list;
- struct ctl_be_lun_option *opt;
+ struct ctl_option *opt;
list = (struct ctl_lun_list *)addr;
@@ -3052,8 +3110,8 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
sbuf_printf(sb, "<ctllunlist>\n");
mtx_lock(&softc->ctl_lock);
-
STAILQ_FOREACH(lun, &softc->lun_list, links) {
+ mtx_lock(&lun->lun_lock);
retval = sbuf_printf(sb, "<lun id=\"%ju\">\n",
(uintmax_t)lun->lun);
@@ -3064,7 +3122,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
if (retval != 0)
break;
- retval = sbuf_printf(sb, "<backend_type>%s"
+ retval = sbuf_printf(sb, "\t<backend_type>%s"
"</backend_type>\n",
(lun->backend == NULL) ? "none" :
lun->backend->name);
@@ -3072,7 +3130,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
if (retval != 0)
break;
- retval = sbuf_printf(sb, "<lun_type>%d</lun_type>\n",
+ retval = sbuf_printf(sb, "\t<lun_type>%d</lun_type>\n",
lun->be_lun->lun_type);
if (retval != 0)
@@ -3085,20 +3143,20 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
continue;
}
- retval = sbuf_printf(sb, "<size>%ju</size>\n",
+ retval = sbuf_printf(sb, "\t<size>%ju</size>\n",
(lun->be_lun->maxlba > 0) ?
lun->be_lun->maxlba + 1 : 0);
if (retval != 0)
break;
- retval = sbuf_printf(sb, "<blocksize>%u</blocksize>\n",
+ retval = sbuf_printf(sb, "\t<blocksize>%u</blocksize>\n",
lun->be_lun->blocksize);
if (retval != 0)
break;
- retval = sbuf_printf(sb, "<serial_number>");
+ retval = sbuf_printf(sb, "\t<serial_number>");
if (retval != 0)
break;
@@ -3114,7 +3172,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
if (retval != 0)
break;
- retval = sbuf_printf(sb, "<device_id>");
+ retval = sbuf_printf(sb, "\t<device_id>");
if (retval != 0)
break;
@@ -3135,7 +3193,8 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
break;
}
STAILQ_FOREACH(opt, &lun->be_lun->options, links) {
- retval = sbuf_printf(sb, "<%s>%s</%s>", opt->name, opt->value, opt->name);
+ retval = sbuf_printf(sb, "\t<%s>%s</%s>\n",
+ opt->name, opt->value, opt->name);
if (retval != 0)
break;
}
@@ -3144,7 +3203,10 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
if (retval != 0)
break;
+ mtx_unlock(&lun->lun_lock);
}
+ if (lun != NULL)
+ mtx_unlock(&lun->lun_lock);
mtx_unlock(&softc->ctl_lock);
if ((retval != 0)
@@ -3174,20 +3236,155 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
ci = (struct ctl_iscsi *)addr;
+ fe = ctl_frontend_find("iscsi");
+ if (fe == NULL) {
+ ci->status = CTL_ISCSI_ERROR;
+ snprintf(ci->error_str, sizeof(ci->error_str),
+ "Frontend \"iscsi\" not found.");
+ break;
+ }
+
+ retval = fe->ioctl(dev, cmd, addr, flag, td);
+ break;
+ }
+ case CTL_PORT_REQ: {
+ struct ctl_req *req;
+ struct ctl_frontend *fe;
+
+ req = (struct ctl_req *)addr;
+
+ fe = ctl_frontend_find(req->driver);
+ if (fe == NULL) {
+ req->status = CTL_LUN_ERROR;
+ snprintf(req->error_str, sizeof(req->error_str),
+ "Frontend \"%s\" not found.", req->driver);
+ break;
+ }
+ if (req->num_args > 0) {
+ req->kern_args = ctl_copyin_args(req->num_args,
+ req->args, req->error_str, sizeof(req->error_str));
+ if (req->kern_args == NULL) {
+ req->status = CTL_LUN_ERROR;
+ break;
+ }
+ }
+
+ retval = fe->ioctl(dev, cmd, addr, flag, td);
+
+ if (req->num_args > 0) {
+ ctl_copyout_args(req->num_args, req->kern_args);
+ ctl_free_args(req->num_args, req->kern_args);
+ }
+ break;
+ }
+ case CTL_PORT_LIST: {
+ struct sbuf *sb;
+ struct ctl_port *port;
+ struct ctl_lun_list *list;
+ struct ctl_option *opt;
+
+ list = (struct ctl_lun_list *)addr;
+
+ sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN);
+ if (sb == NULL) {
+ list->status = CTL_LUN_LIST_ERROR;
+ snprintf(list->error_str, sizeof(list->error_str),
+ "Unable to allocate %d bytes for LUN list",
+ list->alloc_len);
+ break;
+ }
+
+ sbuf_printf(sb, "<ctlportlist>\n");
+
mtx_lock(&softc->ctl_lock);
- STAILQ_FOREACH(fe, &softc->fe_list, links) {
- if (strcmp(fe->port_name, "iscsi") == 0)
+ STAILQ_FOREACH(port, &softc->port_list, links) {
+ retval = sbuf_printf(sb, "<targ_port id=\"%ju\">\n",
+ (uintmax_t)port->targ_port);
+
+ /*
+ * Bail out as soon as we see that we've overfilled
+ * the buffer.
+ */
+ if (retval != 0)
+ break;
+
+ retval = sbuf_printf(sb, "\t<frontend_type>%s"
+ "</frontend_type>\n", port->frontend->name);
+ if (retval != 0)
+ break;
+
+ retval = sbuf_printf(sb, "\t<port_type>%d</port_type>\n",
+ port->port_type);
+ if (retval != 0)
+ break;
+
+ retval = sbuf_printf(sb, "\t<online>%s</online>\n",
+ (port->status & CTL_PORT_STATUS_ONLINE) ? "YES" : "NO");
+ if (retval != 0)
+ break;
+
+ retval = sbuf_printf(sb, "\t<port_name>%s</port_name>\n",
+ port->port_name);
+ if (retval != 0)
+ break;
+
+ retval = sbuf_printf(sb, "\t<physical_port>%d</physical_port>\n",
+ port->physical_port);
+ if (retval != 0)
+ break;
+
+ retval = sbuf_printf(sb, "\t<virtual_port>%d</virtual_port>\n",
+ port->virtual_port);
+ if (retval != 0)
+ break;
+
+ retval = sbuf_printf(sb, "\t<wwnn>%#jx</wwnn>\n",
+ (uintmax_t)port->wwnn);
+ if (retval != 0)
+ break;
+
+ retval = sbuf_printf(sb, "\t<wwpn>%#jx</wwpn>\n",
+ (uintmax_t)port->wwpn);
+ if (retval != 0)
+ break;
+
+ if (port->port_info != NULL) {
+ retval = port->port_info(port->onoff_arg, sb);
+ if (retval != 0)
+ break;
+ }
+ STAILQ_FOREACH(opt, &port->options, links) {
+ retval = sbuf_printf(sb, "\t<%s>%s</%s>\n",
+ opt->name, opt->value, opt->name);
+ if (retval != 0)
+ break;
+ }
+
+ retval = sbuf_printf(sb, "</targ_port>\n");
+ if (retval != 0)
break;
}
mtx_unlock(&softc->ctl_lock);
- if (fe == NULL) {
- ci->status = CTL_ISCSI_ERROR;
- snprintf(ci->error_str, sizeof(ci->error_str), "Backend \"iscsi\" not found.");
+ if ((retval != 0)
+ || ((retval = sbuf_printf(sb, "</ctlportlist>\n")) != 0)) {
+ retval = 0;
+ sbuf_delete(sb);
+ list->status = CTL_LUN_LIST_NEED_MORE_SPACE;
+ snprintf(list->error_str, sizeof(list->error_str),
+ "Out of space, %d bytes is too small",
+ list->alloc_len);
break;
}
- retval = fe->ioctl(dev, cmd, addr, flag, td);
+ sbuf_finish(sb);
+
+ retval = copyout(sbuf_data(sb), list->lun_xml,
+ sbuf_len(sb) + 1);
+
+ list->fill_len = sbuf_len(sb) + 1;
+ list->status = CTL_LUN_LIST_OK;
+ sbuf_delete(sb);
break;
}
default: {
@@ -3215,7 +3412,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
if (found == 0) {
printf("ctl: unknown ioctl command %#lx or backend "
"%d\n", cmd, type);
- retval = -EINVAL;
+ retval = EINVAL;
break;
}
retval = backend->ioctl(dev, cmd, addr, flag, td);
@@ -3254,6 +3451,35 @@ ctl_port_idx(int port_num)
return(port_num - CTL_MAX_PORTS);
}
+static uint32_t
+ctl_map_lun(int port_num, uint32_t lun_id)
+{
+ struct ctl_port *port;
+
+ port = control_softc->ctl_ports[ctl_port_idx(port_num)];
+ if (port == NULL)
+ return (UINT32_MAX);
+ if (port->lun_map == NULL)
+ return (lun_id);
+ return (port->lun_map(port->targ_lun_arg, lun_id));
+}
+
+static uint32_t
+ctl_map_lun_back(int port_num, uint32_t lun_id)
+{
+ struct ctl_port *port;
+ uint32_t i;
+
+ port = control_softc->ctl_ports[ctl_port_idx(port_num)];
+ if (port->lun_map == NULL)
+ return (lun_id);
+ for (i = 0; i < CTL_MAX_LUNS; i++) {
+ if (port->lun_map(port->targ_lun_arg, i) == lun_id)
+ return (i);
+ }
+ return (UINT32_MAX);
+}
+
/*
* Note: This only works for bitmask sizes that are at least 32 bits, and
* that are a power of 2.
@@ -3380,7 +3606,7 @@ ctl_pool_create(struct ctl_softc *ctl_softc, ctl_pool_type pool_type,
pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL,
M_NOWAIT | M_ZERO);
if (pool == NULL) {
- retval = -ENOMEM;
+ retval = ENOMEM;
goto bailout;
}
@@ -3403,7 +3629,7 @@ ctl_pool_create(struct ctl_softc *ctl_softc, ctl_pool_type pool_type,
* tracking.
*/
for (i = 0; i < total_ctl_io; i++) {
- cur_io = (union ctl_io *)malloc(sizeof(*cur_io), M_CTL,
+ cur_io = (union ctl_io *)malloc(sizeof(*cur_io), M_CTLIO,
M_NOWAIT);
if (cur_io == NULL) {
retval = ENOMEM;
@@ -3422,7 +3648,7 @@ ctl_pool_create(struct ctl_softc *ctl_softc, ctl_pool_type pool_type,
links);
STAILQ_REMOVE(&pool->free_queue, &cur_io->io_hdr,
ctl_io_hdr, links);
- free(cur_io, M_CTL);
+ free(cur_io, M_CTLIO);
}
free(pool, M_CTL);
@@ -3442,7 +3668,6 @@ ctl_pool_create(struct ctl_softc *ctl_softc, ctl_pool_type pool_type,
#if 0
if ((pool_type != CTL_POOL_EMERGENCY)
&& (pool_type != CTL_POOL_INTERNAL)
- && (pool_type != CTL_POOL_IOCTL)
&& (pool_type != CTL_POOL_4OTHERSC))
MOD_INC_USE_COUNT;
#endif
@@ -3463,7 +3688,7 @@ ctl_pool_acquire(struct ctl_io_pool *pool)
mtx_assert(&pool->ctl_softc->pool_lock, MA_OWNED);
if (pool->flags & CTL_POOL_FLAG_INVALID)
- return (-EINVAL);
+ return (EINVAL);
pool->refcount++;
@@ -3484,7 +3709,7 @@ ctl_pool_release(struct ctl_io_pool *pool)
while ((io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue)) != NULL) {
STAILQ_REMOVE(&pool->free_queue, &io->io_hdr, ctl_io_hdr,
links);
- free(io, M_CTL);
+ free(io, M_CTLIO);
}
STAILQ_REMOVE(&ctl_softc->io_pools, pool, ctl_io_pool, links);
@@ -3496,7 +3721,7 @@ ctl_pool_release(struct ctl_io_pool *pool)
#if 0
if ((pool->type != CTL_POOL_EMERGENCY)
&& (pool->type != CTL_POOL_INTERNAL)
- && (pool->type != CTL_POOL_IOCTL))
+ && (pool->type != CTL_POOL_4OTHERSC))
MOD_DEC_USE_COUNT;
#endif
@@ -3588,7 +3813,7 @@ ctl_alloc_io(void *pool_ref)
* The emergency pool (if it exists) didn't have one, so try an
* atomic (i.e. nonblocking) malloc and see if we get lucky.
*/
- io = (union ctl_io *)malloc(sizeof(*io), M_CTL, M_NOWAIT);
+ io = (union ctl_io *)malloc(sizeof(*io), M_CTLIO, M_NOWAIT);
if (io != NULL) {
/*
* If the emergency pool exists but is empty, add this
@@ -3626,49 +3851,9 @@ ctl_free_io(union ctl_io *io)
*/
if (io->io_hdr.pool != NULL) {
struct ctl_io_pool *pool;
-#if 0
- struct ctl_softc *ctl_softc;
- union ctl_io *tmp_io;
- unsigned long xflags;
- int i;
-
- ctl_softc = control_softc;
-#endif
pool = (struct ctl_io_pool *)io->io_hdr.pool;
-
mtx_lock(&pool->ctl_softc->pool_lock);
-#if 0
- save_flags(xflags);
-
- for (i = 0, tmp_io = (union ctl_io *)STAILQ_FIRST(
- &ctl_softc->task_queue); tmp_io != NULL; i++,
- tmp_io = (union ctl_io *)STAILQ_NEXT(&tmp_io->io_hdr,
- links)) {
- if (tmp_io == io) {
- printf("%s: %p is still on the task queue!\n",
- __func__, tmp_io);
- printf("%s: (%d): type %d "
- "msg %d cdb %x iptl: "
- "%d:%d:%d:%d tag 0x%04x "
- "flg %#lx\n",
- __func__, i,
- tmp_io->io_hdr.io_type,
- tmp_io->io_hdr.msg_type,
- tmp_io->scsiio.cdb[0],
- tmp_io->io_hdr.nexus.initid.id,
- tmp_io->io_hdr.nexus.targ_port,
- tmp_io->io_hdr.nexus.targ_target.id,
- tmp_io->io_hdr.nexus.targ_lun,
- (tmp_io->io_hdr.io_type ==
- CTL_IO_TASK) ?
- tmp_io->taskio.tag_num :
- tmp_io->scsiio.tag_num,
- xflags);
- panic("I/O still on the task queue!");
- }
- }
-#endif
io->io_hdr.io_type = 0xff;
STAILQ_INSERT_TAIL(&pool->free_queue, &io->io_hdr, links);
pool->total_freed++;
@@ -3680,7 +3865,7 @@ ctl_free_io(union ctl_io *io)
* Otherwise, just free it. We probably malloced it and
* the emergency pool wasn't available.
*/
- free(io, M_CTL);
+ free(io, M_CTLIO);
}
}
@@ -4242,8 +4427,12 @@ ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
struct ctl_be_lun *const be_lun, struct ctl_id target_id)
{
struct ctl_lun *nlun, *lun;
- struct ctl_frontend *fe;
+ struct ctl_port *port;
+ struct scsi_vpd_id_descriptor *desc;
+ struct scsi_vpd_id_t10 *t10id;
+ const char *eui, *naa, *scsiname, *vendor;
int lun_number, i, lun_malloced;
+ int devidlen, idlen1, idlen2 = 0, len;
if (be_lun == NULL)
return (EINVAL);
@@ -4275,6 +4464,69 @@ ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
if (lun_malloced)
lun->flags = CTL_LUN_MALLOCED;
+ /* Generate LUN ID. */
+ devidlen = max(CTL_DEVID_MIN_LEN,
+ strnlen(be_lun->device_id, CTL_DEVID_LEN));
+ idlen1 = sizeof(*t10id) + devidlen;
+ len = sizeof(struct scsi_vpd_id_descriptor) + idlen1;
+ scsiname = ctl_get_opt(&be_lun->options, "scsiname");
+ if (scsiname != NULL) {
+ idlen2 = roundup2(strlen(scsiname) + 1, 4);
+ len += sizeof(struct scsi_vpd_id_descriptor) + idlen2;
+ }
+ eui = ctl_get_opt(&be_lun->options, "eui");
+ if (eui != NULL) {
+ len += sizeof(struct scsi_vpd_id_descriptor) + 8;
+ }
+ naa = ctl_get_opt(&be_lun->options, "naa");
+ if (naa != NULL) {
+ len += sizeof(struct scsi_vpd_id_descriptor) + 8;
+ }
+ lun->lun_devid = malloc(sizeof(struct ctl_devid) + len,
+ M_CTL, M_WAITOK | M_ZERO);
+ lun->lun_devid->len = len;
+ desc = (struct scsi_vpd_id_descriptor *)lun->lun_devid->data;
+ desc->proto_codeset = SVPD_ID_CODESET_ASCII;
+ desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10;
+ desc->length = idlen1;
+ t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0];
+ memset(t10id->vendor, ' ', sizeof(t10id->vendor));
+ if ((vendor = ctl_get_opt(&be_lun->options, "vendor")) == NULL) {
+ strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor));
+ } else {
+ strncpy(t10id->vendor, vendor,
+ min(sizeof(t10id->vendor), strlen(vendor)));
+ }
+ strncpy((char *)t10id->vendor_spec_id,
+ (char *)be_lun->device_id, devidlen);
+ if (scsiname != NULL) {
+ desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
+ desc->length);
+ desc->proto_codeset = SVPD_ID_CODESET_UTF8;
+ desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN |
+ SVPD_ID_TYPE_SCSI_NAME;
+ desc->length = idlen2;
+ strlcpy(desc->identifier, scsiname, idlen2);
+ }
+ if (eui != NULL) {
+ desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
+ desc->length);
+ desc->proto_codeset = SVPD_ID_CODESET_BINARY;
+ desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN |
+ SVPD_ID_TYPE_EUI64;
+ desc->length = 8;
+ scsi_u64to8b(strtouq(eui, NULL, 0), desc->identifier);
+ }
+ if (naa != NULL) {
+ desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
+ desc->length);
+ desc->proto_codeset = SVPD_ID_CODESET_BINARY;
+ desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN |
+ SVPD_ID_TYPE_NAA;
+ desc->length = 8;
+ scsi_u64to8b(strtouq(naa, NULL, 0), desc->identifier);
+ }
+
mtx_lock(&ctl_softc->ctl_lock);
/*
* See if the caller requested a particular LUN number. If so, see
@@ -4318,6 +4570,7 @@ ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
}
ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number);
+ mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF);
lun->target = target_id;
lun->lun = lun_number;
lun->be_lun = be_lun;
@@ -4343,6 +4596,7 @@ ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
TAILQ_INIT(&lun->ooa_queue);
TAILQ_INIT(&lun->blocked_queue);
STAILQ_INIT(&lun->error_list);
+ ctl_tpc_init(lun);
/*
* Initialize the mode page index.
@@ -4353,7 +4607,7 @@ ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
* Set the poweron UA for all initiators on this LUN only.
*/
for (i = 0; i < CTL_MAX_INITIATORS; i++)
- lun->pending_sense[i].ua_pending = CTL_UA_POWERON;
+ lun->pending_ua[i] = CTL_UA_POWERON;
/*
* Now, before we insert this lun on the lun list, set the lun
@@ -4361,7 +4615,7 @@ ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
*/
STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) {
for (i = 0; i < CTL_MAX_INITIATORS; i++) {
- nlun->pending_sense[i].ua_pending |= CTL_UA_LUN_CHANGE;
+ nlun->pending_ua[i] |= CTL_UA_LUN_CHANGE;
}
}
@@ -4390,35 +4644,17 @@ ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
* already. Enable the target ID if it hasn't been enabled, and
* enable this particular LUN.
*/
- STAILQ_FOREACH(fe, &ctl_softc->fe_list, links) {
+ STAILQ_FOREACH(port, &ctl_softc->port_list, links) {
int retval;
- /*
- * XXX KDM this only works for ONE TARGET ID. We'll need
- * to do things differently if we go to a multiple target
- * ID scheme.
- */
- if ((fe->status & CTL_PORT_STATUS_TARG_ONLINE) == 0) {
-
- retval = fe->targ_enable(fe->targ_lun_arg, target_id);
- if (retval != 0) {
- printf("ctl_alloc_lun: FETD %s port %d "
- "returned error %d for targ_enable on "
- "target %ju\n", fe->port_name,
- fe->targ_port, retval,
- (uintmax_t)target_id.id);
- } else
- fe->status |= CTL_PORT_STATUS_TARG_ONLINE;
- }
-
- retval = fe->lun_enable(fe->targ_lun_arg, target_id,lun_number);
+ retval = port->lun_enable(port->targ_lun_arg, target_id,lun_number);
if (retval != 0) {
printf("ctl_alloc_lun: FETD %s port %d returned error "
"%d for lun_enable on target %ju lun %d\n",
- fe->port_name, fe->targ_port, retval,
+ port->port_name, port->targ_port, retval,
(uintmax_t)target_id.id, lun_number);
} else
- fe->status |= CTL_PORT_STATUS_LUN_ONLINE;
+ port->status |= CTL_PORT_STATUS_LUN_ONLINE;
}
return (0);
}
@@ -4434,10 +4670,9 @@ ctl_free_lun(struct ctl_lun *lun)
{
struct ctl_softc *softc;
#if 0
- struct ctl_frontend *fe;
+ struct ctl_port *port;
#endif
struct ctl_lun *nlun;
- union ctl_io *io, *next_io;
int i;
softc = lun->ctl_softc;
@@ -4450,49 +4685,8 @@ ctl_free_lun(struct ctl_lun *lun)
softc->ctl_luns[lun->lun] = NULL;
- if (TAILQ_FIRST(&lun->ooa_queue) != NULL) {
- printf("ctl_free_lun: aieee!! freeing a LUN with "
- "outstanding I/O!!\n");
- }
-
- /*
- * If we have anything pending on the RtR queue, remove it.
- */
- for (io = (union ctl_io *)STAILQ_FIRST(&softc->rtr_queue); io != NULL;
- io = next_io) {
- uint32_t targ_lun;
-
- next_io = (union ctl_io *)STAILQ_NEXT(&io->io_hdr, links);
- targ_lun = io->io_hdr.nexus.targ_lun;
- if (io->io_hdr.nexus.lun_map_fn != NULL)
- targ_lun = io->io_hdr.nexus.lun_map_fn(io->io_hdr.nexus.lun_map_arg, targ_lun);
- if ((io->io_hdr.nexus.targ_target.id == lun->target.id)
- && (targ_lun == lun->lun))
- STAILQ_REMOVE(&softc->rtr_queue, &io->io_hdr,
- ctl_io_hdr, links);
- }
-
- /*
- * Then remove everything from the blocked queue.
- */
- for (io = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue); io != NULL;
- io = next_io) {
- next_io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr,blocked_links);
- TAILQ_REMOVE(&lun->blocked_queue, &io->io_hdr, blocked_links);
- io->io_hdr.flags &= ~CTL_FLAG_BLOCKED;
- }
-
- /*
- * Now clear out the OOA queue, and free all the I/O.
- * XXX KDM should we notify the FETD here? We probably need to
- * quiesce the LUN before deleting it.
- */
- for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); io != NULL;
- io = next_io) {
- next_io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, ooa_links);
- TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
- ctl_free_io(io);
- }
+ if (!TAILQ_EMPTY(&lun->ooa_queue))
+ panic("Freeing a LUN %p with outstanding I/O!!\n", lun);
softc->num_luns--;
@@ -4500,7 +4694,7 @@ ctl_free_lun(struct ctl_lun *lun)
* XXX KDM this scheme only works for a single target/multiple LUN
* setup. It needs to be revamped for a multiple target scheme.
*
- * XXX KDM this results in fe->lun_disable() getting called twice,
+ * XXX KDM this results in port->lun_disable() getting called twice,
* once when ctl_disable_lun() is called, and a second time here.
* We really need to re-think the LUN disable semantics. There
* should probably be several steps/levels to LUN removal:
@@ -4512,37 +4706,37 @@ ctl_free_lun(struct ctl_lun *lun)
* the front end ports, at least for individual LUNs.
*/
#if 0
- STAILQ_FOREACH(fe, &softc->fe_list, links) {
+ STAILQ_FOREACH(port, &softc->port_list, links) {
int retval;
- retval = fe->lun_disable(fe->targ_lun_arg, lun->target,
+ retval = port->lun_disable(port->targ_lun_arg, lun->target,
lun->lun);
if (retval != 0) {
printf("ctl_free_lun: FETD %s port %d returned error "
"%d for lun_disable on target %ju lun %jd\n",
- fe->port_name, fe->targ_port, retval,
+ port->port_name, port->targ_port, retval,
(uintmax_t)lun->target.id, (intmax_t)lun->lun);
}
if (STAILQ_FIRST(&softc->lun_list) == NULL) {
- fe->status &= ~CTL_PORT_STATUS_LUN_ONLINE;
+ port->status &= ~CTL_PORT_STATUS_LUN_ONLINE;
- retval = fe->targ_disable(fe->targ_lun_arg,lun->target);
+ retval = port->targ_disable(port->targ_lun_arg,lun->target);
if (retval != 0) {
printf("ctl_free_lun: FETD %s port %d "
"returned error %d for targ_disable on "
- "target %ju\n", fe->port_name,
- fe->targ_port, retval,
+ "target %ju\n", port->port_name,
+ port->targ_port, retval,
(uintmax_t)lun->target.id);
} else
- fe->status &= ~CTL_PORT_STATUS_TARG_ONLINE;
+ port->status &= ~CTL_PORT_STATUS_TARG_ONLINE;
- if ((fe->status & CTL_PORT_STATUS_TARG_ONLINE) != 0)
+ if ((port->status & CTL_PORT_STATUS_TARG_ONLINE) != 0)
continue;
#if 0
- fe->port_offline(fe->onoff_arg);
- fe->status &= ~CTL_PORT_STATUS_ONLINE;
+ port->port_offline(port->onoff_arg);
+ port->status &= ~CTL_PORT_STATUS_ONLINE;
#endif
}
}
@@ -4554,12 +4748,15 @@ ctl_free_lun(struct ctl_lun *lun)
atomic_subtract_int(&lun->be_lun->be->num_luns, 1);
lun->be_lun->lun_shutdown(lun->be_lun->be_lun);
+ ctl_tpc_shutdown(lun);
+ mtx_destroy(&lun->lun_lock);
+ free(lun->lun_devid, M_CTL);
if (lun->flags & CTL_LUN_MALLOCED)
free(lun, M_CTL);
STAILQ_FOREACH(nlun, &softc->lun_list, links) {
for (i = 0; i < CTL_MAX_INITIATORS; i++) {
- nlun->pending_sense[i].ua_pending |= CTL_UA_LUN_CHANGE;
+ nlun->pending_ua[i] |= CTL_UA_LUN_CHANGE;
}
}
@@ -4582,15 +4779,12 @@ ctl_create_lun(struct ctl_be_lun *be_lun)
int
ctl_add_lun(struct ctl_be_lun *be_lun)
{
- struct ctl_softc *ctl_softc;
-
- ctl_softc = control_softc;
+ struct ctl_softc *ctl_softc = control_softc;
mtx_lock(&ctl_softc->ctl_lock);
STAILQ_INSERT_TAIL(&ctl_softc->pending_lun_queue, be_lun, links);
mtx_unlock(&ctl_softc->ctl_lock);
-
- ctl_wakeup_thread();
+ wakeup(&ctl_softc->pending_lun_queue);
return (0);
}
@@ -4599,7 +4793,7 @@ int
ctl_enable_lun(struct ctl_be_lun *be_lun)
{
struct ctl_softc *ctl_softc;
- struct ctl_frontend *fe, *nfe;
+ struct ctl_port *port, *nport;
struct ctl_lun *lun;
int retval;
@@ -4608,18 +4802,21 @@ ctl_enable_lun(struct ctl_be_lun *be_lun)
lun = (struct ctl_lun *)be_lun->ctl_lun;
mtx_lock(&ctl_softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
if ((lun->flags & CTL_LUN_DISABLED) == 0) {
/*
* eh? Why did we get called if the LUN is already
* enabled?
*/
+ mtx_unlock(&lun->lun_lock);
mtx_unlock(&ctl_softc->ctl_lock);
return (0);
}
lun->flags &= ~CTL_LUN_DISABLED;
+ mtx_unlock(&lun->lun_lock);
- for (fe = STAILQ_FIRST(&ctl_softc->fe_list); fe != NULL; fe = nfe) {
- nfe = STAILQ_NEXT(fe, links);
+ for (port = STAILQ_FIRST(&ctl_softc->port_list); port != NULL; port = nport) {
+ nport = STAILQ_NEXT(port, links);
/*
* Drop the lock while we call the FETD's enable routine.
@@ -4627,18 +4824,18 @@ ctl_enable_lun(struct ctl_be_lun *be_lun)
* case of the internal initiator frontend.
*/
mtx_unlock(&ctl_softc->ctl_lock);
- retval = fe->lun_enable(fe->targ_lun_arg, lun->target,lun->lun);
+ retval = port->lun_enable(port->targ_lun_arg, lun->target,lun->lun);
mtx_lock(&ctl_softc->ctl_lock);
if (retval != 0) {
printf("%s: FETD %s port %d returned error "
"%d for lun_enable on target %ju lun %jd\n",
- __func__, fe->port_name, fe->targ_port, retval,
+ __func__, port->port_name, port->targ_port, retval,
(uintmax_t)lun->target.id, (intmax_t)lun->lun);
}
#if 0
else {
/* NOTE: TODO: why does lun enable affect port status? */
- fe->status |= CTL_PORT_STATUS_LUN_ONLINE;
+ port->status |= CTL_PORT_STATUS_LUN_ONLINE;
}
#endif
}
@@ -4652,7 +4849,7 @@ int
ctl_disable_lun(struct ctl_be_lun *be_lun)
{
struct ctl_softc *ctl_softc;
- struct ctl_frontend *fe;
+ struct ctl_port *port;
struct ctl_lun *lun;
int retval;
@@ -4661,14 +4858,16 @@ ctl_disable_lun(struct ctl_be_lun *be_lun)
lun = (struct ctl_lun *)be_lun->ctl_lun;
mtx_lock(&ctl_softc->ctl_lock);
-
+ mtx_lock(&lun->lun_lock);
if (lun->flags & CTL_LUN_DISABLED) {
+ mtx_unlock(&lun->lun_lock);
mtx_unlock(&ctl_softc->ctl_lock);
return (0);
}
lun->flags |= CTL_LUN_DISABLED;
+ mtx_unlock(&lun->lun_lock);
- STAILQ_FOREACH(fe, &ctl_softc->fe_list, links) {
+ STAILQ_FOREACH(port, &ctl_softc->port_list, links) {
mtx_unlock(&ctl_softc->ctl_lock);
/*
* Drop the lock before we call the frontend's disable
@@ -4677,13 +4876,13 @@ ctl_disable_lun(struct ctl_be_lun *be_lun)
* XXX KDM what happens if the frontend list changes while
* we're traversing it? It's unlikely, but should be handled.
*/
- retval = fe->lun_disable(fe->targ_lun_arg, lun->target,
+ retval = port->lun_disable(port->targ_lun_arg, lun->target,
lun->lun);
mtx_lock(&ctl_softc->ctl_lock);
if (retval != 0) {
printf("ctl_alloc_lun: FETD %s port %d returned error "
"%d for lun_disable on target %ju lun %jd\n",
- fe->port_name, fe->targ_port, retval,
+ port->port_name, port->targ_port, retval,
(uintmax_t)lun->target.id, (intmax_t)lun->lun);
}
}
@@ -4703,9 +4902,9 @@ ctl_start_lun(struct ctl_be_lun *be_lun)
lun = (struct ctl_lun *)be_lun->ctl_lun;
- mtx_lock(&ctl_softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
lun->flags &= ~CTL_LUN_STOPPED;
- mtx_unlock(&ctl_softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
return (0);
}
@@ -4720,9 +4919,9 @@ ctl_stop_lun(struct ctl_be_lun *be_lun)
lun = (struct ctl_lun *)be_lun->ctl_lun;
- mtx_lock(&ctl_softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
lun->flags |= CTL_LUN_STOPPED;
- mtx_unlock(&ctl_softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
return (0);
}
@@ -4737,9 +4936,9 @@ ctl_lun_offline(struct ctl_be_lun *be_lun)
lun = (struct ctl_lun *)be_lun->ctl_lun;
- mtx_lock(&ctl_softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
lun->flags |= CTL_LUN_OFFLINE;
- mtx_unlock(&ctl_softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
return (0);
}
@@ -4754,9 +4953,9 @@ ctl_lun_online(struct ctl_be_lun *be_lun)
lun = (struct ctl_lun *)be_lun->ctl_lun;
- mtx_lock(&ctl_softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
lun->flags &= ~CTL_LUN_OFFLINE;
- mtx_unlock(&ctl_softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
return (0);
}
@@ -4771,13 +4970,13 @@ ctl_invalidate_lun(struct ctl_be_lun *be_lun)
lun = (struct ctl_lun *)be_lun->ctl_lun;
- mtx_lock(&ctl_softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
/*
* The LUN needs to be disabled before it can be marked invalid.
*/
if ((lun->flags & CTL_LUN_DISABLED) == 0) {
- mtx_unlock(&ctl_softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
return (-1);
}
/*
@@ -4790,9 +4989,13 @@ ctl_invalidate_lun(struct ctl_be_lun *be_lun)
* If we have something in the OOA queue, we'll free it when the
* last I/O completes.
*/
- if (TAILQ_FIRST(&lun->ooa_queue) == NULL)
+ if (TAILQ_EMPTY(&lun->ooa_queue)) {
+ mtx_unlock(&lun->lun_lock);
+ mtx_lock(&ctl_softc->ctl_lock);
ctl_free_lun(lun);
- mtx_unlock(&ctl_softc->ctl_lock);
+ mtx_unlock(&ctl_softc->ctl_lock);
+ } else
+ mtx_unlock(&lun->lun_lock);
return (0);
}
@@ -4806,9 +5009,9 @@ ctl_lun_inoperable(struct ctl_be_lun *be_lun)
ctl_softc = control_softc;
lun = (struct ctl_lun *)be_lun->ctl_lun;
- mtx_lock(&ctl_softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
lun->flags |= CTL_LUN_INOPERABLE;
- mtx_unlock(&ctl_softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
return (0);
}
@@ -4822,9 +5025,9 @@ ctl_lun_operable(struct ctl_be_lun *be_lun)
ctl_softc = control_softc;
lun = (struct ctl_lun *)be_lun->ctl_lun;
- mtx_lock(&ctl_softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
lun->flags &= ~CTL_LUN_INOPERABLE;
- mtx_unlock(&ctl_softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
return (0);
}
@@ -4844,6 +5047,7 @@ ctl_lun_power_lock(struct ctl_be_lun *be_lun, struct ctl_nexus *nexus,
mtx_lock(&softc->ctl_lock);
lun = (struct ctl_lun *)be_lun->ctl_lun;
+ mtx_lock(&lun->lun_lock);
page_index = NULL;
for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
@@ -4857,6 +5061,7 @@ ctl_lun_power_lock(struct ctl_be_lun *be_lun, struct ctl_nexus *nexus,
}
if (page_index == NULL) {
+ mtx_unlock(&lun->lun_lock);
mtx_unlock(&softc->ctl_lock);
printf("%s: APS subpage not found for lun %ju!\n", __func__,
(uintmax_t)lun->lun);
@@ -4867,6 +5072,7 @@ ctl_lun_power_lock(struct ctl_be_lun *be_lun, struct ctl_nexus *nexus,
&& (softc->aps_locked_lun != lun->lun)) {
printf("%s: attempt to lock LUN %llu when %llu is already "
"locked\n");
+ mtx_unlock(&lun->lun_lock);
mtx_unlock(&softc->ctl_lock);
return (1);
}
@@ -4903,11 +5109,13 @@ ctl_lun_power_lock(struct ctl_be_lun *be_lun, struct ctl_nexus *nexus,
if (isc_retval > CTL_HA_STATUS_SUCCESS) {
printf("%s: APS (lock=%d) error returned from "
"ctl_ha_msg_send: %d\n", __func__, lock, isc_retval);
+ mtx_unlock(&lun->lun_lock);
mtx_unlock(&softc->ctl_lock);
return (1);
}
}
+ mtx_unlock(&lun->lun_lock);
mtx_unlock(&softc->ctl_lock);
return (0);
@@ -4922,14 +5130,14 @@ ctl_lun_capacity_changed(struct ctl_be_lun *be_lun)
softc = control_softc;
- mtx_lock(&softc->ctl_lock);
-
lun = (struct ctl_lun *)be_lun->ctl_lun;
+ mtx_lock(&lun->lun_lock);
+
for (i = 0; i < CTL_MAX_INITIATORS; i++)
- lun->pending_sense[i].ua_pending |= CTL_UA_CAPACITY_CHANGED;
+ lun->pending_ua[i] |= CTL_UA_CAPACITY_CHANGED;
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
}
/*
@@ -4970,7 +5178,8 @@ ctl_config_move_done(union ctl_io *io)
/*sks_valid*/ 1,
/*retry_count*/
io->io_hdr.port_status);
- free(io->scsiio.kern_data_ptr, M_CTL);
+ if (io->io_hdr.flags & CTL_FLAG_ALLOCATED)
+ free(io->scsiio.kern_data_ptr, M_CTL);
ctl_done(io);
goto bailout;
}
@@ -4983,7 +5192,8 @@ ctl_config_move_done(union ctl_io *io)
* S/G list. If we start using S/G lists for config data,
* we'll need to know how to clean them up here as well.
*/
- free(io->scsiio.kern_data_ptr, M_CTL);
+ if (io->io_hdr.flags & CTL_FLAG_ALLOCATED)
+ free(io->scsiio.kern_data_ptr, M_CTL);
/* Hopefully the user has already set the status... */
ctl_done(io);
} else {
@@ -5012,6 +5222,31 @@ bailout:
/*
* This gets called by a backend driver when it is done with a
+ * data_submit method.
+ */
+void
+ctl_data_submit_done(union ctl_io *io)
+{
+ /*
+ * If the IO_CONT flag is set, we need to call the supplied
+ * function to continue processing the I/O, instead of completing
+ * the I/O just yet.
+ *
+ * If there is an error, though, we don't want to keep processing.
+ * Instead, just send status back to the initiator.
+ */
+ if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) &&
+ (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 &&
+ ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
+ (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
+ io->scsiio.io_cont(io);
+ return;
+ }
+ ctl_done(io);
+}
+
+/*
+ * This gets called by a backend driver when it is done with a
* configuration write.
*/
void
@@ -5060,50 +5295,11 @@ ctl_scsi_release(struct ctl_scsiio *ctsio)
ctl_softc = control_softc;
switch (ctsio->cdb[0]) {
- case RELEASE: {
- struct scsi_release *cdb;
-
- cdb = (struct scsi_release *)ctsio->cdb;
- if ((cdb->byte2 & 0x1f) != 0) {
- ctl_set_invalid_field(ctsio,
- /*sks_valid*/ 1,
- /*command*/ 1,
- /*field*/ 1,
- /*bit_valid*/ 0,
- /*bit*/ 0);
- ctl_done((union ctl_io *)ctsio);
- return (CTL_RETVAL_COMPLETE);
- }
- break;
- }
case RELEASE_10: {
struct scsi_release_10 *cdb;
cdb = (struct scsi_release_10 *)ctsio->cdb;
- if ((cdb->byte2 & SR10_EXTENT) != 0) {
- ctl_set_invalid_field(ctsio,
- /*sks_valid*/ 1,
- /*command*/ 1,
- /*field*/ 1,
- /*bit_valid*/ 1,
- /*bit*/ 0);
- ctl_done((union ctl_io *)ctsio);
- return (CTL_RETVAL_COMPLETE);
-
- }
-
- if ((cdb->byte2 & SR10_3RDPTY) != 0) {
- ctl_set_invalid_field(ctsio,
- /*sks_valid*/ 1,
- /*command*/ 1,
- /*field*/ 1,
- /*bit_valid*/ 1,
- /*bit*/ 4);
- ctl_done((union ctl_io *)ctsio);
- return (CTL_RETVAL_COMPLETE);
- }
-
if (cdb->byte2 & SR10_LONGID)
longid = 1;
else
@@ -5143,7 +5339,7 @@ ctl_scsi_release(struct ctl_scsiio *ctsio)
if (length > 0)
thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr);
- mtx_lock(&ctl_softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
/*
* According to SPC, it is not an error for an intiator to attempt
@@ -5161,6 +5357,8 @@ ctl_scsi_release(struct ctl_scsiio *ctsio)
}
}
+ mtx_unlock(&lun->lun_lock);
+
ctsio->scsi_status = SCSI_STATUS_OK;
ctsio->io_hdr.status = CTL_SUCCESS;
@@ -5169,8 +5367,6 @@ ctl_scsi_release(struct ctl_scsiio *ctsio)
ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
}
- mtx_unlock(&ctl_softc->ctl_lock);
-
ctl_done((union ctl_io *)ctsio);
return (CTL_RETVAL_COMPLETE);
}
@@ -5197,49 +5393,11 @@ ctl_scsi_reserve(struct ctl_scsiio *ctsio)
ctl_softc = control_softc;
switch (ctsio->cdb[0]) {
- case RESERVE: {
- struct scsi_reserve *cdb;
-
- cdb = (struct scsi_reserve *)ctsio->cdb;
- if ((cdb->byte2 & 0x1f) != 0) {
- ctl_set_invalid_field(ctsio,
- /*sks_valid*/ 1,
- /*command*/ 1,
- /*field*/ 1,
- /*bit_valid*/ 0,
- /*bit*/ 0);
- ctl_done((union ctl_io *)ctsio);
- return (CTL_RETVAL_COMPLETE);
- }
- resv_id = cdb->resv_id;
- length = scsi_2btoul(cdb->length);
- break;
- }
case RESERVE_10: {
struct scsi_reserve_10 *cdb;
cdb = (struct scsi_reserve_10 *)ctsio->cdb;
- if ((cdb->byte2 & SR10_EXTENT) != 0) {
- ctl_set_invalid_field(ctsio,
- /*sks_valid*/ 1,
- /*command*/ 1,
- /*field*/ 1,
- /*bit_valid*/ 1,
- /*bit*/ 0);
- ctl_done((union ctl_io *)ctsio);
- return (CTL_RETVAL_COMPLETE);
- }
- if ((cdb->byte2 & SR10_3RDPTY) != 0) {
- ctl_set_invalid_field(ctsio,
- /*sks_valid*/ 1,
- /*command*/ 1,
- /*field*/ 1,
- /*bit_valid*/ 1,
- /*bit*/ 4);
- ctl_done((union ctl_io *)ctsio);
- return (CTL_RETVAL_COMPLETE);
- }
if (cdb->byte2 & SR10_LONGID)
longid = 1;
else
@@ -5278,7 +5436,7 @@ ctl_scsi_reserve(struct ctl_scsiio *ctsio)
if (length > 0)
thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr);
- mtx_lock(&ctl_softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
if (lun->flags & CTL_LUN_RESERVED) {
if ((ctsio->io_hdr.nexus.initid.id != lun->rsv_nexus.initid.id)
|| (ctsio->io_hdr.nexus.targ_port != lun->rsv_nexus.targ_port)
@@ -5297,13 +5455,13 @@ ctl_scsi_reserve(struct ctl_scsiio *ctsio)
ctsio->io_hdr.status = CTL_SUCCESS;
bailout:
+ mtx_unlock(&lun->lun_lock);
+
if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
free(ctsio->kern_data_ptr, M_CTL);
ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
}
- mtx_unlock(&ctl_softc->ctl_lock);
-
ctl_done((union ctl_io *)ctsio);
return (CTL_RETVAL_COMPLETE);
}
@@ -5352,35 +5510,6 @@ ctl_start_stop(struct ctl_scsiio *ctsio)
return (CTL_RETVAL_COMPLETE);
}
- /*
- * We don't support the power conditions field. We need to check
- * this prior to checking the load/eject and start/stop bits.
- */
- if ((cdb->how & SSS_PC_MASK) != SSS_PC_START_VALID) {
- ctl_set_invalid_field(ctsio,
- /*sks_valid*/ 1,
- /*command*/ 1,
- /*field*/ 4,
- /*bit_valid*/ 1,
- /*bit*/ 4);
- ctl_done((union ctl_io *)ctsio);
- return (CTL_RETVAL_COMPLETE);
- }
-
- /*
- * Media isn't removable, so we can't load or eject it.
- */
- if ((cdb->how & SSS_LOEJ) != 0) {
- ctl_set_invalid_field(ctsio,
- /*sks_valid*/ 1,
- /*command*/ 1,
- /*field*/ 4,
- /*bit_valid*/ 1,
- /*bit*/ 1);
- ctl_done((union ctl_io *)ctsio);
- return (CTL_RETVAL_COMPLETE);
- }
-
if ((lun->flags & CTL_LUN_PR_RESERVED)
&& ((cdb->how & SSS_START)==0)) {
uint32_t residx;
@@ -5412,7 +5541,7 @@ ctl_start_stop(struct ctl_scsiio *ctsio)
* Figure out a reasonable way to port this?
*/
#ifdef NEEDTOPORT
- mtx_lock(&ctl_softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
if (((cdb->byte2 & SSS_ONOFFLINE) == 0)
&& (lun->flags & CTL_LUN_OFFLINE)) {
@@ -5420,11 +5549,11 @@ ctl_start_stop(struct ctl_scsiio *ctsio)
* If the LUN is offline, and the on/offline bit isn't set,
* reject the start or stop. Otherwise, let it through.
*/
- mtx_unlock(&ctl_softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
ctl_set_lun_not_ready(ctsio);
ctl_done((union ctl_io *)ctsio);
} else {
- mtx_unlock(&ctl_softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
#endif /* NEEDTOPORT */
/*
* This could be a start or a stop when we're online,
@@ -5478,7 +5607,6 @@ ctl_sync_cache(struct ctl_scsiio *ctsio)
struct ctl_softc *ctl_softc;
uint64_t starting_lba;
uint32_t block_count;
- int reladr, immed;
int retval;
CTL_DEBUG_PRINT(("ctl_sync_cache\n"));
@@ -5486,20 +5614,12 @@ ctl_sync_cache(struct ctl_scsiio *ctsio)
lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
ctl_softc = control_softc;
retval = 0;
- reladr = 0;
- immed = 0;
switch (ctsio->cdb[0]) {
case SYNCHRONIZE_CACHE: {
struct scsi_sync_cache *cdb;
cdb = (struct scsi_sync_cache *)ctsio->cdb;
- if (cdb->byte2 & SSC_RELADR)
- reladr = 1;
-
- if (cdb->byte2 & SSC_IMMED)
- immed = 1;
-
starting_lba = scsi_4btoul(cdb->begin_lba);
block_count = scsi_2btoul(cdb->lb_count);
break;
@@ -5508,12 +5628,6 @@ ctl_sync_cache(struct ctl_scsiio *ctsio)
struct scsi_sync_cache_16 *cdb;
cdb = (struct scsi_sync_cache_16 *)ctsio->cdb;
- if (cdb->byte2 & SSC_RELADR)
- reladr = 1;
-
- if (cdb->byte2 & SSC_IMMED)
- immed = 1;
-
starting_lba = scsi_8btou64(cdb->begin_lba);
block_count = scsi_4btoul(cdb->lb_count);
break;
@@ -5525,41 +5639,6 @@ ctl_sync_cache(struct ctl_scsiio *ctsio)
break; /* NOTREACHED */
}
- if (immed) {
- /*
- * We don't support the immediate bit. Since it's in the
- * same place for the 10 and 16 byte SYNCHRONIZE CACHE
- * commands, we can just return the same error in either
- * case.
- */
- ctl_set_invalid_field(ctsio,
- /*sks_valid*/ 1,
- /*command*/ 1,
- /*field*/ 1,
- /*bit_valid*/ 1,
- /*bit*/ 1);
- ctl_done((union ctl_io *)ctsio);
- goto bailout;
- }
-
- if (reladr) {
- /*
- * We don't support the reladr bit either. It can only be
- * used with linked commands, and we don't support linked
- * commands. Since the bit is in the same place for the
- * 10 and 16 byte SYNCHRONIZE CACHE * commands, we can
- * just return the same error in either case.
- */
- ctl_set_invalid_field(ctsio,
- /*sks_valid*/ 1,
- /*command*/ 1,
- /*field*/ 1,
- /*bit_valid*/ 1,
- /*bit*/ 0);
- ctl_done((union ctl_io *)ctsio);
- goto bailout;
- }
-
/*
* We check the LBA and length, but don't do anything with them.
* A SYNCHRONIZE CACHE will cause the entire cache for this lun to
@@ -5585,14 +5664,14 @@ ctl_sync_cache(struct ctl_scsiio *ctsio)
* Check to see whether we're configured to send the SYNCHRONIZE
* CACHE command directly to the back end.
*/
- mtx_lock(&ctl_softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
if ((ctl_softc->flags & CTL_FLAG_REAL_SYNC)
&& (++(lun->sync_count) >= lun->sync_interval)) {
lun->sync_count = 0;
- mtx_unlock(&ctl_softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
retval = lun->backend->config_write((union ctl_io *)ctsio);
} else {
- mtx_unlock(&ctl_softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
ctl_set_success(ctsio);
ctl_done((union ctl_io *)ctsio);
}
@@ -5685,9 +5764,9 @@ ctl_format(struct ctl_scsiio *ctsio)
* get them to issue a command that will basically make them think
* they're blowing away the media.
*/
- mtx_lock(&ctl_softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
lun->flags &= ~CTL_LUN_INOPERABLE;
- mtx_unlock(&ctl_softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
ctsio->scsi_status = SCSI_STATUS_OK;
ctsio->io_hdr.status = CTL_SUCCESS;
@@ -5703,26 +5782,40 @@ bailout:
}
int
-ctl_write_buffer(struct ctl_scsiio *ctsio)
+ctl_read_buffer(struct ctl_scsiio *ctsio)
{
- struct scsi_write_buffer *cdb;
- struct copan_page_header *header;
+ struct scsi_read_buffer *cdb;
struct ctl_lun *lun;
- struct ctl_softc *ctl_softc;
int buffer_offset, len;
- int retval;
+ static uint8_t descr[4];
+ static uint8_t echo_descr[4] = { 0 };
- header = NULL;
+ CTL_DEBUG_PRINT(("ctl_read_buffer\n"));
- retval = CTL_RETVAL_COMPLETE;
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+ cdb = (struct scsi_read_buffer *)ctsio->cdb;
- CTL_DEBUG_PRINT(("ctl_write_buffer\n"));
+ if (lun->flags & CTL_LUN_PR_RESERVED) {
+ uint32_t residx;
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
- ctl_softc = control_softc;
- cdb = (struct scsi_write_buffer *)ctsio->cdb;
+ /*
+ * XXX KDM need a lock here.
+ */
+ residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
+ if ((lun->res_type == SPR_TYPE_EX_AC
+ && residx != lun->pr_res_idx)
+ || ((lun->res_type == SPR_TYPE_EX_AC_RO
+ || lun->res_type == SPR_TYPE_EX_AC_AR)
+ && !lun->per_res[residx].registered)) {
+ ctl_set_reservation_conflict(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+ }
- if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA) {
+ if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA &&
+ (cdb->byte2 & RWB_MODE) != RWB_MODE_ECHO_DESCR &&
+ (cdb->byte2 & RWB_MODE) != RWB_MODE_DESCR) {
ctl_set_invalid_field(ctsio,
/*sks_valid*/ 1,
/*command*/ 1,
@@ -5732,36 +5825,73 @@ ctl_write_buffer(struct ctl_scsiio *ctsio)
ctl_done((union ctl_io *)ctsio);
return (CTL_RETVAL_COMPLETE);
}
- if (cdb->buffer_id != 0) {
+
+ len = scsi_3btoul(cdb->length);
+ buffer_offset = scsi_3btoul(cdb->offset);
+
+ if (buffer_offset + len > sizeof(lun->write_buffer)) {
ctl_set_invalid_field(ctsio,
/*sks_valid*/ 1,
/*command*/ 1,
- /*field*/ 2,
+ /*field*/ 6,
/*bit_valid*/ 0,
/*bit*/ 0);
ctl_done((union ctl_io *)ctsio);
return (CTL_RETVAL_COMPLETE);
}
- len = scsi_3btoul(cdb->length);
- buffer_offset = scsi_3btoul(cdb->offset);
+ if ((cdb->byte2 & RWB_MODE) == RWB_MODE_DESCR) {
+ descr[0] = 0;
+ scsi_ulto3b(sizeof(lun->write_buffer), &descr[1]);
+ ctsio->kern_data_ptr = descr;
+ len = min(len, sizeof(descr));
+ } else if ((cdb->byte2 & RWB_MODE) == RWB_MODE_ECHO_DESCR) {
+ ctsio->kern_data_ptr = echo_descr;
+ len = min(len, sizeof(echo_descr));
+ } else
+ ctsio->kern_data_ptr = lun->write_buffer + buffer_offset;
+ ctsio->kern_data_len = len;
+ ctsio->kern_total_len = len;
+ ctsio->kern_data_resid = 0;
+ ctsio->kern_rel_offset = 0;
+ ctsio->kern_sg_entries = 0;
+ ctsio->be_move_done = ctl_config_move_done;
+ ctl_datamove((union ctl_io *)ctsio);
+
+ return (CTL_RETVAL_COMPLETE);
+}
+
+int
+ctl_write_buffer(struct ctl_scsiio *ctsio)
+{
+ struct scsi_write_buffer *cdb;
+ struct ctl_lun *lun;
+ int buffer_offset, len;
- if (len > sizeof(lun->write_buffer)) {
+ CTL_DEBUG_PRINT(("ctl_write_buffer\n"));
+
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+ cdb = (struct scsi_write_buffer *)ctsio->cdb;
+
+ if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA) {
ctl_set_invalid_field(ctsio,
/*sks_valid*/ 1,
/*command*/ 1,
- /*field*/ 6,
- /*bit_valid*/ 0,
- /*bit*/ 0);
+ /*field*/ 1,
+ /*bit_valid*/ 1,
+ /*bit*/ 4);
ctl_done((union ctl_io *)ctsio);
return (CTL_RETVAL_COMPLETE);
}
- if (buffer_offset != 0) {
+ len = scsi_3btoul(cdb->length);
+ buffer_offset = scsi_3btoul(cdb->offset);
+
+ if (buffer_offset + len > sizeof(lun->write_buffer)) {
ctl_set_invalid_field(ctsio,
/*sks_valid*/ 1,
/*command*/ 1,
- /*field*/ 3,
+ /*field*/ 6,
/*bit_valid*/ 0,
/*bit*/ 0);
ctl_done((union ctl_io *)ctsio);
@@ -5773,7 +5903,7 @@ ctl_write_buffer(struct ctl_scsiio *ctsio)
* malloc it and tell the caller the data buffer is here.
*/
if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
- ctsio->kern_data_ptr = lun->write_buffer;
+ ctsio->kern_data_ptr = lun->write_buffer + buffer_offset;
ctsio->kern_data_len = len;
ctsio->kern_total_len = len;
ctsio->kern_data_resid = 0;
@@ -5795,7 +5925,7 @@ int
ctl_write_same(struct ctl_scsiio *ctsio)
{
struct ctl_lun *lun;
- struct ctl_lba_len_flags lbalen;
+ struct ctl_lba_len_flags *lbalen;
uint64_t lba;
uint32_t num_blocks;
int len, retval;
@@ -5888,11 +6018,10 @@ ctl_write_same(struct ctl_scsiio *ctsio)
return (CTL_RETVAL_COMPLETE);
}
- lbalen.lba = lba;
- lbalen.len = num_blocks;
- lbalen.flags = byte2;
- memcpy(ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, &lbalen,
- sizeof(lbalen));
+ lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
+ lbalen->lba = lba;
+ lbalen->len = num_blocks;
+ lbalen->flags = byte2;
retval = lun->backend->config_write((union ctl_io *)ctsio);
return (retval);
@@ -5903,7 +6032,7 @@ ctl_unmap(struct ctl_scsiio *ctsio)
{
struct ctl_lun *lun;
struct scsi_unmap *cdb;
- struct ctl_ptr_len_flags ptrlen;
+ struct ctl_ptr_len_flags *ptrlen;
struct scsi_unmap_header *hdr;
struct scsi_unmap_desc *buf, *end;
uint64_t lba;
@@ -5958,11 +6087,10 @@ ctl_unmap(struct ctl_scsiio *ctsio)
buf = (struct scsi_unmap_desc *)(hdr + 1);
end = buf + len / sizeof(*buf);
- ptrlen.ptr = (void *)buf;
- ptrlen.len = len;
- ptrlen.flags = byte2;
- memcpy(ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, &ptrlen,
- sizeof(ptrlen));
+ ptrlen = (struct ctl_ptr_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
+ ptrlen->ptr = (void *)buf;
+ ptrlen->len = len;
+ ptrlen->flags = byte2;
for (; buf < end; buf++) {
lba = scsi_8btou64(buf->lba);
@@ -6017,7 +6145,7 @@ ctl_control_page_handler(struct ctl_scsiio *ctsio,
softc = control_softc;
- mtx_lock(&softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
if (((current_cp->rlec & SCP_DSENSE) == 0)
&& ((user_cp->rlec & SCP_DSENSE) != 0)) {
/*
@@ -6109,11 +6237,10 @@ ctl_control_page_handler(struct ctl_scsiio *ctsio,
if (i == initidx)
continue;
- lun->pending_sense[i].ua_pending |=
- CTL_UA_MODE_CHANGE;
+ lun->pending_ua[i] |= CTL_UA_MODE_CHANGE;
}
}
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
return (0);
}
@@ -6692,6 +6819,24 @@ ctl_mode_sense(struct ctl_scsiio *ctsio)
else
control_dev = 0;
+ if (lun->flags & CTL_LUN_PR_RESERVED) {
+ uint32_t residx;
+
+ /*
+ * XXX KDM need a lock here.
+ */
+ residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
+ if ((lun->res_type == SPR_TYPE_EX_AC
+ && residx != lun->pr_res_idx)
+ || ((lun->res_type == SPR_TYPE_EX_AC_RO
+ || lun->res_type == SPR_TYPE_EX_AC_AR)
+ && !lun->per_res[residx].registered)) {
+ ctl_set_reservation_conflict(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+ }
+
switch (ctsio->cdb[0]) {
case MODE_SENSE_6: {
struct scsi_mode_sense_6 *cdb;
@@ -6986,6 +7131,7 @@ ctl_mode_sense(struct ctl_scsiio *ctsio)
ctsio->scsi_status = SCSI_STATUS_OK;
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
ctsio->be_move_done = ctl_config_move_done;
ctl_datamove((union ctl_io *)ctsio);
@@ -7045,13 +7191,14 @@ ctl_read_capacity(struct ctl_scsiio *ctsio)
ctsio->scsi_status = SCSI_STATUS_OK;
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
ctsio->be_move_done = ctl_config_move_done;
ctl_datamove((union ctl_io *)ctsio);
return (CTL_RETVAL_COMPLETE);
}
-static int
+int
ctl_read_capacity_16(struct ctl_scsiio *ctsio)
{
struct scsi_read_capacity_16 *cdb;
@@ -7107,6 +7254,7 @@ ctl_read_capacity_16(struct ctl_scsiio *ctsio)
ctsio->scsi_status = SCSI_STATUS_OK;
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
ctsio->be_move_done = ctl_config_move_done;
ctl_datamove((union ctl_io *)ctsio);
@@ -7114,83 +7262,330 @@ ctl_read_capacity_16(struct ctl_scsiio *ctsio)
}
int
-ctl_service_action_in(struct ctl_scsiio *ctsio)
+ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio)
{
- struct scsi_service_action_in *cdb;
+ struct scsi_maintenance_in *cdb;
int retval;
+ int alloc_len, ext, total_len = 0, g, p, pc, pg;
+ int num_target_port_groups, num_target_ports, single;
+ struct ctl_lun *lun;
+ struct ctl_softc *softc;
+ struct ctl_port *port;
+ struct scsi_target_group_data *rtg_ptr;
+ struct scsi_target_group_data_extended *rtg_ext_ptr;
+ struct scsi_target_port_group_descriptor *tpg_desc;
- CTL_DEBUG_PRINT(("ctl_service_action_in\n"));
+ CTL_DEBUG_PRINT(("ctl_report_tagret_port_groups\n"));
- cdb = (struct scsi_service_action_in *)ctsio->cdb;
+ cdb = (struct scsi_maintenance_in *)ctsio->cdb;
+ softc = control_softc;
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
retval = CTL_RETVAL_COMPLETE;
- switch (cdb->service_action) {
- case SRC16_SERVICE_ACTION:
- retval = ctl_read_capacity_16(ctsio);
+ switch (cdb->byte2 & STG_PDF_MASK) {
+ case STG_PDF_LENGTH:
+ ext = 0;
+ break;
+ case STG_PDF_EXTENDED:
+ ext = 1;
break;
default:
ctl_set_invalid_field(/*ctsio*/ ctsio,
/*sks_valid*/ 1,
/*command*/ 1,
- /*field*/ 1,
+ /*field*/ 2,
/*bit_valid*/ 1,
- /*bit*/ 4);
+ /*bit*/ 5);
ctl_done((union ctl_io *)ctsio);
- break;
+ return(retval);
}
- return (retval);
+ single = ctl_is_single;
+ if (single)
+ num_target_port_groups = 1;
+ else
+ num_target_port_groups = NUM_TARGET_PORT_GROUPS;
+ num_target_ports = 0;
+ mtx_lock(&softc->ctl_lock);
+ STAILQ_FOREACH(port, &softc->port_list, links) {
+ if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
+ continue;
+ if (ctl_map_lun_back(port->targ_port, lun->lun) >= CTL_MAX_LUNS)
+ continue;
+ num_target_ports++;
+ }
+ mtx_unlock(&softc->ctl_lock);
+
+ if (ext)
+ total_len = sizeof(struct scsi_target_group_data_extended);
+ else
+ total_len = sizeof(struct scsi_target_group_data);
+ total_len += sizeof(struct scsi_target_port_group_descriptor) *
+ num_target_port_groups +
+ sizeof(struct scsi_target_port_descriptor) *
+ num_target_ports * num_target_port_groups;
+
+ alloc_len = scsi_4btoul(cdb->length);
+
+ ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
+
+ ctsio->kern_sg_entries = 0;
+
+ if (total_len < alloc_len) {
+ ctsio->residual = alloc_len - total_len;
+ ctsio->kern_data_len = total_len;
+ ctsio->kern_total_len = total_len;
+ } else {
+ ctsio->residual = 0;
+ ctsio->kern_data_len = alloc_len;
+ ctsio->kern_total_len = alloc_len;
+ }
+ ctsio->kern_data_resid = 0;
+ ctsio->kern_rel_offset = 0;
+
+ if (ext) {
+ rtg_ext_ptr = (struct scsi_target_group_data_extended *)
+ ctsio->kern_data_ptr;
+ scsi_ulto4b(total_len - 4, rtg_ext_ptr->length);
+ rtg_ext_ptr->format_type = 0x10;
+ rtg_ext_ptr->implicit_transition_time = 0;
+ tpg_desc = &rtg_ext_ptr->groups[0];
+ } else {
+ rtg_ptr = (struct scsi_target_group_data *)
+ ctsio->kern_data_ptr;
+ scsi_ulto4b(total_len - 4, rtg_ptr->length);
+ tpg_desc = &rtg_ptr->groups[0];
+ }
+
+ pg = ctsio->io_hdr.nexus.targ_port / CTL_MAX_PORTS;
+ mtx_lock(&softc->ctl_lock);
+ for (g = 0; g < num_target_port_groups; g++) {
+ if (g == pg)
+ tpg_desc->pref_state = TPG_PRIMARY |
+ TPG_ASYMMETRIC_ACCESS_OPTIMIZED;
+ else
+ tpg_desc->pref_state =
+ TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
+ tpg_desc->support = TPG_AO_SUP;
+ if (!single)
+ tpg_desc->support |= TPG_AN_SUP;
+ scsi_ulto2b(g + 1, tpg_desc->target_port_group);
+ tpg_desc->status = TPG_IMPLICIT;
+ pc = 0;
+ STAILQ_FOREACH(port, &softc->port_list, links) {
+ if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
+ continue;
+ if (ctl_map_lun_back(port->targ_port, lun->lun) >=
+ CTL_MAX_LUNS)
+ continue;
+ p = port->targ_port % CTL_MAX_PORTS + g * CTL_MAX_PORTS;
+ scsi_ulto2b(p, tpg_desc->descriptors[pc].
+ relative_target_port_identifier);
+ pc++;
+ }
+ tpg_desc->target_port_count = pc;
+ tpg_desc = (struct scsi_target_port_group_descriptor *)
+ &tpg_desc->descriptors[pc];
+ }
+ mtx_unlock(&softc->ctl_lock);
+
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+ ctsio->be_move_done = ctl_config_move_done;
+
+ CTL_DEBUG_PRINT(("buf = %x %x %x %x %x %x %x %x\n",
+ ctsio->kern_data_ptr[0], ctsio->kern_data_ptr[1],
+ ctsio->kern_data_ptr[2], ctsio->kern_data_ptr[3],
+ ctsio->kern_data_ptr[4], ctsio->kern_data_ptr[5],
+ ctsio->kern_data_ptr[6], ctsio->kern_data_ptr[7]));
+
+ ctl_datamove((union ctl_io *)ctsio);
+ return(retval);
}
int
-ctl_maintenance_in(struct ctl_scsiio *ctsio)
+ctl_report_supported_opcodes(struct ctl_scsiio *ctsio)
{
- struct scsi_maintenance_in *cdb;
- int retval;
- int alloc_len, total_len = 0;
- int num_target_port_groups, single;
struct ctl_lun *lun;
- struct ctl_softc *softc;
- struct scsi_target_group_data *rtg_ptr;
- struct scsi_target_port_group_descriptor *tpg_desc_ptr1, *tpg_desc_ptr2;
- struct scsi_target_port_descriptor *tp_desc_ptr1_1, *tp_desc_ptr1_2,
- *tp_desc_ptr2_1, *tp_desc_ptr2_2;
+ struct scsi_report_supported_opcodes *cdb;
+ const struct ctl_cmd_entry *entry, *sentry;
+ struct scsi_report_supported_opcodes_all *all;
+ struct scsi_report_supported_opcodes_descr *descr;
+ struct scsi_report_supported_opcodes_one *one;
+ int retval;
+ int alloc_len, total_len;
+ int opcode, service_action, i, j, num;
- CTL_DEBUG_PRINT(("ctl_maintenance_in\n"));
+ CTL_DEBUG_PRINT(("ctl_report_supported_opcodes\n"));
- cdb = (struct scsi_maintenance_in *)ctsio->cdb;
- softc = control_softc;
+ cdb = (struct scsi_report_supported_opcodes *)ctsio->cdb;
lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
retval = CTL_RETVAL_COMPLETE;
- if ((cdb->byte2 & SERVICE_ACTION_MASK) != SA_RPRT_TRGT_GRP) {
+ opcode = cdb->requested_opcode;
+ service_action = scsi_2btoul(cdb->requested_service_action);
+ switch (cdb->options & RSO_OPTIONS_MASK) {
+ case RSO_OPTIONS_ALL:
+ num = 0;
+ for (i = 0; i < 256; i++) {
+ entry = &ctl_cmd_table[i];
+ if (entry->flags & CTL_CMD_FLAG_SA5) {
+ for (j = 0; j < 32; j++) {
+ sentry = &((const struct ctl_cmd_entry *)
+ entry->execute)[j];
+ if (ctl_cmd_applicable(
+ lun->be_lun->lun_type, sentry))
+ num++;
+ }
+ } else {
+ if (ctl_cmd_applicable(lun->be_lun->lun_type,
+ entry))
+ num++;
+ }
+ }
+ total_len = sizeof(struct scsi_report_supported_opcodes_all) +
+ num * sizeof(struct scsi_report_supported_opcodes_descr);
+ break;
+ case RSO_OPTIONS_OC:
+ if (ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) {
+ ctl_set_invalid_field(/*ctsio*/ ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 1,
+ /*field*/ 2,
+ /*bit_valid*/ 1,
+ /*bit*/ 2);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+ total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32;
+ break;
+ case RSO_OPTIONS_OC_SA:
+ if ((ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) == 0 ||
+ service_action >= 32) {
+ ctl_set_invalid_field(/*ctsio*/ ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 1,
+ /*field*/ 2,
+ /*bit_valid*/ 1,
+ /*bit*/ 2);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+ total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32;
+ break;
+ default:
ctl_set_invalid_field(/*ctsio*/ ctsio,
/*sks_valid*/ 1,
/*command*/ 1,
- /*field*/ 1,
+ /*field*/ 2,
/*bit_valid*/ 1,
- /*bit*/ 4);
+ /*bit*/ 2);
ctl_done((union ctl_io *)ctsio);
- return(retval);
+ return (CTL_RETVAL_COMPLETE);
}
- mtx_lock(&softc->ctl_lock);
- single = ctl_is_single;
- mtx_unlock(&softc->ctl_lock);
+ alloc_len = scsi_4btoul(cdb->length);
- if (single)
- num_target_port_groups = NUM_TARGET_PORT_GROUPS - 1;
- else
- num_target_port_groups = NUM_TARGET_PORT_GROUPS;
+ ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
- total_len = sizeof(struct scsi_target_group_data) +
- sizeof(struct scsi_target_port_group_descriptor) *
- num_target_port_groups +
- sizeof(struct scsi_target_port_descriptor) *
- NUM_PORTS_PER_GRP * num_target_port_groups;
+ ctsio->kern_sg_entries = 0;
+
+ if (total_len < alloc_len) {
+ ctsio->residual = alloc_len - total_len;
+ ctsio->kern_data_len = total_len;
+ ctsio->kern_total_len = total_len;
+ } else {
+ ctsio->residual = 0;
+ ctsio->kern_data_len = alloc_len;
+ ctsio->kern_total_len = alloc_len;
+ }
+ ctsio->kern_data_resid = 0;
+ ctsio->kern_rel_offset = 0;
+
+ switch (cdb->options & RSO_OPTIONS_MASK) {
+ case RSO_OPTIONS_ALL:
+ all = (struct scsi_report_supported_opcodes_all *)
+ ctsio->kern_data_ptr;
+ num = 0;
+ for (i = 0; i < 256; i++) {
+ entry = &ctl_cmd_table[i];
+ if (entry->flags & CTL_CMD_FLAG_SA5) {
+ for (j = 0; j < 32; j++) {
+ sentry = &((const struct ctl_cmd_entry *)
+ entry->execute)[j];
+ if (!ctl_cmd_applicable(
+ lun->be_lun->lun_type, sentry))
+ continue;
+ descr = &all->descr[num++];
+ descr->opcode = i;
+ scsi_ulto2b(j, descr->service_action);
+ descr->flags = RSO_SERVACTV;
+ scsi_ulto2b(sentry->length,
+ descr->cdb_length);
+ }
+ } else {
+ if (!ctl_cmd_applicable(lun->be_lun->lun_type,
+ entry))
+ continue;
+ descr = &all->descr[num++];
+ descr->opcode = i;
+ scsi_ulto2b(0, descr->service_action);
+ descr->flags = 0;
+ scsi_ulto2b(entry->length, descr->cdb_length);
+ }
+ }
+ scsi_ulto4b(
+ num * sizeof(struct scsi_report_supported_opcodes_descr),
+ all->length);
+ break;
+ case RSO_OPTIONS_OC:
+ one = (struct scsi_report_supported_opcodes_one *)
+ ctsio->kern_data_ptr;
+ entry = &ctl_cmd_table[opcode];
+ goto fill_one;
+ case RSO_OPTIONS_OC_SA:
+ one = (struct scsi_report_supported_opcodes_one *)
+ ctsio->kern_data_ptr;
+ entry = &ctl_cmd_table[opcode];
+ entry = &((const struct ctl_cmd_entry *)
+ entry->execute)[service_action];
+fill_one:
+ if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) {
+ one->support = 3;
+ scsi_ulto2b(entry->length, one->cdb_length);
+ one->cdb_usage[0] = opcode;
+ memcpy(&one->cdb_usage[1], entry->usage,
+ entry->length - 1);
+ } else
+ one->support = 1;
+ break;
+ }
+
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+ ctsio->be_move_done = ctl_config_move_done;
+
+ ctl_datamove((union ctl_io *)ctsio);
+ return(retval);
+}
+
+int
+ctl_report_supported_tmf(struct ctl_scsiio *ctsio)
+{
+ struct ctl_lun *lun;
+ struct scsi_report_supported_tmf *cdb;
+ struct scsi_report_supported_tmf_data *data;
+ int retval;
+ int alloc_len, total_len;
+ CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n"));
+
+ cdb = (struct scsi_report_supported_tmf *)ctsio->cdb;
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+
+ retval = CTL_RETVAL_COMPLETE;
+
+ total_len = sizeof(struct scsi_report_supported_tmf_data);
alloc_len = scsi_4btoul(cdb->length);
ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
@@ -7209,87 +7604,67 @@ ctl_maintenance_in(struct ctl_scsiio *ctsio)
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
- rtg_ptr = (struct scsi_target_group_data *)ctsio->kern_data_ptr;
+ data = (struct scsi_report_supported_tmf_data *)ctsio->kern_data_ptr;
+ data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_TRS;
+ data->byte2 |= RST_ITNRS;
- tpg_desc_ptr1 = &rtg_ptr->groups[0];
- tp_desc_ptr1_1 = &tpg_desc_ptr1->descriptors[0];
- tp_desc_ptr1_2 = (struct scsi_target_port_descriptor *)
- &tp_desc_ptr1_1->desc_list[0];
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+ ctsio->be_move_done = ctl_config_move_done;
- if (single == 0) {
- tpg_desc_ptr2 = (struct scsi_target_port_group_descriptor *)
- &tp_desc_ptr1_2->desc_list[0];
- tp_desc_ptr2_1 = &tpg_desc_ptr2->descriptors[0];
- tp_desc_ptr2_2 = (struct scsi_target_port_descriptor *)
- &tp_desc_ptr2_1->desc_list[0];
- } else {
- tpg_desc_ptr2 = NULL;
- tp_desc_ptr2_1 = NULL;
- tp_desc_ptr2_2 = NULL;
- }
+ ctl_datamove((union ctl_io *)ctsio);
+ return (retval);
+}
- scsi_ulto4b(total_len - 4, rtg_ptr->length);
- if (single == 0) {
- if (ctsio->io_hdr.nexus.targ_port < CTL_MAX_PORTS) {
- if (lun->flags & CTL_LUN_PRIMARY_SC) {
- tpg_desc_ptr1->pref_state = TPG_PRIMARY;
- tpg_desc_ptr2->pref_state =
- TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
- } else {
- tpg_desc_ptr1->pref_state =
- TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
- tpg_desc_ptr2->pref_state = TPG_PRIMARY;
- }
- } else {
- if (lun->flags & CTL_LUN_PRIMARY_SC) {
- tpg_desc_ptr1->pref_state =
- TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
- tpg_desc_ptr2->pref_state = TPG_PRIMARY;
- } else {
- tpg_desc_ptr1->pref_state = TPG_PRIMARY;
- tpg_desc_ptr2->pref_state =
- TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
- }
- }
- } else {
- tpg_desc_ptr1->pref_state = TPG_PRIMARY;
- }
- tpg_desc_ptr1->support = 0;
- tpg_desc_ptr1->target_port_group[1] = 1;
- tpg_desc_ptr1->status = TPG_IMPLICIT;
- tpg_desc_ptr1->target_port_count= NUM_PORTS_PER_GRP;
+int
+ctl_report_timestamp(struct ctl_scsiio *ctsio)
+{
+ struct ctl_lun *lun;
+ struct scsi_report_timestamp *cdb;
+ struct scsi_report_timestamp_data *data;
+ struct timeval tv;
+ int64_t timestamp;
+ int retval;
+ int alloc_len, total_len;
+
+ CTL_DEBUG_PRINT(("ctl_report_timestamp\n"));
+
+ cdb = (struct scsi_report_timestamp *)ctsio->cdb;
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
- if (single == 0) {
- tpg_desc_ptr2->support = 0;
- tpg_desc_ptr2->target_port_group[1] = 2;
- tpg_desc_ptr2->status = TPG_IMPLICIT;
- tpg_desc_ptr2->target_port_count = NUM_PORTS_PER_GRP;
+ retval = CTL_RETVAL_COMPLETE;
+
+ total_len = sizeof(struct scsi_report_timestamp_data);
+ alloc_len = scsi_4btoul(cdb->length);
- tp_desc_ptr1_1->relative_target_port_identifier[1] = 1;
- tp_desc_ptr1_2->relative_target_port_identifier[1] = 2;
+ ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
+
+ ctsio->kern_sg_entries = 0;
- tp_desc_ptr2_1->relative_target_port_identifier[1] = 9;
- tp_desc_ptr2_2->relative_target_port_identifier[1] = 10;
+ if (total_len < alloc_len) {
+ ctsio->residual = alloc_len - total_len;
+ ctsio->kern_data_len = total_len;
+ ctsio->kern_total_len = total_len;
} else {
- if (ctsio->io_hdr.nexus.targ_port < CTL_MAX_PORTS) {
- tp_desc_ptr1_1->relative_target_port_identifier[1] = 1;
- tp_desc_ptr1_2->relative_target_port_identifier[1] = 2;
- } else {
- tp_desc_ptr1_1->relative_target_port_identifier[1] = 9;
- tp_desc_ptr1_2->relative_target_port_identifier[1] = 10;
- }
+ ctsio->residual = 0;
+ ctsio->kern_data_len = alloc_len;
+ ctsio->kern_total_len = alloc_len;
}
+ ctsio->kern_data_resid = 0;
+ ctsio->kern_rel_offset = 0;
- ctsio->be_move_done = ctl_config_move_done;
+ data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr;
+ scsi_ulto2b(sizeof(*data) - 2, data->length);
+ data->origin = RTS_ORIG_OUTSIDE;
+ getmicrotime(&tv);
+ timestamp = (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000;
+ scsi_ulto4b(timestamp >> 16, data->timestamp);
+ scsi_ulto2b(timestamp & 0xffff, &data->timestamp[4]);
- CTL_DEBUG_PRINT(("buf = %x %x %x %x %x %x %x %x\n",
- ctsio->kern_data_ptr[0], ctsio->kern_data_ptr[1],
- ctsio->kern_data_ptr[2], ctsio->kern_data_ptr[3],
- ctsio->kern_data_ptr[4], ctsio->kern_data_ptr[5],
- ctsio->kern_data_ptr[6], ctsio->kern_data_ptr[7]));
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+ ctsio->be_move_done = ctl_config_move_done;
ctl_datamove((union ctl_io *)ctsio);
- return(retval);
+ return (retval);
}
int
@@ -7312,7 +7687,7 @@ ctl_persistent_reserve_in(struct ctl_scsiio *ctsio)
lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
retry:
- mtx_lock(&softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
switch (cdb->action) {
case SPRI_RK: /* read keys */
total_len = sizeof(struct scsi_per_res_in_keys) +
@@ -7329,19 +7704,14 @@ retry:
total_len = sizeof(struct scsi_per_res_cap);
break;
case SPRI_RS: /* read full status */
+ total_len = sizeof(struct scsi_per_res_in_header) +
+ (sizeof(struct scsi_per_res_in_full_desc) + 256) *
+ lun->pr_key_count;
+ break;
default:
- mtx_unlock(&softc->ctl_lock);
- ctl_set_invalid_field(ctsio,
- /*sks_valid*/ 1,
- /*command*/ 1,
- /*field*/ 1,
- /*bit_valid*/ 1,
- /*bit*/ 0);
- ctl_done((union ctl_io *)ctsio);
- return (CTL_RETVAL_COMPLETE);
- break; /* NOTREACHED */
+ panic("Invalid PR type %x", cdb->action);
}
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
@@ -7359,7 +7729,7 @@ retry:
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
- mtx_lock(&softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
switch (cdb->action) {
case SPRI_RK: { // read keys
struct scsi_per_res_in_keys *res_keys;
@@ -7377,7 +7747,7 @@ retry:
if (total_len != (sizeof(struct scsi_per_res_in_keys) +
(lun->pr_key_count *
sizeof(struct scsi_per_res_key)))){
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
free(ctsio->kern_data_ptr, M_CTL);
printf("%s: reservation length changed, retrying\n",
__func__);
@@ -7452,7 +7822,7 @@ retry:
* command active right now.)
*/
if (tmp_len != total_len) {
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
free(ctsio->kern_data_ptr, M_CTL);
printf("%s: reservation status changed, retrying\n",
__func__);
@@ -7484,7 +7854,7 @@ retry:
res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr;
scsi_ulto2b(sizeof(*res_cap), res_cap->length);
- res_cap->flags2 |= SPRI_TMV;
+ res_cap->flags2 |= SPRI_TMV | SPRI_ALLOW_3;
type_mask = SPRI_TM_WR_EX_AR |
SPRI_TM_EX_AC_RO |
SPRI_TM_WR_EX_RO |
@@ -7494,7 +7864,62 @@ retry:
scsi_ulto2b(type_mask, res_cap->type_mask);
break;
}
- case SPRI_RS: //read full status
+ case SPRI_RS: { // read full status
+ struct scsi_per_res_in_full *res_status;
+ struct scsi_per_res_in_full_desc *res_desc;
+ struct ctl_port *port;
+ int i, len;
+
+ res_status = (struct scsi_per_res_in_full*)ctsio->kern_data_ptr;
+
+ /*
+ * We had to drop the lock to allocate our buffer, which
+ * leaves time for someone to come in with another
+ * persistent reservation. (That is unlikely, though,
+ * since this should be the only persistent reservation
+ * command active right now.)
+ */
+ if (total_len < (sizeof(struct scsi_per_res_in_header) +
+ (sizeof(struct scsi_per_res_in_full_desc) + 256) *
+ lun->pr_key_count)){
+ mtx_unlock(&lun->lun_lock);
+ free(ctsio->kern_data_ptr, M_CTL);
+ printf("%s: reservation length changed, retrying\n",
+ __func__);
+ goto retry;
+ }
+
+ scsi_ulto4b(lun->PRGeneration, res_status->header.generation);
+
+ res_desc = &res_status->desc[0];
+ for (i = 0; i < 2*CTL_MAX_INITIATORS; i++) {
+ if (!lun->per_res[i].registered)
+ continue;
+
+ memcpy(&res_desc->res_key, &lun->per_res[i].res_key.key,
+ sizeof(res_desc->res_key));
+ if ((lun->flags & CTL_LUN_PR_RESERVED) &&
+ (lun->pr_res_idx == i ||
+ lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS)) {
+ res_desc->flags = SPRI_FULL_R_HOLDER;
+ res_desc->scopetype = lun->res_type;
+ }
+ scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT,
+ res_desc->rel_trgt_port_id);
+ len = 0;
+ port = softc->ctl_ports[i / CTL_MAX_INIT_PER_PORT];
+ if (port != NULL)
+ len = ctl_create_iid(port,
+ i % CTL_MAX_INIT_PER_PORT,
+ res_desc->transport_id);
+ scsi_ulto4b(len, res_desc->additional_length);
+ res_desc = (struct scsi_per_res_in_full_desc *)
+ &res_desc->transport_id[len];
+ }
+ scsi_ulto4b((uint8_t *)res_desc - (uint8_t *)&res_status->desc[0],
+ res_status->header.length);
+ break;
+ }
default:
/*
* This is a bug, because we just checked for this above,
@@ -7503,8 +7928,9 @@ retry:
panic("Invalid PR type %x", cdb->action);
break; /* NOTREACHED */
}
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
ctsio->be_move_done = ctl_config_move_done;
CTL_DEBUG_PRINT(("buf = %x %x %x %x %x %x %x %x\n",
@@ -7534,13 +7960,13 @@ ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
retval = 0;
+ mtx_lock(&lun->lun_lock);
if (sa_res_key == 0) {
- mtx_lock(&softc->ctl_lock);
if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) {
/* validate scope and type */
if ((cdb->scope_type & SPR_SCOPE_MASK) !=
SPR_LU_SCOPE) {
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
ctl_set_invalid_field(/*ctsio*/ ctsio,
/*sks_valid*/ 1,
/*command*/ 1,
@@ -7552,7 +7978,7 @@ ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
}
if (type>8 || type==2 || type==4 || type==0) {
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
ctl_set_invalid_field(/*ctsio*/ ctsio,
/*sks_valid*/ 1,
/*command*/ 1,
@@ -7576,12 +8002,11 @@ ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
if (!persis_offset
&& i <CTL_MAX_INITIATORS)
- lun->pending_sense[i].ua_pending |=
+ lun->pending_ua[i] |=
CTL_UA_REG_PREEMPT;
else if (persis_offset
&& i >= persis_offset)
- lun->pending_sense[i-persis_offset
- ].ua_pending |=
+ lun->pending_ua[i-persis_offset] |=
CTL_UA_REG_PREEMPT;
lun->per_res[i].registered = 0;
memset(&lun->per_res[i].res_key, 0,
@@ -7594,7 +8019,6 @@ ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
&& lun->res_type != SPR_TYPE_EX_AC_AR)
lun->pr_res_idx = residx;
- mtx_unlock(&softc->ctl_lock);
/* send msg to other side */
persis_io.hdr.nexus = ctsio->io_hdr.nexus;
persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
@@ -7613,7 +8037,7 @@ ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
}
} else {
/* not all registrants */
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
free(ctsio->kern_data_ptr, M_CTL);
ctl_set_invalid_field(ctsio,
/*sks_valid*/ 1,
@@ -7628,7 +8052,6 @@ ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
|| !(lun->flags & CTL_LUN_PR_RESERVED)) {
int found = 0;
- mtx_lock(&softc->ctl_lock);
if (res_key == sa_res_key) {
/* special case */
/*
@@ -7640,7 +8063,7 @@ ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
* zero I'll take that approach since this has
* to do with the sa_res_key.
*/
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
free(ctsio->kern_data_ptr, M_CTL);
ctl_set_invalid_field(ctsio,
/*sks_valid*/ 1,
@@ -7665,17 +8088,14 @@ ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
sizeof(struct scsi_per_res_key));
lun->pr_key_count--;
- if (!persis_offset
- && i < CTL_MAX_INITIATORS)
- lun->pending_sense[i].ua_pending |=
- CTL_UA_REG_PREEMPT;
- else if (persis_offset
- && i >= persis_offset)
- lun->pending_sense[i-persis_offset].ua_pending|=
+ if (!persis_offset && i < CTL_MAX_INITIATORS)
+ lun->pending_ua[i] |= CTL_UA_REG_PREEMPT;
+ else if (persis_offset && i >= persis_offset)
+ lun->pending_ua[i-persis_offset] |=
CTL_UA_REG_PREEMPT;
}
- mtx_unlock(&softc->ctl_lock);
if (!found) {
+ mtx_unlock(&lun->lun_lock);
free(ctsio->kern_data_ptr, M_CTL);
ctl_set_reservation_conflict(ctsio);
ctl_done((union ctl_io *)ctsio);
@@ -7705,6 +8125,7 @@ ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
/* validate scope and type */
if ((cdb->scope_type & SPR_SCOPE_MASK) !=
SPR_LU_SCOPE) {
+ mtx_unlock(&lun->lun_lock);
ctl_set_invalid_field(/*ctsio*/ ctsio,
/*sks_valid*/ 1,
/*command*/ 1,
@@ -7716,6 +8137,7 @@ ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
}
if (type>8 || type==2 || type==4 || type==0) {
+ mtx_unlock(&lun->lun_lock);
ctl_set_invalid_field(/*ctsio*/ ctsio,
/*sks_valid*/ 1,
/*command*/ 1,
@@ -7760,27 +8182,23 @@ ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
if (!persis_offset
&& i < CTL_MAX_INITIATORS)
- lun->pending_sense[i
- ].ua_pending |=
+ lun->pending_ua[i] |=
CTL_UA_REG_PREEMPT;
else if (persis_offset
&& i >= persis_offset)
- lun->pending_sense[
- i-persis_offset].ua_pending |=
+ lun->pending_ua[i-persis_offset] |=
CTL_UA_REG_PREEMPT;
} else if (type != lun->res_type
&& (lun->res_type == SPR_TYPE_WR_EX_RO
|| lun->res_type ==SPR_TYPE_EX_AC_RO)){
if (!persis_offset
&& i < CTL_MAX_INITIATORS)
- lun->pending_sense[i
- ].ua_pending |=
+ lun->pending_ua[i] |=
CTL_UA_RES_RELEASE;
else if (persis_offset
&& i >= persis_offset)
- lun->pending_sense[
- i-persis_offset
- ].ua_pending |=
+ lun->pending_ua[
+ i-persis_offset] |=
CTL_UA_RES_RELEASE;
}
}
@@ -7790,8 +8208,7 @@ ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
&& lun->res_type != SPR_TYPE_EX_AC_AR)
lun->pr_res_idx = residx;
else
- lun->pr_res_idx =
- CTL_PR_ALL_REGISTRANTS;
+ lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS;
persis_io.hdr.nexus = ctsio->io_hdr.nexus;
persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
@@ -7814,7 +8231,6 @@ ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
* remove registrants
*/
int found=0;
- mtx_lock(&softc->ctl_lock);
for (i=0; i < 2*CTL_MAX_INITIATORS; i++) {
if (memcmp(param->serv_act_res_key,
@@ -7830,23 +8246,21 @@ ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
if (!persis_offset
&& i < CTL_MAX_INITIATORS)
- lun->pending_sense[i].ua_pending |=
+ lun->pending_ua[i] |=
CTL_UA_REG_PREEMPT;
else if (persis_offset
&& i >= persis_offset)
- lun->pending_sense[
- i-persis_offset].ua_pending |=
+ lun->pending_ua[i-persis_offset] |=
CTL_UA_REG_PREEMPT;
}
if (!found) {
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
free(ctsio->kern_data_ptr, M_CTL);
ctl_set_reservation_conflict(ctsio);
ctl_done((union ctl_io *)ctsio);
return (1);
}
- mtx_unlock(&softc->ctl_lock);
persis_io.hdr.nexus = ctsio->io_hdr.nexus;
persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
@@ -7866,6 +8280,7 @@ ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
}
lun->PRGeneration++;
+ mtx_unlock(&lun->lun_lock);
return (retval);
}
@@ -7897,11 +8312,10 @@ ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg)
if (!persis_offset
&& i < CTL_MAX_INITIATORS)
- lun->pending_sense[i].ua_pending |=
+ lun->pending_ua[i] |=
CTL_UA_REG_PREEMPT;
else if (persis_offset && i >= persis_offset)
- lun->pending_sense[i -
- persis_offset].ua_pending |=
+ lun->pending_ua[i - persis_offset] |=
CTL_UA_REG_PREEMPT;
lun->per_res[i].registered = 0;
memset(&lun->per_res[i].res_key, 0,
@@ -7928,12 +8342,11 @@ ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg)
if (!persis_offset
&& i < persis_offset)
- lun->pending_sense[i].ua_pending |=
+ lun->pending_ua[i] |=
CTL_UA_REG_PREEMPT;
else if (persis_offset
&& i >= persis_offset)
- lun->pending_sense[i -
- persis_offset].ua_pending |=
+ lun->pending_ua[i - persis_offset] |=
CTL_UA_REG_PREEMPT;
}
}
@@ -7956,25 +8369,22 @@ ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg)
lun->pr_key_count--;
if (!persis_offset
&& i < CTL_MAX_INITIATORS)
- lun->pending_sense[i].ua_pending |=
+ lun->pending_ua[i] |=
CTL_UA_REG_PREEMPT;
else if (persis_offset
&& i >= persis_offset)
- lun->pending_sense[i -
- persis_offset].ua_pending |=
+ lun->pending_ua[i - persis_offset] |=
CTL_UA_REG_PREEMPT;
} else if (msg->pr.pr_info.res_type != lun->res_type
&& (lun->res_type == SPR_TYPE_WR_EX_RO
|| lun->res_type == SPR_TYPE_EX_AC_RO)) {
if (!persis_offset
&& i < persis_offset)
- lun->pending_sense[i
- ].ua_pending |=
+ lun->pending_ua[i] |=
CTL_UA_RES_RELEASE;
else if (persis_offset
&& i >= persis_offset)
- lun->pending_sense[i -
- persis_offset].ua_pending |=
+ lun->pending_ua[i - persis_offset] |=
CTL_UA_RES_RELEASE;
}
}
@@ -8048,28 +8458,6 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
}
}
- switch (cdb->action & SPRO_ACTION_MASK) {
- case SPRO_REGISTER:
- case SPRO_RESERVE:
- case SPRO_RELEASE:
- case SPRO_CLEAR:
- case SPRO_PREEMPT:
- case SPRO_REG_IGNO:
- break;
- case SPRO_REG_MOVE:
- case SPRO_PRE_ABO:
- default:
- ctl_set_invalid_field(/*ctsio*/ ctsio,
- /*sks_valid*/ 1,
- /*command*/ 1,
- /*field*/ 1,
- /*bit_valid*/ 1,
- /*bit*/ 0);
- ctl_done((union ctl_io *)ctsio);
- return (CTL_RETVAL_COMPLETE);
- break; /* NOTREACHED */
- }
-
param_len = scsi_4btoul(cdb->length);
if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
@@ -8097,7 +8485,7 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
* This must be done for all other service actions
*/
if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) {
- mtx_lock(&softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
if (lun->per_res[residx].registered) {
if (memcmp(param->res_key.key,
lun->per_res[residx].res_key.key,
@@ -8108,7 +8496,7 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
* the one the initiator previously
* registered.
*/
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
free(ctsio->kern_data_ptr, M_CTL);
ctl_set_reservation_conflict(ctsio);
ctl_done((union ctl_io *)ctsio);
@@ -8118,7 +8506,7 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
/*
* We are not registered
*/
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
free(ctsio->kern_data_ptr, M_CTL);
ctl_set_reservation_conflict(ctsio);
ctl_done((union ctl_io *)ctsio);
@@ -8128,13 +8516,13 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
* We are not registered and trying to register but
* the register key isn't zero.
*/
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
free(ctsio->kern_data_ptr, M_CTL);
ctl_set_reservation_conflict(ctsio);
ctl_done((union ctl_io *)ctsio);
return (CTL_RETVAL_COMPLETE);
}
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
}
switch (cdb->action & SPRO_ACTION_MASK) {
@@ -8173,7 +8561,7 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
return (CTL_RETVAL_COMPLETE);
}
- mtx_lock(&softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
/*
* The initiator wants to clear the
@@ -8184,7 +8572,7 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
&& (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER)
|| ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO
&& !lun->per_res[residx].registered)) {
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
goto done;
}
@@ -8213,8 +8601,7 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
i+persis_offset].registered
== 0)
continue;
- lun->pending_sense[i
- ].ua_pending |=
+ lun->pending_ua[i] |=
CTL_UA_RES_RELEASE;
}
}
@@ -8236,7 +8623,6 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
printf("CTL:Persis Out error returned from "
"ctl_ha_msg_send %d\n", isc_retval);
}
- mtx_unlock(&softc->ctl_lock);
} else /* sa_res_key != 0 */ {
/*
@@ -8260,7 +8646,6 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
memcpy(persis_io.pr.pr_info.sa_res_key,
param->serv_act_res_key,
sizeof(param->serv_act_res_key));
- mtx_unlock(&softc->ctl_lock);
if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
&persis_io, sizeof(persis_io), 0)) >
CTL_HA_STATUS_SUCCESS) {
@@ -8269,6 +8654,7 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
}
}
lun->PRGeneration++;
+ mtx_unlock(&lun->lun_lock);
break;
}
@@ -8276,7 +8662,7 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
#if 0
printf("Reserve executed type %d\n", type);
#endif
- mtx_lock(&softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
if (lun->flags & CTL_LUN_PR_RESERVED) {
/*
* if this isn't the reservation holder and it's
@@ -8286,13 +8672,13 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
if ((lun->pr_res_idx != residx
&& lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS)
|| lun->res_type != type) {
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
free(ctsio->kern_data_ptr, M_CTL);
ctl_set_reservation_conflict(ctsio);
ctl_done((union ctl_io *)ctsio);
return (CTL_RETVAL_COMPLETE);
}
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
} else /* create a reservation */ {
/*
* If it's not an "all registrants" type record
@@ -8307,7 +8693,7 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
lun->flags |= CTL_LUN_PR_RESERVED;
lun->res_type = type;
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
/* send msg to other side */
persis_io.hdr.nexus = ctsio->io_hdr.nexus;
@@ -8325,10 +8711,10 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
break;
case SPRO_RELEASE:
- mtx_lock(&softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) {
/* No reservation exists return good status */
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
goto done;
}
/*
@@ -8340,12 +8726,12 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
* not a res holder return good status but
* do nothing
*/
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
goto done;
}
if (lun->res_type != type) {
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
free(ctsio->kern_data_ptr, M_CTL);
ctl_set_illegal_pr_release(ctsio);
ctl_done((union ctl_io *)ctsio);
@@ -8373,13 +8759,13 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
if (lun->per_res[i+persis_offset].registered
== 0)
continue;
- lun->pending_sense[i].ua_pending |=
+ lun->pending_ua[i] |=
CTL_UA_RES_RELEASE;
}
lun->per_res[residx].registered = 1;
}
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
/* Send msg to other side */
persis_io.hdr.nexus = ctsio->io_hdr.nexus;
persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
@@ -8394,7 +8780,7 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
case SPRO_CLEAR:
/* send msg to other side */
- mtx_lock(&softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
lun->flags &= ~CTL_LUN_PR_RESERVED;
lun->res_type = 0;
lun->pr_key_count = 0;
@@ -8408,18 +8794,18 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
for (i=0; i < 2*CTL_MAX_INITIATORS; i++)
if (lun->per_res[i].registered) {
if (!persis_offset && i < CTL_MAX_INITIATORS)
- lun->pending_sense[i].ua_pending |=
+ lun->pending_ua[i] |=
CTL_UA_RES_PREEMPT;
else if (persis_offset && i >= persis_offset)
- lun->pending_sense[i-persis_offset
- ].ua_pending |= CTL_UA_RES_PREEMPT;
+ lun->pending_ua[i-persis_offset] |=
+ CTL_UA_RES_PREEMPT;
memset(&lun->per_res[i].res_key,
0, sizeof(struct scsi_per_res_key));
lun->per_res[i].registered = 0;
}
lun->PRGeneration++;
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
persis_io.hdr.nexus = ctsio->io_hdr.nexus;
persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
persis_io.pr.pr_info.action = CTL_PR_CLEAR;
@@ -8439,19 +8825,8 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
return (CTL_RETVAL_COMPLETE);
break;
}
- case SPRO_REG_MOVE:
- case SPRO_PRE_ABO:
default:
- free(ctsio->kern_data_ptr, M_CTL);
- ctl_set_invalid_field(/*ctsio*/ ctsio,
- /*sks_valid*/ 1,
- /*command*/ 1,
- /*field*/ 1,
- /*bit_valid*/ 1,
- /*bit*/ 0);
- ctl_done((union ctl_io *)ctsio);
- return (CTL_RETVAL_COMPLETE);
- break; /* NOTREACHED */
+ panic("Invalid PR type %x", cdb->action);
}
done:
@@ -8478,12 +8853,9 @@ ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg)
softc = control_softc;
- mtx_lock(&softc->ctl_lock);
-
- targ_lun = msg->hdr.nexus.targ_lun;
- if (msg->hdr.nexus.lun_map_fn != NULL)
- targ_lun = msg->hdr.nexus.lun_map_fn(msg->hdr.nexus.lun_map_arg, targ_lun);
+ targ_lun = msg->hdr.nexus.targ_mapped_lun;
lun = softc->ctl_luns[targ_lun];
+ mtx_lock(&lun->lun_lock);
switch(msg->pr.pr_info.action) {
case CTL_PR_REG_KEY:
if (!lun->per_res[msg->pr.pr_info.residx].registered) {
@@ -8524,8 +8896,7 @@ ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg)
persis_offset].registered == 0)
continue;
- lun->pending_sense[i
- ].ua_pending |=
+ lun->pending_ua[i] |=
CTL_UA_RES_RELEASE;
}
}
@@ -8556,7 +8927,7 @@ ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg)
&& lun->res_type != SPR_TYPE_WR_EX) {
for (i = 0; i < CTL_MAX_INITIATORS; i++)
if (lun->per_res[i+persis_offset].registered)
- lun->pending_sense[i].ua_pending |=
+ lun->pending_ua[i] |=
CTL_UA_RES_RELEASE;
}
@@ -8579,11 +8950,10 @@ ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg)
continue;
if (!persis_offset
&& i < CTL_MAX_INITIATORS)
- lun->pending_sense[i].ua_pending |=
- CTL_UA_RES_PREEMPT;
+ lun->pending_ua[i] |= CTL_UA_RES_PREEMPT;
else if (persis_offset
&& i >= persis_offset)
- lun->pending_sense[i-persis_offset].ua_pending|=
+ lun->pending_ua[i-persis_offset] |=
CTL_UA_RES_PREEMPT;
memset(&lun->per_res[i].res_key, 0,
sizeof(struct scsi_per_res_key));
@@ -8593,17 +8963,17 @@ ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg)
break;
}
- mtx_unlock(&softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
}
int
ctl_read_write(struct ctl_scsiio *ctsio)
{
struct ctl_lun *lun;
- struct ctl_lba_len lbalen;
+ struct ctl_lba_len_flags *lbalen;
uint64_t lba;
uint32_t num_blocks;
- int reladdr, fua, dpo, ebp;
+ int fua, dpo;
int retval;
int isread;
@@ -8611,10 +8981,8 @@ ctl_read_write(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0]));
- reladdr = 0;
fua = 0;
dpo = 0;
- ebp = 0;
retval = CTL_RETVAL_COMPLETE;
@@ -8662,17 +9030,11 @@ ctl_read_write(struct ctl_scsiio *ctsio)
cdb = (struct scsi_rw_10 *)ctsio->cdb;
- if (cdb->byte2 & SRW10_RELADDR)
- reladdr = 1;
if (cdb->byte2 & SRW10_FUA)
fua = 1;
if (cdb->byte2 & SRW10_DPO)
dpo = 1;
- if ((cdb->opcode == WRITE_10)
- && (cdb->byte2 & SRW10_EBP))
- ebp = 1;
-
lba = scsi_4btoul(cdb->addr);
num_blocks = scsi_2btoul(cdb->length);
break;
@@ -8702,8 +9064,6 @@ ctl_read_write(struct ctl_scsiio *ctsio)
cdb = (struct scsi_rw_12 *)ctsio->cdb;
- if (cdb->byte2 & SRW12_RELADDR)
- reladdr = 1;
if (cdb->byte2 & SRW12_FUA)
fua = 1;
if (cdb->byte2 & SRW12_DPO)
@@ -8731,8 +9091,6 @@ ctl_read_write(struct ctl_scsiio *ctsio)
cdb = (struct scsi_rw_16 *)ctsio->cdb;
- if (cdb->byte2 & SRW12_RELADDR)
- reladdr = 1;
if (cdb->byte2 & SRW12_FUA)
fua = 1;
if (cdb->byte2 & SRW12_DPO)
@@ -8772,22 +9130,121 @@ ctl_read_write(struct ctl_scsiio *ctsio)
* getting it to do write-through for a particular transaction may
* not be possible.
*/
+
/*
- * We don't support relative addressing. That also requires
- * supporting linked commands, which we don't do.
+ * The first check is to make sure we're in bounds, the second
+ * check is to catch wrap-around problems. If the lba + num blocks
+ * is less than the lba, then we've wrapped around and the block
+ * range is invalid anyway.
*/
- if (reladdr != 0) {
- ctl_set_invalid_field(ctsio,
- /*sks_valid*/ 1,
- /*command*/ 1,
- /*field*/ 1,
- /*bit_valid*/ 1,
- /*bit*/ 0);
+ if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
+ || ((lba + num_blocks) < lba)) {
+ ctl_set_lba_out_of_range(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ /*
+ * According to SBC-3, a transfer length of 0 is not an error.
+ * Note that this cannot happen with WRITE(6) or READ(6), since 0
+ * translates to 256 blocks for those commands.
+ */
+ if (num_blocks == 0) {
+ ctl_set_success(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ lbalen = (struct ctl_lba_len_flags *)
+ &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
+ lbalen->lba = lba;
+ lbalen->len = num_blocks;
+ lbalen->flags = isread ? CTL_LLF_READ : CTL_LLF_WRITE;
+
+ ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize;
+ ctsio->kern_rel_offset = 0;
+
+ CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n"));
+
+ retval = lun->backend->data_submit((union ctl_io *)ctsio);
+
+ return (retval);
+}
+
+static int
+ctl_cnw_cont(union ctl_io *io)
+{
+ struct ctl_scsiio *ctsio;
+ struct ctl_lun *lun;
+ struct ctl_lba_len_flags *lbalen;
+ int retval;
+
+ ctsio = &io->scsiio;
+ ctsio->io_hdr.status = CTL_STATUS_NONE;
+ ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT;
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+ lbalen = (struct ctl_lba_len_flags *)
+ &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
+ lbalen->flags = CTL_LLF_WRITE;
+
+ CTL_DEBUG_PRINT(("ctl_cnw_cont: calling data_submit()\n"));
+ retval = lun->backend->data_submit((union ctl_io *)ctsio);
+ return (retval);
+}
+
+int
+ctl_cnw(struct ctl_scsiio *ctsio)
+{
+ struct ctl_lun *lun;
+ struct ctl_lba_len_flags *lbalen;
+ uint64_t lba;
+ uint32_t num_blocks;
+ int fua, dpo;
+ int retval;
+
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+
+ CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0]));
+
+ fua = 0;
+ dpo = 0;
+
+ retval = CTL_RETVAL_COMPLETE;
+
+ switch (ctsio->cdb[0]) {
+ case COMPARE_AND_WRITE: {
+ struct scsi_compare_and_write *cdb;
+
+ cdb = (struct scsi_compare_and_write *)ctsio->cdb;
+
+ if (cdb->byte2 & SRW10_FUA)
+ fua = 1;
+ if (cdb->byte2 & SRW10_DPO)
+ dpo = 1;
+ lba = scsi_8btou64(cdb->addr);
+ num_blocks = cdb->length;
+ break;
+ }
+ default:
+ /*
+ * We got a command we don't support. This shouldn't
+ * happen, commands should be filtered out above us.
+ */
+ ctl_set_invalid_opcode(ctsio);
ctl_done((union ctl_io *)ctsio);
+
return (CTL_RETVAL_COMPLETE);
+ break; /* NOTREACHED */
}
/*
+ * XXX KDM what do we do with the DPO and FUA bits? FUA might be
+ * interesting for us, but if RAIDCore is in write-back mode,
+ * getting it to do write-through for a particular transaction may
+ * not be possible.
+ */
+
+ /*
* The first check is to make sure we're in bounds, the second
* check is to catch wrap-around problems. If the lba + num blocks
* is less than the lba, then we've wrapped around and the block
@@ -8802,8 +9259,6 @@ ctl_read_write(struct ctl_scsiio *ctsio)
/*
* According to SBC-3, a transfer length of 0 is not an error.
- * Note that this cannot happen with WRITE(6) or READ(6), since 0
- * translates to 256 blocks for those commands.
*/
if (num_blocks == 0) {
ctl_set_success(ctsio);
@@ -8811,15 +9266,130 @@ ctl_read_write(struct ctl_scsiio *ctsio)
return (CTL_RETVAL_COMPLETE);
}
- lbalen.lba = lba;
- lbalen.len = num_blocks;
- memcpy(ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, &lbalen,
- sizeof(lbalen));
+ ctsio->kern_total_len = 2 * num_blocks * lun->be_lun->blocksize;
+ ctsio->kern_rel_offset = 0;
- CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n"));
+ /*
+ * Set the IO_CONT flag, so that if this I/O gets passed to
+ * ctl_data_submit_done(), it'll get passed back to
+ * ctl_ctl_cnw_cont() for further processing.
+ */
+ ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT;
+ ctsio->io_cont = ctl_cnw_cont;
+ lbalen = (struct ctl_lba_len_flags *)
+ &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
+ lbalen->lba = lba;
+ lbalen->len = num_blocks;
+ lbalen->flags = CTL_LLF_COMPARE;
+
+ CTL_DEBUG_PRINT(("ctl_cnw: calling data_submit()\n"));
retval = lun->backend->data_submit((union ctl_io *)ctsio);
+ return (retval);
+}
+
+int
+ctl_verify(struct ctl_scsiio *ctsio)
+{
+ struct ctl_lun *lun;
+ struct ctl_lba_len_flags *lbalen;
+ uint64_t lba;
+ uint32_t num_blocks;
+ int bytchk, dpo;
+ int retval;
+
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+
+ CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0]));
+
+ bytchk = 0;
+ dpo = 0;
+ retval = CTL_RETVAL_COMPLETE;
+
+ switch (ctsio->cdb[0]) {
+ case VERIFY_10: {
+ struct scsi_verify_10 *cdb;
+
+ cdb = (struct scsi_verify_10 *)ctsio->cdb;
+ if (cdb->byte2 & SVFY_BYTCHK)
+ bytchk = 1;
+ if (cdb->byte2 & SVFY_DPO)
+ dpo = 1;
+ lba = scsi_4btoul(cdb->addr);
+ num_blocks = scsi_2btoul(cdb->length);
+ break;
+ }
+ case VERIFY_12: {
+ struct scsi_verify_12 *cdb;
+
+ cdb = (struct scsi_verify_12 *)ctsio->cdb;
+ if (cdb->byte2 & SVFY_BYTCHK)
+ bytchk = 1;
+ if (cdb->byte2 & SVFY_DPO)
+ dpo = 1;
+ lba = scsi_4btoul(cdb->addr);
+ num_blocks = scsi_4btoul(cdb->length);
+ break;
+ }
+ case VERIFY_16: {
+ struct scsi_rw_16 *cdb;
+
+ cdb = (struct scsi_rw_16 *)ctsio->cdb;
+ if (cdb->byte2 & SVFY_BYTCHK)
+ bytchk = 1;
+ if (cdb->byte2 & SVFY_DPO)
+ dpo = 1;
+ lba = scsi_8btou64(cdb->addr);
+ num_blocks = scsi_4btoul(cdb->length);
+ break;
+ }
+ default:
+ /*
+ * We got a command we don't support. This shouldn't
+ * happen, commands should be filtered out above us.
+ */
+ ctl_set_invalid_opcode(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+ /*
+ * The first check is to make sure we're in bounds, the second
+ * check is to catch wrap-around problems. If the lba + num blocks
+ * is less than the lba, then we've wrapped around and the block
+ * range is invalid anyway.
+ */
+ if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
+ || ((lba + num_blocks) < lba)) {
+ ctl_set_lba_out_of_range(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ /*
+ * According to SBC-3, a transfer length of 0 is not an error.
+ */
+ if (num_blocks == 0) {
+ ctl_set_success(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ lbalen = (struct ctl_lba_len_flags *)
+ &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
+ lbalen->lba = lba;
+ lbalen->len = num_blocks;
+ if (bytchk) {
+ lbalen->flags = CTL_LLF_COMPARE;
+ ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize;
+ } else {
+ lbalen->flags = CTL_LLF_VERIFY;
+ ctsio->kern_total_len = 0;
+ }
+ ctsio->kern_rel_offset = 0;
+
+ CTL_DEBUG_PRINT(("ctl_verify: calling data_submit()\n"));
+ retval = lun->backend->data_submit((union ctl_io *)ctsio);
return (retval);
}
@@ -8897,9 +9467,7 @@ ctl_report_luns(struct ctl_scsiio *ctsio)
mtx_lock(&control_softc->ctl_lock);
for (targ_lun_id = 0, num_filled = 0; targ_lun_id < CTL_MAX_LUNS && num_filled < num_luns; targ_lun_id++) {
- lun_id = targ_lun_id;
- if (ctsio->io_hdr.nexus.lun_map_fn != NULL)
- lun_id = ctsio->io_hdr.nexus.lun_map_fn(ctsio->io_hdr.nexus.lun_map_arg, lun_id);
+ lun_id = ctl_map_lun(ctsio->io_hdr.nexus.targ_port, targ_lun_id);
if (lun_id >= CTL_MAX_LUNS)
continue;
lun = control_softc->ctl_luns[lun_id];
@@ -8951,9 +9519,11 @@ ctl_report_luns(struct ctl_scsiio *ctsio)
* case, we shouldn't clear any pending lun change unit
* attention.
*/
- if (request_lun != NULL)
- lun->pending_sense[initidx].ua_pending &=
- ~CTL_UA_LUN_CHANGE;
+ if (request_lun != NULL) {
+ mtx_lock(&lun->lun_lock);
+ lun->pending_ua[initidx] &= ~CTL_UA_LUN_CHANGE;
+ mtx_unlock(&lun->lun_lock);
+ }
}
mtx_unlock(&control_softc->ctl_lock);
@@ -8992,6 +9562,7 @@ ctl_report_luns(struct ctl_scsiio *ctsio)
*/
ctsio->scsi_status = SCSI_STATUS_OK;
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
ctsio->be_move_done = ctl_config_move_done;
ctl_datamove((union ctl_io *)ctsio);
@@ -9051,7 +9622,8 @@ ctl_request_sense(struct ctl_scsiio *ctsio)
* Check for pending sense, and then for pending unit attentions.
* Pending sense gets returned first, then pending unit attentions.
*/
- mtx_lock(&lun->ctl_softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
+#ifdef CTL_WITH_CA
if (ctl_is_set(lun->have_ca, initidx)) {
scsi_sense_data_type stored_format;
@@ -9059,8 +9631,7 @@ ctl_request_sense(struct ctl_scsiio *ctsio)
* Check to see which sense format was used for the stored
* sense data.
*/
- stored_format = scsi_sense_type(
- &lun->pending_sense[initidx].sense);
+ stored_format = scsi_sense_type(&lun->pending_sense[initidx]);
/*
* If the user requested a different sense format than the
@@ -9075,32 +9646,34 @@ ctl_request_sense(struct ctl_scsiio *ctsio)
if ((stored_format == SSD_TYPE_FIXED)
&& (sense_format == SSD_TYPE_DESC))
ctl_sense_to_desc((struct scsi_sense_data_fixed *)
- &lun->pending_sense[initidx].sense,
+ &lun->pending_sense[initidx],
(struct scsi_sense_data_desc *)sense_ptr);
else if ((stored_format == SSD_TYPE_DESC)
&& (sense_format == SSD_TYPE_FIXED))
ctl_sense_to_fixed((struct scsi_sense_data_desc *)
- &lun->pending_sense[initidx].sense,
+ &lun->pending_sense[initidx],
(struct scsi_sense_data_fixed *)sense_ptr);
else
- memcpy(sense_ptr, &lun->pending_sense[initidx].sense,
+ memcpy(sense_ptr, &lun->pending_sense[initidx],
ctl_min(sizeof(*sense_ptr),
- sizeof(lun->pending_sense[initidx].sense)));
+ sizeof(lun->pending_sense[initidx])));
ctl_clear_mask(lun->have_ca, initidx);
have_error = 1;
- } else if (lun->pending_sense[initidx].ua_pending != CTL_UA_NONE) {
+ } else
+#endif
+ if (lun->pending_ua[initidx] != CTL_UA_NONE) {
ctl_ua_type ua_type;
- ua_type = ctl_build_ua(lun->pending_sense[initidx].ua_pending,
+ ua_type = ctl_build_ua(lun->pending_ua[initidx],
sense_ptr, sense_format);
if (ua_type != CTL_UA_NONE) {
have_error = 1;
/* We're reporting this UA, so clear it */
- lun->pending_sense[initidx].ua_pending &= ~ua_type;
+ lun->pending_ua[initidx] &= ~ua_type;
}
}
- mtx_unlock(&lun->ctl_softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
/*
* We already have a pending error, return it.
@@ -9118,7 +9691,7 @@ ctl_request_sense(struct ctl_scsiio *ctsio)
* parameter data.
*/
ctsio->sense_len = 0;
-
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
ctsio->be_move_done = ctl_config_move_done;
ctl_datamove((union ctl_io *)ctsio);
@@ -9147,6 +9720,7 @@ no_sense:
* autosense in this case. We're reporting sense as parameter data.
*/
ctsio->sense_len = 0;
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
ctsio->be_move_done = ctl_config_move_done;
ctl_datamove((union ctl_io *)ctsio);
@@ -9163,7 +9737,7 @@ ctl_tur(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_tur\n"));
if (lun == NULL)
- return (-EINVAL);
+ return (EINVAL);
ctsio->scsi_status = SCSI_STATUS_OK;
ctsio->io_hdr.status = CTL_SUCCESS;
@@ -9227,13 +9801,18 @@ ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len)
pages->page_list[1] = SVPD_UNIT_SERIAL_NUMBER;
/* Device Identification */
pages->page_list[2] = SVPD_DEVICE_ID;
+ /* SCSI Ports */
+ pages->page_list[3] = SVPD_SCSI_PORTS;
+ /* Third-party Copy */
+ pages->page_list[4] = SVPD_SCSI_TPC;
/* Block limits */
- pages->page_list[3] = SVPD_BLOCK_LIMITS;
+ pages->page_list[5] = SVPD_BLOCK_LIMITS;
/* Logical Block Provisioning */
- pages->page_list[4] = SVPD_LBP;
+ pages->page_list[6] = SVPD_LBP;
ctsio->scsi_status = SCSI_STATUS_OK;
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
ctsio->be_move_done = ctl_config_move_done;
ctl_datamove((union ctl_io *)ctsio);
@@ -9245,9 +9824,6 @@ ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len)
{
struct scsi_vpd_unit_serial_number *sn_ptr;
struct ctl_lun *lun;
-#ifndef CTL_USE_BACKEND_SN
- char tmpstr[32];
-#endif
lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
@@ -9281,7 +9857,6 @@ ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len)
sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER;
sn_ptr->length = ctl_min(sizeof(*sn_ptr) - 4, CTL_SN_LEN);
-#ifdef CTL_USE_BACKEND_SN
/*
* If we don't have a LUN, we just leave the serial number as
* all spaces.
@@ -9291,17 +9866,9 @@ ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len)
strncpy((char *)sn_ptr->serial_num,
(char *)lun->be_lun->serial_num, CTL_SN_LEN);
}
-#else
- /*
- * Note that we're using a non-unique serial number here,
- */
- snprintf(tmpstr, sizeof(tmpstr), "MYSERIALNUMIS000");
- memset(sn_ptr->serial_num, 0x20, sizeof(sn_ptr->serial_num));
- strncpy(sn_ptr->serial_num, tmpstr, ctl_min(CTL_SN_LEN,
- ctl_min(sizeof(tmpstr), sizeof(*sn_ptr) - 4)));
-#endif
ctsio->scsi_status = SCSI_STATUS_OK;
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
ctsio->be_move_done = ctl_config_move_done;
ctl_datamove((union ctl_io *)ctsio);
@@ -9313,45 +9880,38 @@ static int
ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len)
{
struct scsi_vpd_device_id *devid_ptr;
- struct scsi_vpd_id_descriptor *desc, *desc1;
- struct scsi_vpd_id_descriptor *desc2, *desc3; /* for types 4h and 5h */
- struct scsi_vpd_id_t10 *t10id;
+ struct scsi_vpd_id_descriptor *desc;
struct ctl_softc *ctl_softc;
struct ctl_lun *lun;
- struct ctl_frontend *fe;
-#ifndef CTL_USE_BACKEND_SN
- char tmpstr[32];
-#endif /* CTL_USE_BACKEND_SN */
- int devid_len;
+ struct ctl_port *port;
+ int data_len;
+ uint8_t proto;
ctl_softc = control_softc;
- mtx_lock(&ctl_softc->ctl_lock);
- fe = ctl_softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)];
- mtx_unlock(&ctl_softc->ctl_lock);
-
- if (fe->devid != NULL)
- return ((fe->devid)(ctsio, alloc_len));
-
+ port = ctl_softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)];
lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
- devid_len = sizeof(struct scsi_vpd_device_id) +
- sizeof(struct scsi_vpd_id_descriptor) +
- sizeof(struct scsi_vpd_id_t10) + CTL_DEVID_LEN +
- sizeof(struct scsi_vpd_id_descriptor) + CTL_WWPN_LEN +
- sizeof(struct scsi_vpd_id_descriptor) +
+ data_len = sizeof(struct scsi_vpd_device_id) +
+ sizeof(struct scsi_vpd_id_descriptor) +
sizeof(struct scsi_vpd_id_rel_trgt_port_id) +
- sizeof(struct scsi_vpd_id_descriptor) +
+ sizeof(struct scsi_vpd_id_descriptor) +
sizeof(struct scsi_vpd_id_trgt_port_grp_id);
+ if (lun && lun->lun_devid)
+ data_len += lun->lun_devid->len;
+ if (port->port_devid)
+ data_len += port->port_devid->len;
+ if (port->target_devid)
+ data_len += port->target_devid->len;
- ctsio->kern_data_ptr = malloc(devid_len, M_CTL, M_WAITOK | M_ZERO);
+ ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr;
ctsio->kern_sg_entries = 0;
- if (devid_len < alloc_len) {
- ctsio->residual = alloc_len - devid_len;
- ctsio->kern_data_len = devid_len;
- ctsio->kern_total_len = devid_len;
+ if (data_len < alloc_len) {
+ ctsio->residual = alloc_len - data_len;
+ ctsio->kern_data_len = data_len;
+ ctsio->kern_total_len = data_len;
} else {
ctsio->residual = 0;
ctsio->kern_data_len = alloc_len;
@@ -9361,15 +9921,6 @@ ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len)
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
- desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list;
- t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0];
- desc1 = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
- sizeof(struct scsi_vpd_id_t10) + CTL_DEVID_LEN);
- desc2 = (struct scsi_vpd_id_descriptor *)(&desc1->identifier[0] +
- CTL_WWPN_LEN);
- desc3 = (struct scsi_vpd_id_descriptor *)(&desc2->identifier[0] +
- sizeof(struct scsi_vpd_id_rel_trgt_port_id));
-
/*
* The control device is always connected. The disk device, on the
* other hand, may not be online all the time.
@@ -9379,115 +9930,187 @@ ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len)
lun->be_lun->lun_type;
else
devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
-
devid_ptr->page_code = SVPD_DEVICE_ID;
+ scsi_ulto2b(data_len - 4, devid_ptr->length);
- scsi_ulto2b(devid_len - 4, devid_ptr->length);
-
- mtx_lock(&ctl_softc->ctl_lock);
-
- /*
- * For Fibre channel,
- */
- if (fe->port_type == CTL_PORT_FC)
- {
- desc->proto_codeset = (SCSI_PROTO_FC << 4) |
- SVPD_ID_CODESET_ASCII;
- desc1->proto_codeset = (SCSI_PROTO_FC << 4) |
- SVPD_ID_CODESET_BINARY;
- }
+ if (port->port_type == CTL_PORT_FC)
+ proto = SCSI_PROTO_FC << 4;
+ else if (port->port_type == CTL_PORT_ISCSI)
+ proto = SCSI_PROTO_ISCSI << 4;
else
- {
- desc->proto_codeset = (SCSI_PROTO_SPI << 4) |
- SVPD_ID_CODESET_ASCII;
- desc1->proto_codeset = (SCSI_PROTO_SPI << 4) |
- SVPD_ID_CODESET_BINARY;
- }
- desc2->proto_codeset = desc3->proto_codeset = desc1->proto_codeset;
- mtx_unlock(&ctl_softc->ctl_lock);
+ proto = SCSI_PROTO_SPI << 4;
+ desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list;
/*
* We're using a LUN association here. i.e., this device ID is a
* per-LUN identifier.
*/
- desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10;
- desc->length = sizeof(*t10id) + CTL_DEVID_LEN;
- strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor));
+ if (lun && lun->lun_devid) {
+ memcpy(desc, lun->lun_devid->data, lun->lun_devid->len);
+ desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc +
+ lun->lun_devid->len);
+ }
/*
- * desc1 is for the WWPN which is a port asscociation.
+ * This is for the WWPN which is a port association.
*/
- desc1->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | SVPD_ID_TYPE_NAA;
- desc1->length = CTL_WWPN_LEN;
- /* XXX Call Reggie's get_WWNN func here then add port # to the end */
- /* For testing just create the WWPN */
-#if 0
- ddb_GetWWNN((char *)desc1->identifier);
-
- /* NOTE: if the port is 0 or 8 we don't want to subtract 1 */
- /* This is so Copancontrol will return something sane */
- if (ctsio->io_hdr.nexus.targ_port!=0 &&
- ctsio->io_hdr.nexus.targ_port!=8)
- desc1->identifier[7] += ctsio->io_hdr.nexus.targ_port-1;
- else
- desc1->identifier[7] += ctsio->io_hdr.nexus.targ_port;
-#endif
+ if (port->port_devid) {
+ memcpy(desc, port->port_devid->data, port->port_devid->len);
+ desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc +
+ port->port_devid->len);
+ }
- be64enc(desc1->identifier, fe->wwpn);
+ /*
+ * This is for the Relative Target Port(type 4h) identifier
+ */
+ desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY;
+ desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT |
+ SVPD_ID_TYPE_RELTARG;
+ desc->length = 4;
+ scsi_ulto2b(ctsio->io_hdr.nexus.targ_port, &desc->identifier[2]);
+ desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
+ sizeof(struct scsi_vpd_id_rel_trgt_port_id));
/*
- * desc2 is for the Relative Target Port(type 4h) identifier
+ * This is for the Target Port Group(type 5h) identifier
*/
- desc2->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT
- | SVPD_ID_TYPE_RELTARG;
- desc2->length = 4;
-//#if 0
- /* NOTE: if the port is 0 or 8 we don't want to subtract 1 */
- /* This is so Copancontrol will return something sane */
- if (ctsio->io_hdr.nexus.targ_port!=0 &&
- ctsio->io_hdr.nexus.targ_port!=8)
- desc2->identifier[3] = ctsio->io_hdr.nexus.targ_port - 1;
- else
- desc2->identifier[3] = ctsio->io_hdr.nexus.targ_port;
-//#endif
+ desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY;
+ desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT |
+ SVPD_ID_TYPE_TPORTGRP;
+ desc->length = 4;
+ scsi_ulto2b(ctsio->io_hdr.nexus.targ_port / CTL_MAX_PORTS + 1,
+ &desc->identifier[2]);
+ desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
+ sizeof(struct scsi_vpd_id_trgt_port_grp_id));
/*
- * desc3 is for the Target Port Group(type 5h) identifier
+ * This is for the Target identifier
*/
- desc3->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT
- | SVPD_ID_TYPE_TPORTGRP;
- desc3->length = 4;
- if (ctsio->io_hdr.nexus.targ_port < CTL_MAX_PORTS || ctl_is_single)
- desc3->identifier[3] = 1;
+ if (port->target_devid) {
+ memcpy(desc, port->target_devid->data, port->target_devid->len);
+ }
+
+ ctsio->scsi_status = SCSI_STATUS_OK;
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+ ctsio->be_move_done = ctl_config_move_done;
+ ctl_datamove((union ctl_io *)ctsio);
+
+ return (CTL_RETVAL_COMPLETE);
+}
+
+static int
+ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len)
+{
+ struct ctl_softc *softc = control_softc;
+ struct scsi_vpd_scsi_ports *sp;
+ struct scsi_vpd_port_designation *pd;
+ struct scsi_vpd_port_designation_cont *pdc;
+ struct ctl_lun *lun;
+ struct ctl_port *port;
+ int data_len, num_target_ports, iid_len, id_len, g, pg, p;
+ int num_target_port_groups, single;
+
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+
+ single = ctl_is_single;
+ if (single)
+ num_target_port_groups = 1;
else
- desc3->identifier[3] = 2;
+ num_target_port_groups = NUM_TARGET_PORT_GROUPS;
+ num_target_ports = 0;
+ iid_len = 0;
+ id_len = 0;
+ mtx_lock(&softc->ctl_lock);
+ STAILQ_FOREACH(port, &softc->port_list, links) {
+ if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
+ continue;
+ if (lun != NULL &&
+ ctl_map_lun_back(port->targ_port, lun->lun) >=
+ CTL_MAX_LUNS)
+ continue;
+ num_target_ports++;
+ if (port->init_devid)
+ iid_len += port->init_devid->len;
+ if (port->port_devid)
+ id_len += port->port_devid->len;
+ }
+ mtx_unlock(&softc->ctl_lock);
+
+ data_len = sizeof(struct scsi_vpd_scsi_ports) + num_target_port_groups *
+ num_target_ports * (sizeof(struct scsi_vpd_port_designation) +
+ sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len;
+ ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
+ sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr;
+ ctsio->kern_sg_entries = 0;
+
+ if (data_len < alloc_len) {
+ ctsio->residual = alloc_len - data_len;
+ ctsio->kern_data_len = data_len;
+ ctsio->kern_total_len = data_len;
+ } else {
+ ctsio->residual = 0;
+ ctsio->kern_data_len = alloc_len;
+ ctsio->kern_total_len = alloc_len;
+ }
+ ctsio->kern_data_resid = 0;
+ ctsio->kern_rel_offset = 0;
+ ctsio->kern_sg_entries = 0;
-#ifdef CTL_USE_BACKEND_SN
/*
- * If we've actually got a backend, copy the device id from the
- * per-LUN data. Otherwise, set it to all spaces.
+ * The control device is always connected. The disk device, on the
+ * other hand, may not be online all the time. Need to change this
+ * to figure out whether the disk device is actually online or not.
*/
- if (lun != NULL) {
- /*
- * Copy the backend's LUN ID.
- */
- strncpy((char *)t10id->vendor_spec_id,
- (char *)lun->be_lun->device_id, CTL_DEVID_LEN);
- } else {
- /*
- * No backend, set this to spaces.
- */
- memset(t10id->vendor_spec_id, 0x20, CTL_DEVID_LEN);
+ if (lun != NULL)
+ sp->device = (SID_QUAL_LU_CONNECTED << 5) |
+ lun->be_lun->lun_type;
+ else
+ sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
+
+ sp->page_code = SVPD_SCSI_PORTS;
+ scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports),
+ sp->page_length);
+ pd = &sp->design[0];
+
+ mtx_lock(&softc->ctl_lock);
+ if (softc->flags & CTL_FLAG_MASTER_SHELF)
+ pg = 0;
+ else
+ pg = 1;
+ for (g = 0; g < num_target_port_groups; g++) {
+ STAILQ_FOREACH(port, &softc->port_list, links) {
+ if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
+ continue;
+ if (lun != NULL &&
+ ctl_map_lun_back(port->targ_port, lun->lun) >=
+ CTL_MAX_LUNS)
+ continue;
+ p = port->targ_port % CTL_MAX_PORTS + g * CTL_MAX_PORTS;
+ scsi_ulto2b(p, pd->relative_port_id);
+ if (port->init_devid && g == pg) {
+ iid_len = port->init_devid->len;
+ memcpy(pd->initiator_transportid,
+ port->init_devid->data, port->init_devid->len);
+ } else
+ iid_len = 0;
+ scsi_ulto2b(iid_len, pd->initiator_transportid_length);
+ pdc = (struct scsi_vpd_port_designation_cont *)
+ (&pd->initiator_transportid[iid_len]);
+ if (port->port_devid && g == pg) {
+ id_len = port->port_devid->len;
+ memcpy(pdc->target_port_descriptors,
+ port->port_devid->data, port->port_devid->len);
+ } else
+ id_len = 0;
+ scsi_ulto2b(id_len, pdc->target_port_descriptors_length);
+ pd = (struct scsi_vpd_port_designation *)
+ ((uint8_t *)pdc->target_port_descriptors + id_len);
+ }
}
-#else
- snprintf(tmpstr, sizeof(tmpstr), "MYDEVICEIDIS%4d",
- (lun != NULL) ? (int)lun->lun : 0);
- strncpy(t10id->vendor_spec_id, tmpstr, ctl_min(CTL_DEVID_LEN,
- sizeof(tmpstr)));
-#endif
+ mtx_unlock(&softc->ctl_lock);
ctsio->scsi_status = SCSI_STATUS_OK;
-
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
ctsio->be_move_done = ctl_config_move_done;
ctl_datamove((union ctl_io *)ctsio);
@@ -9502,7 +10125,6 @@ ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len)
int bs;
lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
- bs = lun->be_lun->blocksize;
ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO);
bl_ptr = (struct scsi_vpd_block_limits *)ctsio->kern_data_ptr;
@@ -9534,15 +10156,20 @@ ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len)
bl_ptr->page_code = SVPD_BLOCK_LIMITS;
scsi_ulto2b(sizeof(*bl_ptr), bl_ptr->page_length);
+ bl_ptr->max_cmp_write_len = 0xff;
scsi_ulto4b(0xffffffff, bl_ptr->max_txfer_len);
- scsi_ulto4b(MAXPHYS / bs, bl_ptr->opt_txfer_len);
- if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) {
- scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_lba_cnt);
- scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_blk_cnt);
+ if (lun != NULL) {
+ bs = lun->be_lun->blocksize;
+ scsi_ulto4b(MAXPHYS / bs, bl_ptr->opt_txfer_len);
+ if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) {
+ scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_lba_cnt);
+ scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_blk_cnt);
+ }
}
scsi_u64to8b(UINT64_MAX, bl_ptr->max_write_same_length);
ctsio->scsi_status = SCSI_STATUS_OK;
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
ctsio->be_move_done = ctl_config_move_done;
ctl_datamove((union ctl_io *)ctsio);
@@ -9554,10 +10181,8 @@ ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len)
{
struct scsi_vpd_logical_block_prov *lbp_ptr;
struct ctl_lun *lun;
- int bs;
lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
- bs = lun->be_lun->blocksize;
ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO);
lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr;
@@ -9588,10 +10213,11 @@ ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len)
lbp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
lbp_ptr->page_code = SVPD_LBP;
- if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP)
+ if (lun != NULL && lun->be_lun->flags & CTL_LUN_FLAG_UNMAP)
lbp_ptr->flags = SVPD_LBP_UNMAP | SVPD_LBP_WS16 | SVPD_LBP_WS10;
ctsio->scsi_status = SCSI_STATUS_OK;
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
ctsio->be_move_done = ctl_config_move_done;
ctl_datamove((union ctl_io *)ctsio);
@@ -9622,6 +10248,12 @@ ctl_inquiry_evpd(struct ctl_scsiio *ctsio)
case SVPD_DEVICE_ID:
retval = ctl_inquiry_evpd_devid(ctsio, alloc_len);
break;
+ case SVPD_SCSI_PORTS:
+ retval = ctl_inquiry_evpd_scsi_ports(ctsio, alloc_len);
+ break;
+ case SVPD_SCSI_TPC:
+ retval = ctl_inquiry_evpd_tpc(ctsio, alloc_len);
+ break;
case SVPD_BLOCK_LIMITS:
retval = ctl_inquiry_evpd_block_limits(ctsio, alloc_len);
break;
@@ -9650,8 +10282,9 @@ ctl_inquiry_std(struct ctl_scsiio *ctsio)
struct scsi_inquiry *cdb;
struct ctl_softc *ctl_softc;
struct ctl_lun *lun;
+ char *val;
uint32_t alloc_len;
- int is_fc;
+ ctl_port_type port_type;
ctl_softc = control_softc;
@@ -9660,13 +10293,10 @@ ctl_inquiry_std(struct ctl_scsiio *ctsio)
* We treat the ioctl front end, and any SCSI adapters, as packetized
* SCSI front ends.
*/
- mtx_lock(&ctl_softc->ctl_lock);
- if (ctl_softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)]->port_type !=
- CTL_PORT_FC)
- is_fc = 0;
- else
- is_fc = 1;
- mtx_unlock(&ctl_softc->ctl_lock);
+ port_type = ctl_softc->ctl_ports[
+ ctl_port_idx(ctsio->io_hdr.nexus.targ_port)]->port_type;
+ if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL)
+ port_type = CTL_PORT_SCSI;
lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
cdb = (struct scsi_inquiry *)ctsio->cdb;
@@ -9745,7 +10375,7 @@ ctl_inquiry_std(struct ctl_scsiio *ctsio)
inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE;
/* RMB in byte 2 is 0 */
- inq_ptr->version = SCSI_REV_SPC3;
+ inq_ptr->version = SCSI_REV_SPC4;
/*
* According to SAM-3, even if a device only supports a single
@@ -9770,17 +10400,18 @@ ctl_inquiry_std(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("additional_length = %d\n",
inq_ptr->additional_length));
- inq_ptr->spc3_flags = SPC3_SID_TPGS_IMPLICIT;
+ inq_ptr->spc3_flags = SPC3_SID_3PC;
+ if (!ctl_is_single)
+ inq_ptr->spc3_flags |= SPC3_SID_TPGS_IMPLICIT;
/* 16 bit addressing */
- if (is_fc == 0)
+ if (port_type == CTL_PORT_SCSI)
inq_ptr->spc2_flags = SPC2_SID_ADDR16;
/* XXX set the SID_MultiP bit here if we're actually going to
respond on multiple ports */
inq_ptr->spc2_flags |= SPC2_SID_MultiP;
/* 16 bit data bus, synchronous transfers */
- /* XXX these flags don't apply for FC */
- if (is_fc == 0)
+ if (port_type == CTL_PORT_SCSI)
inq_ptr->flags = SID_WBus16 | SID_Sync;
/*
* XXX KDM do we want to support tagged queueing on the control
@@ -9794,10 +10425,17 @@ ctl_inquiry_std(struct ctl_scsiio *ctsio)
* We have 8 bytes for the vendor name, and 16 bytes for the device
* name and 4 bytes for the revision.
*/
- strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor));
+ if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options,
+ "vendor")) == NULL) {
+ strcpy(inq_ptr->vendor, CTL_VENDOR);
+ } else {
+ memset(inq_ptr->vendor, ' ', sizeof(inq_ptr->vendor));
+ strncpy(inq_ptr->vendor, val,
+ min(sizeof(inq_ptr->vendor), strlen(val)));
+ }
if (lun == NULL) {
strcpy(inq_ptr->product, CTL_DIRECT_PRODUCT);
- } else {
+ } else if ((val = ctl_get_opt(&lun->be_lun->options, "product")) == NULL) {
switch (lun->be_lun->lun_type) {
case T_DIRECT:
strcpy(inq_ptr->product, CTL_DIRECT_PRODUCT);
@@ -9809,13 +10447,24 @@ ctl_inquiry_std(struct ctl_scsiio *ctsio)
strcpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT);
break;
}
+ } else {
+ memset(inq_ptr->product, ' ', sizeof(inq_ptr->product));
+ strncpy(inq_ptr->product, val,
+ min(sizeof(inq_ptr->product), strlen(val)));
}
/*
* XXX make this a macro somewhere so it automatically gets
* incremented when we make changes.
*/
- strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision));
+ if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options,
+ "revision")) == NULL) {
+ strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision));
+ } else {
+ memset(inq_ptr->revision, ' ', sizeof(inq_ptr->revision));
+ strncpy(inq_ptr->revision, val,
+ min(sizeof(inq_ptr->revision), strlen(val)));
+ }
/*
* For parallel SCSI, we support double transition and single
@@ -9823,33 +10472,36 @@ ctl_inquiry_std(struct ctl_scsiio *ctsio)
* and Selection) and Information Unit transfers on both the
* control and array devices.
*/
- if (is_fc == 0)
+ if (port_type == CTL_PORT_SCSI)
inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS |
SID_SPI_IUS;
- /* SAM-3 */
- scsi_ulto2b(0x0060, inq_ptr->version1);
- /* SPC-3 (no version claimed) XXX should we claim a version? */
- scsi_ulto2b(0x0300, inq_ptr->version2);
- if (is_fc) {
+ /* SAM-5 (no version claimed) */
+ scsi_ulto2b(0x00A0, inq_ptr->version1);
+ /* SPC-4 (no version claimed) */
+ scsi_ulto2b(0x0460, inq_ptr->version2);
+ if (port_type == CTL_PORT_FC) {
/* FCP-2 ANSI INCITS.350:2003 */
scsi_ulto2b(0x0917, inq_ptr->version3);
- } else {
+ } else if (port_type == CTL_PORT_SCSI) {
/* SPI-4 ANSI INCITS.362:200x */
scsi_ulto2b(0x0B56, inq_ptr->version3);
+ } else if (port_type == CTL_PORT_ISCSI) {
+ /* iSCSI (no version claimed) */
+ scsi_ulto2b(0x0960, inq_ptr->version3);
+ } else if (port_type == CTL_PORT_SAS) {
+ /* SAS (no version claimed) */
+ scsi_ulto2b(0x0BE0, inq_ptr->version3);
}
if (lun == NULL) {
- /* SBC-2 (no version claimed) XXX should we claim a version? */
- scsi_ulto2b(0x0320, inq_ptr->version4);
+ /* SBC-3 (no version claimed) */
+ scsi_ulto2b(0x04C0, inq_ptr->version4);
} else {
switch (lun->be_lun->lun_type) {
case T_DIRECT:
- /*
- * SBC-2 (no version claimed) XXX should we claim a
- * version?
- */
- scsi_ulto2b(0x0320, inq_ptr->version4);
+ /* SBC-3 (no version claimed) */
+ scsi_ulto2b(0x04C0, inq_ptr->version4);
break;
case T_PROCESSOR:
default:
@@ -9859,6 +10511,7 @@ ctl_inquiry_std(struct ctl_scsiio *ctsio)
ctsio->scsi_status = SCSI_STATUS_OK;
if (ctsio->kern_data_len > 0) {
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
ctsio->be_move_done = ctl_config_move_done;
ctl_datamove((union ctl_io *)ctsio);
} else {
@@ -9928,6 +10581,15 @@ ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint32_t *len)
return (1);
switch (io->scsiio.cdb[0]) {
+ case COMPARE_AND_WRITE: {
+ struct scsi_compare_and_write *cdb;
+
+ cdb = (struct scsi_compare_and_write *)io->scsiio.cdb;
+
+ *lba = scsi_8btou64(cdb->addr);
+ *len = cdb->length;
+ break;
+ }
case READ_6:
case WRITE_6: {
struct scsi_rw_6 *cdb;
@@ -10016,6 +10678,33 @@ ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint32_t *len)
*len = scsi_4btoul(cdb->length);
break;
}
+ case VERIFY_10: {
+ struct scsi_verify_10 *cdb;
+
+ cdb = (struct scsi_verify_10 *)io->scsiio.cdb;
+
+ *lba = scsi_4btoul(cdb->addr);
+ *len = scsi_2btoul(cdb->length);
+ break;
+ }
+ case VERIFY_12: {
+ struct scsi_verify_12 *cdb;
+
+ cdb = (struct scsi_verify_12 *)io->scsiio.cdb;
+
+ *lba = scsi_4btoul(cdb->addr);
+ *len = scsi_4btoul(cdb->length);
+ break;
+ }
+ case VERIFY_16: {
+ struct scsi_verify_16 *cdb;
+
+ cdb = (struct scsi_verify_16 *)io->scsiio.cdb;
+
+ *lba = scsi_8btou64(cdb->addr);
+ *len = scsi_4btoul(cdb->length);
+ break;
+ }
default:
return (1);
break; /* NOTREACHED */
@@ -10060,7 +10749,7 @@ ctl_extent_check(union ctl_io *io1, union ctl_io *io2)
static ctl_action
ctl_check_for_blockage(union ctl_io *pending_io, union ctl_io *ooa_io)
{
- struct ctl_cmd_entry *pending_entry, *ooa_entry;
+ const struct ctl_cmd_entry *pending_entry, *ooa_entry;
ctl_serialize_action *serialize_row;
/*
@@ -10133,8 +10822,8 @@ ctl_check_for_blockage(union ctl_io *pending_io, union ctl_io *ooa_io)
|| (ooa_io->scsiio.tag_type == CTL_TAG_ORDERED)))
return (CTL_ACTION_BLOCK);
- pending_entry = &ctl_cmd_table[pending_io->scsiio.cdb[0]];
- ooa_entry = &ctl_cmd_table[ooa_io->scsiio.cdb[0]];
+ pending_entry = ctl_get_cmd_entry(&pending_io->scsiio);
+ ooa_entry = ctl_get_cmd_entry(&ooa_io->scsiio);
serialize_row = ctl_serialize_table[ooa_entry->seridx];
@@ -10173,7 +10862,7 @@ ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
union ctl_io *ooa_io;
ctl_action action;
- mtx_assert(&control_softc->ctl_lock, MA_OWNED);
+ mtx_assert(&lun->lun_lock, MA_OWNED);
/*
* Run back along the OOA queue, starting with the current
@@ -10221,7 +10910,7 @@ ctl_check_blocked(struct ctl_lun *lun)
{
union ctl_io *cur_blocked, *next_blocked;
- mtx_assert(&control_softc->ctl_lock, MA_OWNED);
+ mtx_assert(&lun->lun_lock, MA_OWNED);
/*
* Run forward from the head of the blocked queue, checking each
@@ -10264,9 +10953,8 @@ ctl_check_blocked(struct ctl_lun *lun)
case CTL_ACTION_PASS:
case CTL_ACTION_SKIP: {
struct ctl_softc *softc;
- struct ctl_cmd_entry *entry;
+ const struct ctl_cmd_entry *entry;
uint32_t initidx;
- uint8_t opcode;
int isc_retval;
/*
@@ -10303,8 +10991,7 @@ ctl_check_blocked(struct ctl_lun *lun)
}
break;
}
- opcode = cur_blocked->scsiio.cdb[0];
- entry = &ctl_cmd_table[opcode];
+ entry = ctl_get_cmd_entry(&cur_blocked->scsiio);
softc = control_softc;
initidx = ctl_get_initindex(&cur_blocked->io_hdr.nexus);
@@ -10321,28 +11008,9 @@ ctl_check_blocked(struct ctl_lun *lun)
&cur_blocked->scsiio) == 0) {
cur_blocked->io_hdr.flags |=
CTL_FLAG_IS_WAS_ON_RTR;
- STAILQ_INSERT_TAIL(&lun->ctl_softc->rtr_queue,
- &cur_blocked->io_hdr, links);
- /*
- * In the non CTL_DONE_THREAD case, we need
- * to wake up the work thread here. When
- * we're processing completed requests from
- * the work thread context, we'll pop back
- * around and end up pulling things off the
- * RtR queue. When we aren't processing
- * things from the work thread context,
- * though, we won't ever check the RtR queue.
- * So we need to wake up the thread to clear
- * things off the queue. Otherwise this
- * transaction will just sit on the RtR queue
- * until a new I/O comes in. (Which may or
- * may not happen...)
- */
-#ifndef CTL_DONE_THREAD
- ctl_wakeup_thread();
-#endif
+ ctl_enqueue_rtr(cur_blocked);
} else
- ctl_done_lock(cur_blocked, /*have_lock*/ 1);
+ ctl_done(cur_blocked);
break;
}
default:
@@ -10371,12 +11039,14 @@ ctl_check_blocked(struct ctl_lun *lun)
*/
static int
ctl_scsiio_lun_check(struct ctl_softc *ctl_softc, struct ctl_lun *lun,
- struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio)
+ const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio)
{
int retval;
retval = 0;
+ mtx_assert(&lun->lun_lock, MA_OWNED);
+
/*
* If this shelf is a secondary shelf controller, we have to reject
* any media access commands.
@@ -10468,7 +11138,7 @@ static void
ctl_failover_io(union ctl_io *io, int have_lock)
{
ctl_set_busy(&io->scsiio);
- ctl_done_lock(io, have_lock);
+ ctl_done(io);
}
static void
@@ -10492,6 +11162,8 @@ ctl_failover(void)
* We'll either abort them or delete them below, depending on
* which HA mode we're in.
*/
+#ifdef notyet
+ mtx_lock(&ctl_softc->queue_lock);
for (io = (union ctl_io *)STAILQ_FIRST(&ctl_softc->rtr_queue);
io != NULL; io = next_io) {
next_io = (union ctl_io *)STAILQ_NEXT(&io->io_hdr, links);
@@ -10499,6 +11171,8 @@ ctl_failover(void)
STAILQ_REMOVE(&ctl_softc->rtr_queue, &io->io_hdr,
ctl_io_hdr, links);
}
+ mtx_unlock(&ctl_softc->queue_lock);
+#endif
for (lun_idx=0; lun_idx < ctl_softc->num_luns; lun_idx++) {
lun = ctl_softc->ctl_luns[lun_idx];
@@ -10606,8 +11280,7 @@ ctl_failover(void)
CTL_FLAG_FAILOVER;
} else {
ctl_set_busy(&pending_io->scsiio);
- ctl_done_lock(pending_io,
- /*have_lock*/1);
+ ctl_done(pending_io);
}
}
@@ -10615,7 +11288,7 @@ ctl_failover(void)
* Build Unit Attention
*/
for (i = 0; i < CTL_MAX_INITIATORS; i++) {
- lun->pending_sense[i].ua_pending |=
+ lun->pending_ua[i] |=
CTL_UA_ASYM_ACC_CHANGE;
}
} else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0)
@@ -10639,8 +11312,7 @@ ctl_failover(void)
CTL_FLAG_IS_WAS_ON_RTR) == 0) {
pending_io->io_hdr.flags |=
CTL_FLAG_IS_WAS_ON_RTR;
- STAILQ_INSERT_TAIL(&ctl_softc->rtr_queue,
- &pending_io->io_hdr, links);
+ ctl_enqueue_rtr(pending_io);
}
#if 0
else
@@ -10683,22 +11355,18 @@ ctl_failover(void)
case CTL_ACTION_SKIP:
pending_io->io_hdr.flags |=
CTL_FLAG_IS_WAS_ON_RTR;
- STAILQ_INSERT_TAIL(
- &ctl_softc->rtr_queue,
- &pending_io->io_hdr, links);
+ ctl_enqueue_rtr(pending_io);
break;
case CTL_ACTION_OVERLAP:
ctl_set_overlapped_cmd(
(struct ctl_scsiio *)pending_io);
- ctl_done_lock(pending_io,
- /*have_lock*/ 1);
+ ctl_done(pending_io);
break;
case CTL_ACTION_OVERLAP_TAG:
ctl_set_overlapped_tag(
(struct ctl_scsiio *)pending_io,
pending_io->scsiio.tag_num & 0xff);
- ctl_done_lock(pending_io,
- /*have_lock*/ 1);
+ ctl_done(pending_io);
break;
case CTL_ACTION_ERROR:
default:
@@ -10706,8 +11374,7 @@ ctl_failover(void)
(struct ctl_scsiio *)pending_io,
0, // sks_valid
0); //retry count
- ctl_done_lock(pending_io,
- /*have_lock*/ 1);
+ ctl_done(pending_io);
break;
}
}
@@ -10716,7 +11383,7 @@ ctl_failover(void)
* Build Unit Attention
*/
for (i = 0; i < CTL_MAX_INITIATORS; i++) {
- lun->pending_sense[i].ua_pending |=
+ lun->pending_ua[i] |=
CTL_UA_ASYM_ACC_CHANGE;
}
} else {
@@ -10732,8 +11399,7 @@ static int
ctl_scsiio_precheck(struct ctl_softc *ctl_softc, struct ctl_scsiio *ctsio)
{
struct ctl_lun *lun;
- struct ctl_cmd_entry *entry;
- uint8_t opcode;
+ const struct ctl_cmd_entry *entry;
uint32_t initidx, targ_lun;
int retval;
@@ -10741,13 +11407,7 @@ ctl_scsiio_precheck(struct ctl_softc *ctl_softc, struct ctl_scsiio *ctsio)
lun = NULL;
- opcode = ctsio->cdb[0];
-
- mtx_lock(&ctl_softc->ctl_lock);
-
- targ_lun = ctsio->io_hdr.nexus.targ_lun;
- if (ctsio->io_hdr.nexus.lun_map_fn != NULL)
- targ_lun = ctsio->io_hdr.nexus.lun_map_fn(ctsio->io_hdr.nexus.lun_map_arg, targ_lun);
+ targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
if ((targ_lun < CTL_MAX_LUNS)
&& (ctl_softc->ctl_luns[targ_lun] != NULL)) {
lun = ctl_softc->ctl_luns[targ_lun];
@@ -10765,13 +11425,27 @@ ctl_scsiio_precheck(struct ctl_softc *ctl_softc, struct ctl_scsiio *ctsio)
if (lun->be_lun->lun_type == T_PROCESSOR) {
ctsio->io_hdr.flags |= CTL_FLAG_CONTROL_DEV;
}
+
+ /*
+ * Every I/O goes into the OOA queue for a
+ * particular LUN, and stays there until completion.
+ */
+ mtx_lock(&lun->lun_lock);
+ TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr,
+ ooa_links);
}
} else {
ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL;
ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL;
}
- entry = &ctl_cmd_table[opcode];
+	/* Get command entry and return error if it is unsupported. */
+ entry = ctl_validate_command(ctsio);
+ if (entry == NULL) {
+ if (lun)
+ mtx_unlock(&lun->lun_lock);
+ return (retval);
+ }
ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK;
@@ -10784,66 +11458,42 @@ ctl_scsiio_precheck(struct ctl_softc *ctl_softc, struct ctl_scsiio *ctsio)
* it on the rtr queue.
*/
if (lun == NULL) {
- if (entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS)
- goto queue_rtr;
+ if (entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) {
+ ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
+ ctl_enqueue_rtr((union ctl_io *)ctsio);
+ return (retval);
+ }
ctl_set_unsupported_lun(ctsio);
- mtx_unlock(&ctl_softc->ctl_lock);
ctl_done((union ctl_io *)ctsio);
CTL_DEBUG_PRINT(("ctl_scsiio_precheck: bailing out due to invalid LUN\n"));
- goto bailout;
+ return (retval);
} else {
/*
- * Every I/O goes into the OOA queue for a particular LUN, and
- * stays there until completion.
- */
- TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
-
- /*
* Make sure we support this particular command on this LUN.
* e.g., we don't support writes to the control LUN.
*/
- switch (lun->be_lun->lun_type) {
- case T_PROCESSOR:
- if (((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0)
- && ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS)
- == 0)) {
- ctl_set_invalid_opcode(ctsio);
- mtx_unlock(&ctl_softc->ctl_lock);
- ctl_done((union ctl_io *)ctsio);
- goto bailout;
- }
- break;
- case T_DIRECT:
- if (((entry->flags & CTL_CMD_FLAG_OK_ON_SLUN) == 0)
- && ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS)
- == 0)){
- ctl_set_invalid_opcode(ctsio);
- mtx_unlock(&ctl_softc->ctl_lock);
- ctl_done((union ctl_io *)ctsio);
- goto bailout;
- }
- break;
- default:
- printf("Unsupported CTL LUN type %d\n",
- lun->be_lun->lun_type);
- panic("Unsupported CTL LUN type %d\n",
- lun->be_lun->lun_type);
- break; /* NOTREACHED */
+ if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) {
+ mtx_unlock(&lun->lun_lock);
+ ctl_set_invalid_opcode(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (retval);
}
}
initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
+#ifdef CTL_WITH_CA
/*
* If we've got a request sense, it'll clear the contingent
* allegiance condition. Otherwise, if we have a CA condition for
* this initiator, clear it, because it sent down a command other
* than request sense.
*/
- if ((opcode != REQUEST_SENSE)
+ if ((ctsio->cdb[0] != REQUEST_SENSE)
&& (ctl_is_set(lun->have_ca, initidx)))
ctl_clear_mask(lun->have_ca, initidx);
+#endif
/*
* If the command has this flag set, it handles its own unit
@@ -10870,7 +11520,7 @@ ctl_scsiio_precheck(struct ctl_softc *ctl_softc, struct ctl_scsiio *ctsio)
if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) {
ctl_ua_type ua_type;
- ua_type = lun->pending_sense[initidx].ua_pending;
+ ua_type = lun->pending_ua[initidx];
if (ua_type != CTL_UA_NONE) {
scsi_sense_data_type sense_format;
@@ -10888,20 +11538,19 @@ ctl_scsiio_precheck(struct ctl_softc *ctl_softc, struct ctl_scsiio *ctsio)
ctsio->io_hdr.status = CTL_SCSI_ERROR |
CTL_AUTOSENSE;
ctsio->sense_len = SSD_FULL_SIZE;
- lun->pending_sense[initidx].ua_pending &=
- ~ua_type;
- mtx_unlock(&ctl_softc->ctl_lock);
+ lun->pending_ua[initidx] &= ~ua_type;
+ mtx_unlock(&lun->lun_lock);
ctl_done((union ctl_io *)ctsio);
- goto bailout;
+ return (retval);
}
}
}
if (ctl_scsiio_lun_check(ctl_softc, lun, entry, ctsio) != 0) {
- mtx_unlock(&ctl_softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
ctl_done((union ctl_io *)ctsio);
- goto bailout;
+ return (retval);
}
/*
@@ -10937,7 +11586,7 @@ ctl_scsiio_precheck(struct ctl_softc *ctl_softc, struct ctl_scsiio *ctsio)
CTL_HA_STATUS_SUCCESS) {
printf("CTL:precheck, ctl_ha_msg_send returned %d\n",
isc_retval);
- printf("CTL:opcode is %x\n",opcode);
+ printf("CTL:opcode is %x\n", ctsio->cdb[0]);
} else {
#if 0
printf("CTL:Precheck sent msg, opcode is %x\n",opcode);
@@ -10951,7 +11600,8 @@ ctl_scsiio_precheck(struct ctl_softc *ctl_softc, struct ctl_scsiio *ctsio)
* so that we have an idea of what we're waiting for from
* the other side.
*/
- goto bailout_unlock;
+ mtx_unlock(&lun->lun_lock);
+ return (retval);
}
switch (ctl_check_ooa(lun, (union ctl_io *)ctsio,
@@ -10961,59 +11611,115 @@ ctl_scsiio_precheck(struct ctl_softc *ctl_softc, struct ctl_scsiio *ctsio)
ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED;
TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr,
blocked_links);
- goto bailout_unlock;
- break; /* NOTREACHED */
+ mtx_unlock(&lun->lun_lock);
+ return (retval);
case CTL_ACTION_PASS:
case CTL_ACTION_SKIP:
- goto queue_rtr;
- break; /* NOTREACHED */
+ ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
+ mtx_unlock(&lun->lun_lock);
+ ctl_enqueue_rtr((union ctl_io *)ctsio);
+ break;
case CTL_ACTION_OVERLAP:
+ mtx_unlock(&lun->lun_lock);
ctl_set_overlapped_cmd(ctsio);
- mtx_unlock(&ctl_softc->ctl_lock);
ctl_done((union ctl_io *)ctsio);
- goto bailout;
- break; /* NOTREACHED */
+ break;
case CTL_ACTION_OVERLAP_TAG:
+ mtx_unlock(&lun->lun_lock);
ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff);
- mtx_unlock(&ctl_softc->ctl_lock);
ctl_done((union ctl_io *)ctsio);
- goto bailout;
- break; /* NOTREACHED */
+ break;
case CTL_ACTION_ERROR:
default:
+ mtx_unlock(&lun->lun_lock);
ctl_set_internal_failure(ctsio,
/*sks_valid*/ 0,
/*retry_count*/ 0);
- mtx_unlock(&ctl_softc->ctl_lock);
ctl_done((union ctl_io *)ctsio);
- goto bailout;
- break; /* NOTREACHED */
+ break;
+ }
+ return (retval);
+}
+
+const struct ctl_cmd_entry *
+ctl_get_cmd_entry(struct ctl_scsiio *ctsio)
+{
+ const struct ctl_cmd_entry *entry;
+ int service_action;
+
+ entry = &ctl_cmd_table[ctsio->cdb[0]];
+ if (entry->flags & CTL_CMD_FLAG_SA5) {
+ service_action = ctsio->cdb[1] & SERVICE_ACTION_MASK;
+ entry = &((const struct ctl_cmd_entry *)
+ entry->execute)[service_action];
}
+ return (entry);
+}
- goto bailout_unlock;
+const struct ctl_cmd_entry *
+ctl_validate_command(struct ctl_scsiio *ctsio)
+{
+ const struct ctl_cmd_entry *entry;
+ int i;
+ uint8_t diff;
-queue_rtr:
- ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
- STAILQ_INSERT_TAIL(&ctl_softc->rtr_queue, &ctsio->io_hdr, links);
+ entry = ctl_get_cmd_entry(ctsio);
+ if (entry->execute == NULL) {
+ ctl_set_invalid_opcode(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (NULL);
+ }
+ KASSERT(entry->length > 0,
+ ("Not defined length for command 0x%02x/0x%02x",
+ ctsio->cdb[0], ctsio->cdb[1]));
+ for (i = 1; i < entry->length; i++) {
+ diff = ctsio->cdb[i] & ~entry->usage[i - 1];
+ if (diff == 0)
+ continue;
+ ctl_set_invalid_field(ctsio,
+ /*sks_valid*/ 1,
+ /*command*/ 1,
+ /*field*/ i,
+ /*bit_valid*/ 1,
+ /*bit*/ fls(diff) - 1);
+ ctl_done((union ctl_io *)ctsio);
+ return (NULL);
+ }
+ return (entry);
+}
-bailout_unlock:
- mtx_unlock(&ctl_softc->ctl_lock);
+static int
+ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry)
+{
-bailout:
- return (retval);
+ switch (lun_type) {
+ case T_PROCESSOR:
+ if (((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0) &&
+ ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) == 0))
+ return (0);
+ break;
+ case T_DIRECT:
+ if (((entry->flags & CTL_CMD_FLAG_OK_ON_SLUN) == 0) &&
+ ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) == 0))
+ return (0);
+ break;
+ default:
+ return (0);
+ }
+ return (1);
}
static int
ctl_scsiio(struct ctl_scsiio *ctsio)
{
int retval;
- struct ctl_cmd_entry *entry;
+ const struct ctl_cmd_entry *entry;
retval = CTL_RETVAL_COMPLETE;
CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0]));
- entry = &ctl_cmd_table[ctsio->cdb[0]];
+ entry = ctl_get_cmd_entry(ctsio);
/*
* If this I/O has been aborted, just send it straight to
@@ -11069,8 +11775,10 @@ ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io,
}
retval = 0;
+ mtx_lock(&ctl_softc->ctl_lock);
STAILQ_FOREACH(lun, &ctl_softc->lun_list, links)
retval += ctl_lun_reset(lun, io, ua_type);
+ mtx_unlock(&ctl_softc->ctl_lock);
return (retval);
}
@@ -11105,6 +11813,7 @@ ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type)
#endif
int i;
+ mtx_lock(&lun->lun_lock);
/*
* Run through the OOA queue and abort each I/O.
*/
@@ -11113,7 +11822,7 @@ ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type)
#endif
for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
- xio->io_hdr.flags |= CTL_FLAG_ABORT;
+ xio->io_hdr.flags |= CTL_FLAG_ABORT | CTL_FLAG_ABORT_STATUS;
}
/*
@@ -11124,7 +11833,7 @@ ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type)
for (i = 0; i < CTL_MAX_INITIATORS; i++) {
if (initindex == i)
continue;
- lun->pending_sense[i].ua_pending |= ua_type;
+ lun->pending_ua[i] |= ua_type;
}
#endif
@@ -11139,10 +11848,116 @@ ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type)
lun->flags &= ~CTL_LUN_RESERVED;
for (i = 0; i < CTL_MAX_INITIATORS; i++) {
+#ifdef CTL_WITH_CA
ctl_clear_mask(lun->have_ca, i);
- lun->pending_sense[i].ua_pending |= ua_type;
+#endif
+ lun->pending_ua[i] |= ua_type;
+ }
+ mtx_unlock(&lun->lun_lock);
+
+ return (0);
+}
+
+static int
+ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id,
+ int other_sc)
+{
+ union ctl_io *xio;
+ int found;
+
+ mtx_assert(&lun->lun_lock, MA_OWNED);
+
+ /*
+ * Run through the OOA queue and attempt to find the given I/O.
+ * The target port, initiator ID, tag type and tag number have to
+ * match the values that we got from the initiator. If we have an
+ * untagged command to abort, simply abort the first untagged command
+ * we come to. We only allow one untagged command at a time of course.
+ */
+ for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
+ xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
+
+ if ((targ_port == UINT32_MAX ||
+ targ_port == xio->io_hdr.nexus.targ_port) &&
+ (init_id == UINT32_MAX ||
+ init_id == xio->io_hdr.nexus.initid.id)) {
+ if (targ_port != xio->io_hdr.nexus.targ_port ||
+ init_id != xio->io_hdr.nexus.initid.id)
+ xio->io_hdr.flags |= CTL_FLAG_ABORT_STATUS;
+ xio->io_hdr.flags |= CTL_FLAG_ABORT;
+ found = 1;
+ if (!other_sc && !(lun->flags & CTL_LUN_PRIMARY_SC)) {
+ union ctl_ha_msg msg_info;
+
+ msg_info.hdr.nexus = xio->io_hdr.nexus;
+ msg_info.task.task_action = CTL_TASK_ABORT_TASK;
+ msg_info.task.tag_num = xio->scsiio.tag_num;
+ msg_info.task.tag_type = xio->scsiio.tag_type;
+ msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
+ msg_info.hdr.original_sc = NULL;
+ msg_info.hdr.serializing_sc = NULL;
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL,
+ (void *)&msg_info, sizeof(msg_info), 0);
+ }
+ }
}
+ return (found);
+}
+static int
+ctl_abort_task_set(union ctl_io *io)
+{
+ struct ctl_softc *softc = control_softc;
+ struct ctl_lun *lun;
+ uint32_t targ_lun;
+
+ /*
+ * Look up the LUN.
+ */
+ targ_lun = io->io_hdr.nexus.targ_mapped_lun;
+ mtx_lock(&softc->ctl_lock);
+ if ((targ_lun < CTL_MAX_LUNS) && (softc->ctl_luns[targ_lun] != NULL))
+ lun = softc->ctl_luns[targ_lun];
+ else {
+ mtx_unlock(&softc->ctl_lock);
+ return (1);
+ }
+
+ mtx_lock(&lun->lun_lock);
+ mtx_unlock(&softc->ctl_lock);
+ if (io->taskio.task_action == CTL_TASK_ABORT_TASK_SET) {
+ ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port,
+ io->io_hdr.nexus.initid.id,
+ (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0);
+ } else { /* CTL_TASK_CLEAR_TASK_SET */
+ ctl_abort_tasks_lun(lun, UINT32_MAX, UINT32_MAX,
+ (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0);
+ }
+ mtx_unlock(&lun->lun_lock);
+ return (0);
+}
+
+static int
+ctl_i_t_nexus_reset(union ctl_io *io)
+{
+ struct ctl_softc *softc = control_softc;
+ struct ctl_lun *lun;
+ uint32_t initindex;
+
+ initindex = ctl_get_initindex(&io->io_hdr.nexus);
+ mtx_lock(&softc->ctl_lock);
+ STAILQ_FOREACH(lun, &softc->lun_list, links) {
+ mtx_lock(&lun->lun_lock);
+ ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port,
+ io->io_hdr.nexus.initid.id,
+ (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0);
+#ifdef CTL_WITH_CA
+ ctl_clear_mask(lun->have_ca, initindex);
+#endif
+ lun->pending_ua[initindex] |= CTL_UA_I_T_NEXUS_LOSS;
+ mtx_unlock(&lun->lun_lock);
+ }
+ mtx_unlock(&softc->ctl_lock);
return (0);
}
@@ -11165,20 +11980,23 @@ ctl_abort_task(union ctl_io *io)
/*
* Look up the LUN.
*/
- targ_lun = io->io_hdr.nexus.targ_lun;
- if (io->io_hdr.nexus.lun_map_fn != NULL)
- targ_lun = io->io_hdr.nexus.lun_map_fn(io->io_hdr.nexus.lun_map_arg, targ_lun);
+ targ_lun = io->io_hdr.nexus.targ_mapped_lun;
+ mtx_lock(&ctl_softc->ctl_lock);
if ((targ_lun < CTL_MAX_LUNS)
&& (ctl_softc->ctl_luns[targ_lun] != NULL))
lun = ctl_softc->ctl_luns[targ_lun];
- else
- goto bailout;
+ else {
+ mtx_unlock(&ctl_softc->ctl_lock);
+ return (1);
+ }
#if 0
printf("ctl_abort_task: called for lun %lld, tag %d type %d\n",
lun->lun, io->taskio.tag_num, io->taskio.tag_type);
#endif
+ mtx_lock(&lun->lun_lock);
+ mtx_unlock(&ctl_softc->ctl_lock);
/*
* Run through the OOA queue and attempt to find the given I/O.
* The target port, initiator ID, tag type and tag number have to
@@ -11270,8 +12088,7 @@ ctl_abort_task(union ctl_io *io)
}
}
}
-
-bailout:
+ mtx_unlock(&lun->lun_lock);
if (found == 0) {
/*
@@ -11288,166 +12105,123 @@ bailout:
io->io_hdr.nexus.targ_lun, io->taskio.tag_num,
io->taskio.tag_type);
#endif
- return (1);
- } else
- return (0);
+ }
+ return (0);
}
-/*
- * This routine cannot block! It must be callable from an interrupt
- * handler as well as from the work thread.
- */
static void
-ctl_run_task_queue(struct ctl_softc *ctl_softc)
+ctl_run_task(union ctl_io *io)
{
- union ctl_io *io, *next_io;
-
- mtx_assert(&ctl_softc->ctl_lock, MA_OWNED);
-
- CTL_DEBUG_PRINT(("ctl_run_task_queue\n"));
-
- for (io = (union ctl_io *)STAILQ_FIRST(&ctl_softc->task_queue);
- io != NULL; io = next_io) {
- int retval;
- const char *task_desc;
+ struct ctl_softc *ctl_softc = control_softc;
+ int retval = 1;
+ const char *task_desc;
- next_io = (union ctl_io *)STAILQ_NEXT(&io->io_hdr, links);
+ CTL_DEBUG_PRINT(("ctl_run_task\n"));
- retval = 0;
+ KASSERT(io->io_hdr.io_type == CTL_IO_TASK,
+ ("ctl_run_task: Unextected io_type %d\n",
+ io->io_hdr.io_type));
- switch (io->io_hdr.io_type) {
- case CTL_IO_TASK: {
- task_desc = ctl_scsi_task_string(&io->taskio);
- if (task_desc != NULL) {
+ task_desc = ctl_scsi_task_string(&io->taskio);
+ if (task_desc != NULL) {
#ifdef NEEDTOPORT
- csevent_log(CSC_CTL | CSC_SHELF_SW |
- CTL_TASK_REPORT,
- csevent_LogType_Trace,
- csevent_Severity_Information,
- csevent_AlertLevel_Green,
- csevent_FRU_Firmware,
- csevent_FRU_Unknown,
- "CTL: received task: %s",task_desc);
+ csevent_log(CSC_CTL | CSC_SHELF_SW |
+ CTL_TASK_REPORT,
+ csevent_LogType_Trace,
+ csevent_Severity_Information,
+ csevent_AlertLevel_Green,
+ csevent_FRU_Firmware,
+ csevent_FRU_Unknown,
+ "CTL: received task: %s",task_desc);
#endif
- } else {
+ } else {
#ifdef NEEDTOPORT
- csevent_log(CSC_CTL | CSC_SHELF_SW |
- CTL_TASK_REPORT,
- csevent_LogType_Trace,
- csevent_Severity_Information,
- csevent_AlertLevel_Green,
- csevent_FRU_Firmware,
- csevent_FRU_Unknown,
- "CTL: received unknown task "
- "type: %d (%#x)",
- io->taskio.task_action,
- io->taskio.task_action);
+ csevent_log(CSC_CTL | CSC_SHELF_SW |
+ CTL_TASK_REPORT,
+ csevent_LogType_Trace,
+ csevent_Severity_Information,
+ csevent_AlertLevel_Green,
+ csevent_FRU_Firmware,
+ csevent_FRU_Unknown,
+ "CTL: received unknown task "
+ "type: %d (%#x)",
+ io->taskio.task_action,
+ io->taskio.task_action);
#endif
- }
- switch (io->taskio.task_action) {
- case CTL_TASK_ABORT_TASK:
- retval = ctl_abort_task(io);
- break;
- case CTL_TASK_ABORT_TASK_SET:
- break;
- case CTL_TASK_CLEAR_ACA:
- break;
- case CTL_TASK_CLEAR_TASK_SET:
- break;
- case CTL_TASK_LUN_RESET: {
- struct ctl_lun *lun;
- uint32_t targ_lun;
- int retval;
-
- targ_lun = io->io_hdr.nexus.targ_lun;
- if (io->io_hdr.nexus.lun_map_fn != NULL)
- targ_lun = io->io_hdr.nexus.lun_map_fn(io->io_hdr.nexus.lun_map_arg, targ_lun);
-
- if ((targ_lun < CTL_MAX_LUNS)
- && (ctl_softc->ctl_luns[targ_lun] != NULL))
- lun = ctl_softc->ctl_luns[targ_lun];
- else {
- retval = 1;
- break;
- }
+ }
+ switch (io->taskio.task_action) {
+ case CTL_TASK_ABORT_TASK:
+ retval = ctl_abort_task(io);
+ break;
+ case CTL_TASK_ABORT_TASK_SET:
+ case CTL_TASK_CLEAR_TASK_SET:
+ retval = ctl_abort_task_set(io);
+ break;
+ case CTL_TASK_CLEAR_ACA:
+ break;
+ case CTL_TASK_I_T_NEXUS_RESET:
+ retval = ctl_i_t_nexus_reset(io);
+ break;
+ case CTL_TASK_LUN_RESET: {
+ struct ctl_lun *lun;
+ uint32_t targ_lun;
- if (!(io->io_hdr.flags &
- CTL_FLAG_FROM_OTHER_SC)) {
- union ctl_ha_msg msg_info;
+ targ_lun = io->io_hdr.nexus.targ_mapped_lun;
+ mtx_lock(&ctl_softc->ctl_lock);
+ if ((targ_lun < CTL_MAX_LUNS)
+ && (ctl_softc->ctl_luns[targ_lun] != NULL))
+ lun = ctl_softc->ctl_luns[targ_lun];
+ else {
+ mtx_unlock(&ctl_softc->ctl_lock);
+ retval = 1;
+ break;
+ }
- io->io_hdr.flags |=
- CTL_FLAG_SENT_2OTHER_SC;
- msg_info.hdr.msg_type =
- CTL_MSG_MANAGE_TASKS;
- msg_info.hdr.nexus = io->io_hdr.nexus;
- msg_info.task.task_action =
- CTL_TASK_LUN_RESET;
- msg_info.hdr.original_sc = NULL;
- msg_info.hdr.serializing_sc = NULL;
- if (CTL_HA_STATUS_SUCCESS !=
- ctl_ha_msg_send(CTL_HA_CHAN_CTL,
- (void *)&msg_info,
- sizeof(msg_info), 0)) {
- }
- }
+ if (!(io->io_hdr.flags &
+ CTL_FLAG_FROM_OTHER_SC)) {
+ union ctl_ha_msg msg_info;
- retval = ctl_lun_reset(lun, io,
- CTL_UA_LUN_RESET);
- break;
+ io->io_hdr.flags |=
+ CTL_FLAG_SENT_2OTHER_SC;
+ msg_info.hdr.msg_type =
+ CTL_MSG_MANAGE_TASKS;
+ msg_info.hdr.nexus = io->io_hdr.nexus;
+ msg_info.task.task_action =
+ CTL_TASK_LUN_RESET;
+ msg_info.hdr.original_sc = NULL;
+ msg_info.hdr.serializing_sc = NULL;
+ if (CTL_HA_STATUS_SUCCESS !=
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL,
+ (void *)&msg_info,
+ sizeof(msg_info), 0)) {
}
- case CTL_TASK_TARGET_RESET:
- retval = ctl_target_reset(ctl_softc, io,
- CTL_UA_TARG_RESET);
- break;
- case CTL_TASK_BUS_RESET:
- retval = ctl_bus_reset(ctl_softc, io);
- break;
- case CTL_TASK_PORT_LOGIN:
- break;
- case CTL_TASK_PORT_LOGOUT:
- break;
- default:
- printf("ctl_run_task_queue: got unknown task "
- "management event %d\n",
- io->taskio.task_action);
- break;
- }
- if (retval == 0)
- io->io_hdr.status = CTL_SUCCESS;
- else
- io->io_hdr.status = CTL_ERROR;
-
- STAILQ_REMOVE(&ctl_softc->task_queue, &io->io_hdr,
- ctl_io_hdr, links);
- /*
- * This will queue this I/O to the done queue, but the
- * work thread won't be able to process it until we
- * return and the lock is released.
- */
- ctl_done_lock(io, /*have_lock*/ 1);
- break;
}
- default: {
- printf("%s: invalid I/O type %d msg %d cdb %x"
- " iptl: %ju:%d:%ju:%d tag 0x%04x\n",
- __func__, io->io_hdr.io_type,
- io->io_hdr.msg_type, io->scsiio.cdb[0],
- (uintmax_t)io->io_hdr.nexus.initid.id,
- io->io_hdr.nexus.targ_port,
- (uintmax_t)io->io_hdr.nexus.targ_target.id,
- io->io_hdr.nexus.targ_lun /* XXX */,
- (io->io_hdr.io_type == CTL_IO_TASK) ?
- io->taskio.tag_num : io->scsiio.tag_num);
- STAILQ_REMOVE(&ctl_softc->task_queue, &io->io_hdr,
- ctl_io_hdr, links);
- ctl_free_io(io);
- break;
- }
- }
+ retval = ctl_lun_reset(lun, io,
+ CTL_UA_LUN_RESET);
+ mtx_unlock(&ctl_softc->ctl_lock);
+ break;
}
-
- ctl_softc->flags &= ~CTL_FLAG_TASK_PENDING;
+ case CTL_TASK_TARGET_RESET:
+ retval = ctl_target_reset(ctl_softc, io, CTL_UA_TARG_RESET);
+ break;
+ case CTL_TASK_BUS_RESET:
+ retval = ctl_bus_reset(ctl_softc, io);
+ break;
+ case CTL_TASK_PORT_LOGIN:
+ break;
+ case CTL_TASK_PORT_LOGOUT:
+ break;
+ default:
+ printf("ctl_run_task: got unknown task management event %d\n",
+ io->taskio.task_action);
+ break;
+ }
+ if (retval == 0)
+ io->io_hdr.status = CTL_SUCCESS;
+ else
+ io->io_hdr.status = CTL_ERROR;
+ ctl_done(io);
}
/*
@@ -11464,52 +12238,44 @@ ctl_handle_isc(union ctl_io *io)
ctl_softc = control_softc;
- targ_lun = io->io_hdr.nexus.targ_lun;
- if (io->io_hdr.nexus.lun_map_fn != NULL)
- targ_lun = io->io_hdr.nexus.lun_map_fn(io->io_hdr.nexus.lun_map_arg, targ_lun);
+ targ_lun = io->io_hdr.nexus.targ_mapped_lun;
lun = ctl_softc->ctl_luns[targ_lun];
switch (io->io_hdr.msg_type) {
case CTL_MSG_SERIALIZE:
- free_io = ctl_serialize_other_sc_cmd(&io->scsiio,
- /*have_lock*/ 0);
+ free_io = ctl_serialize_other_sc_cmd(&io->scsiio);
break;
case CTL_MSG_R2R: {
- uint8_t opcode;
- struct ctl_cmd_entry *entry;
+ const struct ctl_cmd_entry *entry;
/*
* This is only used in SER_ONLY mode.
*/
free_io = 0;
- opcode = io->scsiio.cdb[0];
- entry = &ctl_cmd_table[opcode];
- mtx_lock(&ctl_softc->ctl_lock);
+ entry = ctl_get_cmd_entry(&io->scsiio);
+ mtx_lock(&lun->lun_lock);
if (ctl_scsiio_lun_check(ctl_softc, lun,
entry, (struct ctl_scsiio *)io) != 0) {
- ctl_done_lock(io, /*have_lock*/ 1);
- mtx_unlock(&ctl_softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
+ ctl_done(io);
break;
}
io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
- STAILQ_INSERT_TAIL(&ctl_softc->rtr_queue,
- &io->io_hdr, links);
- mtx_unlock(&ctl_softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
+ ctl_enqueue_rtr(io);
break;
}
case CTL_MSG_FINISH_IO:
if (ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
free_io = 0;
- ctl_done_lock(io, /*have_lock*/ 0);
+ ctl_done(io);
} else {
free_io = 1;
- mtx_lock(&ctl_softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr,
ooa_links);
- STAILQ_REMOVE(&ctl_softc->task_queue,
- &io->io_hdr, ctl_io_hdr, links);
ctl_check_blocked(lun);
- mtx_unlock(&ctl_softc->ctl_lock);
+ mtx_unlock(&lun->lun_lock);
}
break;
case CTL_MSG_PERS_ACTION:
@@ -11519,7 +12285,7 @@ ctl_handle_isc(union ctl_io *io)
break;
case CTL_MSG_BAD_JUJU:
free_io = 0;
- ctl_done_lock(io, /*have_lock*/ 0);
+ ctl_done(io);
break;
case CTL_MSG_DATAMOVE:
/* Only used in XFER mode */
@@ -11550,9 +12316,8 @@ ctl_handle_isc(union ctl_io *io)
static ctl_lun_error_pattern
ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc)
{
- struct ctl_cmd_entry *entry;
+ const struct ctl_cmd_entry *entry;
ctl_lun_error_pattern filtered_pattern, pattern;
- uint8_t opcode;
pattern = desc->error_pattern;
@@ -11567,8 +12332,7 @@ ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc)
if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY)
return (CTL_LUN_PAT_ANY);
- opcode = ctsio->cdb[0];
- entry = &ctl_cmd_table[opcode];
+ entry = ctl_get_cmd_entry(ctsio);
filtered_pattern = entry->pattern & pattern;
@@ -11616,7 +12380,7 @@ ctl_inject_error(struct ctl_lun *lun, union ctl_io *io)
{
struct ctl_error_desc *desc, *desc2;
- mtx_assert(&control_softc->ctl_lock, MA_OWNED);
+ mtx_assert(&lun->lun_lock, MA_OWNED);
STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) {
ctl_lun_error_pattern pattern;
@@ -11732,7 +12496,6 @@ ctl_datamove(union ctl_io *io)
}
#endif /* CTL_TIME_IO */
- mtx_lock(&control_softc->ctl_lock);
#ifdef CTL_IO_DELAY
if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
struct ctl_lun *lun;
@@ -11757,31 +12520,10 @@ ctl_datamove(union ctl_io *io)
if (lun->delay_info.datamove_type ==
CTL_DELAY_TYPE_ONESHOT)
lun->delay_info.datamove_delay = 0;
- mtx_unlock(&control_softc->ctl_lock);
return;
}
}
#endif
- /*
- * If we have any pending task management commands, process them
- * first. This is necessary to eliminate a race condition with the
- * FETD:
- *
- * - FETD submits a task management command, like an abort.
- * - Back end calls fe_datamove() to move the data for the aborted
- * command. The FETD can't really accept it, but if it did, it
- * would end up transmitting data for a command that the initiator
- * told us to abort.
- *
- * We close the race by processing all pending task management
- * commands here (we can't block!), and then check this I/O to see
- * if it has been aborted. If so, return it to the back end with
- * bad status, so the back end can say return an error to the back end
- * and then when the back end returns an error, we can return the
- * aborted command to the FETD, so it can clean up its resources.
- */
- if (control_softc->flags & CTL_FLAG_TASK_PENDING)
- ctl_run_task_queue(control_softc);
/*
* This command has been aborted. Set the port status, so we fail
@@ -11793,9 +12535,7 @@ ctl_datamove(union ctl_io *io)
io->io_hdr.nexus.targ_port,
(uintmax_t)io->io_hdr.nexus.targ_target.id,
io->io_hdr.nexus.targ_lun);
- io->io_hdr.status = CTL_CMD_ABORTED;
io->io_hdr.port_status = 31337;
- mtx_unlock(&control_softc->ctl_lock);
/*
* Note that the backend, in this case, will get the
* callback in its context. In other cases it may get
@@ -11961,7 +12701,7 @@ ctl_datamove(union ctl_io *io)
}
io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
if (io->io_hdr.flags & CTL_FLAG_FAILOVER)
- ctl_failover_io(io, /*have_lock*/ 1);
+ ctl_failover_io(io, /*have_lock*/ 0);
} else {
@@ -11971,7 +12711,6 @@ ctl_datamove(union ctl_io *io)
*/
fe_datamove =
control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove;
- mtx_unlock(&control_softc->ctl_lock);
fe_datamove(io);
}
@@ -12551,36 +13290,24 @@ ctl_datamove_remote(union ctl_io *io)
/*
* Note that we look for an aborted I/O here, but don't do some of
- * the other checks that ctl_datamove() normally does. We don't
- * need to run the task queue, because this I/O is on the ISC
- * queue, which is executed by the work thread after the task queue.
+ * the other checks that ctl_datamove() normally does.
* We don't need to run the datamove delay code, since that should
* have been done if need be on the other controller.
*/
- mtx_lock(&softc->ctl_lock);
-
if (io->io_hdr.flags & CTL_FLAG_ABORT) {
-
printf("%s: tag 0x%04x on (%d:%d:%d:%d) aborted\n", __func__,
io->scsiio.tag_num, io->io_hdr.nexus.initid.id,
io->io_hdr.nexus.targ_port,
io->io_hdr.nexus.targ_target.id,
io->io_hdr.nexus.targ_lun);
- io->io_hdr.status = CTL_CMD_ABORTED;
io->io_hdr.port_status = 31338;
-
- mtx_unlock(&softc->ctl_lock);
-
ctl_send_datamove_done(io, /*have_lock*/ 0);
-
return;
}
if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) {
- mtx_unlock(&softc->ctl_lock);
ctl_datamove_remote_write(io);
} else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN){
- mtx_unlock(&softc->ctl_lock);
ctl_datamove_remote_read(io);
} else {
union ctl_ha_msg msg;
@@ -12616,12 +13343,9 @@ ctl_datamove_remote(union ctl_io *io)
io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
ctl_failover_io(io, /*have_lock*/ 1);
- mtx_unlock(&softc->ctl_lock);
return;
}
- mtx_unlock(&softc->ctl_lock);
-
if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0) >
CTL_HA_STATUS_SUCCESS) {
/* XXX KDM what to do if this fails? */
@@ -12632,7 +13356,7 @@ ctl_datamove_remote(union ctl_io *io)
}
static int
-ctl_process_done(union ctl_io *io, int have_lock)
+ctl_process_done(union ctl_io *io)
{
struct ctl_lun *lun;
struct ctl_softc *ctl_softc;
@@ -12703,17 +13427,13 @@ ctl_process_done(union ctl_io *io, int have_lock)
lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
if (lun == NULL) {
CTL_DEBUG_PRINT(("NULL LUN for lun %d\n",
- io->io_hdr.nexus.targ_lun));
+ io->io_hdr.nexus.targ_mapped_lun));
fe_done(io);
goto bailout;
}
ctl_softc = lun->ctl_softc;
- /*
- * Remove this from the OOA queue.
- */
- if (have_lock == 0)
- mtx_lock(&ctl_softc->ctl_lock);
+ mtx_lock(&lun->lun_lock);
/*
* Check to see if we have any errors to inject here. We only
@@ -12729,134 +13449,39 @@ ctl_process_done(union ctl_io *io, int have_lock)
*
* XXX KDM should we also track I/O latency?
*/
- if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) {
- uint32_t blocksize;
+ if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS &&
+ io->io_hdr.io_type == CTL_IO_SCSI) {
#ifdef CTL_TIME_IO
struct bintime cur_bt;
#endif
+ int type;
- if ((lun->be_lun != NULL)
- && (lun->be_lun->blocksize != 0))
- blocksize = lun->be_lun->blocksize;
+ if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
+ CTL_FLAG_DATA_IN)
+ type = CTL_STATS_READ;
+ else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
+ CTL_FLAG_DATA_OUT)
+ type = CTL_STATS_WRITE;
else
- blocksize = 512;
-
- switch (io->io_hdr.io_type) {
- case CTL_IO_SCSI: {
- int isread;
- struct ctl_lba_len lbalen;
-
- isread = 0;
- switch (io->scsiio.cdb[0]) {
- case READ_6:
- case READ_10:
- case READ_12:
- case READ_16:
- isread = 1;
- /* FALLTHROUGH */
- case WRITE_6:
- case WRITE_10:
- case WRITE_12:
- case WRITE_16:
- case WRITE_VERIFY_10:
- case WRITE_VERIFY_12:
- case WRITE_VERIFY_16:
- memcpy(&lbalen, io->io_hdr.ctl_private[
- CTL_PRIV_LBA_LEN].bytes, sizeof(lbalen));
-
- if (isread) {
- lun->stats.ports[targ_port].bytes[CTL_STATS_READ] +=
- lbalen.len * blocksize;
- lun->stats.ports[targ_port].operations[CTL_STATS_READ]++;
+ type = CTL_STATS_NO_IO;
+ lun->stats.ports[targ_port].bytes[type] +=
+ io->scsiio.kern_total_len;
+ lun->stats.ports[targ_port].operations[type]++;
#ifdef CTL_TIME_IO
- bintime_add(
- &lun->stats.ports[targ_port].dma_time[CTL_STATS_READ],
- &io->io_hdr.dma_bt);
- lun->stats.ports[targ_port].num_dmas[CTL_STATS_READ] +=
- io->io_hdr.num_dmas;
- getbintime(&cur_bt);
- bintime_sub(&cur_bt,
- &io->io_hdr.start_bt);
-
- bintime_add(
- &lun->stats.ports[targ_port].time[CTL_STATS_READ],
- &cur_bt);
-
-#if 0
- cs_prof_gettime(&cur_ticks);
- lun->stats.time[CTL_STATS_READ] +=
- cur_ticks -
- io->io_hdr.start_ticks;
-#endif
-#if 0
- lun->stats.time[CTL_STATS_READ] +=
- jiffies - io->io_hdr.start_time;
-#endif
-#endif /* CTL_TIME_IO */
- } else {
- lun->stats.ports[targ_port].bytes[CTL_STATS_WRITE] +=
- lbalen.len * blocksize;
- lun->stats.ports[targ_port].operations[
- CTL_STATS_WRITE]++;
-
-#ifdef CTL_TIME_IO
- bintime_add(
- &lun->stats.ports[targ_port].dma_time[CTL_STATS_WRITE],
- &io->io_hdr.dma_bt);
- lun->stats.ports[targ_port].num_dmas[CTL_STATS_WRITE] +=
- io->io_hdr.num_dmas;
- getbintime(&cur_bt);
- bintime_sub(&cur_bt,
- &io->io_hdr.start_bt);
-
- bintime_add(
- &lun->stats.ports[targ_port].time[CTL_STATS_WRITE],
- &cur_bt);
-#if 0
- cs_prof_gettime(&cur_ticks);
- lun->stats.ports[targ_port].time[CTL_STATS_WRITE] +=
- cur_ticks -
- io->io_hdr.start_ticks;
- lun->stats.ports[targ_port].time[CTL_STATS_WRITE] +=
- jiffies - io->io_hdr.start_time;
+ bintime_add(&lun->stats.ports[targ_port].dma_time[type],
+ &io->io_hdr.dma_bt);
+ lun->stats.ports[targ_port].num_dmas[type] +=
+ io->io_hdr.num_dmas;
+ getbintime(&cur_bt);
+ bintime_sub(&cur_bt, &io->io_hdr.start_bt);
+ bintime_add(&lun->stats.ports[targ_port].time[type], &cur_bt);
#endif
-#endif /* CTL_TIME_IO */
- }
- break;
- default:
- lun->stats.ports[targ_port].operations[CTL_STATS_NO_IO]++;
-
-#ifdef CTL_TIME_IO
- bintime_add(
- &lun->stats.ports[targ_port].dma_time[CTL_STATS_NO_IO],
- &io->io_hdr.dma_bt);
- lun->stats.ports[targ_port].num_dmas[CTL_STATS_NO_IO] +=
- io->io_hdr.num_dmas;
- getbintime(&cur_bt);
- bintime_sub(&cur_bt, &io->io_hdr.start_bt);
-
- bintime_add(&lun->stats.ports[targ_port].time[CTL_STATS_NO_IO],
- &cur_bt);
-
-#if 0
- cs_prof_gettime(&cur_ticks);
- lun->stats.ports[targ_port].time[CTL_STATS_NO_IO] +=
- cur_ticks -
- io->io_hdr.start_ticks;
- lun->stats.ports[targ_port].time[CTL_STATS_NO_IO] +=
- jiffies - io->io_hdr.start_time;
-#endif
-#endif /* CTL_TIME_IO */
- break;
- }
- break;
- }
- default:
- break;
- }
}
+ /*
+ * Remove this from the OOA queue.
+ */
TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
/*
@@ -12870,8 +13495,13 @@ ctl_process_done(union ctl_io *io, int have_lock)
* left on its OOA queue.
*/
if ((lun->flags & CTL_LUN_INVALID)
- && (TAILQ_FIRST(&lun->ooa_queue) == NULL))
+ && TAILQ_EMPTY(&lun->ooa_queue)) {
+ mtx_unlock(&lun->lun_lock);
+ mtx_lock(&ctl_softc->ctl_lock);
ctl_free_lun(lun);
+ mtx_unlock(&ctl_softc->ctl_lock);
+ } else
+ mtx_unlock(&lun->lun_lock);
/*
* If this command has been aborted, make sure we set the status
@@ -12879,7 +13509,7 @@ ctl_process_done(union ctl_io *io, int have_lock)
* whatever it needs to do to clean up its state.
*/
if (io->io_hdr.flags & CTL_FLAG_ABORT)
- io->io_hdr.status = CTL_CMD_ABORTED;
+ ctl_set_task_aborted(&io->scsiio);
/*
* We print out status for every task management command. For SCSI
@@ -12916,8 +13546,6 @@ ctl_process_done(union ctl_io *io, int have_lock)
if ((time_uptime - ctl_softc->last_print_jiffies) <= 0){
ctl_softc->skipped_prints++;
- if (have_lock == 0)
- mtx_unlock(&ctl_softc->ctl_lock);
} else {
uint32_t skipped_prints;
@@ -12926,8 +13554,6 @@ ctl_process_done(union ctl_io *io, int have_lock)
ctl_softc->skipped_prints = 0;
ctl_softc->last_print_jiffies = time_uptime;
- if (have_lock == 0)
- mtx_unlock(&ctl_softc->ctl_lock);
if (skipped_prints > 0) {
#ifdef NEEDTOPORT
csevent_log(CSC_CTL | CSC_SHELF_SW |
@@ -12944,21 +13570,14 @@ ctl_process_done(union ctl_io *io, int have_lock)
if (bootverbose || verbose > 0)
ctl_io_error_print(io, NULL);
}
- } else {
- if (have_lock == 0)
- mtx_unlock(&ctl_softc->ctl_lock);
}
break;
}
case CTL_IO_TASK:
- if (have_lock == 0)
- mtx_unlock(&ctl_softc->ctl_lock);
if (bootverbose || verbose > 0)
ctl_io_error_print(io, NULL);
break;
default:
- if (have_lock == 0)
- mtx_unlock(&ctl_softc->ctl_lock);
break;
}
@@ -13017,6 +13636,7 @@ bailout:
return (CTL_RETVAL_COMPLETE);
}
+#ifdef CTL_WITH_CA
/*
* Front end should call this if it doesn't do autosense. When the request
* sense comes back in from the initiator, we'll dequeue this and send it.
@@ -13046,8 +13666,7 @@ ctl_queue_sense(union ctl_io *io)
* information.
*/
targ_lun = io->io_hdr.nexus.targ_lun;
- if (io->io_hdr.nexus.lun_map_fn != NULL)
- targ_lun = io->io_hdr.nexus.lun_map_fn(io->io_hdr.nexus.lun_map_arg, targ_lun);
+ targ_lun = ctl_map_lun(io->io_hdr.nexus.targ_port, targ_lun);
if ((targ_lun < CTL_MAX_LUNS)
&& (ctl_softc->ctl_luns[targ_lun] != NULL))
lun = ctl_softc->ctl_luns[targ_lun];
@@ -13056,16 +13675,20 @@ ctl_queue_sense(union ctl_io *io)
initidx = ctl_get_initindex(&io->io_hdr.nexus);
+ mtx_lock(&lun->lun_lock);
/*
* Already have CA set for this LUN...toss the sense information.
*/
- if (ctl_is_set(lun->have_ca, initidx))
+ if (ctl_is_set(lun->have_ca, initidx)) {
+ mtx_unlock(&lun->lun_lock);
goto bailout;
+ }
- memcpy(&lun->pending_sense[initidx].sense, &io->scsiio.sense_data,
- ctl_min(sizeof(lun->pending_sense[initidx].sense),
+ memcpy(&lun->pending_sense[initidx], &io->scsiio.sense_data,
+ ctl_min(sizeof(lun->pending_sense[initidx]),
sizeof(io->scsiio.sense_data)));
ctl_set_mask(lun->have_ca, initidx);
+ mtx_unlock(&lun->lun_lock);
bailout:
mtx_unlock(&ctl_softc->ctl_lock);
@@ -13074,6 +13697,7 @@ bailout:
return (CTL_RETVAL_COMPLETE);
}
+#endif
/*
* Primary command inlet from frontend ports. All SCSI and task I/O
@@ -13093,42 +13717,19 @@ ctl_queue(union ctl_io *io)
getbintime(&io->io_hdr.start_bt);
#endif /* CTL_TIME_IO */
- mtx_lock(&ctl_softc->ctl_lock);
+ /* Map FE-specific LUN ID into global one. */
+ io->io_hdr.nexus.targ_mapped_lun =
+ ctl_map_lun(io->io_hdr.nexus.targ_port, io->io_hdr.nexus.targ_lun);
switch (io->io_hdr.io_type) {
case CTL_IO_SCSI:
- STAILQ_INSERT_TAIL(&ctl_softc->incoming_queue, &io->io_hdr,
- links);
- break;
case CTL_IO_TASK:
- STAILQ_INSERT_TAIL(&ctl_softc->task_queue, &io->io_hdr, links);
- /*
- * Set the task pending flag. This is necessary to close a
- * race condition with the FETD:
- *
- * - FETD submits a task management command, like an abort.
- * - Back end calls fe_datamove() to move the data for the
- * aborted command. The FETD can't really accept it, but
- * if it did, it would end up transmitting data for a
- * command that the initiator told us to abort.
- *
- * We close the race condition by setting the flag here,
- * and checking it in ctl_datamove(), before calling the
- * FETD's fe_datamove routine. If we've got a task
- * pending, we run the task queue and then check to see
- * whether our particular I/O has been aborted.
- */
- ctl_softc->flags |= CTL_FLAG_TASK_PENDING;
+ ctl_enqueue_incoming(io);
break;
default:
- mtx_unlock(&ctl_softc->ctl_lock);
printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type);
- return (-EINVAL);
- break; /* NOTREACHED */
+ return (EINVAL);
}
- mtx_unlock(&ctl_softc->ctl_lock);
-
- ctl_wakeup_thread();
return (CTL_RETVAL_COMPLETE);
}
@@ -13140,23 +13741,17 @@ ctl_done_timer_wakeup(void *arg)
union ctl_io *io;
io = (union ctl_io *)arg;
- ctl_done_lock(io, /*have_lock*/ 0);
+ ctl_done(io);
}
#endif /* CTL_IO_DELAY */
void
-ctl_done_lock(union ctl_io *io, int have_lock)
+ctl_done(union ctl_io *io)
{
struct ctl_softc *ctl_softc;
-#ifndef CTL_DONE_THREAD
- union ctl_io *xio;
-#endif /* !CTL_DONE_THREAD */
ctl_softc = control_softc;
- if (have_lock == 0)
- mtx_lock(&ctl_softc->ctl_lock);
-
/*
* Enable this to catch duplicate completion issues.
*/
@@ -13187,11 +13782,8 @@ ctl_done_lock(union ctl_io *io, int have_lock)
* This is an internal copy of an I/O, and should not go through
* the normal done processing logic.
*/
- if (io->io_hdr.flags & CTL_FLAG_INT_COPY) {
- if (have_lock == 0)
- mtx_unlock(&ctl_softc->ctl_lock);
+ if (io->io_hdr.flags & CTL_FLAG_INT_COPY)
return;
- }
/*
* We need to send a msg to the serializing shelf to finish the IO
@@ -13236,38 +13828,12 @@ ctl_done_lock(union ctl_io *io, int have_lock)
ctl_done_timer_wakeup, io);
if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT)
lun->delay_info.done_delay = 0;
- if (have_lock == 0)
- mtx_unlock(&ctl_softc->ctl_lock);
return;
}
}
#endif /* CTL_IO_DELAY */
- STAILQ_INSERT_TAIL(&ctl_softc->done_queue, &io->io_hdr, links);
-
-#ifdef CTL_DONE_THREAD
- if (have_lock == 0)
- mtx_unlock(&ctl_softc->ctl_lock);
-
- ctl_wakeup_thread();
-#else /* CTL_DONE_THREAD */
- for (xio = (union ctl_io *)STAILQ_FIRST(&ctl_softc->done_queue);
- xio != NULL;
- xio =(union ctl_io *)STAILQ_FIRST(&ctl_softc->done_queue)) {
-
- STAILQ_REMOVE_HEAD(&ctl_softc->done_queue, links);
-
- ctl_process_done(xio, /*have_lock*/ 1);
- }
- if (have_lock == 0)
- mtx_unlock(&ctl_softc->ctl_lock);
-#endif /* CTL_DONE_THREAD */
-}
-
-void
-ctl_done(union ctl_io *io)
-{
- ctl_done_lock(io, /*have_lock*/ 0);
+ ctl_enqueue_done(io);
}
int
@@ -13291,24 +13857,18 @@ ctl_isc(struct ctl_scsiio *ctsio)
static void
ctl_work_thread(void *arg)
{
- struct ctl_softc *softc;
+ struct ctl_thread *thr = (struct ctl_thread *)arg;
+ struct ctl_softc *softc = thr->ctl_softc;
union ctl_io *io;
- struct ctl_be_lun *be_lun;
int retval;
CTL_DEBUG_PRINT(("ctl_work_thread starting\n"));
- softc = (struct ctl_softc *)arg;
- if (softc == NULL)
- return;
-
- mtx_lock(&softc->ctl_lock);
for (;;) {
retval = 0;
/*
* We handle the queues in this order:
- * - task management
* - ISC
* - done queue (to free up resources, unblock other commands)
* - RtR queue
@@ -13317,84 +13877,128 @@ ctl_work_thread(void *arg)
* If those queues are empty, we break out of the loop and
* go to sleep.
*/
- io = (union ctl_io *)STAILQ_FIRST(&softc->task_queue);
+ mtx_lock(&thr->queue_lock);
+ io = (union ctl_io *)STAILQ_FIRST(&thr->isc_queue);
if (io != NULL) {
- ctl_run_task_queue(softc);
+ STAILQ_REMOVE_HEAD(&thr->isc_queue, links);
+ mtx_unlock(&thr->queue_lock);
+ ctl_handle_isc(io);
continue;
}
- io = (union ctl_io *)STAILQ_FIRST(&softc->isc_queue);
+ io = (union ctl_io *)STAILQ_FIRST(&thr->done_queue);
if (io != NULL) {
- STAILQ_REMOVE_HEAD(&softc->isc_queue, links);
- ctl_handle_isc(io);
+ STAILQ_REMOVE_HEAD(&thr->done_queue, links);
+ /* clear any blocked commands, call fe_done */
+ mtx_unlock(&thr->queue_lock);
+ retval = ctl_process_done(io);
continue;
}
- io = (union ctl_io *)STAILQ_FIRST(&softc->done_queue);
+ io = (union ctl_io *)STAILQ_FIRST(&thr->incoming_queue);
if (io != NULL) {
- STAILQ_REMOVE_HEAD(&softc->done_queue, links);
- /* clear any blocked commands, call fe_done */
- mtx_unlock(&softc->ctl_lock);
- /*
- * XXX KDM
- * Call this without a lock for now. This will
- * depend on whether there is any way the FETD can
- * sleep or deadlock if called with the CTL lock
- * held.
- */
- retval = ctl_process_done(io, /*have_lock*/ 0);
- mtx_lock(&softc->ctl_lock);
+ STAILQ_REMOVE_HEAD(&thr->incoming_queue, links);
+ mtx_unlock(&thr->queue_lock);
+ if (io->io_hdr.io_type == CTL_IO_TASK)
+ ctl_run_task(io);
+ else
+ ctl_scsiio_precheck(softc, &io->scsiio);
continue;
}
if (!ctl_pause_rtr) {
- io = (union ctl_io *)STAILQ_FIRST(&softc->rtr_queue);
+ io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue);
if (io != NULL) {
- STAILQ_REMOVE_HEAD(&softc->rtr_queue, links);
- mtx_unlock(&softc->ctl_lock);
+ STAILQ_REMOVE_HEAD(&thr->rtr_queue, links);
+ mtx_unlock(&thr->queue_lock);
retval = ctl_scsiio(&io->scsiio);
if (retval != CTL_RETVAL_COMPLETE)
CTL_DEBUG_PRINT(("ctl_scsiio failed\n"));
- mtx_lock(&softc->ctl_lock);
continue;
}
}
- io = (union ctl_io *)STAILQ_FIRST(&softc->incoming_queue);
- if (io != NULL) {
- STAILQ_REMOVE_HEAD(&softc->incoming_queue, links);
- mtx_unlock(&softc->ctl_lock);
- ctl_scsiio_precheck(softc, &io->scsiio);
- mtx_lock(&softc->ctl_lock);
- continue;
- }
- /*
- * We might want to move this to a separate thread, so that
- * configuration requests (in this case LUN creations)
- * won't impact the I/O path.
- */
+
+ /* Sleep until we have something to do. */
+ mtx_sleep(thr, &thr->queue_lock, PDROP | PRIBIO, "-", 0);
+ }
+}
+
+static void
+ctl_lun_thread(void *arg)
+{
+ struct ctl_softc *softc = (struct ctl_softc *)arg;
+ struct ctl_be_lun *be_lun;
+ int retval;
+
+ CTL_DEBUG_PRINT(("ctl_lun_thread starting\n"));
+
+ for (;;) {
+ retval = 0;
+ mtx_lock(&softc->ctl_lock);
be_lun = STAILQ_FIRST(&softc->pending_lun_queue);
if (be_lun != NULL) {
STAILQ_REMOVE_HEAD(&softc->pending_lun_queue, links);
mtx_unlock(&softc->ctl_lock);
ctl_create_lun(be_lun);
- mtx_lock(&softc->ctl_lock);
continue;
}
- /* XXX KDM use the PDROP flag?? */
/* Sleep until we have something to do. */
- mtx_sleep(softc, &softc->ctl_lock, PRIBIO, "-", 0);
-
- /* Back to the top of the loop to see what woke us up. */
- continue;
+ mtx_sleep(&softc->pending_lun_queue, &softc->ctl_lock,
+ PDROP | PRIBIO, "-", 0);
}
}
-void
-ctl_wakeup_thread()
+static void
+ctl_enqueue_incoming(union ctl_io *io)
{
- struct ctl_softc *softc;
+ struct ctl_softc *softc = control_softc;
+ struct ctl_thread *thr;
+ u_int idx;
- softc = control_softc;
+ idx = (io->io_hdr.nexus.targ_port * 127 +
+ io->io_hdr.nexus.initid.id) % worker_threads;
+ thr = &softc->threads[idx];
+ mtx_lock(&thr->queue_lock);
+ STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links);
+ mtx_unlock(&thr->queue_lock);
+ wakeup(thr);
+}
+
+static void
+ctl_enqueue_rtr(union ctl_io *io)
+{
+ struct ctl_softc *softc = control_softc;
+ struct ctl_thread *thr;
+
+ thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
+ mtx_lock(&thr->queue_lock);
+ STAILQ_INSERT_TAIL(&thr->rtr_queue, &io->io_hdr, links);
+ mtx_unlock(&thr->queue_lock);
+ wakeup(thr);
+}
+
+static void
+ctl_enqueue_done(union ctl_io *io)
+{
+ struct ctl_softc *softc = control_softc;
+ struct ctl_thread *thr;
+
+ thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
+ mtx_lock(&thr->queue_lock);
+ STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links);
+ mtx_unlock(&thr->queue_lock);
+ wakeup(thr);
+}
+
+static void
+ctl_enqueue_isc(union ctl_io *io)
+{
+ struct ctl_softc *softc = control_softc;
+ struct ctl_thread *thr;
- wakeup_one(softc);
+ thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
+ mtx_lock(&thr->queue_lock);
+ STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links);
+ mtx_unlock(&thr->queue_lock);
+ wakeup(thr);
}
/* Initialization and failover */
diff --git a/sys/cam/ctl/ctl.h b/sys/cam/ctl/ctl.h
index 07c82bac1bf54..0525a51d24a2f 100644
--- a/sys/cam/ctl/ctl.h
+++ b/sys/cam/ctl/ctl.h
@@ -53,6 +53,7 @@ typedef enum {
CTL_PORT_IOCTL = 0x04,
CTL_PORT_INTERNAL = 0x08,
CTL_PORT_ISCSI = 0x10,
+ CTL_PORT_SAS = 0x20,
CTL_PORT_ALL = 0xff,
CTL_PORT_ISC = 0x100 // FC port for inter-shelf communication
} ctl_port_type;
@@ -96,12 +97,15 @@ union ctl_modepage_info {
/*
* Device ID length, for VPD page 0x83.
*/
-#define CTL_DEVID_LEN 16
+#define CTL_DEVID_LEN 64
+#define CTL_DEVID_MIN_LEN 16
/*
* WWPN length, for VPD page 0x83.
*/
#define CTL_WWPN_LEN 8
+#define CTL_DRIVER_NAME_LEN 32
+
/*
* Unit attention types. ASC/ASCQ values for these should be placed in
* ctl_build_ua. These are also listed in order of reporting priority.
@@ -112,38 +116,24 @@ typedef enum {
CTL_UA_POWERON = 0x0001,
CTL_UA_BUS_RESET = 0x0002,
CTL_UA_TARG_RESET = 0x0004,
- CTL_UA_LUN_RESET = 0x0008,
- CTL_UA_LUN_CHANGE = 0x0010,
- CTL_UA_MODE_CHANGE = 0x0020,
- CTL_UA_LOG_CHANGE = 0x0040,
- CTL_UA_LVD = 0x0080,
- CTL_UA_SE = 0x0100,
- CTL_UA_RES_PREEMPT = 0x0200,
- CTL_UA_RES_RELEASE = 0x0400,
- CTL_UA_REG_PREEMPT = 0x0800,
- CTL_UA_ASYM_ACC_CHANGE = 0x1000,
- CTL_UA_CAPACITY_CHANGED = 0x2000
+ CTL_UA_I_T_NEXUS_LOSS = 0x0008,
+ CTL_UA_LUN_RESET = 0x0010,
+ CTL_UA_LUN_CHANGE = 0x0020,
+	CTL_UA_MODE_CHANGE	= 0x0040,
+ CTL_UA_LOG_CHANGE = 0x0080,
+ CTL_UA_LVD = 0x0100,
+ CTL_UA_SE = 0x0200,
+ CTL_UA_RES_PREEMPT = 0x0400,
+ CTL_UA_RES_RELEASE = 0x0800,
+ CTL_UA_REG_PREEMPT = 0x1000,
+ CTL_UA_ASYM_ACC_CHANGE = 0x2000,
+ CTL_UA_CAPACITY_CHANGED = 0x4000
} ctl_ua_type;
#ifdef _KERNEL
MALLOC_DECLARE(M_CTL);
-typedef enum {
- CTL_THREAD_NONE = 0x00,
- CTL_THREAD_WAKEUP = 0x01
-} ctl_thread_flags;
-
-struct ctl_thread {
- void (*thread_func)(void *arg);
- void *arg;
- struct cv wait_queue;
- const char *thread_name;
- ctl_thread_flags thread_flags;
- struct completion *thread_event;
- struct task_struct *task;
-};
-
struct ctl_page_index;
#ifdef SYSCTL_DECL /* from sysctl.h */
@@ -195,20 +185,27 @@ int ctl_debugconf_sp_select_handler(struct ctl_scsiio *ctsio,
int ctl_config_move_done(union ctl_io *io);
void ctl_datamove(union ctl_io *io);
void ctl_done(union ctl_io *io);
+void ctl_data_submit_done(union ctl_io *io);
void ctl_config_write_done(union ctl_io *io);
-#if 0
-int ctl_thread(void *arg);
-#endif
-void ctl_wakeup_thread(void);
-#if 0
-struct ctl_thread *ctl_create_thread(void (*thread_func)
- (void *thread_arg), void *thread_arg, const char *thread_name);
-void ctl_signal_thread(struct ctl_thread *thread);
-void ctl_shutdown_thread(struct ctl_thread *thread);
-#endif
void ctl_portDB_changed(int portnum);
void ctl_init_isc_msg(void);
+/*
+ * KPI to manipulate LUN/port options
+ */
+
+struct ctl_option {
+ STAILQ_ENTRY(ctl_option) links;
+ char *name;
+ char *value;
+};
+typedef STAILQ_HEAD(ctl_options, ctl_option) ctl_options_t;
+
+struct ctl_be_arg;
+void ctl_init_opts(ctl_options_t *opts, int num_args, struct ctl_be_arg *args);
+void ctl_free_opts(ctl_options_t *opts);
+char * ctl_get_opt(ctl_options_t *opts, const char *name);
+
#endif /* _KERNEL */
#endif /* _CTL_H_ */
diff --git a/sys/cam/ctl/ctl_backend.c b/sys/cam/ctl/ctl_backend.c
index 5234c4a320764..0e1a76c5d9709 100644
--- a/sys/cam/ctl/ctl_backend.c
+++ b/sys/cam/ctl/ctl_backend.c
@@ -173,6 +173,49 @@ ctl_backend_find(char *backend_name)
return (NULL);
}
-/*
- * vim: ts=8
- */
+void
+ctl_init_opts(ctl_options_t *opts, int num_args, struct ctl_be_arg *args)
+{
+ struct ctl_option *opt;
+ int i;
+
+ STAILQ_INIT(opts);
+ for (i = 0; i < num_args; i++) {
+ if ((args[i].flags & CTL_BEARG_RD) == 0)
+ continue;
+ if ((args[i].flags & CTL_BEARG_ASCII) == 0)
+ continue;
+ opt = malloc(sizeof(*opt), M_CTL, M_WAITOK);
+ opt->name = malloc(strlen(args[i].kname) + 1, M_CTL, M_WAITOK);
+ strcpy(opt->name, args[i].kname);
+ opt->value = malloc(strlen(args[i].kvalue) + 1, M_CTL, M_WAITOK);
+ strcpy(opt->value, args[i].kvalue);
+ STAILQ_INSERT_TAIL(opts, opt, links);
+ }
+}
+
+void
+ctl_free_opts(ctl_options_t *opts)
+{
+ struct ctl_option *opt;
+
+ while ((opt = STAILQ_FIRST(opts)) != NULL) {
+ STAILQ_REMOVE_HEAD(opts, links);
+ free(opt->name, M_CTL);
+ free(opt->value, M_CTL);
+ free(opt, M_CTL);
+ }
+}
+
+char *
+ctl_get_opt(ctl_options_t *opts, const char *name)
+{
+ struct ctl_option *opt;
+
+ STAILQ_FOREACH(opt, opts, links) {
+ if (strcmp(opt->name, name) == 0) {
+ return (opt->value);
+ }
+ }
+ return (NULL);
+}
diff --git a/sys/cam/ctl/ctl_backend.h b/sys/cam/ctl/ctl_backend.h
index ad93119afc971..c2066c527466a 100644
--- a/sys/cam/ctl/ctl_backend.h
+++ b/sys/cam/ctl/ctl_backend.h
@@ -180,12 +180,6 @@ typedef void (*be_lun_config_t)(void *be_lun,
* The links field is for CTL internal use only, and should not be used by
* the backend.
*/
-struct ctl_be_lun_option {
- STAILQ_ENTRY(ctl_be_lun_option) links;
- char *name;
- char *value;
-};
-
struct ctl_be_lun {
uint8_t lun_type; /* passed to CTL */
ctl_backend_lun_flags flags; /* passed to CTL */
@@ -202,7 +196,7 @@ struct ctl_be_lun {
be_lun_config_t lun_config_status; /* passed to CTL */
struct ctl_backend_driver *be; /* passed to CTL */
void *ctl_lun; /* used by CTL */
- STAILQ_HEAD(, ctl_be_lun_option) options; /* passed to CTL */
+ ctl_options_t options; /* passed to CTL */
STAILQ_ENTRY(ctl_be_lun) links; /* used by CTL */
};
diff --git a/sys/cam/ctl/ctl_backend_block.c b/sys/cam/ctl/ctl_backend_block.c
index 4856b45cd155c..528fb71781b35 100644
--- a/sys/cam/ctl/ctl_backend_block.c
+++ b/sys/cam/ctl/ctl_backend_block.c
@@ -92,9 +92,11 @@ __FBSDID("$FreeBSD$");
* The idea here is that we'll allocate enough S/G space to hold a 1MB
* I/O. If we get an I/O larger than that, we'll split it.
*/
-#define CTLBLK_MAX_IO_SIZE (1024 * 1024)
+#define CTLBLK_HALF_IO_SIZE (512 * 1024)
+#define CTLBLK_MAX_IO_SIZE (CTLBLK_HALF_IO_SIZE * 2)
#define CTLBLK_MAX_SEG MAXPHYS
-#define CTLBLK_MAX_SEGS MAX(CTLBLK_MAX_IO_SIZE / CTLBLK_MAX_SEG, 1)
+#define CTLBLK_HALF_SEGS MAX(CTLBLK_HALF_IO_SIZE / CTLBLK_MAX_SEG, 1)
+#define CTLBLK_MAX_SEGS (CTLBLK_HALF_SEGS * 2)
#ifdef CTLBLK_DEBUG
#define DPRINTF(fmt, args...) \
@@ -103,6 +105,11 @@ __FBSDID("$FreeBSD$");
#define DPRINTF(fmt, args...) do {} while(0)
#endif
+#define PRIV(io) \
+ ((struct ctl_ptr_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_BACKEND])
+#define ARGS(io) \
+ ((struct ctl_lba_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_LBA_LEN])
+
SDT_PROVIDER_DEFINE(cbb);
typedef enum {
@@ -153,7 +160,6 @@ struct ctl_be_block_lun {
cbb_dispatch_t dispatch;
cbb_dispatch_t lun_flush;
cbb_dispatch_t unmap;
- struct mtx lock;
uma_zone_t lun_zone;
uint64_t size_blocks;
uint64_t size_bytes;
@@ -172,6 +178,8 @@ struct ctl_be_block_lun {
STAILQ_HEAD(, ctl_io_hdr) input_queue;
STAILQ_HEAD(, ctl_io_hdr) config_write_queue;
STAILQ_HEAD(, ctl_io_hdr) datamove_queue;
+ struct mtx_padalign io_lock;
+ struct mtx_padalign queue_lock;
};
/*
@@ -212,10 +220,9 @@ struct ctl_be_block_io {
};
static int cbb_num_threads = 14;
-TUNABLE_INT("kern.cam.ctl.block.num_threads", &cbb_num_threads);
SYSCTL_NODE(_kern_cam_ctl, OID_AUTO, block, CTLFLAG_RD, 0,
"CAM Target Layer Block Backend");
-SYSCTL_INT(_kern_cam_ctl_block, OID_AUTO, num_threads, CTLFLAG_RW,
+SYSCTL_INT(_kern_cam_ctl_block, OID_AUTO, num_threads, CTLFLAG_RWTUN,
&cbb_num_threads, 0, "Number of threads per backing file");
static struct ctl_be_block_io *ctl_alloc_beio(struct ctl_be_block_softc *softc);
@@ -309,6 +316,13 @@ ctl_free_beio(struct ctl_be_block_io *beio)
uma_zfree(beio->lun->lun_zone, beio->sg_segs[i].addr);
beio->sg_segs[i].addr = NULL;
+
+ /* For compare we had two equal S/G lists. */
+ if (ARGS(beio->io)->flags & CTL_LLF_COMPARE) {
+ uma_zfree(beio->lun->lun_zone,
+ beio->sg_segs[i + CTLBLK_HALF_SEGS].addr);
+ beio->sg_segs[i + CTLBLK_HALF_SEGS].addr = NULL;
+ }
}
if (duplicate_free > 0) {
@@ -322,28 +336,13 @@ ctl_free_beio(struct ctl_be_block_io *beio)
static void
ctl_complete_beio(struct ctl_be_block_io *beio)
{
- union ctl_io *io;
- int io_len;
-
- io = beio->io;
-
- if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)
- io_len = beio->io_len;
- else
- io_len = 0;
-
- devstat_end_transaction(beio->lun->disk_stats,
- /*bytes*/ io_len,
- beio->ds_tag_type,
- beio->ds_trans_type,
- /*now*/ NULL,
- /*then*/&beio->ds_t0);
+ union ctl_io *io = beio->io;
if (beio->beio_cont != NULL) {
beio->beio_cont(beio);
} else {
ctl_free_beio(beio);
- ctl_done(io);
+ ctl_data_submit_done(io);
}
}
@@ -352,13 +351,13 @@ ctl_be_block_move_done(union ctl_io *io)
{
struct ctl_be_block_io *beio;
struct ctl_be_block_lun *be_lun;
+ struct ctl_lba_len_flags *lbalen;
#ifdef CTL_TIME_IO
struct bintime cur_bt;
-#endif
-
- beio = (struct ctl_be_block_io *)
- io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr;
+#endif
+ int i;
+ beio = (struct ctl_be_block_io *)PRIV(io)->ptr;
be_lun = beio->lun;
DPRINTF("entered\n");
@@ -369,16 +368,37 @@ ctl_be_block_move_done(union ctl_io *io)
bintime_add(&io->io_hdr.dma_bt, &cur_bt);
io->io_hdr.num_dmas++;
#endif
+ io->scsiio.kern_rel_offset += io->scsiio.kern_data_len;
/*
* We set status at this point for read commands, and write
* commands with errors.
*/
- if ((beio->bio_cmd == BIO_READ)
- && (io->io_hdr.port_status == 0)
- && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
- && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE))
- ctl_set_success(&io->scsiio);
+ if ((io->io_hdr.port_status == 0) &&
+ ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0) &&
+ ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
+ lbalen = ARGS(beio->io);
+ if (lbalen->flags & CTL_LLF_READ) {
+ ctl_set_success(&io->scsiio);
+ } else if (lbalen->flags & CTL_LLF_COMPARE) {
+ /* We have two data blocks ready for comparison. */
+ for (i = 0; i < beio->num_segs; i++) {
+ if (memcmp(beio->sg_segs[i].addr,
+ beio->sg_segs[i + CTLBLK_HALF_SEGS].addr,
+ beio->sg_segs[i].len) != 0)
+ break;
+ }
+ if (i < beio->num_segs)
+ ctl_set_sense(&io->scsiio,
+ /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_MISCOMPARE,
+ /*asc*/ 0x1D,
+ /*ascq*/ 0x00,
+ SSD_ELEM_NONE);
+ else
+ ctl_set_success(&io->scsiio);
+ }
+ }
else if ((io->io_hdr.port_status != 0)
&& ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
&& ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
@@ -414,14 +434,14 @@ ctl_be_block_move_done(union ctl_io *io)
* This move done routine is generally called in the SIM's
* interrupt context, and therefore we cannot block.
*/
- mtx_lock(&be_lun->lock);
+ mtx_lock(&be_lun->queue_lock);
/*
* XXX KDM make sure that links is okay to use at this point.
* Otherwise, we either need to add another field to ctl_io_hdr,
* or deal with resource allocation here.
*/
STAILQ_INSERT_TAIL(&be_lun->datamove_queue, &io->io_hdr, links);
- mtx_unlock(&be_lun->lock);
+ mtx_unlock(&be_lun->queue_lock);
taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);
@@ -443,7 +463,7 @@ ctl_be_block_biodone(struct bio *bio)
DPRINTF("entered\n");
error = bio->bio_error;
- mtx_lock(&be_lun->lock);
+ mtx_lock(&be_lun->io_lock);
if (error != 0)
beio->num_errors++;
@@ -461,7 +481,7 @@ ctl_be_block_biodone(struct bio *bio)
*/
if ((beio->send_complete == 0)
|| (beio->num_bios_done < beio->num_bios_sent)) {
- mtx_unlock(&be_lun->lock);
+ mtx_unlock(&be_lun->io_lock);
return;
}
@@ -469,7 +489,10 @@ ctl_be_block_biodone(struct bio *bio)
* At this point, we've verified that we are the last I/O to
* complete, so it's safe to drop the lock.
*/
- mtx_unlock(&be_lun->lock);
+ devstat_end_transaction(beio->lun->disk_stats, beio->io_len,
+ beio->ds_tag_type, beio->ds_trans_type,
+ /*now*/ NULL, /*then*/&beio->ds_t0);
+ mtx_unlock(&be_lun->io_lock);
/*
* If there are any errors from the backing device, we fail the
@@ -490,12 +513,13 @@ ctl_be_block_biodone(struct bio *bio)
}
/*
- * If this is a write, a flush or a delete, we're all done.
+ * If this is a write, a flush, a delete or verify, we're all done.
* If this is a read, we can now send the data to the user.
*/
if ((beio->bio_cmd == BIO_WRITE)
|| (beio->bio_cmd == BIO_FLUSH)
- || (beio->bio_cmd == BIO_DELETE)) {
+ || (beio->bio_cmd == BIO_DELETE)
+ || (ARGS(io)->flags & CTL_LLF_VERIFY)) {
ctl_set_success(&io->scsiio);
ctl_complete_beio(beio);
} else {
@@ -510,15 +534,18 @@ static void
ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun,
struct ctl_be_block_io *beio)
{
- union ctl_io *io;
+ union ctl_io *io = beio->io;
struct mount *mountpoint;
int error, lock_flags;
DPRINTF("entered\n");
- io = beio->io;
+ binuptime(&beio->ds_t0);
+ mtx_lock(&be_lun->io_lock);
+ devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);
+ mtx_unlock(&be_lun->io_lock);
- (void) vn_start_write(be_lun->vn, &mountpoint, V_WAIT);
+ (void) vn_start_write(be_lun->vn, &mountpoint, V_WAIT);
if (MNT_SHARED_WRITES(mountpoint)
|| ((mountpoint == NULL)
@@ -529,14 +556,17 @@ ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun,
vn_lock(be_lun->vn, lock_flags | LK_RETRY);
- binuptime(&beio->ds_t0);
- devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);
-
error = VOP_FSYNC(be_lun->vn, MNT_WAIT, curthread);
VOP_UNLOCK(be_lun->vn, 0);
vn_finished_write(mountpoint);
+ mtx_lock(&be_lun->io_lock);
+ devstat_end_transaction(beio->lun->disk_stats, beio->io_len,
+ beio->ds_tag_type, beio->ds_trans_type,
+ /*now*/ NULL, /*then*/&beio->ds_t0);
+ mtx_unlock(&be_lun->io_lock);
+
if (error == 0)
ctl_set_success(&io->scsiio);
else {
@@ -571,18 +601,14 @@ ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun,
io = beio->io;
flags = beio->bio_flags;
+ bzero(&xuio, sizeof(xuio));
if (beio->bio_cmd == BIO_READ) {
SDT_PROBE(cbb, kernel, read, file_start, 0, 0, 0, 0, 0);
+ xuio.uio_rw = UIO_READ;
} else {
SDT_PROBE(cbb, kernel, write, file_start, 0, 0, 0, 0, 0);
- }
-
- bzero(&xuio, sizeof(xuio));
- if (beio->bio_cmd == BIO_READ)
- xuio.uio_rw = UIO_READ;
- else
xuio.uio_rw = UIO_WRITE;
-
+ }
xuio.uio_offset = beio->io_offset;
xuio.uio_resid = beio->io_len;
xuio.uio_segflg = UIO_SYSSPACE;
@@ -595,12 +621,14 @@ ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun,
xiovec->iov_len = beio->sg_segs[i].len;
}
+ binuptime(&beio->ds_t0);
+ mtx_lock(&be_lun->io_lock);
+ devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);
+ mtx_unlock(&be_lun->io_lock);
+
if (beio->bio_cmd == BIO_READ) {
vn_lock(be_lun->vn, LK_SHARED | LK_RETRY);
- binuptime(&beio->ds_t0);
- devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);
-
/*
* UFS pays attention to IO_DIRECT for reads. If the
* DIRECTIO option is configured into the kernel, it calls
@@ -625,6 +653,7 @@ ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun,
(IO_DIRECT|IO_SYNC) : 0, file_data->cred);
VOP_UNLOCK(be_lun->vn, 0);
+ SDT_PROBE(cbb, kernel, read, file_done, 0, 0, 0, 0, 0);
} else {
struct mount *mountpoint;
int lock_flags;
@@ -640,9 +669,6 @@ ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun,
vn_lock(be_lun->vn, lock_flags | LK_RETRY);
- binuptime(&beio->ds_t0);
- devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);
-
/*
* UFS pays attention to IO_DIRECT for writes. The write
* is done asynchronously. (Normally the write would just
@@ -666,8 +692,15 @@ ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun,
VOP_UNLOCK(be_lun->vn, 0);
vn_finished_write(mountpoint);
+ SDT_PROBE(cbb, kernel, write, file_done, 0, 0, 0, 0, 0);
}
+ mtx_lock(&be_lun->io_lock);
+ devstat_end_transaction(beio->lun->disk_stats, beio->io_len,
+ beio->ds_tag_type, beio->ds_trans_type,
+ /*now*/ NULL, /*then*/&beio->ds_t0);
+ mtx_unlock(&be_lun->io_lock);
+
/*
* If we got an error, set the sense data to "MEDIUM ERROR" and
* return the I/O to the user.
@@ -689,15 +722,96 @@ ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun,
}
/*
- * If this is a write, we're all done.
+ * If this is a write or a verify, we're all done.
* If this is a read, we can now send the data to the user.
*/
- if (beio->bio_cmd == BIO_WRITE) {
+ if ((beio->bio_cmd == BIO_WRITE) ||
+ (ARGS(io)->flags & CTL_LLF_VERIFY)) {
ctl_set_success(&io->scsiio);
- SDT_PROBE(cbb, kernel, write, file_done, 0, 0, 0, 0, 0);
ctl_complete_beio(beio);
} else {
+#ifdef CTL_TIME_IO
+ getbintime(&io->io_hdr.dma_start_bt);
+#endif
+ ctl_datamove(io);
+ }
+}
+
+static void
+ctl_be_block_dispatch_zvol(struct ctl_be_block_lun *be_lun,
+ struct ctl_be_block_io *beio)
+{
+ struct ctl_be_block_devdata *dev_data;
+ union ctl_io *io;
+ struct uio xuio;
+ struct iovec *xiovec;
+ int flags;
+ int error, i;
+
+ DPRINTF("entered\n");
+
+ dev_data = &be_lun->backend.dev;
+ io = beio->io;
+ flags = beio->bio_flags;
+
+ bzero(&xuio, sizeof(xuio));
+ if (beio->bio_cmd == BIO_READ) {
+ SDT_PROBE(cbb, kernel, read, file_start, 0, 0, 0, 0, 0);
+ xuio.uio_rw = UIO_READ;
+ } else {
+ SDT_PROBE(cbb, kernel, write, file_start, 0, 0, 0, 0, 0);
+ xuio.uio_rw = UIO_WRITE;
+ }
+ xuio.uio_offset = beio->io_offset;
+ xuio.uio_resid = beio->io_len;
+ xuio.uio_segflg = UIO_SYSSPACE;
+ xuio.uio_iov = beio->xiovecs;
+ xuio.uio_iovcnt = beio->num_segs;
+ xuio.uio_td = curthread;
+
+ for (i = 0, xiovec = xuio.uio_iov; i < xuio.uio_iovcnt; i++, xiovec++) {
+ xiovec->iov_base = beio->sg_segs[i].addr;
+ xiovec->iov_len = beio->sg_segs[i].len;
+ }
+
+ binuptime(&beio->ds_t0);
+ mtx_lock(&be_lun->io_lock);
+ devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);
+ mtx_unlock(&be_lun->io_lock);
+
+ if (beio->bio_cmd == BIO_READ) {
+ error = (*dev_data->csw->d_read)(dev_data->cdev, &xuio, 0);
SDT_PROBE(cbb, kernel, read, file_done, 0, 0, 0, 0, 0);
+ } else {
+ error = (*dev_data->csw->d_write)(dev_data->cdev, &xuio, 0);
+ SDT_PROBE(cbb, kernel, write, file_done, 0, 0, 0, 0, 0);
+ }
+
+ mtx_lock(&be_lun->io_lock);
+ devstat_end_transaction(beio->lun->disk_stats, beio->io_len,
+ beio->ds_tag_type, beio->ds_trans_type,
+ /*now*/ NULL, /*then*/&beio->ds_t0);
+ mtx_unlock(&be_lun->io_lock);
+
+ /*
+ * If we got an error, set the sense data to "MEDIUM ERROR" and
+ * return the I/O to the user.
+ */
+ if (error != 0) {
+ ctl_set_medium_error(&io->scsiio);
+ ctl_complete_beio(beio);
+ return;
+ }
+
+ /*
+ * If this is a write or a verify, we're all done.
+ * If this is a read, we can now send the data to the user.
+ */
+ if ((beio->bio_cmd == BIO_WRITE) ||
+ (ARGS(io)->flags & CTL_LLF_VERIFY)) {
+ ctl_set_success(&io->scsiio);
+ ctl_complete_beio(beio);
+ } else {
#ifdef CTL_TIME_IO
getbintime(&io->io_hdr.dma_start_bt);
#endif
@@ -739,7 +853,9 @@ ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun,
beio->send_complete = 1;
binuptime(&beio->ds_t0);
+ mtx_lock(&be_lun->io_lock);
devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0);
+ mtx_unlock(&be_lun->io_lock);
(*dev_data->csw->d_strategy)(bio);
}
@@ -770,11 +886,11 @@ ctl_be_block_unmap_dev_range(struct ctl_be_block_lun *be_lun,
off += bio->bio_length;
len -= bio->bio_length;
- mtx_lock(&be_lun->lock);
+ mtx_lock(&be_lun->io_lock);
beio->num_bios_sent++;
if (last && len == 0)
beio->send_complete = 1;
- mtx_unlock(&be_lun->lock);
+ mtx_unlock(&be_lun->io_lock);
(*dev_data->csw->d_strategy)(bio);
}
@@ -786,7 +902,7 @@ ctl_be_block_unmap_dev(struct ctl_be_block_lun *be_lun,
{
union ctl_io *io;
struct ctl_be_block_devdata *dev_data;
- struct ctl_ptr_len_flags ptrlen;
+ struct ctl_ptr_len_flags *ptrlen;
struct scsi_unmap_desc *buf, *end;
uint64_t len;
@@ -796,14 +912,15 @@ ctl_be_block_unmap_dev(struct ctl_be_block_lun *be_lun,
DPRINTF("entered\n");
binuptime(&beio->ds_t0);
+ mtx_lock(&be_lun->io_lock);
devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0);
+ mtx_unlock(&be_lun->io_lock);
if (beio->io_offset == -1) {
beio->io_len = 0;
- memcpy(&ptrlen, io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
- sizeof(ptrlen));
- buf = (struct scsi_unmap_desc *)ptrlen.ptr;
- end = buf + ptrlen.len / sizeof(*buf);
+ ptrlen = (struct ctl_ptr_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
+ buf = (struct scsi_unmap_desc *)ptrlen->ptr;
+ end = buf + ptrlen->len / sizeof(*buf);
for (; buf < end; buf++) {
len = (uint64_t)scsi_4btoul(buf->length) *
be_lun->blocksize;
@@ -821,6 +938,7 @@ static void
ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun,
struct ctl_be_block_io *beio)
{
+ TAILQ_HEAD(, bio) queue = TAILQ_HEAD_INITIALIZER(queue);
int i;
struct bio *bio;
struct ctl_be_block_devdata *dev_data;
@@ -841,14 +959,6 @@ ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun,
max_iosize = DFLTPHYS;
cur_offset = beio->io_offset;
-
- /*
- * XXX KDM need to accurately reflect the number of I/Os outstanding
- * to a device.
- */
- binuptime(&beio->ds_t0);
- devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0);
-
for (i = 0; i < beio->num_segs; i++) {
size_t cur_size;
uint8_t *cur_ptr;
@@ -876,32 +986,23 @@ ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun,
cur_ptr += bio->bio_length;
cur_size -= bio->bio_length;
- /*
- * Make sure we set the complete bit just before we
- * issue the last bio so we don't wind up with a
- * race.
- *
- * Use the LUN mutex here instead of a combination
- * of atomic variables for simplicity.
- *
- * XXX KDM we could have a per-IO lock, but that
- * would cause additional per-IO setup and teardown
- * overhead. Hopefully there won't be too much
- * contention on the LUN lock.
- */
- mtx_lock(&be_lun->lock);
-
+ TAILQ_INSERT_TAIL(&queue, bio, bio_queue);
beio->num_bios_sent++;
-
- if ((i == beio->num_segs - 1)
- && (cur_size == 0))
- beio->send_complete = 1;
-
- mtx_unlock(&be_lun->lock);
-
- (*dev_data->csw->d_strategy)(bio);
}
}
+ binuptime(&beio->ds_t0);
+ mtx_lock(&be_lun->io_lock);
+ devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0);
+ beio->send_complete = 1;
+ mtx_unlock(&be_lun->io_lock);
+
+ /*
+ * Fire off all allocated requests!
+ */
+ while ((bio = TAILQ_FIRST(&queue)) != NULL) {
+ TAILQ_REMOVE(&queue, bio, bio_queue);
+ (*dev_data->csw->d_strategy)(bio);
+ }
}
static void
@@ -911,8 +1012,9 @@ ctl_be_block_cw_done_ws(struct ctl_be_block_io *beio)
io = beio->io;
ctl_free_beio(beio);
- if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)
- && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) {
+ if ((io->io_hdr.flags & CTL_FLAG_ABORT) ||
+ ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
+ (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) {
ctl_config_write_done(io);
return;
}
@@ -926,20 +1028,19 @@ ctl_be_block_cw_dispatch_ws(struct ctl_be_block_lun *be_lun,
{
struct ctl_be_block_io *beio;
struct ctl_be_block_softc *softc;
- struct ctl_lba_len_flags lbalen;
+ struct ctl_lba_len_flags *lbalen;
uint64_t len_left, lba;
int i, seglen;
uint8_t *buf, *end;
DPRINTF("entered\n");
- beio = io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr;
+ beio = (struct ctl_be_block_io *)PRIV(io)->ptr;
softc = be_lun->softc;
- memcpy(&lbalen, io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
- sizeof(lbalen));
+ lbalen = ARGS(beio->io);
- if (lbalen.flags & ~(SWS_LBDATA | SWS_UNMAP) ||
- (lbalen.flags & SWS_UNMAP && be_lun->unmap == NULL)) {
+ if (lbalen->flags & ~(SWS_LBDATA | SWS_UNMAP) ||
+ (lbalen->flags & SWS_UNMAP && be_lun->unmap == NULL)) {
ctl_free_beio(beio);
ctl_set_invalid_field(&io->scsiio,
/*sks_valid*/ 1,
@@ -975,9 +1076,9 @@ ctl_be_block_cw_dispatch_ws(struct ctl_be_block_lun *be_lun,
break;
}
- if (lbalen.flags & SWS_UNMAP) {
- beio->io_offset = lbalen.lba * be_lun->blocksize;
- beio->io_len = (uint64_t)lbalen.len * be_lun->blocksize;
+ if (lbalen->flags & SWS_UNMAP) {
+ beio->io_offset = lbalen->lba * be_lun->blocksize;
+ beio->io_len = (uint64_t)lbalen->len * be_lun->blocksize;
beio->bio_cmd = BIO_DELETE;
beio->ds_trans_type = DEVSTAT_FREE;
@@ -989,9 +1090,9 @@ ctl_be_block_cw_dispatch_ws(struct ctl_be_block_lun *be_lun,
beio->ds_trans_type = DEVSTAT_WRITE;
DPRINTF("WRITE SAME at LBA %jx len %u\n",
- (uintmax_t)lbalen.lba, lbalen.len);
+ (uintmax_t)lbalen->lba, lbalen->len);
- len_left = (uint64_t)lbalen.len * be_lun->blocksize;
+ len_left = (uint64_t)lbalen->len * be_lun->blocksize;
for (i = 0, lba = 0; i < CTLBLK_MAX_SEGS && len_left > 0; i++) {
/*
@@ -1012,21 +1113,19 @@ ctl_be_block_cw_dispatch_ws(struct ctl_be_block_lun *be_lun,
end = buf + seglen;
for (; buf < end; buf += be_lun->blocksize) {
memcpy(buf, io->scsiio.kern_data_ptr, be_lun->blocksize);
- if (lbalen.flags & SWS_LBDATA)
- scsi_ulto4b(lbalen.lba + lba, buf);
+ if (lbalen->flags & SWS_LBDATA)
+ scsi_ulto4b(lbalen->lba + lba, buf);
lba++;
}
}
- beio->io_offset = lbalen.lba * be_lun->blocksize;
+ beio->io_offset = lbalen->lba * be_lun->blocksize;
beio->io_len = lba * be_lun->blocksize;
/* We can not do all in one run. Correct and schedule rerun. */
if (len_left > 0) {
- lbalen.lba += lba;
- lbalen.len -= lba;
- memcpy(io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, &lbalen,
- sizeof(lbalen));
+ lbalen->lba += lba;
+ lbalen->len -= lba;
beio->beio_cont = ctl_be_block_cw_done_ws;
}
@@ -1039,16 +1138,15 @@ ctl_be_block_cw_dispatch_unmap(struct ctl_be_block_lun *be_lun,
{
struct ctl_be_block_io *beio;
struct ctl_be_block_softc *softc;
- struct ctl_ptr_len_flags ptrlen;
+ struct ctl_ptr_len_flags *ptrlen;
DPRINTF("entered\n");
- beio = io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr;
+ beio = (struct ctl_be_block_io *)PRIV(io)->ptr;
softc = be_lun->softc;
- memcpy(&ptrlen, io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
- sizeof(ptrlen));
+ ptrlen = (struct ctl_ptr_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
- if (ptrlen.flags != 0 || be_lun->unmap == NULL) {
+ if (ptrlen->flags != 0 || be_lun->unmap == NULL) {
ctl_free_beio(beio);
ctl_set_invalid_field(&io->scsiio,
/*sks_valid*/ 0,
@@ -1090,8 +1188,7 @@ ctl_be_block_cw_dispatch_unmap(struct ctl_be_block_lun *be_lun,
beio->bio_cmd = BIO_DELETE;
beio->ds_trans_type = DEVSTAT_FREE;
- DPRINTF("WRITE SAME at LBA %jx len %u\n",
- (uintmax_t)lbalen.lba, lbalen.len);
+ DPRINTF("UNMAP\n");
be_lun->unmap(be_lun, beio);
}
@@ -1120,7 +1217,7 @@ ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun,
beio->io = io;
beio->lun = be_lun;
beio->beio_cont = ctl_be_block_cw_done;
- io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr = beio;
+ PRIV(io)->ptr = (void *)beio;
switch (io->scsiio.cdb[0]) {
case SYNCHRONIZE_CACHE:
@@ -1158,24 +1255,24 @@ ctl_be_block_next(struct ctl_be_block_io *beio)
io = beio->io;
be_lun = beio->lun;
ctl_free_beio(beio);
- if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)
- && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) {
- ctl_done(io);
+ if ((io->io_hdr.flags & CTL_FLAG_ABORT) ||
+ ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
+ (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) {
+ ctl_data_submit_done(io);
return;
}
- io->scsiio.kern_rel_offset += io->scsiio.kern_data_len;
io->io_hdr.status &= ~CTL_STATUS_MASK;
io->io_hdr.status |= CTL_STATUS_NONE;
- mtx_lock(&be_lun->lock);
+ mtx_lock(&be_lun->queue_lock);
/*
* XXX KDM make sure that links is okay to use at this point.
* Otherwise, we either need to add another field to ctl_io_hdr,
* or deal with resource allocation here.
*/
STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links);
- mtx_unlock(&be_lun->lock);
+ mtx_unlock(&be_lun->queue_lock);
taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);
}
@@ -1186,24 +1283,27 @@ ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun,
{
struct ctl_be_block_io *beio;
struct ctl_be_block_softc *softc;
- struct ctl_lba_len lbalen;
- uint64_t len_left, lbaoff;
+ struct ctl_lba_len_flags *lbalen;
+ struct ctl_ptr_len_flags *bptrlen;
+ uint64_t len_left, lbas;
int i;
softc = be_lun->softc;
DPRINTF("entered\n");
- if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) {
- SDT_PROBE(cbb, kernel, read, start, 0, 0, 0, 0, 0);
- } else {
+ lbalen = ARGS(io);
+ if (lbalen->flags & CTL_LLF_WRITE) {
SDT_PROBE(cbb, kernel, write, start, 0, 0, 0, 0, 0);
+ } else {
+ SDT_PROBE(cbb, kernel, read, start, 0, 0, 0, 0, 0);
}
beio = ctl_alloc_beio(softc);
beio->io = io;
beio->lun = be_lun;
- io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr = beio;
+ bptrlen = PRIV(io);
+ bptrlen->ptr = (void *)beio;
/*
* If the I/O came down with an ordered or head of queue tag, set
@@ -1234,28 +1334,25 @@ ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun,
break;
}
- /*
- * This path handles read and write only. The config write path
- * handles flush operations.
- */
- if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) {
- beio->bio_cmd = BIO_READ;
- beio->ds_trans_type = DEVSTAT_READ;
- } else {
+ if (lbalen->flags & CTL_LLF_WRITE) {
beio->bio_cmd = BIO_WRITE;
beio->ds_trans_type = DEVSTAT_WRITE;
+ } else {
+ beio->bio_cmd = BIO_READ;
+ beio->ds_trans_type = DEVSTAT_READ;
}
- memcpy(&lbalen, io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
- sizeof(lbalen));
DPRINTF("%s at LBA %jx len %u @%ju\n",
(beio->bio_cmd == BIO_READ) ? "READ" : "WRITE",
- (uintmax_t)lbalen.lba, lbalen.len, lbaoff);
- lbaoff = io->scsiio.kern_rel_offset / be_lun->blocksize;
- beio->io_offset = (lbalen.lba + lbaoff) * be_lun->blocksize;
- beio->io_len = MIN((lbalen.len - lbaoff) * be_lun->blocksize,
- CTLBLK_MAX_IO_SIZE);
- beio->io_len -= beio->io_len % be_lun->blocksize;
+ (uintmax_t)lbalen->lba, lbalen->len, bptrlen->len);
+ if (lbalen->flags & CTL_LLF_COMPARE)
+ lbas = CTLBLK_HALF_IO_SIZE;
+ else
+ lbas = CTLBLK_MAX_IO_SIZE;
+ lbas = MIN(lbalen->len - bptrlen->len, lbas / be_lun->blocksize);
+ beio->io_offset = (lbalen->lba + bptrlen->len) * be_lun->blocksize;
+ beio->io_len = lbas * be_lun->blocksize;
+ bptrlen->len += lbas;
for (i = 0, len_left = beio->io_len; len_left > 0; i++) {
KASSERT(i < CTLBLK_MAX_SEGS, ("Too many segs (%d >= %d)",
@@ -1270,14 +1367,25 @@ ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun,
DPRINTF("segment %d addr %p len %zd\n", i,
beio->sg_segs[i].addr, beio->sg_segs[i].len);
+ /* Set up second segment for compare operation. */
+ if (lbalen->flags & CTL_LLF_COMPARE) {
+ beio->sg_segs[i + CTLBLK_HALF_SEGS].len =
+ beio->sg_segs[i].len;
+ beio->sg_segs[i + CTLBLK_HALF_SEGS].addr =
+ uma_zalloc(be_lun->lun_zone, M_WAITOK);
+ }
+
beio->num_segs++;
len_left -= beio->sg_segs[i].len;
}
- if (io->scsiio.kern_rel_offset + beio->io_len <
- io->scsiio.kern_total_len)
+ if (bptrlen->len < lbalen->len)
beio->beio_cont = ctl_be_block_next;
io->scsiio.be_move_done = ctl_be_block_move_done;
- io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
+ /* For compare we have separate S/G lists for read and datamove. */
+ if (lbalen->flags & CTL_LLF_COMPARE)
+ io->scsiio.kern_data_ptr = (uint8_t *)&beio->sg_segs[CTLBLK_HALF_SEGS];
+ else
+ io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
io->scsiio.kern_data_len = beio->io_len;
io->scsiio.kern_data_resid = 0;
io->scsiio.kern_sg_entries = beio->num_segs;
@@ -1312,7 +1420,7 @@ ctl_be_block_worker(void *context, int pending)
DPRINTF("entered\n");
- mtx_lock(&be_lun->lock);
+ mtx_lock(&be_lun->queue_lock);
for (;;) {
io = (union ctl_io *)STAILQ_FIRST(&be_lun->datamove_queue);
if (io != NULL) {
@@ -1323,14 +1431,13 @@ ctl_be_block_worker(void *context, int pending)
STAILQ_REMOVE(&be_lun->datamove_queue, &io->io_hdr,
ctl_io_hdr, links);
- mtx_unlock(&be_lun->lock);
+ mtx_unlock(&be_lun->queue_lock);
- beio = (struct ctl_be_block_io *)
- io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr;
+ beio = (struct ctl_be_block_io *)PRIV(io)->ptr;
be_lun->dispatch(be_lun, beio);
- mtx_lock(&be_lun->lock);
+ mtx_lock(&be_lun->queue_lock);
continue;
}
io = (union ctl_io *)STAILQ_FIRST(&be_lun->config_write_queue);
@@ -1341,11 +1448,11 @@ ctl_be_block_worker(void *context, int pending)
STAILQ_REMOVE(&be_lun->config_write_queue, &io->io_hdr,
ctl_io_hdr, links);
- mtx_unlock(&be_lun->lock);
+ mtx_unlock(&be_lun->queue_lock);
ctl_be_block_cw_dispatch(be_lun, io);
- mtx_lock(&be_lun->lock);
+ mtx_lock(&be_lun->queue_lock);
continue;
}
io = (union ctl_io *)STAILQ_FIRST(&be_lun->input_queue);
@@ -1354,7 +1461,7 @@ ctl_be_block_worker(void *context, int pending)
STAILQ_REMOVE(&be_lun->input_queue, &io->io_hdr,
ctl_io_hdr, links);
- mtx_unlock(&be_lun->lock);
+ mtx_unlock(&be_lun->queue_lock);
/*
* We must drop the lock, since this routine and
@@ -1362,7 +1469,7 @@ ctl_be_block_worker(void *context, int pending)
*/
ctl_be_block_dispatch(be_lun, io);
- mtx_lock(&be_lun->lock);
+ mtx_lock(&be_lun->queue_lock);
continue;
}
@@ -1372,7 +1479,7 @@ ctl_be_block_worker(void *context, int pending)
*/
break;
}
- mtx_unlock(&be_lun->lock);
+ mtx_unlock(&be_lun->queue_lock);
}
/*
@@ -1383,15 +1490,11 @@ ctl_be_block_worker(void *context, int pending)
static int
ctl_be_block_submit(union ctl_io *io)
{
- struct ctl_lba_len lbalen;
struct ctl_be_block_lun *be_lun;
struct ctl_be_lun *ctl_be_lun;
- int retval;
DPRINTF("entered\n");
- retval = CTL_RETVAL_COMPLETE;
-
ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
CTL_PRIV_BACKEND_LUN].ptr;
be_lun = (struct ctl_be_block_lun *)ctl_be_lun->be_lun;
@@ -1402,23 +1505,19 @@ ctl_be_block_submit(union ctl_io *io)
KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, ("Non-SCSI I/O (type "
"%#x) encountered", io->io_hdr.io_type));
- memcpy(&lbalen, io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
- sizeof(lbalen));
- io->scsiio.kern_total_len = lbalen.len * be_lun->blocksize;
- io->scsiio.kern_rel_offset = 0;
+ PRIV(io)->len = 0;
- mtx_lock(&be_lun->lock);
+ mtx_lock(&be_lun->queue_lock);
/*
* XXX KDM make sure that links is okay to use at this point.
* Otherwise, we either need to add another field to ctl_io_hdr,
* or deal with resource allocation here.
*/
STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links);
- mtx_unlock(&be_lun->lock);
-
+ mtx_unlock(&be_lun->queue_lock);
taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);
- return (retval);
+ return (CTL_RETVAL_COMPLETE);
}
static int
@@ -1561,14 +1660,17 @@ ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
params = &req->reqdata.create;
be_lun->dev_type = CTL_BE_BLOCK_DEV;
- be_lun->dispatch = ctl_be_block_dispatch_dev;
- be_lun->lun_flush = ctl_be_block_flush_dev;
- be_lun->unmap = ctl_be_block_unmap_dev;
be_lun->backend.dev.cdev = be_lun->vn->v_rdev;
be_lun->backend.dev.csw = dev_refthread(be_lun->backend.dev.cdev,
&be_lun->backend.dev.dev_ref);
if (be_lun->backend.dev.csw == NULL)
panic("Unable to retrieve device switch");
+ if (strcmp(be_lun->backend.dev.csw->d_name, "zvol") == 0)
+ be_lun->dispatch = ctl_be_block_dispatch_zvol;
+ else
+ be_lun->dispatch = ctl_be_block_dispatch_dev;
+ be_lun->lun_flush = ctl_be_block_flush_dev;
+ be_lun->unmap = ctl_be_block_unmap_dev;
error = VOP_GETATTR(be_lun->vn, &vattr, NOCRED);
if (error) {
@@ -1821,27 +1923,28 @@ ctl_be_block_create(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
{
struct ctl_be_block_lun *be_lun;
struct ctl_lun_create_params *params;
- struct ctl_be_arg *file_arg;
+ char num_thread_str[16];
char tmpstr[32];
+ char *value;
int retval, num_threads, unmap;
- int i;
+ int tmp_num_threads;
params = &req->reqdata.create;
retval = 0;
num_threads = cbb_num_threads;
- file_arg = NULL;
-
be_lun = malloc(sizeof(*be_lun), M_CTLBLK, M_ZERO | M_WAITOK);
be_lun->softc = softc;
STAILQ_INIT(&be_lun->input_queue);
STAILQ_INIT(&be_lun->config_write_queue);
STAILQ_INIT(&be_lun->datamove_queue);
- STAILQ_INIT(&be_lun->ctl_be_lun.options);
sprintf(be_lun->lunname, "cblk%d", softc->num_luns);
- mtx_init(&be_lun->lock, be_lun->lunname, NULL, MTX_DEF);
+ mtx_init(&be_lun->io_lock, "cblk io lock", NULL, MTX_DEF);
+ mtx_init(&be_lun->queue_lock, "cblk queue lock", NULL, MTX_DEF);
+ ctl_init_opts(&be_lun->ctl_be_lun.options,
+ req->num_be_args, req->kern_be_args);
be_lun->lun_zone = uma_zcreate(be_lun->lunname, CTLBLK_MAX_SEG,
NULL, NULL, NULL, NULL, /*align*/ 0, /*flags*/0);
@@ -1858,24 +1961,13 @@ ctl_be_block_create(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
be_lun->ctl_be_lun.lun_type = T_DIRECT;
if (be_lun->ctl_be_lun.lun_type == T_DIRECT) {
- for (i = 0; i < req->num_be_args; i++) {
- if (strcmp(req->kern_be_args[i].kname, "file") == 0) {
- file_arg = &req->kern_be_args[i];
- break;
- }
- }
-
- if (file_arg == NULL) {
+ value = ctl_get_opt(&be_lun->ctl_be_lun.options, "file");
+ if (value == NULL) {
snprintf(req->error_str, sizeof(req->error_str),
"%s: no file argument specified", __func__);
goto bailout_error;
}
-
- be_lun->dev_path = malloc(file_arg->vallen, M_CTLBLK,
- M_WAITOK | M_ZERO);
-
- strlcpy(be_lun->dev_path, (char *)file_arg->kvalue,
- file_arg->vallen);
+ be_lun->dev_path = strdup(value, M_CTLBLK);
retval = ctl_be_block_open(softc, be_lun, req);
if (retval != 0) {
@@ -1914,50 +2006,27 @@ ctl_be_block_create(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
* XXX This searching loop might be refactored to be combined with
* the loop above,
*/
- unmap = 0;
- for (i = 0; i < req->num_be_args; i++) {
- if (strcmp(req->kern_be_args[i].kname, "num_threads") == 0) {
- struct ctl_be_arg *thread_arg;
- char num_thread_str[16];
- int tmp_num_threads;
+ value = ctl_get_opt(&be_lun->ctl_be_lun.options, "num_threads");
+ if (value != NULL) {
+ tmp_num_threads = strtol(value, NULL, 0);
-
- thread_arg = &req->kern_be_args[i];
-
- strlcpy(num_thread_str, (char *)thread_arg->kvalue,
- min(thread_arg->vallen,
- sizeof(num_thread_str)));
-
- tmp_num_threads = strtol(num_thread_str, NULL, 0);
-
- /*
- * We don't let the user specify less than one
- * thread, but hope he's clueful enough not to
- * specify 1000 threads.
- */
- if (tmp_num_threads < 1) {
- snprintf(req->error_str, sizeof(req->error_str),
- "%s: invalid number of threads %s",
- __func__, num_thread_str);
- goto bailout_error;
- }
-
- num_threads = tmp_num_threads;
- } else if (strcmp(req->kern_be_args[i].kname, "unmap") == 0 &&
- strcmp(req->kern_be_args[i].kvalue, "on") == 0) {
- unmap = 1;
- } else if (strcmp(req->kern_be_args[i].kname, "file") != 0 &&
- strcmp(req->kern_be_args[i].kname, "dev") != 0) {
- struct ctl_be_lun_option *opt;
-
- opt = malloc(sizeof(*opt), M_CTLBLK, M_WAITOK);
- opt->name = malloc(strlen(req->kern_be_args[i].kname) + 1, M_CTLBLK, M_WAITOK);
- strcpy(opt->name, req->kern_be_args[i].kname);
- opt->value = malloc(strlen(req->kern_be_args[i].kvalue) + 1, M_CTLBLK, M_WAITOK);
- strcpy(opt->value, req->kern_be_args[i].kvalue);
- STAILQ_INSERT_TAIL(&be_lun->ctl_be_lun.options, opt, links);
+ /*
+ * We don't let the user specify less than one
+ * thread, but hope he's clueful enough not to
+ * specify 1000 threads.
+ */
+ if (tmp_num_threads < 1) {
+ snprintf(req->error_str, sizeof(req->error_str),
+ "%s: invalid number of threads %s",
+ __func__, num_thread_str);
+ goto bailout_error;
}
+ num_threads = tmp_num_threads;
}
+ unmap = 0;
+ value = ctl_get_opt(&be_lun->ctl_be_lun.options, "unmap");
+ if (value != NULL && strcmp(value, "on") == 0)
+ unmap = 1;
be_lun->flags = CTL_BE_BLOCK_LUN_UNCONFIGURED;
be_lun->ctl_be_lun.flags = CTL_LUN_FLAG_PRIMARY;
@@ -2113,9 +2182,16 @@ ctl_be_block_create(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
bailout_error:
req->status = CTL_LUN_ERROR;
+ if (be_lun->io_taskqueue != NULL)
+ taskqueue_free(be_lun->io_taskqueue);
ctl_be_block_close(be_lun);
-
- free(be_lun->dev_path, M_CTLBLK);
+ if (be_lun->dev_path != NULL)
+ free(be_lun->dev_path, M_CTLBLK);
+ if (be_lun->lun_zone != NULL)
+ uma_zdestroy(be_lun->lun_zone);
+ ctl_free_opts(&be_lun->ctl_be_lun.options);
+ mtx_destroy(&be_lun->queue_lock);
+ mtx_destroy(&be_lun->io_lock);
free(be_lun, M_CTLBLK);
return (retval);
@@ -2201,8 +2277,10 @@ ctl_be_block_rm(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
uma_zdestroy(be_lun->lun_zone);
+ ctl_free_opts(&be_lun->ctl_be_lun.options);
free(be_lun->dev_path, M_CTLBLK);
-
+ mtx_destroy(&be_lun->queue_lock);
+ mtx_destroy(&be_lun->io_lock);
free(be_lun, M_CTLBLK);
req->status = CTL_LUN_OK;
@@ -2449,10 +2527,10 @@ ctl_be_block_config_write(union ctl_io *io)
* user asked to be synced out. When they issue a sync
* cache command, we'll sync out the whole thing.
*/
- mtx_lock(&be_lun->lock);
+ mtx_lock(&be_lun->queue_lock);
STAILQ_INSERT_TAIL(&be_lun->config_write_queue, &io->io_hdr,
links);
- mtx_unlock(&be_lun->lock);
+ mtx_unlock(&be_lun->queue_lock);
taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);
break;
case START_STOP_UNIT: {
@@ -2517,7 +2595,7 @@ ctl_be_block_lun_info(void *be_lun, struct sbuf *sb)
lun = (struct ctl_be_block_lun *)be_lun;
retval = 0;
- retval = sbuf_printf(sb, "<num_threads>");
+ retval = sbuf_printf(sb, "\t<num_threads>");
if (retval != 0)
goto bailout;
@@ -2527,26 +2605,7 @@ ctl_be_block_lun_info(void *be_lun, struct sbuf *sb)
if (retval != 0)
goto bailout;
- retval = sbuf_printf(sb, "</num_threads>");
-
- /*
- * For processor devices, we don't have a path variable.
- */
- if ((retval != 0)
- || (lun->dev_path == NULL))
- goto bailout;
-
- retval = sbuf_printf(sb, "<file>");
-
- if (retval != 0)
- goto bailout;
-
- retval = ctl_sbuf_printf_esc(sb, lun->dev_path);
-
- if (retval != 0)
- goto bailout;
-
- retval = sbuf_printf(sb, "</file>\n");
+ retval = sbuf_printf(sb, "</num_threads>\n");
bailout:
@@ -2562,7 +2621,7 @@ ctl_be_block_init(void)
softc = &backend_block_softc;
retval = 0;
- mtx_init(&softc->lock, "ctlblk", NULL, MTX_DEF);
+ mtx_init(&softc->lock, "ctlblock", NULL, MTX_DEF);
beio_zone = uma_zcreate("beio", sizeof(struct ctl_be_block_io),
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
STAILQ_INIT(&softc->disk_list);
diff --git a/sys/cam/ctl/ctl_backend_ramdisk.c b/sys/cam/ctl/ctl_backend_ramdisk.c
index 57c470e9ce343..6613e8e8656d5 100644
--- a/sys/cam/ctl/ctl_backend_ramdisk.c
+++ b/sys/cam/ctl/ctl_backend_ramdisk.c
@@ -84,7 +84,7 @@ struct ctl_be_ramdisk_lun {
struct taskqueue *io_taskqueue;
struct task io_task;
STAILQ_HEAD(, ctl_io_hdr) cont_queue;
- struct mtx lock;
+ struct mtx_padalign queue_lock;
};
struct ctl_be_ramdisk_softc {
@@ -150,7 +150,7 @@ ctl_backend_ramdisk_init(void)
memset(softc, 0, sizeof(*softc));
- mtx_init(&softc->lock, "ramdisk", NULL, MTX_DEF);
+ mtx_init(&softc->lock, "ctlramdisk", NULL, MTX_DEF);
STAILQ_INIT(&softc->lun_list);
softc->rd_size = 1024 * 1024;
@@ -241,11 +241,11 @@ ctl_backend_ramdisk_move_done(union ctl_io *io)
if ((io->io_hdr.port_status == 0)
&& ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
&& ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
- if (io->scsiio.kern_rel_offset < io->scsiio.kern_total_len) {
- mtx_lock(&be_lun->lock);
+ if (io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer > 0) {
+ mtx_lock(&be_lun->queue_lock);
STAILQ_INSERT_TAIL(&be_lun->cont_queue,
&io->io_hdr, links);
- mtx_unlock(&be_lun->lock);
+ mtx_unlock(&be_lun->queue_lock);
taskqueue_enqueue(be_lun->io_taskqueue,
&be_lun->io_task);
return (0);
@@ -267,27 +267,27 @@ ctl_backend_ramdisk_move_done(union ctl_io *io)
/*retry_count*/
io->io_hdr.port_status);
}
- ctl_done(io);
+ ctl_data_submit_done(io);
return(0);
}
static int
ctl_backend_ramdisk_submit(union ctl_io *io)
{
- struct ctl_lba_len lbalen;
struct ctl_be_lun *ctl_be_lun;
+ struct ctl_lba_len_flags *lbalen;
ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
CTL_PRIV_BACKEND_LUN].ptr;
-
- memcpy(&lbalen, io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
- sizeof(lbalen));
- io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
- io->scsiio.kern_total_len = lbalen.len * ctl_be_lun->blocksize;
- io->scsiio.kern_rel_offset = 0;
- io->scsiio.kern_data_resid = 0;
+ lbalen = (struct ctl_lba_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
+ if (lbalen->flags & CTL_LLF_VERIFY) {
+ ctl_set_success(&io->scsiio);
+ ctl_data_submit_done(io);
+ return (CTL_RETVAL_COMPLETE);
+ }
+ io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer =
+ lbalen->len * ctl_be_lun->blocksize;
ctl_backend_ramdisk_continue(io);
-
return (CTL_RETVAL_COMPLETE);
}
@@ -302,7 +302,7 @@ ctl_backend_ramdisk_continue(union ctl_io *io)
#endif
softc = &rd_softc;
- len = io->scsiio.kern_total_len - io->scsiio.kern_rel_offset;
+ len = io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer;
#ifdef CTL_RAMDISK_PAGES
sg_filled = min(btoc(len), softc->num_pages);
if (sg_filled > 1) {
@@ -328,9 +328,12 @@ ctl_backend_ramdisk_continue(union ctl_io *io)
io->scsiio.kern_data_ptr = softc->ramdisk_buffer;
#endif /* CTL_RAMDISK_PAGES */
+ io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
+ io->scsiio.kern_data_resid = 0;
io->scsiio.kern_data_len = len_filled;
io->scsiio.kern_sg_entries = sg_filled;
io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+ io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer -= len_filled;
#ifdef CTL_TIME_IO
getbintime(&io->io_hdr.dma_start_bt);
#endif
@@ -347,18 +350,18 @@ ctl_backend_ramdisk_worker(void *context, int pending)
be_lun = (struct ctl_be_ramdisk_lun *)context;
softc = be_lun->softc;
- mtx_lock(&be_lun->lock);
+ mtx_lock(&be_lun->queue_lock);
for (;;) {
io = (union ctl_io *)STAILQ_FIRST(&be_lun->cont_queue);
if (io != NULL) {
STAILQ_REMOVE(&be_lun->cont_queue, &io->io_hdr,
ctl_io_hdr, links);
- mtx_unlock(&be_lun->lock);
+ mtx_unlock(&be_lun->queue_lock);
ctl_backend_ramdisk_continue(io);
- mtx_lock(&be_lun->lock);
+ mtx_lock(&be_lun->queue_lock);
continue;
}
@@ -368,7 +371,7 @@ ctl_backend_ramdisk_worker(void *context, int pending)
*/
break;
}
- mtx_unlock(&be_lun->lock);
+ mtx_unlock(&be_lun->queue_lock);
}
static int
@@ -502,7 +505,8 @@ ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
if (retval == 0) {
taskqueue_drain(be_lun->io_taskqueue, &be_lun->io_task);
taskqueue_free(be_lun->io_taskqueue);
- mtx_destroy(&be_lun->lock);
+ ctl_free_opts(&be_lun->ctl_be_lun.options);
+ mtx_destroy(&be_lun->queue_lock);
free(be_lun, M_RAMDISK);
}
@@ -523,8 +527,9 @@ ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
struct ctl_be_ramdisk_lun *be_lun;
struct ctl_lun_create_params *params;
uint32_t blocksize;
+ char *value;
char tmpstr[32];
- int i, retval, unmap;
+ int retval, unmap;
retval = 0;
params = &req->reqdata.create;
@@ -543,7 +548,8 @@ ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
goto bailout_error;
}
sprintf(be_lun->lunname, "cram%d", softc->num_luns);
- STAILQ_INIT(&be_lun->ctl_be_lun.options);
+ ctl_init_opts(&be_lun->ctl_be_lun.options,
+ req->num_be_args, req->kern_be_args);
if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
be_lun->ctl_be_lun.lun_type = params->device_type;
@@ -581,21 +587,9 @@ ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
be_lun->softc = softc;
unmap = 0;
- for (i = 0; i < req->num_be_args; i++) {
- if (strcmp(req->kern_be_args[i].kname, "unmap") == 0 &&
- strcmp(req->kern_be_args[i].kvalue, "on") == 0) {
- unmap = 1;
- } else {
- struct ctl_be_lun_option *opt;
-
- opt = malloc(sizeof(*opt), M_RAMDISK, M_WAITOK);
- opt->name = malloc(strlen(req->kern_be_args[i].kname) + 1, M_RAMDISK, M_WAITOK);
- strcpy(opt->name, req->kern_be_args[i].kname);
- opt->value = malloc(strlen(req->kern_be_args[i].kvalue) + 1, M_RAMDISK, M_WAITOK);
- strcpy(opt->value, req->kern_be_args[i].kvalue);
- STAILQ_INSERT_TAIL(&be_lun->ctl_be_lun.options, opt, links);
- }
- }
+ value = ctl_get_opt(&be_lun->ctl_be_lun.options, "unmap");
+ if (value != NULL && strcmp(value, "on") == 0)
+ unmap = 1;
be_lun->flags = CTL_BE_RAMDISK_LUN_UNCONFIGURED;
be_lun->ctl_be_lun.flags = CTL_LUN_FLAG_PRIMARY;
@@ -646,7 +640,7 @@ ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
}
STAILQ_INIT(&be_lun->cont_queue);
- mtx_init(&be_lun->lock, "CTL ramdisk", NULL, MTX_DEF);
+ mtx_init(&be_lun->queue_lock, "cram queue lock", NULL, MTX_DEF);
TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_backend_ramdisk_worker,
be_lun);
@@ -728,7 +722,8 @@ bailout_error:
if (be_lun->io_taskqueue != NULL) {
taskqueue_free(be_lun->io_taskqueue);
}
- mtx_destroy(&be_lun->lock);
+ ctl_free_opts(&be_lun->ctl_be_lun.options);
+ mtx_destroy(&be_lun->queue_lock);
free(be_lun, M_RAMDISK);
}
@@ -820,7 +815,7 @@ ctl_backend_ramdisk_lun_shutdown(void *be_lun)
if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
wakeup(lun);
} else {
- STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
+ STAILQ_REMOVE(&softc->lun_list, lun, ctl_be_ramdisk_lun,
links);
softc->num_luns--;
do_free = 1;
diff --git a/sys/cam/ctl/ctl_cmd_table.c b/sys/cam/ctl/ctl_cmd_table.c
index 145ddb46ccde1..1240f2a88a3b1 100644
--- a/sys/cam/ctl/ctl_cmd_table.c
+++ b/sys/cam/ctl/ctl_cmd_table.c
@@ -58,16 +58,437 @@
#include <cam/ctl/ctl_private.h>
/*
- * Whenever support for a new command is added, it should be added to this
- * table.
+ * Whenever support for a new command is added, it should be added to these
+ * tables.
*/
-struct ctl_cmd_entry ctl_cmd_table[] =
+
+/* 5E PERSISTENT RESERVE IN */
+const struct ctl_cmd_entry ctl_cmd_table_5e[32] =
+{
+/* 00 READ KEYS */
+{ctl_persistent_reserve_in, CTL_SERIDX_RES, CTL_CMD_FLAG_ALLOW_ON_RESV |
+ CTL_CMD_FLAG_OK_ON_BOTH |
+ CTL_CMD_FLAG_OK_ON_STOPPED |
+ CTL_CMD_FLAG_OK_ON_INOPERABLE |
+ CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_FLAG_DATA_IN |
+ CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_NONE,
+ 10, { 0x00, 0, 0, 0, 0, 0, 0xff, 0xff, 0x07}},
+
+/* 01 READ RESERVATION */
+{ctl_persistent_reserve_in, CTL_SERIDX_RES, CTL_CMD_FLAG_ALLOW_ON_RESV |
+ CTL_CMD_FLAG_OK_ON_BOTH |
+ CTL_CMD_FLAG_OK_ON_STOPPED |
+ CTL_CMD_FLAG_OK_ON_INOPERABLE |
+ CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_FLAG_DATA_IN |
+ CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_NONE,
+ 10, { 0x01, 0, 0, 0, 0, 0, 0xff, 0xff, 0x07}},
+
+/* 02 REPORT CAPABILITIES */
+{ctl_persistent_reserve_in, CTL_SERIDX_INQ, CTL_CMD_FLAG_ALLOW_ON_RESV |
+ CTL_CMD_FLAG_OK_ON_BOTH |
+ CTL_CMD_FLAG_OK_ON_STOPPED |
+ CTL_CMD_FLAG_OK_ON_INOPERABLE |
+ CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_FLAG_DATA_IN |
+ CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_NONE,
+ 10, { 0x02, 0, 0, 0, 0, 0, 0xff, 0xff, 0x07}},
+
+/* 03 READ FULL STATUS */
+{ctl_persistent_reserve_in, CTL_SERIDX_INQ, CTL_CMD_FLAG_ALLOW_ON_RESV |
+ CTL_CMD_FLAG_OK_ON_BOTH |
+ CTL_CMD_FLAG_OK_ON_STOPPED |
+ CTL_CMD_FLAG_OK_ON_INOPERABLE |
+ CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_FLAG_DATA_IN |
+ CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_NONE,
+ 10, { 0x03, 0, 0, 0, 0, 0, 0xff, 0xff, 0x07}},
+
+/* 04-1f */
+};
+
+/* 5F PERSISTENT RESERVE OUT */
+const struct ctl_cmd_entry ctl_cmd_table_5f[32] =
+{
+/* 00 REGISTER */
+{ctl_persistent_reserve_out, CTL_SERIDX_RES, CTL_CMD_FLAG_ALLOW_ON_RESV |
+ CTL_CMD_FLAG_OK_ON_BOTH |
+ CTL_CMD_FLAG_OK_ON_STOPPED |
+ CTL_CMD_FLAG_OK_ON_INOPERABLE |
+ CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_FLAG_DATA_OUT |
+ CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_NONE,
+ 10, { 0x00, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0x07}},
+
+/* 01 RESERVE */
+{ctl_persistent_reserve_out, CTL_SERIDX_RES, CTL_CMD_FLAG_ALLOW_ON_RESV |
+ CTL_CMD_FLAG_OK_ON_BOTH |
+ CTL_CMD_FLAG_OK_ON_STOPPED |
+ CTL_CMD_FLAG_OK_ON_INOPERABLE |
+ CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_FLAG_DATA_OUT |
+ CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_NONE,
+ 10, { 0x01, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0x07}},
+
+/* 02 RELEASE */
+{ctl_persistent_reserve_out, CTL_SERIDX_RES, CTL_CMD_FLAG_ALLOW_ON_RESV |
+ CTL_CMD_FLAG_OK_ON_BOTH |
+ CTL_CMD_FLAG_OK_ON_STOPPED |
+ CTL_CMD_FLAG_OK_ON_INOPERABLE |
+ CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_FLAG_DATA_OUT |
+ CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_NONE,
+ 10, { 0x02, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0x07}},
+
+/* 03 CLEAR */
+{ctl_persistent_reserve_out, CTL_SERIDX_RES, CTL_CMD_FLAG_ALLOW_ON_RESV |
+ CTL_CMD_FLAG_OK_ON_BOTH |
+ CTL_CMD_FLAG_OK_ON_STOPPED |
+ CTL_CMD_FLAG_OK_ON_INOPERABLE |
+ CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_FLAG_DATA_OUT |
+ CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_NONE,
+ 10, { 0x03, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0x07}},
+
+/* 04 PREEMPT */
+{ctl_persistent_reserve_out, CTL_SERIDX_RES, CTL_CMD_FLAG_ALLOW_ON_RESV |
+ CTL_CMD_FLAG_OK_ON_BOTH |
+ CTL_CMD_FLAG_OK_ON_STOPPED |
+ CTL_CMD_FLAG_OK_ON_INOPERABLE |
+ CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_FLAG_DATA_OUT |
+ CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_NONE,
+ 10, { 0x04, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0x07}},
+
+/* 05 PREEMPT AND ABORT */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 06 REGISTER AND IGNORE EXISTING KEY */
+{ctl_persistent_reserve_out, CTL_SERIDX_RES, CTL_CMD_FLAG_ALLOW_ON_RESV |
+ CTL_CMD_FLAG_OK_ON_BOTH |
+ CTL_CMD_FLAG_OK_ON_STOPPED |
+ CTL_CMD_FLAG_OK_ON_INOPERABLE |
+ CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_FLAG_DATA_OUT |
+ CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_NONE,
+ 10, { 0x06, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0x07}},
+
+/* 07 REGISTER AND MOVE */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 08-1f */
+};
+
+/* 83 EXTENDED COPY */
+const struct ctl_cmd_entry ctl_cmd_table_83[32] =
+{
+/* 00 EXTENDED COPY (LID1) */
+{ctl_extended_copy_lid1, CTL_SERIDX_RD_CAP, CTL_CMD_FLAG_OK_ON_BOTH |
+ CTL_FLAG_DATA_OUT,
+ CTL_LUN_PAT_NONE,
+ 16, { 0x00, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
+
+/* 01 EXTENDED COPY (LID4) */
+{ctl_extended_copy_lid4, CTL_SERIDX_RD_CAP, CTL_CMD_FLAG_OK_ON_BOTH |
+ CTL_FLAG_DATA_OUT,
+ CTL_LUN_PAT_NONE,
+ 16, { 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
+
+/* 02 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 03 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 04 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 05 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 06 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 07 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 08 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 09 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0A */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0B */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0C */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0D */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0E */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0F */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 10 POPULATE TOKEN */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 11 WRITE USING TOKEN */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 12 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 13 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 14 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 15 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 16 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 17 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 18 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 19 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 1A */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 1B */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 1C COPY OPERATION ABORT */
+{ctl_copy_operation_abort, CTL_SERIDX_RD_CAP, CTL_CMD_FLAG_OK_ON_BOTH |
+ CTL_FLAG_DATA_NONE,
+ CTL_LUN_PAT_NONE,
+ 16, { 0x1c, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x07}},
+};
+
+/* 84 RECEIVE COPY STATUS */
+const struct ctl_cmd_entry ctl_cmd_table_84[32] =
+{
+/* 00 RECEIVE COPY STATUS (LID1) */
+{ctl_receive_copy_status_lid1, CTL_SERIDX_RD_CAP,
+ CTL_CMD_FLAG_OK_ON_BOTH |
+ CTL_FLAG_DATA_IN,
+ CTL_LUN_PAT_NONE,
+ 16, {0x00, 0xff, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
+
+/* 01 RECEIVE COPY DATA (LID1) */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 02 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 03 RECEIVE COPY OPERATING PARAMETERS */
+{ctl_receive_copy_operating_parameters, CTL_SERIDX_RD_CAP,
+ CTL_CMD_FLAG_OK_ON_BOTH |
+ CTL_CMD_FLAG_OK_ON_STOPPED |
+ CTL_CMD_FLAG_OK_ON_INOPERABLE |
+ CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_FLAG_DATA_IN,
+ CTL_LUN_PAT_NONE,
+ 16, {0x03, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
+
+/* 04 RECEIVE COPY FAILURE DETAILS (LID1) */
+{ctl_receive_copy_failure_details, CTL_SERIDX_RD_CAP,
+ CTL_CMD_FLAG_OK_ON_BOTH |
+ CTL_FLAG_DATA_IN,
+ CTL_LUN_PAT_NONE,
+ 16, {0x04, 0xff, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
+
+/* 05 RECEIVE COPY STATUS (LID4) */
+{ctl_receive_copy_status_lid4, CTL_SERIDX_RD_CAP,
+ CTL_CMD_FLAG_OK_ON_BOTH |
+ CTL_FLAG_DATA_IN,
+ CTL_LUN_PAT_NONE,
+ 16, {0x05, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
+
+/* 06 RECEIVE COPY DATA (LID4)*/
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 07 RECEIVE ROD TOKEN INFORMATION */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 08 REPORT ALL ROD TOKENS */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+};
+
+/* 9E SERVICE ACTION IN(16) */
+const struct ctl_cmd_entry ctl_cmd_table_9e[32] =
+{
+/* 00 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 01 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 02 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 03 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 04 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 05 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 06 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 07 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 08 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 09 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0A */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0B */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0C */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0D */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0E */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0F */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 10 */
+{ctl_read_capacity_16, CTL_SERIDX_RD_CAP, CTL_CMD_FLAG_OK_ON_SLUN |
+ CTL_CMD_FLAG_OK_ON_STOPPED |
+ CTL_CMD_FLAG_OK_ON_INOPERABLE |
+ CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_FLAG_DATA_IN |
+ CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_READCAP,
+ 16, {0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
+
+/* 11-1f */
+};
+
+/* A3 MAINTENANCE IN */
+const struct ctl_cmd_entry ctl_cmd_table_a3[32] =
+{
+/* 00 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 01 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 02 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 03 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 04 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 05 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 06 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 07 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 08 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 09 */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0A REPORT TARGET PORT GROUPS */
+{ctl_report_tagret_port_groups, CTL_SERIDX_INQ, CTL_CMD_FLAG_OK_ON_BOTH |
+ CTL_CMD_FLAG_OK_ON_STOPPED |
+ CTL_CMD_FLAG_OK_ON_INOPERABLE |
+ CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_FLAG_DATA_IN,
+ CTL_LUN_PAT_NONE,
+ 12, {0x0a, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
+
+/* 0B */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0C REPORT SUPPORTED_OPCODES */
+{ctl_report_supported_opcodes, CTL_SERIDX_INQ, CTL_CMD_FLAG_OK_ON_BOTH |
+ CTL_CMD_FLAG_OK_ON_STOPPED |
+ CTL_CMD_FLAG_OK_ON_INOPERABLE |
+ CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_FLAG_DATA_IN,
+ CTL_LUN_PAT_NONE,
+ 12, {0x0c, 0x07, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
+
+/* 0D REPORT SUPPORTED_TASK MANAGEMENT FUNCTIONS */
+{ctl_report_supported_tmf, CTL_SERIDX_INQ, CTL_CMD_FLAG_OK_ON_BOTH |
+ CTL_CMD_FLAG_OK_ON_STOPPED |
+ CTL_CMD_FLAG_OK_ON_INOPERABLE |
+ CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_FLAG_DATA_IN,
+ CTL_LUN_PAT_NONE,
+ 12, {0x0d, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
+
+/* 0E */
+{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+
+/* 0F REPORT TIMESTAMP */
+{ctl_report_timestamp, CTL_SERIDX_INQ, CTL_CMD_FLAG_OK_ON_BOTH |
+ CTL_CMD_FLAG_OK_ON_STOPPED |
+ CTL_CMD_FLAG_OK_ON_INOPERABLE |
+ CTL_CMD_FLAG_OK_ON_SECONDARY |
+ CTL_FLAG_DATA_IN,
+ CTL_LUN_PAT_NONE,
+ 12, {0x0f, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
+
+/* 10-1f */
+};
+
+const struct ctl_cmd_entry ctl_cmd_table[256] =
{
/* 00 TEST UNIT READY */
{ctl_tur, CTL_SERIDX_TUR, CTL_CMD_FLAG_OK_ON_BOTH |
CTL_FLAG_DATA_NONE |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
- CTL_LUN_PAT_TUR},
+ CTL_LUN_PAT_TUR, 6, {0, 0, 0, 0, 0x07}},
/* 01 REWIND */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -85,13 +506,13 @@ struct ctl_cmd_entry ctl_cmd_table[] =
CTL_CMD_FLAG_OK_ON_OFFLINE |
CTL_CMD_FLAG_OK_ON_SECONDARY |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
- CTL_LUN_PAT_NONE},
+ CTL_LUN_PAT_NONE, 6, {0x01, 0, 0, 0xff, 0x07}},
/* 04 FORMAT UNIT */
{ctl_format, CTL_SERIDX_FORMAT, CTL_CMD_FLAG_OK_ON_SLUN |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
CTL_FLAG_DATA_OUT,
- CTL_LUN_PAT_NONE},
+ CTL_LUN_PAT_NONE, 6, {0xff, 0, 0, 0, 0x07}},
/* 05 READ BLOCK LIMITS */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -106,7 +527,7 @@ struct ctl_cmd_entry ctl_cmd_table[] =
{ctl_read_write, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_SLUN |
CTL_FLAG_DATA_IN |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
- CTL_LUN_PAT_READ | CTL_LUN_PAT_RANGE},
+ CTL_LUN_PAT_READ | CTL_LUN_PAT_RANGE, 6, {0x1f, 0xff, 0xff, 0xff, 0x07}},
/* 09 */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -114,7 +535,7 @@ struct ctl_cmd_entry ctl_cmd_table[] =
/* 0A WRITE(6) */
{ctl_read_write, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_SLUN |
CTL_FLAG_DATA_OUT,
- CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE},
+ CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE, 6, {0x1f, 0xff, 0xff, 0xff, 0x07}},
/* 0B SEEK(6) */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -147,7 +568,7 @@ struct ctl_cmd_entry ctl_cmd_table[] =
CTL_CMD_FLAG_OK_ON_SECONDARY |
CTL_FLAG_DATA_IN |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
- CTL_LUN_PAT_NONE},
+ CTL_LUN_PAT_NONE, 6, {0xe1, 0xff, 0xff, 0xff, 0x07}},
/* 13 */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -161,25 +582,25 @@ struct ctl_cmd_entry ctl_cmd_table[] =
CTL_CMD_FLAG_OK_ON_INOPERABLE |
CTL_CMD_FLAG_OK_ON_SECONDARY |
CTL_FLAG_DATA_OUT,
- CTL_LUN_PAT_NONE},
+ CTL_LUN_PAT_NONE, 6, {0x11, 0, 0, 0xff, 0x07}},
/* 16 RESERVE(6) */
-{ctl_scsi_reserve, CTL_SERIDX_RESV, CTL_CMD_FLAG_ALLOW_ON_RESV |
+{ctl_scsi_reserve, CTL_SERIDX_RES, CTL_CMD_FLAG_ALLOW_ON_RESV |
CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
CTL_CMD_FLAG_OK_ON_SECONDARY |
CTL_FLAG_DATA_OUT,
- CTL_LUN_PAT_NONE},
+ CTL_LUN_PAT_NONE, 6, {0, 0, 0, 0, 0x07}},
/* 17 RELEASE(6) */
-{ctl_scsi_release, CTL_SERIDX_REL, CTL_CMD_FLAG_ALLOW_ON_RESV |
+{ctl_scsi_release, CTL_SERIDX_RES, CTL_CMD_FLAG_ALLOW_ON_RESV |
CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
CTL_CMD_FLAG_OK_ON_SECONDARY |
CTL_FLAG_DATA_NONE,
- CTL_LUN_PAT_NONE},
+ CTL_LUN_PAT_NONE, 6, {0, 0, 0, 0, 0x07}},
/* 18 COPY */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -192,8 +613,9 @@ struct ctl_cmd_entry ctl_cmd_table[] =
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
CTL_CMD_FLAG_OK_ON_SECONDARY |
- CTL_FLAG_DATA_IN,
- CTL_LUN_PAT_NONE},
+ CTL_FLAG_DATA_IN |
+ CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_NONE, 6, {0x08, 0xff, 0, 0xff, 0x07}},
/* 1B START STOP UNIT */
{ctl_start_stop, CTL_SERIDX_START, CTL_CMD_FLAG_OK_ON_SLUN |
@@ -202,7 +624,7 @@ struct ctl_cmd_entry ctl_cmd_table[] =
CTL_CMD_FLAG_OK_ON_OFFLINE |
CTL_FLAG_DATA_NONE |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
- CTL_LUN_PAT_NONE},
+ CTL_LUN_PAT_NONE, 6, {0x01, 0, 0, 0x03, 0x07}},
/* 1C RECEIVE DIAGNOSTIC RESULTS */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -238,7 +660,7 @@ struct ctl_cmd_entry ctl_cmd_table[] =
CTL_CMD_FLAG_OK_ON_SECONDARY |
CTL_FLAG_DATA_IN |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
- CTL_LUN_PAT_READCAP},
+ CTL_LUN_PAT_READCAP, 10, {0, 0, 0, 0, 0, 0, 0, 0, 0x07}},
/* 26 */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -250,14 +672,16 @@ struct ctl_cmd_entry ctl_cmd_table[] =
{ctl_read_write, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_SLUN |
CTL_FLAG_DATA_IN |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
- CTL_LUN_PAT_READ | CTL_LUN_PAT_RANGE},
+ CTL_LUN_PAT_READ | CTL_LUN_PAT_RANGE,
+ 10, {0x18, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0x07}},
/* 29 READ GENERATION */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
/* 2A WRITE(10) */
{ctl_read_write, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_SLUN| CTL_FLAG_DATA_OUT,
- CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE},
+ CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE,
+ 10, {0x18, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0x07}},
/* 2B SEEK(10) */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -270,10 +694,15 @@ struct ctl_cmd_entry ctl_cmd_table[] =
/* 2E WRITE AND VERIFY(10) */
{ctl_read_write, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_SLUN| CTL_FLAG_DATA_OUT,
- CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE},
+ CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE,
+ 10, {0x10, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0x07}},
/* 2F VERIFY(10) */
-{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+{ctl_verify, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_SLUN |
+ CTL_FLAG_DATA_OUT |
+ CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_READ | CTL_LUN_PAT_RANGE,
+ 10, {0x16, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0x07}},
/* 30 SEARCH DATA HIGH(10) */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -293,7 +722,8 @@ struct ctl_cmd_entry ctl_cmd_table[] =
/* 35 SYNCHRONIZE CACHE(10) */
{ctl_sync_cache, CTL_SERIDX_START, CTL_CMD_FLAG_OK_ON_SLUN |
CTL_FLAG_DATA_NONE,
- CTL_LUN_PAT_NONE},
+ CTL_LUN_PAT_NONE,
+ 10, {0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0x07}},
/* 36 LOCK UNLOCK CACHE(10) */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -311,12 +741,17 @@ struct ctl_cmd_entry ctl_cmd_table[] =
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
/* 3B WRITE BUFFER */
-{ctl_write_buffer, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_PROC |
- CTL_FLAG_DATA_OUT,
- CTL_LUN_PAT_NONE},
+{ctl_write_buffer, CTL_SERIDX_MD_SEL, CTL_CMD_FLAG_OK_ON_BOTH |
+ CTL_FLAG_DATA_OUT,
+ CTL_LUN_PAT_NONE,
+ 10, {0x1f, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x07}},
/* 3C READ BUFFER */
-{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+{ctl_read_buffer, CTL_SERIDX_MD_SNS, CTL_CMD_FLAG_OK_ON_BOTH |
+ CTL_FLAG_DATA_IN |
+ CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_NONE,
+ 10, {0x1f, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x07}},
/* 3D UPDATE BLOCK */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -333,11 +768,13 @@ struct ctl_cmd_entry ctl_cmd_table[] =
/* 41 WRITE SAME(10) */
{ctl_write_same, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_SLUN |
CTL_FLAG_DATA_OUT,
- CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE},
+ CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE,
+ 10, {0x0a, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0x07}},
/* 42 READ SUB-CHANNEL / UNMAP */
{ctl_unmap, CTL_SERIDX_UNMAP, CTL_CMD_FLAG_OK_ON_SLUN | CTL_FLAG_DATA_OUT,
- CTL_LUN_PAT_WRITE},
+ CTL_LUN_PAT_WRITE,
+ 10, {0, 0, 0, 0, 0, 0, 0xff, 0xff, 0x07}},
/* 43 READ TOC/PMA/ATIP */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -399,25 +836,25 @@ struct ctl_cmd_entry ctl_cmd_table[] =
CTL_CMD_FLAG_OK_ON_INOPERABLE |
CTL_CMD_FLAG_OK_ON_SECONDARY |
CTL_FLAG_DATA_OUT,
- CTL_LUN_PAT_NONE},
+ CTL_LUN_PAT_NONE, 10, {0x11, 0, 0, 0, 0, 0, 0xff, 0xff, 0x07} },
/* 56 RESERVE(10) */
-{ctl_scsi_reserve, CTL_SERIDX_RESV, CTL_CMD_FLAG_ALLOW_ON_RESV |
+{ctl_scsi_reserve, CTL_SERIDX_RES, CTL_CMD_FLAG_ALLOW_ON_RESV |
CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
CTL_CMD_FLAG_OK_ON_SECONDARY |
CTL_FLAG_DATA_OUT,
- CTL_LUN_PAT_NONE},
+ CTL_LUN_PAT_NONE, 10, {0x02, 0, 0xff, 0, 0, 0, 0xff, 0xff, 0x07} },
/* 57 RELEASE(10) */
-{ctl_scsi_release, CTL_SERIDX_REL, CTL_CMD_FLAG_ALLOW_ON_RESV |
+{ctl_scsi_release, CTL_SERIDX_RES, CTL_CMD_FLAG_ALLOW_ON_RESV |
CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
CTL_CMD_FLAG_OK_ON_SECONDARY |
CTL_FLAG_DATA_OUT,
- CTL_LUN_PAT_NONE},
+ CTL_LUN_PAT_NONE, 10, {0x02, 0, 0xff, 0, 0, 0, 0xff, 0xff, 0x07} },
/* 58 REPAIR TRACK */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -430,8 +867,9 @@ struct ctl_cmd_entry ctl_cmd_table[] =
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
CTL_CMD_FLAG_OK_ON_SECONDARY |
- CTL_FLAG_DATA_IN,
- CTL_LUN_PAT_NONE},
+ CTL_FLAG_DATA_IN |
+ CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_NONE, 10, {0x18, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0x07} },
/* 5B CLOSE TRACK/SESSION */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -443,29 +881,13 @@ struct ctl_cmd_entry ctl_cmd_table[] =
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
/* 5E PERSISTENT RESERVE IN */
-{ctl_persistent_reserve_in, CTL_SERIDX_PRES_IN, CTL_CMD_FLAG_ALLOW_ON_RESV |
- CTL_CMD_FLAG_OK_ON_BOTH |
- CTL_CMD_FLAG_OK_ON_STOPPED |
- CTL_CMD_FLAG_OK_ON_INOPERABLE |
- CTL_CMD_FLAG_OK_ON_SECONDARY |
- CTL_FLAG_DATA_IN |
- CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+{__DECONST(ctl_opfunc *, ctl_cmd_table_5e), CTL_SERIDX_INVLD, CTL_CMD_FLAG_SA5,
CTL_LUN_PAT_NONE},
-//{ctl_persistent_reserve_in, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE},
-
/* 5F PERSISTENT RESERVE OUT */
-{ctl_persistent_reserve_out, CTL_SERIDX_PRES_OUT, CTL_CMD_FLAG_ALLOW_ON_RESV |
- CTL_CMD_FLAG_OK_ON_BOTH |
- CTL_CMD_FLAG_OK_ON_STOPPED |
- CTL_CMD_FLAG_OK_ON_INOPERABLE|
- CTL_CMD_FLAG_OK_ON_SECONDARY |
- CTL_FLAG_DATA_OUT |
- CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+{__DECONST(ctl_opfunc *, ctl_cmd_table_5f), CTL_SERIDX_INVLD, CTL_CMD_FLAG_SA5,
CTL_LUN_PAT_NONE},
-//{ctl_persistent_reserve_out, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE},
-
/* 60 */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -572,10 +994,12 @@ struct ctl_cmd_entry ctl_cmd_table[] =
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
/* 83 EXTENDED COPY */
-{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+{__DECONST(ctl_opfunc *, ctl_cmd_table_83), CTL_SERIDX_INVLD, CTL_CMD_FLAG_SA5,
+ CTL_LUN_PAT_NONE},
/* 84 RECEIVE COPY RESULTS */
-{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+{__DECONST(ctl_opfunc *, ctl_cmd_table_84), CTL_SERIDX_INVLD, CTL_CMD_FLAG_SA5,
+ CTL_LUN_PAT_NONE},
/* 85 */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -589,14 +1013,21 @@ struct ctl_cmd_entry ctl_cmd_table[] =
/* 88 READ(16) */
{ctl_read_write, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_SLUN | CTL_FLAG_DATA_IN |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
- CTL_LUN_PAT_READ | CTL_LUN_PAT_RANGE},
+ CTL_LUN_PAT_READ | CTL_LUN_PAT_RANGE,
+ 16, {0x18, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
-/* 89 */
-{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+/* 89 COMPARE AND WRITE */
+{ctl_cnw, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_SLUN| CTL_FLAG_DATA_OUT,
+ CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE,
+ 16, {0x18, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0, 0, 0, 0xff, 0, 0x07}},
/* 8A WRITE(16) */
{ctl_read_write, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_SLUN| CTL_FLAG_DATA_OUT,
- CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE},
+ CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE,
+ 16, {0x18, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
/* 8B */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -609,10 +1040,17 @@ struct ctl_cmd_entry ctl_cmd_table[] =
/* 8E WRITE AND VERIFY(16) */
{ctl_read_write, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_SLUN| CTL_FLAG_DATA_OUT,
- CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE},
+ CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE,
+ 16, {0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
/* 8F VERIFY(16) */
-{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+{ctl_verify, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_SLUN |
+ CTL_FLAG_DATA_OUT |
+ CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_READ | CTL_LUN_PAT_RANGE,
+ 16, {0x16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
/* 90 PRE-FETCH(16) */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -620,7 +1058,9 @@ struct ctl_cmd_entry ctl_cmd_table[] =
/* 91 SYNCHRONIZE CACHE(16) */
{ctl_sync_cache, CTL_SERIDX_START, CTL_CMD_FLAG_OK_ON_SLUN |
CTL_FLAG_DATA_NONE,
- CTL_LUN_PAT_NONE},
+ CTL_LUN_PAT_NONE,
+ 16, {0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
/* 92 LOCK UNLOCK CACHE(16) */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -628,7 +1068,9 @@ struct ctl_cmd_entry ctl_cmd_table[] =
/* 93 WRITE SAME(16) */
{ctl_write_same, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_SLUN |
CTL_FLAG_DATA_OUT,
- CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE},
+ CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE,
+ 16, {0x0a, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
/* 94 */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -661,14 +1103,8 @@ struct ctl_cmd_entry ctl_cmd_table[] =
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
/* 9E SERVICE ACTION IN(16) */
-/* XXX KDM not all service actions will be read capacity!! */
-{ctl_service_action_in, CTL_SERIDX_RD_CAP, CTL_CMD_FLAG_OK_ON_SLUN |
- CTL_CMD_FLAG_OK_ON_STOPPED |
- CTL_CMD_FLAG_OK_ON_INOPERABLE |
- CTL_CMD_FLAG_OK_ON_SECONDARY |
- CTL_FLAG_DATA_IN |
- CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
- CTL_LUN_PAT_READCAP},
+{__DECONST(ctl_opfunc *, ctl_cmd_table_9e), CTL_SERIDX_INVLD, CTL_CMD_FLAG_SA5,
+ CTL_LUN_PAT_NONE},
/* 9F SERVICE ACTION OUT(16) */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -683,7 +1119,8 @@ struct ctl_cmd_entry ctl_cmd_table[] =
CTL_CMD_FLAG_OK_ON_SECONDARY |
CTL_FLAG_DATA_IN |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
- CTL_LUN_PAT_NONE},
+ CTL_LUN_PAT_NONE,
+ 12, {0, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
/* A1 BLANK */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -691,15 +1128,11 @@ struct ctl_cmd_entry ctl_cmd_table[] =
/* A2 SEND EVENT */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
-/* A3 MAINTENANCE (IN) Service Action - (0A) REPORT TARGET PORT GROUP */
-{ctl_maintenance_in, CTL_SERIDX_MAIN_IN, CTL_CMD_FLAG_OK_ON_BOTH |
- CTL_CMD_FLAG_OK_ON_STOPPED |
- CTL_CMD_FLAG_OK_ON_INOPERABLE |
- CTL_CMD_FLAG_OK_ON_SECONDARY |
- CTL_FLAG_DATA_IN,
+/* A3 MAINTENANCE IN */
+{__DECONST(ctl_opfunc *, ctl_cmd_table_a3), CTL_SERIDX_INVLD, CTL_CMD_FLAG_SA5,
CTL_LUN_PAT_NONE},
-/* A4 MAINTENANCE (OUT) */
+/* A4 MAINTENANCE OUT */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
/* A5 MOVE MEDIUM */
@@ -714,14 +1147,16 @@ struct ctl_cmd_entry ctl_cmd_table[] =
/* A8 READ(12) */
{ctl_read_write, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_SLUN | CTL_FLAG_DATA_IN |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
- CTL_LUN_PAT_READ | CTL_LUN_PAT_RANGE},
+ CTL_LUN_PAT_READ | CTL_LUN_PAT_RANGE,
+ 12, {0x18, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
/* A9 PLAY TRACK RELATIVE(12) */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
/* AA WRITE(12) */
{ctl_read_write, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_SLUN| CTL_FLAG_DATA_OUT,
- CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE},
+ CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE,
+ 12, {0x18, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
/* AB SERVICE ACTION IN(12) */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -734,10 +1169,15 @@ struct ctl_cmd_entry ctl_cmd_table[] =
/* AE WRITE AND VERIFY(12) */
{ctl_read_write, CTL_SERIDX_WRITE, CTL_CMD_FLAG_OK_ON_SLUN| CTL_FLAG_DATA_OUT,
- CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE},
+ CTL_LUN_PAT_WRITE | CTL_LUN_PAT_RANGE,
+ 12, {0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
/* AF VERIFY(12) */
-{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
+{ctl_verify, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_SLUN |
+ CTL_FLAG_DATA_OUT |
+ CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
+ CTL_LUN_PAT_READ | CTL_LUN_PAT_RANGE,
+ 12, {0x16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
/* B0 SEARCH DATA HIGH(12) */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@@ -769,43 +1209,51 @@ struct ctl_cmd_entry ctl_cmd_table[] =
/* B9 READ CD MSF */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
-/* BA REDUNDANCY GROUP (IN) */
+/* BA REDUNDANCY GROUP IN */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
-/* BB REDUNDANCY GROUP (OUT) */
+/* BB REDUNDANCY GROUP OUT */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
-/* BC SPARE (IN) */
+/* BC SPARE IN */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
-/* BD SPARE (OUT) */
+/* BD SPARE OUT */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
-/* BE VOLUME SET (IN) */
+/* BE VOLUME SET IN */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
-/* BF VOLUME SET (OUT) */
+/* BF VOLUME SET OUT */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
/* C0 - ISC_SEND_MSG_SHORT */
//{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE},
{ctl_isc, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_PROC | CTL_FLAG_DATA_NONE,
- CTL_LUN_PAT_NONE},
+ CTL_LUN_PAT_NONE,
+ 16, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}},
/* C1 - ISC_SEND_MSG */
//{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE},
{ctl_isc, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_PROC | CTL_FLAG_DATA_OUT,
- CTL_LUN_PAT_NONE},
+ CTL_LUN_PAT_NONE,
+ 16, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}},
/* C2 - ISC_WRITE */
//{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE},
{ctl_isc, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_PROC | CTL_FLAG_DATA_OUT,
- CTL_LUN_PAT_NONE},
+ CTL_LUN_PAT_NONE,
+ 16, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}},
/* C3 - ISC_READ */
//{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE},
{ctl_isc, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_PROC | CTL_FLAG_DATA_IN,
- CTL_LUN_PAT_NONE},
+ CTL_LUN_PAT_NONE,
+ 16, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}},
/* C4 */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
diff --git a/sys/cam/ctl/ctl_error.c b/sys/cam/ctl/ctl_error.c
index 4425b2d608ad5..6ecb54bee75ae 100644
--- a/sys/cam/ctl/ctl_error.c
+++ b/sys/cam/ctl/ctl_error.c
@@ -401,6 +401,11 @@ ctl_build_ua(ctl_ua_type ua_type, struct scsi_sense_data *sense,
asc = 0x29;
ascq = 0x03;
break;
+ case CTL_UA_I_T_NEXUS_LOSS:
+ /* 29h/07h I_T NEXUS LOSS OCCURRED */
+ asc = 0x29;
+ ascq = 0x07;
+ break;
case CTL_UA_LUN_RESET:
/* 29h/00h POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */
/*
@@ -790,6 +795,18 @@ ctl_set_busy(struct ctl_scsiio *ctsio)
}
void
+ctl_set_task_aborted(struct ctl_scsiio *ctsio)
+{
+ struct scsi_sense_data *sense;
+
+ sense = &ctsio->sense_data;
+ memset(sense, 0, sizeof(*sense));
+ ctsio->scsi_status = SCSI_STATUS_TASK_ABORTED;
+ ctsio->sense_len = 0;
+ ctsio->io_hdr.status = CTL_CMD_ABORTED;
+}
+
+void
ctl_set_success(struct ctl_scsiio *ctsio)
{
struct scsi_sense_data *sense;
diff --git a/sys/cam/ctl/ctl_error.h b/sys/cam/ctl/ctl_error.h
index 1d8bd9b7d1b83..62596d0a1a400 100644
--- a/sys/cam/ctl/ctl_error.h
+++ b/sys/cam/ctl/ctl_error.h
@@ -80,6 +80,7 @@ void ctl_set_data_phase_error(struct ctl_scsiio *ctsio);
void ctl_set_reservation_conflict(struct ctl_scsiio *ctsio);
void ctl_set_queue_full(struct ctl_scsiio *ctsio);
void ctl_set_busy(struct ctl_scsiio *ctsio);
+void ctl_set_task_aborted(struct ctl_scsiio *ctsio);
void ctl_set_success(struct ctl_scsiio *ctsio);
#endif /* _CTL_ERROR_H_ */
diff --git a/sys/cam/ctl/ctl_frontend.c b/sys/cam/ctl/ctl_frontend.c
index 2bc6ade46415b..34b4a9ef58756 100644
--- a/sys/cam/ctl/ctl_frontend.c
+++ b/sys/cam/ctl/ctl_frontend.c
@@ -66,7 +66,77 @@ __FBSDID("$FreeBSD$");
extern struct ctl_softc *control_softc;
int
-ctl_frontend_register(struct ctl_frontend *fe, int master_shelf)
+ctl_frontend_register(struct ctl_frontend *fe)
+{
+ struct ctl_frontend *fe_tmp;
+
+ KASSERT(control_softc != NULL, ("CTL is not initialized"));
+
+ /*
+ * Sanity check, make sure this isn't a duplicate registration.
+ */
+ mtx_lock(&control_softc->ctl_lock);
+ STAILQ_FOREACH(fe_tmp, &control_softc->fe_list, links) {
+ if (strcmp(fe_tmp->name, fe->name) == 0) {
+ mtx_unlock(&control_softc->ctl_lock);
+ return (-1);
+ }
+ }
+ mtx_unlock(&control_softc->ctl_lock);
+ STAILQ_INIT(&fe->port_list);
+
+ /*
+ * Call the frontend's initialization routine.
+ */
+ if (fe->init != NULL)
+ fe->init();
+
+ mtx_lock(&control_softc->ctl_lock);
+ control_softc->num_frontends++;
+ STAILQ_INSERT_TAIL(&control_softc->fe_list, fe, links);
+ mtx_unlock(&control_softc->ctl_lock);
+ return (0);
+}
+
+int
+ctl_frontend_deregister(struct ctl_frontend *fe)
+{
+
+ if (!STAILQ_EMPTY(&fe->port_list))
+ return (-1);
+
+ mtx_lock(&control_softc->ctl_lock);
+ STAILQ_REMOVE(&control_softc->fe_list, fe, ctl_frontend, links);
+ control_softc->num_frontends--;
+ mtx_unlock(&control_softc->ctl_lock);
+
+ /*
+ * Call the frontend's shutdown routine.
+ */
+ if (fe->shutdown != NULL)
+ fe->shutdown();
+ return (0);
+}
+
+struct ctl_frontend *
+ctl_frontend_find(char *frontend_name)
+{
+ struct ctl_softc *ctl_softc = control_softc;
+ struct ctl_frontend *fe;
+
+ mtx_lock(&ctl_softc->ctl_lock);
+ STAILQ_FOREACH(fe, &ctl_softc->fe_list, links) {
+ if (strcmp(fe->name, frontend_name) == 0) {
+ mtx_unlock(&ctl_softc->ctl_lock);
+ return (fe);
+ }
+ }
+ mtx_unlock(&ctl_softc->ctl_lock);
+ return (NULL);
+}
+
+int
+ctl_port_register(struct ctl_port *port, int master_shelf)
{
struct ctl_io_pool *pool;
int port_num;
@@ -80,13 +150,24 @@ ctl_frontend_register(struct ctl_frontend *fe, int master_shelf)
port_num = ctl_ffz(&control_softc->ctl_port_mask, CTL_MAX_PORTS);
if ((port_num == -1)
|| (ctl_set_mask(&control_softc->ctl_port_mask, port_num) == -1)) {
- fe->targ_port = -1;
+ port->targ_port = -1;
mtx_unlock(&control_softc->ctl_lock);
return (1);
}
- control_softc->num_frontends++;
-
+ control_softc->num_ports++;
mtx_unlock(&control_softc->ctl_lock);
+
+ /*
+ * Initialize the initiator and portname mappings
+ */
+ port->max_initiators = CTL_MAX_INIT_PER_PORT;
+ port->wwpn_iid = malloc(sizeof(*port->wwpn_iid) * port->max_initiators,
+ M_CTL, M_NOWAIT | M_ZERO);
+ if (port->wwpn_iid == NULL) {
+ retval = ENOMEM;
+ goto error;
+ }
+
/*
* We add 20 to whatever the caller requests, so he doesn't get
* burned by queueing things back to the pending sense queue. In
@@ -95,90 +176,137 @@ ctl_frontend_register(struct ctl_frontend *fe, int master_shelf)
* pending sense queue on the next command, whether or not it is
* a REQUEST SENSE.
*/
- retval = ctl_pool_create(control_softc,
- (fe->port_type != CTL_PORT_IOCTL) ?
- CTL_POOL_FETD : CTL_POOL_IOCTL,
- fe->num_requested_ctl_io + 20, &pool);
+ retval = ctl_pool_create(control_softc, CTL_POOL_FETD,
+ port->num_requested_ctl_io + 20, &pool);
if (retval != 0) {
- fe->targ_port = -1;
+ free(port->wwpn_iid, M_CTL);
+error:
+ port->targ_port = -1;
mtx_lock(&control_softc->ctl_lock);
ctl_clear_mask(&control_softc->ctl_port_mask, port_num);
mtx_unlock(&control_softc->ctl_lock);
return (retval);
}
+ port->ctl_pool_ref = pool;
- mtx_lock(&control_softc->ctl_lock);
-
- /* For now assume master shelf */
- //fe->targ_port = port_num;
- fe->targ_port = port_num + (master_shelf!=0 ? 0 : CTL_MAX_PORTS);
- fe->max_initiators = CTL_MAX_INIT_PER_PORT;
- STAILQ_INSERT_TAIL(&control_softc->fe_list, fe, links);
- control_softc->ctl_ports[port_num] = fe;
+ if (port->options.stqh_first == NULL)
+ STAILQ_INIT(&port->options);
+ mtx_lock(&control_softc->ctl_lock);
+ port->targ_port = port_num + (master_shelf != 0 ? 0 : CTL_MAX_PORTS);
+ STAILQ_INSERT_TAIL(&port->frontend->port_list, port, fe_links);
+ STAILQ_INSERT_TAIL(&control_softc->port_list, port, links);
+ control_softc->ctl_ports[port_num] = port;
mtx_unlock(&control_softc->ctl_lock);
- fe->ctl_pool_ref = pool;
-
return (retval);
}
int
-ctl_frontend_deregister(struct ctl_frontend *fe)
+ctl_port_deregister(struct ctl_port *port)
{
struct ctl_io_pool *pool;
- int port_num;
- int retval;
+ int port_num, retval, i;
retval = 0;
- pool = (struct ctl_io_pool *)fe->ctl_pool_ref;
+ pool = (struct ctl_io_pool *)port->ctl_pool_ref;
- if (fe->targ_port == -1) {
+ if (port->targ_port == -1) {
retval = 1;
goto bailout;
}
mtx_lock(&control_softc->ctl_lock);
- STAILQ_REMOVE(&control_softc->fe_list, fe, ctl_frontend, links);
- control_softc->num_frontends--;
- port_num = (fe->targ_port < CTL_MAX_PORTS) ? fe->targ_port :
- fe->targ_port - CTL_MAX_PORTS;
+ STAILQ_REMOVE(&control_softc->port_list, port, ctl_port, links);
+ STAILQ_REMOVE(&port->frontend->port_list, port, ctl_port, fe_links);
+ control_softc->num_ports--;
+ port_num = (port->targ_port < CTL_MAX_PORTS) ? port->targ_port :
+ port->targ_port - CTL_MAX_PORTS;
ctl_clear_mask(&control_softc->ctl_port_mask, port_num);
control_softc->ctl_ports[port_num] = NULL;
mtx_unlock(&control_softc->ctl_lock);
ctl_pool_free(pool);
+ ctl_free_opts(&port->options);
+
+ free(port->port_devid, M_CTL);
+ port->port_devid = NULL;
+ free(port->target_devid, M_CTL);
+ port->target_devid = NULL;
+ free(port->init_devid, M_CTL);
+ port->init_devid = NULL;
+ for (i = 0; i < port->max_initiators; i++)
+ free(port->wwpn_iid[i].name, M_CTL);
+ free(port->wwpn_iid, M_CTL);
bailout:
return (retval);
}
void
-ctl_frontend_set_wwns(struct ctl_frontend *fe, int wwnn_valid, uint64_t wwnn,
+ctl_port_set_wwns(struct ctl_port *port, int wwnn_valid, uint64_t wwnn,
int wwpn_valid, uint64_t wwpn)
{
- if (wwnn_valid)
- fe->wwnn = wwnn;
+ struct scsi_vpd_id_descriptor *desc;
+ int len, proto;
- if (wwpn_valid)
- fe->wwpn = wwpn;
+ if (port->port_type == CTL_PORT_FC)
+ proto = SCSI_PROTO_FC << 4;
+ else if (port->port_type == CTL_PORT_ISCSI)
+ proto = SCSI_PROTO_ISCSI << 4;
+ else
+ proto = SCSI_PROTO_SPI << 4;
+
+ if (wwnn_valid) {
+ port->wwnn = wwnn;
+
+ free(port->target_devid, M_CTL);
+
+ len = sizeof(struct scsi_vpd_device_id) + CTL_WWPN_LEN;
+ port->target_devid = malloc(sizeof(struct ctl_devid) + len,
+ M_CTL, M_WAITOK | M_ZERO);
+ port->target_devid->len = len;
+ desc = (struct scsi_vpd_id_descriptor *)port->target_devid->data;
+ desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY;
+ desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_TARGET |
+ SVPD_ID_TYPE_NAA;
+ desc->length = CTL_WWPN_LEN;
+ scsi_u64to8b(port->wwnn, desc->identifier);
+ }
+
+ if (wwpn_valid) {
+ port->wwpn = wwpn;
+
+ free(port->port_devid, M_CTL);
+
+ len = sizeof(struct scsi_vpd_device_id) + CTL_WWPN_LEN;
+ port->port_devid = malloc(sizeof(struct ctl_devid) + len,
+ M_CTL, M_WAITOK | M_ZERO);
+ port->port_devid->len = len;
+ desc = (struct scsi_vpd_id_descriptor *)port->port_devid->data;
+ desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY;
+ desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT |
+ SVPD_ID_TYPE_NAA;
+ desc->length = CTL_WWPN_LEN;
+ scsi_u64to8b(port->wwpn, desc->identifier);
+ }
}
void
-ctl_frontend_online(struct ctl_frontend *fe)
+ctl_port_online(struct ctl_port *port)
{
- fe->port_online(fe->onoff_arg);
+ port->port_online(port->onoff_arg);
/* XXX KDM need a lock here? */
- fe->status |= CTL_PORT_STATUS_ONLINE;
+ port->status |= CTL_PORT_STATUS_ONLINE;
}
void
-ctl_frontend_offline(struct ctl_frontend *fe)
+ctl_port_offline(struct ctl_port *port)
{
- fe->port_offline(fe->onoff_arg);
+ port->port_offline(port->onoff_arg);
/* XXX KDM need a lock here? */
- fe->status &= ~CTL_PORT_STATUS_ONLINE;
+ port->status &= ~CTL_PORT_STATUS_ONLINE;
}
/*
diff --git a/sys/cam/ctl/ctl_frontend.h b/sys/cam/ctl/ctl_frontend.h
index 23f91b4a810e6..825ff50a0bcab 100644
--- a/sys/cam/ctl/ctl_frontend.h
+++ b/sys/cam/ctl/ctl_frontend.h
@@ -46,12 +46,46 @@ typedef enum {
CTL_PORT_STATUS_LUN_ONLINE = 0x04
} ctl_port_status;
+typedef int (*fe_init_t)(void);
+typedef void (*fe_shutdown_t)(void);
typedef void (*port_func_t)(void *onoff_arg);
-typedef int (*targ_func_t)(void *arg, struct ctl_id targ_id);
+typedef int (*port_info_func_t)(void *onoff_arg, struct sbuf *sb);
typedef int (*lun_func_t)(void *arg, struct ctl_id targ_id, int lun_id);
+typedef uint32_t (*lun_map_func_t)(void *arg, uint32_t lun_id);
typedef int (*fe_ioctl_t)(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
struct thread *td);
-typedef int (*fe_devid_t)(struct ctl_scsiio *ctsio, int alloc_len);
+
+#define CTL_FRONTEND_DECLARE(name, driver) \
+ static int name ## _modevent(module_t mod, int type, void *data) \
+ { \
+ switch (type) { \
+ case MOD_LOAD: \
+ ctl_frontend_register( \
+ (struct ctl_frontend *)data); \
+ break; \
+ case MOD_UNLOAD: \
+ printf(#name " module unload - not possible for this module type\n"); \
+ return EINVAL; \
+ default: \
+ return EOPNOTSUPP; \
+ } \
+ return 0; \
+ } \
+ static moduledata_t name ## _mod = { \
+ #name, \
+ name ## _modevent, \
+ (void *)&driver \
+ }; \
+ DECLARE_MODULE(name, name ## _mod, SI_SUB_CONFIGURE, SI_ORDER_FOURTH); \
+ MODULE_DEPEND(name, ctl, 1, 1, 1); \
+ MODULE_DEPEND(name, cam, 1, 1, 1)
+
+struct ctl_wwpn_iid {
+ int in_use;
+ time_t last_use;
+ uint64_t wwpn;
+ char *name;
+};
/*
* The ctl_frontend structure is the registration mechanism between a FETD
@@ -105,30 +139,6 @@ typedef int (*fe_devid_t)(struct ctl_scsiio *ctsio, int alloc_len);
* and port_offline(). This is specified by the
* FETD.
*
- * targ_enable(): This function is called, with targ_lun_arg and a
- * target ID as its arguments, by CTL when it wants
- * the FETD to enable a particular target. targ_enable()
- * will always be called for a particular target ID
- * before any LUN is enabled for that target. If the
- * FETD does not support enabling targets, but rather
- * LUNs, it should ignore this call and return 0. If
- * the FETD does support enabling targets, it should
- * return 0 for success and non-zero if it cannot
- * enable the given target.
- *
- * TODO: Add the ability to specify a WWID here.
- *
- * targ_disable(): This function is called, with targ_lun_arg and a
- * target ID as its arguments, by CTL when it wants
- * the FETD to disable a particular target.
- * targ_disable() will always be called for a
- * particular target ID after all LUNs are disabled
- * on that particular target. If the FETD does not
- * support enabling targets, it should ignore this
- * call and return 0. If the FETD does support
- * enabling targets, it should return 0 for success,
- * and non-zero if it cannot disable the given target.
- *
* lun_enable(): This function is called, with targ_lun_arg, a target
* ID and a LUN ID as its arguments, by CTL when it
* wants the FETD to enable a particular LUN. If the
@@ -203,7 +213,8 @@ typedef int (*fe_devid_t)(struct ctl_scsiio *ctsio, int alloc_len);
* links: Linked list pointers, used by CTL. The FETD
* shouldn't touch this field.
*/
-struct ctl_frontend {
+struct ctl_port {
+ struct ctl_frontend *frontend;
ctl_port_type port_type; /* passed to CTL */
int num_requested_ctl_io; /* passed to CTL */
char *port_name; /* passed to CTL */
@@ -211,25 +222,38 @@ struct ctl_frontend {
int virtual_port; /* passed to CTL */
port_func_t port_online; /* passed to CTL */
port_func_t port_offline; /* passed to CTL */
+ port_info_func_t port_info; /* passed to CTL */
void *onoff_arg; /* passed to CTL */
- targ_func_t targ_enable; /* passed to CTL */
- targ_func_t targ_disable; /* passed to CTL */
lun_func_t lun_enable; /* passed to CTL */
lun_func_t lun_disable; /* passed to CTL */
- fe_ioctl_t ioctl; /* passed to CTL */
- fe_devid_t devid; /* passed to CTL */
+ lun_map_func_t lun_map; /* passed to CTL */
void *targ_lun_arg; /* passed to CTL */
void (*fe_datamove)(union ctl_io *io); /* passed to CTL */
void (*fe_done)(union ctl_io *io); /* passed to CTL */
- void (*fe_dump)(void); /* passed to CTL */
int max_targets; /* passed to CTL */
int max_target_id; /* passed to CTL */
int32_t targ_port; /* passed back to FETD */
void *ctl_pool_ref; /* passed back to FETD */
uint32_t max_initiators; /* passed back to FETD */
+ struct ctl_wwpn_iid *wwpn_iid; /* used by CTL */
uint64_t wwnn; /* set by CTL before online */
uint64_t wwpn; /* set by CTL before online */
ctl_port_status status; /* used by CTL */
+ ctl_options_t options; /* passed to CTL */
+ struct ctl_devid *port_devid; /* passed to CTL */
+ struct ctl_devid *target_devid; /* passed to CTL */
+ struct ctl_devid *init_devid; /* passed to CTL */
+ STAILQ_ENTRY(ctl_port) fe_links; /* used by CTL */
+ STAILQ_ENTRY(ctl_port) links; /* used by CTL */
+};
+
+struct ctl_frontend {
+ char name[CTL_DRIVER_NAME_LEN]; /* passed to CTL */
+ fe_init_t init; /* passed to CTL */
+ fe_ioctl_t ioctl; /* passed to CTL */
+ void (*fe_dump)(void); /* passed to CTL */
+ fe_shutdown_t shutdown; /* passed to CTL */
+ STAILQ_HEAD(, ctl_port) port_list; /* used by CTL */
STAILQ_ENTRY(ctl_frontend) links; /* used by CTL */
};
@@ -237,7 +261,7 @@ struct ctl_frontend {
* This may block until resources are allocated. Called at FETD module load
* time. Returns 0 for success, non-zero for failure.
*/
-int ctl_frontend_register(struct ctl_frontend *fe, int master_SC);
+int ctl_frontend_register(struct ctl_frontend *fe);
/*
* Called at FETD module unload time.
@@ -246,20 +270,37 @@ int ctl_frontend_register(struct ctl_frontend *fe, int master_SC);
int ctl_frontend_deregister(struct ctl_frontend *fe);
/*
+ * Find the frontend by its name. Returns NULL if not found.
+ */
+struct ctl_frontend * ctl_frontend_find(char *frontend_name);
+
+/*
+ * This may block until resources are allocated. Called at FETD module load
+ * time. Returns 0 for success, non-zero for failure.
+ */
+int ctl_port_register(struct ctl_port *port, int master_SC);
+
+/*
+ * Called at FETD module unload time.
+ * Returns 0 for success, non-zero for failure.
+ */
+int ctl_port_deregister(struct ctl_port *port);
+
+/*
* Called to set the WWNN and WWPN for a particular frontend.
*/
-void ctl_frontend_set_wwns(struct ctl_frontend *fe, int wwnn_valid,
+void ctl_port_set_wwns(struct ctl_port *port, int wwnn_valid,
uint64_t wwnn, int wwpn_valid, uint64_t wwpn);
/*
* Called to bring a particular frontend online.
*/
-void ctl_frontend_online(struct ctl_frontend *fe);
+void ctl_port_online(struct ctl_port *fe);
/*
* Called to take a particular frontend offline.
*/
-void ctl_frontend_offline(struct ctl_frontend *fe);
+void ctl_port_offline(struct ctl_port *fe);
/*
* This routine queues I/O and task management requests from the FETD to the
@@ -280,21 +321,18 @@ int ctl_queue(union ctl_io *io);
int ctl_queue_sense(union ctl_io *io);
/*
- * This routine adds an initiator to CTL's port database. The WWPN should
- * be the FC WWPN, if available. The targ_port field should be the same as
- * the targ_port passed back from CTL in the ctl_frontend structure above.
+ * This routine adds an initiator to CTL's port database.
* The iid field should be the same as the iid passed in the nexus of each
* ctl_io from this initiator.
+ * The WWPN should be the FC WWPN, if available.
*/
-int ctl_add_initiator(uint64_t wwpn, int32_t targ_port, uint32_t iid);
+int ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name);
/*
- * This routine will remove an initiator from CTL's port database. The
- * targ_port field should be the same as the targ_port passed back in the
- * ctl_frontend structure above. The iid field should be the same as the
- * iid passed in the nexus of each ctl_io from this initiator.
+ * This routine will remove an initiator from CTL's port database.
+ * The iid field should be the same as the iid passed in the nexus of each
+ * ctl_io from this initiator.
*/
-int
-ctl_remove_initiator(int32_t targ_port, uint32_t iid);
+int ctl_remove_initiator(struct ctl_port *port, int iid);
#endif /* _CTL_FRONTEND_H_ */
diff --git a/sys/cam/ctl/ctl_frontend_cam_sim.c b/sys/cam/ctl/ctl_frontend_cam_sim.c
index 37366e5685431..72f8dd81c8b48 100644
--- a/sys/cam/ctl/ctl_frontend_cam_sim.c
+++ b/sys/cam/ctl/ctl_frontend_cam_sim.c
@@ -74,13 +74,12 @@ struct cfcs_io {
};
struct cfcs_softc {
- struct ctl_frontend fe;
+ struct ctl_port port;
char port_name[32];
struct cam_sim *sim;
struct cam_devq *devq;
struct cam_path *path;
struct mtx lock;
- char lock_desc[32];
uint64_t wwnn;
uint64_t wwpn;
uint32_t cur_tag_num;
@@ -97,12 +96,9 @@ struct cfcs_softc {
CAM_SENSE_PHYS)
int cfcs_init(void);
-void cfcs_shutdown(void);
static void cfcs_poll(struct cam_sim *sim);
static void cfcs_online(void *arg);
static void cfcs_offline(void *arg);
-static int cfcs_targ_enable(void *arg, struct ctl_id targ_id);
-static int cfcs_targ_disable(void *arg, struct ctl_id targ_id);
static int cfcs_lun_enable(void *arg, struct ctl_id target_id, int lun_id);
static int cfcs_lun_disable(void *arg, struct ctl_id target_id, int lun_id);
static void cfcs_datamove(union ctl_io *io);
@@ -124,25 +120,19 @@ SYSCTL_NODE(_kern_cam, OID_AUTO, ctl2cam, CTLFLAG_RD, 0,
SYSCTL_INT(_kern_cam_ctl2cam, OID_AUTO, max_sense, CTLFLAG_RW,
&cfcs_max_sense, 0, "Maximum sense data size");
-static int cfcs_module_event_handler(module_t, int /*modeventtype_t*/, void *);
-
-static moduledata_t cfcs_moduledata = {
- "ctlcfcs",
- cfcs_module_event_handler,
- NULL
+static struct ctl_frontend cfcs_frontend =
+{
+ .name = "camsim",
+ .init = cfcs_init,
};
-
-DECLARE_MODULE(ctlcfcs, cfcs_moduledata, SI_SUB_CONFIGURE, SI_ORDER_FOURTH);
-MODULE_VERSION(ctlcfcs, 1);
-MODULE_DEPEND(ctlcfi, ctl, 1, 1, 1);
-MODULE_DEPEND(ctlcfi, cam, 1, 1, 1);
+CTL_FRONTEND_DECLARE(ctlcfcs, cfcs_frontend);
int
cfcs_init(void)
{
struct cfcs_softc *softc;
struct ccb_setasync csa;
- struct ctl_frontend *fe;
+ struct ctl_port *port;
#ifdef NEEDTOPORT
char wwnn[8];
#endif
@@ -151,34 +141,32 @@ cfcs_init(void)
softc = &cfcs_softc;
retval = 0;
bzero(softc, sizeof(*softc));
- sprintf(softc->lock_desc, "ctl2cam");
- mtx_init(&softc->lock, softc->lock_desc, NULL, MTX_DEF);
- fe = &softc->fe;
+ mtx_init(&softc->lock, "ctl2cam", NULL, MTX_DEF);
+ port = &softc->port;
- fe->port_type = CTL_PORT_INTERNAL;
+ port->frontend = &cfcs_frontend;
+ port->port_type = CTL_PORT_INTERNAL;
/* XXX KDM what should the real number be here? */
- fe->num_requested_ctl_io = 4096;
- snprintf(softc->port_name, sizeof(softc->port_name), "ctl2cam");
- fe->port_name = softc->port_name;
- fe->port_online = cfcs_online;
- fe->port_offline = cfcs_offline;
- fe->onoff_arg = softc;
- fe->targ_enable = cfcs_targ_enable;
- fe->targ_disable = cfcs_targ_disable;
- fe->lun_enable = cfcs_lun_enable;
- fe->lun_disable = cfcs_lun_disable;
- fe->targ_lun_arg = softc;
- fe->fe_datamove = cfcs_datamove;
- fe->fe_done = cfcs_done;
+ port->num_requested_ctl_io = 4096;
+ snprintf(softc->port_name, sizeof(softc->port_name), "camsim");
+ port->port_name = softc->port_name;
+ port->port_online = cfcs_online;
+ port->port_offline = cfcs_offline;
+ port->onoff_arg = softc;
+ port->lun_enable = cfcs_lun_enable;
+ port->lun_disable = cfcs_lun_disable;
+ port->targ_lun_arg = softc;
+ port->fe_datamove = cfcs_datamove;
+ port->fe_done = cfcs_done;
/* XXX KDM what should we report here? */
/* XXX These should probably be fetched from CTL. */
- fe->max_targets = 1;
- fe->max_target_id = 15;
+ port->max_targets = 1;
+ port->max_target_id = 15;
- retval = ctl_frontend_register(fe, /*master_SC*/ 1);
+ retval = ctl_port_register(port, /*master_SC*/ 1);
if (retval != 0) {
- printf("%s: ctl_frontend_register() failed with error %d!\n",
+ printf("%s: ctl_port_register() failed with error %d!\n",
__func__, retval);
mtx_destroy(&softc->lock);
return (retval);
@@ -190,30 +178,29 @@ cfcs_init(void)
#ifdef NEEDTOPORT
ddb_GetWWNN((char *)wwnn);
softc->wwnn = be64dec(wwnn);
- softc->wwpn = softc->wwnn + (softc->fe.targ_port & 0xff);
+ softc->wwpn = softc->wwnn + (softc->port.targ_port & 0xff);
#endif
/*
* If the CTL frontend didn't tell us what our WWNN/WWPN is, go
* ahead and set something random.
*/
- if (fe->wwnn == 0) {
+ if (port->wwnn == 0) {
uint64_t random_bits;
arc4rand(&random_bits, sizeof(random_bits), 0);
softc->wwnn = (random_bits & 0x0000000fffffff00ULL) |
/* Company ID */ 0x5000000000000000ULL |
/* NL-Port */ 0x0300;
- softc->wwpn = softc->wwnn + fe->targ_port + 1;
- fe->wwnn = softc->wwnn;
- fe->wwpn = softc->wwpn;
+ softc->wwpn = softc->wwnn + port->targ_port + 1;
+ ctl_port_set_wwns(port, true, softc->wwnn, true, softc->wwpn);
} else {
- softc->wwnn = fe->wwnn;
- softc->wwpn = fe->wwpn;
+ softc->wwnn = port->wwnn;
+ softc->wwpn = port->wwpn;
}
mtx_lock(&softc->lock);
- softc->devq = cam_simq_alloc(fe->num_requested_ctl_io);
+ softc->devq = cam_simq_alloc(port->num_requested_ctl_io);
if (softc->devq == NULL) {
printf("%s: error allocating devq\n", __func__);
retval = ENOMEM;
@@ -222,7 +209,7 @@ cfcs_init(void)
softc->sim = cam_sim_alloc(cfcs_action, cfcs_poll, softc->port_name,
softc, /*unit*/ 0, &softc->lock, 1,
- fe->num_requested_ctl_io, softc->devq);
+ port->num_requested_ctl_io, softc->devq);
if (softc->sim == NULL) {
printf("%s: error allocating SIM\n", __func__);
retval = ENOMEM;
@@ -273,26 +260,6 @@ cfcs_poll(struct cam_sim *sim)
}
-void
-cfcs_shutdown(void)
-{
-
-}
-
-static int
-cfcs_module_event_handler(module_t mod, int what, void *arg)
-{
-
- switch (what) {
- case MOD_LOAD:
- return (cfcs_init());
- case MOD_UNLOAD:
- return (EBUSY);
- default:
- return (EOPNOTSUPP);
- }
-}
-
static void
cfcs_onoffline(void *arg, int online)
{
@@ -336,18 +303,6 @@ cfcs_offline(void *arg)
}
static int
-cfcs_targ_enable(void *arg, struct ctl_id targ_id)
-{
- return (0);
-}
-
-static int
-cfcs_targ_disable(void *arg, struct ctl_id targ_id)
-{
- return (0);
-}
-
-static int
cfcs_lun_enable(void *arg, struct ctl_id target_id, int lun_id)
{
return (0);
@@ -505,6 +460,10 @@ cfcs_done(union ctl_io *io)
union ccb *ccb;
ccb = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
+ if (ccb == NULL) {
+ ctl_free_io(io);
+ return;
+ }
/*
* At this point we should have status. If we don't, that's a bug.
@@ -586,7 +545,7 @@ cfcs_action(struct cam_sim *sim, union ccb *ccb)
return;
}
- io = ctl_alloc_io(softc->fe.ctl_pool_ref);
+ io = ctl_alloc_io(softc->port.ctl_pool_ref);
if (io == NULL) {
printf("%s: can't allocate ctl_io\n", __func__);
ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
@@ -605,7 +564,7 @@ cfcs_action(struct cam_sim *sim, union ccb *ccb)
*/
io->io_hdr.io_type = CTL_IO_SCSI;
io->io_hdr.nexus.initid.id = 1;
- io->io_hdr.nexus.targ_port = softc->fe.targ_port;
+ io->io_hdr.nexus.targ_port = softc->port.targ_port;
/*
* XXX KDM how do we handle target IDs?
*/
@@ -681,7 +640,7 @@ cfcs_action(struct cam_sim *sim, union ccb *ccb)
return;
}
- io = ctl_alloc_io(softc->fe.ctl_pool_ref);
+ io = ctl_alloc_io(softc->port.ctl_pool_ref);
if (io == NULL) {
ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
xpt_freeze_devq(ccb->ccb_h.path, 1);
@@ -696,7 +655,7 @@ cfcs_action(struct cam_sim *sim, union ccb *ccb)
io->io_hdr.io_type = CTL_IO_TASK;
io->io_hdr.nexus.initid.id = 1;
- io->io_hdr.nexus.targ_port = softc->fe.targ_port;
+ io->io_hdr.nexus.targ_port = softc->port.targ_port;
io->io_hdr.nexus.targ_target.id = ccb->ccb_h.target_id;
io->io_hdr.nexus.targ_lun = ccb->ccb_h.target_lun;
io->taskio.task_action = CTL_TASK_ABORT_TASK;
@@ -753,7 +712,7 @@ cfcs_action(struct cam_sim *sim, union ccb *ccb)
fc->bitrate = 800000;
fc->wwnn = softc->wwnn;
fc->wwpn = softc->wwpn;
- fc->port = softc->fe.targ_port;
+ fc->port = softc->port.targ_port;
fc->valid |= CTS_FC_VALID_WWNN | CTS_FC_VALID_WWPN |
CTS_FC_VALID_PORT;
ccb->ccb_h.status = CAM_REQ_CMP;
@@ -776,7 +735,7 @@ cfcs_action(struct cam_sim *sim, union ccb *ccb)
return;
}
- io = ctl_alloc_io(softc->fe.ctl_pool_ref);
+ io = ctl_alloc_io(softc->port.ctl_pool_ref);
if (io == NULL) {
ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
xpt_freeze_devq(ccb->ccb_h.path, 1);
@@ -786,12 +745,13 @@ cfcs_action(struct cam_sim *sim, union ccb *ccb)
ctl_zero_io(io);
/* Save pointers on both sides */
- io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = ccb;
+ if (ccb->ccb_h.func_code == XPT_RESET_DEV)
+ io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = ccb;
ccb->ccb_h.io_ptr = io;
io->io_hdr.io_type = CTL_IO_TASK;
io->io_hdr.nexus.initid.id = 0;
- io->io_hdr.nexus.targ_port = softc->fe.targ_port;
+ io->io_hdr.nexus.targ_port = softc->port.targ_port;
io->io_hdr.nexus.targ_target.id = ccb->ccb_h.target_id;
io->io_hdr.nexus.targ_lun = ccb->ccb_h.target_lun;
if (ccb->ccb_h.func_code == XPT_RESET_BUS)
@@ -845,7 +805,7 @@ cfcs_action(struct cam_sim *sim, union ccb *ccb)
cpi->transport_version = 0;
cpi->xport_specific.fc.wwnn = softc->wwnn;
cpi->xport_specific.fc.wwpn = softc->wwpn;
- cpi->xport_specific.fc.port = softc->fe.targ_port;
+ cpi->xport_specific.fc.port = softc->port.targ_port;
cpi->xport_specific.fc.bitrate = 8 * 1000 * 1000;
cpi->ccb_h.status = CAM_REQ_CMP;
break;
diff --git a/sys/cam/ctl/ctl_frontend_internal.c b/sys/cam/ctl/ctl_frontend_internal.c
index 85eec748221af..ac972f306420b 100644
--- a/sys/cam/ctl/ctl_frontend_internal.c
+++ b/sys/cam/ctl/ctl_frontend_internal.c
@@ -173,7 +173,7 @@ typedef enum {
} cfi_flags;
struct cfi_softc {
- struct ctl_frontend fe;
+ struct ctl_port port;
char fe_name[40];
struct mtx lock;
cfi_flags flags;
@@ -192,8 +192,6 @@ int cfi_init(void);
void cfi_shutdown(void) __unused;
static void cfi_online(void *arg);
static void cfi_offline(void *arg);
-static int cfi_targ_enable(void *arg, struct ctl_id targ_id);
-static int cfi_targ_disable(void *arg, struct ctl_id targ_id);
static int cfi_lun_enable(void *arg, struct ctl_id target_id, int lun_id);
static int cfi_lun_disable(void *arg, struct ctl_id target_id, int lun_id);
static void cfi_datamove(union ctl_io *io);
@@ -216,28 +214,24 @@ static void cfi_metatask_io_done(union ctl_io *io);
static void cfi_err_recovery_done(union ctl_io *io);
static void cfi_lun_io_done(union ctl_io *io);
-static int cfi_module_event_handler(module_t, int /*modeventtype_t*/, void *);
-
-static moduledata_t cfi_moduledata = {
- "ctlcfi",
- cfi_module_event_handler,
- NULL
+static struct ctl_frontend cfi_frontend =
+{
+ .name = "kernel",
+ .init = cfi_init,
+ .shutdown = cfi_shutdown,
};
-
-DECLARE_MODULE(ctlcfi, cfi_moduledata, SI_SUB_CONFIGURE, SI_ORDER_FOURTH);
-MODULE_VERSION(ctlcfi, 1);
-MODULE_DEPEND(ctlcfi, ctl, 1, 1, 1);
+CTL_FRONTEND_DECLARE(ctlcfi, cfi_frontend);
int
cfi_init(void)
{
struct cfi_softc *softc;
- struct ctl_frontend *fe;
+ struct ctl_port *port;
int retval;
softc = &fetd_internal_softc;
- fe = &softc->fe;
+ port = &softc->port;
retval = 0;
@@ -254,24 +248,23 @@ cfi_init(void)
STAILQ_INIT(&softc->lun_list);
STAILQ_INIT(&softc->metatask_list);
- sprintf(softc->fe_name, "CTL internal");
- fe->port_type = CTL_PORT_INTERNAL;
- fe->num_requested_ctl_io = 100;
- fe->port_name = softc->fe_name;
- fe->port_online = cfi_online;
- fe->port_offline = cfi_offline;
- fe->onoff_arg = softc;
- fe->targ_enable = cfi_targ_enable;
- fe->targ_disable = cfi_targ_disable;
- fe->lun_enable = cfi_lun_enable;
- fe->lun_disable = cfi_lun_disable;
- fe->targ_lun_arg = softc;
- fe->fe_datamove = cfi_datamove;
- fe->fe_done = cfi_done;
- fe->max_targets = 15;
- fe->max_target_id = 15;
+ sprintf(softc->fe_name, "kernel");
+ port->frontend = &cfi_frontend;
+ port->port_type = CTL_PORT_INTERNAL;
+ port->num_requested_ctl_io = 100;
+ port->port_name = softc->fe_name;
+ port->port_online = cfi_online;
+ port->port_offline = cfi_offline;
+ port->onoff_arg = softc;
+ port->lun_enable = cfi_lun_enable;
+ port->lun_disable = cfi_lun_disable;
+ port->targ_lun_arg = softc;
+ port->fe_datamove = cfi_datamove;
+ port->fe_done = cfi_done;
+ port->max_targets = 15;
+ port->max_target_id = 15;
- if (ctl_frontend_register(fe, (softc->flags & CTL_FLAG_MASTER_SHELF)) != 0)
+ if (ctl_port_register(port, (softc->flags & CTL_FLAG_MASTER_SHELF)) != 0)
{
printf("%s: internal frontend registration failed\n", __func__);
return (0);
@@ -295,27 +288,13 @@ cfi_shutdown(void)
/*
* XXX KDM need to clear out any I/O pending on each LUN.
*/
- if (ctl_frontend_deregister(&softc->fe) != 0)
+ if (ctl_port_deregister(&softc->port) != 0)
printf("%s: ctl_frontend_deregister() failed\n", __func__);
uma_zdestroy(cfi_lun_zone);
uma_zdestroy(cfi_metatask_zone);
}
-static int
-cfi_module_event_handler(module_t mod, int what, void *arg)
-{
-
- switch (what) {
- case MOD_LOAD:
- return (cfi_init());
- case MOD_UNLOAD:
- return (EBUSY);
- default:
- return (EOPNOTSUPP);
- }
-}
-
static void
cfi_online(void *arg)
{
@@ -347,18 +326,6 @@ cfi_offline(void *arg)
}
static int
-cfi_targ_enable(void *arg, struct ctl_id targ_id)
-{
- return (0);
-}
-
-static int
-cfi_targ_disable(void *arg, struct ctl_id targ_id)
-{
- return (0);
-}
-
-static int
cfi_lun_enable(void *arg, struct ctl_id target_id, int lun_id)
{
struct cfi_softc *softc;
@@ -734,7 +701,7 @@ cfi_init_io(union ctl_io *io, struct cfi_lun *lun,
struct cfi_lun_io *lun_io;
io->io_hdr.nexus.initid.id = 7;
- io->io_hdr.nexus.targ_port = lun->softc->fe.targ_port;
+ io->io_hdr.nexus.targ_port = lun->softc->port.targ_port;
io->io_hdr.nexus.targ_target.id = lun->target_id.id;
io->io_hdr.nexus.targ_lun = lun->lun_id;
io->io_hdr.retries = retries;
@@ -795,7 +762,7 @@ cfi_done(union ctl_io *io)
union ctl_io *new_io;
struct cfi_lun_io *new_lun_io;
- new_io = ctl_alloc_io(softc->fe.ctl_pool_ref);
+ new_io = ctl_alloc_io(softc->port.ctl_pool_ref);
if (new_io == NULL) {
printf("%s: unable to allocate ctl_io for "
"error recovery\n", __func__);
@@ -1001,7 +968,7 @@ cfi_lun_probe(struct cfi_lun *lun, int have_lock)
struct cfi_lun_io *lun_io;
union ctl_io *io;
- io = ctl_alloc_io(lun->softc->fe.ctl_pool_ref);
+ io = ctl_alloc_io(lun->softc->port.ctl_pool_ref);
if (io == NULL) {
printf("%s: unable to alloc ctl_io for target %ju "
"lun %d probe\n", __func__,
@@ -1048,7 +1015,7 @@ cfi_lun_probe(struct cfi_lun *lun, int have_lock)
uint8_t *dataptr;
union ctl_io *io;
- io = ctl_alloc_io(lun->softc->fe.ctl_pool_ref);
+ io = ctl_alloc_io(lun->softc->port.ctl_pool_ref);
if (io == NULL) {
printf("%s: unable to alloc ctl_io for target %ju "
"lun %d probe\n", __func__,
@@ -1429,7 +1396,7 @@ cfi_action(struct cfi_metatask *metatask)
if (SID_TYPE(&lun->inq_data) != T_DIRECT)
continue;
da_luns++;
- io = ctl_alloc_io(softc->fe.ctl_pool_ref);
+ io = ctl_alloc_io(softc->port.ctl_pool_ref);
if (io != NULL) {
ios_allocated++;
STAILQ_INSERT_TAIL(&tmp_io_list, &io->io_hdr,
@@ -1583,7 +1550,7 @@ cfi_action(struct cfi_metatask *metatask)
}
- io = ctl_alloc_io(softc->fe.ctl_pool_ref);
+ io = ctl_alloc_io(softc->port.ctl_pool_ref);
if (io == NULL) {
metatask->status = CFI_MT_ERROR;
metatask->taskinfo.bbrread.status = CFI_BBR_NO_MEM;
diff --git a/sys/cam/ctl/ctl_frontend_iscsi.c b/sys/cam/ctl/ctl_frontend_iscsi.c
index fbe46b40a5c44..8953ece5fd029 100644
--- a/sys/cam/ctl/ctl_frontend_iscsi.c
+++ b/sys/cam/ctl/ctl_frontend_iscsi.c
@@ -85,19 +85,15 @@ static uma_zone_t cfiscsi_data_wait_zone;
SYSCTL_NODE(_kern_cam_ctl, OID_AUTO, iscsi, CTLFLAG_RD, 0,
"CAM Target Layer iSCSI Frontend");
static int debug = 3;
-TUNABLE_INT("kern.cam.ctl.iscsi.debug", &debug);
SYSCTL_INT(_kern_cam_ctl_iscsi, OID_AUTO, debug, CTLFLAG_RWTUN,
&debug, 1, "Enable debug messages");
static int ping_timeout = 5;
-TUNABLE_INT("kern.cam.ctl.iscsi.ping_timeout", &ping_timeout);
SYSCTL_INT(_kern_cam_ctl_iscsi, OID_AUTO, ping_timeout, CTLFLAG_RWTUN,
&ping_timeout, 5, "Interval between ping (NOP-Out) requests, in seconds");
static int login_timeout = 60;
-TUNABLE_INT("kern.cam.ctl.iscsi.login_timeout", &login_timeout);
SYSCTL_INT(_kern_cam_ctl_iscsi, OID_AUTO, login_timeout, CTLFLAG_RWTUN,
&login_timeout, 60, "Time to wait for ctld(8) to finish Login Phase, in seconds");
static int maxcmdsn_delta = 256;
-TUNABLE_INT("kern.cam.ctl.iscsi.maxcmdsn_delta", &maxcmdsn_delta);
SYSCTL_INT(_kern_cam_ctl_iscsi, OID_AUTO, maxcmdsn_delta, CTLFLAG_RWTUN,
&maxcmdsn_delta, 256, "Number of commands the initiator can send "
"without confirmation");
@@ -149,18 +145,16 @@ SYSCTL_INT(_kern_cam_ctl_iscsi, OID_AUTO, maxcmdsn_delta, CTLFLAG_RWTUN,
int cfiscsi_init(void);
static void cfiscsi_online(void *arg);
static void cfiscsi_offline(void *arg);
-static int cfiscsi_targ_enable(void *arg, struct ctl_id targ_id);
-static int cfiscsi_targ_disable(void *arg, struct ctl_id targ_id);
+static int cfiscsi_info(void *arg, struct sbuf *sb);
static int cfiscsi_lun_enable(void *arg,
struct ctl_id target_id, int lun_id);
static int cfiscsi_lun_disable(void *arg,
struct ctl_id target_id, int lun_id);
+static uint32_t cfiscsi_lun_map(void *arg, uint32_t lun);
static int cfiscsi_ioctl(struct cdev *dev,
u_long cmd, caddr_t addr, int flag, struct thread *td);
-static int cfiscsi_devid(struct ctl_scsiio *ctsio, int alloc_len);
static void cfiscsi_datamove(union ctl_io *io);
static void cfiscsi_done(union ctl_io *io);
-static uint32_t cfiscsi_map_lun(void *arg, uint32_t lun);
static bool cfiscsi_pdu_update_cmdsn(const struct icl_pdu *request);
static void cfiscsi_pdu_handle_nop_out(struct icl_pdu *request);
static void cfiscsi_pdu_handle_scsi_command(struct icl_pdu *request);
@@ -170,23 +164,21 @@ static void cfiscsi_pdu_handle_logout_request(struct icl_pdu *request);
static void cfiscsi_session_terminate(struct cfiscsi_session *cs);
static struct cfiscsi_target *cfiscsi_target_find(struct cfiscsi_softc
*softc, const char *name);
+static struct cfiscsi_target *cfiscsi_target_find_or_create(
+ struct cfiscsi_softc *softc, const char *name, const char *alias);
static void cfiscsi_target_release(struct cfiscsi_target *ct);
static void cfiscsi_session_delete(struct cfiscsi_session *cs);
static struct cfiscsi_softc cfiscsi_softc;
extern struct ctl_softc *control_softc;
-static int cfiscsi_module_event_handler(module_t, int /*modeventtype_t*/, void *);
-
-static moduledata_t cfiscsi_moduledata = {
- "ctlcfiscsi",
- cfiscsi_module_event_handler,
- NULL
+static struct ctl_frontend cfiscsi_frontend =
+{
+ .name = "iscsi",
+ .init = cfiscsi_init,
+ .ioctl = cfiscsi_ioctl,
};
-
-DECLARE_MODULE(ctlcfiscsi, cfiscsi_moduledata, SI_SUB_CONFIGURE, SI_ORDER_FOURTH);
-MODULE_VERSION(ctlcfiscsi, 1);
-MODULE_DEPEND(ctlcfiscsi, ctl, 1, 1, 1);
+CTL_FRONTEND_DECLARE(ctlcfiscsi, cfiscsi_frontend);
MODULE_DEPEND(ctlcfiscsi, icl, 1, 1, 1);
static struct icl_pdu *
@@ -547,7 +539,7 @@ cfiscsi_pdu_handle_scsi_command(struct icl_pdu *request)
cfiscsi_session_terminate(cs);
return;
}
- io = ctl_alloc_io(cs->cs_target->ct_softc->fe.ctl_pool_ref);
+ io = ctl_alloc_io(cs->cs_target->ct_port.ctl_pool_ref);
if (io == NULL) {
CFISCSI_SESSION_WARN(cs, "can't allocate ctl_io; "
"dropping connection");
@@ -559,11 +551,9 @@ cfiscsi_pdu_handle_scsi_command(struct icl_pdu *request)
io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = request;
io->io_hdr.io_type = CTL_IO_SCSI;
io->io_hdr.nexus.initid.id = cs->cs_ctl_initid;
- io->io_hdr.nexus.targ_port = cs->cs_target->ct_softc->fe.targ_port;
+ io->io_hdr.nexus.targ_port = cs->cs_target->ct_port.targ_port;
io->io_hdr.nexus.targ_target.id = 0;
io->io_hdr.nexus.targ_lun = cfiscsi_decode_lun(bhssc->bhssc_lun);
- io->io_hdr.nexus.lun_map_fn = cfiscsi_map_lun;
- io->io_hdr.nexus.lun_map_arg = cs;
io->scsiio.tag_num = bhssc->bhssc_initiator_task_tag;
switch ((bhssc->bhssc_flags & BHSSC_FLAGS_ATTR)) {
case BHSSC_FLAGS_ATTR_UNTAGGED:
@@ -613,7 +603,7 @@ cfiscsi_pdu_handle_task_request(struct icl_pdu *request)
cs = PDU_SESSION(request);
bhstmr = (struct iscsi_bhs_task_management_request *)request->ip_bhs;
- io = ctl_alloc_io(cs->cs_target->ct_softc->fe.ctl_pool_ref);
+ io = ctl_alloc_io(cs->cs_target->ct_port.ctl_pool_ref);
if (io == NULL) {
CFISCSI_SESSION_WARN(cs, "can't allocate ctl_io;"
"dropping connection");
@@ -625,11 +615,9 @@ cfiscsi_pdu_handle_task_request(struct icl_pdu *request)
io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = request;
io->io_hdr.io_type = CTL_IO_TASK;
io->io_hdr.nexus.initid.id = cs->cs_ctl_initid;
- io->io_hdr.nexus.targ_port = cs->cs_target->ct_softc->fe.targ_port;
+ io->io_hdr.nexus.targ_port = cs->cs_target->ct_port.targ_port;
io->io_hdr.nexus.targ_target.id = 0;
io->io_hdr.nexus.targ_lun = cfiscsi_decode_lun(bhstmr->bhstmr_lun);
- io->io_hdr.nexus.lun_map_fn = cfiscsi_map_lun;
- io->io_hdr.nexus.lun_map_arg = cs;
io->taskio.tag_type = CTL_TAG_SIMPLE; /* XXX */
switch (bhstmr->bhstmr_function & ~0x80) {
@@ -640,6 +628,12 @@ cfiscsi_pdu_handle_task_request(struct icl_pdu *request)
io->taskio.task_action = CTL_TASK_ABORT_TASK;
io->taskio.tag_num = bhstmr->bhstmr_referenced_task_tag;
break;
+ case BHSTMR_FUNCTION_ABORT_TASK_SET:
+#if 0
+ CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_ABORT_TASK_SET");
+#endif
+ io->taskio.task_action = CTL_TASK_ABORT_TASK_SET;
+ break;
case BHSTMR_FUNCTION_LOGICAL_UNIT_RESET:
#if 0
CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_LOGICAL_UNIT_RESET");
@@ -737,12 +731,15 @@ cfiscsi_handle_data_segment(struct icl_pdu *request, struct cfiscsi_data_wait *c
buffer_offset = ntohl(bhsdo->bhsdo_buffer_offset);
else
buffer_offset = 0;
+ len = icl_pdu_data_segment_length(request);
/*
* Make sure the offset, as sent by the initiator, matches the offset
* we're supposed to be at in the scatter-gather list.
*/
- if (buffer_offset !=
+ if (buffer_offset >
+ io->scsiio.kern_rel_offset + io->scsiio.ext_data_filled ||
+ buffer_offset + len <=
io->scsiio.kern_rel_offset + io->scsiio.ext_data_filled) {
CFISCSI_SESSION_WARN(cs, "received bad buffer offset %zd, "
"expected %zd; dropping connection", buffer_offset,
@@ -758,8 +755,8 @@ cfiscsi_handle_data_segment(struct icl_pdu *request, struct cfiscsi_data_wait *c
* to buffer_offset, which is the offset within the task (SCSI
* command).
*/
- off = 0;
- len = icl_pdu_data_segment_length(request);
+ off = io->scsiio.kern_rel_offset + io->scsiio.ext_data_filled -
+ buffer_offset;
/*
* Iterate over the scatter/gather segments, filling them with data
@@ -816,12 +813,8 @@ cfiscsi_handle_data_segment(struct icl_pdu *request, struct cfiscsi_data_wait *c
* This obviously can only happen with SCSI Command PDU.
*/
if ((request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) ==
- ISCSI_BHS_OPCODE_SCSI_COMMAND) {
- CFISCSI_SESSION_DEBUG(cs, "received too much immediate "
- "data: got %zd bytes, expected %zd",
- icl_pdu_data_segment_length(request), off);
+ ISCSI_BHS_OPCODE_SCSI_COMMAND)
return (true);
- }
CFISCSI_SESSION_WARN(cs, "received too much data: got %zd bytes, "
"expected %zd; dropping connection",
@@ -1043,71 +1036,61 @@ cfiscsi_callout(void *context)
static void
cfiscsi_session_terminate_tasks(struct cfiscsi_session *cs)
{
- struct cfiscsi_data_wait *cdw, *tmpcdw;
+ struct cfiscsi_data_wait *cdw;
union ctl_io *io;
- int error;
+ int error, last;
-#ifdef notyet
- io = ctl_alloc_io(cs->cs_target->ct_softc->fe.ctl_pool_ref);
+ io = ctl_alloc_io(cs->cs_target->ct_port.ctl_pool_ref);
if (io == NULL) {
CFISCSI_SESSION_WARN(cs, "can't allocate ctl_io");
return;
}
ctl_zero_io(io);
- io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = NULL;
+ io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = cs;
io->io_hdr.io_type = CTL_IO_TASK;
io->io_hdr.nexus.initid.id = cs->cs_ctl_initid;
- io->io_hdr.nexus.targ_port = cs->cs_target->ct_softc->fe.targ_port;
+ io->io_hdr.nexus.targ_port = cs->cs_target->ct_port.targ_port;
io->io_hdr.nexus.targ_target.id = 0;
- io->io_hdr.nexus.targ_lun = lun;
+ io->io_hdr.nexus.targ_lun = 0;
io->taskio.tag_type = CTL_TAG_SIMPLE; /* XXX */
- io->taskio.task_action = CTL_TASK_ABORT_TASK_SET;
+ io->taskio.task_action = CTL_TASK_I_T_NEXUS_RESET;
+ refcount_acquire(&cs->cs_outstanding_ctl_pdus);
error = ctl_queue(io);
if (error != CTL_RETVAL_COMPLETE) {
CFISCSI_SESSION_WARN(cs, "ctl_queue() failed; error %d", error);
+ refcount_release(&cs->cs_outstanding_ctl_pdus);
ctl_free_io(io);
}
-#else
- /*
- * CTL doesn't currently support CTL_TASK_ABORT_TASK_SET, so instead
- * just iterate over tasks that are waiting for something - data - and
- * terminate those.
- */
+
CFISCSI_SESSION_LOCK(cs);
- TAILQ_FOREACH_SAFE(cdw,
- &cs->cs_waiting_for_data_out, cdw_next, tmpcdw) {
- io = ctl_alloc_io(cs->cs_target->ct_softc->fe.ctl_pool_ref);
- if (io == NULL) {
- CFISCSI_SESSION_WARN(cs, "can't allocate ctl_io");
- return;
- }
- ctl_zero_io(io);
- io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = NULL;
- io->io_hdr.io_type = CTL_IO_TASK;
- io->io_hdr.nexus.initid.id = cs->cs_ctl_initid;
- io->io_hdr.nexus.targ_port =
- cs->cs_target->ct_softc->fe.targ_port;
- io->io_hdr.nexus.targ_target.id = 0;
- //io->io_hdr.nexus.targ_lun = lun; /* Not needed? */
- io->taskio.tag_type = CTL_TAG_SIMPLE; /* XXX */
- io->taskio.task_action = CTL_TASK_ABORT_TASK;
- io->taskio.tag_num = cdw->cdw_initiator_task_tag;
- error = ctl_queue(io);
- if (error != CTL_RETVAL_COMPLETE) {
- CFISCSI_SESSION_WARN(cs, "ctl_queue() failed; error %d", error);
- ctl_free_io(io);
- return;
- }
-#if 0
- CFISCSI_SESSION_DEBUG(cs, "removing csw for initiator task tag "
- "0x%x", cdw->cdw_initiator_task_tag);
-#endif
- cdw->cdw_ctl_io->scsiio.be_move_done(cdw->cdw_ctl_io);
+ while ((cdw = TAILQ_FIRST(&cs->cs_waiting_for_data_out)) != NULL) {
TAILQ_REMOVE(&cs->cs_waiting_for_data_out, cdw, cdw_next);
+ CFISCSI_SESSION_UNLOCK(cs);
+ /*
+ * Set nonzero port status; this prevents backends from
+ * assuming that the data transfer actually succeeded
+ * and writing uninitialized data to disk.
+ */
+ cdw->cdw_ctl_io->scsiio.io_hdr.port_status = 42;
+ cdw->cdw_ctl_io->scsiio.be_move_done(cdw->cdw_ctl_io);
uma_zfree(cfiscsi_data_wait_zone, cdw);
+ CFISCSI_SESSION_LOCK(cs);
}
CFISCSI_SESSION_UNLOCK(cs);
-#endif
+
+ /*
+ * Wait for CTL to terminate all the tasks.
+ */
+ for (;;) {
+ refcount_acquire(&cs->cs_outstanding_ctl_pdus);
+ last = refcount_release(&cs->cs_outstanding_ctl_pdus);
+ if (last != 0)
+ break;
+ CFISCSI_SESSION_WARN(cs, "waiting for CTL to terminate tasks, "
+ "%d remaining", cs->cs_outstanding_ctl_pdus);
+ tsleep(__DEVOLATILE(void *, &cs->cs_outstanding_ctl_pdus),
+ 0, "cfiscsi_terminate", hz / 100);
+ }
}
static void
@@ -1124,21 +1107,22 @@ cfiscsi_maintenance_thread(void *arg)
CFISCSI_SESSION_UNLOCK(cs);
if (cs->cs_terminating) {
- cfiscsi_session_terminate_tasks(cs);
- callout_drain(&cs->cs_callout);
+ /*
+ * We used to wait up to 30 seconds to deliver queued
+ * PDUs to the initiator. We also tried hard to deliver
+ * SCSI Responses for the aborted PDUs. We don't do
+ * that anymore. We might need to revisit that.
+ */
+ callout_drain(&cs->cs_callout);
icl_conn_shutdown(cs->cs_conn);
icl_conn_close(cs->cs_conn);
- cs->cs_terminating++;
-
/*
- * XXX: We used to wait up to 30 seconds to deliver queued PDUs
- * to the initiator. We also tried hard to deliver SCSI Responses
- * for the aborted PDUs. We don't do that anymore. We might need
- * to revisit that.
+ * At this point ICL receive thread is no longer
+ * running; no new tasks can be queued.
*/
-
+ cfiscsi_session_terminate_tasks(cs);
cfiscsi_session_delete(cs);
kthread_exit();
return;
@@ -1151,9 +1135,9 @@ static void
cfiscsi_session_terminate(struct cfiscsi_session *cs)
{
- if (cs->cs_terminating != 0)
+ if (cs->cs_terminating)
return;
- cs->cs_terminating = 1;
+ cs->cs_terminating = true;
cv_signal(&cs->cs_maintenance_cv);
#ifdef ICL_KERNEL_PROXY
cv_signal(&cs->cs_login_cv);
@@ -1163,41 +1147,25 @@ cfiscsi_session_terminate(struct cfiscsi_session *cs)
static int
cfiscsi_session_register_initiator(struct cfiscsi_session *cs)
{
- int error, i;
- struct cfiscsi_softc *softc;
+ struct cfiscsi_target *ct;
+ char *name;
+ int i;
KASSERT(cs->cs_ctl_initid == -1, ("already registered"));
- softc = &cfiscsi_softc;
-
- mtx_lock(&softc->lock);
- for (i = 0; i < softc->max_initiators; i++) {
- if (softc->ctl_initids[i] == 0)
- break;
- }
- if (i == softc->max_initiators) {
- CFISCSI_SESSION_WARN(cs, "too many concurrent sessions (%d)",
- softc->max_initiators);
- mtx_unlock(&softc->lock);
+ ct = cs->cs_target;
+ name = strdup(cs->cs_initiator_id, M_CTL);
+ i = ctl_add_initiator(&ct->ct_port, -1, 0, name);
+ if (i < 0) {
+ CFISCSI_SESSION_WARN(cs, "ctl_add_initiator failed with error %d",
+ i);
+ cs->cs_ctl_initid = -1;
return (1);
}
- softc->ctl_initids[i] = 1;
- mtx_unlock(&softc->lock);
-
+ cs->cs_ctl_initid = i;
#if 0
- CFISCSI_SESSION_DEBUG(cs, "adding initiator id %d, max %d",
- i, softc->max_initiators);
+ CFISCSI_SESSION_DEBUG(cs, "added initiator id %d", i);
#endif
- cs->cs_ctl_initid = i;
- error = ctl_add_initiator(0x0, softc->fe.targ_port, cs->cs_ctl_initid);
- if (error != 0) {
- CFISCSI_SESSION_WARN(cs, "ctl_add_initiator failed with error %d", error);
- mtx_lock(&softc->lock);
- softc->ctl_initids[cs->cs_ctl_initid] = 0;
- mtx_unlock(&softc->lock);
- cs->cs_ctl_initid = -1;
- return (1);
- }
return (0);
}
@@ -1206,21 +1174,15 @@ static void
cfiscsi_session_unregister_initiator(struct cfiscsi_session *cs)
{
int error;
- struct cfiscsi_softc *softc;
if (cs->cs_ctl_initid == -1)
return;
- softc = &cfiscsi_softc;
-
- error = ctl_remove_initiator(softc->fe.targ_port, cs->cs_ctl_initid);
+ error = ctl_remove_initiator(&cs->cs_target->ct_port, cs->cs_ctl_initid);
if (error != 0) {
CFISCSI_SESSION_WARN(cs, "ctl_remove_initiator failed with error %d",
error);
}
- mtx_lock(&softc->lock);
- softc->ctl_initids[cs->cs_ctl_initid] = 0;
- mtx_unlock(&softc->lock);
cs->cs_ctl_initid = -1;
}
@@ -1304,7 +1266,6 @@ int
cfiscsi_init(void)
{
struct cfiscsi_softc *softc;
- struct ctl_frontend *fe;
int retval;
softc = &cfiscsi_softc;
@@ -1318,62 +1279,11 @@ cfiscsi_init(void)
TAILQ_INIT(&softc->sessions);
TAILQ_INIT(&softc->targets);
- fe = &softc->fe;
- fe->port_type = CTL_PORT_ISCSI;
- /* XXX KDM what should the real number be here? */
- fe->num_requested_ctl_io = 4096;
- snprintf(softc->port_name, sizeof(softc->port_name), "iscsi");
- fe->port_name = softc->port_name;
- fe->port_online = cfiscsi_online;
- fe->port_offline = cfiscsi_offline;
- fe->onoff_arg = softc;
- fe->targ_enable = cfiscsi_targ_enable;
- fe->targ_disable = cfiscsi_targ_disable;
- fe->lun_enable = cfiscsi_lun_enable;
- fe->lun_disable = cfiscsi_lun_disable;
- fe->targ_lun_arg = softc;
- fe->ioctl = cfiscsi_ioctl;
- fe->devid = cfiscsi_devid;
- fe->fe_datamove = cfiscsi_datamove;
- fe->fe_done = cfiscsi_done;
-
- /* XXX KDM what should we report here? */
- /* XXX These should probably be fetched from CTL. */
- fe->max_targets = 1;
- fe->max_target_id = 15;
-
- retval = ctl_frontend_register(fe, /*master_SC*/ 1);
- if (retval != 0) {
- CFISCSI_WARN("ctl_frontend_register() failed with error %d",
- retval);
- retval = 1;
- goto bailout;
- }
-
- softc->max_initiators = fe->max_initiators;
-
cfiscsi_data_wait_zone = uma_zcreate("cfiscsi_data_wait",
sizeof(struct cfiscsi_data_wait), NULL, NULL, NULL, NULL,
UMA_ALIGN_PTR, 0);
return (0);
-
-bailout:
- return (retval);
-}
-
-static int
-cfiscsi_module_event_handler(module_t mod, int what, void *arg)
-{
-
- switch (what) {
- case MOD_LOAD:
- return (cfiscsi_init());
- case MOD_UNLOAD:
- return (EBUSY);
- default:
- return (EOPNOTSUPP);
- }
}
#ifdef ICL_KERNEL_PROXY
@@ -1400,10 +1310,23 @@ static void
cfiscsi_online(void *arg)
{
struct cfiscsi_softc *softc;
+ struct cfiscsi_target *ct;
+ int online;
+
+ ct = (struct cfiscsi_target *)arg;
+ softc = ct->ct_softc;
- softc = (struct cfiscsi_softc *)arg;
+ mtx_lock(&softc->lock);
+ if (ct->ct_online) {
+ mtx_unlock(&softc->lock);
+ return;
+ }
+ ct->ct_online = 1;
+ online = softc->online++;
+ mtx_unlock(&softc->lock);
+ if (online > 0)
+ return;
- softc->online = 1;
#ifdef ICL_KERNEL_PROXY
if (softc->listener != NULL)
icl_listen_free(softc->listener);
@@ -1415,16 +1338,28 @@ static void
cfiscsi_offline(void *arg)
{
struct cfiscsi_softc *softc;
+ struct cfiscsi_target *ct;
struct cfiscsi_session *cs;
+ int online;
- softc = (struct cfiscsi_softc *)arg;
-
- softc->online = 0;
+ ct = (struct cfiscsi_target *)arg;
+ softc = ct->ct_softc;
mtx_lock(&softc->lock);
- TAILQ_FOREACH(cs, &softc->sessions, cs_next)
- cfiscsi_session_terminate(cs);
+ if (!ct->ct_online) {
+ mtx_unlock(&softc->lock);
+ return;
+ }
+ ct->ct_online = 0;
+ online = --softc->online;
+
+ TAILQ_FOREACH(cs, &softc->sessions, cs_next) {
+ if (cs->cs_target == ct)
+ cfiscsi_session_terminate(cs);
+ }
mtx_unlock(&softc->lock);
+ if (online > 0)
+ return;
#ifdef ICL_KERNEL_PROXY
icl_listen_free(softc->listener);
@@ -1433,24 +1368,21 @@ cfiscsi_offline(void *arg)
}
static int
-cfiscsi_targ_enable(void *arg, struct ctl_id targ_id)
-{
-
- return (0);
-}
-
-static int
-cfiscsi_targ_disable(void *arg, struct ctl_id targ_id)
+cfiscsi_info(void *arg, struct sbuf *sb)
{
+ struct cfiscsi_target *ct = (struct cfiscsi_target *)arg;
+ int retval;
- return (0);
+ retval = sbuf_printf(sb, "\t<cfiscsi_state>%d</cfiscsi_state>\n",
+ ct->ct_state);
+ return (retval);
}
static void
cfiscsi_ioctl_handoff(struct ctl_iscsi *ci)
{
struct cfiscsi_softc *softc;
- struct cfiscsi_session *cs;
+ struct cfiscsi_session *cs, *cs2;
struct cfiscsi_target *ct;
struct ctl_iscsi_handoff_params *cihp;
int error;
@@ -1462,18 +1394,19 @@ cfiscsi_ioctl_handoff(struct ctl_iscsi *ci)
cihp->initiator_name, cihp->initiator_addr,
cihp->target_name);
- if (softc->online == 0) {
+ ct = cfiscsi_target_find(softc, cihp->target_name);
+ if (ct == NULL) {
ci->status = CTL_ISCSI_ERROR;
snprintf(ci->error_str, sizeof(ci->error_str),
- "%s: port offline", __func__);
+ "%s: target not found", __func__);
return;
}
- ct = cfiscsi_target_find(softc, cihp->target_name);
- if (ct == NULL) {
+ if (ct->ct_online == 0) {
ci->status = CTL_ISCSI_ERROR;
snprintf(ci->error_str, sizeof(ci->error_str),
- "%s: target not found", __func__);
+ "%s: port offline", __func__);
+ cfiscsi_target_release(ct);
return;
}
@@ -1537,13 +1470,44 @@ cfiscsi_ioctl_handoff(struct ctl_iscsi *ci)
cihp->initiator_addr, sizeof(cs->cs_initiator_addr));
strlcpy(cs->cs_initiator_alias,
cihp->initiator_alias, sizeof(cs->cs_initiator_alias));
+ memcpy(cs->cs_initiator_isid,
+ cihp->initiator_isid, sizeof(cs->cs_initiator_isid));
+ snprintf(cs->cs_initiator_id, sizeof(cs->cs_initiator_id),
+ "%s,i,0x%02x%02x%02x%02x%02x%02x", cs->cs_initiator_name,
+ cihp->initiator_isid[0], cihp->initiator_isid[1],
+ cihp->initiator_isid[2], cihp->initiator_isid[3],
+ cihp->initiator_isid[4], cihp->initiator_isid[5]);
+
+ refcount_acquire(&cs->cs_outstanding_ctl_pdus);
+restart:
+ if (!cs->cs_terminating) {
+ mtx_lock(&softc->lock);
+ TAILQ_FOREACH(cs2, &softc->sessions, cs_next) {
+ if (cs2 != cs && cs2->cs_tasks_aborted == false &&
+ cs->cs_target == cs2->cs_target &&
+ cs->cs_portal_group_tag == cs2->cs_portal_group_tag &&
+ strcmp(cs->cs_initiator_id, cs2->cs_initiator_id) == 0) {
+ cfiscsi_session_terminate(cs2);
+ mtx_unlock(&softc->lock);
+ pause("cfiscsi_reinstate", 1);
+ goto restart;
+ }
+ }
+ mtx_unlock(&softc->lock);
+ }
+
+ /*
+ * Register initiator with CTL.
+ */
+ cfiscsi_session_register_initiator(cs);
#ifdef ICL_KERNEL_PROXY
if (cihp->socket > 0) {
#endif
error = icl_conn_handoff(cs->cs_conn, cihp->socket);
if (error != 0) {
- cfiscsi_session_delete(cs);
+ cfiscsi_session_terminate(cs);
+ refcount_release(&cs->cs_outstanding_ctl_pdus);
ci->status = CTL_ISCSI_ERROR;
snprintf(ci->error_str, sizeof(ci->error_str),
"%s: icl_conn_handoff failed with error %d",
@@ -1554,11 +1518,6 @@ cfiscsi_ioctl_handoff(struct ctl_iscsi *ci)
}
#endif
- /*
- * Register initiator with CTL.
- */
- cfiscsi_session_register_initiator(cs);
-
#ifdef ICL_KERNEL_PROXY
cs->cs_login_phase = false;
@@ -1573,6 +1532,7 @@ cfiscsi_ioctl_handoff(struct ctl_iscsi *ci)
}
#endif
+ refcount_release(&cs->cs_outstanding_ctl_pdus);
ci->status = CTL_ISCSI_OK;
}
@@ -1971,11 +1931,181 @@ cfiscsi_ioctl_receive(struct ctl_iscsi *ci)
#endif /* !ICL_KERNEL_PROXY */
+static void
+cfiscsi_ioctl_port_create(struct ctl_req *req)
+{
+ struct cfiscsi_target *ct;
+ struct ctl_port *port;
+ const char *target, *alias, *tag;
+ struct scsi_vpd_id_descriptor *desc;
+ ctl_options_t opts;
+ int retval, len, idlen;
+
+ ctl_init_opts(&opts, req->num_args, req->kern_args);
+ target = ctl_get_opt(&opts, "cfiscsi_target");
+ alias = ctl_get_opt(&opts, "cfiscsi_target_alias");
+ tag = ctl_get_opt(&opts, "cfiscsi_portal_group_tag");
+ if (target == NULL || tag == NULL) {
+ req->status = CTL_LUN_ERROR;
+ snprintf(req->error_str, sizeof(req->error_str),
+ "Missing required argument");
+ ctl_free_opts(&opts);
+ return;
+ }
+ ct = cfiscsi_target_find_or_create(&cfiscsi_softc, target, alias);
+ if (ct == NULL) {
+ req->status = CTL_LUN_ERROR;
+ snprintf(req->error_str, sizeof(req->error_str),
+ "failed to create target \"%s\"", target);
+ ctl_free_opts(&opts);
+ return;
+ }
+ if (ct->ct_state == CFISCSI_TARGET_STATE_ACTIVE) {
+ req->status = CTL_LUN_ERROR;
+ snprintf(req->error_str, sizeof(req->error_str),
+ "target \"%s\" already exist", target);
+ cfiscsi_target_release(ct);
+ ctl_free_opts(&opts);
+ return;
+ }
+ port = &ct->ct_port;
+ if (ct->ct_state == CFISCSI_TARGET_STATE_DYING)
+ goto done;
+
+ port->frontend = &cfiscsi_frontend;
+ port->port_type = CTL_PORT_ISCSI;
+ /* XXX KDM what should the real number be here? */
+ port->num_requested_ctl_io = 4096;
+ port->port_name = "iscsi";
+ port->virtual_port = strtoul(tag, NULL, 0);
+ port->port_online = cfiscsi_online;
+ port->port_offline = cfiscsi_offline;
+ port->port_info = cfiscsi_info;
+ port->onoff_arg = ct;
+ port->lun_enable = cfiscsi_lun_enable;
+ port->lun_disable = cfiscsi_lun_disable;
+ port->lun_map = cfiscsi_lun_map;
+ port->targ_lun_arg = ct;
+ port->fe_datamove = cfiscsi_datamove;
+ port->fe_done = cfiscsi_done;
+
+ /* XXX KDM what should we report here? */
+ /* XXX These should probably be fetched from CTL. */
+ port->max_targets = 1;
+ port->max_target_id = 15;
+
+ port->options = opts;
+ STAILQ_INIT(&opts);
+
+ /* Generate Port ID. */
+ idlen = strlen(target) + strlen(",t,0x0001") + 1;
+ idlen = roundup2(idlen, 4);
+ len = sizeof(struct scsi_vpd_device_id) + idlen;
+ port->port_devid = malloc(sizeof(struct ctl_devid) + len,
+ M_CTL, M_WAITOK | M_ZERO);
+ port->port_devid->len = len;
+ desc = (struct scsi_vpd_id_descriptor *)port->port_devid->data;
+ desc->proto_codeset = (SCSI_PROTO_ISCSI << 4) | SVPD_ID_CODESET_UTF8;
+ desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT |
+ SVPD_ID_TYPE_SCSI_NAME;
+ desc->length = idlen;
+ snprintf(desc->identifier, idlen, "%s,t,0x%4.4x",
+ target, port->virtual_port);
+
+ /* Generate Target ID. */
+ idlen = strlen(target) + 1;
+ idlen = roundup2(idlen, 4);
+ len = sizeof(struct scsi_vpd_device_id) + idlen;
+ port->target_devid = malloc(sizeof(struct ctl_devid) + len,
+ M_CTL, M_WAITOK | M_ZERO);
+ port->target_devid->len = len;
+ desc = (struct scsi_vpd_id_descriptor *)port->target_devid->data;
+ desc->proto_codeset = (SCSI_PROTO_ISCSI << 4) | SVPD_ID_CODESET_UTF8;
+ desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_TARGET |
+ SVPD_ID_TYPE_SCSI_NAME;
+ desc->length = idlen;
+ strlcpy(desc->identifier, target, idlen);
+
+ retval = ctl_port_register(port, /*master_SC*/ 1);
+ if (retval != 0) {
+ ctl_free_opts(&port->options);
+ cfiscsi_target_release(ct);
+ free(port->port_devid, M_CFISCSI);
+ free(port->target_devid, M_CFISCSI);
+ req->status = CTL_LUN_ERROR;
+ snprintf(req->error_str, sizeof(req->error_str),
+ "ctl_frontend_register() failed with error %d", retval);
+ return;
+ }
+done:
+ ct->ct_state = CFISCSI_TARGET_STATE_ACTIVE;
+ req->status = CTL_LUN_OK;
+ memcpy(req->kern_args[0].kvalue, &port->targ_port,
+ sizeof(port->targ_port)); //XXX
+}
+
+static void
+cfiscsi_ioctl_port_remove(struct ctl_req *req)
+{
+ struct cfiscsi_target *ct;
+ const char *target;
+ ctl_options_t opts;
+
+ ctl_init_opts(&opts, req->num_args, req->kern_args);
+ target = ctl_get_opt(&opts, "cfiscsi_target");
+ if (target == NULL) {
+ ctl_free_opts(&opts);
+ req->status = CTL_LUN_ERROR;
+ snprintf(req->error_str, sizeof(req->error_str),
+ "Missing required argument");
+ return;
+ }
+ ct = cfiscsi_target_find(&cfiscsi_softc, target);
+ if (ct == NULL) {
+ ctl_free_opts(&opts);
+ req->status = CTL_LUN_ERROR;
+ snprintf(req->error_str, sizeof(req->error_str),
+ "can't find target \"%s\"", target);
+ return;
+ }
+ if (ct->ct_state != CFISCSI_TARGET_STATE_ACTIVE) {
+ ctl_free_opts(&opts);
+ req->status = CTL_LUN_ERROR;
+ snprintf(req->error_str, sizeof(req->error_str),
+ "target \"%s\" is already dying", target);
+ return;
+ }
+ ctl_free_opts(&opts);
+
+ ct->ct_state = CFISCSI_TARGET_STATE_DYING;
+ ctl_port_offline(&ct->ct_port);
+ cfiscsi_target_release(ct);
+ cfiscsi_target_release(ct);
+}
+
static int
cfiscsi_ioctl(struct cdev *dev,
u_long cmd, caddr_t addr, int flag, struct thread *td)
{
struct ctl_iscsi *ci;
+ struct ctl_req *req;
+
+ if (cmd == CTL_PORT_REQ) {
+ req = (struct ctl_req *)addr;
+ switch (req->reqtype) {
+ case CTL_REQ_CREATE:
+ cfiscsi_ioctl_port_create(req);
+ break;
+ case CTL_REQ_REMOVE:
+ cfiscsi_ioctl_port_remove(req);
+ break;
+ default:
+ req->status = CTL_LUN_ERROR;
+ snprintf(req->error_str, sizeof(req->error_str),
+ "Unsupported request type %d", req->reqtype);
+ }
+ return (0);
+ }
if (cmd != CTL_ISCSI)
return (ENOTTY);
@@ -2028,135 +2158,6 @@ cfiscsi_ioctl(struct cdev *dev,
return (0);
}
-static int
-cfiscsi_devid(struct ctl_scsiio *ctsio, int alloc_len)
-{
- struct cfiscsi_session *cs;
- struct scsi_vpd_device_id *devid_ptr;
- struct scsi_vpd_id_descriptor *desc, *desc1;
- struct scsi_vpd_id_descriptor *desc2, *desc3; /* for types 4h and 5h */
- struct scsi_vpd_id_t10 *t10id;
- struct ctl_lun *lun;
- const struct icl_pdu *request;
- size_t devid_len, wwpn_len;
-
- lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
- request = ctsio->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
- cs = PDU_SESSION(request);
-
- wwpn_len = strlen(cs->cs_target->ct_name);
- wwpn_len += strlen(",t,0x01");
- wwpn_len += 1; /* '\0' */
- if ((wwpn_len % 4) != 0)
- wwpn_len += (4 - (wwpn_len % 4));
-
- devid_len = sizeof(struct scsi_vpd_device_id) +
- sizeof(struct scsi_vpd_id_descriptor) +
- sizeof(struct scsi_vpd_id_t10) + CTL_DEVID_LEN +
- sizeof(struct scsi_vpd_id_descriptor) + wwpn_len +
- sizeof(struct scsi_vpd_id_descriptor) +
- sizeof(struct scsi_vpd_id_rel_trgt_port_id) +
- sizeof(struct scsi_vpd_id_descriptor) +
- sizeof(struct scsi_vpd_id_trgt_port_grp_id);
-
- ctsio->kern_data_ptr = malloc(devid_len, M_CTL, M_WAITOK | M_ZERO);
- devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr;
- ctsio->kern_sg_entries = 0;
-
- if (devid_len < alloc_len) {
- ctsio->residual = alloc_len - devid_len;
- ctsio->kern_data_len = devid_len;
- ctsio->kern_total_len = devid_len;
- } else {
- ctsio->residual = 0;
- ctsio->kern_data_len = alloc_len;
- ctsio->kern_total_len = alloc_len;
- }
- ctsio->kern_data_resid = 0;
- ctsio->kern_rel_offset = 0;
- ctsio->kern_sg_entries = 0;
-
- desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list;
- t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0];
- desc1 = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
- sizeof(struct scsi_vpd_id_t10) + CTL_DEVID_LEN);
- desc2 = (struct scsi_vpd_id_descriptor *)(&desc1->identifier[0] +
- wwpn_len);
- desc3 = (struct scsi_vpd_id_descriptor *)(&desc2->identifier[0] +
- sizeof(struct scsi_vpd_id_rel_trgt_port_id));
-
- if (lun != NULL)
- devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
- lun->be_lun->lun_type;
- else
- devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
-
- devid_ptr->page_code = SVPD_DEVICE_ID;
-
- scsi_ulto2b(devid_len - 4, devid_ptr->length);
-
- /*
- * We're using a LUN association here. i.e., this device ID is a
- * per-LUN identifier.
- */
- desc->proto_codeset = (SCSI_PROTO_ISCSI << 4) | SVPD_ID_CODESET_ASCII;
- desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10;
- desc->length = sizeof(*t10id) + CTL_DEVID_LEN;
- strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor));
-
- /*
- * If we've actually got a backend, copy the device id from the
- * per-LUN data. Otherwise, set it to all spaces.
- */
- if (lun != NULL) {
- /*
- * Copy the backend's LUN ID.
- */
- strncpy((char *)t10id->vendor_spec_id,
- (char *)lun->be_lun->device_id, CTL_DEVID_LEN);
- } else {
- /*
- * No backend, set this to spaces.
- */
- memset(t10id->vendor_spec_id, 0x20, CTL_DEVID_LEN);
- }
-
- /*
- * desc1 is for the WWPN which is a port asscociation.
- */
- desc1->proto_codeset = (SCSI_PROTO_ISCSI << 4) | SVPD_ID_CODESET_UTF8;
- desc1->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT |
- SVPD_ID_TYPE_SCSI_NAME;
- desc1->length = wwpn_len;
- snprintf(desc1->identifier, wwpn_len, "%s,t,0x%x",
- cs->cs_target->ct_name, cs->cs_portal_group_tag);
-
- /*
- * desc2 is for the Relative Target Port(type 4h) identifier
- */
- desc2->proto_codeset = (SCSI_PROTO_ISCSI << 4) | SVPD_ID_CODESET_BINARY;
- desc2->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT |
- SVPD_ID_TYPE_RELTARG;
- desc2->length = 4;
- desc2->identifier[3] = 1;
-
- /*
- * desc3 is for the Target Port Group(type 5h) identifier
- */
- desc3->proto_codeset = (SCSI_PROTO_ISCSI << 4) | SVPD_ID_CODESET_BINARY;
- desc3->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT |
- SVPD_ID_TYPE_TPORTGRP;
- desc3->length = 4;
- desc3->identifier[3] = 1;
-
- ctsio->scsi_status = SCSI_STATUS_OK;
-
- ctsio->be_move_done = ctl_config_move_done;
- ctl_datamove((union ctl_io *)ctsio);
-
- return (CTL_RETVAL_COMPLETE);
-}
-
static void
cfiscsi_target_hold(struct cfiscsi_target *ct)
{
@@ -2174,6 +2175,12 @@ cfiscsi_target_release(struct cfiscsi_target *ct)
if (refcount_release(&ct->ct_refcount)) {
TAILQ_REMOVE(&softc->targets, ct, ct_next);
mtx_unlock(&softc->lock);
+ if (ct->ct_state != CFISCSI_TARGET_STATE_INVALID) {
+ ct->ct_state = CFISCSI_TARGET_STATE_INVALID;
+ if (ctl_port_deregister(&ct->ct_port) != 0)
+ printf("%s: ctl_port_deregister() failed\n",
+ __func__);
+ }
free(ct, M_CFISCSI);
return;
@@ -2188,7 +2195,8 @@ cfiscsi_target_find(struct cfiscsi_softc *softc, const char *name)
mtx_lock(&softc->lock);
TAILQ_FOREACH(ct, &softc->targets, ct_next) {
- if (strcmp(name, ct->ct_name) != 0)
+ if (strcmp(name, ct->ct_name) != 0 ||
+ ct->ct_state != CFISCSI_TARGET_STATE_ACTIVE)
continue;
cfiscsi_target_hold(ct);
mtx_unlock(&softc->lock);
@@ -2213,7 +2221,8 @@ cfiscsi_target_find_or_create(struct cfiscsi_softc *softc, const char *name,
mtx_lock(&softc->lock);
TAILQ_FOREACH(ct, &softc->targets, ct_next) {
- if (strcmp(name, ct->ct_name) != 0)
+ if (strcmp(name, ct->ct_name) != 0 ||
+ ct->ct_state == CFISCSI_TARGET_STATE_INVALID)
continue;
cfiscsi_target_hold(ct);
mtx_unlock(&softc->lock);
@@ -2222,7 +2231,7 @@ cfiscsi_target_find_or_create(struct cfiscsi_softc *softc, const char *name,
}
for (i = 0; i < CTL_MAX_LUNS; i++)
- newct->ct_luns[i] = -1;
+ newct->ct_luns[i] = UINT32_MAX;
strlcpy(newct->ct_name, name, sizeof(newct->ct_name));
if (alias != NULL)
@@ -2239,22 +2248,16 @@ cfiscsi_target_find_or_create(struct cfiscsi_softc *softc, const char *name,
* Takes LUN from the target space and returns LUN from the CTL space.
*/
static uint32_t
-cfiscsi_map_lun(void *arg, uint32_t lun)
+cfiscsi_lun_map(void *arg, uint32_t lun)
{
- struct cfiscsi_session *cs;
-
- cs = arg;
+ struct cfiscsi_target *ct = arg;
if (lun >= CTL_MAX_LUNS) {
CFISCSI_DEBUG("requested lun number %d is higher "
"than maximum %d", lun, CTL_MAX_LUNS - 1);
- return (0xffffffff);
+ return (UINT32_MAX);
}
-
- if (cs->cs_target->ct_luns[lun] < 0)
- return (0xffffffff);
-
- return (cs->cs_target->ct_luns[lun]);
+ return (ct->ct_luns[lun]);
}
static int
@@ -2268,7 +2271,7 @@ cfiscsi_target_set_lun(struct cfiscsi_target *ct,
return (-1);
}
- if (ct->ct_luns[lun_id] >= 0) {
+ if (ct->ct_luns[lun_id] < CTL_MAX_LUNS) {
/*
* CTL calls cfiscsi_lun_enable() twice for each LUN - once
* when the LUN is created, and a second time just before
@@ -2287,22 +2290,6 @@ cfiscsi_target_set_lun(struct cfiscsi_target *ct,
#endif
ct->ct_luns[lun_id] = ctl_lun_id;
- cfiscsi_target_hold(ct);
-
- return (0);
-}
-
-static int
-cfiscsi_target_unset_lun(struct cfiscsi_target *ct, unsigned long lun_id)
-{
-
- if (ct->ct_luns[lun_id] < 0) {
- CFISCSI_WARN("lun %ld not allocated", lun_id);
- return (-1);
- }
-
- ct->ct_luns[lun_id] = -1;
- cfiscsi_target_release(ct);
return (0);
}
@@ -2312,22 +2299,17 @@ cfiscsi_lun_enable(void *arg, struct ctl_id target_id, int lun_id)
{
struct cfiscsi_softc *softc;
struct cfiscsi_target *ct;
- struct ctl_be_lun_option *opt;
- const char *target = NULL, *target_alias = NULL;
+ const char *target = NULL;
const char *lun = NULL;
unsigned long tmp;
- softc = (struct cfiscsi_softc *)arg;
+ ct = (struct cfiscsi_target *)arg;
+ softc = ct->ct_softc;
- STAILQ_FOREACH(opt,
- &control_softc->ctl_luns[lun_id]->be_lun->options, links) {
- if (strcmp(opt->name, "cfiscsi_target") == 0)
- target = opt->value;
- else if (strcmp(opt->name, "cfiscsi_target_alias") == 0)
- target_alias = opt->value;
- else if (strcmp(opt->name, "cfiscsi_lun") == 0)
- lun = opt->value;
- }
+ target = ctl_get_opt(&control_softc->ctl_luns[lun_id]->be_lun->options,
+ "cfiscsi_target");
+ lun = ctl_get_opt(&control_softc->ctl_luns[lun_id]->be_lun->options,
+ "cfiscsi_lun");
if (target == NULL && lun == NULL)
return (0);
@@ -2338,15 +2320,11 @@ cfiscsi_lun_enable(void *arg, struct ctl_id target_id, int lun_id)
return (0);
}
- ct = cfiscsi_target_find_or_create(softc, target, target_alias);
- if (ct == NULL) {
- CFISCSI_WARN("failed to create target \"%s\"", target);
+ if (strcmp(target, ct->ct_name) != 0)
return (0);
- }
tmp = strtoul(lun, NULL, 10);
cfiscsi_target_set_lun(ct, tmp, lun_id);
- cfiscsi_target_release(ct);
return (0);
}
@@ -2357,19 +2335,15 @@ cfiscsi_lun_disable(void *arg, struct ctl_id target_id, int lun_id)
struct cfiscsi_target *ct;
int i;
- softc = (struct cfiscsi_softc *)arg;
+ ct = (struct cfiscsi_target *)arg;
+ softc = ct->ct_softc;
mtx_lock(&softc->lock);
- TAILQ_FOREACH(ct, &softc->targets, ct_next) {
- for (i = 0; i < CTL_MAX_LUNS; i++) {
- if (ct->ct_luns[i] < 0)
- continue;
- if (ct->ct_luns[i] != lun_id)
- continue;
- mtx_unlock(&softc->lock);
- cfiscsi_target_unset_lun(ct, i);
- return (0);
- }
+ for (i = 0; i < CTL_MAX_LUNS; i++) {
+ if (ct->ct_luns[i] != lun_id)
+ continue;
+ ct->ct_luns[i] = UINT32_MAX;
+ break;
}
mtx_unlock(&softc->lock);
return (0);
@@ -2623,8 +2597,8 @@ cfiscsi_datamove_out(union ctl_io *io)
cdw->cdw_target_transfer_tag = target_transfer_tag;
cdw->cdw_initiator_task_tag = bhssc->bhssc_initiator_task_tag;
- if (cs->cs_immediate_data && io->scsiio.kern_rel_offset == 0 &&
- icl_pdu_data_segment_length(request) > 0) {
+ if (cs->cs_immediate_data && io->scsiio.kern_rel_offset <
+ icl_pdu_data_segment_length(request)) {
done = cfiscsi_handle_data_segment(request, cdw);
if (done) {
uma_zfree(cfiscsi_data_wait_zone, cdw);
@@ -2725,6 +2699,17 @@ cfiscsi_scsi_command_done(union ctl_io *io)
CFISCSI_SESSION_UNLOCK(cs);
#endif
+ /*
+ * Do not return status for aborted commands.
+ * There are exceptions, but none supported by CTL yet.
+ */
+ if ((io->io_hdr.flags & CTL_FLAG_ABORT) &&
+ (io->io_hdr.flags & CTL_FLAG_ABORT_STATUS) == 0) {
+ ctl_free_io(io);
+ icl_pdu_free(request);
+ return;
+ }
+
response = cfiscsi_pdu_new_response(request, M_WAITOK);
bhssr = (struct iscsi_bhs_scsi_response *)response->ip_bhs;
bhssr->bhssr_opcode = ISCSI_BHS_OPCODE_SCSI_RESPONSE;
@@ -2851,14 +2836,20 @@ cfiscsi_done(union ctl_io *io)
KASSERT(((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE),
("invalid CTL status %#x", io->io_hdr.status));
- request = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
- if (request == NULL) {
+ if (io->io_hdr.io_type == CTL_IO_TASK &&
+ io->taskio.task_action == CTL_TASK_I_T_NEXUS_RESET) {
/*
* Implicit task termination has just completed; nothing to do.
*/
+ cs = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
+ cs->cs_tasks_aborted = true;
+ refcount_release(&cs->cs_outstanding_ctl_pdus);
+ wakeup(__DEVOLATILE(void *, &cs->cs_outstanding_ctl_pdus));
+ ctl_free_io(io);
return;
}
+ request = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
cs = PDU_SESSION(request);
refcount_release(&cs->cs_outstanding_ctl_pdus);
diff --git a/sys/cam/ctl/ctl_frontend_iscsi.h b/sys/cam/ctl/ctl_frontend_iscsi.h
index aca89f9dcfcae..0ac0e98ce46cc 100644
--- a/sys/cam/ctl/ctl_frontend_iscsi.h
+++ b/sys/cam/ctl/ctl_frontend_iscsi.h
@@ -32,13 +32,20 @@
#ifndef CTL_FRONTEND_ISCSI_H
#define CTL_FRONTEND_ISCSI_H
+#define CFISCSI_TARGET_STATE_INVALID 0
+#define CFISCSI_TARGET_STATE_ACTIVE 1
+#define CFISCSI_TARGET_STATE_DYING 2
+
struct cfiscsi_target {
TAILQ_ENTRY(cfiscsi_target) ct_next;
- int ct_luns[CTL_MAX_LUNS];
+ uint32_t ct_luns[CTL_MAX_LUNS];
struct cfiscsi_softc *ct_softc;
volatile u_int ct_refcount;
char ct_name[CTL_ISCSI_NAME_LEN];
char ct_alias[CTL_ISCSI_ALIAS_LEN];
+ int ct_state;
+ int ct_online;
+ struct ctl_port ct_port;
};
struct cfiscsi_data_wait {
@@ -72,13 +79,16 @@ struct cfiscsi_session {
int cs_timeout;
int cs_portal_group_tag;
struct cv cs_maintenance_cv;
- int cs_terminating;
+ bool cs_terminating;
+ bool cs_tasks_aborted;
size_t cs_max_data_segment_length;
size_t cs_max_burst_length;
bool cs_immediate_data;
char cs_initiator_name[CTL_ISCSI_NAME_LEN];
char cs_initiator_addr[CTL_ISCSI_ADDR_LEN];
char cs_initiator_alias[CTL_ISCSI_ALIAS_LEN];
+ char cs_initiator_isid[6];
+ char cs_initiator_id[CTL_ISCSI_NAME_LEN + 5 + 6 + 1];
unsigned int cs_id;
int cs_ctl_initid;
#ifdef ICL_KERNEL_PROXY
@@ -96,15 +106,12 @@ struct icl_listen;
#endif
struct cfiscsi_softc {
- struct ctl_frontend fe;
struct mtx lock;
char port_name[32];
int online;
unsigned int last_session_id;
TAILQ_HEAD(, cfiscsi_target) targets;
TAILQ_HEAD(, cfiscsi_session) sessions;
- char ctl_initids[CTL_MAX_INIT_PER_PORT];
- int max_initiators;
#ifdef ICL_KERNEL_PROXY
struct icl_listen *listener;
struct cv accept_cv;
diff --git a/sys/cam/ctl/ctl_io.h b/sys/cam/ctl/ctl_io.h
index e018b3f8a5f6d..8f4ab92c330ba 100644
--- a/sys/cam/ctl/ctl_io.h
+++ b/sys/cam/ctl/ctl_io.h
@@ -96,6 +96,7 @@ typedef enum {
CTL_FLAG_CONTROL_DEV = 0x00000080, /* processor device */
CTL_FLAG_ALLOCATED = 0x00000100, /* data space allocated */
CTL_FLAG_BLOCKED = 0x00000200, /* on the blocked queue */
+ CTL_FLAG_ABORT_STATUS = 0x00000400, /* return TASK ABORTED status */
CTL_FLAG_ABORT = 0x00000800, /* this I/O should be aborted */
CTL_FLAG_DMA_INPROG = 0x00001000, /* DMA in progress */
CTL_FLAG_NO_DATASYNC = 0x00002000, /* don't cache flush data */
@@ -138,6 +139,10 @@ struct ctl_lba_len_flags {
uint64_t lba;
uint32_t len;
uint32_t flags;
+#define CTL_LLF_READ 0x10000000
+#define CTL_LLF_WRITE 0x20000000
+#define CTL_LLF_VERIFY 0x40000000
+#define CTL_LLF_COMPARE 0x80000000
};
struct ctl_ptr_len_flags {
@@ -216,8 +221,7 @@ struct ctl_nexus {
uint32_t targ_port; /* Target port, filled in by PORT */
struct ctl_id targ_target; /* Destination target */
uint32_t targ_lun; /* Destination lun */
- uint32_t (*lun_map_fn)(void *arg, uint32_t lun);
- void *lun_map_arg;
+ uint32_t targ_mapped_lun; /* Destination lun CTL-wide */
};
typedef enum {
@@ -361,6 +365,7 @@ typedef enum {
CTL_TASK_ABORT_TASK_SET,
CTL_TASK_CLEAR_ACA,
CTL_TASK_CLEAR_TASK_SET,
+ CTL_TASK_I_T_NEXUS_RESET,
CTL_TASK_LUN_RESET,
CTL_TASK_TARGET_RESET,
CTL_TASK_BUS_RESET,
@@ -402,7 +407,7 @@ struct ctl_pr_info {
ctl_pr_action action;
uint8_t sa_res_key[8];
uint8_t res_type;
- uint16_t residx;
+ uint32_t residx;
};
struct ctl_ha_msg_hdr {
diff --git a/sys/cam/ctl/ctl_ioctl.h b/sys/cam/ctl/ctl_ioctl.h
index 93dc4920334ce..a90475b8fcf5c 100644
--- a/sys/cam/ctl/ctl_ioctl.h
+++ b/sys/cam/ctl/ctl_ioctl.h
@@ -70,7 +70,7 @@
/*
* Maximum number of ports registered at one time.
*/
-#define CTL_MAX_PORTS 32
+#define CTL_MAX_PORTS 128
/*
* Maximum number of initiators we support.
@@ -595,6 +595,45 @@ struct ctl_lun_list {
};
/*
+ * Port request interface:
+ *
+ * driver: This is required, and is NUL-terminated a string
+ * that is the name of the frontend, like "iscsi" .
+ *
+ * reqtype: The type of request, CTL_REQ_CREATE to create a
+ * port, CTL_REQ_REMOVE to delete a port.
+ *
+ * num_be_args: This is the number of frontend-specific arguments
+ * in the be_args array.
+ *
+ * be_args: This is an array of frontend-specific arguments.
+ * See above for a description of the fields in this
+ * structure.
+ *
+ * status: Status of the request.
+ *
+ * error_str: If the status is CTL_LUN_ERROR, this will
+ * contain a string describing the error.
+ *
+ * kern_be_args: For kernel use only.
+ */
+typedef enum {
+ CTL_REQ_CREATE,
+ CTL_REQ_REMOVE,
+ CTL_REQ_MODIFY,
+} ctl_req_type;
+
+struct ctl_req {
+ char driver[CTL_DRIVER_NAME_LEN];
+ ctl_req_type reqtype;
+ int num_args;
+ struct ctl_be_arg *args;
+ ctl_lun_status status;
+ char error_str[CTL_ERROR_STR_LEN];
+ struct ctl_be_arg *kern_args;
+};
+
+/*
* iSCSI status
*
* OK: Request completed successfully.
@@ -642,6 +681,7 @@ struct ctl_iscsi_handoff_params {
char initiator_name[CTL_ISCSI_NAME_LEN];
char initiator_addr[CTL_ISCSI_ADDR_LEN];
char initiator_alias[CTL_ISCSI_ALIAS_LEN];
+ uint8_t initiator_isid[6];
char target_name[CTL_ISCSI_NAME_LEN];
int socket;
int portal_group_tag;
@@ -789,6 +829,8 @@ struct ctl_iscsi {
#define CTL_ERROR_INJECT_DELETE _IOW(CTL_MINOR, 0x23, struct ctl_error_desc)
#define CTL_SET_PORT_WWNS _IOW(CTL_MINOR, 0x24, struct ctl_port_entry)
#define CTL_ISCSI _IOWR(CTL_MINOR, 0x25, struct ctl_iscsi)
+#define CTL_PORT_REQ _IOWR(CTL_MINOR, 0x26, struct ctl_req)
+#define CTL_PORT_LIST _IOWR(CTL_MINOR, 0x27, struct ctl_lun_list)
#endif /* _CTL_IOCTL_H_ */
diff --git a/sys/cam/ctl/ctl_private.h b/sys/cam/ctl/ctl_private.h
index c742a9344e82f..23298fa05d7cc 100644
--- a/sys/cam/ctl/ctl_private.h
+++ b/sys/cam/ctl/ctl_private.h
@@ -79,7 +79,6 @@ typedef enum {
CTL_POOL_INTERNAL,
CTL_POOL_FETD,
CTL_POOL_EMERGENCY,
- CTL_POOL_IOCTL,
CTL_POOL_4OTHERSC
} ctl_pool_type;
@@ -110,7 +109,7 @@ typedef enum {
struct ctl_ioctl_info {
ctl_ioctl_flags flags;
uint32_t cur_tag_num;
- struct ctl_frontend fe;
+ struct ctl_port port;
char port_name[24];
};
@@ -136,6 +135,7 @@ typedef enum {
*
* Note: "OK_ON_ALL_LUNS" == we don't have to have a lun configured
* "OK_ON_BOTH" == we have to have a lun configured
+ * "SA5" == command has 5-bit service action at byte 1
*/
typedef enum {
CTL_CMD_FLAG_NONE = 0x0000,
@@ -149,7 +149,8 @@ typedef enum {
CTL_CMD_FLAG_OK_ON_INOPERABLE = 0x0800,
CTL_CMD_FLAG_OK_ON_OFFLINE = 0x1000,
CTL_CMD_FLAG_OK_ON_SECONDARY = 0x2000,
- CTL_CMD_FLAG_ALLOW_ON_PR_RESV = 0x4000
+ CTL_CMD_FLAG_ALLOW_ON_PR_RESV = 0x4000,
+ CTL_CMD_FLAG_SA5 = 0x8000
} ctl_cmd_flags;
typedef enum {
@@ -162,14 +163,10 @@ typedef enum {
CTL_SERIDX_RQ_SNS,
CTL_SERIDX_INQ,
CTL_SERIDX_RD_CAP,
- CTL_SERIDX_RESV,
- CTL_SERIDX_REL,
+ CTL_SERIDX_RES,
CTL_SERIDX_LOG_SNS,
CTL_SERIDX_FORMAT,
CTL_SERIDX_START,
- CTL_SERIDX_PRES_IN,
- CTL_SERIDX_PRES_OUT,
- CTL_SERIDX_MAIN_IN,
/* TBD: others to be filled in as needed */
CTL_SERIDX_COUNT, /* LAST, not a normal code, provides # codes */
CTL_SERIDX_INVLD = CTL_SERIDX_COUNT
@@ -182,6 +179,9 @@ struct ctl_cmd_entry {
ctl_seridx seridx;
ctl_cmd_flags flags;
ctl_lun_error_pattern pattern;
+ uint8_t length; /* CDB length */
+ uint8_t usage[15]; /* Mask of allowed CDB bits
+ * after the opcode byte. */
};
typedef enum {
@@ -332,11 +332,6 @@ struct ctl_mode_pages {
struct ctl_page_index index[CTL_NUM_MODE_PAGES];
};
-struct ctl_pending_sense {
- ctl_ua_type ua_pending;
- struct scsi_sense_data sense;
-};
-
struct ctl_lun_delay_info {
ctl_delay_type datamove_type;
uint32_t datamove_delay;
@@ -360,15 +355,20 @@ struct ctl_per_res_info {
uint8_t registered;
};
-#define CTL_PR_ALL_REGISTRANTS 0xFFFF
-#define CTL_PR_NO_RESERVATION 0xFFF0
+#define CTL_PR_ALL_REGISTRANTS 0xFFFFFFFF
+#define CTL_PR_NO_RESERVATION 0xFFFFFFF0
+
+struct ctl_devid {
+ int len;
+ uint8_t data[];
+};
/*
* For report target port groups.
*/
#define NUM_TARGET_PORT_GROUPS 2
-#define NUM_PORTS_PER_GRP 2
+struct tpc_list;
struct ctl_lun {
struct mtx lun_lock;
struct ctl_id target;
@@ -388,29 +388,38 @@ struct ctl_lun {
STAILQ_ENTRY(ctl_lun) links;
STAILQ_ENTRY(ctl_lun) run_links;
struct ctl_nexus rsv_nexus;
+#ifdef CTL_WITH_CA
uint32_t have_ca[CTL_MAX_INITIATORS >> 5];
- struct ctl_pending_sense pending_sense[CTL_MAX_INITIATORS];
+ struct scsi_sense_data pending_sense[CTL_MAX_INITIATORS];
+#endif
+ ctl_ua_type pending_ua[CTL_MAX_INITIATORS];
struct ctl_mode_pages mode_pages;
struct ctl_lun_io_stats stats;
struct ctl_per_res_info per_res[2*CTL_MAX_INITIATORS];
unsigned int PRGeneration;
int pr_key_count;
- uint16_t pr_res_idx;
+ uint32_t pr_res_idx;
uint8_t res_type;
- uint8_t write_buffer[524288];
+ uint8_t write_buffer[262144];
+ struct ctl_devid *lun_devid;
+ TAILQ_HEAD(tpc_lists, tpc_list) tpc_lists;
};
typedef enum {
- CTL_FLAG_TASK_PENDING = 0x01,
CTL_FLAG_REAL_SYNC = 0x02,
CTL_FLAG_MASTER_SHELF = 0x04
} ctl_gen_flags;
-struct ctl_wwpn_iid {
- int in_use;
- uint64_t wwpn;
- uint32_t iid;
- int32_t port;
+#define CTL_MAX_THREADS 16
+
+struct ctl_thread {
+ struct mtx_padalign queue_lock;
+ struct ctl_softc *ctl_softc;
+ struct thread *thread;
+ STAILQ_HEAD(, ctl_io_hdr) incoming_queue;
+ STAILQ_HEAD(, ctl_io_hdr) rtr_queue;
+ STAILQ_HEAD(, ctl_io_hdr) done_queue;
+ STAILQ_HEAD(, ctl_io_hdr) isc_queue;
};
struct ctl_softc {
@@ -426,27 +435,22 @@ struct ctl_softc {
struct sysctl_ctx_list sysctl_ctx;
struct sysctl_oid *sysctl_tree;
struct ctl_ioctl_info ioctl_info;
- struct ctl_lun lun;
struct ctl_io_pool *internal_pool;
struct ctl_io_pool *emergency_pool;
struct ctl_io_pool *othersc_pool;
- struct proc *work_thread;
+ struct proc *ctl_proc;
int targ_online;
uint32_t ctl_lun_mask[CTL_MAX_LUNS >> 5];
struct ctl_lun *ctl_luns[CTL_MAX_LUNS];
- struct ctl_wwpn_iid wwpn_iid[CTL_MAX_PORTS][CTL_MAX_INIT_PER_PORT];
uint32_t ctl_port_mask;
uint64_t aps_locked_lun;
STAILQ_HEAD(, ctl_lun) lun_list;
STAILQ_HEAD(, ctl_be_lun) pending_lun_queue;
- STAILQ_HEAD(, ctl_io_hdr) task_queue;
- STAILQ_HEAD(, ctl_io_hdr) incoming_queue;
- STAILQ_HEAD(, ctl_io_hdr) rtr_queue;
- STAILQ_HEAD(, ctl_io_hdr) done_queue;
- STAILQ_HEAD(, ctl_io_hdr) isc_queue;
uint32_t num_frontends;
STAILQ_HEAD(, ctl_frontend) fe_list;
- struct ctl_frontend *ctl_ports[CTL_MAX_PORTS];
+ uint32_t num_ports;
+ STAILQ_HEAD(, ctl_port) port_list;
+ struct ctl_port *ctl_ports[CTL_MAX_PORTS];
uint32_t num_backends;
STAILQ_HEAD(, ctl_backend_driver) be_list;
struct mtx pool_lock;
@@ -455,13 +459,16 @@ struct ctl_softc {
STAILQ_HEAD(, ctl_io_pool) io_pools;
time_t last_print_jiffies;
uint32_t skipped_prints;
+ struct ctl_thread threads[CTL_MAX_THREADS];
};
#ifdef _KERNEL
-extern struct ctl_cmd_entry ctl_cmd_table[];
+extern const struct ctl_cmd_entry ctl_cmd_table[256];
uint32_t ctl_get_initindex(struct ctl_nexus *nexus);
+uint32_t ctl_get_resindex(struct ctl_nexus *nexus);
+uint32_t ctl_port_idx(int port_num);
int ctl_pool_create(struct ctl_softc *ctl_softc, ctl_pool_type pool_type,
uint32_t total_ctl_io, struct ctl_io_pool **npool);
void ctl_pool_free(struct ctl_io_pool *pool);
@@ -470,24 +477,40 @@ int ctl_scsi_reserve(struct ctl_scsiio *ctsio);
int ctl_start_stop(struct ctl_scsiio *ctsio);
int ctl_sync_cache(struct ctl_scsiio *ctsio);
int ctl_format(struct ctl_scsiio *ctsio);
+int ctl_read_buffer(struct ctl_scsiio *ctsio);
int ctl_write_buffer(struct ctl_scsiio *ctsio);
int ctl_write_same(struct ctl_scsiio *ctsio);
int ctl_unmap(struct ctl_scsiio *ctsio);
int ctl_mode_select(struct ctl_scsiio *ctsio);
int ctl_mode_sense(struct ctl_scsiio *ctsio);
int ctl_read_capacity(struct ctl_scsiio *ctsio);
-int ctl_service_action_in(struct ctl_scsiio *ctsio);
+int ctl_read_capacity_16(struct ctl_scsiio *ctsio);
int ctl_read_write(struct ctl_scsiio *ctsio);
+int ctl_cnw(struct ctl_scsiio *ctsio);
int ctl_report_luns(struct ctl_scsiio *ctsio);
int ctl_request_sense(struct ctl_scsiio *ctsio);
int ctl_tur(struct ctl_scsiio *ctsio);
+int ctl_verify(struct ctl_scsiio *ctsio);
int ctl_inquiry(struct ctl_scsiio *ctsio);
int ctl_persistent_reserve_in(struct ctl_scsiio *ctsio);
int ctl_persistent_reserve_out(struct ctl_scsiio *ctsio);
-int ctl_maintenance_in(struct ctl_scsiio *ctsio);
-void ctl_done_lock(union ctl_io *io, int have_lock);
+int ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio);
+int ctl_report_supported_opcodes(struct ctl_scsiio *ctsio);
+int ctl_report_supported_tmf(struct ctl_scsiio *ctsio);
+int ctl_report_timestamp(struct ctl_scsiio *ctsio);
int ctl_isc(struct ctl_scsiio *ctsio);
+void ctl_tpc_init(struct ctl_lun *lun);
+void ctl_tpc_shutdown(struct ctl_lun *lun);
+int ctl_inquiry_evpd_tpc(struct ctl_scsiio *ctsio, int alloc_len);
+int ctl_receive_copy_status_lid1(struct ctl_scsiio *ctsio);
+int ctl_receive_copy_failure_details(struct ctl_scsiio *ctsio);
+int ctl_receive_copy_status_lid4(struct ctl_scsiio *ctsio);
+int ctl_receive_copy_operating_parameters(struct ctl_scsiio *ctsio);
+int ctl_extended_copy_lid1(struct ctl_scsiio *ctsio);
+int ctl_extended_copy_lid4(struct ctl_scsiio *ctsio);
+int ctl_copy_operation_abort(struct ctl_scsiio *ctsio);
+
#endif /* _KERNEL */
#endif /* _CTL_PRIVATE_H_ */
diff --git a/sys/cam/ctl/ctl_ser_table.c b/sys/cam/ctl/ctl_ser_table.c
index ee4f3044c07ac..c680fb530f695 100644
--- a/sys/cam/ctl/ctl_ser_table.c
+++ b/sys/cam/ctl/ctl_ser_table.c
@@ -60,23 +60,19 @@
static ctl_serialize_action
ctl_serialize_table[CTL_SERIDX_COUNT][CTL_SERIDX_COUNT] = {
-/**>IDX_ :: 2nd:TUR RD WRT UNM MDSN MDSL RQSN INQ RDCP RES REL LSNS FMT STR PRIN PROT MAININ*/
-/*TUR */{ pS, pS, pS, pS, bK, bK, bK, pS, pS, bK, bK, pS, bK, bK, bK, bK, bK},
-/*READ */{ pS, pS, xT, bK, bK, bK, bK, pS, pS, bK, bK, pS, bK, bK, bK, bK, bK},
-/*WRITE */{ pS, xT, xT, bK, bK, bK, bK, pS, pS, bK, bK, pS, bK, bK, bK, bK, bK},
-/*UNMAP */{ pS, bK, bK, pS, bK, bK, bK, pS, pS, bK, bK, pS, bK, bK, bK, bK, bK},
-/*MD_SNS */{ bK, bK, bK, bK, pS, bK, bK, pS, pS, bK, bK, pS, bK, bK, bK, bK, bK},
-/*MD_SEL */{ bK, bK, bK, bK, bK, bK, bK, pS, pS, bK, bK, pS, bK, bK, bK, bK, bK},
-/*RQ_SNS */{ pS, pS, pS, pS, pS, pS, bK, pS, pS, bK, bK, pS, bK, bK, bK, bK, bK},
-/*INQ */{ pS, pS, pS, pS, pS, pS, bK, pS, pS, bK, bK, pS, bK, bK, bK, bK, bK},
-/*RD_CAP */{ pS, pS, pS, pS, pS, pS, bK, pS, pS, bK, bK, pS, bK, bK, bK, bK, bK},
-/*RESV */{ bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK},
-/*REL */{ bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK},
-/*LOG_SNS */{ pS, pS, pS, pS, pS, bK, bK, pS, pS, bK, bK, pS, bK, bK, bK, bK, bK},
-/*FORMAT */{ pS, bK, bK, bK, bK, bK, pS, pS, bK, bK, bK, bK, bK, bK, bK, bK, bK},
-/*START */{ bK, bK, bK, bK, bK, bK, bK, pS, bK, bK, bK, bK, bK, bK, bK, bK, bK},
-/*PRES_IN */{ bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK},
-/*PRES_OUT*/{ bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK},
-/*MAIN_IN */{ bK, bK, bK, bK, bK, bK, bK, pS, bK, bK, bK, bK, bK, bK, bK, bK, pS}
+/**>IDX_ :: 2nd:TUR RD WRT UNM MDSN MDSL RQSN INQ RDCP RES LSNS FMT STR*/
+/*TUR */{ pS, pS, pS, pS, bK, bK, bK, pS, pS, bK, pS, bK, bK},
+/*READ */{ pS, pS, xT, bK, bK, bK, bK, pS, pS, bK, pS, bK, bK},
+/*WRITE */{ pS, xT, xT, bK, bK, bK, bK, pS, pS, bK, pS, bK, bK},
+/*UNMAP */{ pS, bK, bK, pS, bK, bK, bK, pS, pS, bK, pS, bK, bK},
+/*MD_SNS */{ bK, bK, bK, bK, pS, bK, bK, pS, pS, bK, pS, bK, bK},
+/*MD_SEL */{ bK, bK, bK, bK, bK, bK, bK, pS, pS, bK, pS, bK, bK},
+/*RQ_SNS */{ pS, pS, pS, pS, pS, pS, bK, pS, pS, bK, pS, bK, bK},
+/*INQ */{ pS, pS, pS, pS, pS, pS, bK, pS, pS, pS, pS, bK, bK},
+/*RD_CAP */{ pS, pS, pS, pS, pS, pS, bK, pS, pS, pS, pS, bK, bK},
+/*RES */{ bK, bK, bK, bK, bK, bK, bK, pS, bK, bK, bK, bK, bK},
+/*LOG_SNS */{ pS, pS, pS, pS, pS, bK, bK, pS, pS, bK, pS, bK, bK},
+/*FORMAT */{ pS, bK, bK, bK, bK, bK, pS, pS, bK, bK, bK, bK, bK},
+/*START */{ bK, bK, bK, bK, bK, bK, bK, pS, bK, bK, bK, bK, bK},
};
diff --git a/sys/cam/ctl/ctl_tpc.c b/sys/cam/ctl/ctl_tpc.c
new file mode 100644
index 0000000000000..ad66ca9961bd6
--- /dev/null
+++ b/sys/cam/ctl/ctl_tpc.c
@@ -0,0 +1,1370 @@
+/*-
+ * Copyright (c) 2014 Alexander Motin <mav@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/types.h>
+#include <sys/lock.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/condvar.h>
+#include <sys/malloc.h>
+#include <sys/conf.h>
+#include <sys/queue.h>
+#include <sys/sysctl.h>
+#include <machine/atomic.h>
+
+#include <cam/cam.h>
+#include <cam/scsi/scsi_all.h>
+#include <cam/scsi/scsi_da.h>
+#include <cam/ctl/ctl_io.h>
+#include <cam/ctl/ctl.h>
+#include <cam/ctl/ctl_frontend.h>
+#include <cam/ctl/ctl_frontend_internal.h>
+#include <cam/ctl/ctl_util.h>
+#include <cam/ctl/ctl_backend.h>
+#include <cam/ctl/ctl_ioctl.h>
+#include <cam/ctl/ctl_ha.h>
+#include <cam/ctl/ctl_private.h>
+#include <cam/ctl/ctl_debug.h>
+#include <cam/ctl/ctl_scsi_all.h>
+#include <cam/ctl/ctl_tpc.h>
+#include <cam/ctl/ctl_error.h>
+
/*
 * Limits advertised through the Third Party Copy VPD page and
 * RECEIVE COPY OPERATING PARAMETERS, and applied to incoming lists.
 */
#define TPC_MAX_CSCDS 64	/* max CSCD descriptors per parameter list */
#define TPC_MAX_SEGS 64		/* max segment descriptors per parameter list */
#define TPC_MAX_SEG 0		/* reported maximum segment length; presumably 0 == no limit -- TODO confirm against SPC-4 */
#define TPC_MAX_LIST 8192	/* max descriptor list length, in bytes */
#define TPC_MAX_INLINE 0	/* max inline data length (none supported) */
#define TPC_MAX_LISTS 255	/* max concurrent copy operations reported */
#define TPC_MAX_IO_SIZE (1024 * 1024)	/* chunk size for copy child I/O */

MALLOC_DEFINE(M_CTL_TPC, "ctltpc", "CTL TPC");

/*
 * Action to take for a failed child I/O: retry it or fail the copy.
 * TPC_ERR_NO_DECREMENT is OR-ed in to keep the retry budget intact
 * (e.g. for unit attentions); TPC_ERR_MASK extracts the base action.
 */
typedef enum {
	TPC_ERR_RETRY = 0x000,
	TPC_ERR_FAIL = 0x001,
	TPC_ERR_MASK = 0x0ff,
	TPC_ERR_NO_DECREMENT = 0x100
} tpc_error_action;

struct tpc_list;
TAILQ_HEAD(runl, tpc_io);
/* One child CTL I/O issued on behalf of a copy segment. */
struct tpc_io {
	union ctl_io *io;		/* the CTL I/O itself */
	uint64_t lun;			/* LUN the I/O is addressed to */
	struct tpc_list *list;		/* owning copy list */
	struct runl run;		/* I/Os dispatched after this one */
	TAILQ_ENTRY(tpc_io) rlinks;	/* membership in a run list */
	TAILQ_ENTRY(tpc_io) links;	/* membership in list->allio */
};

/* State of one EXTENDED COPY parameter list (one copy operation). */
struct tpc_list {
	uint8_t service_action;		/* service action of originating cmd */
	int init_port;
	uint32_t init_idx;
	uint32_t list_id;		/* identifier used by status queries */
	uint8_t flags;			/* incl. EC_LIST_ID_USAGE_MASK bits */
	uint8_t *params;		/* raw parameter data */
	struct scsi_ec_cscd *cscd;	/* CSCD descriptor array */
	struct scsi_ec_segment *seg[TPC_MAX_SEGS]; /* segment descriptors */
	uint8_t *inl;			/* inline data, if any */
	int ncscd;			/* number of CSCD descriptors */
	int nseg;			/* number of segment descriptors */
	int leninl;			/* length of inline data */
	int curseg;			/* segment currently being processed */
	off_t curbytes;			/* bytes copied by finished segments */
	int curops;
	int stage;			/* per-segment state machine stage */
	uint8_t *buf;			/* bounce buffer for b2b copies */
	int segbytes;			/* bytes moved by current segment */
	int tbdio;			/* outstanding child I/O -- TODO confirm semantics against tpc_done() */
	int error;			/* a child I/O failed */
	int abort;			/* COPY OPERATION ABORT requested */
	int completed;			/* done; status kept for queries */
	TAILQ_HEAD(, tpc_io) allio;	/* all child I/Os of cur segment */
	struct scsi_sense_data sense_data; /* final status for queries */
	uint8_t sense_len;
	uint8_t scsi_status;
	struct ctl_scsiio *ctsio;	/* originating EXTENDED COPY command */
	struct ctl_lun *lun;		/* LUN the command arrived on */
	TAILQ_ENTRY(tpc_list) links;	/* membership in lun->tpc_lists */
};
+
/*
 * Initialize per-LUN Third Party Copy state: an empty queue of
 * in-progress/completed copy lists.  Called at LUN creation.
 */
void
ctl_tpc_init(struct ctl_lun *lun)
{

	TAILQ_INIT(&lun->tpc_lists);
}
+
+void
+ctl_tpc_shutdown(struct ctl_lun *lun)
+{
+ struct tpc_list *list;
+
+ while ((list = TAILQ_FIRST(&lun->tpc_lists)) != NULL) {
+ TAILQ_REMOVE(&lun->tpc_lists, list, links);
+ KASSERT(list->completed,
+ ("Not completed TPC (%p) on shutdown", list));
+ free(list, M_CTL);
+ }
+}
+
/*
 * INQUIRY EVPD page 8Fh (Third Party Copy): advertise the EXTENDED COPY
 * commands, descriptor types, and resource limits this target supports.
 * Builds the page in a freshly allocated buffer and queues the data-in
 * transfer; always returns CTL_RETVAL_COMPLETE.
 */
int
ctl_inquiry_evpd_tpc(struct ctl_scsiio *ctsio, int alloc_len)
{
	struct scsi_vpd_tpc *tpc_ptr;
	struct scsi_vpd_tpc_descriptor *d_ptr;
	struct scsi_vpd_tpc_descriptor_sc *sc_ptr;
	struct scsi_vpd_tpc_descriptor_sc_descr *scd_ptr;
	struct scsi_vpd_tpc_descriptor_pd *pd_ptr;
	struct scsi_vpd_tpc_descriptor_sd *sd_ptr;
	struct scsi_vpd_tpc_descriptor_sdid *sdid_ptr;
	struct scsi_vpd_tpc_descriptor_gco *gco_ptr;
	struct ctl_lun *lun;
	int data_len;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	/*
	 * Total size of all descriptors; the variable-length ones are
	 * padded to 4-byte boundaries, matching the desc_length values
	 * written below.
	 */
	data_len = sizeof(struct scsi_vpd_tpc) +
	    roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sc) +
	     2 * sizeof(struct scsi_vpd_tpc_descriptor_sc_descr) + 7, 4) +
	    sizeof(struct scsi_vpd_tpc_descriptor_pd) +
	    roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sd) + 4, 4) +
	    roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sdid) + 2, 4) +
	    sizeof(struct scsi_vpd_tpc_descriptor_gco);

	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	tpc_ptr = (struct scsi_vpd_tpc *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;

	/*
	 * Transfer min(alloc_len, data_len) bytes; anything the initiator
	 * asked for beyond that is reported as residual.
	 */
	if (data_len < alloc_len) {
		ctsio->residual = alloc_len - data_len;
		ctsio->kern_data_len = data_len;
		ctsio->kern_total_len = data_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	/*
	 * The control device is always connected. The disk device, on the
	 * other hand, may not be online all the time.
	 */
	if (lun != NULL)
		tpc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
				     lun->be_lun->lun_type;
	else
		tpc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
	tpc_ptr->page_code = SVPD_SCSI_TPC;
	scsi_ulto2b(data_len - 4, tpc_ptr->page_length);

	/* Supported commands: EXTENDED COPY and RECEIVE COPY STATUS,
	 * each with its list of supported service actions. */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)&tpc_ptr->descr[0];
	sc_ptr = (struct scsi_vpd_tpc_descriptor_sc *)d_ptr;
	scsi_ulto2b(SVPD_TPC_SC, sc_ptr->desc_type);
	sc_ptr->list_length = 2 * sizeof(*scd_ptr) + 7;
	scsi_ulto2b(roundup2(1 + sc_ptr->list_length, 4), sc_ptr->desc_length);
	scd_ptr = &sc_ptr->descr[0];
	scd_ptr->opcode = EXTENDED_COPY;
	scd_ptr->sa_length = 3;
	scd_ptr->supported_service_actions[0] = EC_EC_LID1;
	scd_ptr->supported_service_actions[1] = EC_EC_LID4;
	scd_ptr->supported_service_actions[2] = EC_COA;
	/* Second command descriptor follows the first's variable tail. */
	scd_ptr = (struct scsi_vpd_tpc_descriptor_sc_descr *)
	    &scd_ptr->supported_service_actions[scd_ptr->sa_length];
	scd_ptr->opcode = RECEIVE_COPY_STATUS;
	scd_ptr->sa_length = 4;
	scd_ptr->supported_service_actions[0] = RCS_RCS_LID1;
	scd_ptr->supported_service_actions[1] = RCS_RCFD;
	scd_ptr->supported_service_actions[2] = RCS_RCS_LID4;
	scd_ptr->supported_service_actions[3] = RCS_RCOP;

	/* Parameter data. */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	pd_ptr = (struct scsi_vpd_tpc_descriptor_pd *)d_ptr;
	scsi_ulto2b(SVPD_TPC_PD, pd_ptr->desc_type);
	scsi_ulto2b(sizeof(*pd_ptr) - 4, pd_ptr->desc_length);
	scsi_ulto2b(TPC_MAX_CSCDS, pd_ptr->maximum_cscd_descriptor_count);
	scsi_ulto2b(TPC_MAX_SEGS, pd_ptr->maximum_segment_descriptor_count);
	scsi_ulto4b(TPC_MAX_LIST, pd_ptr->maximum_descriptor_list_length);
	scsi_ulto4b(TPC_MAX_INLINE, pd_ptr->maximum_inline_data_length);

	/* Supported Descriptors */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	sd_ptr = (struct scsi_vpd_tpc_descriptor_sd *)d_ptr;
	scsi_ulto2b(SVPD_TPC_SD, sd_ptr->desc_type);
	scsi_ulto2b(roundup2(sizeof(*sd_ptr) - 4 + 4, 4), sd_ptr->desc_length);
	sd_ptr->list_length = 4;
	sd_ptr->supported_descriptor_codes[0] = EC_SEG_B2B;
	sd_ptr->supported_descriptor_codes[1] = EC_SEG_VERIFY;
	sd_ptr->supported_descriptor_codes[2] = EC_SEG_REGISTER_KEY;
	sd_ptr->supported_descriptor_codes[3] = EC_CSCD_ID;

	/* Supported CSCD Descriptor IDs */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	sdid_ptr = (struct scsi_vpd_tpc_descriptor_sdid *)d_ptr;
	scsi_ulto2b(SVPD_TPC_SDID, sdid_ptr->desc_type);
	scsi_ulto2b(roundup2(sizeof(*sdid_ptr) - 4 + 2, 4), sdid_ptr->desc_length);
	scsi_ulto2b(2, sdid_ptr->list_length);
	/* 0xffff is the "this LUN" descriptor id; see tpc_resolve(). */
	scsi_ulto2b(0xffff, &sdid_ptr->supported_descriptor_ids[0]);

	/* General Copy Operations */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	gco_ptr = (struct scsi_vpd_tpc_descriptor_gco *)d_ptr;
	scsi_ulto2b(SVPD_TPC_GCO, gco_ptr->desc_type);
	scsi_ulto2b(sizeof(*gco_ptr) - 4, gco_ptr->desc_length);
	scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->total_concurrent_copies);
	scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->maximum_identified_concurrent_copies);
	scsi_ulto4b(TPC_MAX_SEG, gco_ptr->maximum_segment_length);
	gco_ptr->data_segment_granularity = 0;
	gco_ptr->inline_data_granularity = 0;

	ctsio->scsi_status = SCSI_STATUS_OK;
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);

	return (CTL_RETVAL_COMPLETE);
}
+
+int
+ctl_receive_copy_operating_parameters(struct ctl_scsiio *ctsio)
+{
+ struct ctl_lun *lun;
+ struct scsi_receive_copy_operating_parameters *cdb;
+ struct scsi_receive_copy_operating_parameters_data *data;
+ int retval;
+ int alloc_len, total_len;
+
+ CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n"));
+
+ cdb = (struct scsi_receive_copy_operating_parameters *)ctsio->cdb;
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+
+ retval = CTL_RETVAL_COMPLETE;
+
+ total_len = sizeof(*data) + 4;
+ alloc_len = scsi_4btoul(cdb->length);
+
+ ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
+
+ ctsio->kern_sg_entries = 0;
+
+ if (total_len < alloc_len) {
+ ctsio->residual = alloc_len - total_len;
+ ctsio->kern_data_len = total_len;
+ ctsio->kern_total_len = total_len;
+ } else {
+ ctsio->residual = 0;
+ ctsio->kern_data_len = alloc_len;
+ ctsio->kern_total_len = alloc_len;
+ }
+ ctsio->kern_data_resid = 0;
+ ctsio->kern_rel_offset = 0;
+
+ data = (struct scsi_receive_copy_operating_parameters_data *)ctsio->kern_data_ptr;
+ scsi_ulto4b(sizeof(*data) - 4 + 4, data->length);
+ data->snlid = RCOP_SNLID;
+ scsi_ulto2b(TPC_MAX_CSCDS, data->maximum_cscd_descriptor_count);
+ scsi_ulto2b(TPC_MAX_SEGS, data->maximum_segment_descriptor_count);
+ scsi_ulto4b(TPC_MAX_LIST, data->maximum_descriptor_list_length);
+ scsi_ulto4b(TPC_MAX_SEG, data->maximum_segment_length);
+ scsi_ulto4b(TPC_MAX_INLINE, data->maximum_inline_data_length);
+ scsi_ulto4b(0, data->held_data_limit);
+ scsi_ulto4b(0, data->maximum_stream_device_transfer_size);
+ scsi_ulto2b(TPC_MAX_LISTS, data->total_concurrent_copies);
+ data->maximum_concurrent_copies = TPC_MAX_LISTS;
+ data->data_segment_granularity = 0;
+ data->inline_data_granularity = 0;
+ data->held_data_granularity = 0;
+ data->implemented_descriptor_list_length = 4;
+ data->list_of_implemented_descriptor_type_codes[0] = EC_SEG_B2B;
+ data->list_of_implemented_descriptor_type_codes[1] = EC_SEG_VERIFY;
+ data->list_of_implemented_descriptor_type_codes[2] = EC_SEG_REGISTER_KEY;
+ data->list_of_implemented_descriptor_type_codes[3] = EC_CSCD_ID;
+
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+ ctsio->be_move_done = ctl_config_move_done;
+
+ ctl_datamove((union ctl_io *)ctsio);
+ return (retval);
+}
+
/*
 * RECEIVE COPY STATUS (LID1): report the progress or final result of the
 * copy operation whose list identifier matches the CDB.  A completed list
 * is freed once its status has been returned; an in-progress list stays
 * queued and is reported as in progress.
 */
int
ctl_receive_copy_status_lid1(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_receive_copy_status_lid1 *cdb;
	struct scsi_receive_copy_status_lid1_data *data;
	struct tpc_list *list;
	struct tpc_list list_copy;
	int retval;
	int alloc_len, total_len;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid1\n"));

	cdb = (struct scsi_receive_copy_status_lid1 *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	/* Lists created with list-id usage "none" are not addressable. */
	list_id = cdb->list_identifier;
	mtx_lock(&lun->lun_lock);
	TAILQ_FOREACH(list, &lun->tpc_lists, links) {
		if ((list->flags & EC_LIST_ID_USAGE_MASK) !=
		     EC_LIST_ID_USAGE_NONE && list->list_id == list_id)
			break;
	}
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	/* Snapshot under the lock; the list may be freed just below. */
	list_copy = *list;
	if (list->completed) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	}
	mtx_unlock(&lun->lun_lock);

	total_len = sizeof(*data);
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	/* Transfer min(alloc_len, total_len); report the rest as residual. */
	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	data = (struct scsi_receive_copy_status_lid1_data *)ctsio->kern_data_ptr;
	scsi_ulto4b(sizeof(*data) - 4, data->available_data);
	if (list_copy.completed) {
		if (list_copy.error || list_copy.abort)
			data->copy_command_status = RCS_CCS_ERROR;
		else
			data->copy_command_status = RCS_CCS_COMPLETED;
	} else
		data->copy_command_status = RCS_CCS_INPROG;
	scsi_ulto2b(list_copy.curseg, data->segments_processed);
	/* Report bytes while they fit in 32 bits, else megabytes. */
	if (list_copy.curbytes <= UINT32_MAX) {
		data->transfer_count_units = RCS_TC_BYTES;
		scsi_ulto4b(list_copy.curbytes, data->transfer_count);
	} else {
		data->transfer_count_units = RCS_TC_MBYTES;
		scsi_ulto4b(list_copy.curbytes >> 20, data->transfer_count);
	}

	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;

	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}
+
/*
 * RECEIVE COPY FAILURE DETAILS: return the final status and saved sense
 * data of a completed copy list.  Only completed lists match the lookup;
 * a match is consumed (removed and freed) by this query.  An unknown or
 * still-running list id is reported as an invalid field.
 */
int
ctl_receive_copy_failure_details(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_receive_copy_failure_details *cdb;
	struct scsi_receive_copy_failure_details_data *data;
	struct tpc_list *list;
	struct tpc_list list_copy;
	int retval;
	int alloc_len, total_len;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_receive_copy_failure_details\n"));

	cdb = (struct scsi_receive_copy_failure_details *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	/* Only completed, id-addressable lists are eligible. */
	list_id = cdb->list_identifier;
	mtx_lock(&lun->lun_lock);
	TAILQ_FOREACH(list, &lun->tpc_lists, links) {
		if (list->completed && (list->flags & EC_LIST_ID_USAGE_MASK) !=
		     EC_LIST_ID_USAGE_NONE && list->list_id == list_id)
			break;
	}
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	/* Snapshot under the lock, then consume the list. */
	list_copy = *list;
	TAILQ_REMOVE(&lun->tpc_lists, list, links);
	free(list, M_CTL);
	mtx_unlock(&lun->lun_lock);

	/* Header plus the saved sense bytes. */
	total_len = sizeof(*data) + list_copy.sense_len;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	/* Transfer min(alloc_len, total_len); report the rest as residual. */
	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	data = (struct scsi_receive_copy_failure_details_data *)ctsio->kern_data_ptr;
	/* Only failed/aborted operations carry failure details. */
	if (list_copy.completed && (list_copy.error || list_copy.abort)) {
		scsi_ulto4b(sizeof(*data) - 4, data->available_data);
		data->copy_command_status = RCS_CCS_ERROR;
	} else
		scsi_ulto4b(0, data->available_data);
	scsi_ulto2b(list_copy.sense_len, data->sense_data_length);
	memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);

	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;

	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}
+
/*
 * RECEIVE COPY STATUS (LID4): like the LID1 variant, but with a 4-byte
 * list identifier and a richer status format including operation counter
 * and saved sense data.  A completed list is freed after its status has
 * been returned once.
 */
int
ctl_receive_copy_status_lid4(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_receive_copy_status_lid4 *cdb;
	struct scsi_receive_copy_status_lid4_data *data;
	struct tpc_list *list;
	struct tpc_list list_copy;
	int retval;
	int alloc_len, total_len;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid4\n"));

	cdb = (struct scsi_receive_copy_status_lid4 *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	/* Lists created with list-id usage "none" are not addressable. */
	list_id = scsi_4btoul(cdb->list_identifier);
	mtx_lock(&lun->lun_lock);
	TAILQ_FOREACH(list, &lun->tpc_lists, links) {
		if ((list->flags & EC_LIST_ID_USAGE_MASK) !=
		     EC_LIST_ID_USAGE_NONE && list->list_id == list_id)
			break;
	}
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	/* Snapshot under the lock; the list may be freed just below. */
	list_copy = *list;
	if (list->completed) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	}
	mtx_unlock(&lun->lun_lock);

	total_len = sizeof(*data) + list_copy.sense_len;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	/* Transfer min(alloc_len, total_len); report the rest as residual. */
	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr;
	/*
	 * NOTE(review): available_data does not count the appended sense
	 * bytes even though they are transferred -- confirm against SPC-4.
	 */
	scsi_ulto4b(sizeof(*data) - 4, data->available_data);
	data->response_to_service_action = list_copy.service_action;
	if (list_copy.completed) {
		if (list_copy.error)
			data->copy_command_status = RCS_CCS_ERROR;
		else if (list_copy.abort)
			data->copy_command_status = RCS_CCS_ABORTED;
		else
			data->copy_command_status = RCS_CCS_COMPLETED;
	} else
		data->copy_command_status = RCS_CCS_INPROG_FG;
	scsi_ulto2b(list_copy.curops, data->operation_counter);
	/* No meaningful estimate is available; report the maximum. */
	scsi_ulto4b(UINT32_MAX, data->estimated_status_update_delay);
	/* Report bytes while they fit in 32 bits, else megabytes. */
	if (list_copy.curbytes <= UINT32_MAX) {
		data->transfer_count_units = RCS_TC_BYTES;
		scsi_ulto4b(list_copy.curbytes, data->transfer_count);
	} else {
		data->transfer_count_units = RCS_TC_MBYTES;
		scsi_ulto4b(list_copy.curbytes >> 20, data->transfer_count);
	}
	scsi_ulto2b(list_copy.curseg, data->segments_processed);
	data->sense_data_length = list_copy.sense_len;
	memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);

	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;

	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}
+
+int
+ctl_copy_operation_abort(struct ctl_scsiio *ctsio)
+{
+ struct ctl_lun *lun;
+ struct scsi_copy_operation_abort *cdb;
+ struct tpc_list *list;
+ int retval;
+ uint32_t list_id;
+
+ CTL_DEBUG_PRINT(("ctl_copy_operation_abort\n"));
+
+ cdb = (struct scsi_copy_operation_abort *)ctsio->cdb;
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+
+ retval = CTL_RETVAL_COMPLETE;
+
+ list_id = scsi_4btoul(cdb->list_identifier);
+ mtx_lock(&lun->lun_lock);
+ TAILQ_FOREACH(list, &lun->tpc_lists, links) {
+ if ((list->flags & EC_LIST_ID_USAGE_MASK) !=
+ EC_LIST_ID_USAGE_NONE && list->list_id == list_id)
+ break;
+ }
+ if (list == NULL) {
+ mtx_unlock(&lun->lun_lock);
+ ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
+ /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
+ /*bit*/ 0);
+ ctl_done((union ctl_io *)ctsio);
+ return (retval);
+ }
+ list->abort = 1;
+ mtx_unlock(&lun->lun_lock);
+
+ ctl_set_success(ctsio);
+ ctl_done((union ctl_io *)ctsio);
+ return (retval);
+}
+
+static uint64_t
+tpc_resolve(struct tpc_list *list, uint16_t idx, uint32_t *ss)
+{
+
+ if (idx == 0xffff) {
+ if (ss && list->lun->be_lun)
+ *ss = list->lun->be_lun->blocksize;
+ return (list->lun->lun);
+ }
+ if (idx >= list->ncscd)
+ return (UINT64_MAX);
+ return (tpcl_resolve(list->init_port, &list->cscd[idx], ss));
+}
+
+static int
+tpc_process_b2b(struct tpc_list *list)
+{
+ struct scsi_ec_segment_b2b *seg;
+ struct scsi_ec_cscd_dtsp *sdstp, *ddstp;
+ struct tpc_io *tior, *tiow;
+ struct runl run, *prun;
+ uint64_t sl, dl;
+ off_t srclba, dstlba, numbytes, donebytes, roundbytes;
+ int numlba;
+ uint32_t srcblock, dstblock;
+
+ if (list->stage == 1) {
+complete:
+ while ((tior = TAILQ_FIRST(&list->allio)) != NULL) {
+ TAILQ_REMOVE(&list->allio, tior, links);
+ ctl_free_io(tior->io);
+ free(tior, M_CTL);
+ }
+ free(list->buf, M_CTL);
+ if (list->abort) {
+ ctl_set_task_aborted(list->ctsio);
+ return (CTL_RETVAL_ERROR);
+ } else if (list->error) {
+ ctl_set_sense(list->ctsio, /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_COPY_ABORTED,
+ /*asc*/ 0x0d, /*ascq*/ 0x01, SSD_ELEM_NONE);
+ return (CTL_RETVAL_ERROR);
+ } else {
+ list->curbytes += list->segbytes;
+ return (CTL_RETVAL_COMPLETE);
+ }
+ }
+
+ TAILQ_INIT(&list->allio);
+ seg = (struct scsi_ec_segment_b2b *)list->seg[list->curseg];
+ sl = tpc_resolve(list, scsi_2btoul(seg->src_cscd), &srcblock);
+ dl = tpc_resolve(list, scsi_2btoul(seg->dst_cscd), &dstblock);
+ if (sl >= CTL_MAX_LUNS || dl >= CTL_MAX_LUNS) {
+ ctl_set_sense(list->ctsio, /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_COPY_ABORTED,
+ /*asc*/ 0x08, /*ascq*/ 0x04, SSD_ELEM_NONE);
+ return (CTL_RETVAL_ERROR);
+ }
+ sdstp = &list->cscd[scsi_2btoul(seg->src_cscd)].dtsp;
+ if (scsi_3btoul(sdstp->block_length) != 0)
+ srcblock = scsi_3btoul(sdstp->block_length);
+ ddstp = &list->cscd[scsi_2btoul(seg->dst_cscd)].dtsp;
+ if (scsi_3btoul(ddstp->block_length) != 0)
+ dstblock = scsi_3btoul(ddstp->block_length);
+ numlba = scsi_2btoul(seg->number_of_blocks);
+ if (seg->flags & EC_SEG_DC)
+ numbytes = (off_t)numlba * dstblock;
+ else
+ numbytes = (off_t)numlba * srcblock;
+ srclba = scsi_8btou64(seg->src_lba);
+ dstlba = scsi_8btou64(seg->dst_lba);
+
+// printf("Copy %ju bytes from %ju @ %ju to %ju @ %ju\n",
+// (uintmax_t)numbytes, sl, scsi_8btou64(seg->src_lba),
+// dl, scsi_8btou64(seg->dst_lba));
+
+ if (numbytes == 0)
+ return (CTL_RETVAL_COMPLETE);
+
+ if (numbytes % srcblock != 0 || numbytes % dstblock != 0) {
+ ctl_set_sense(list->ctsio, /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_COPY_ABORTED,
+ /*asc*/ 0x26, /*ascq*/ 0x0A, SSD_ELEM_NONE);
+ return (CTL_RETVAL_ERROR);
+ }
+
+ list->buf = malloc(numbytes, M_CTL, M_WAITOK);
+ list->segbytes = numbytes;
+ donebytes = 0;
+ TAILQ_INIT(&run);
+ prun = &run;
+ list->tbdio = 1;
+ while (donebytes < numbytes) {
+ roundbytes = MIN(numbytes - donebytes, TPC_MAX_IO_SIZE);
+
+ tior = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO);
+ TAILQ_INIT(&tior->run);
+ tior->list = list;
+ TAILQ_INSERT_TAIL(&list->allio, tior, links);
+ tior->io = tpcl_alloc_io();
+ if (tior->io == NULL) {
+ list->error = 1;
+ goto complete;
+ }
+ ctl_scsi_read_write(tior->io,
+ /*data_ptr*/ &list->buf[donebytes],
+ /*data_len*/ roundbytes,
+ /*read_op*/ 1,
+ /*byte2*/ 0,
+ /*minimum_cdb_size*/ 0,
+ /*lba*/ srclba + donebytes / srcblock,
+ /*num_blocks*/ roundbytes / srcblock,
+ /*tag_type*/ CTL_TAG_SIMPLE,
+ /*control*/ 0);
+ tior->io->io_hdr.retries = 3;
+ tior->lun = sl;
+ tior->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tior;
+
+ tiow = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO);
+ TAILQ_INIT(&tiow->run);
+ tiow->list = list;
+ TAILQ_INSERT_TAIL(&list->allio, tiow, links);
+ tiow->io = tpcl_alloc_io();
+ if (tiow->io == NULL) {
+ list->error = 1;
+ goto complete;
+ }
+ ctl_scsi_read_write(tiow->io,
+ /*data_ptr*/ &list->buf[donebytes],
+ /*data_len*/ roundbytes,
+ /*read_op*/ 0,
+ /*byte2*/ 0,
+ /*minimum_cdb_size*/ 0,
+ /*lba*/ dstlba + donebytes / dstblock,
+ /*num_blocks*/ roundbytes / dstblock,
+ /*tag_type*/ CTL_TAG_SIMPLE,
+ /*control*/ 0);
+ tiow->io->io_hdr.retries = 3;
+ tiow->lun = dl;
+ tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tior;
+
+ TAILQ_INSERT_TAIL(&tior->run, tiow, rlinks);
+ TAILQ_INSERT_TAIL(prun, tior, rlinks);
+ prun = &tior->run;
+ donebytes += roundbytes;
+ }
+
+ while ((tior = TAILQ_FIRST(&run)) != NULL) {
+ TAILQ_REMOVE(&run, tior, rlinks);
+ if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
+ panic("tpcl_queue() error");
+ }
+
+ list->stage++;
+ return (CTL_RETVAL_QUEUED);
+}
+
/*
 * Process an EXTENDED COPY "verify CSCD" segment descriptor.  If the
 * descriptor's TUR bit is set, a TEST UNIT READY is issued to the target
 * device; otherwise the segment is a no-op.  Two-stage: stage 0 queues
 * the I/O and returns CTL_RETVAL_QUEUED, stage 1 collects the result.
 */
static int
tpc_process_verify(struct tpc_list *list)
{
	struct scsi_ec_segment_verify *seg;
	struct tpc_io *tio;
	uint64_t sl;

	if (list->stage == 1) {
complete:
		/* Stage 1: I/O has completed (or setup failed); clean up. */
		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tio, links);
			ctl_free_io(tio->io);
			free(tio, M_CTL);
		}
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			ctl_set_sense(list->ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_COPY_ABORTED,
			    /*asc*/ 0x0d, /*ascq*/ 0x01, SSD_ELEM_NONE);
			return (CTL_RETVAL_ERROR);
		} else
			return (CTL_RETVAL_COMPLETE);
	}

	TAILQ_INIT(&list->allio);
	seg = (struct scsi_ec_segment_verify *)list->seg[list->curseg];
	sl = tpc_resolve(list, scsi_2btoul(seg->src_cscd), NULL);
	if (sl >= CTL_MAX_LUNS) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x08, /*ascq*/ 0x04, SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

// printf("Verify %ju\n", sl);

	/* Nothing to do unless the TUR bit is set. */
	if ((seg->tur & 0x01) == 0)
		return (CTL_RETVAL_COMPLETE);

	/* Stage 0: build and queue a single TEST UNIT READY. */
	list->tbdio = 1;
	tio = malloc(sizeof(*tio), M_CTL, M_WAITOK | M_ZERO);
	TAILQ_INIT(&tio->run);
	tio->list = list;
	TAILQ_INSERT_TAIL(&list->allio, tio, links);
	tio->io = tpcl_alloc_io();
	if (tio->io == NULL) {
		list->error = 1;
		goto complete;
	}
	ctl_scsi_tur(tio->io, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0);
	tio->io->io_hdr.retries = 3;
	tio->lun = sl;
	tio->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tio;
	list->stage++;
	if (tpcl_queue(tio->io, tio->lun) != CTL_RETVAL_COMPLETE)
		panic("tpcl_queue() error");
	return (CTL_RETVAL_QUEUED);
}
+
/*
 * Process an EXTENDED COPY "register key" segment descriptor: issue a
 * PERSISTENT RESERVE OUT / REGISTER to the target device using the
 * reservation keys carried in the descriptor.  Two-stage: stage 0
 * queues the I/O and returns CTL_RETVAL_QUEUED, stage 1 collects the
 * result.
 */
static int
tpc_process_register_key(struct tpc_list *list)
{
	struct scsi_ec_segment_register_key *seg;
	struct tpc_io *tio;
	uint64_t dl;
	int datalen;

	if (list->stage == 1) {
complete:
		/* Stage 1: I/O has completed (or setup failed); clean up. */
		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tio, links);
			ctl_free_io(tio->io);
			free(tio, M_CTL);
		}
		/*
		 * NOTE(review): on the alloc-failure path below, list->buf
		 * has not been allocated by this segment and may still hold
		 * a pointer already freed by an earlier b2b segment --
		 * potential double free; verify buf lifecycle.
		 */
		free(list->buf, M_CTL);
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			ctl_set_sense(list->ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_COPY_ABORTED,
			    /*asc*/ 0x0d, /*ascq*/ 0x01, SSD_ELEM_NONE);
			return (CTL_RETVAL_ERROR);
		} else
			return (CTL_RETVAL_COMPLETE);
	}

	TAILQ_INIT(&list->allio);
	seg = (struct scsi_ec_segment_register_key *)list->seg[list->curseg];
	dl = tpc_resolve(list, scsi_2btoul(seg->dst_cscd), NULL);
	if (dl >= CTL_MAX_LUNS) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x08, /*ascq*/ 0x04, SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

// printf("Register Key %ju\n", dl);

	/* Stage 0: build and queue the PERSISTENT RESERVE OUT. */
	list->tbdio = 1;
	tio = malloc(sizeof(*tio), M_CTL, M_WAITOK | M_ZERO);
	TAILQ_INIT(&tio->run);
	tio->list = list;
	TAILQ_INSERT_TAIL(&list->allio, tio, links);
	tio->io = tpcl_alloc_io();
	if (tio->io == NULL) {
		list->error = 1;
		goto complete;
	}
	datalen = sizeof(struct scsi_per_res_out_parms);
	list->buf = malloc(datalen, M_CTL, M_WAITOK);
	ctl_scsi_persistent_res_out(tio->io,
	    list->buf, datalen, SPRO_REGISTER, -1,
	    scsi_8btou64(seg->res_key), scsi_8btou64(seg->sa_res_key),
	    /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0);
	tio->io->io_hdr.retries = 3;
	tio->lun = dl;
	tio->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tio;
	list->stage++;
	if (tpcl_queue(tio->io, tio->lun) != CTL_RETVAL_COMPLETE)
		panic("tpcl_queue() error");
	return (CTL_RETVAL_QUEUED);
}
+
+static void
+tpc_process(struct tpc_list *list)
+{
+ struct ctl_lun *lun = list->lun;
+ struct scsi_ec_segment *seg;
+ struct ctl_scsiio *ctsio = list->ctsio;
+ int retval = CTL_RETVAL_COMPLETE;
+
+//printf("ZZZ %d cscd, %d segs\n", list->ncscd, list->nseg);
+ while (list->curseg < list->nseg) {
+ seg = list->seg[list->curseg];
+ switch (seg->type_code) {
+ case EC_SEG_B2B:
+ retval = tpc_process_b2b(list);
+ break;
+ case EC_SEG_VERIFY:
+ retval = tpc_process_verify(list);
+ break;
+ case EC_SEG_REGISTER_KEY:
+ retval = tpc_process_register_key(list);
+ break;
+ default:
+ ctl_set_sense(ctsio, /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_COPY_ABORTED,
+ /*asc*/ 0x26, /*ascq*/ 0x09, SSD_ELEM_NONE);
+ goto done;
+ }
+ if (retval == CTL_RETVAL_QUEUED)
+ return;
+ if (retval == CTL_RETVAL_ERROR) {
+ list->error = 1;
+ goto done;
+ }
+ list->curseg++;
+ list->stage = 0;
+ }
+
+ ctl_set_success(ctsio);
+
+done:
+//printf("ZZZ done\n");
+ mtx_lock(&lun->lun_lock);
+ if ((list->flags & EC_LIST_ID_USAGE_MASK) == EC_LIST_ID_USAGE_NONE) {
+ TAILQ_REMOVE(&lun->tpc_lists, list, links);
+ free(list, M_CTL);
+ } else {
+ list->completed = 1;
+ list->sense_data = ctsio->sense_data;
+ list->sense_len = ctsio->sense_len;
+ list->scsi_status = ctsio->scsi_status;
+ }
+ mtx_unlock(&lun->lun_lock);
+
+ ctl_done((union ctl_io *)ctsio);
+}
+
+/*
+ * For any sort of check condition, busy, etc., we just retry. We do not
+ * decrement the retry count for unit attention type errors. These are
+ * normal, and we want to save the retry count for "real" errors. Otherwise,
+ * we could end up with situations where a command will succeed in some
+ * situations and fail in others, depending on whether a unit attention is
+ * pending. Also, some of our error recovery actions, most notably the
+ * LUN reset action, will cause a unit attention.
+ *
+ * We can add more detail here later if necessary.
+ */
+static tpc_error_action
+tpc_checkcond_parse(union ctl_io *io)
+{
+ tpc_error_action error_action;
+ int error_code, sense_key, asc, ascq;
+
+ /*
+ * Default to retrying the command.
+ */
+ error_action = TPC_ERR_RETRY;
+
+ scsi_extract_sense_len(&io->scsiio.sense_data,
+ io->scsiio.sense_len,
+ &error_code,
+ &sense_key,
+ &asc,
+ &ascq,
+ /*show_errors*/ 1);
+
+ switch (error_code) {
+ case SSD_DEFERRED_ERROR:
+ case SSD_DESC_DEFERRED_ERROR:
+ error_action |= TPC_ERR_NO_DECREMENT;
+ break;
+ case SSD_CURRENT_ERROR:
+ case SSD_DESC_CURRENT_ERROR:
+ default:
+ switch (sense_key) {
+ case SSD_KEY_UNIT_ATTENTION:
+ error_action |= TPC_ERR_NO_DECREMENT;
+ break;
+ case SSD_KEY_HARDWARE_ERROR:
+ /*
+ * This is our generic "something bad happened"
+ * error code. It often isn't recoverable.
+ */
+ if ((asc == 0x44) && (ascq == 0x00))
+ error_action = TPC_ERR_FAIL;
+ break;
+ case SSD_KEY_NOT_READY:
+ /*
+ * If the LUN is powered down, there likely isn't
+ * much point in retrying right now.
+ */
+ if ((asc == 0x04) && (ascq == 0x02))
+ error_action = TPC_ERR_FAIL;
+ /*
+ * If the LUN is offline, there probably isn't much
+ * point in retrying, either.
+ */
+ if ((asc == 0x04) && (ascq == 0x03))
+ error_action = TPC_ERR_FAIL;
+ break;
+ }
+ }
+ return (error_action);
+}
+
+static tpc_error_action
+tpc_error_parse(union ctl_io *io)
+{
+ tpc_error_action error_action = TPC_ERR_RETRY;
+
+ switch (io->io_hdr.io_type) {
+ case CTL_IO_SCSI:
+ switch (io->io_hdr.status & CTL_STATUS_MASK) {
+ case CTL_SCSI_ERROR:
+ switch (io->scsiio.scsi_status) {
+ case SCSI_STATUS_CHECK_COND:
+ error_action = tpc_checkcond_parse(io);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ case CTL_IO_TASK:
+ break;
+ default:
+ panic("%s: invalid ctl_io type %d\n", __func__,
+ io->io_hdr.io_type);
+ break;
+ }
+ return (error_action);
+}
+
+void
+tpc_done(union ctl_io *io)
+{
+ struct tpc_io *tio, *tior;
+
+ /*
+ * Very minimal retry logic. We basically retry if we got an error
+ * back, and the retry count is greater than 0. If we ever want
+ * more sophisticated initiator type behavior, the CAM error
+ * recovery code in ../common might be helpful.
+ */
+// if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
+// ctl_io_error_print(io, NULL);
+ tio = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
+ if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
+ && (io->io_hdr.retries > 0)) {
+ ctl_io_status old_status;
+ tpc_error_action error_action;
+
+ error_action = tpc_error_parse(io);
+ switch (error_action & TPC_ERR_MASK) {
+ case TPC_ERR_FAIL:
+ break;
+ case TPC_ERR_RETRY:
+ default:
+ if ((error_action & TPC_ERR_NO_DECREMENT) == 0)
+ io->io_hdr.retries--;
+ old_status = io->io_hdr.status;
+ io->io_hdr.status = CTL_STATUS_NONE;
+ io->io_hdr.flags &= ~CTL_FLAG_ABORT;
+ io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
+ if (tpcl_queue(io, tio->lun) != CTL_RETVAL_COMPLETE) {
+ printf("%s: error returned from ctl_queue()!\n",
+ __func__);
+ io->io_hdr.status = old_status;
+ } else
+ return;
+ }
+ }
+
+ if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
+ tio->list->error = 1;
+ else
+ atomic_add_int(&tio->list->curops, 1);
+ if (!tio->list->error && !tio->list->abort) {
+ while ((tior = TAILQ_FIRST(&tio->run)) != NULL) {
+ TAILQ_REMOVE(&tio->run, tior, rlinks);
+ atomic_add_int(&tio->list->tbdio, 1);
+ if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
+ panic("tpcl_queue() error");
+ }
+ }
+ if (atomic_fetchadd_int(&tio->list->tbdio, -1) == 1)
+ tpc_process(tio->list);
+}
+
+int
+ctl_extended_copy_lid1(struct ctl_scsiio *ctsio)
+{
+ struct scsi_extended_copy *cdb;
+ struct scsi_extended_copy_lid1_data *data;
+ struct ctl_lun *lun;
+ struct tpc_list *list, *tlist;
+ uint8_t *ptr;
+ char *value;
+ int len, off, lencscd, lenseg, leninl, nseg;
+
+ CTL_DEBUG_PRINT(("ctl_extended_copy_lid1\n"));
+
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+ cdb = (struct scsi_extended_copy *)ctsio->cdb;
+ len = scsi_4btoul(cdb->length);
+
+ if (len < sizeof(struct scsi_extended_copy_lid1_data) ||
+ len > sizeof(struct scsi_extended_copy_lid1_data) +
+ TPC_MAX_LIST + TPC_MAX_INLINE) {
+ ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
+ /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
+ goto done;
+ }
+
+ /*
+ * If we've got a kernel request that hasn't been malloced yet,
+ * malloc it and tell the caller the data buffer is here.
+ */
+ if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
+ ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
+ ctsio->kern_data_len = len;
+ ctsio->kern_total_len = len;
+ ctsio->kern_data_resid = 0;
+ ctsio->kern_rel_offset = 0;
+ ctsio->kern_sg_entries = 0;
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+ ctsio->be_move_done = ctl_config_move_done;
+ ctl_datamove((union ctl_io *)ctsio);
+
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ data = (struct scsi_extended_copy_lid1_data *)ctsio->kern_data_ptr;
+ lencscd = scsi_2btoul(data->cscd_list_length);
+ lenseg = scsi_4btoul(data->segment_list_length);
+ leninl = scsi_4btoul(data->inline_data_length);
+ if (len < sizeof(struct scsi_extended_copy_lid1_data) +
+ lencscd + lenseg + leninl ||
+ leninl > TPC_MAX_INLINE) {
+ ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
+ /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0);
+ goto done;
+ }
+ if (lencscd > TPC_MAX_CSCDS * sizeof(struct scsi_ec_cscd)) {
+ ctl_set_sense(ctsio, /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
+ /*asc*/ 0x26, /*ascq*/ 0x06, SSD_ELEM_NONE);
+ goto done;
+ }
+ if (lencscd + lenseg > TPC_MAX_LIST) {
+ ctl_set_param_len_error(ctsio);
+ goto done;
+ }
+
+ list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
+ list->service_action = cdb->service_action;
+ value = ctl_get_opt(&lun->be_lun->options, "insecure_tpc");
+ if (value != NULL && strcmp(value, "on") == 0)
+ list->init_port = -1;
+ else
+ list->init_port = ctsio->io_hdr.nexus.targ_port;
+ list->init_idx = ctl_get_resindex(&ctsio->io_hdr.nexus);
+ list->list_id = data->list_identifier;
+ list->flags = data->flags;
+ list->params = ctsio->kern_data_ptr;
+ list->cscd = (struct scsi_ec_cscd *)&data->data[0];
+ ptr = &data->data[lencscd];
+ for (nseg = 0, off = 0; off < lenseg; nseg++) {
+ if (nseg >= TPC_MAX_SEGS) {
+ free(list, M_CTL);
+ ctl_set_sense(ctsio, /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
+ /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
+ goto done;
+ }
+ list->seg[nseg] = (struct scsi_ec_segment *)(ptr + off);
+ off += sizeof(struct scsi_ec_segment) +
+ scsi_2btoul(list->seg[nseg]->descr_length);
+ }
+ list->inl = &data->data[lencscd + lenseg];
+ list->ncscd = lencscd / sizeof(struct scsi_ec_cscd);
+ list->nseg = nseg;
+ list->leninl = leninl;
+ list->ctsio = ctsio;
+ list->lun = lun;
+ mtx_lock(&lun->lun_lock);
+ if ((list->flags & EC_LIST_ID_USAGE_MASK) != EC_LIST_ID_USAGE_NONE) {
+ TAILQ_FOREACH(tlist, &lun->tpc_lists, links) {
+ if ((tlist->flags & EC_LIST_ID_USAGE_MASK) !=
+ EC_LIST_ID_USAGE_NONE &&
+ tlist->list_id == list->list_id)
+ break;
+ }
+ if (tlist != NULL && !tlist->completed) {
+ mtx_unlock(&lun->lun_lock);
+ free(list, M_CTL);
+ ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
+ /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
+ /*bit*/ 0);
+ goto done;
+ }
+ if (tlist != NULL) {
+ TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
+ free(tlist, M_CTL);
+ }
+ }
+ TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
+ mtx_unlock(&lun->lun_lock);
+
+ tpc_process(list);
+ return (CTL_RETVAL_COMPLETE);
+
+done:
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+}
+
+int
+ctl_extended_copy_lid4(struct ctl_scsiio *ctsio)
+{
+ struct scsi_extended_copy *cdb;
+ struct scsi_extended_copy_lid4_data *data;
+ struct ctl_lun *lun;
+ struct tpc_list *list, *tlist;
+ uint8_t *ptr;
+ char *value;
+ int len, off, lencscd, lenseg, leninl, nseg;
+
+ CTL_DEBUG_PRINT(("ctl_extended_copy_lid4\n"));
+
+ lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
+ cdb = (struct scsi_extended_copy *)ctsio->cdb;
+ len = scsi_4btoul(cdb->length);
+
+ if (len < sizeof(struct scsi_extended_copy_lid4_data) ||
+ len > sizeof(struct scsi_extended_copy_lid4_data) +
+ TPC_MAX_LIST + TPC_MAX_INLINE) {
+ ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
+ /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
+ goto done;
+ }
+
+ /*
+ * If we've got a kernel request that hasn't been malloced yet,
+ * malloc it and tell the caller the data buffer is here.
+ */
+ if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
+ ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
+ ctsio->kern_data_len = len;
+ ctsio->kern_total_len = len;
+ ctsio->kern_data_resid = 0;
+ ctsio->kern_rel_offset = 0;
+ ctsio->kern_sg_entries = 0;
+ ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
+ ctsio->be_move_done = ctl_config_move_done;
+ ctl_datamove((union ctl_io *)ctsio);
+
+ return (CTL_RETVAL_COMPLETE);
+ }
+
+ data = (struct scsi_extended_copy_lid4_data *)ctsio->kern_data_ptr;
+ lencscd = scsi_2btoul(data->cscd_list_length);
+ lenseg = scsi_2btoul(data->segment_list_length);
+ leninl = scsi_2btoul(data->inline_data_length);
+ if (len < sizeof(struct scsi_extended_copy_lid4_data) +
+ lencscd + lenseg + leninl ||
+ leninl > TPC_MAX_INLINE) {
+ ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
+ /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0);
+ goto done;
+ }
+ if (lencscd > TPC_MAX_CSCDS * sizeof(struct scsi_ec_cscd)) {
+ ctl_set_sense(ctsio, /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
+ /*asc*/ 0x26, /*ascq*/ 0x06, SSD_ELEM_NONE);
+ goto done;
+ }
+ if (lencscd + lenseg > TPC_MAX_LIST) {
+ ctl_set_param_len_error(ctsio);
+ goto done;
+ }
+
+ list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
+ list->service_action = cdb->service_action;
+ value = ctl_get_opt(&lun->be_lun->options, "insecure_tpc");
+ if (value != NULL && strcmp(value, "on") == 0)
+ list->init_port = -1;
+ else
+ list->init_port = ctsio->io_hdr.nexus.targ_port;
+ list->init_idx = ctl_get_resindex(&ctsio->io_hdr.nexus);
+ list->list_id = scsi_4btoul(data->list_identifier);
+ list->flags = data->flags;
+ list->params = ctsio->kern_data_ptr;
+ list->cscd = (struct scsi_ec_cscd *)&data->data[0];
+ ptr = &data->data[lencscd];
+ for (nseg = 0, off = 0; off < lenseg; nseg++) {
+ if (nseg >= TPC_MAX_SEGS) {
+ free(list, M_CTL);
+ ctl_set_sense(ctsio, /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
+ /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
+ goto done;
+ }
+ list->seg[nseg] = (struct scsi_ec_segment *)(ptr + off);
+ off += sizeof(struct scsi_ec_segment) +
+ scsi_2btoul(list->seg[nseg]->descr_length);
+ }
+ list->inl = &data->data[lencscd + lenseg];
+ list->ncscd = lencscd / sizeof(struct scsi_ec_cscd);
+ list->nseg = nseg;
+ list->leninl = leninl;
+ list->ctsio = ctsio;
+ list->lun = lun;
+ mtx_lock(&lun->lun_lock);
+ if ((list->flags & EC_LIST_ID_USAGE_MASK) != EC_LIST_ID_USAGE_NONE) {
+ TAILQ_FOREACH(tlist, &lun->tpc_lists, links) {
+ if ((tlist->flags & EC_LIST_ID_USAGE_MASK) !=
+ EC_LIST_ID_USAGE_NONE &&
+ tlist->list_id == list->list_id)
+ break;
+ }
+ if (tlist != NULL && !tlist->completed) {
+ mtx_unlock(&lun->lun_lock);
+ free(list, M_CTL);
+ ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
+ /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
+ /*bit*/ 0);
+ goto done;
+ }
+ if (tlist != NULL) {
+ TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
+ free(tlist, M_CTL);
+ }
+ }
+ TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
+ mtx_unlock(&lun->lun_lock);
+
+ tpc_process(list);
+ return (CTL_RETVAL_COMPLETE);
+
+done:
+ ctl_done((union ctl_io *)ctsio);
+ return (CTL_RETVAL_COMPLETE);
+}
+
diff --git a/sys/cam/ctl/ctl_tpc.h b/sys/cam/ctl/ctl_tpc.h
new file mode 100644
index 0000000000000..ecbaec105d304
--- /dev/null
+++ b/sys/cam/ctl/ctl_tpc.h
@@ -0,0 +1,38 @@
+/*-
+ * Copyright (c) 2014 Alexander Motin <mav@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _CTL_TPC_H
+#define _CTL_TPC_H 1
+
+void tpc_done(union ctl_io *io);
+
+uint64_t tpcl_resolve(int init_port, struct scsi_ec_cscd *cscd, uint32_t *ss);
+union ctl_io * tpcl_alloc_io(void);
+int tpcl_queue(union ctl_io *io, uint64_t lun);
+
+#endif /* _CTL_TPC_H */
diff --git a/sys/cam/ctl/ctl_tpc_local.c b/sys/cam/ctl/ctl_tpc_local.c
new file mode 100644
index 0000000000000..8fb797865ad2f
--- /dev/null
+++ b/sys/cam/ctl/ctl_tpc_local.c
@@ -0,0 +1,387 @@
+/*-
+ * Copyright (c) 2014 Alexander Motin <mav@FreeBSD.org>
+ * Copyright (c) 2004, 2005 Silicon Graphics International Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/types.h>
+#include <sys/lock.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/condvar.h>
+#include <sys/malloc.h>
+#include <sys/conf.h>
+#include <sys/queue.h>
+#include <sys/sysctl.h>
+
+#include <cam/cam.h>
+#include <cam/scsi/scsi_all.h>
+#include <cam/scsi/scsi_da.h>
+#include <cam/ctl/ctl_io.h>
+#include <cam/ctl/ctl.h>
+#include <cam/ctl/ctl_frontend.h>
+#include <cam/ctl/ctl_frontend_internal.h>
+#include <cam/ctl/ctl_util.h>
+#include <cam/ctl/ctl_backend.h>
+#include <cam/ctl/ctl_ioctl.h>
+#include <cam/ctl/ctl_ha.h>
+#include <cam/ctl/ctl_private.h>
+#include <cam/ctl/ctl_debug.h>
+#include <cam/ctl/ctl_scsi_all.h>
+#include <cam/ctl/ctl_tpc.h>
+#include <cam/ctl/ctl_error.h>
+
+struct tpcl_softc {
+ struct ctl_port port;
+ int cur_tag_num;
+};
+
+extern struct ctl_softc *control_softc;
+static struct tpcl_softc tpcl_softc;
+
+static int tpcl_init(void);
+static void tpcl_shutdown(void);
+static void tpcl_online(void *arg);
+static void tpcl_offline(void *arg);
+static int tpcl_lun_enable(void *arg, struct ctl_id target_id, int lun_id);
+static int tpcl_lun_disable(void *arg, struct ctl_id target_id, int lun_id);
+static void tpcl_datamove(union ctl_io *io);
+static void tpcl_done(union ctl_io *io);
+
+
+static struct ctl_frontend tpcl_frontend =
+{
+ .name = "tpc",
+ .init = tpcl_init,
+ .shutdown = tpcl_shutdown,
+};
+CTL_FRONTEND_DECLARE(ctltpc, tpcl_frontend);
+
+static int
+tpcl_init(void)
+{
+ struct ctl_softc *softc = control_softc;
+ struct tpcl_softc *tsoftc = &tpcl_softc;
+ struct ctl_port *port;
+ struct scsi_transportid_spi *tid;
+ int len;
+
+ memset(tsoftc, 0, sizeof(*tsoftc));
+
+ port = &tsoftc->port;
+ port->frontend = &tpcl_frontend;
+ port->port_type = CTL_PORT_INTERNAL;
+ port->num_requested_ctl_io = 100;
+ port->port_name = "tpc";
+ port->port_online = tpcl_online;
+ port->port_offline = tpcl_offline;
+ port->onoff_arg = tsoftc;
+ port->lun_enable = tpcl_lun_enable;
+ port->lun_disable = tpcl_lun_disable;
+ port->targ_lun_arg = tsoftc;
+ port->fe_datamove = tpcl_datamove;
+ port->fe_done = tpcl_done;
+ port->max_targets = 1;
+ port->max_target_id = 0;
+ port->max_initiators = 1;
+
+ if (ctl_port_register(port, (softc->flags & CTL_FLAG_MASTER_SHELF)) != 0)
+ {
+ printf("%s: tpc frontend registration failed\n", __func__);
+ return (0);
+ }
+
+ len = sizeof(struct scsi_transportid_spi);
+ port->init_devid = malloc(sizeof(struct ctl_devid) + len,
+ M_CTL, M_WAITOK | M_ZERO);
+ port->init_devid->len = len;
+ tid = (struct scsi_transportid_spi *)port->init_devid->data;
+ tid->format_protocol = SCSI_TRN_SPI_FORMAT_DEFAULT | SCSI_PROTO_SPI;
+ scsi_ulto2b(0, tid->scsi_addr);
+ scsi_ulto2b(port->targ_port, tid->rel_trgt_port_id);
+
+ ctl_port_online(port);
+ return (0);
+}
+
+void
+tpcl_shutdown(void)
+{
+ struct tpcl_softc *tsoftc = &tpcl_softc;
+ struct ctl_port *port;
+
+ port = &tsoftc->port;
+ ctl_port_offline(port);
+ if (ctl_port_deregister(&tsoftc->port) != 0)
+ printf("%s: ctl_frontend_deregister() failed\n", __func__);
+}
+
+static void
+tpcl_online(void *arg)
+{
+}
+
+static void
+tpcl_offline(void *arg)
+{
+}
+
+static int
+tpcl_lun_enable(void *arg, struct ctl_id target_id, int lun_id)
+{
+
+ return (0);
+}
+
+static int
+tpcl_lun_disable(void *arg, struct ctl_id target_id, int lun_id)
+{
+
+ return (0);
+}
+
+static void
+tpcl_datamove(union ctl_io *io)
+{
+ struct ctl_sg_entry *ext_sglist, *kern_sglist;
+ struct ctl_sg_entry ext_entry, kern_entry;
+ int ext_sg_entries, kern_sg_entries;
+ int ext_sg_start, ext_offset;
+ int len_to_copy, len_copied;
+ int kern_watermark, ext_watermark;
+ struct ctl_scsiio *ctsio;
+ int i, j;
+
+ ext_sg_start = 0;
+ ext_offset = 0;
+ ext_sglist = NULL;
+
+ CTL_DEBUG_PRINT(("%s\n", __func__));
+
+ ctsio = &io->scsiio;
+
+ /*
+ * If this is the case, we're probably doing a BBR read and don't
+ * actually need to transfer the data. This will effectively
+ * bit-bucket the data.
+ */
+ if (ctsio->ext_data_ptr == NULL)
+ goto bailout;
+
+ /*
+ * To simplify things here, if we have a single buffer, stick it in
+ * a S/G entry and just make it a single entry S/G list.
+ */
+ if (ctsio->io_hdr.flags & CTL_FLAG_EDPTR_SGLIST) {
+ int len_seen;
+
+ ext_sglist = (struct ctl_sg_entry *)ctsio->ext_data_ptr;
+ ext_sg_entries = ctsio->ext_sg_entries;
+ ext_sg_start = 0;
+ ext_offset = 0;
+ len_seen = 0;
+ for (i = 0; i < ext_sg_entries; i++) {
+ if ((len_seen + ext_sglist[i].len) >=
+ ctsio->ext_data_filled) {
+ ext_sg_start = i;
+ ext_offset = ctsio->ext_data_filled - len_seen;
+ break;
+ }
+ len_seen += ext_sglist[i].len;
+ }
+ } else {
+ ext_sglist = &ext_entry;
+ ext_sglist->addr = ctsio->ext_data_ptr;
+ ext_sglist->len = ctsio->ext_data_len;
+ ext_sg_entries = 1;
+ ext_sg_start = 0;
+ ext_offset = ctsio->ext_data_filled;
+ }
+
+ if (ctsio->kern_sg_entries > 0) {
+ kern_sglist = (struct ctl_sg_entry *)ctsio->kern_data_ptr;
+ kern_sg_entries = ctsio->kern_sg_entries;
+ } else {
+ kern_sglist = &kern_entry;
+ kern_sglist->addr = ctsio->kern_data_ptr;
+ kern_sglist->len = ctsio->kern_data_len;
+ kern_sg_entries = 1;
+ }
+
+ kern_watermark = 0;
+ ext_watermark = ext_offset;
+ len_copied = 0;
+ for (i = ext_sg_start, j = 0;
+ i < ext_sg_entries && j < kern_sg_entries;) {
+ uint8_t *ext_ptr, *kern_ptr;
+
+ len_to_copy = min(ext_sglist[i].len - ext_watermark,
+ kern_sglist[j].len - kern_watermark);
+
+ ext_ptr = (uint8_t *)ext_sglist[i].addr;
+ ext_ptr = ext_ptr + ext_watermark;
+ if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
+ /*
+ * XXX KDM fix this!
+ */
+ panic("need to implement bus address support");
+#if 0
+ kern_ptr = bus_to_virt(kern_sglist[j].addr);
+#endif
+ } else
+ kern_ptr = (uint8_t *)kern_sglist[j].addr;
+ kern_ptr = kern_ptr + kern_watermark;
+
+ kern_watermark += len_to_copy;
+ ext_watermark += len_to_copy;
+
+ if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
+ CTL_FLAG_DATA_IN) {
+ CTL_DEBUG_PRINT(("%s: copying %d bytes to user\n",
+ __func__, len_to_copy));
+ CTL_DEBUG_PRINT(("%s: from %p to %p\n", __func__,
+ kern_ptr, ext_ptr));
+ memcpy(ext_ptr, kern_ptr, len_to_copy);
+ } else {
+ CTL_DEBUG_PRINT(("%s: copying %d bytes from user\n",
+ __func__, len_to_copy));
+ CTL_DEBUG_PRINT(("%s: from %p to %p\n", __func__,
+ ext_ptr, kern_ptr));
+ memcpy(kern_ptr, ext_ptr, len_to_copy);
+ }
+
+ len_copied += len_to_copy;
+
+ if (ext_sglist[i].len == ext_watermark) {
+ i++;
+ ext_watermark = 0;
+ }
+
+ if (kern_sglist[j].len == kern_watermark) {
+ j++;
+ kern_watermark = 0;
+ }
+ }
+
+ ctsio->ext_data_filled += len_copied;
+
+ CTL_DEBUG_PRINT(("%s: ext_sg_entries: %d, kern_sg_entries: %d\n",
+ __func__, ext_sg_entries, kern_sg_entries));
+ CTL_DEBUG_PRINT(("%s: ext_data_len = %d, kern_data_len = %d\n",
+ __func__, ctsio->ext_data_len, ctsio->kern_data_len));
+
+ /* XXX KDM set residual?? */
+bailout:
+ io->scsiio.be_move_done(io);
+}
+
+static void
+tpcl_done(union ctl_io *io)
+{
+
+ tpc_done(io);
+}
+
+uint64_t
+tpcl_resolve(int init_port, struct scsi_ec_cscd *cscd, uint32_t *ss)
+{
+ struct ctl_softc *softc = control_softc;
+ struct scsi_ec_cscd_id *cscdid;
+ struct ctl_port *port;
+ struct ctl_lun *lun;
+ uint64_t lunid = UINT64_MAX, l;
+ int i;
+
+ if (cscd->type_code != EC_CSCD_ID)
+ return (lunid);
+
+ cscdid = (struct scsi_ec_cscd_id *)cscd;
+ mtx_lock(&softc->ctl_lock);
+ if (init_port >= 0) {
+ port = softc->ctl_ports[ctl_port_idx(init_port)];
+ if (port == NULL || port->lun_map == NULL)
+ init_port = -1;
+ }
+ if (init_port < 0) {
+ STAILQ_FOREACH(lun, &softc->lun_list, links) {
+ if (lun->lun_devid == NULL)
+ continue;
+ if (scsi_devid_match(lun->lun_devid->data,
+ lun->lun_devid->len, &cscdid->codeset,
+ cscdid->length + 4) == 0) {
+ lunid = lun->lun;
+ if (ss && lun->be_lun)
+ *ss = lun->be_lun->blocksize;
+ break;
+ }
+ }
+ } else {
+ for (i = 0; i < CTL_MAX_LUNS; i++) {
+ l = port->lun_map(port->targ_lun_arg, i);
+ if (l >= CTL_MAX_LUNS)
+ continue;
+ lun = softc->ctl_luns[l];
+ if (lun == NULL || lun->lun_devid == NULL)
+ continue;
+ if (scsi_devid_match(lun->lun_devid->data,
+ lun->lun_devid->len, &cscdid->codeset,
+ cscdid->length + 4) == 0) {
+ lunid = lun->lun;
+ if (ss && lun->be_lun)
+ *ss = lun->be_lun->blocksize;
+ break;
+ }
+ }
+ }
+ mtx_unlock(&softc->ctl_lock);
+ return (lunid);
+};
+
+union ctl_io *
+tpcl_alloc_io(void)
+{
+ struct tpcl_softc *tsoftc = &tpcl_softc;
+
+ return (ctl_alloc_io(tsoftc->port.ctl_pool_ref));
+};
+
+int
+tpcl_queue(union ctl_io *io, uint64_t lun)
+{
+ struct tpcl_softc *tsoftc = &tpcl_softc;
+
+ io->io_hdr.nexus.initid.id = 0;
+ io->io_hdr.nexus.targ_port = tsoftc->port.targ_port;
+ io->io_hdr.nexus.targ_target.id = 0;
+ io->io_hdr.nexus.targ_lun = lun;
+ io->scsiio.tag_num = atomic_fetchadd_int(&tsoftc->cur_tag_num, 1);
+ io->scsiio.ext_data_filled = 0;
+ return (ctl_queue(io));
+}
diff --git a/sys/cam/ctl/ctl_util.c b/sys/cam/ctl/ctl_util.c
index 3ca0aa243fcec..b32fbd832762b 100644
--- a/sys/cam/ctl/ctl_util.c
+++ b/sys/cam/ctl/ctl_util.c
@@ -84,6 +84,7 @@ static struct ctl_task_desc ctl_task_table[] = {
{CTL_TASK_ABORT_TASK_SET, "Abort Task Set"},
{CTL_TASK_CLEAR_ACA, "Clear ACA"},
{CTL_TASK_CLEAR_TASK_SET, "Clear Task Set"},
+ {CTL_TASK_I_T_NEXUS_RESET, "I_T Nexus Reset"},
{CTL_TASK_LUN_RESET, "LUN Reset"},
{CTL_TASK_TARGET_RESET, "Target Reset"},
{CTL_TASK_BUS_RESET, "Bus Reset"},
diff --git a/sys/cam/ctl/scsi_ctl.c b/sys/cam/ctl/scsi_ctl.c
index 2ed8d2800f0d4..3529683f6208a 100644
--- a/sys/cam/ctl/scsi_ctl.c
+++ b/sys/cam/ctl/scsi_ctl.c
@@ -77,7 +77,7 @@ typedef enum {
} ctlfe_ccb_types;
struct ctlfe_softc {
- struct ctl_frontend fe;
+ struct ctl_port port;
path_id_t path_id;
u_int maxio;
struct cam_sim *sim;
@@ -198,7 +198,7 @@ MALLOC_DEFINE(M_CTLFE, "CAM CTL FE", "CAM CTL FE interface");
int ctlfeinitialize(void);
void ctlfeshutdown(void);
-static periph_init_t ctlfeinit;
+static periph_init_t ctlfeperiphinit;
static void ctlfeasync(void *callback_arg, uint32_t code,
struct cam_path *path, void *arg);
static periph_ctor_t ctlferegister;
@@ -211,8 +211,6 @@ static void ctlfedone(struct cam_periph *periph,
static void ctlfe_onoffline(void *arg, int online);
static void ctlfe_online(void *arg);
static void ctlfe_offline(void *arg);
-static int ctlfe_targ_enable(void *arg, struct ctl_id targ_id);
-static int ctlfe_targ_disable(void *arg, struct ctl_id targ_id);
static int ctlfe_lun_enable(void *arg, struct ctl_id targ_id,
int lun_id);
static int ctlfe_lun_disable(void *arg, struct ctl_id targ_id,
@@ -225,26 +223,18 @@ static void ctlfe_dump(void);
static struct periph_driver ctlfe_driver =
{
- ctlfeinit, "ctl",
+ ctlfeperiphinit, "ctl",
TAILQ_HEAD_INITIALIZER(ctlfe_driver.units), /*generation*/ 0
};
-static int ctlfe_module_event_handler(module_t, int /*modeventtype_t*/, void *);
-
-/*
- * We're not using PERIPHDRIVER_DECLARE(), because it runs at SI_SUB_DRIVERS,
- * and that happens before CTL gets initialised.
- */
-static moduledata_t ctlfe_moduledata = {
- "ctlfe",
- ctlfe_module_event_handler,
- NULL
+static struct ctl_frontend ctlfe_frontend =
+{
+ .name = "camtarget",
+ .init = ctlfeinitialize,
+ .fe_dump = ctlfe_dump,
+ .shutdown = ctlfeshutdown,
};
-
-DECLARE_MODULE(ctlfe, ctlfe_moduledata, SI_SUB_CONFIGURE, SI_ORDER_FOURTH);
-MODULE_VERSION(ctlfe, 1);
-MODULE_DEPEND(ctlfe, ctl, 1, 1, 1);
-MODULE_DEPEND(ctlfe, cam, 1, 1, 1);
+CTL_FRONTEND_DECLARE(ctlfe, ctlfe_frontend);
extern struct ctl_softc *control_softc;
@@ -254,41 +244,29 @@ ctlfeshutdown(void)
return;
}
-void
-ctlfeinit(void)
+int
+ctlfeinitialize(void)
{
- cam_status status;
STAILQ_INIT(&ctlfe_softc_list);
-
mtx_init(&ctlfe_list_mtx, ctlfe_mtx_desc, NULL, MTX_DEF);
+ periphdriver_register(&ctlfe_driver);
+ return (0);
+}
- KASSERT(control_softc != NULL, ("CTL is not initialized!"));
+void
+ctlfeperiphinit(void)
+{
+ cam_status status;
status = xpt_register_async(AC_PATH_REGISTERED | AC_PATH_DEREGISTERED |
AC_CONTRACT, ctlfeasync, NULL, NULL);
-
if (status != CAM_REQ_CMP) {
printf("ctl: Failed to attach async callback due to CAM "
"status 0x%x!\n", status);
}
}
-static int
-ctlfe_module_event_handler(module_t mod, int what, void *arg)
-{
-
- switch (what) {
- case MOD_LOAD:
- periphdriver_register(&ctlfe_driver);
- return (0);
- case MOD_UNLOAD:
- return (EBUSY);
- default:
- return (EOPNOTSUPP);
- }
-}
-
static void
ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
{
@@ -304,7 +282,7 @@ ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
*/
switch (code) {
case AC_PATH_REGISTERED: {
- struct ctl_frontend *fe;
+ struct ctl_port *port;
struct ctlfe_softc *bus_softc;
struct ccb_pathinq *cpi;
int retval;
@@ -386,56 +364,56 @@ ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
MTX_DEF);
STAILQ_INIT(&bus_softc->lun_softc_list);
- fe = &bus_softc->fe;
+ port = &bus_softc->port;
+ port->frontend = &ctlfe_frontend;
/*
* XXX KDM should we be more accurate here ?
*/
if (cpi->transport == XPORT_FC)
- fe->port_type = CTL_PORT_FC;
+ port->port_type = CTL_PORT_FC;
+ else if (cpi->transport == XPORT_SAS)
+ port->port_type = CTL_PORT_SAS;
else
- fe->port_type = CTL_PORT_SCSI;
+ port->port_type = CTL_PORT_SCSI;
/* XXX KDM what should the real number be here? */
- fe->num_requested_ctl_io = 4096;
+ port->num_requested_ctl_io = 4096;
snprintf(bus_softc->port_name, sizeof(bus_softc->port_name),
"%s%d", cpi->dev_name, cpi->unit_number);
/*
* XXX KDM it would be nice to allocate storage in the
* frontend structure itself.
*/
- fe->port_name = bus_softc->port_name;
- fe->physical_port = cpi->unit_number;
- fe->virtual_port = cpi->bus_id;
- fe->port_online = ctlfe_online;
- fe->port_offline = ctlfe_offline;
- fe->onoff_arg = bus_softc;
- fe->targ_enable = ctlfe_targ_enable;
- fe->targ_disable = ctlfe_targ_disable;
- fe->lun_enable = ctlfe_lun_enable;
- fe->lun_disable = ctlfe_lun_disable;
- fe->targ_lun_arg = bus_softc;
- fe->fe_datamove = ctlfe_datamove_done;
- fe->fe_done = ctlfe_datamove_done;
- fe->fe_dump = ctlfe_dump;
+ port->port_name = bus_softc->port_name;
+ port->physical_port = cpi->unit_number;
+ port->virtual_port = cpi->bus_id;
+ port->port_online = ctlfe_online;
+ port->port_offline = ctlfe_offline;
+ port->onoff_arg = bus_softc;
+ port->lun_enable = ctlfe_lun_enable;
+ port->lun_disable = ctlfe_lun_disable;
+ port->targ_lun_arg = bus_softc;
+ port->fe_datamove = ctlfe_datamove_done;
+ port->fe_done = ctlfe_datamove_done;
/*
* XXX KDM the path inquiry doesn't give us the maximum
* number of targets supported.
*/
- fe->max_targets = cpi->max_target;
- fe->max_target_id = cpi->max_target;
+ port->max_targets = cpi->max_target;
+ port->max_target_id = cpi->max_target;
/*
* XXX KDM need to figure out whether we're the master or
* slave.
*/
#ifdef CTLFEDEBUG
- printf("%s: calling ctl_frontend_register() for %s%d\n",
+ printf("%s: calling ctl_port_register() for %s%d\n",
__func__, cpi->dev_name, cpi->unit_number);
#endif
- retval = ctl_frontend_register(fe, /*master_SC*/ 1);
+ retval = ctl_port_register(port, /*master_SC*/ 1);
if (retval != 0) {
- printf("%s: ctl_frontend_register() failed with "
+ printf("%s: ctl_port_register() failed with "
"error %d!\n", __func__, retval);
mtx_destroy(&bus_softc->lun_softc_mtx);
free(bus_softc, M_CTLFE);
@@ -466,7 +444,7 @@ ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
* XXX KDM are we certain at this point that there
* are no outstanding commands for this frontend?
*/
- ctl_frontend_deregister(&softc->fe);
+ ctl_port_deregister(&softc->port);
mtx_destroy(&softc->lun_softc_mtx);
free(softc, M_CTLFE);
}
@@ -508,18 +486,18 @@ ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
break;
}
if (dev_chg->arrived != 0) {
- retval = ctl_add_initiator(dev_chg->wwpn,
- softc->fe.targ_port, dev_chg->target);
+ retval = ctl_add_initiator(&softc->port,
+ dev_chg->target, dev_chg->wwpn, NULL);
} else {
- retval = ctl_remove_initiator(
- softc->fe.targ_port, dev_chg->target);
+ retval = ctl_remove_initiator(&softc->port,
+ dev_chg->target);
}
- if (retval != 0) {
+ if (retval < 0) {
printf("%s: could not %s port %d iid %u "
"WWPN %#jx!\n", __func__,
(dev_chg->arrived != 0) ? "add" :
- "remove", softc->fe.targ_port,
+ "remove", softc->port.targ_port,
dev_chg->target,
(uintmax_t)dev_chg->wwpn);
}
@@ -826,8 +804,8 @@ ctlfestart(struct cam_periph *periph, union ccb *start_ccb)
if (io == NULL) {
scsi_status = SCSI_STATUS_BUSY;
csio->sense_len = 0;
- } else if ((io->io_hdr.status & CTL_STATUS_MASK) ==
- CTL_CMD_ABORTED) {
+ } else if ((io->io_hdr.flags & CTL_FLAG_ABORT) &&
+ (io->io_hdr.flags & CTL_FLAG_ABORT_STATUS) == 0) {
io->io_hdr.flags &= ~CTL_FLAG_STATUS_QUEUED;
/*
@@ -1206,7 +1184,7 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
* Allocate a ctl_io, pass it to CTL, and wait for the
* datamove or done.
*/
- io = ctl_alloc_io(bus_softc->fe.ctl_pool_ref);
+ io = ctl_alloc_io(bus_softc->port.ctl_pool_ref);
if (io == NULL) {
atio->ccb_h.flags &= ~CAM_DIR_MASK;
atio->ccb_h.flags |= CAM_DIR_NONE;
@@ -1239,7 +1217,7 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
*/
io->io_hdr.io_type = CTL_IO_SCSI;
io->io_hdr.nexus.initid.id = atio->init_id;
- io->io_hdr.nexus.targ_port = bus_softc->fe.targ_port;
+ io->io_hdr.nexus.targ_port = bus_softc->port.targ_port;
io->io_hdr.nexus.targ_target.id = atio->ccb_h.target_id;
io->io_hdr.nexus.targ_lun = atio->ccb_h.target_lun;
io->scsiio.tag_num = atio->tag_id;
@@ -1511,7 +1489,7 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
"seq %#x\n", __func__, inot->ccb_h.status,
inot->tag_id, inot->seq_id);
- io = ctl_alloc_io(bus_softc->fe.ctl_pool_ref);
+ io = ctl_alloc_io(bus_softc->port.ctl_pool_ref);
if (io != NULL) {
int send_ctl_io;
@@ -1522,7 +1500,7 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr =done_ccb;
inot->ccb_h.io_ptr = io;
io->io_hdr.nexus.initid.id = inot->initiator_id;
- io->io_hdr.nexus.targ_port = bus_softc->fe.targ_port;
+ io->io_hdr.nexus.targ_port = bus_softc->port.targ_port;
io->io_hdr.nexus.targ_target.id = inot->ccb_h.target_id;
io->io_hdr.nexus.targ_lun = inot->ccb_h.target_lun;
/* XXX KDM should this be the tag_id? */
@@ -1734,7 +1712,7 @@ ctlfe_onoffline(void *arg, int online)
* This should be replaced later with ddb_GetWWNN,
* or possibly a more centralized scheme. (It
* would be nice to have the WWNN/WWPN for each
- * port stored in the ctl_frontend structure.)
+ * port stored in the ctl_port structure.)
*/
#ifdef RANDOM_WWNN
ccb->knob.xport_specific.fc.wwnn =
@@ -1747,7 +1725,7 @@ ctlfe_onoffline(void *arg, int online)
0x0000000fffffff00ULL) |
/* Company ID */ 0x5000ED5000000000ULL |
/* NL-Port */ 0x3000 |
- /* Port Num */ (bus_softc->fe.targ_port & 0xff);
+ /* Port Num */ (bus_softc->port.targ_port & 0xff);
/*
* This is a bit of an API break/reversal, but if
@@ -1756,10 +1734,9 @@ ctlfe_onoffline(void *arg, int online)
* using with the frontend code so it's reported
* accurately.
*/
- bus_softc->fe.wwnn =
- ccb->knob.xport_specific.fc.wwnn;
- bus_softc->fe.wwpn =
- ccb->knob.xport_specific.fc.wwpn;
+ ctl_port_set_wwns(&bus_softc->port,
+ true, ccb->knob.xport_specific.fc.wwnn,
+ true, ccb->knob.xport_specific.fc.wwpn);
set_wwnn = 1;
#else /* RANDOM_WWNN */
/*
@@ -1767,18 +1744,17 @@ ctlfe_onoffline(void *arg, int online)
* down to the SIM. Otherwise, record what the SIM
* has reported.
*/
- if ((bus_softc->fe.wwnn != 0)
- && (bus_softc->fe.wwpn != 0)) {
+ if ((bus_softc->port.wwnn != 0)
+ && (bus_softc->port.wwpn != 0)) {
ccb->knob.xport_specific.fc.wwnn =
- bus_softc->fe.wwnn;
+ bus_softc->port.wwnn;
ccb->knob.xport_specific.fc.wwpn =
- bus_softc->fe.wwpn;
+ bus_softc->port.wwpn;
set_wwnn = 1;
} else {
- bus_softc->fe.wwnn =
- ccb->knob.xport_specific.fc.wwnn;
- bus_softc->fe.wwpn =
- ccb->knob.xport_specific.fc.wwpn;
+ ctl_port_set_wwns(&bus_softc->port,
+ true, ccb->knob.xport_specific.fc.wwnn,
+ true, ccb->knob.xport_specific.fc.wwpn);
}
#endif /* RANDOM_WWNN */
@@ -1927,18 +1903,6 @@ ctlfe_offline(void *arg)
xpt_free_path(path);
}
-static int
-ctlfe_targ_enable(void *arg, struct ctl_id targ_id)
-{
- return (0);
-}
-
-static int
-ctlfe_targ_disable(void *arg, struct ctl_id targ_id)
-{
- return (0);
-}
-
/*
* This will get called to enable a LUN on every bus that is attached to
* CTL. So we only need to create a path/periph for this particular bus.
diff --git a/sys/cam/scsi/scsi_all.c b/sys/cam/scsi/scsi_all.c
index ff9dc1711c08d..8351aa80f37cb 100644
--- a/sys/cam/scsi/scsi_all.c
+++ b/sys/cam/scsi/scsi_all.c
@@ -44,11 +44,13 @@ __FBSDID("$FreeBSD$");
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
+#include <sys/ctype.h>
#else
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <ctype.h>
#endif
#include <cam/cam.h>
@@ -471,7 +473,8 @@ static struct op_table_entry scsi_op_codes[] = {
*/
/* 88 MM O O O READ(16) */
{ 0x88, D | T | W | O | B, "READ(16)" },
- /* 89 */
+ /* 89 O COMPARE AND WRITE */
+ { 0x89, D, "COMPARE AND WRITE" },
/* 8A OM O O O WRITE(16) */
{ 0x8A, D | T | W | O | B, "WRITE(16)" },
/* 8B O ORWRITE */
@@ -5458,6 +5461,989 @@ scsi_get_devid(struct scsi_vpd_device_id *id, uint32_t page_len,
return (NULL);
}
+int
+scsi_transportid_sbuf(struct sbuf *sb, struct scsi_transportid_header *hdr,
+ uint32_t valid_len)
+{
+ switch (hdr->format_protocol & SCSI_TRN_PROTO_MASK) {
+ case SCSI_PROTO_FC: {
+ struct scsi_transportid_fcp *fcp;
+ uint64_t n_port_name;
+
+ fcp = (struct scsi_transportid_fcp *)hdr;
+
+ n_port_name = scsi_8btou64(fcp->n_port_name);
+
+ sbuf_printf(sb, "FCP address: 0x%.16jx",(uintmax_t)n_port_name);
+ break;
+ }
+ case SCSI_PROTO_SPI: {
+ struct scsi_transportid_spi *spi;
+
+ spi = (struct scsi_transportid_spi *)hdr;
+
+ sbuf_printf(sb, "SPI address: %u,%u",
+ scsi_2btoul(spi->scsi_addr),
+ scsi_2btoul(spi->rel_trgt_port_id));
+ break;
+ }
+ case SCSI_PROTO_SSA:
+ /*
+ * XXX KDM there is no transport ID defined in SPC-4 for
+ * SSA.
+ */
+ break;
+ case SCSI_PROTO_1394: {
+ struct scsi_transportid_1394 *sbp;
+ uint64_t eui64;
+
+ sbp = (struct scsi_transportid_1394 *)hdr;
+
+ eui64 = scsi_8btou64(sbp->eui64);
+ sbuf_printf(sb, "SBP address: 0x%.16jx", (uintmax_t)eui64);
+ break;
+ }
+ case SCSI_PROTO_RDMA: {
+ struct scsi_transportid_rdma *rdma;
+ unsigned int i;
+
+ rdma = (struct scsi_transportid_rdma *)hdr;
+
+ sbuf_printf(sb, "RDMA address: 0x");
+ for (i = 0; i < sizeof(rdma->initiator_port_id); i++)
+ sbuf_printf(sb, "%02x", rdma->initiator_port_id[i]);
+ break;
+ }
+ case SCSI_PROTO_ISCSI: {
+ uint32_t add_len, i;
+ uint8_t *iscsi_name = NULL;
+ int nul_found = 0;
+
+ sbuf_printf(sb, "iSCSI address: ");
+ if ((hdr->format_protocol & SCSI_TRN_FORMAT_MASK) ==
+ SCSI_TRN_ISCSI_FORMAT_DEVICE) {
+ struct scsi_transportid_iscsi_device *dev;
+
+ dev = (struct scsi_transportid_iscsi_device *)hdr;
+
+ /*
+ * Verify how much additional data we really have.
+ */
+ add_len = scsi_2btoul(dev->additional_length);
+ add_len = MIN(add_len, valid_len -
+ __offsetof(struct scsi_transportid_iscsi_device,
+ iscsi_name));
+ iscsi_name = &dev->iscsi_name[0];
+
+ } else if ((hdr->format_protocol & SCSI_TRN_FORMAT_MASK) ==
+ SCSI_TRN_ISCSI_FORMAT_PORT) {
+ struct scsi_transportid_iscsi_port *port;
+
+ port = (struct scsi_transportid_iscsi_port *)hdr;
+
+ add_len = scsi_2btoul(port->additional_length);
+ add_len = MIN(add_len, valid_len -
+ __offsetof(struct scsi_transportid_iscsi_port,
+ iscsi_name));
+ iscsi_name = &port->iscsi_name[0];
+ } else {
+ sbuf_printf(sb, "unknown format %x",
+ (hdr->format_protocol &
+ SCSI_TRN_FORMAT_MASK) >>
+ SCSI_TRN_FORMAT_SHIFT);
+ break;
+ }
+ if (add_len == 0) {
+ sbuf_printf(sb, "not enough data");
+ break;
+ }
+ /*
+ * This is supposed to be a NUL-terminated ASCII
+ * string, but you never know. So we're going to
+ * check. We need to do this because there is no
+ * sbuf equivalent of strncat().
+ */
+ for (i = 0; i < add_len; i++) {
+ if (iscsi_name[i] == '\0') {
+ nul_found = 1;
+ break;
+ }
+ }
+ /*
+ * If there is a NUL in the name, we can just use
+ * sbuf_cat(). Otherwise we need to use sbuf_bcat().
+ */
+ if (nul_found != 0)
+ sbuf_cat(sb, iscsi_name);
+ else
+ sbuf_bcat(sb, iscsi_name, add_len);
+ break;
+ }
+ case SCSI_PROTO_SAS: {
+ struct scsi_transportid_sas *sas;
+ uint64_t sas_addr;
+
+ sas = (struct scsi_transportid_sas *)hdr;
+
+ sas_addr = scsi_8btou64(sas->sas_address);
+ sbuf_printf(sb, "SAS address: 0x%.16jx", (uintmax_t)sas_addr);
+ break;
+ }
+ case SCSI_PROTO_ADITP:
+ case SCSI_PROTO_ATA:
+ case SCSI_PROTO_UAS:
+ /*
+ * No Transport ID format for ADI, ATA or USB is defined in
+ * SPC-4.
+ */
+ sbuf_printf(sb, "No known Transport ID format for protocol "
+ "%#x", hdr->format_protocol & SCSI_TRN_PROTO_MASK);
+ break;
+ case SCSI_PROTO_SOP: {
+ struct scsi_transportid_sop *sop;
+ struct scsi_sop_routing_id_norm *rid;
+
+ sop = (struct scsi_transportid_sop *)hdr;
+ rid = (struct scsi_sop_routing_id_norm *)sop->routing_id;
+
+ /*
+ * Note that there is no alternate format specified in SPC-4
+ * for the PCIe routing ID, so we don't really have a way
+ * to know whether the second byte of the routing ID is
+ * a device and function or just a function. So we just
+ * assume bus,device,function.
+ */
+ sbuf_printf(sb, "SOP Routing ID: %u,%u,%u",
+ rid->bus, rid->devfunc >> SCSI_TRN_SOP_DEV_SHIFT,
+ rid->devfunc & SCSI_TRN_SOP_FUNC_NORM_MAX);
+ break;
+ }
+ case SCSI_PROTO_NONE:
+ default:
+ sbuf_printf(sb, "Unknown protocol %#x",
+ hdr->format_protocol & SCSI_TRN_PROTO_MASK);
+ break;
+ }
+
+ return (0);
+}
+
+struct scsi_nv scsi_proto_map[] = {
+ { "fcp", SCSI_PROTO_FC },
+ { "spi", SCSI_PROTO_SPI },
+ { "ssa", SCSI_PROTO_SSA },
+ { "sbp", SCSI_PROTO_1394 },
+ { "1394", SCSI_PROTO_1394 },
+ { "srp", SCSI_PROTO_RDMA },
+ { "rdma", SCSI_PROTO_RDMA },
+ { "iscsi", SCSI_PROTO_ISCSI },
+ { "iqn", SCSI_PROTO_ISCSI },
+ { "sas", SCSI_PROTO_SAS },
+ { "aditp", SCSI_PROTO_ADITP },
+ { "ata", SCSI_PROTO_ATA },
+ { "uas", SCSI_PROTO_UAS },
+ { "usb", SCSI_PROTO_UAS },
+ { "sop", SCSI_PROTO_SOP }
+};
+
+const char *
+scsi_nv_to_str(struct scsi_nv *table, int num_table_entries, uint64_t value)
+{
+ int i;
+
+ for (i = 0; i < num_table_entries; i++) {
+ if (table[i].value == value)
+ return (table[i].name);
+ }
+
+ return (NULL);
+}
+
+/*
+ * Given a name/value table, find a value matching the given name.
+ * Return values:
+ * SCSI_NV_FOUND - match found
+ * SCSI_NV_AMBIGUOUS - more than one match, none of them exact
+ * SCSI_NV_NOT_FOUND - no match found
+ */
+scsi_nv_status
+scsi_get_nv(struct scsi_nv *table, int num_table_entries,
+ char *name, int *table_entry, scsi_nv_flags flags)
+{
+ int i, num_matches = 0;
+
+ for (i = 0; i < num_table_entries; i++) {
+ size_t table_len, name_len;
+
+ table_len = strlen(table[i].name);
+ name_len = strlen(name);
+
+ if ((((flags & SCSI_NV_FLAG_IG_CASE) != 0)
+ && (strncasecmp(table[i].name, name, name_len) == 0))
+ || (((flags & SCSI_NV_FLAG_IG_CASE) == 0)
+ && (strncmp(table[i].name, name, name_len) == 0))) {
+ *table_entry = i;
+
+ /*
+ * Check for an exact match. If we have the same
+ * number of characters in the table as the argument,
+ * and we already know they're the same, we have
+ * an exact match.
+ */
+ if (table_len == name_len)
+ return (SCSI_NV_FOUND);
+
+ /*
+ * Otherwise, bump up the number of matches. We'll
+ * see later how many we have.
+ */
+ num_matches++;
+ }
+ }
+
+ if (num_matches > 1)
+ return (SCSI_NV_AMBIGUOUS);
+ else if (num_matches == 1)
+ return (SCSI_NV_FOUND);
+ else
+ return (SCSI_NV_NOT_FOUND);
+}
+
+/*
+ * Parse transport IDs for Fibre Channel, 1394 and SAS. Since these are
+ * all 64-bit numbers, the code is similar.
+ */
+int
+scsi_parse_transportid_64bit(int proto_id, char *id_str,
+ struct scsi_transportid_header **hdr,
+ unsigned int *alloc_len,
+#ifdef _KERNEL
+ struct malloc_type *type, int flags,
+#endif
+ char *error_str, int error_str_len)
+{
+ uint64_t value;
+ char *endptr;
+ int retval;
+ size_t alloc_size;
+
+ retval = 0;
+
+ value = strtouq(id_str, &endptr, 0);
+ if (*endptr != '\0') {
+ if (error_str != NULL) {
+ snprintf(error_str, error_str_len, "%s: error "
+ "parsing ID %s, 64-bit number required",
+ __func__, id_str);
+ }
+ retval = 1;
+ goto bailout;
+ }
+
+ switch (proto_id) {
+ case SCSI_PROTO_FC:
+ alloc_size = sizeof(struct scsi_transportid_fcp);
+ break;
+ case SCSI_PROTO_1394:
+ alloc_size = sizeof(struct scsi_transportid_1394);
+ break;
+ case SCSI_PROTO_SAS:
+ alloc_size = sizeof(struct scsi_transportid_sas);
+ break;
+ default:
+ if (error_str != NULL) {
+ snprintf(error_str, error_str_len, "%s: unsupported "
+ "protocol %d", __func__, proto_id);
+ }
+ retval = 1;
+ goto bailout;
+ break; /* NOTREACHED */
+ }
+#ifdef _KERNEL
+ *hdr = malloc(alloc_size, type, flags);
+#else /* _KERNEL */
+ *hdr = malloc(alloc_size);
+#endif /*_KERNEL */
+ if (*hdr == NULL) {
+ if (error_str != NULL) {
+ snprintf(error_str, error_str_len, "%s: unable to "
+ "allocate %zu bytes", __func__, alloc_size);
+ }
+ retval = 1;
+ goto bailout;
+ }
+
+ *alloc_len = alloc_size;
+
+ bzero(*hdr, alloc_size);
+
+ switch (proto_id) {
+ case SCSI_PROTO_FC: {
+ struct scsi_transportid_fcp *fcp;
+
+ fcp = (struct scsi_transportid_fcp *)(*hdr);
+ fcp->format_protocol = SCSI_PROTO_FC |
+ SCSI_TRN_FCP_FORMAT_DEFAULT;
+ scsi_u64to8b(value, fcp->n_port_name);
+ break;
+ }
+ case SCSI_PROTO_1394: {
+ struct scsi_transportid_1394 *sbp;
+
+ sbp = (struct scsi_transportid_1394 *)(*hdr);
+ sbp->format_protocol = SCSI_PROTO_1394 |
+ SCSI_TRN_1394_FORMAT_DEFAULT;
+ scsi_u64to8b(value, sbp->eui64);
+ break;
+ }
+ case SCSI_PROTO_SAS: {
+ struct scsi_transportid_sas *sas;
+
+ sas = (struct scsi_transportid_sas *)(*hdr);
+ sas->format_protocol = SCSI_PROTO_SAS |
+ SCSI_TRN_SAS_FORMAT_DEFAULT;
+ scsi_u64to8b(value, sas->sas_address);
+ break;
+ }
+ default:
+ break;
+ }
+bailout:
+ return (retval);
+}
+
+/*
+ * Parse a SPI (Parallel SCSI) address of the form: id,rel_tgt_port
+ */
+int
+scsi_parse_transportid_spi(char *id_str, struct scsi_transportid_header **hdr,
+ unsigned int *alloc_len,
+#ifdef _KERNEL
+ struct malloc_type *type, int flags,
+#endif
+ char *error_str, int error_str_len)
+{
+ unsigned long scsi_addr, target_port;
+ struct scsi_transportid_spi *spi;
+ char *tmpstr, *endptr;
+ int retval;
+
+ retval = 0;
+
+ tmpstr = strsep(&id_str, ",");
+ if (tmpstr == NULL) {
+ if (error_str != NULL) {
+ snprintf(error_str, error_str_len,
+ "%s: no ID found", __func__);
+ }
+ retval = 1;
+ goto bailout;
+ }
+ scsi_addr = strtoul(tmpstr, &endptr, 0);
+ if (*endptr != '\0') {
+ if (error_str != NULL) {
+ snprintf(error_str, error_str_len, "%s: error "
+ "parsing SCSI ID %s, number required",
+ __func__, tmpstr);
+ }
+ retval = 1;
+ goto bailout;
+ }
+
+ if (id_str == NULL) {
+ if (error_str != NULL) {
+ snprintf(error_str, error_str_len, "%s: no relative "
+ "target port found", __func__);
+ }
+ retval = 1;
+ goto bailout;
+ }
+
+ target_port = strtoul(id_str, &endptr, 0);
+ if (*endptr != '\0') {
+ if (error_str != NULL) {
+ snprintf(error_str, error_str_len, "%s: error "
+ "parsing relative target port %s, number "
+ "required", __func__, id_str);
+ }
+ retval = 1;
+ goto bailout;
+ }
+#ifdef _KERNEL
+ spi = malloc(sizeof(*spi), type, flags);
+#else
+ spi = malloc(sizeof(*spi));
+#endif
+ if (spi == NULL) {
+ if (error_str != NULL) {
+ snprintf(error_str, error_str_len, "%s: unable to "
+ "allocate %zu bytes", __func__,
+ sizeof(*spi));
+ }
+ retval = 1;
+ goto bailout;
+ }
+ *alloc_len = sizeof(*spi);
+ bzero(spi, sizeof(*spi));
+
+ spi->format_protocol = SCSI_PROTO_SPI | SCSI_TRN_SPI_FORMAT_DEFAULT;
+ scsi_ulto2b(scsi_addr, spi->scsi_addr);
+ scsi_ulto2b(target_port, spi->rel_trgt_port_id);
+
+ *hdr = (struct scsi_transportid_header *)spi;
+bailout:
+ return (retval);
+}
+
+/*
+ * Parse an RDMA/SRP Initiator Port ID string. This is 32 hexadecimal digits,
+ * optionally prefixed by "0x" or "0X".
+ */
+int
+scsi_parse_transportid_rdma(char *id_str, struct scsi_transportid_header **hdr,
+ unsigned int *alloc_len,
+#ifdef _KERNEL
+ struct malloc_type *type, int flags,
+#endif
+ char *error_str, int error_str_len)
+{
+ struct scsi_transportid_rdma *rdma;
+ int retval;
+ size_t id_len, rdma_id_size;
+ uint8_t rdma_id[SCSI_TRN_RDMA_PORT_LEN];
+ char *tmpstr;
+ unsigned int i, j;
+
+ retval = 0;
+ id_len = strlen(id_str);
+ rdma_id_size = SCSI_TRN_RDMA_PORT_LEN;
+
+ /*
+ * Check the size. It needs to be either 32 or 34 characters long.
+ */
+ if ((id_len != (rdma_id_size * 2))
+ && (id_len != ((rdma_id_size * 2) + 2))) {
+ if (error_str != NULL) {
+ snprintf(error_str, error_str_len, "%s: RDMA ID "
+ "must be 32 hex digits (0x prefix "
+ "optional), only %zu seen", __func__, id_len);
+ }
+ retval = 1;
+ goto bailout;
+ }
+
+ tmpstr = id_str;
+ /*
+ * If the user gave us 34 characters, the string needs to start
+ * with '0x'.
+ */
+ if (id_len == ((rdma_id_size * 2) + 2)) {
+ if ((tmpstr[0] == '0')
+ && ((tmpstr[1] == 'x') || (tmpstr[1] == 'X'))) {
+ tmpstr += 2;
+ } else {
+ if (error_str != NULL) {
+ snprintf(error_str, error_str_len, "%s: RDMA "
+ "ID prefix, if used, must be \"0x\", "
+ "got %s", __func__, tmpstr);
+ }
+ retval = 1;
+ goto bailout;
+ }
+ }
+ bzero(rdma_id, sizeof(rdma_id));
+
+ /*
+ * Convert ASCII hex into binary bytes. There is no standard
+ * 128-bit integer type, and so no strtou128t() routine to convert
+ * from hex into a large integer. In the end, we're not going to
+ * an integer, but rather to a byte array, so that and the fact
+ * that we require the user to give us 32 hex digits simplifies the
+ * logic.
+ */
+ for (i = 0; i < (rdma_id_size * 2); i++) {
+ int cur_shift;
+ unsigned char c;
+
+ /* Increment the byte array one for every 2 hex digits */
+ j = i >> 1;
+
+ /*
+ * The first digit in every pair is the most significant
+ * 4 bits. The second is the least significant 4 bits.
+ */
+ if ((i % 2) == 0)
+ cur_shift = 4;
+ else
+ cur_shift = 0;
+
+ c = tmpstr[i];
+ /* Convert the ASCII hex character into a number */
+ if (isdigit(c))
+ c -= '0';
+ else if (isalpha(c))
+ c -= isupper(c) ? 'A' - 10 : 'a' - 10;
+ else {
+ if (error_str != NULL) {
+ snprintf(error_str, error_str_len, "%s: "
+ "RDMA ID must be hex digits, got "
+ "invalid character %c", __func__,
+ tmpstr[i]);
+ }
+ retval = 1;
+ goto bailout;
+ }
+ /*
+ * The converted number can't be less than 0; the type is
+ * unsigned, and the subtraction logic will not give us
+ * a negative number. So we only need to make sure that
+ * the value is not greater than 0xf. (i.e. make sure the
+ * user didn't give us a value like "0x12jklmno").
+ */
+ if (c > 0xf) {
+ if (error_str != NULL) {
+ snprintf(error_str, error_str_len, "%s: "
+ "RDMA ID must be hex digits, got "
+ "invalid character %c", __func__,
+ tmpstr[i]);
+ }
+ retval = 1;
+ goto bailout;
+ }
+
+ rdma_id[j] |= c << cur_shift;
+ }
+
+#ifdef _KERNEL
+ rdma = malloc(sizeof(*rdma), type, flags);
+#else
+ rdma = malloc(sizeof(*rdma));
+#endif
+ if (rdma == NULL) {
+ if (error_str != NULL) {
+ snprintf(error_str, error_str_len, "%s: unable to "
+ "allocate %zu bytes", __func__,
+ sizeof(*rdma));
+ }
+ retval = 1;
+ goto bailout;
+ }
+ *alloc_len = sizeof(*rdma);
+ bzero(rdma, sizeof(*rdma));
+
+ rdma->format_protocol = SCSI_PROTO_RDMA | SCSI_TRN_RDMA_FORMAT_DEFAULT;
+ bcopy(rdma_id, rdma->initiator_port_id, SCSI_TRN_RDMA_PORT_LEN);
+
+ *hdr = (struct scsi_transportid_header *)rdma;
+
+bailout:
+ return (retval);
+}
+
+/*
+ * Parse an iSCSI name. The format is either just the name:
+ *
+ * iqn.2012-06.com.example:target0
+ * or the name, separator and initiator session ID:
+ *
+ * iqn.2012-06.com.example:target0,i,0x123
+ *
+ * The separator format is exact.
+ */
+int
+scsi_parse_transportid_iscsi(char *id_str, struct scsi_transportid_header **hdr,
+ unsigned int *alloc_len,
+#ifdef _KERNEL
+ struct malloc_type *type, int flags,
+#endif
+ char *error_str, int error_str_len)
+{
+ size_t id_len, sep_len, id_size, name_len;
+ int is_full_id, retval;
+ unsigned int i, sep_pos, sep_found;
+ const char *sep_template = ",i,0x";
+ const char *iqn_prefix = "iqn.";
+ struct scsi_transportid_iscsi_device *iscsi;
+
+ is_full_id = 0;
+ retval = 0;
+ sep_found = 0;
+
+ id_len = strlen(id_str);
+ sep_len = strlen(sep_template);
+
+ /*
+ * The separator is defined as exactly ',i,0x'. Any other commas,
+ * or any other form, is an error. So look for a comma, and once
+ * we find that, the next few characters must match the separator
+ * exactly. Once we get through the separator, there should be at
+ * least one character.
+ */
+ for (i = 0, sep_pos = 0; i < id_len; i++) {
+ if (sep_pos == 0) {
+ if (id_str[i] == sep_template[sep_pos])
+ sep_pos++;
+
+ continue;
+ }
+ if (sep_pos < sep_len) {
+ if (id_str[i] == sep_template[sep_pos]) {
+ sep_pos++;
+ continue;
+ }
+ if (error_str != NULL) {
+ snprintf(error_str, error_str_len, "%s: "
+ "invalid separator in iSCSI name "
+ "\"%s\"",
+ __func__, id_str);
+ }
+ retval = 1;
+ goto bailout;
+ } else {
+ sep_found = 1;
+ break;
+ }
+ }
+
+ /*
+ * Check to see whether we have a separator but no digits after it.
+ */
+ if ((sep_pos != 0)
+ && (sep_found == 0)) {
+ if (error_str != NULL) {
+ snprintf(error_str, error_str_len, "%s: no digits "
+ "found after separator in iSCSI name \"%s\"",
+ __func__, id_str);
+ }
+ retval = 1;
+ goto bailout;
+ }
+
+ /*
+ * The incoming ID string has the "iqn." prefix stripped off. We
+ * need enough space for the base structure (the structures are the
+ * same for the two iSCSI forms), the prefix, the ID string and a
+ * terminating NUL.
+ */
+ id_size = sizeof(*iscsi) + strlen(iqn_prefix) + id_len + 1;
+
+#ifdef _KERNEL
+ iscsi = malloc(id_size, type, flags);
+#else
+ iscsi = malloc(id_size);
+#endif
+ if (iscsi == NULL) {
+ if (error_str != NULL) {
+ snprintf(error_str, error_str_len, "%s: unable to "
+ "allocate %zu bytes", __func__, id_size);
+ }
+ retval = 1;
+ goto bailout;
+ }
+ *alloc_len = id_size;
+ bzero(iscsi, id_size);
+
+ iscsi->format_protocol = SCSI_PROTO_ISCSI;
+ if (sep_found == 0)
+ iscsi->format_protocol |= SCSI_TRN_ISCSI_FORMAT_DEVICE;
+ else
+ iscsi->format_protocol |= SCSI_TRN_ISCSI_FORMAT_PORT;
+ name_len = id_size - sizeof(*iscsi);
+ scsi_ulto2b(name_len, iscsi->additional_length);
+ snprintf(iscsi->iscsi_name, name_len, "%s%s", iqn_prefix, id_str);
+
+ *hdr = (struct scsi_transportid_header *)iscsi;
+
+bailout:
+ return (retval);
+}
+
+/*
+ * Parse a SCSI over PCIe (SOP) identifier. The Routing ID can either be
+ * of the form 'bus,device,function' or 'bus,function'.
+ */
+int
+scsi_parse_transportid_sop(char *id_str, struct scsi_transportid_header **hdr,
+ unsigned int *alloc_len,
+#ifdef _KERNEL
+ struct malloc_type *type, int flags,
+#endif
+ char *error_str, int error_str_len)
+{
+ struct scsi_transportid_sop *sop;
+ unsigned long bus, device, function;
+ char *tmpstr, *endptr;
+ int retval, device_spec;
+
+ retval = 0;
+ device_spec = 0;
+ device = 0;
+
+ tmpstr = strsep(&id_str, ",");
+ if ((tmpstr == NULL)
+ || (*tmpstr == '\0')) {
+ if (error_str != NULL) {
+ snprintf(error_str, error_str_len, "%s: no ID found",
+ __func__);
+ }
+ retval = 1;
+ goto bailout;
+ }
+ bus = strtoul(tmpstr, &endptr, 0);
+ if (*endptr != '\0') {
+ if (error_str != NULL) {
+ snprintf(error_str, error_str_len, "%s: error "
+ "parsing PCIe bus %s, number required",
+ __func__, tmpstr);
+ }
+ retval = 1;
+ goto bailout;
+ }
+ if ((id_str == NULL)
+ || (*id_str == '\0')) {
+ if (error_str != NULL) {
+ snprintf(error_str, error_str_len, "%s: no PCIe "
+ "device or function found", __func__);
+ }
+ retval = 1;
+ goto bailout;
+ }
+ tmpstr = strsep(&id_str, ",");
+ function = strtoul(tmpstr, &endptr, 0);
+ if (*endptr != '\0') {
+ if (error_str != NULL) {
+ snprintf(error_str, error_str_len, "%s: error "
+ "parsing PCIe device/function %s, number "
+ "required", __func__, tmpstr);
+ }
+ retval = 1;
+ goto bailout;
+ }
+ /*
+ * Check to see whether the user specified a third value. If so,
+ * the second is the device.
+ */
+ if (id_str != NULL) {
+ if (*id_str == '\0') {
+ if (error_str != NULL) {
+ snprintf(error_str, error_str_len, "%s: "
+ "no PCIe function found", __func__);
+ }
+ retval = 1;
+ goto bailout;
+ }
+ device = function;
+ device_spec = 1;
+ function = strtoul(id_str, &endptr, 0);
+ if (*endptr != '\0') {
+ if (error_str != NULL) {
+ snprintf(error_str, error_str_len, "%s: "
+ "error parsing PCIe function %s, "
+ "number required", __func__, id_str);
+ }
+ retval = 1;
+ goto bailout;
+ }
+ }
+ if (bus > SCSI_TRN_SOP_BUS_MAX) {
+ if (error_str != NULL) {
+ snprintf(error_str, error_str_len, "%s: bus value "
+ "%lu greater than maximum %u", __func__,
+ bus, SCSI_TRN_SOP_BUS_MAX);
+ }
+ retval = 1;
+ goto bailout;
+ }
+
+ if ((device_spec != 0)
+ && (device > SCSI_TRN_SOP_DEV_MAX)) {
+ if (error_str != NULL) {
+ snprintf(error_str, error_str_len, "%s: device value "
+ "%lu greater than maximum %u", __func__,
+ device, SCSI_TRN_SOP_DEV_MAX);
+ }
+ retval = 1;
+ goto bailout;
+ }
+
+ if (((device_spec != 0)
+ && (function > SCSI_TRN_SOP_FUNC_NORM_MAX))
+ || ((device_spec == 0)
+ && (function > SCSI_TRN_SOP_FUNC_ALT_MAX))) {
+ if (error_str != NULL) {
+ snprintf(error_str, error_str_len, "%s: function value "
+ "%lu greater than maximum %u", __func__,
+ function, (device_spec == 0) ?
+ SCSI_TRN_SOP_FUNC_ALT_MAX :
+ SCSI_TRN_SOP_FUNC_NORM_MAX);
+ }
+ retval = 1;
+ goto bailout;
+ }
+
+#ifdef _KERNEL
+ sop = malloc(sizeof(*sop), type, flags);
+#else
+ sop = malloc(sizeof(*sop));
+#endif
+ if (sop == NULL) {
+ if (error_str != NULL) {
+ snprintf(error_str, error_str_len, "%s: unable to "
+ "allocate %zu bytes", __func__, sizeof(*sop));
+ }
+ retval = 1;
+ goto bailout;
+ }
+ *alloc_len = sizeof(*sop);
+ bzero(sop, sizeof(*sop));
+ sop->format_protocol = SCSI_PROTO_SOP | SCSI_TRN_SOP_FORMAT_DEFAULT;
+ if (device_spec != 0) {
+ struct scsi_sop_routing_id_norm rid;
+
+ rid.bus = bus;
+ rid.devfunc = (device << SCSI_TRN_SOP_DEV_SHIFT) | function;
+ bcopy(&rid, sop->routing_id, MIN(sizeof(rid),
+ sizeof(sop->routing_id)));
+ } else {
+ struct scsi_sop_routing_id_alt rid;
+
+ rid.bus = bus;
+ rid.function = function;
+ bcopy(&rid, sop->routing_id, MIN(sizeof(rid),
+ sizeof(sop->routing_id)));
+ }
+
+ *hdr = (struct scsi_transportid_header *)sop;
+bailout:
+ return (retval);
+}
+
+/*
+ * transportid_str: NUL-terminated string with format: protocol,id
+ * The ID is protocol specific.
+ * hdr: Storage will be allocated for the transport ID.
+ * alloc_len: The amount of memory allocated is returned here.
+ * type: Malloc bucket (kernel only).
+ * flags: Malloc flags (kernel only).
+ * error_str: If non-NULL, it will contain error information (without
+ * a terminating newline) if an error is returned.
+ * error_str_len: Allocated length of the error string.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+scsi_parse_transportid(char *transportid_str,
+ struct scsi_transportid_header **hdr,
+ unsigned int *alloc_len,
+#ifdef _KERNEL
+ struct malloc_type *type, int flags,
+#endif
+ char *error_str, int error_str_len)
+{
+ char *tmpstr;
+ scsi_nv_status status;
+ int retval, num_proto_entries, table_entry;
+
+ retval = 0;
+ table_entry = 0;
+
+ /*
+ * We do allow a period as well as a comma to separate the protocol
+ * from the ID string. This is to accommodate iSCSI names, which
+ * start with "iqn.".
+ */
+ tmpstr = strsep(&transportid_str, ",.");
+ if (tmpstr == NULL) {
+ if (error_str != NULL) {
+ snprintf(error_str, error_str_len,
+ "%s: transportid_str is NULL", __func__);
+ }
+ retval = 1;
+ goto bailout;
+ }
+
+ num_proto_entries = sizeof(scsi_proto_map) /
+ sizeof(scsi_proto_map[0]);
+ status = scsi_get_nv(scsi_proto_map, num_proto_entries, tmpstr,
+ &table_entry, SCSI_NV_FLAG_IG_CASE);
+ if (status != SCSI_NV_FOUND) {
+ if (error_str != NULL) {
+ snprintf(error_str, error_str_len, "%s: %s protocol "
+ "name %s", __func__,
+ (status == SCSI_NV_AMBIGUOUS) ? "ambiguous" :
+ "invalid", tmpstr);
+ }
+ retval = 1;
+ goto bailout;
+ }
+ switch (scsi_proto_map[table_entry].value) {
+ case SCSI_PROTO_FC:
+ case SCSI_PROTO_1394:
+ case SCSI_PROTO_SAS:
+ retval = scsi_parse_transportid_64bit(
+ scsi_proto_map[table_entry].value, transportid_str, hdr,
+ alloc_len,
+#ifdef _KERNEL
+ type, flags,
+#endif
+ error_str, error_str_len);
+ break;
+ case SCSI_PROTO_SPI:
+ retval = scsi_parse_transportid_spi(transportid_str, hdr,
+ alloc_len,
+#ifdef _KERNEL
+ type, flags,
+#endif
+ error_str, error_str_len);
+ break;
+ case SCSI_PROTO_RDMA:
+ retval = scsi_parse_transportid_rdma(transportid_str, hdr,
+ alloc_len,
+#ifdef _KERNEL
+ type, flags,
+#endif
+ error_str, error_str_len);
+ break;
+ case SCSI_PROTO_ISCSI:
+ retval = scsi_parse_transportid_iscsi(transportid_str, hdr,
+ alloc_len,
+#ifdef _KERNEL
+ type, flags,
+#endif
+ error_str, error_str_len);
+ break;
+ case SCSI_PROTO_SOP:
+ retval = scsi_parse_transportid_sop(transportid_str, hdr,
+ alloc_len,
+#ifdef _KERNEL
+ type, flags,
+#endif
+ error_str, error_str_len);
+ break;
+ case SCSI_PROTO_SSA:
+ case SCSI_PROTO_ADITP:
+ case SCSI_PROTO_ATA:
+ case SCSI_PROTO_UAS:
+ case SCSI_PROTO_NONE:
+ default:
+ /*
+ * There is no format defined for a Transport ID for these
+ * protocols. So even if the user gives us something, we
+ * have no way to turn it into a standard SCSI Transport ID.
+ */
+ retval = 1;
+ if (error_str != NULL) {
+ snprintf(error_str, error_str_len, "%s: no Transport "
+ "ID format exists for protocol %s",
+ __func__, tmpstr);
+ }
+ goto bailout;
+ break; /* NOTREACHED */
+ }
+bailout:
+ return (retval);
+}
+
void
scsi_test_unit_ready(struct ccb_scsiio *csio, u_int32_t retries,
void (*cbfcnp)(struct cam_periph *, union ccb *),
@@ -6409,6 +7395,63 @@ scsi_start_stop(struct ccb_scsiio *csio, u_int32_t retries,
}
+void
+scsi_persistent_reserve_in(struct ccb_scsiio *csio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ uint8_t tag_action, int service_action,
+ uint8_t *data_ptr, uint32_t dxfer_len, int sense_len,
+ int timeout)
+{
+ struct scsi_per_res_in *scsi_cmd;
+
+ scsi_cmd = (struct scsi_per_res_in *)&csio->cdb_io.cdb_bytes;
+ bzero(scsi_cmd, sizeof(*scsi_cmd));
+
+ scsi_cmd->opcode = PERSISTENT_RES_IN;
+ scsi_cmd->action = service_action;
+ scsi_ulto2b(dxfer_len, scsi_cmd->length);
+
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ /*flags*/CAM_DIR_IN,
+ tag_action,
+ data_ptr,
+ dxfer_len,
+ sense_len,
+ sizeof(*scsi_cmd),
+ timeout);
+}
+
+void
+scsi_persistent_reserve_out(struct ccb_scsiio *csio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ uint8_t tag_action, int service_action,
+ int scope, int res_type, uint8_t *data_ptr,
+ uint32_t dxfer_len, int sense_len, int timeout)
+{
+ struct scsi_per_res_out *scsi_cmd;
+
+ scsi_cmd = (struct scsi_per_res_out *)&csio->cdb_io.cdb_bytes;
+ bzero(scsi_cmd, sizeof(*scsi_cmd));
+
+ scsi_cmd->opcode = PERSISTENT_RES_OUT;
+ scsi_cmd->action = service_action;
+ scsi_cmd->scope_type = scope | res_type;
+ scsi_ulto4b(dxfer_len, scsi_cmd->length);
+
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ /*flags*/CAM_DIR_OUT,
+ tag_action,
+ data_ptr,
+ dxfer_len,
+ sense_len,
+ sizeof(*scsi_cmd),
+ timeout);
+}
+
/*
* Try make as good a match as possible with
* available sub drivers
diff --git a/sys/cam/scsi/scsi_all.h b/sys/cam/scsi/scsi_all.h
index 30b5ccb81cdc9..6a4467e5b2c08 100644
--- a/sys/cam/scsi/scsi_all.h
+++ b/sys/cam/scsi/scsi_all.h
@@ -278,6 +278,7 @@ struct scsi_per_res_in
#define SPRI_RS 0x03
u_int8_t reserved[5];
u_int8_t length[2];
+#define SPRI_MAX_LEN 0xffff
u_int8_t control;
};
@@ -302,13 +303,21 @@ struct scsi_per_res_cap
{
uint8_t length[2];
uint8_t flags1;
-#define SPRI_CRH 0x10
-#define SPRI_SIP_C 0x08
-#define SPRI_ATP_C 0x04
-#define SPRI_PTPL_C 0x01
+#define SPRI_RLR_C 0x80
+#define SPRI_CRH 0x10
+#define SPRI_SIP_C 0x08
+#define SPRI_ATP_C 0x04
+#define SPRI_PTPL_C 0x01
uint8_t flags2;
-#define SPRI_TMV 0x80
-#define SPRI_PTPL_A 0x01
+#define SPRI_TMV 0x80
+#define SPRI_ALLOW_CMD_MASK 0x70
+#define SPRI_ALLOW_CMD_SHIFT 4
+#define SPRI_ALLOW_NA 0x00
+#define SPRI_ALLOW_1 0x10
+#define SPRI_ALLOW_2 0x20
+#define SPRI_ALLOW_3 0x30
+#define SPRI_ALLOW_4 0x40
+#define SPRI_PTPL_A 0x01
uint8_t type_mask[2];
#define SPRI_TM_WR_EX_AR 0x8000
#define SPRI_TM_EX_AC_RO 0x4000
@@ -322,7 +331,7 @@ struct scsi_per_res_cap
struct scsi_per_res_in_rsrv_data
{
uint8_t reservation[8];
- uint8_t obsolete1[4];
+ uint8_t scope_addr[4];
uint8_t reserved;
uint8_t scopetype;
#define SPRT_WE 0x01
@@ -331,7 +340,7 @@ struct scsi_per_res_in_rsrv_data
#define SPRT_EARO 0x06
#define SPRT_WEAR 0x07
#define SPRT_EAAR 0x08
- uint8_t obsolete2[2];
+ uint8_t extent_length[2];
};
struct scsi_per_res_in_rsrv
@@ -340,6 +349,26 @@ struct scsi_per_res_in_rsrv
struct scsi_per_res_in_rsrv_data data;
};
+struct scsi_per_res_in_full_desc
+{
+ struct scsi_per_res_key res_key;
+ uint8_t reserved1[4];
+ uint8_t flags;
+#define SPRI_FULL_ALL_TG_PT 0x02
+#define SPRI_FULL_R_HOLDER 0x01
+ uint8_t scopetype;
+ uint8_t reserved2[4];
+ uint8_t rel_trgt_port_id[2];
+ uint8_t additional_length[4];
+ uint8_t transport_id[];
+};
+
+struct scsi_per_res_in_full
+{
+ struct scsi_per_res_in_header header;
+ struct scsi_per_res_in_full_desc desc[];
+};
+
struct scsi_per_res_out
{
u_int8_t opcode;
@@ -352,13 +381,20 @@ struct scsi_per_res_out
#define SPRO_PRE_ABO 0x05
#define SPRO_REG_IGNO 0x06
#define SPRO_REG_MOVE 0x07
+#define SPRO_REPL_LOST_RES 0x08
#define SPRO_ACTION_MASK 0x1f
u_int8_t scope_type;
#define SPR_SCOPE_MASK 0xf0
+#define SPR_SCOPE_SHIFT 4
#define SPR_LU_SCOPE 0x00
+#define SPR_EXTENT_SCOPE 0x10
+#define SPR_ELEMENT_SCOPE 0x20
#define SPR_TYPE_MASK 0x0f
+#define SPR_TYPE_RD_SHARED 0x00
#define SPR_TYPE_WR_EX 0x01
+#define SPR_TYPE_RD_EX 0x02
#define SPR_TYPE_EX_AC 0x03
+#define SPR_TYPE_SHARED 0x04
#define SPR_TYPE_WR_EX_RO 0x05
#define SPR_TYPE_EX_AC_RO 0x06
#define SPR_TYPE_WR_EX_AR 0x07
@@ -372,15 +408,139 @@ struct scsi_per_res_out_parms
{
struct scsi_per_res_key res_key;
u_int8_t serv_act_res_key[8];
- u_int8_t obsolete1[4];
+ u_int8_t scope_spec_address[4];
u_int8_t flags;
#define SPR_SPEC_I_PT 0x08
#define SPR_ALL_TG_PT 0x04
#define SPR_APTPL 0x01
u_int8_t reserved1;
- u_int8_t obsolete2[2];
+ u_int8_t extent_length[2];
+ u_int8_t transport_id_list[];
+};
+
+struct scsi_per_res_out_trans_ids {
+ u_int8_t additional_length[4];
+ u_int8_t transport_ids[];
+};
+
+/*
+ * Used with REGISTER AND MOVE service action of the PERSISTENT RESERVE OUT
+ * command.
+ */
+struct scsi_per_res_reg_move
+{
+ struct scsi_per_res_key res_key;
+ u_int8_t serv_act_res_key[8];
+ u_int8_t reserved;
+ u_int8_t flags;
+#define SPR_REG_MOVE_UNREG 0x02
+#define SPR_REG_MOVE_APTPL 0x01
+ u_int8_t rel_trgt_port_id[2];
+ u_int8_t transport_id_length[4];
+ u_int8_t transport_id[];
+};
+
+struct scsi_transportid_header
+{
+ uint8_t format_protocol;
+#define SCSI_TRN_FORMAT_MASK 0xc0
+#define SCSI_TRN_FORMAT_SHIFT 6
+#define SCSI_TRN_PROTO_MASK 0x0f
};
+struct scsi_transportid_fcp
+{
+ uint8_t format_protocol;
+#define SCSI_TRN_FCP_FORMAT_DEFAULT 0x00
+ uint8_t reserved1[7];
+ uint8_t n_port_name[8];
+ uint8_t reserved2[8];
+};
+
+struct scsi_transportid_spi
+{
+ uint8_t format_protocol;
+#define SCSI_TRN_SPI_FORMAT_DEFAULT 0x00
+ uint8_t reserved1;
+ uint8_t scsi_addr[2];
+ uint8_t obsolete[2];
+ uint8_t rel_trgt_port_id[2];
+ uint8_t reserved2[16];
+};
+
+struct scsi_transportid_1394
+{
+ uint8_t format_protocol;
+#define SCSI_TRN_1394_FORMAT_DEFAULT 0x00
+ uint8_t reserved1[7];
+ uint8_t eui64[8];
+ uint8_t reserved2[8];
+};
+
+struct scsi_transportid_rdma
+{
+ uint8_t format_protocol;
+#define SCSI_TRN_RDMA_FORMAT_DEFAULT 0x00
+ uint8_t reserved[7];
+#define SCSI_TRN_RDMA_PORT_LEN 16
+ uint8_t initiator_port_id[SCSI_TRN_RDMA_PORT_LEN];
+};
+
+struct scsi_transportid_iscsi_device
+{
+ uint8_t format_protocol;
+#define SCSI_TRN_ISCSI_FORMAT_DEVICE 0x00
+ uint8_t reserved;
+ uint8_t additional_length[2];
+ uint8_t iscsi_name[];
+};
+
+struct scsi_transportid_iscsi_port
+{
+ uint8_t format_protocol;
+#define SCSI_TRN_ISCSI_FORMAT_PORT 0x40
+ uint8_t reserved;
+ uint8_t additional_length[2];
+ uint8_t iscsi_name[];
+ /*
+ * Followed by a separator and iSCSI initiator session ID
+ */
+};
+
+struct scsi_transportid_sas
+{
+ uint8_t format_protocol;
+#define SCSI_TRN_SAS_FORMAT_DEFAULT 0x00
+ uint8_t reserved1[3];
+ uint8_t sas_address[8];
+ uint8_t reserved2[12];
+};
+
+struct scsi_sop_routing_id_norm {
+ uint8_t bus;
+ uint8_t devfunc;
+#define SCSI_TRN_SOP_BUS_MAX 0xff
+#define SCSI_TRN_SOP_DEV_MAX 0x1f
+#define SCSI_TRN_SOP_DEV_MASK 0xf8
+#define SCSI_TRN_SOP_DEV_SHIFT 3
+#define SCSI_TRN_SOP_FUNC_NORM_MASK 0x07
+#define SCSI_TRN_SOP_FUNC_NORM_MAX 0x07
+};
+
+struct scsi_sop_routing_id_alt {
+ uint8_t bus;
+ uint8_t function;
+#define SCSI_TRN_SOP_FUNC_ALT_MAX 0xff
+};
+
+struct scsi_transportid_sop
+{
+ uint8_t format_protocol;
+#define SCSI_TRN_SOP_FORMAT_DEFAULT 0x00
+ uint8_t reserved1;
+ uint8_t routing_id[2];
+ uint8_t reserved2[20];
+};
struct scsi_log_sense
{
@@ -470,15 +630,24 @@ struct scsi_control_page {
#define SCP_QUEUE_ALG_MASK 0xF0
#define SCP_QUEUE_ALG_RESTRICTED 0x00
#define SCP_QUEUE_ALG_UNRESTRICTED 0x10
+#define SCP_NUAR 0x08 /*No UA on release*/
#define SCP_QUEUE_ERR 0x02 /*Queued I/O aborted for CACs*/
#define SCP_QUEUE_DQUE 0x01 /*Queued I/O disabled*/
u_int8_t eca_and_aen;
#define SCP_EECA 0x80 /*Enable Extended CA*/
+#define SCP_RAC 0x40 /*Report a check*/
+#define SCP_SWP 0x08 /*Software Write Protect*/
#define SCP_RAENP 0x04 /*Ready AEN Permission*/
#define SCP_UAAENP 0x02 /*UA AEN Permission*/
#define SCP_EAENP 0x01 /*Error AEN Permission*/
- u_int8_t reserved;
+ u_int8_t flags4;
+#define SCP_ATO 0x80 /*Application tag owner*/
+#define SCP_TAS 0x40 /*Task aborted status*/
+#define SCP_ATMPE 0x20 /*Application tag mode page*/
+#define SCP_RWWP 0x10 /*Reject write without prot*/
u_int8_t aen_holdoff_period[2];
+ u_int8_t busy_timeout_period[2];
+ u_int8_t extended_selftest_completion_time[2];
};
struct scsi_cache_page {
@@ -597,21 +766,41 @@ struct scsi_info_exceptions_page {
u_int8_t report_count[4];
};
+/*
+ * SCSI protocol identifier values, current as of SPC4r36l.
+ */
+#define SCSI_PROTO_FC 0x00 /* Fibre Channel */
+#define SCSI_PROTO_SPI 0x01 /* Parallel SCSI */
+#define SCSI_PROTO_SSA 0x02 /* Serial Storage Arch. */
+#define SCSI_PROTO_1394 0x03 /* IEEE 1394 (Firewire) */
+#define SCSI_PROTO_RDMA 0x04 /* SCSI RDMA Protocol */
+#define SCSI_PROTO_ISCSI 0x05 /* Internet SCSI */
+#define SCSI_PROTO_iSCSI 0x05 /* Internet SCSI */
+#define SCSI_PROTO_SAS 0x06 /* SAS Serial SCSI Protocol */
+#define SCSI_PROTO_ADT 0x07 /* Automation/Drive Int. Trans. Prot.*/
+#define SCSI_PROTO_ADITP 0x07 /* Automation/Drive Int. Trans. Prot.*/
+#define SCSI_PROTO_ATA 0x08 /* AT Attachment Interface */
+#define SCSI_PROTO_UAS 0x09 /* USB Attached SCSI */
+#define SCSI_PROTO_SOP 0x0a /* SCSI over PCI Express */
+#define SCSI_PROTO_NONE 0x0f /* No specific protocol */
+
struct scsi_proto_specific_page {
u_int8_t page_code;
#define SPSP_PAGE_SAVABLE 0x80 /* Page is savable */
u_int8_t page_length;
u_int8_t protocol;
-#define SPSP_PROTO_FC 0x00
-#define SPSP_PROTO_SPI 0x01
-#define SPSP_PROTO_SSA 0x02
-#define SPSP_PROTO_1394 0x03
-#define SPSP_PROTO_RDMA 0x04
-#define SPSP_PROTO_ISCSI 0x05
-#define SPSP_PROTO_SAS 0x06
-#define SPSP_PROTO_ADT 0x07
-#define SPSP_PROTO_ATA 0x08
-#define SPSP_PROTO_NONE 0x0f
+#define SPSP_PROTO_FC SCSI_PROTO_FC
+#define SPSP_PROTO_SPI SCSI_PROTO_SPI
+#define SPSP_PROTO_SSA SCSI_PROTO_SSA
+#define SPSP_PROTO_1394 SCSI_PROTO_1394
+#define SPSP_PROTO_RDMA SCSI_PROTO_RDMA
+#define SPSP_PROTO_ISCSI SCSI_PROTO_ISCSI
+#define SPSP_PROTO_SAS SCSI_PROTO_SAS
+#define SPSP_PROTO_ADT SCSI_PROTO_ADITP
+#define SPSP_PROTO_ATA SCSI_PROTO_ATA
+#define SPSP_PROTO_UAS SCSI_PROTO_UAS
+#define SPSP_PROTO_SOP SCSI_PROTO_SOP
+#define SPSP_PROTO_NONE SCSI_PROTO_NONE
};
struct scsi_reserve
@@ -746,12 +935,16 @@ struct scsi_read_buffer
{
u_int8_t opcode;
u_int8_t byte2;
-#define RWB_MODE 0x07
+#define RWB_MODE 0x1F
#define RWB_MODE_HDR_DATA 0x00
#define RWB_MODE_VENDOR 0x01
#define RWB_MODE_DATA 0x02
+#define RWB_MODE_DESCR 0x03
#define RWB_MODE_DOWNLOAD 0x04
#define RWB_MODE_DOWNLOAD_SAVE 0x05
+#define RWB_MODE_ECHO 0x0A
+#define RWB_MODE_ECHO_DESCR 0x0B
+#define RWB_MODE_ERROR_HISTORY 0x1C
u_int8_t buffer_id;
u_int8_t offset[3];
u_int8_t length[3];
@@ -968,6 +1161,378 @@ struct scsi_maintenance_in
uint8_t control;
};
+struct scsi_report_supported_opcodes
+{
+ uint8_t opcode;
+ uint8_t service_action;
+ uint8_t options;
+#define RSO_RCTD 0x80
+#define RSO_OPTIONS_MASK 0x07
+#define RSO_OPTIONS_ALL 0x00
+#define RSO_OPTIONS_OC 0x01
+#define RSO_OPTIONS_OC_SA 0x02
+ uint8_t requested_opcode;
+ uint8_t requested_service_action[2];
+ uint8_t length[4];
+ uint8_t reserved1;
+ uint8_t control;
+};
+
+struct scsi_report_supported_opcodes_timeout
+{
+ uint8_t length[2];
+ uint8_t reserved;
+ uint8_t cmd_specific;
+ uint8_t nominal_time[4];
+ uint8_t recommended_time[4];
+};
+
+struct scsi_report_supported_opcodes_descr
+{
+ uint8_t opcode;
+ uint8_t reserved;
+ uint8_t service_action[2];
+ uint8_t reserved2;
+ uint8_t flags;
+#define RSO_SERVACTV 0x01
+#define RSO_CTDP 0x02
+ uint8_t cdb_length[2];
+ struct scsi_report_supported_opcodes_timeout timeout[0];
+};
+
+struct scsi_report_supported_opcodes_all
+{
+ uint8_t length[4];
+ struct scsi_report_supported_opcodes_descr descr[0];
+};
+
+struct scsi_report_supported_opcodes_one
+{
+ uint8_t reserved;
+ uint8_t support;
+#define RSO_ONE_CTDP 0x80
+ uint8_t cdb_length[2];
+ uint8_t cdb_usage[];
+};
+
+struct scsi_report_supported_tmf
+{
+ uint8_t opcode;
+ uint8_t service_action;
+ uint8_t reserved[4];
+ uint8_t length[4];
+ uint8_t reserved1;
+ uint8_t control;
+};
+
+struct scsi_report_supported_tmf_data
+{
+ uint8_t byte1;
+#define RST_WAKES 0x01
+#define RST_TRS 0x02
+#define RST_QTS 0x04
+#define RST_LURS 0x08
+#define RST_CTSS 0x10
+#define RST_CACAS 0x20
+#define RST_ATSS 0x40
+#define RST_ATS 0x80
+ uint8_t byte2;
+#define RST_ITNRS 0x01
+#define RST_QTSS 0x02
+#define RST_QAES 0x04
+ uint8_t reserved[2];
+};
+
+struct scsi_report_timestamp
+{
+ uint8_t opcode;
+ uint8_t service_action;
+ uint8_t reserved[4];
+ uint8_t length[4];
+ uint8_t reserved1;
+ uint8_t control;
+};
+
+struct scsi_report_timestamp_data
+{
+ uint8_t length[2];
+ uint8_t origin;
+#define RTS_ORIG_MASK 0x00
+#define RTS_ORIG_ZERO 0x00
+#define RTS_ORIG_SET 0x02
+#define RTS_ORIG_OUTSIDE 0x03
+ uint8_t reserved;
+ uint8_t timestamp[6];
+ uint8_t reserve2[2];
+};
+
+struct scsi_receive_copy_status_lid1
+{
+ uint8_t opcode;
+ uint8_t service_action;
+#define RCS_RCS_LID1 0x00
+ uint8_t list_identifier;
+ uint8_t reserved[7];
+ uint8_t length[4];
+ uint8_t reserved1;
+ uint8_t control;
+};
+
+struct scsi_receive_copy_status_lid1_data
+{
+ uint8_t available_data[4];
+ uint8_t copy_command_status;
+#define RCS_CCS_INPROG 0x00
+#define RCS_CCS_COMPLETED 0x01
+#define RCS_CCS_ERROR 0x02
+ uint8_t segments_processed[2];
+ uint8_t transfer_count_units;
+#define RCS_TC_BYTES 0x00
+#define RCS_TC_KBYTES 0x01
+#define RCS_TC_MBYTES 0x02
+#define RCS_TC_GBYTES 0x03
+#define RCS_TC_TBYTES 0x04
+#define RCS_TC_PBYTES 0x05
+#define RCS_TC_EBYTES 0x06
+#define RCS_TC_LBAS 0xf1
+ uint8_t transfer_count[4];
+};
+
+struct scsi_receive_copy_failure_details
+{
+ uint8_t opcode;
+ uint8_t service_action;
+#define RCS_RCFD 0x04
+ uint8_t list_identifier;
+ uint8_t reserved[7];
+ uint8_t length[4];
+ uint8_t reserved1;
+ uint8_t control;
+};
+
+struct scsi_receive_copy_failure_details_data
+{
+ uint8_t available_data[4];
+ uint8_t reserved[52];
+ uint8_t copy_command_status;
+ uint8_t reserved2;
+ uint8_t sense_data_length[2];
+ uint8_t sense_data[];
+};
+
+struct scsi_receive_copy_status_lid4
+{
+ uint8_t opcode;
+ uint8_t service_action;
+#define RCS_RCS_LID4 0x05
+ uint8_t list_identifier[4];
+ uint8_t reserved[4];
+ uint8_t length[4];
+ uint8_t reserved1;
+ uint8_t control;
+};
+
+struct scsi_receive_copy_status_lid4_data
+{
+ uint8_t available_data[4];
+ uint8_t response_to_service_action;
+ uint8_t copy_command_status;
+#define RCS_CCS_COMPLETED_PROD 0x03
+#define RCS_CCS_COMPLETED_RESID 0x04
+#define RCS_CCS_INPROG_FGBG 0x10
+#define RCS_CCS_INPROG_FG 0x11
+#define RCS_CCS_INPROG_BG 0x12
+#define RCS_CCS_ABORTED 0x60
+ uint8_t operation_counter[2];
+ uint8_t estimated_status_update_delay[4];
+ uint8_t extended_copy_completion_status;
+ uint8_t length_of_the_sense_data_field;
+ uint8_t sense_data_length;
+ uint8_t transfer_count_units;
+ uint8_t transfer_count[8];
+ uint8_t segments_processed[2];
+ uint8_t reserved[2];
+ uint8_t sense_data[];
+};
+
+struct scsi_receive_copy_operating_parameters
+{
+ uint8_t opcode;
+ uint8_t service_action;
+#define RCS_RCOP 0x03
+ uint8_t reserved[8];
+ uint8_t length[4];
+ uint8_t reserved1;
+ uint8_t control;
+};
+
+struct scsi_receive_copy_operating_parameters_data
+{
+ uint8_t length[4];
+ uint8_t snlid;
+#define RCOP_SNLID 0x01
+ uint8_t reserved[3];
+ uint8_t maximum_cscd_descriptor_count[2];
+ uint8_t maximum_segment_descriptor_count[2];
+ uint8_t maximum_descriptor_list_length[4];
+ uint8_t maximum_segment_length[4];
+ uint8_t maximum_inline_data_length[4];
+ uint8_t held_data_limit[4];
+ uint8_t maximum_stream_device_transfer_size[4];
+ uint8_t reserved2[2];
+ uint8_t total_concurrent_copies[2];
+ uint8_t maximum_concurrent_copies;
+ uint8_t data_segment_granularity;
+ uint8_t inline_data_granularity;
+ uint8_t held_data_granularity;
+ uint8_t reserved3[3];
+ uint8_t implemented_descriptor_list_length;
+ uint8_t list_of_implemented_descriptor_type_codes[0];
+};
+
+struct scsi_extended_copy
+{
+ uint8_t opcode;
+ uint8_t service_action;
+#define EC_EC_LID1 0x00
+#define EC_EC_LID4 0x01
+ uint8_t reserved[8];
+ uint8_t length[4];
+ uint8_t reserved1;
+ uint8_t control;
+};
+
+struct scsi_ec_cscd_dtsp
+{
+ uint8_t flags;
+#define EC_CSCD_FIXED 0x01
+#define EC_CSCD_PAD 0x04
+ uint8_t block_length[3];
+};
+
+struct scsi_ec_cscd
+{
+ uint8_t type_code;
+#define EC_CSCD_EXT 0xff
+ uint8_t luidt_pdt;
+#define EC_LUIDT_MASK 0xc0
+#define EC_LUIDT_LUN 0x00
+#define EC_LUIDT_PROXY_TOKEN 0x40
+ uint8_t relative_initiator_port[2];
+ uint8_t cscd_params[24];
+ struct scsi_ec_cscd_dtsp dtsp;
+};
+
+struct scsi_ec_cscd_id
+{
+ uint8_t type_code;
+#define EC_CSCD_ID 0xe4
+ uint8_t luidt_pdt;
+ uint8_t relative_initiator_port[2];
+ uint8_t codeset;
+ uint8_t id_type;
+ uint8_t reserved;
+ uint8_t length;
+ uint8_t designator[20];
+ struct scsi_ec_cscd_dtsp dtsp;
+};
+
+struct scsi_ec_segment
+{
+ uint8_t type_code;
+ uint8_t flags;
+#define EC_SEG_DC 0x02
+#define EC_SEG_CAT 0x01
+ uint8_t descr_length[2];
+ uint8_t params[];
+};
+
+struct scsi_ec_segment_b2b
+{
+ uint8_t type_code;
+#define EC_SEG_B2B 0x02
+ uint8_t flags;
+ uint8_t descr_length[2];
+ uint8_t src_cscd[2];
+ uint8_t dst_cscd[2];
+ uint8_t reserved[2];
+ uint8_t number_of_blocks[2];
+ uint8_t src_lba[8];
+ uint8_t dst_lba[8];
+};
+
+struct scsi_ec_segment_verify
+{
+ uint8_t type_code;
+#define EC_SEG_VERIFY 0x07
+ uint8_t reserved;
+ uint8_t descr_length[2];
+ uint8_t src_cscd[2];
+ uint8_t reserved2[2];
+ uint8_t tur;
+ uint8_t reserved3[3];
+};
+
+struct scsi_ec_segment_register_key
+{
+ uint8_t type_code;
+#define EC_SEG_REGISTER_KEY 0x14
+ uint8_t reserved;
+ uint8_t descr_length[2];
+ uint8_t reserved2[2];
+ uint8_t dst_cscd[2];
+ uint8_t res_key[8];
+ uint8_t sa_res_key[8];
+ uint8_t reserved3[4];
+};
+
+struct scsi_extended_copy_lid1_data
+{
+ uint8_t list_identifier;
+ uint8_t flags;
+#define EC_PRIORITY 0x07
+#define EC_LIST_ID_USAGE_MASK 0x18
+#define EC_LIST_ID_USAGE_FULL 0x08
+#define EC_LIST_ID_USAGE_NOHOLD 0x10
+#define EC_LIST_ID_USAGE_NONE 0x18
+#define EC_STR 0x20
+ uint8_t cscd_list_length[2];
+ uint8_t reserved[4];
+ uint8_t segment_list_length[4];
+ uint8_t inline_data_length[4];
+ uint8_t data[];
+};
+
+struct scsi_extended_copy_lid4_data
+{
+ uint8_t list_format;
+#define EC_LIST_FORMAT 0x01
+ uint8_t flags;
+ uint8_t header_cscd_list_length[2];
+ uint8_t reserved[11];
+ uint8_t flags2;
+#define EC_IMMED 0x01
+#define EC_G_SENSE 0x02
+ uint8_t header_cscd_type_code;
+ uint8_t reserved2[3];
+ uint8_t list_identifier[4];
+ uint8_t reserved3[18];
+ uint8_t cscd_list_length[2];
+ uint8_t segment_list_length[2];
+ uint8_t inline_data_length[2];
+ uint8_t data[];
+};
+
+struct scsi_copy_operation_abort
+{
+ uint8_t opcode;
+ uint8_t service_action;
+#define EC_COA 0x1c
+ uint8_t list_identifier[4];
+ uint8_t reserved[9];
+ uint8_t control;
+};
+
struct ata_pass_16 {
u_int8_t opcode;
u_int8_t protocol;
@@ -1039,10 +1604,14 @@ struct ata_pass_16 {
#define MODE_SENSE_10 0x5A
#define PERSISTENT_RES_IN 0x5E
#define PERSISTENT_RES_OUT 0x5F
+#define EXTENDED_COPY 0x83
+#define RECEIVE_COPY_STATUS 0x84
#define ATA_PASS_16 0x85
#define READ_16 0x88
+#define COMPARE_AND_WRITE 0x89
#define WRITE_16 0x8A
#define WRITE_VERIFY_16 0x8E
+#define VERIFY_16 0x8F
#define SYNCHRONIZE_CACHE_16 0x91
#define WRITE_SAME_16 0x93
#define SERVICE_ACTION_IN 0x9E
@@ -1054,6 +1623,7 @@ struct ata_pass_16 {
#define READ_12 0xA8
#define WRITE_12 0xAA
#define WRITE_VERIFY_12 0xAE
+#define VERIFY_12 0xAF
#define READ_ELEMENT_STATUS 0xB8
#define READ_CD 0xBE
@@ -1149,7 +1719,9 @@ struct scsi_inquiry_data
#define SID_QUAL_IS_VENDOR_UNIQUE(inq_data) ((SID_QUAL(inq_data) & 0x08) != 0)
u_int8_t dev_qual2;
#define SID_QUAL2 0x7F
-#define SID_IS_REMOVABLE(inq_data) (((inq_data)->dev_qual2 & 0x80) != 0)
+#define SID_LU_CONG 0x40
+#define SID_RMB 0x80
+#define SID_IS_REMOVABLE(inq_data) (((inq_data)->dev_qual2 & SID_RMB) != 0)
u_int8_t version;
#define SID_ANSI_REV(inq_data) ((inq_data)->version & 0x07)
#define SCSI_REV_0 0
@@ -1297,15 +1869,9 @@ struct scsi_vpd_device_id
struct scsi_vpd_id_descriptor
{
u_int8_t proto_codeset;
-#define SCSI_PROTO_FC 0x00
-#define SCSI_PROTO_SPI 0x01
-#define SCSI_PROTO_SSA 0x02
-#define SCSI_PROTO_1394 0x03
-#define SCSI_PROTO_RDMA 0x04
-#define SCSI_PROTO_ISCSI 0x05
-#define SCSI_PROTO_SAS 0x06
-#define SCSI_PROTO_ADT 0x07
-#define SCSI_PROTO_ATA 0x08
+ /*
+ * See the SCSI_PROTO definitions above for the protocols.
+ */
#define SVPD_ID_PROTO_SHIFT 4
#define SVPD_ID_CODESET_BINARY 0x01
#define SVPD_ID_CODESET_ASCII 0x02
@@ -1447,12 +2013,114 @@ struct scsi_diag_page {
uint8_t params[0];
};
+struct scsi_vpd_port_designation
+{
+ uint8_t reserved[2];
+ uint8_t relative_port_id[2];
+ uint8_t reserved2[2];
+ uint8_t initiator_transportid_length[2];
+ uint8_t initiator_transportid[0];
+};
+
+struct scsi_vpd_port_designation_cont
+{
+ uint8_t reserved[2];
+ uint8_t target_port_descriptors_length[2];
+ struct scsi_vpd_id_descriptor target_port_descriptors[0];
+};
+
+struct scsi_vpd_scsi_ports
+{
+ u_int8_t device;
+ u_int8_t page_code;
+#define SVPD_SCSI_PORTS 0x88
+ u_int8_t page_length[2];
+ struct scsi_vpd_port_designation design[];
+};
+
/*
* ATA Information VPD Page based on
* T10/2126-D Revision 04
*/
#define SVPD_ATA_INFORMATION 0x89
+
+struct scsi_vpd_tpc_descriptor
+{
+ uint8_t desc_type[2];
+ uint8_t desc_length[2];
+ uint8_t parameters[];
+};
+
+struct scsi_vpd_tpc_descriptor_sc_descr
+{
+ uint8_t opcode;
+ uint8_t sa_length;
+ uint8_t supported_service_actions[0];
+};
+
+struct scsi_vpd_tpc_descriptor_sc
+{
+ uint8_t desc_type[2];
+#define SVPD_TPC_SC 0x0001
+ uint8_t desc_length[2];
+ uint8_t list_length;
+ struct scsi_vpd_tpc_descriptor_sc_descr descr[];
+};
+
+struct scsi_vpd_tpc_descriptor_pd
+{
+ uint8_t desc_type[2];
+#define SVPD_TPC_PD 0x0004
+ uint8_t desc_length[2];
+ uint8_t reserved[4];
+ uint8_t maximum_cscd_descriptor_count[2];
+ uint8_t maximum_segment_descriptor_count[2];
+ uint8_t maximum_descriptor_list_length[4];
+ uint8_t maximum_inline_data_length[4];
+ uint8_t reserved2[12];
+};
+
+struct scsi_vpd_tpc_descriptor_sd
+{
+ uint8_t desc_type[2];
+#define SVPD_TPC_SD 0x0008
+ uint8_t desc_length[2];
+ uint8_t list_length;
+ uint8_t supported_descriptor_codes[];
+};
+
+struct scsi_vpd_tpc_descriptor_sdid
+{
+ uint8_t desc_type[2];
+#define SVPD_TPC_SDID 0x000C
+ uint8_t desc_length[2];
+ uint8_t list_length[2];
+ uint8_t supported_descriptor_ids[];
+};
+
+struct scsi_vpd_tpc_descriptor_gco
+{
+ uint8_t desc_type[2];
+#define SVPD_TPC_GCO 0x8001
+ uint8_t desc_length[2];
+ uint8_t total_concurrent_copies[4];
+ uint8_t maximum_identified_concurrent_copies[4];
+ uint8_t maximum_segment_length[4];
+ uint8_t data_segment_granularity;
+ uint8_t inline_data_granularity;
+ uint8_t reserved[18];
+};
+
+struct scsi_vpd_tpc
+{
+ uint8_t device;
+ uint8_t page_code;
+#define SVPD_SCSI_TPC 0x8F
+ uint8_t page_length[2];
+ struct scsi_vpd_tpc_descriptor descr[];
+};
+
/*
* Block Device Characteristics VPD Page based on
* T10/1799-D Revision 31
@@ -1637,8 +2305,9 @@ struct scsi_target_group
{
uint8_t opcode;
uint8_t service_action;
+#define STG_PDF_MASK 0xe0
#define STG_PDF_LENGTH 0x00
-#define RPL_PDF_EXTENDED 0x20
+#define STG_PDF_EXTENDED 0x20
uint8_t reserved1[4];
uint8_t length[4];
uint8_t reserved2;
@@ -1688,7 +2357,7 @@ struct scsi_target_group_data {
struct scsi_target_group_data_extended {
uint8_t length[4]; /* length of returned data, in bytes */
- uint8_t format_type; /* STG_PDF_LENGTH or RPL_PDF_EXTENDED */
+ uint8_t format_type; /* STG_PDF_LENGTH or STG_PDF_EXTENDED */
uint8_t implicit_transition_time;
uint8_t reserved[2];
struct scsi_target_port_group_descriptor groups[];
@@ -2202,6 +2871,22 @@ typedef enum {
SSS_FLAG_PRINT_COMMAND = 0x01
} scsi_sense_string_flags;
+struct scsi_nv {
+ const char *name;
+ uint64_t value;
+};
+
+typedef enum {
+ SCSI_NV_FOUND,
+ SCSI_NV_AMBIGUOUS,
+ SCSI_NV_NOT_FOUND
+} scsi_nv_status;
+
+typedef enum {
+ SCSI_NV_FLAG_NONE = 0x00,
+ SCSI_NV_FLAG_IG_CASE = 0x01 /* Case insensitive comparison */
+} scsi_nv_flags;
+
struct ccb_scsiio;
struct cam_periph;
union ccb;
@@ -2343,6 +3028,64 @@ struct scsi_vpd_id_descriptor *
scsi_get_devid(struct scsi_vpd_device_id *id, uint32_t len,
scsi_devid_checkfn_t ck_fn);
+int scsi_transportid_sbuf(struct sbuf *sb,
+ struct scsi_transportid_header *hdr,
+ uint32_t valid_len);
+
+const char * scsi_nv_to_str(struct scsi_nv *table, int num_table_entries,
+ uint64_t value);
+
+scsi_nv_status scsi_get_nv(struct scsi_nv *table, int num_table_entries,
+ char *name, int *table_entry, scsi_nv_flags flags);
+
+int scsi_parse_transportid_64bit(int proto_id, char *id_str,
+ struct scsi_transportid_header **hdr,
+ unsigned int *alloc_len,
+#ifdef _KERNEL
+ struct malloc_type *type, int flags,
+#endif
+ char *error_str, int error_str_len);
+
+int scsi_parse_transportid_spi(char *id_str,
+ struct scsi_transportid_header **hdr,
+ unsigned int *alloc_len,
+#ifdef _KERNEL
+ struct malloc_type *type, int flags,
+#endif
+ char *error_str, int error_str_len);
+
+int scsi_parse_transportid_rdma(char *id_str,
+ struct scsi_transportid_header **hdr,
+ unsigned int *alloc_len,
+#ifdef _KERNEL
+ struct malloc_type *type, int flags,
+#endif
+ char *error_str, int error_str_len);
+
+int scsi_parse_transportid_iscsi(char *id_str,
+ struct scsi_transportid_header **hdr,
+ unsigned int *alloc_len,
+#ifdef _KERNEL
+ struct malloc_type *type, int flags,
+#endif
+ char *error_str,int error_str_len);
+
+int scsi_parse_transportid_sop(char *id_str,
+ struct scsi_transportid_header **hdr,
+ unsigned int *alloc_len,
+#ifdef _KERNEL
+ struct malloc_type *type, int flags,
+#endif
+ char *error_str,int error_str_len);
+
+int scsi_parse_transportid(char *transportid_str,
+ struct scsi_transportid_header **hdr,
+ unsigned int *alloc_len,
+#ifdef _KERNEL
+ struct malloc_type *type, int flags,
+#endif
+ char *error_str, int error_str_len);
+
void scsi_test_unit_ready(struct ccb_scsiio *csio, u_int32_t retries,
void (*cbfcnp)(struct cam_periph *,
union ccb *),
@@ -2538,6 +3281,20 @@ void scsi_start_stop(struct ccb_scsiio *csio, u_int32_t retries,
u_int8_t tag_action, int start, int load_eject,
int immediate, u_int8_t sense_len, u_int32_t timeout);
+void scsi_persistent_reserve_in(struct ccb_scsiio *csio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *,union ccb *),
+ uint8_t tag_action, int service_action,
+ uint8_t *data_ptr, uint32_t dxfer_len,
+ int sense_len, int timeout);
+
+void scsi_persistent_reserve_out(struct ccb_scsiio *csio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *,
+ union ccb *),
+ uint8_t tag_action, int service_action,
+ int scope, int res_type, uint8_t *data_ptr,
+ uint32_t dxfer_len, int sense_len,
+ int timeout);
+
int scsi_inquiry_match(caddr_t inqbuffer, caddr_t table_entry);
int scsi_static_inquiry_match(caddr_t inqbuffer,
caddr_t table_entry);
diff --git a/sys/cam/scsi/scsi_cd.c b/sys/cam/scsi/scsi_cd.c
index 81c29a6fb54cd..47a5a438d2942 100644
--- a/sys/cam/scsi/scsi_cd.c
+++ b/sys/cam/scsi/scsi_cd.c
@@ -277,15 +277,12 @@ static int cd_retry_count = CD_DEFAULT_RETRY;
static int cd_timeout = CD_DEFAULT_TIMEOUT;
static SYSCTL_NODE(_kern_cam, OID_AUTO, cd, CTLFLAG_RD, 0, "CAM CDROM driver");
-SYSCTL_INT(_kern_cam_cd, OID_AUTO, poll_period, CTLFLAG_RW,
+SYSCTL_INT(_kern_cam_cd, OID_AUTO, poll_period, CTLFLAG_RWTUN,
&cd_poll_period, 0, "Media polling period in seconds");
-TUNABLE_INT("kern.cam.cd.poll_period", &cd_poll_period);
-SYSCTL_INT(_kern_cam_cd, OID_AUTO, retry_count, CTLFLAG_RW,
+SYSCTL_INT(_kern_cam_cd, OID_AUTO, retry_count, CTLFLAG_RWTUN,
&cd_retry_count, 0, "Normal I/O retry count");
-TUNABLE_INT("kern.cam.cd.retry_count", &cd_retry_count);
-SYSCTL_INT(_kern_cam_cd, OID_AUTO, timeout, CTLFLAG_RW,
+SYSCTL_INT(_kern_cam_cd, OID_AUTO, timeout, CTLFLAG_RWTUN,
&cd_timeout, 0, "Timeout, in us, for read operations");
-TUNABLE_INT("kern.cam.cd.timeout", &cd_timeout);
static MALLOC_DEFINE(M_SCSICD, "scsi_cd", "scsi_cd buffers");
diff --git a/sys/cam/scsi/scsi_da.c b/sys/cam/scsi/scsi_da.c
index de2035323b349..0b1f8d595e366 100644
--- a/sys/cam/scsi/scsi_da.c
+++ b/sys/cam/scsi/scsi_da.c
@@ -1188,18 +1188,14 @@ static int da_send_ordered = DA_DEFAULT_SEND_ORDERED;
static SYSCTL_NODE(_kern_cam, OID_AUTO, da, CTLFLAG_RD, 0,
"CAM Direct Access Disk driver");
-SYSCTL_INT(_kern_cam_da, OID_AUTO, poll_period, CTLFLAG_RW,
+SYSCTL_INT(_kern_cam_da, OID_AUTO, poll_period, CTLFLAG_RWTUN,
&da_poll_period, 0, "Media polling period in seconds");
-TUNABLE_INT("kern.cam.da.poll_period", &da_poll_period);
-SYSCTL_INT(_kern_cam_da, OID_AUTO, retry_count, CTLFLAG_RW,
+SYSCTL_INT(_kern_cam_da, OID_AUTO, retry_count, CTLFLAG_RWTUN,
&da_retry_count, 0, "Normal I/O retry count");
-TUNABLE_INT("kern.cam.da.retry_count", &da_retry_count);
-SYSCTL_INT(_kern_cam_da, OID_AUTO, default_timeout, CTLFLAG_RW,
+SYSCTL_INT(_kern_cam_da, OID_AUTO, default_timeout, CTLFLAG_RWTUN,
&da_default_timeout, 0, "Normal I/O timeout (in seconds)");
-TUNABLE_INT("kern.cam.da.default_timeout", &da_default_timeout);
-SYSCTL_INT(_kern_cam_da, OID_AUTO, send_ordered, CTLFLAG_RW,
+SYSCTL_INT(_kern_cam_da, OID_AUTO, send_ordered, CTLFLAG_RWTUN,
&da_send_ordered, 0, "Send Ordered Tags");
-TUNABLE_INT("kern.cam.da.send_ordered", &da_send_ordered);
/*
* DA_ORDEREDTAG_INTERVAL determines how often, relative
@@ -1383,10 +1379,7 @@ dastrategy(struct bio *bp)
* Place it in the queue of disk activities for this disk
*/
if (bp->bio_cmd == BIO_DELETE) {
- if (DA_SIO)
- bioq_disksort(&softc->delete_queue, bp);
- else
- bioq_insert_tail(&softc->delete_queue, bp);
+ bioq_disksort(&softc->delete_queue, bp);
} else if (DA_SIO) {
bioq_disksort(&softc->bio_queue, bp);
} else {
@@ -2805,16 +2798,9 @@ cmd6workaround(union ccb *ccb)
da_delete_method_desc[old_method],
da_delete_method_desc[softc->delete_method]);
- if (DA_SIO) {
- while ((bp = bioq_takefirst(&softc->delete_run_queue))
- != NULL)
- bioq_disksort(&softc->delete_queue, bp);
- } else {
- while ((bp = bioq_takefirst(&softc->delete_run_queue))
- != NULL)
- bioq_insert_tail(&softc->delete_queue, bp);
- }
- bioq_insert_tail(&softc->delete_queue,
+ while ((bp = bioq_takefirst(&softc->delete_run_queue)) != NULL)
+ bioq_disksort(&softc->delete_queue, bp);
+ bioq_disksort(&softc->delete_queue,
(struct bio *)ccb->ccb_h.ccb_bp);
ccb->ccb_h.ccb_bp = NULL;
return (0);
diff --git a/sys/cam/scsi/scsi_da.h b/sys/cam/scsi/scsi_da.h
index 4fbd7256af91e..a27b17325b1ee 100644
--- a/sys/cam/scsi/scsi_da.h
+++ b/sys/cam/scsi/scsi_da.h
@@ -222,18 +222,49 @@ struct scsi_read_format_capacities
uint8_t reserved1[3];
};
-struct scsi_verify
+struct scsi_verify_10
{
- uint8_t opcode; /* VERIFY */
+ uint8_t opcode; /* VERIFY(10) */
uint8_t byte2;
#define SVFY_LUN_MASK 0xE0
#define SVFY_RELADR 0x01
-#define SVFY_BYTECHK 0x02
+#define SVFY_BYTCHK 0x02
#define SVFY_DPO 0x10
uint8_t addr[4]; /* LBA to begin verification at */
- uint8_t reserved0[1];
- uint8_t len[2]; /* number of blocks to verify */
- uint8_t reserved1[3];
+ uint8_t group;
+ uint8_t length[2]; /* number of blocks to verify */
+ uint8_t control;
+};
+
+struct scsi_verify_12
+{
+ uint8_t opcode; /* VERIFY(12) */
+ uint8_t byte2;
+ uint8_t addr[4]; /* LBA to begin verification at */
+ uint8_t length[4]; /* number of blocks to verify */
+ uint8_t group;
+ uint8_t control;
+};
+
+struct scsi_verify_16
+{
+ uint8_t opcode; /* VERIFY(16) */
+ uint8_t byte2;
+ uint8_t addr[8]; /* LBA to begin verification at */
+ uint8_t length[4]; /* number of blocks to verify */
+ uint8_t group;
+ uint8_t control;
+};
+
+struct scsi_compare_and_write
+{
+ uint8_t opcode; /* COMPARE AND WRITE */
+ uint8_t byte2;
+ uint8_t addr[8]; /* LBA to begin verification at */
+ uint8_t reserved[3];
+ uint8_t length; /* number of blocks */
+ uint8_t group;
+ uint8_t control;
};
struct scsi_write_and_verify
diff --git a/sys/cam/scsi/scsi_enc_safte.c b/sys/cam/scsi/scsi_enc_safte.c
index 8282d013bb584..89f70a5ada232 100644
--- a/sys/cam/scsi/scsi_enc_safte.c
+++ b/sys/cam/scsi/scsi_enc_safte.c
@@ -226,9 +226,8 @@ static char *safte_2little = "Too Little Data Returned (%d) at line %d\n";
int emulate_array_devices = 1;
SYSCTL_DECL(_kern_cam_enc);
-SYSCTL_INT(_kern_cam_enc, OID_AUTO, emulate_array_devices, CTLFLAG_RW,
+SYSCTL_INT(_kern_cam_enc, OID_AUTO, emulate_array_devices, CTLFLAG_RWTUN,
&emulate_array_devices, 0, "Emulate Array Devices for SAF-TE");
-TUNABLE_INT("kern.cam.enc.emulate_array_devices", &emulate_array_devices);
static int
safte_fill_read_buf_io(enc_softc_t *enc, struct enc_fsm_state *state,
diff --git a/sys/cam/scsi/scsi_sa.c b/sys/cam/scsi/scsi_sa.c
index c1cd0f0bfdf60..16b7b149036f3 100644
--- a/sys/cam/scsi/scsi_sa.c
+++ b/sys/cam/scsi/scsi_sa.c
@@ -445,9 +445,10 @@ static int sa_allow_io_split = SA_DEFAULT_IO_SPLIT;
* is bad behavior, because it hides the true tape block size from the
* application.
*/
-TUNABLE_INT("kern.cam.sa.allow_io_split", &sa_allow_io_split);
static SYSCTL_NODE(_kern_cam, OID_AUTO, sa, CTLFLAG_RD, 0,
"CAM Sequential Access Tape Driver");
+SYSCTL_INT(_kern_cam_sa, OID_AUTO, allow_io_split, CTLFLAG_RDTUN,
+ &sa_allow_io_split, 0, "Default I/O split value");
static struct periph_driver sadriver =
{
@@ -1494,7 +1495,7 @@ sasysctlinit(void *context, int pending)
goto bailout;
SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
- OID_AUTO, "allow_io_split", CTLTYPE_INT | CTLFLAG_RDTUN,
+ OID_AUTO, "allow_io_split", CTLTYPE_INT | CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
&softc->allow_io_split, 0, "Allow Splitting I/O");
SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
OID_AUTO, "maxio", CTLTYPE_INT | CTLFLAG_RD,
diff --git a/sys/cam/scsi/scsi_sg.c b/sys/cam/scsi/scsi_sg.c
index f1cb75bf283f2..3e80ac3e763f6 100644
--- a/sys/cam/scsi/scsi_sg.c
+++ b/sys/cam/scsi/scsi_sg.c
@@ -494,7 +494,7 @@ sgioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
struct ccb_scsiio *csio;
struct cam_periph *periph;
struct sg_softc *softc;
- struct sg_io_hdr req;
+ struct sg_io_hdr *req;
int dir, error;
periph = (struct cam_periph *)dev->si_drv1;
@@ -507,40 +507,22 @@ sgioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
error = 0;
switch (cmd) {
- case LINUX_SCSI_GET_BUS_NUMBER: {
- int busno;
-
- busno = xpt_path_path_id(periph->path);
- error = copyout(&busno, arg, sizeof(busno));
- break;
- }
- case LINUX_SCSI_GET_IDLUN: {
- struct scsi_idlun idlun;
- struct cam_sim *sim;
+ case SG_GET_VERSION_NUM:
+ {
+ int *version = (int *)arg;
- idlun.dev_id = xpt_path_target_id(periph->path);
- sim = xpt_path_sim(periph->path);
- idlun.host_unique_id = sim->unit_number;
- error = copyout(&idlun, arg, sizeof(idlun));
+ *version = sg_version;
break;
}
- case SG_GET_VERSION_NUM:
- case LINUX_SG_GET_VERSION_NUM:
- error = copyout(&sg_version, arg, sizeof(sg_version));
- break;
case SG_SET_TIMEOUT:
- case LINUX_SG_SET_TIMEOUT: {
- u_int user_timeout;
+ {
+ u_int user_timeout = *(u_int *)arg;
- error = copyin(arg, &user_timeout, sizeof(u_int));
- if (error == 0) {
- softc->sg_user_timeout = user_timeout;
- softc->sg_timeout = user_timeout / SG_DEFAULT_HZ * hz;
- }
+ softc->sg_user_timeout = user_timeout;
+ softc->sg_timeout = user_timeout / SG_DEFAULT_HZ * hz;
break;
}
case SG_GET_TIMEOUT:
- case LINUX_SG_GET_TIMEOUT:
/*
* The value is returned directly to the syscall.
*/
@@ -548,17 +530,14 @@ sgioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
error = 0;
break;
case SG_IO:
- case LINUX_SG_IO:
- error = copyin(arg, &req, sizeof(req));
- if (error)
- break;
+ req = (struct sg_io_hdr *)arg;
- if (req.cmd_len > IOCDBLEN) {
+ if (req->cmd_len > IOCDBLEN) {
error = EINVAL;
break;
}
- if (req.iovec_count != 0) {
+ if (req->iovec_count != 0) {
error = EOPNOTSUPP;
break;
}
@@ -566,14 +545,14 @@ sgioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
csio = &ccb->csio;
- error = copyin(req.cmdp, &csio->cdb_io.cdb_bytes,
- req.cmd_len);
+ error = copyin(req->cmdp, &csio->cdb_io.cdb_bytes,
+ req->cmd_len);
if (error) {
xpt_release_ccb(ccb);
break;
}
- switch(req.dxfer_direction) {
+ switch(req->dxfer_direction) {
case SG_DXFER_TO_DEV:
dir = CAM_DIR_OUT;
break;
@@ -594,62 +573,64 @@ sgioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
sgdone,
dir|CAM_DEV_QFRZDIS,
MSG_SIMPLE_Q_TAG,
- req.dxferp,
- req.dxfer_len,
- req.mx_sb_len,
- req.cmd_len,
- req.timeout);
+ req->dxferp,
+ req->dxfer_len,
+ req->mx_sb_len,
+ req->cmd_len,
+ req->timeout);
error = sgsendccb(periph, ccb);
if (error) {
- req.host_status = DID_ERROR;
- req.driver_status = DRIVER_INVALID;
+ req->host_status = DID_ERROR;
+ req->driver_status = DRIVER_INVALID;
xpt_release_ccb(ccb);
break;
}
- req.status = csio->scsi_status;
- req.masked_status = (csio->scsi_status >> 1) & 0x7f;
- sg_scsiio_status(csio, &req.host_status, &req.driver_status);
- req.resid = csio->resid;
- req.duration = csio->ccb_h.timeout;
- req.info = 0;
+ req->status = csio->scsi_status;
+ req->masked_status = (csio->scsi_status >> 1) & 0x7f;
+ sg_scsiio_status(csio, &req->host_status, &req->driver_status);
+ req->resid = csio->resid;
+ req->duration = csio->ccb_h.timeout;
+ req->info = 0;
- error = copyout(&req, arg, sizeof(req));
- if ((error == 0) && (csio->ccb_h.status & CAM_AUTOSNS_VALID)
- && (req.sbp != NULL)) {
- req.sb_len_wr = req.mx_sb_len - csio->sense_resid;
- error = copyout(&csio->sense_data, req.sbp,
- req.sb_len_wr);
+ if ((csio->ccb_h.status & CAM_AUTOSNS_VALID)
+ && (req->sbp != NULL)) {
+ req->sb_len_wr = req->mx_sb_len - csio->sense_resid;
+ error = copyout(&csio->sense_data, req->sbp,
+ req->sb_len_wr);
}
xpt_release_ccb(ccb);
break;
case SG_GET_RESERVED_SIZE:
- case LINUX_SG_GET_RESERVED_SIZE: {
- int size = 32768;
-
- error = copyout(&size, arg, sizeof(size));
+ {
+ int *size = (int *)arg;
+ *size = DFLTPHYS;
break;
}
case SG_GET_SCSI_ID:
- case LINUX_SG_GET_SCSI_ID:
{
- struct sg_scsi_id id;
+ struct sg_scsi_id *id = (struct sg_scsi_id *)arg;
- id.host_no = cam_sim_path(xpt_path_sim(periph->path));
- id.channel = xpt_path_path_id(periph->path);
- id.scsi_id = xpt_path_target_id(periph->path);
- id.lun = xpt_path_lun_id(periph->path);
- id.scsi_type = softc->pd_type;
- id.h_cmd_per_lun = 1;
- id.d_queue_depth = 1;
- id.unused[0] = 0;
- id.unused[1] = 0;
+ id->host_no = cam_sim_path(xpt_path_sim(periph->path));
+ id->channel = xpt_path_path_id(periph->path);
+ id->scsi_id = xpt_path_target_id(periph->path);
+ id->lun = xpt_path_lun_id(periph->path);
+ id->scsi_type = softc->pd_type;
+ id->h_cmd_per_lun = 1;
+ id->d_queue_depth = 1;
+ id->unused[0] = 0;
+ id->unused[1] = 0;
+ break;
+ }
- error = copyout(&id, arg, sizeof(id));
+ case SG_GET_SG_TABLESIZE:
+ {
+ int *size = (int *)arg;
+ *size = 0;
break;
}
@@ -664,7 +645,6 @@ sgioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
case SG_GET_ACCESS_COUNT:
case SG_SET_FORCE_LOW_DMA:
case SG_GET_LOW_DMA:
- case SG_GET_SG_TABLESIZE:
case SG_SET_FORCE_PACK_ID:
case SG_GET_PACK_ID:
case SG_SET_RESERVED_SIZE:
@@ -672,25 +652,6 @@ sgioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
case SG_SET_COMMAND_Q:
case SG_SET_DEBUG:
case SG_NEXT_CMD_LEN:
- case LINUX_SG_EMULATED_HOST:
- case LINUX_SG_SET_TRANSFORM:
- case LINUX_SG_GET_TRANSFORM:
- case LINUX_SG_GET_NUM_WAITING:
- case LINUX_SG_SCSI_RESET:
- case LINUX_SG_GET_REQUEST_TABLE:
- case LINUX_SG_SET_KEEP_ORPHAN:
- case LINUX_SG_GET_KEEP_ORPHAN:
- case LINUX_SG_GET_ACCESS_COUNT:
- case LINUX_SG_SET_FORCE_LOW_DMA:
- case LINUX_SG_GET_LOW_DMA:
- case LINUX_SG_GET_SG_TABLESIZE:
- case LINUX_SG_SET_FORCE_PACK_ID:
- case LINUX_SG_GET_PACK_ID:
- case LINUX_SG_SET_RESERVED_SIZE:
- case LINUX_SG_GET_COMMAND_Q:
- case LINUX_SG_SET_COMMAND_Q:
- case LINUX_SG_SET_DEBUG:
- case LINUX_SG_NEXT_CMD_LEN:
default:
#ifdef CAMDEBUG
printf("sgioctl: rejecting cmd 0x%lx\n", cmd);
@@ -729,6 +690,12 @@ sgwrite(struct cdev *dev, struct uio *uio, int ioflag)
if (error)
goto out_hdr;
+ /* XXX: We don't support SG 3.x read/write API. */
+ if (hdr->reply_len < 0) {
+ error = ENODEV;
+ goto out_hdr;
+ }
+
ccb = xpt_alloc_ccb();
if (ccb == NULL) {
error = ENOMEM;
diff --git a/sys/cam/scsi/scsi_sg.h b/sys/cam/scsi/scsi_sg.h
index 60dbbfe7c64fe..210eec5f4f460 100644
--- a/sys/cam/scsi/scsi_sg.h
+++ b/sys/cam/scsi/scsi_sg.h
@@ -8,31 +8,31 @@
#define _SCSI_SG_H
#define SGIOC '"'
-#define SG_SET_TIMEOUT _IO(SGIOC, 0x01)
+#define SG_SET_TIMEOUT _IOW(SGIOC, 0x01, u_int)
#define SG_GET_TIMEOUT _IO(SGIOC, 0x02)
-#define SG_EMULATED_HOST _IO(SGIOC, 0x03)
+#define SG_EMULATED_HOST _IOR(SGIOC, 0x03, int)
#define SG_SET_TRANSFORM _IO(SGIOC, 0x04)
#define SG_GET_TRANSFORM _IO(SGIOC, 0x05)
-#define SG_GET_COMMAND_Q _IO(SGIOC, 0x70)
-#define SG_SET_COMMAND_Q _IO(SGIOC, 0x71)
-#define SG_GET_RESERVED_SIZE _IO(SGIOC, 0x72)
-#define SG_SET_RESERVED_SIZE _IO(SGIOC, 0x75)
-#define SG_GET_SCSI_ID _IO(SGIOC, 0x76)
-#define SG_SET_FORCE_LOW_DMA _IO(SGIOC, 0x79)
-#define SG_GET_LOW_DMA _IO(SGIOC, 0x7a)
-#define SG_SET_FORCE_PACK_ID _IO(SGIOC, 0x7b)
-#define SG_GET_PACK_ID _IO(SGIOC, 0x7c)
-#define SG_GET_NUM_WAITING _IO(SGIOC, 0x7d)
-#define SG_SET_DEBUG _IO(SGIOC, 0x7e)
-#define SG_GET_SG_TABLESIZE _IO(SGIOC, 0x7f)
-#define SG_GET_VERSION_NUM _IO(SGIOC, 0x82)
-#define SG_NEXT_CMD_LEN _IO(SGIOC, 0x83)
-#define SG_SCSI_RESET _IO(SGIOC, 0x84)
-#define SG_IO _IO(SGIOC, 0x85)
+#define SG_GET_COMMAND_Q _IOW(SGIOC, 0x70, int)
+#define SG_SET_COMMAND_Q _IOR(SGIOC, 0x71, int)
+#define SG_GET_RESERVED_SIZE _IOR(SGIOC, 0x72, int)
+#define SG_SET_RESERVED_SIZE _IOW(SGIOC, 0x75, int)
+#define SG_GET_SCSI_ID _IOR(SGIOC, 0x76, struct sg_scsi_id)
+#define SG_SET_FORCE_LOW_DMA _IOW(SGIOC, 0x79, int)
+#define SG_GET_LOW_DMA _IOR(SGIOC, 0x7a, int)
+#define SG_SET_FORCE_PACK_ID _IOW(SGIOC, 0x7b, int)
+#define SG_GET_PACK_ID _IOR(SGIOC, 0x7c, int)
+#define SG_GET_NUM_WAITING _IOR(SGIOC, 0x7d, int)
+#define SG_SET_DEBUG _IOW(SGIOC, 0x7e, int)
+#define SG_GET_SG_TABLESIZE _IOR(SGIOC, 0x7f, int)
+#define SG_GET_VERSION_NUM _IOR(SGIOC, 0x82, int)
+#define SG_NEXT_CMD_LEN _IOW(SGIOC, 0x83, int)
+#define SG_SCSI_RESET _IOW(SGIOC, 0x84, int)
+#define SG_IO _IOWR(SGIOC, 0x85, struct sg_io_hdr)
#define SG_GET_REQUEST_TABLE _IO(SGIOC, 0x86)
-#define SG_SET_KEEP_ORPHAN _IO(SGIOC, 0x87)
-#define SG_GET_KEEP_ORPHAN _IO(SGIOC, 0x88)
-#define SG_GET_ACCESS_COUNT _IO(SGIOC, 0x89)
+#define SG_SET_KEEP_ORPHAN _IOW(SGIOC, 0x87, int)
+#define SG_GET_KEEP_ORPHAN _IOR(SGIOC, 0x88, int)
+#define SG_GET_ACCESS_COUNT _IOR(SGIOC, 0x89, int)
struct sg_io_hdr {
int interface_id;
@@ -59,6 +59,31 @@ struct sg_io_hdr {
u_int info;
};
+struct sg_io_hdr32 {
+ int interface_id;
+ int dxfer_direction;
+ u_char cmd_len;
+ u_char mx_sb_len;
+ u_short iovec_count;
+ u_int dxfer_len;
+ uint32_t dxferp;
+ uint32_t cmdp;
+ uint32_t sbp;
+ u_int timeout;
+ u_int flags;
+ int pack_id;
+ uint32_t usr_ptr;
+ u_char status;
+ u_char masked_status;
+ u_char msg_status;
+ u_char sb_len_wr;
+ u_short host_status;
+ u_short driver_status;
+ int resid;
+ u_int duration;
+ u_int info;
+};
+
#define SG_DXFER_NONE -1
#define SG_DXFER_TO_DEV -2
#define SG_DXFER_FROM_DEV -3
diff --git a/sys/cam/scsi/scsi_xpt.c b/sys/cam/scsi/scsi_xpt.c
index 942f7121cf5a1..42b8774fd88ad 100644
--- a/sys/cam/scsi/scsi_xpt.c
+++ b/sys/cam/scsi/scsi_xpt.c
@@ -78,9 +78,8 @@ struct scsi_quirk_entry {
#define SCSI_QUIRK(dev) ((struct scsi_quirk_entry *)((dev)->quirk))
static int cam_srch_hi = 0;
-TUNABLE_INT("kern.cam.cam_srch_hi", &cam_srch_hi);
static int sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS);
-SYSCTL_PROC(_kern_cam, OID_AUTO, cam_srch_hi, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
+SYSCTL_PROC(_kern_cam, OID_AUTO, cam_srch_hi, CTLTYPE_INT | CTLFLAG_RWTUN, 0, 0,
sysctl_cam_search_luns, "I",
"allow search above LUN 7 for SCSI3 and greater devices");