Diffstat (limited to 'share/examples/scsi_target')
-rw-r--r--  share/examples/scsi_target/Makefile       |  14
-rw-r--r--  share/examples/scsi_target/scsi_cmds.c    | 809
-rw-r--r--  share/examples/scsi_target/scsi_target.8  | 156
-rw-r--r--  share/examples/scsi_target/scsi_target.c  | 990
-rw-r--r--  share/examples/scsi_target/scsi_target.h  | 133
5 files changed, 2102 insertions, 0 deletions
diff --git a/share/examples/scsi_target/Makefile b/share/examples/scsi_target/Makefile
new file mode 100644
index 000000000000..42dd2ca58513
--- /dev/null
+++ b/share/examples/scsi_target/Makefile
@@ -0,0 +1,14 @@
+PACKAGE=examples
+FILESDIR=${SHAREDIR}/examples/${PROG}
+PROG= scsi_target
+SRCS= scsi_target.h scsi_target.c scsi_cmds.c
+DPADD= ${LIBCAM} ${LIBSBUF}
+LIBADD+= cam
+LIBADD+= sbuf
+# cast-qual is triggered only in a code path where the volatile keyword doesn't
+# matter
+CFLAGS.scsi_cmds.c= -Wno-cast-qual
+
+MAN= scsi_target.8
+
+.include <bsd.prog.mk>
diff --git a/share/examples/scsi_target/scsi_cmds.c b/share/examples/scsi_target/scsi_cmds.c
new file mode 100644
index 000000000000..122d4dec6287
--- /dev/null
+++ b/share/examples/scsi_target/scsi_cmds.c
@@ -0,0 +1,809 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * SCSI Disk Emulator
+ *
+ * Copyright (c) 2002 Nate Lawson.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stddef.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <string.h>
+#include <err.h>
+#include <aio.h>
+#include <unistd.h>
+#include <assert.h>
+#include <sys/param.h>
+#include <sys/types.h>
+
+#include <cam/cam.h>
+#include <cam/cam_ccb.h>
+#include <cam/scsi/scsi_all.h>
+#include <cam/scsi/scsi_targetio.h>
+#include "scsi_target.h"
+
+typedef int targ_start_func(struct ccb_accept_tio *, struct ccb_scsiio *);
+typedef void targ_done_func(struct ccb_accept_tio *, struct ccb_scsiio *,
+ io_ops);
+#ifndef REPORT_LUNS
+#define REPORT_LUNS 0xa0
+#endif
+
+struct targ_cdb_handlers {
+ u_int8_t cmd;
+ targ_start_func *start;
+ targ_done_func *done;
+#define ILLEGAL_CDB 0xFF
+};
+
+static targ_start_func tcmd_inquiry;
+static targ_start_func tcmd_req_sense;
+static targ_start_func tcmd_rd_cap;
+#ifdef READ_16
+static targ_start_func tcmd_rd_cap16;
+#endif
+static targ_start_func tcmd_rdwr;
+static targ_start_func tcmd_rdwr_decode;
+static targ_done_func tcmd_rdwr_done;
+static targ_start_func tcmd_null_ok;
+static targ_start_func tcmd_illegal_req;
+static int start_io(struct ccb_accept_tio *atio,
+ struct ccb_scsiio *ctio, int dir);
+static int init_inquiry(u_int16_t req_flags, u_int16_t sim_flags);
+static struct initiator_state *
+ tcmd_get_istate(u_int init_id);
+static void cdb_debug(u_int8_t *cdb, const char *msg, ...);
+
+static struct targ_cdb_handlers cdb_handlers[] = {
+ { READ_10, tcmd_rdwr, tcmd_rdwr_done },
+ { WRITE_10, tcmd_rdwr, tcmd_rdwr_done },
+ { READ_6, tcmd_rdwr, tcmd_rdwr_done },
+ { WRITE_6, tcmd_rdwr, tcmd_rdwr_done },
+ { INQUIRY, tcmd_inquiry, NULL },
+ { REQUEST_SENSE, tcmd_req_sense, NULL },
+ { READ_CAPACITY, tcmd_rd_cap, NULL },
+ { TEST_UNIT_READY, tcmd_null_ok, NULL },
+ { START_STOP_UNIT, tcmd_null_ok, NULL },
+ { SYNCHRONIZE_CACHE, tcmd_null_ok, NULL },
+ { MODE_SENSE_6, tcmd_illegal_req, NULL },
+ { MODE_SELECT_6, tcmd_illegal_req, NULL },
+ { REPORT_LUNS, tcmd_illegal_req, NULL },
+#ifdef READ_16
+ { READ_16, tcmd_rdwr, tcmd_rdwr_done },
+ { WRITE_16, tcmd_rdwr, tcmd_rdwr_done },
+ { SERVICE_ACTION_IN, tcmd_rd_cap16, NULL },
+#endif
+ { ILLEGAL_CDB, NULL, NULL }
+};
+
+static struct scsi_inquiry_data inq_data;
+static struct initiator_state istates[MAX_INITIATORS];
+
+cam_status
+tcmd_init(u_int16_t req_inq_flags, u_int16_t sim_inq_flags)
+{
+ struct initiator_state *istate;
+ int i, ret;
+
+ /* Initialize our inquiry data */
+ ret = init_inquiry(req_inq_flags, sim_inq_flags);
+ if (ret != 0)
+ return (ret);
+
+ /* We start out life with a UA to indicate power-on/reset. */
+ for (i = 0; i < MAX_INITIATORS; i++) {
+ istate = tcmd_get_istate(i);
+ bzero(istate, sizeof(*istate));
+ istate->pending_ua = UA_POWER_ON;
+ }
+
+ return (0);
+}
+
+/*
+ * Caller allocates the CTIO and sets its init_id.
+ * Returns 0 if done, 1 if more processing is needed;
+ * on 0, the caller sets SEND_STATUS.
+ */
+int
+tcmd_handle(struct ccb_accept_tio *atio, struct ccb_scsiio *ctio, io_ops event)
+{
+ static struct targ_cdb_handlers *last_cmd;
+ struct initiator_state *istate;
+ struct atio_descr *a_descr;
+ int ret;
+
+ if (debug) {
+ warnx("tcmd_handle atio %p ctio %p atioflags %#x", atio, ctio,
+ atio->ccb_h.flags);
+ }
+ ret = 0;
+ a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;
+
+ /* Do a full lookup if one-behind cache failed */
+ if (last_cmd == NULL || last_cmd->cmd != a_descr->cdb[0]) {
+ struct targ_cdb_handlers *h;
+
+ for (h = cdb_handlers; h->cmd != ILLEGAL_CDB; h++) {
+ if (a_descr->cdb[0] == h->cmd)
+ break;
+ }
+ last_cmd = h;
+ }
+
+ /* call completion and exit */
+ if (event != ATIO_WORK) {
+ if (last_cmd->done != NULL)
+ last_cmd->done(atio, ctio, event);
+ else
+ free_ccb((union ccb *)ctio);
+ return (1);
+ }
+
+ if (last_cmd->cmd == ILLEGAL_CDB) {
+ if (event != ATIO_WORK) {
+ warnx("no done func for %#x???", a_descr->cdb[0]);
+ abort();
+ }
+ /* Not found, return illegal request */
+ warnx("cdb %#x not handled", a_descr->cdb[0]);
+ tcmd_illegal_req(atio, ctio);
+ send_ccb((union ccb *)ctio, /*priority*/1);
+ return (0);
+ }
+
+ istate = tcmd_get_istate(ctio->init_id);
+ if (istate == NULL) {
+ tcmd_illegal_req(atio, ctio);
+ send_ccb((union ccb *)ctio, /*priority*/1);
+ return (0);
+ }
+
+ if (istate->pending_ca == 0 && istate->pending_ua != 0 &&
+ a_descr->cdb[0] != INQUIRY) {
+ tcmd_sense(ctio->init_id, ctio, SSD_KEY_UNIT_ATTENTION,
+ 0x29, istate->pending_ua == UA_POWER_ON ? 1 : 2);
+ istate->pending_ca = CA_UNIT_ATTN;
+ if (debug) {
+ cdb_debug(a_descr->cdb, "UA active for %u: ",
+ atio->init_id);
+ }
+ send_ccb((union ccb *)ctio, /*priority*/1);
+ return (0);
+ }
+
+ /* Store current CA and UA for later */
+ istate->orig_ua = istate->pending_ua;
+ istate->orig_ca = istate->pending_ca;
+
+ /*
+ * As per SAM2, any command that occurs
+ * after a CA is reported, clears the CA. We must
+ * also clear the UA condition, if any, that caused
+ * the CA to occur assuming the UA is not for a
+ * persistent condition.
+ */
+ istate->pending_ca = CA_NONE;
+ if (istate->orig_ca == CA_UNIT_ATTN)
+ istate->pending_ua = UA_NONE;
+
+ /* If we have a valid handler, call start or completion function */
+ if (last_cmd->cmd != ILLEGAL_CDB) {
+ ret = last_cmd->start(atio, ctio);
+ /* XXX hack */
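+		/*
+		 * tcmd_rdwr sends its own CTIOs from start_io() once the
+		 * command is decoded, so only send the CTIO here for the
+		 * non-read/write handlers.
+		 */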
+ if (last_cmd->start != tcmd_rdwr) {
+ a_descr->init_req += ctio->dxfer_len;
+ send_ccb((union ccb *)ctio, /*priority*/1);
+ }
+ }
+
+ return (ret);
+}
+
+static struct initiator_state *
+tcmd_get_istate(u_int init_id)
+{
+ if (init_id >= MAX_INITIATORS) {
+		warnx("illegal init_id %u, max %u", init_id,
+		    MAX_INITIATORS - 1);
+ return (NULL);
+ } else {
+ return (&istates[init_id]);
+ }
+}
+
+void
+tcmd_sense(u_int init_id, struct ccb_scsiio *ctio, u_int8_t flags,
+ u_int8_t asc, u_int8_t ascq)
+{
+ struct initiator_state *istate;
+ struct scsi_sense_data_fixed *sense;
+
+ /* Set our initiator's istate */
+ istate = tcmd_get_istate(init_id);
+ if (istate == NULL)
+ return;
+ istate->pending_ca |= CA_CMD_SENSE; /* XXX set instead of or? */
+ sense = (struct scsi_sense_data_fixed *)&istate->sense_data;
+ bzero(sense, sizeof(*sense));
+ sense->error_code = SSD_CURRENT_ERROR;
+ sense->flags = flags;
+ sense->add_sense_code = asc;
+ sense->add_sense_code_qual = ascq;
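+	/*
+	 * Per SPC, extra_len is the number of sense bytes that follow the
+	 * extra_len field (here, through the sense-key-specific bytes).
+	 */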
+ sense->extra_len =
+ offsetof(struct scsi_sense_data_fixed, sense_key_spec[2]) -
+ offsetof(struct scsi_sense_data_fixed, extra_len);
+
+ /* Fill out the supplied CTIO */
+ if (ctio != NULL) {
+ bcopy(sense, &ctio->sense_data, sizeof(*sense));
+ ctio->sense_len = sizeof(*sense); /* XXX */
+ ctio->ccb_h.flags &= ~CAM_DIR_MASK;
+ ctio->ccb_h.flags |= CAM_DIR_NONE | CAM_SEND_SENSE |
+ CAM_SEND_STATUS;
+ ctio->dxfer_len = 0;
+ ctio->scsi_status = SCSI_STATUS_CHECK_COND;
+ }
+}
+
+void
+tcmd_ua(u_int init_id, ua_types new_ua)
+{
+ struct initiator_state *istate;
+ u_int start, end;
+
+ if (init_id == CAM_TARGET_WILDCARD) {
+ start = 0;
+ end = MAX_INITIATORS - 1;
+ } else {
+ start = end = init_id;
+ }
+
+ for (; start <= end; start++) {
+ istate = tcmd_get_istate(start);
+ if (istate == NULL)
+ break;
+ istate->pending_ua = new_ua;
+ }
+}
+
+static int
+tcmd_inquiry(struct ccb_accept_tio *atio, struct ccb_scsiio *ctio)
+{
+ struct scsi_inquiry *inq;
+ struct atio_descr *a_descr;
+ struct initiator_state *istate;
+ struct scsi_sense_data_fixed *sense;
+
+ a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;
+ inq = (struct scsi_inquiry *)a_descr->cdb;
+
+ if (debug)
+ cdb_debug(a_descr->cdb, "INQUIRY from %u: ", atio->init_id);
+ /*
+ * Validate the command. We don't support any VPD pages, so
+ * complain if EVPD or CMDDT is set.
+ */
+ istate = tcmd_get_istate(ctio->init_id);
+ sense = (struct scsi_sense_data_fixed *)&istate->sense_data;
+ if ((inq->byte2 & SI_EVPD) != 0) {
+ tcmd_illegal_req(atio, ctio);
+ sense->sense_key_spec[0] = SSD_SCS_VALID | SSD_FIELDPTR_CMD |
+ SSD_BITPTR_VALID | /*bit value*/1;
+ sense->sense_key_spec[1] = 0;
+ sense->sense_key_spec[2] =
+ offsetof(struct scsi_inquiry, byte2);
+ } else if (inq->page_code != 0) {
+ tcmd_illegal_req(atio, ctio);
+ sense->sense_key_spec[0] = SSD_SCS_VALID | SSD_FIELDPTR_CMD;
+ sense->sense_key_spec[1] = 0;
+ sense->sense_key_spec[2] =
+ offsetof(struct scsi_inquiry, page_code);
+ } else {
+ bcopy(&inq_data, ctio->data_ptr, sizeof(inq_data));
+ ctio->dxfer_len = inq_data.additional_length + 4;
+ ctio->dxfer_len = min(ctio->dxfer_len,
+ scsi_2btoul(inq->length));
+ ctio->ccb_h.flags |= CAM_DIR_IN | CAM_SEND_STATUS;
+ ctio->scsi_status = SCSI_STATUS_OK;
+ }
+ return (0);
+}
+
+/* Initialize the inquiry response structure with the requested flags */
+static int
+init_inquiry(u_int16_t req_flags, u_int16_t sim_flags)
+{
+ struct scsi_inquiry_data *inq;
+
+ inq = &inq_data;
+ bzero(inq, sizeof(*inq));
+ inq->device = T_DIRECT | (SID_QUAL_LU_CONNECTED << 5);
+#ifdef SCSI_REV_SPC
+ inq->version = SCSI_REV_SPC; /* was 2 */
+#else
+ inq->version = SCSI_REV_3; /* was 2 */
+#endif
+
+ /*
+ * XXX cpi.hba_inquiry doesn't support Addr16 so we give the
+ * user what they want if they ask for it.
+ */
+ if ((req_flags & SID_Addr16) != 0) {
+ sim_flags |= SID_Addr16;
+ warnx("Not sure SIM supports Addr16 but enabling it anyway");
+ }
+
+ /* Advertise only what the SIM can actually support */
+ req_flags &= sim_flags;
+ scsi_ulto2b(req_flags, &inq->spc2_flags);
+
+ inq->response_format = 2; /* SCSI2 Inquiry Format */
+ inq->additional_length = SHORT_INQUIRY_LENGTH -
+ offsetof(struct scsi_inquiry_data, additional_length);
+ bcopy("FreeBSD ", inq->vendor, SID_VENDOR_SIZE);
+ bcopy("Emulated Disk ", inq->product, SID_PRODUCT_SIZE);
+ bcopy("0.1 ", inq->revision, SID_REVISION_SIZE);
+ return (0);
+}
+
+static int
+tcmd_req_sense(struct ccb_accept_tio *atio, struct ccb_scsiio *ctio)
+{
+ struct scsi_request_sense *rsense;
+ struct scsi_sense_data_fixed *sense;
+ struct initiator_state *istate;
+ size_t dlen;
+ struct atio_descr *a_descr;
+
+ a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;
+ rsense = (struct scsi_request_sense *)a_descr->cdb;
+
+ istate = tcmd_get_istate(ctio->init_id);
+ sense = (struct scsi_sense_data_fixed *)&istate->sense_data;
+
+ if (debug) {
+ cdb_debug(a_descr->cdb, "REQ SENSE from %u: ", atio->init_id);
+ warnx("Sending sense: %#x %#x %#x", sense->flags,
+ sense->add_sense_code, sense->add_sense_code_qual);
+ }
+
+ if (istate->orig_ca == 0) {
+ tcmd_sense(ctio->init_id, NULL, SSD_KEY_NO_SENSE, 0, 0);
+ warnx("REQUEST SENSE from %u but no pending CA!",
+ ctio->init_id);
+ }
+
+ bcopy(sense, ctio->data_ptr, sizeof(struct scsi_sense_data));
+ dlen = offsetof(struct scsi_sense_data_fixed, extra_len) +
+ sense->extra_len + 1;
+ ctio->dxfer_len = min(dlen, SCSI_CDB6_LEN(rsense->length));
+ ctio->ccb_h.flags |= CAM_DIR_IN | CAM_SEND_STATUS;
+ ctio->scsi_status = SCSI_STATUS_OK;
+ return (0);
+}
+
+static int
+tcmd_rd_cap(struct ccb_accept_tio *atio, struct ccb_scsiio *ctio)
+{
+ struct scsi_read_capacity_data *srp;
+ struct atio_descr *a_descr;
+ uint32_t vsize;
+
+ a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;
+ srp = (struct scsi_read_capacity_data *)ctio->data_ptr;
+
+ if (volume_size > 0xffffffff)
+ vsize = 0xffffffff;
+ else
+ vsize = (uint32_t)(volume_size - 1);
+
+ if (debug) {
+ cdb_debug(a_descr->cdb, "READ CAP from %u (%u, %u): ",
+ atio->init_id, vsize, sector_size);
+ }
+
+ bzero(srp, sizeof(*srp));
+ scsi_ulto4b(vsize, srp->addr);
+ scsi_ulto4b(sector_size, srp->length);
+
+ ctio->dxfer_len = sizeof(*srp);
+ ctio->ccb_h.flags |= CAM_DIR_IN | CAM_SEND_STATUS;
+ ctio->scsi_status = SCSI_STATUS_OK;
+ return (0);
+}
+
+#ifdef READ_16
+static int
+tcmd_rd_cap16(struct ccb_accept_tio *atio, struct ccb_scsiio *ctio)
+{
+ struct scsi_read_capacity_16 *scsi_cmd;
+ struct scsi_read_capacity_data_long *srp;
+ struct atio_descr *a_descr;
+
+ a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;
+ scsi_cmd = (struct scsi_read_capacity_16 *)a_descr->cdb;
+ srp = (struct scsi_read_capacity_data_long *)ctio->data_ptr;
+
+ if (scsi_cmd->service_action != SRC16_SERVICE_ACTION) {
+ tcmd_illegal_req(atio, ctio);
+ return (0);
+ }
+
+ if (debug) {
+		cdb_debug(a_descr->cdb, "READ CAP16 from %u (" OFF_FMT
+		    ", %u): ", atio->init_id, volume_size - 1, sector_size);
+ }
+
+ bzero(srp, sizeof(*srp));
+ scsi_u64to8b(volume_size - 1, srp->addr);
+ scsi_ulto4b(sector_size, srp->length);
+
+ ctio->dxfer_len = sizeof(*srp);
+ ctio->ccb_h.flags |= CAM_DIR_IN | CAM_SEND_STATUS;
+ ctio->scsi_status = SCSI_STATUS_OK;
+ return (0);
+}
+#endif
+
+static int
+tcmd_rdwr(struct ccb_accept_tio *atio, struct ccb_scsiio *ctio)
+{
+ struct atio_descr *a_descr;
+ struct ctio_descr *c_descr;
+ int ret;
+
+ a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;
+ c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;
+
+ /* Command needs to be decoded */
+ if ((a_descr->flags & CAM_DIR_MASK) == CAM_DIR_BOTH) {
+ if (debug)
+ warnx("Calling rdwr_decode");
+ ret = tcmd_rdwr_decode(atio, ctio);
+ if (ret == 0) {
+ send_ccb((union ccb *)ctio, /*priority*/1);
+ return (0);
+ }
+ }
+ ctio->ccb_h.flags |= a_descr->flags;
+
+ /* Call appropriate work function */
+ if ((a_descr->flags & CAM_DIR_IN) != 0) {
+ ret = start_io(atio, ctio, CAM_DIR_IN);
+ if (debug)
+ warnx("Starting %p DIR_IN @" OFF_FMT ":%u",
+ a_descr, c_descr->offset, a_descr->targ_req);
+ } else {
+ ret = start_io(atio, ctio, CAM_DIR_OUT);
+ if (debug)
+ warnx("Starting %p DIR_OUT @" OFF_FMT ":%u",
+ a_descr, c_descr->offset, a_descr->init_req);
+ }
+
+ return (ret);
+}
+
+static int
+tcmd_rdwr_decode(struct ccb_accept_tio *atio, struct ccb_scsiio *ctio)
+{
+ uint64_t blkno;
+ uint32_t count;
+ struct atio_descr *a_descr;
+ u_int8_t *cdb;
+
+ a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;
+ cdb = a_descr->cdb;
+ if (debug)
+ cdb_debug(cdb, "R/W from %u: ", atio->init_id);
+
+ switch (cdb[0]) {
+ case READ_6:
+ case WRITE_6:
+ {
+ struct scsi_rw_6 *rw_6 = (struct scsi_rw_6 *)cdb;
+ blkno = scsi_3btoul(rw_6->addr);
+ count = rw_6->length;
+ break;
+ }
+ case READ_10:
+ case WRITE_10:
+ {
+ struct scsi_rw_10 *rw_10 = (struct scsi_rw_10 *)cdb;
+ blkno = scsi_4btoul(rw_10->addr);
+ count = scsi_2btoul(rw_10->length);
+ break;
+ }
+#ifdef READ_16
+ case READ_16:
+ case WRITE_16:
+ {
+ struct scsi_rw_16 *rw_16 = (struct scsi_rw_16 *)cdb;
+ blkno = scsi_8btou64(rw_16->addr);
+ count = scsi_4btoul(rw_16->length);
+ break;
+ }
+#endif
+ default:
+ tcmd_illegal_req(atio, ctio);
+ return (0);
+ }
+ if (((off_t)(blkno + count)) > volume_size) {
+ warnx("Attempt to access past end of volume");
+ tcmd_sense(ctio->init_id, ctio,
+ SSD_KEY_ILLEGAL_REQUEST, 0x21, 0);
+ return (0);
+ }
+
+ /* Get an (overall) data length and set direction */
+ a_descr->base_off = ((off_t)blkno) * sector_size;
+ a_descr->total_len = count * sector_size;
+ if (a_descr->total_len == 0) {
+ if (debug)
+ warnx("r/w 0 blocks @ blkno " OFF_FMT, blkno);
+ tcmd_null_ok(atio, ctio);
+ return (0);
+	} else if (cdb[0] == WRITE_6 || cdb[0] == WRITE_10
+#ifdef READ_16
+	    || cdb[0] == WRITE_16
+#endif
+	    ) {
+ a_descr->flags |= CAM_DIR_OUT;
+ if (debug)
+ warnx("write %u blocks @ blkno " OFF_FMT, count, blkno);
+ } else {
+ a_descr->flags |= CAM_DIR_IN;
+ if (debug)
+ warnx("read %u blocks @ blkno " OFF_FMT, count, blkno);
+ }
+ return (1);
+}
+
+static int
+start_io(struct ccb_accept_tio *atio, struct ccb_scsiio *ctio, int dir)
+{
+ struct atio_descr *a_descr;
+ struct ctio_descr *c_descr;
+ int ret;
+
+ /* Set up common structures */
+ a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;
+ c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;
+
+ if (dir == CAM_DIR_IN) {
+ c_descr->offset = a_descr->base_off + a_descr->targ_req;
+ ctio->dxfer_len = a_descr->total_len - a_descr->targ_req;
+ } else {
+ c_descr->offset = a_descr->base_off + a_descr->init_req;
+ ctio->dxfer_len = a_descr->total_len - a_descr->init_req;
+ }
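+	/* Clamp each CTIO to the size of its staging buffer */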
+ ctio->dxfer_len = min(ctio->dxfer_len, buf_size);
+ assert(ctio->dxfer_len >= 0);
+
+ c_descr->aiocb.aio_offset = c_descr->offset;
+ c_descr->aiocb.aio_nbytes = ctio->dxfer_len;
+
+ /* If DIR_IN, start read from target, otherwise begin CTIO xfer. */
+ ret = 1;
+ if (dir == CAM_DIR_IN) {
+ if (notaio) {
+ if (debug)
+ warnx("read sync %lu @ block " OFF_FMT,
+ (unsigned long)
+ (ctio->dxfer_len / sector_size),
+ c_descr->offset / sector_size);
+ if (lseek(c_descr->aiocb.aio_fildes,
+ c_descr->aiocb.aio_offset, SEEK_SET) < 0) {
+			err(1, "lseek");
+ }
+ if (read(c_descr->aiocb.aio_fildes,
+ (void *)c_descr->aiocb.aio_buf,
+ ctio->dxfer_len) != ctio->dxfer_len) {
+ err(1, "read");
+ }
+ } else {
+ if (debug)
+ warnx("read async %lu @ block " OFF_FMT,
+ (unsigned long)
+ (ctio->dxfer_len / sector_size),
+ c_descr->offset / sector_size);
+ if (aio_read(&c_descr->aiocb) < 0) {
+ err(1, "aio_read"); /* XXX */
+ }
+ }
+ a_descr->targ_req += ctio->dxfer_len;
+ /* if we're done, we can mark the CCB as to send status */
+ if (a_descr->targ_req == a_descr->total_len) {
+ ctio->ccb_h.flags |= CAM_SEND_STATUS;
+ ctio->scsi_status = SCSI_STATUS_OK;
+ ret = 0;
+ }
+ if (notaio)
+ tcmd_rdwr_done(atio, ctio, AIO_DONE);
+ } else {
+ if (a_descr->targ_ack == a_descr->total_len)
+ tcmd_null_ok(atio, ctio);
+ a_descr->init_req += ctio->dxfer_len;
+ if (a_descr->init_req == a_descr->total_len &&
+ ctio->dxfer_len > 0) {
+ /*
+ * If data phase done, remove atio from workq.
+ * The completion handler will call work_atio to
+ * send the final status.
+ */
+ ret = 0;
+ }
+ send_ccb((union ccb *)ctio, /*priority*/1);
+ }
+
+ return (ret);
+}
+
+static void
+tcmd_rdwr_done(struct ccb_accept_tio *atio, struct ccb_scsiio *ctio,
+ io_ops event)
+{
+ struct atio_descr *a_descr;
+ struct ctio_descr *c_descr;
+
+ a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;
+ c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;
+
+ switch (event) {
+ case AIO_DONE:
+ if (!notaio && aio_return(&c_descr->aiocb) < 0) {
+ warn("aio_return error");
+ /* XXX */
+ tcmd_sense(ctio->init_id, ctio,
+ SSD_KEY_MEDIUM_ERROR, 0, 0);
+ send_ccb((union ccb *)ctio, /*priority*/1);
+ break;
+ }
+ a_descr->targ_ack += ctio->dxfer_len;
+ if ((a_descr->flags & CAM_DIR_IN) != 0) {
+ if (debug) {
+				if (notaio)
+					warnx("sending CTIO for sync read");
+				else
+					warnx("sending CTIO for AIO read");
+ }
+ a_descr->init_req += ctio->dxfer_len;
+ send_ccb((union ccb *)ctio, /*priority*/1);
+ } else {
+ /* Use work function to send final status */
+ if (a_descr->init_req == a_descr->total_len)
+ work_atio(atio);
+ if (debug)
+ warnx("AIO done freeing CTIO");
+ free_ccb((union ccb *)ctio);
+ }
+ break;
+ case CTIO_DONE:
+ switch (ctio->ccb_h.status & CAM_STATUS_MASK) {
+ case CAM_REQ_CMP:
+ break;
+ case CAM_REQUEUE_REQ:
+ warnx("requeueing request");
+ if ((a_descr->flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
+ if (aio_write(&c_descr->aiocb) < 0) {
+ err(1, "aio_write"); /* XXX */
+ }
+ } else {
+ if (aio_read(&c_descr->aiocb) < 0) {
+ err(1, "aio_read"); /* XXX */
+ }
+ }
+ return;
+ default:
+ errx(1, "CTIO failed, status %#x", ctio->ccb_h.status);
+ }
+ a_descr->init_ack += ctio->dxfer_len;
+ if ((a_descr->flags & CAM_DIR_MASK) == CAM_DIR_OUT &&
+ ctio->dxfer_len > 0) {
+ a_descr->targ_req += ctio->dxfer_len;
+ if (notaio) {
+ if (debug)
+ warnx("write sync %lu @ block "
+ OFF_FMT, (unsigned long)
+ (ctio->dxfer_len / sector_size),
+ c_descr->offset / sector_size);
+ if (lseek(c_descr->aiocb.aio_fildes,
+ c_descr->aiocb.aio_offset, SEEK_SET) < 0) {
+				err(1, "lseek");
+ }
+ if (write(c_descr->aiocb.aio_fildes,
+ (void *) c_descr->aiocb.aio_buf,
+ ctio->dxfer_len) != ctio->dxfer_len) {
+ err(1, "write");
+ }
+ tcmd_rdwr_done(atio, ctio, AIO_DONE);
+ } else {
+ if (debug)
+ warnx("write async %lu @ block "
+ OFF_FMT, (unsigned long)
+ (ctio->dxfer_len / sector_size),
+ c_descr->offset / sector_size);
+ if (aio_write(&c_descr->aiocb) < 0) {
+ err(1, "aio_write"); /* XXX */
+ }
+ }
+ } else {
+ if (debug)
+ warnx("CTIO done freeing CTIO");
+ free_ccb((union ccb *)ctio);
+ }
+ break;
+ default:
+ warnx("Unknown completion code %d", event);
+ abort();
+ /* NOTREACHED */
+ }
+}
+
+/* Simple ok message used by TUR, SYNC_CACHE, etc. */
+static int
+tcmd_null_ok(struct ccb_accept_tio *atio, struct ccb_scsiio *ctio)
+{
+ if (debug) {
+ struct atio_descr *a_descr;
+
+ a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;
+		cdb_debug(a_descr->cdb, "Sending null ok to %u: ",
+		    atio->init_id);
+ }
+
+ ctio->dxfer_len = 0;
+ ctio->ccb_h.flags &= ~CAM_DIR_MASK;
+ ctio->ccb_h.flags |= CAM_DIR_NONE | CAM_SEND_STATUS;
+ ctio->scsi_status = SCSI_STATUS_OK;
+ return (0);
+}
+
+/* Simple illegal request message used by MODE SENSE, etc. */
+static int
+tcmd_illegal_req(struct ccb_accept_tio *atio, struct ccb_scsiio *ctio)
+{
+ if (debug) {
+ struct atio_descr *a_descr;
+
+ a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;
+ cdb_debug(a_descr->cdb, "Sending ill req to %u: ", atio->init_id);
+ }
+
+ tcmd_sense(atio->init_id, ctio, SSD_KEY_ILLEGAL_REQUEST,
+ /*asc*/0x24, /*ascq*/0);
+ return (0);
+}
+
+static void
+cdb_debug(u_int8_t *cdb, const char *msg, ...)
+{
+ char msg_buf[512];
+ int len;
+ va_list ap;
+
+ va_start(ap, msg);
+ vsnprintf(msg_buf, sizeof(msg_buf), msg, ap);
+ va_end(ap);
+ len = strlen(msg_buf);
+ scsi_cdb_string(cdb, msg_buf + len, sizeof(msg_buf) - len);
+ warnx("%s", msg_buf);
+}
diff --git a/share/examples/scsi_target/scsi_target.8 b/share/examples/scsi_target/scsi_target.8
new file mode 100644
index 000000000000..f3bcfac33ee8
--- /dev/null
+++ b/share/examples/scsi_target/scsi_target.8
@@ -0,0 +1,156 @@
+.\" Copyright (c) 2002
+.\" Nate Lawson. All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\" 3. Neither the name of the author nor the names of any co-contributors
+.\" may be used to endorse or promote products derived from this software
+.\" without specific prior written permission.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY Nate Lawson AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.Dd November 15, 2002
+.Dt SCSI_TARGET 8
+.Os
+.Sh NAME
+.Nm scsi_target
+.Nd usermode SCSI disk emulator
+.Sh SYNOPSIS
+.Nm
+.Op Fl AdSTY
+.Op Fl b Ar size
+.Op Fl c Ar size
+.Op Fl s Ar size
+.Op Fl W Ar num
+.Ar bus : Ns Ar target : Ns Ar lun
+.Ar filename
+.Sh DESCRIPTION
+The
+.Nm
+utility emulates a SCSI target device using the
+.Xr targ 4
+device driver.
+It supports the basic commands of a direct access device, like
+.Xr da 4 .
+In typical operation, it opens a control device and
+enables target mode for the specified LUN.
+It then communicates with
+the SIM using CCBs exchanged via
+.Xr read 2
+and
+.Xr write 2 .
+READ and WRITE CDBs are satisfied with the specified backing store file.
+.Pp
+For performance, backing store accesses normally use
+.Xr aio 4 ,
+which requires a kernel compiled with
+.Cd "options VFS_AIO" .
+If AIO support is not available,
+.Nm
+falls back to synchronous reads and writes.
+.Pp
+Options:
+.Bl -tag -width indent
+.It Fl A
+Enable 16 addresses if supported by the SIM.
+Default is 8.
+.It Fl S
+Enable synchronous transfers if supported by the SIM.
+Default is disabled.
+.It Fl T
+Enable tagged queuing if supported by the SIM.
+Default is no tagged queuing.
+.It Fl W Cm 8 | 16 | 32
+Enable 16 or 32 bit wide transfers if supported by the SIM.
+Default is 8.
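+.It Fl Y
+Use synchronous reads and writes on the backing store instead of
+.Xr aio 4 .
+If AIO support is not detected at startup, this mode is selected
+automatically.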
+.It Fl b Ar bufsize
+Set buffer size for transfers.
+Transfers larger than this will be split into multiple transfers.
+.It Fl c Ar sectorsize
+Set sector size for emulated volume.
+Default is 512.
+.It Fl d
+Enable debugging output in
+.Nm
+and its associated control device.
+.It Fl s Ar volsize
+Use a different size for the emulated volume.
+Must be less than or equal to the size of
+.Ar filename .
+If the number ends with a
+.Dq Li k ,
+.Dq Li m ,
+.Dq Li g ,
+.Dq Li t ,
+.Dq Li p ,
+or
+.Dq Li e ,
+the number is multiplied by 2^10 (1K), 2^20 (1M), 2^30 (1G), 2^40 (1T),
+2^50 (1P), or 2^60 (1E),
+respectively.
+.El
+.Pp
+Required arguments:
+.Bl -tag -width indent
+.It Ar bus : Ns Ar target : Ns Ar lun
+Attach to specified bus ID, target ID, and LUN.
+.It Ar filename
+File to use as a backing store.
+.El
+.Pp
+All options default to the minimal functionality of SCSI-1.
+To be safe,
+.Nm
+checks the SIM for the requested capability before enabling target mode.
+.Sh FILES
+.Bl -tag -width ".Pa /usr/share/examples/scsi_target" -compact
+.It Pa /dev/targ*
+Control devices.
+.It Pa /usr/share/examples/scsi_target
+Source directory.
+.El
+.Sh EXAMPLES
+Create a 5 megabyte backing store file.
+.Pp
+.Dl "dd if=/dev/zero of=vol bs=1m count=5"
+.Pp
+Enable target mode on bus 0, target ID 1, LUN 0, using
+.Pa vol
+as the backing store for READ6/10 and WRITE6/10 commands.
+Only the first 1000 bytes of
+.Pa vol
+will be used.
+Debugging information will be output.
+16-bit wide transfers will be used if the SIM supports them.
+.Pp
+.Dl "scsi_target -d -s 1000 -W 16 0:1:0 vol"
+.Sh SEE ALSO
+.Xr scsi 4 ,
+.Xr targ 4
+.Sh AUTHORS
+.An -nosplit
+The
+.Nm
+example first appeared in
+.Fx 3.0
+and was written by
+.An Justin T. Gibbs .
+It was rewritten for
+.Fx 5.0
+by
+.An Nate Lawson Aq Mt nate@root.org .
diff --git a/share/examples/scsi_target/scsi_target.c b/share/examples/scsi_target/scsi_target.c
new file mode 100644
index 000000000000..ee0a94e7a2a0
--- /dev/null
+++ b/share/examples/scsi_target/scsi_target.c
@@ -0,0 +1,990 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * SCSI Disk Emulator
+ *
+ * Copyright (c) 2002 Nate Lawson.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/types.h>
+#include <ctype.h>
+#include <errno.h>
+#include <err.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sysexits.h>
+#include <unistd.h>
+#include <aio.h>
+#include <assert.h>
+#include <sys/stat.h>
+#include <sys/queue.h>
+#include <sys/event.h>
+#include <sys/param.h>
+#include <sys/disk.h>
+#include <cam/cam_queue.h>
+#include <cam/scsi/scsi_all.h>
+#include <cam/scsi/scsi_targetio.h>
+#include <cam/scsi/scsi_message.h>
+#include "scsi_target.h"
+
+/* Maximum amount to transfer per CTIO */
+#define MAX_XFER MAXPHYS
+/* Maximum number of allocated CTIOs */
+#define MAX_CTIOS 64
+/* Maximum sector size for emulated volume */
+#define MAX_SECTOR 32768
+
+/* Global variables */
+int debug;
+int notaio = 0;
+off_t volume_size;
+u_int sector_size;
+size_t buf_size;
+
+/* Local variables */
+static int targ_fd;
+static int kq_fd;
+static int file_fd;
+static int num_ctios;
+static struct ccb_queue pending_queue;
+static struct ccb_queue work_queue;
+static struct ioc_enable_lun ioc_enlun = {
+ CAM_BUS_WILDCARD,
+ CAM_TARGET_WILDCARD,
+ CAM_LUN_WILDCARD,
+ 0,
+ 0
+};
+
+/* Local functions */
+static void cleanup(void);
+static int init_ccbs(void);
+static void request_loop(void);
+static void handle_read(void);
+/* static int work_atio(struct ccb_accept_tio *); */
+static void queue_io(struct ccb_scsiio *);
+static int run_queue(struct ccb_accept_tio *);
+static int work_inot(struct ccb_immediate_notify *);
+static struct ccb_scsiio *
+ get_ctio(void);
+/* static void free_ccb(union ccb *); */
+static cam_status get_sim_flags(u_int16_t *);
+static void rel_simq(void);
+static void abort_all_pending(void);
+static void usage(void);
+
+int
+main(int argc, char *argv[])
+{
+ int ch;
+ char *file_name;
+ u_int16_t req_flags, sim_flags;
+ off_t user_size;
+
+ /* Initialize */
+ debug = 0;
+ req_flags = sim_flags = 0;
+ user_size = 0;
+ targ_fd = file_fd = kq_fd = -1;
+ num_ctios = 0;
+ sector_size = SECTOR_SIZE;
+ buf_size = DFLTPHYS;
+
+ /* Prepare resource pools */
+ TAILQ_INIT(&pending_queue);
+ TAILQ_INIT(&work_queue);
+
+ while ((ch = getopt(argc, argv, "AdSTYb:c:s:W:")) != -1) {
+ switch(ch) {
+ case 'A':
+ req_flags |= SID_Addr16;
+ break;
+ case 'd':
+ debug = 1;
+ break;
+ case 'S':
+ req_flags |= SID_Sync;
+ break;
+ case 'T':
+ req_flags |= SID_CmdQue;
+ break;
+ case 'b':
+ buf_size = atoi(optarg);
+ if (buf_size < 256 || buf_size > MAX_XFER)
+ errx(1, "Unreasonable buf size: %s", optarg);
+ break;
+ case 'c':
+ sector_size = atoi(optarg);
+ if (sector_size < 512 || sector_size > MAX_SECTOR)
+ errx(1, "Unreasonable sector size: %s", optarg);
+ break;
+ case 's':
+ {
+ int last, shift = 0;
+
+ last = strlen(optarg) - 1;
+ if (last > 0) {
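+				/*
+				 * Each suffix case falls through to those
+				 * below it, adding 10 bits of shift per step
+				 * (e = 60, p = 50, ..., k = 10).
+				 */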
+ switch (tolower(optarg[last])) {
+ case 'e':
+ shift += 10;
+ /* FALLTHROUGH */
+ case 'p':
+ shift += 10;
+ /* FALLTHROUGH */
+ case 't':
+ shift += 10;
+ /* FALLTHROUGH */
+ case 'g':
+ shift += 10;
+ /* FALLTHROUGH */
+ case 'm':
+ shift += 10;
+ /* FALLTHROUGH */
+ case 'k':
+ shift += 10;
+ optarg[last] = 0;
+ break;
+ }
+ }
+ user_size = strtoll(optarg, (char **)NULL, /*base*/10);
+ user_size <<= shift;
+ if (user_size < 0)
+ errx(1, "Unreasonable volume size: %s", optarg);
+ break;
+ }
+ case 'W':
+ req_flags &= ~(SID_WBus16 | SID_WBus32);
+ switch (atoi(optarg)) {
+ case 8:
+ /* Leave req_flags zeroed */
+ break;
+ case 16:
+ req_flags |= SID_WBus16;
+ break;
+ case 32:
+ req_flags |= SID_WBus32;
+ break;
+ default:
+ warnx("Width %s not supported", optarg);
+ usage();
+ /* NOTREACHED */
+ }
+ break;
+ case 'Y':
+ notaio = 1;
+ break;
+ default:
+ usage();
+ /* NOTREACHED */
+ }
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc != 2)
+ usage();
+
+ sscanf(argv[0], "%u:%u:%ju", &ioc_enlun.path_id, &ioc_enlun.target_id,
+ &ioc_enlun.lun_id);
+ file_name = argv[1];
+
+ if (ioc_enlun.path_id == CAM_BUS_WILDCARD ||
+ ioc_enlun.target_id == CAM_TARGET_WILDCARD ||
+ ioc_enlun.lun_id == CAM_LUN_WILDCARD) {
+ warnx("Incomplete target path specified");
+ usage();
+ /* NOTREACHED */
+ }
+ /* We don't support any vendor-specific commands */
+ ioc_enlun.grp6_len = 0;
+ ioc_enlun.grp7_len = 0;
+
+ /* Open backing store for IO */
+ file_fd = open(file_name, O_RDWR);
+ if (file_fd < 0)
+		err(EX_NOINPUT, "open backing store file");
+
+ /* Check backing store size or use the size user gave us */
+ if (user_size == 0) {
+ struct stat st;
+
+ if (fstat(file_fd, &st) < 0)
+ err(1, "fstat file");
+#if __FreeBSD_version >= 500000
+ if ((st.st_mode & S_IFCHR) != 0) {
+ /* raw device */
+ off_t mediasize;
+ if (ioctl(file_fd, DIOCGMEDIASIZE, &mediasize) < 0)
+ err(1, "DIOCGMEDIASIZE");
+
+ /* XXX get sector size by ioctl()?? */
+ volume_size = mediasize / sector_size;
+ } else
+#endif
+ volume_size = st.st_size / sector_size;
+ } else {
+ volume_size = user_size / sector_size;
+ }
+ if (debug)
+		warnx("volume_size: %u bytes x " OFF_FMT " sectors",
+		    sector_size, volume_size);
+
+ if (volume_size <= 0)
+ errx(1, "volume must be larger than %d", sector_size);
+
+ if (notaio == 0) {
+ struct aiocb aio, *aiop;
+ void *aio_buf;
+
+		/* See if we have working AIO support */
+ memset(&aio, 0, sizeof(aio));
+ aio_buf = malloc(sector_size);
+ aio.aio_buf = aio_buf;
+ if (aio.aio_buf == NULL)
+ err(1, "malloc");
+ aio.aio_fildes = file_fd;
+ aio.aio_offset = 0;
+ aio.aio_nbytes = sector_size;
+ signal(SIGSYS, SIG_IGN);
+ if (aio_read(&aio) != 0) {
+			printf("AIO support is not available - switching to"
+			    " single-threaded mode.\n");
+ notaio = 1;
+ } else {
+ if (aio_waitcomplete(&aiop, NULL) != sector_size)
+ err(1, "aio_waitcomplete");
+ assert(aiop == &aio);
+ signal(SIGSYS, SIG_DFL);
+ }
+ free(aio_buf);
+ if (debug && notaio == 0)
+ warnx("aio support tested ok");
+ }
+
+ targ_fd = open("/dev/targ", O_RDWR);
+ if (targ_fd < 0)
+ err(1, "/dev/targ");
+ else
+ warnx("opened /dev/targ");
+
+ /* The first three are handled by kevent() later */
+ signal(SIGHUP, SIG_IGN);
+ signal(SIGINT, SIG_IGN);
+ signal(SIGTERM, SIG_IGN);
+ signal(SIGPROF, SIG_IGN);
+ signal(SIGALRM, SIG_IGN);
+ signal(SIGSTOP, SIG_IGN);
+ signal(SIGTSTP, SIG_IGN);
+
+ /* Register a cleanup handler to run when exiting */
+ atexit(cleanup);
+
+ /* Enable listening on the specified LUN */
+ if (ioctl(targ_fd, TARGIOCENABLE, &ioc_enlun) != 0)
+ err(1, "TARGIOCENABLE");
+
+ /* Enable debugging if requested */
+ if (debug) {
+ if (ioctl(targ_fd, TARGIOCDEBUG, &debug) != 0)
+ warnx("TARGIOCDEBUG");
+ }
+
+ /* Set up inquiry data according to what SIM supports */
+ if (get_sim_flags(&sim_flags) != CAM_REQ_CMP)
+ errx(1, "get_sim_flags");
+
+ if (tcmd_init(req_flags, sim_flags) != 0)
+ errx(1, "Initializing tcmd subsystem failed");
+
+ /* Queue ATIOs and INOTs on descriptor */
+ if (init_ccbs() != 0)
+ errx(1, "init_ccbs failed");
+
+ if (debug)
+ warnx("main loop beginning");
+
+ request_loop();
+
+ exit(0);
+}
+
+static void
+cleanup(void)
+{
+ struct ccb_hdr *ccb_h;
+
+ if (debug) {
+ warnx("cleanup called");
+ debug = 0;
+ ioctl(targ_fd, TARGIOCDEBUG, &debug);
+ }
+ ioctl(targ_fd, TARGIOCDISABLE, NULL);
+ close(targ_fd);
+
+ while ((ccb_h = TAILQ_FIRST(&pending_queue)) != NULL) {
+ TAILQ_REMOVE(&pending_queue, ccb_h, periph_links.tqe);
+ free_ccb((union ccb *)ccb_h);
+ }
+ while ((ccb_h = TAILQ_FIRST(&work_queue)) != NULL) {
+ TAILQ_REMOVE(&work_queue, ccb_h, periph_links.tqe);
+ free_ccb((union ccb *)ccb_h);
+ }
+
+ if (kq_fd != -1)
+ close(kq_fd);
+}
+
+/* Allocate ATIOs/INOTs and queue on HBA */
+static int
+init_ccbs(void)
+{
+ int i;
+
+ for (i = 0; i < MAX_INITIATORS; i++) {
+ struct ccb_accept_tio *atio;
+ struct atio_descr *a_descr;
+ struct ccb_immediate_notify *inot;
+
+ atio = (struct ccb_accept_tio *)malloc(sizeof(*atio));
+ if (atio == NULL) {
+ warn("malloc ATIO");
+ return (-1);
+ }
+ a_descr = (struct atio_descr *)malloc(sizeof(*a_descr));
+ if (a_descr == NULL) {
+ free(atio);
+ warn("malloc atio_descr");
+ return (-1);
+ }
+ atio->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
+ atio->ccb_h.targ_descr = a_descr;
+ send_ccb((union ccb *)atio, /*priority*/1);
+
+ inot = (struct ccb_immediate_notify *)malloc(sizeof(*inot));
+ if (inot == NULL) {
+ warn("malloc INOT");
+ return (-1);
+ }
+ inot->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY;
+ send_ccb((union ccb *)inot, /*priority*/1);
+ }
+
+ return (0);
+}
+
+static void
+request_loop(void)
+{
+ struct kevent events[MAX_EVENTS];
+ struct timespec ts, *tptr;
+ int quit;
+
+ /* Register kqueue for event notification */
+ if ((kq_fd = kqueue()) < 0)
+ err(1, "init kqueue");
+
+ /* Set up some default events */
+ EV_SET(&events[0], SIGHUP, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0, 0, 0);
+ EV_SET(&events[1], SIGINT, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0, 0, 0);
+ EV_SET(&events[2], SIGTERM, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0, 0, 0);
+ EV_SET(&events[3], targ_fd, EVFILT_READ, EV_ADD|EV_ENABLE, 0, 0, 0);
+ if (kevent(kq_fd, events, 4, NULL, 0, NULL) < 0)
+ err(1, "kevent signal registration");
+
+ ts.tv_sec = 0;
+ ts.tv_nsec = 0;
+ tptr = NULL;
+ quit = 0;
+
+ /* Loop until user signal */
+ while (quit == 0) {
+ int retval, i, oo;
+ struct ccb_hdr *ccb_h;
+
+ /* Check for the next signal, read ready, or AIO completion */
+ retval = kevent(kq_fd, NULL, 0, events, MAX_EVENTS, tptr);
+ if (retval < 0) {
+ if (errno == EINTR) {
+ if (debug)
+ warnx("EINTR, looping");
+ continue;
+			} else {
+				err(1, "kevent failed");
+			}
+ } else if (retval > MAX_EVENTS) {
+ errx(1, "kevent returned more events than allocated?");
+ }
+
+ /* Process all received events. */
+ for (oo = i = 0; i < retval; i++) {
+ if ((events[i].flags & EV_ERROR) != 0)
+ errx(1, "kevent registration failed");
+
+ switch (events[i].filter) {
+ case EVFILT_READ:
+ if (debug)
+ warnx("read ready");
+ handle_read();
+ break;
+ case EVFILT_AIO:
+ {
+ struct ccb_scsiio *ctio;
+ struct ctio_descr *c_descr;
+ if (debug)
+ warnx("aio ready");
+
+ ctio = (struct ccb_scsiio *)events[i].udata;
+ c_descr = (struct ctio_descr *)
+ ctio->ccb_h.targ_descr;
+ c_descr->event = AIO_DONE;
+ /* Queue on the appropriate ATIO */
+ queue_io(ctio);
+ /* Process any queued completions. */
+ oo += run_queue(c_descr->atio);
+ break;
+ }
+ case EVFILT_SIGNAL:
+ if (debug)
+ warnx("signal ready, setting quit");
+ quit = 1;
+ break;
+ default:
+ warnx("unknown event %d", events[i].filter);
+ break;
+ }
+
+ if (debug)
+ warnx("event %d done", events[i].filter);
+ }
+
+ if (oo) {
+ tptr = &ts;
+ continue;
+ }
+
+ /* Grab the first CCB and perform one work unit. */
+ if ((ccb_h = TAILQ_FIRST(&work_queue)) != NULL) {
+ union ccb *ccb;
+
+ ccb = (union ccb *)ccb_h;
+ switch (ccb_h->func_code) {
+ case XPT_ACCEPT_TARGET_IO:
+ /* Start one more transfer. */
+ retval = work_atio(&ccb->atio);
+ break;
+ case XPT_IMMEDIATE_NOTIFY:
+ retval = work_inot(&ccb->cin1);
+ break;
+ default:
+ warnx("Unhandled ccb type %#x on workq",
+ ccb_h->func_code);
+ abort();
+ /* NOTREACHED */
+ }
+
+ /* Assume work function handled the exception */
+ if ((ccb_h->status & CAM_DEV_QFRZN) != 0) {
+ if (debug) {
+ warnx("Queue frozen receiving CCB, "
+ "releasing");
+ }
+ rel_simq();
+ }
+
+ /* No more work needed for this command. */
+ if (retval == 0) {
+ TAILQ_REMOVE(&work_queue, ccb_h,
+ periph_links.tqe);
+ }
+ }
+
+ /*
+ * Poll for new events (i.e. completions) while we
+ * are processing CCBs on the work_queue. Once it's
+ * empty, use an infinite wait.
+ */
+ if (!TAILQ_EMPTY(&work_queue))
+ tptr = &ts;
+ else
+ tptr = NULL;
+ }
+}
+
+/* CCBs are ready from the kernel */
+static void
+handle_read(void)
+{
+ union ccb *ccb_array[MAX_INITIATORS], *ccb;
+ int ccb_count, i;
+
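+	/* Each read(2) returns an array of pointers to completed CCBs */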
+ ccb_count = read(targ_fd, ccb_array, sizeof(ccb_array));
+ if (ccb_count <= 0) {
+ warn("read ccb ptrs");
+ return;
+ }
+ ccb_count /= sizeof(union ccb *);
+ if (ccb_count < 1) {
+ warnx("truncated read ccb ptr?");
+ return;
+ }
+
+ for (i = 0; i < ccb_count; i++) {
+ ccb = ccb_array[i];
+ TAILQ_REMOVE(&pending_queue, &ccb->ccb_h, periph_links.tqe);
+
+ switch (ccb->ccb_h.func_code) {
+ case XPT_ACCEPT_TARGET_IO:
+ {
+ struct ccb_accept_tio *atio;
+ struct atio_descr *a_descr;
+
+ /* Initialize ATIO descr for this transaction */
+ atio = &ccb->atio;
+ a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;
+ bzero(a_descr, sizeof(*a_descr));
+ TAILQ_INIT(&a_descr->cmplt_io);
+ a_descr->flags = atio->ccb_h.flags &
+ (CAM_DIS_DISCONNECT | CAM_TAG_ACTION_VALID);
+ /* XXX add a_descr->priority */
+ if ((atio->ccb_h.flags & CAM_CDB_POINTER) == 0)
+ a_descr->cdb = atio->cdb_io.cdb_bytes;
+ else
+ a_descr->cdb = atio->cdb_io.cdb_ptr;
+
+ /* ATIOs are processed in FIFO order */
+ TAILQ_INSERT_TAIL(&work_queue, &ccb->ccb_h,
+ periph_links.tqe);
+ break;
+ }
+ case XPT_CONT_TARGET_IO:
+ {
+ struct ccb_scsiio *ctio;
+ struct ctio_descr *c_descr;
+
+ ctio = &ccb->ctio;
+ c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;
+ c_descr->event = CTIO_DONE;
+ /* Queue on the appropriate ATIO */
+ queue_io(ctio);
+ /* Process any queued completions. */
+ run_queue(c_descr->atio);
+ break;
+ }
+ case XPT_IMMEDIATE_NOTIFY:
+ /* INOTs are handled with priority */
+ TAILQ_INSERT_HEAD(&work_queue, &ccb->ccb_h,
+ periph_links.tqe);
+ break;
+ default:
+ warnx("Unhandled ccb type %#x in handle_read",
+ ccb->ccb_h.func_code);
+ break;
+ }
+ }
+}
+
+/* Process an ATIO CCB from the kernel */
+int
+work_atio(struct ccb_accept_tio *atio)
+{
+ struct ccb_scsiio *ctio;
+ struct atio_descr *a_descr;
+ struct ctio_descr *c_descr;
+ cam_status status;
+ int ret;
+
+ if (debug)
+ warnx("Working on ATIO %p", atio);
+
+ a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;
+
+ /* Get a CTIO and initialize it according to our known parameters */
+ ctio = get_ctio();
+ if (ctio == NULL) {
+ return (1);
+ }
+ ret = 0;
+ ctio->ccb_h.flags = a_descr->flags;
+ ctio->tag_id = atio->tag_id;
+ ctio->init_id = atio->init_id;
+ /* XXX priority needs to be added to a_descr */
+ c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;
+ c_descr->atio = atio;
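+	/* Resume the backing store position where this ATIO's I/O left off */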
+ if ((a_descr->flags & CAM_DIR_IN) != 0)
+ c_descr->offset = a_descr->base_off + a_descr->targ_req;
+ else if ((a_descr->flags & CAM_DIR_MASK) == CAM_DIR_OUT)
+ c_descr->offset = a_descr->base_off + a_descr->init_req;
+ else
+ c_descr->offset = a_descr->base_off;
+
+ /*
+ * Return a check condition if there was an error while
+ * receiving this ATIO.
+ */
+ if (atio->sense_len != 0) {
+ struct scsi_sense_data_fixed *sense;
+
+ if (debug) {
+ warnx("ATIO with %u bytes sense received",
+ atio->sense_len);
+ }
+ sense = (struct scsi_sense_data_fixed *)&atio->sense_data;
+ tcmd_sense(ctio->init_id, ctio, sense->flags,
+ sense->add_sense_code, sense->add_sense_code_qual);
+ send_ccb((union ccb *)ctio, /*priority*/1);
+ return (0);
+ }
+
+ status = atio->ccb_h.status & CAM_STATUS_MASK;
+ switch (status) {
+ case CAM_CDB_RECVD:
+ ret = tcmd_handle(atio, ctio, ATIO_WORK);
+ break;
+ case CAM_REQ_ABORTED:
+ warn("ATIO %p aborted", a_descr);
+ /* Requeue on HBA */
+ TAILQ_REMOVE(&work_queue, &atio->ccb_h, periph_links.tqe);
+ send_ccb((union ccb *)atio, /*priority*/1);
+ ret = 1;
+ break;
+ default:
+ warnx("ATIO completed with unhandled status %#x", status);
+ abort();
+ /* NOTREACHED */
+ break;
+ }
+
+ return (ret);
+}
+
+static void
+queue_io(struct ccb_scsiio *ctio)
+{
+ struct ccb_hdr *ccb_h;
+ struct io_queue *ioq;
+ struct ctio_descr *c_descr;
+
+ c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;
+ if (c_descr->atio == NULL) {
+ errx(1, "CTIO %p has NULL ATIO", ctio);
+ }
+ ioq = &((struct atio_descr *)c_descr->atio->ccb_h.targ_descr)->cmplt_io;
+
+ if (TAILQ_EMPTY(ioq)) {
+ TAILQ_INSERT_HEAD(ioq, &ctio->ccb_h, periph_links.tqe);
+ return;
+ }
+
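+	/*
+	 * Keep the completion list sorted by offset: scan backward for the
+	 * last entry at or below this CTIO's offset and insert after it.
+	 */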
+ TAILQ_FOREACH_REVERSE(ccb_h, ioq, io_queue, periph_links.tqe) {
+ struct ctio_descr *curr_descr =
+ (struct ctio_descr *)ccb_h->targ_descr;
+ if (curr_descr->offset <= c_descr->offset) {
+ break;
+ }
+ }
+
+ if (ccb_h) {
+ TAILQ_INSERT_AFTER(ioq, ccb_h, &ctio->ccb_h, periph_links.tqe);
+ } else {
+ TAILQ_INSERT_HEAD(ioq, &ctio->ccb_h, periph_links.tqe);
+ }
+}
+
+/*
+ * Go through all completed AIO/CTIOs for a given ATIO and advance data
+ * counts, start continuation IO, etc.
+ */
+static int
+run_queue(struct ccb_accept_tio *atio)
+{
+ struct atio_descr *a_descr;
+ struct ccb_hdr *ccb_h;
+ int sent_status, event;
+
+ if (atio == NULL)
+ return (0);
+
+ a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;
+
+ while ((ccb_h = TAILQ_FIRST(&a_descr->cmplt_io)) != NULL) {
+ struct ccb_scsiio *ctio;
+ struct ctio_descr *c_descr;
+
+ ctio = (struct ccb_scsiio *)ccb_h;
+ c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;
+
+ if (ctio->ccb_h.status == CAM_REQ_ABORTED) {
+ TAILQ_REMOVE(&a_descr->cmplt_io, ccb_h,
+ periph_links.tqe);
+ free_ccb((union ccb *)ctio);
+ send_ccb((union ccb *)atio, /*priority*/1);
+ continue;
+ }
+
+ /* If completed item is in range, call handler */
+ if ((c_descr->event == AIO_DONE &&
+ c_descr->offset == a_descr->base_off + a_descr->targ_ack)
+ || (c_descr->event == CTIO_DONE &&
+ c_descr->offset == a_descr->base_off + a_descr->init_ack)) {
+ sent_status = (ccb_h->flags & CAM_SEND_STATUS) != 0;
+ event = c_descr->event;
+
+ TAILQ_REMOVE(&a_descr->cmplt_io, ccb_h,
+ periph_links.tqe);
+ tcmd_handle(atio, ctio, c_descr->event);
+
+ /* If entire transfer complete, send back ATIO */
+ if (sent_status != 0 && event == CTIO_DONE)
+ send_ccb((union ccb *)atio, /*priority*/1);
+ } else {
+ /* Gap in offsets so wait until later callback */
+ if (/* debug */ 1)
+ warnx("IO %p:%p out of order %s", ccb_h,
+ a_descr, c_descr->event == AIO_DONE?
+ "aio" : "ctio");
+ return (1);
+ }
+ }
+ return (0);
+}
+
+static int
+work_inot(struct ccb_immediate_notify *inot)
+{
+ cam_status status;
+
+ if (debug)
+ warnx("Working on INOT %p", inot);
+
+ status = inot->ccb_h.status;
+ status &= CAM_STATUS_MASK;
+
+ switch (status) {
+ case CAM_SCSI_BUS_RESET:
+ tcmd_ua(CAM_TARGET_WILDCARD, UA_BUS_RESET);
+ abort_all_pending();
+ break;
+ case CAM_BDR_SENT:
+ tcmd_ua(CAM_TARGET_WILDCARD, UA_BDR);
+ abort_all_pending();
+ break;
+ case CAM_MESSAGE_RECV:
+ switch (inot->arg) {
+ case MSG_TASK_COMPLETE:
+ case MSG_INITIATOR_DET_ERR:
+ case MSG_ABORT_TASK_SET:
+ case MSG_MESSAGE_REJECT:
+ case MSG_NOOP:
+ case MSG_PARITY_ERROR:
+ case MSG_TARGET_RESET:
+ case MSG_ABORT_TASK:
+ case MSG_CLEAR_TASK_SET:
+ default:
+ warnx("INOT message %#x", inot->arg);
+ break;
+ }
+ break;
+ case CAM_REQ_ABORTED:
+ warnx("INOT %p aborted", inot);
+ break;
+ default:
+ warnx("Unhandled INOT status %#x", status);
+ break;
+ }
+
+ /* Requeue on SIM */
+ TAILQ_REMOVE(&work_queue, &inot->ccb_h, periph_links.tqe);
+ send_ccb((union ccb *)inot, /*priority*/1);
+
+ return (1);
+}
+
+void
+send_ccb(union ccb *ccb, int priority)
+{
+ if (debug)
+ warnx("sending ccb (%#x)", ccb->ccb_h.func_code);
+ ccb->ccb_h.pinfo.priority = priority;
+ if (XPT_FC_IS_QUEUED(ccb)) {
+ TAILQ_INSERT_TAIL(&pending_queue, &ccb->ccb_h,
+ periph_links.tqe);
+ }
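+	/*
+	 * The targ(4) control device exchanges CCB pointers, not CCB
+	 * contents, so write the address of the CCB itself.
+	 */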
+ if (write(targ_fd, &ccb, sizeof(ccb)) != sizeof(ccb)) {
+ warn("write ccb");
+ ccb->ccb_h.status = CAM_PROVIDE_FAIL;
+ }
+}
+
+/* Return a CTIO/descr/buf combo from the freelist or malloc one */
+static struct ccb_scsiio *
+get_ctio(void)
+{
+ struct ccb_scsiio *ctio;
+ struct ctio_descr *c_descr;
+ struct sigevent *se;
+
+ if (num_ctios == MAX_CTIOS) {
+ warnx("at CTIO max");
+ return (NULL);
+ }
+
+ ctio = (struct ccb_scsiio *)malloc(sizeof(*ctio));
+ if (ctio == NULL) {
+ warn("malloc CTIO");
+ return (NULL);
+ }
+ c_descr = (struct ctio_descr *)malloc(sizeof(*c_descr));
+ if (c_descr == NULL) {
+ free(ctio);
+ warn("malloc ctio_descr");
+ return (NULL);
+ }
+ c_descr->buf = malloc(buf_size);
+ if (c_descr->buf == NULL) {
+ free(c_descr);
+ free(ctio);
+ warn("malloc backing store");
+ return (NULL);
+ }
+ num_ctios++;
+
+ /* Initialize CTIO, CTIO descr, and AIO */
+ ctio->ccb_h.func_code = XPT_CONT_TARGET_IO;
+ ctio->ccb_h.retry_count = 2;
+ ctio->ccb_h.timeout = CAM_TIME_INFINITY;
+ ctio->data_ptr = c_descr->buf;
+ ctio->ccb_h.targ_descr = c_descr;
+ c_descr->aiocb.aio_buf = c_descr->buf;
+ c_descr->aiocb.aio_fildes = file_fd;
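+	/* Post AIO completions to our kqueue, with the CTIO as udata */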
+ se = &c_descr->aiocb.aio_sigevent;
+ se->sigev_notify = SIGEV_KEVENT;
+ se->sigev_notify_kqueue = kq_fd;
+ se->sigev_value.sival_ptr = ctio;
+
+ return (ctio);
+}
+
+void
+free_ccb(union ccb *ccb)
+{
+ switch (ccb->ccb_h.func_code) {
+ case XPT_CONT_TARGET_IO:
+ {
+ struct ctio_descr *c_descr;
+
+ c_descr = (struct ctio_descr *)ccb->ccb_h.targ_descr;
+ free(c_descr->buf);
+ num_ctios--;
+ /* FALLTHROUGH */
+ }
+ case XPT_ACCEPT_TARGET_IO:
+ free(ccb->ccb_h.targ_descr);
+ /* FALLTHROUGH */
+ case XPT_IMMEDIATE_NOTIFY:
+ default:
+ free(ccb);
+ break;
+ }
+}
+
+static cam_status
+get_sim_flags(u_int16_t *flags)
+{
+ struct ccb_pathinq cpi;
+ cam_status status;
+
+ /* Find SIM capabilities */
+ bzero(&cpi, sizeof(cpi));
+ cpi.ccb_h.func_code = XPT_PATH_INQ;
+ send_ccb((union ccb *)&cpi, /*priority*/1);
+ status = cpi.ccb_h.status & CAM_STATUS_MASK;
+ if (status != CAM_REQ_CMP) {
+ fprintf(stderr, "CPI failed, status %#x\n", status);
+ return (status);
+ }
+
+ /* Can only enable on controllers that support target mode */
+ if ((cpi.target_sprt & PIT_PROCESSOR) == 0) {
+ fprintf(stderr, "HBA does not support target mode\n");
+ status = CAM_PATH_INVALID;
+ return (status);
+ }
+
+ *flags = cpi.hba_inquiry;
+ return (status);
+}
+
+static void
+rel_simq(void)
+{
+ struct ccb_relsim crs;
+
+ bzero(&crs, sizeof(crs));
+ crs.ccb_h.func_code = XPT_REL_SIMQ;
+ crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
+ crs.openings = 0;
+ crs.release_timeout = 0;
+ crs.qfrozen_cnt = 0;
+ send_ccb((union ccb *)&crs, /*priority*/0);
+}
+
+/* Cancel all pending CCBs. */
+static void
+abort_all_pending(void)
+{
+ struct ccb_abort cab;
+ struct ccb_hdr *ccb_h;
+
+ if (debug)
+ warnx("abort_all_pending");
+
+ bzero(&cab, sizeof(cab));
+ cab.ccb_h.func_code = XPT_ABORT;
+ TAILQ_FOREACH(ccb_h, &pending_queue, periph_links.tqe) {
+ if (debug)
+			warnx("Aborting pending CCB %p", ccb_h);
+ cab.abort_ccb = (union ccb *)ccb_h;
+ send_ccb((union ccb *)&cab, /*priority*/1);
+ if (cab.ccb_h.status != CAM_REQ_CMP) {
+			warnx("Unable to abort CCB, status %#x",
+			    cab.ccb_h.status);
+ }
+ }
+}
+
+static void
+usage(void)
+{
+ fprintf(stderr,
+ "Usage: scsi_target [-AdSTY] [-b bufsize] [-c sectorsize]\n"
+	    "\t\t[-s volsize] [-W 8,16,32]\n"
+ "\t\tbus:target:lun filename\n");
+ exit(1);
+}
diff --git a/share/examples/scsi_target/scsi_target.h b/share/examples/scsi_target/scsi_target.h
new file mode 100644
index 000000000000..57b6696d2e77
--- /dev/null
+++ b/share/examples/scsi_target/scsi_target.h
@@ -0,0 +1,133 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * SCSI Target Emulator
+ *
+ * Copyright (c) 2002 Nate Lawson.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _SCSI_TARGET_H
+#define _SCSI_TARGET_H
+
+/*
+ * Maximum number of parallel commands to accept,
+ * 1024 for Fibre Channel (SPI is 16).
+ */
+#define MAX_INITIATORS 8
+#define SECTOR_SIZE 512
+#define MAX_EVENTS (MAX_INITIATORS + 5)
+ /* kqueue for AIO, signals */
+
+/* Additional SCSI 3 defines for inquiry response */
+#define SID_Addr16 0x0100
+
+TAILQ_HEAD(io_queue, ccb_hdr);
+
+/* Offset into the private CCB area for storing our descriptor */
+#define targ_descr periph_priv.entries[1].ptr
+
+/* Descriptor attached to each ATIO */
+struct atio_descr {
+ off_t base_off; /* Base offset for ATIO */
+ uint total_len; /* Total xfer len for this ATIO */
+ uint init_req; /* Transfer count requested to/from init */
+ uint init_ack; /* Data transferred ok to/from init */
+ uint targ_req; /* Transfer count requested to/from target */
+ uint targ_ack; /* Data transferred ok to/from target */
+ int flags; /* Flags for CTIOs */
+ u_int8_t *cdb; /* Pointer to received CDB */
+ /* List of completed AIO/CTIOs */
+ struct io_queue cmplt_io;
+};
+
+typedef enum {
+ ATIO_WORK,
+ AIO_DONE,
+ CTIO_DONE
+} io_ops;
+
+/* Descriptor attached to each CTIO */
+struct ctio_descr {
+ void *buf; /* Backing store */
+ off_t offset; /* Position in transfer (for file, */
+ /* doesn't start at 0) */
+ struct aiocb aiocb; /* AIO descriptor for this CTIO */
+ struct ccb_accept_tio *atio;
+ /* ATIO we are satisfying */
+ io_ops event; /* Event that queued this CTIO */
+};
+
+typedef enum {
+ UA_NONE = 0x00,
+ UA_POWER_ON = 0x01,
+ UA_BUS_RESET = 0x02,
+ UA_BDR = 0x04
+} ua_types;
+
+typedef enum {
+ CA_NONE = 0x00,
+ CA_UNIT_ATTN = 0x01,
+ CA_CMD_SENSE = 0x02
+} ca_types;
+
+struct initiator_state {
+ ua_types orig_ua;
+ ca_types orig_ca;
+ ua_types pending_ua;
+ ca_types pending_ca;
+ struct scsi_sense_data sense_data;
+};
+
+/* Global functions */
+extern cam_status tcmd_init(u_int16_t req_inq_flags,
+ u_int16_t sim_inq_flags);
+extern int tcmd_handle(struct ccb_accept_tio *atio,
+ struct ccb_scsiio *ctio, io_ops event);
+extern void tcmd_sense(u_int init_id, struct ccb_scsiio *ctio,
+ u_int8_t flags,
+ u_int8_t asc, u_int8_t ascq);
+extern void tcmd_ua(u_int init_id, ua_types new_ua);
+extern int work_atio(struct ccb_accept_tio *atio);
+extern void send_ccb(union ccb *ccb, int priority);
+extern void free_ccb(union ccb *ccb);
+static __inline u_int min(u_int a, u_int b) { return (a < b ? a : b); }
+
+/* Global Data */
+extern int notaio;
+extern int debug;
+extern off_t volume_size;
+extern u_int sector_size;
+extern size_t buf_size;
+
+/*
+ * Compat Defines
+ */
+#if __FreeBSD_version >= 500000
+#define OFF_FMT "%ju"
+#else
+#define OFF_FMT "%llu"
+#endif
+
+#endif /* _SCSI_TARGET_H */