author		Matt Jacob <mjacob@FreeBSD.org>	2007-05-05 20:18:24 +0000
committer	Matt Jacob <mjacob@FreeBSD.org>	2007-05-05 20:18:24 +0000
commit		d0a68c274726ff51f8fa2893f8fce89937131f25 (patch)
tree		d29b2f49d770d4cdb6a8b8021f00d3f0191529c1
parent		0a70657fcc012f03d2e0064bb31e48dc6a8414b5 (diff)
-rw-r--r--	sys/dev/mpt/mpt.c	20
-rw-r--r--	sys/dev/mpt/mpt.h	70
-rw-r--r--	sys/dev/mpt/mpt_cam.c	115
-rw-r--r--	sys/dev/mpt/mpt_cam.h	8
-rw-r--r--	sys/dev/mpt/mpt_pci.c	10
-rw-r--r--	sys/dev/mpt/mpt_raid.c	56
6 files changed, 150 insertions, 129 deletions
diff --git a/sys/dev/mpt/mpt.c b/sys/dev/mpt/mpt.c
index 0f9b5023d96e..a937f61bad9b 100644
--- a/sys/dev/mpt/mpt.c
+++ b/sys/dev/mpt/mpt.c
@@ -704,6 +704,8 @@ mpt_intr(void *arg)
mpt = (struct mpt_softc *)arg;
mpt_lprt(mpt, MPT_PRT_DEBUG2, "enter mpt_intr\n");
+ MPT_LOCK_ASSERT(mpt);
+
while ((reply_desc = mpt_pop_reply_queue(mpt)) != MPT_REPLY_EMPTY) {
request_t *req;
MSG_DEFAULT_REPLY *reply_frame;
@@ -1171,7 +1173,7 @@ mpt_free_request(struct mpt_softc *mpt, request_t *req)
}
KASSERT(req->state != REQ_STATE_FREE, ("freeing free request"));
KASSERT(!(req->state & REQ_STATE_LOCKED), ("freeing locked request"));
- KASSERT(MPT_OWNED(mpt), ("mpt_free_request: mpt not locked\n"));
+ MPT_LOCK_ASSERT(mpt);
KASSERT(mpt_req_on_free_list(mpt, req) == 0,
("mpt_free_request: req %p:%u func %x already on freelist",
req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
@@ -1220,7 +1222,7 @@ mpt_get_request(struct mpt_softc *mpt, int sleep_ok)
request_t *req;
retry:
- KASSERT(MPT_OWNED(mpt), ("mpt_get_request: mpt not locked\n"));
+ MPT_LOCK_ASSERT(mpt);
req = TAILQ_FIRST(&mpt->request_free_list);
if (req != NULL) {
KASSERT(req == &mpt->request_pool[req->index],
@@ -2107,18 +2109,20 @@ mpt_core_load(struct mpt_personality *pers)
int
mpt_core_attach(struct mpt_softc *mpt)
{
- int val;
+ int val, error;
LIST_INIT(&mpt->ack_frames);
/* Put all request buffers on the free list */
TAILQ_INIT(&mpt->request_pending_list);
TAILQ_INIT(&mpt->request_free_list);
TAILQ_INIT(&mpt->request_timeout_list);
+ MPT_LOCK(mpt);
for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++) {
request_t *req = &mpt->request_pool[val];
req->state = REQ_STATE_ALLOCATED;
mpt_free_request(mpt, req);
}
+ MPT_UNLOCK(mpt);
for (val = 0; val < MPT_MAX_LUNS; val++) {
STAILQ_INIT(&mpt->trt[val].atios);
STAILQ_INIT(&mpt->trt[val].inots);
@@ -2132,7 +2136,12 @@ mpt_core_attach(struct mpt_softc *mpt)
mpt_sysctl_attach(mpt);
mpt_lprt(mpt, MPT_PRT_DEBUG, "doorbell req = %s\n",
mpt_ioc_diag(mpt_read(mpt, MPT_OFFSET_DOORBELL)));
- return (mpt_configure_ioc(mpt, 0, 0));
+
+ MPT_LOCK(mpt);
+ error = mpt_configure_ioc(mpt, 0, 0);
+ MPT_UNLOCK(mpt);
+
+ return (error);
}
int
@@ -2143,6 +2152,7 @@ mpt_core_enable(struct mpt_softc *mpt)
* not enabled, ports not enabled and interrupts
* not enabled.
*/
+ MPT_LOCK(mpt);
/*
* Enable asynchronous event reporting- all personalities
@@ -2177,8 +2187,10 @@ mpt_core_enable(struct mpt_softc *mpt)
*/
if (mpt_send_port_enable(mpt, 0) != MPT_OK) {
mpt_prt(mpt, "failed to enable port 0\n");
+ MPT_UNLOCK(mpt);
return (ENXIO);
}
+ MPT_UNLOCK(mpt);
return (0);
}
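The mpt.c hunks above replace the MPT_OWNED() KASSERTs with MPT_LOCK_ASSERT() and make mpt_core_attach()/mpt_core_enable() acquire the softc lock around the paths they call. A minimal sketch, assuming a hypothetical caller outside CAM's normal dispatch, of what is now required before entering mpt_intr(); example_kick_interrupt() is an illustrative name, not part of the driver:

/*
 * Illustrative only: any non-CAM caller must hold the per-controller
 * mutex so the new MPT_LOCK_ASSERT() at the top of mpt_intr() holds.
 */
static void
example_kick_interrupt(struct mpt_softc *mpt)
{
	MPT_LOCK(mpt);		/* per-softc mutex, no longer Giant */
	mpt_intr(mpt);		/* MPT_LOCK_ASSERT(mpt) is satisfied */
	MPT_UNLOCK(mpt);
}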
diff --git a/sys/dev/mpt/mpt.h b/sys/dev/mpt/mpt.h
index 51171fdf75c9..3e18496e9adf 100644
--- a/sys/dev/mpt/mpt.h
+++ b/sys/dev/mpt/mpt.h
@@ -109,6 +109,7 @@
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/malloc.h>
+#include <sys/devicestat.h>
#else
#include <sys/lock.h>
#include <sys/kernel.h>
@@ -241,7 +242,7 @@ int mpt_modevent(module_t, int, void *);
bus_dma_tag_create(parent_tag, alignment, boundary, \
lowaddr, highaddr, filter, filterarg, \
maxsize, nsegments, maxsegsz, flags, \
- busdma_lock_mutex, &Giant, \
+ busdma_lock_mutex, &(mpt)->mpt_lock, \
dma_tagp)
#else
#define mpt_dma_tag_create(mpt, parent_tag, alignment, boundary, \
@@ -280,7 +281,7 @@ void mpt_map_rquest(void *, bus_dma_segment_t *, int, int);
/****************************** Timer Facilities ******************************/
#if __FreeBSD_version > 500000
-#define mpt_callout_init(c) callout_init(c, /*mpsafe*/0);
+#define mpt_callout_init(c) callout_init(c, /*mpsafe*/1);
#else
#define mpt_callout_init(c) callout_init(c);
#endif
@@ -337,6 +338,7 @@ struct req_entry {
bus_addr_t sense_pbuf; /* Physical Address of sense data */
bus_dmamap_t dmap; /* DMA map for data buffers */
struct req_entry *chain; /* for SGE overallocations */
+ struct callout callout; /* Timeout for the request */
};
/**************************** MPI Target State Info ***************************/
@@ -741,6 +743,7 @@ mpt_assign_serno(struct mpt_softc *mpt, request_t *req)
#define MPT_LOCK(mpt) mpt_lockspl(mpt)
#define MPT_UNLOCK(mpt) mpt_unlockspl(mpt)
#define MPT_OWNED(mpt) mpt->mpt_islocked
+#define MPT_LOCK_ASSERT(mpt)
#define MPTLOCK_2_CAMLOCK MPT_UNLOCK
#define CAMLOCK_2_MPTLOCK MPT_LOCK
#define MPT_LOCK_SETUP(mpt)
@@ -793,9 +796,15 @@ mpt_sleep(struct mpt_softc *mpt, void *ident, int priority,
return (error);
}
+#define mpt_req_timeout(req, ticks, func, arg) \
+ callout_reset(&(req)->callout, (ticks), (func), (arg));
+#define mpt_req_untimeout(req, func, arg) \
+ callout_stop(&(req)->callout)
+#define mpt_req_timeout_init(req) \
+ callout_init(&(req)->callout)
+
#else
-#ifdef LOCKING_WORKED_AS_IT_SHOULD
-#error "Shouldn't Be Here!"
+#if 1
#define MPT_IFLAGS INTR_TYPE_CAM | INTR_ENTROPY | INTR_MPSAFE
#define MPT_LOCK_SETUP(mpt) \
mtx_init(&mpt->mpt_lock, "mpt", NULL, MTX_DEF); \
@@ -809,53 +818,46 @@ mpt_sleep(struct mpt_softc *mpt, void *ident, int priority,
#define MPT_LOCK(mpt) mtx_lock(&(mpt)->mpt_lock)
#define MPT_UNLOCK(mpt) mtx_unlock(&(mpt)->mpt_lock)
#define MPT_OWNED(mpt) mtx_owned(&(mpt)->mpt_lock)
-#define MPTLOCK_2_CAMLOCK(mpt) \
- mtx_unlock(&(mpt)->mpt_lock); mtx_lock(&Giant)
-#define CAMLOCK_2_MPTLOCK(mpt) \
- mtx_unlock(&Giant); mtx_lock(&(mpt)->mpt_lock)
+#define MPT_LOCK_ASSERT(mpt) mtx_assert(&(mpt)->mpt_lock, MA_OWNED)
+#define MPTLOCK_2_CAMLOCK(mpt)
+#define CAMLOCK_2_MPTLOCK(mpt)
#define mpt_sleep(mpt, ident, priority, wmesg, timo) \
msleep(ident, &(mpt)->mpt_lock, priority, wmesg, timo)
+#define mpt_req_timeout(req, ticks, func, arg) \
+ callout_reset(&(req)->callout, (ticks), (func), (arg));
+#define mpt_req_untimeout(req, func, arg) \
+ callout_stop(&(req)->callout)
+#define mpt_req_timeout_init(req) \
+ callout_init(&(req)->callout, 1)
#else
#define MPT_IFLAGS INTR_TYPE_CAM | INTR_ENTROPY
#define MPT_LOCK_SETUP(mpt) do { } while (0)
#define MPT_LOCK_DESTROY(mpt) do { } while (0)
-#if 0
-#define MPT_LOCK(mpt) \
- device_printf(mpt->dev, "LOCK %s:%d\n", __FILE__, __LINE__); \
- KASSERT(mpt->mpt_locksetup == 0, \
- ("recursive lock acquire at %s:%d", __FILE__, __LINE__)); \
- mpt->mpt_locksetup = 1
-#define MPT_UNLOCK(mpt) \
- device_printf(mpt->dev, "UNLK %s:%d\n", __FILE__, __LINE__); \
- KASSERT(mpt->mpt_locksetup == 1, \
- ("release unowned lock at %s:%d", __FILE__, __LINE__)); \
- mpt->mpt_locksetup = 0
-#else
-#define MPT_LOCK(mpt) \
- KASSERT(mpt->mpt_locksetup == 0, \
- ("recursive lock acquire at %s:%d", __FILE__, __LINE__)); \
- mpt->mpt_locksetup = 1
-#define MPT_UNLOCK(mpt) \
- KASSERT(mpt->mpt_locksetup == 1, \
- ("release unowned lock at %s:%d", __FILE__, __LINE__)); \
- mpt->mpt_locksetup = 0
-#endif
-#define MPT_OWNED(mpt) mpt->mpt_locksetup
-#define MPTLOCK_2_CAMLOCK(mpt) MPT_UNLOCK(mpt)
-#define CAMLOCK_2_MPTLOCK(mpt) MPT_LOCK(mpt)
+#define MPT_LOCK_ASSERT(mpt) mtx_assert(&Giant, MA_OWNED)
+#define MPT_LOCK(mpt) mtx_lock(&Giant)
+#define MPT_UNLOCK(mpt) mtx_unlock(&Giant)
+#define MPTLOCK_2_CAMLOCK(mpt)
+#define CAMLOCK_2_MPTLOCK(mpt)
static __inline int
mpt_sleep(struct mpt_softc *, void *, int, const char *, int);
+#define mpt_ccb_timeout(ccb, ticks, func, arg) \
+ do { \
+ (ccb)->ccb_h.timeout_ch = timeout((func), (arg), (ticks)); \
+ } while (0)
+#define mpt_ccb_untimeout(ccb, func, arg) \
+ untimeout((func), (arg), (ccb)->ccb_h.timeout_ch)
+#define mpt_ccb_timeout_init(ccb) \
+ callout_handle_init(&(ccb)->ccb_h.timeout_ch)
+
static __inline int
mpt_sleep(struct mpt_softc *mpt, void *i, int p, const char *w, int t)
{
int r;
- MPT_UNLOCK(mpt);
r = tsleep(i, p, w, t);
- MPT_LOCK(mpt);
return (r);
}
#endif
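The mpt.h hunks swap the per-CCB timeout(9) handle for a per-request callout and wrap it in mpt_req_timeout()/mpt_req_untimeout()/mpt_req_timeout_init(). A sketch of the intended arm/disarm pairing, mirroring the mpt_cam.c call sites below; the example_* function names are illustrative only:

/* Arm the per-request callout when a command is queued ... */
static void
example_queue_cmd(struct mpt_softc *mpt, request_t *req, union ccb *ccb)
{
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
		    mpt_timeout, ccb);
	} else {
		mpt_req_timeout_init(req);
	}
	mpt_send_cmd(mpt, req);
}

/* ... and stop it in the completion path, before freeing the request. */
static void
example_complete_cmd(struct mpt_softc *mpt, request_t *req, union ccb *ccb)
{
	mpt_req_untimeout(req, mpt_timeout, ccb);	/* callout_stop() */
	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	mpt_free_request(mpt, req);			/* MPT_LOCK held */
}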
diff --git a/sys/dev/mpt/mpt_cam.c b/sys/dev/mpt/mpt_cam.c
index 2276ce2660b9..59f1eb44b96a 100644
--- a/sys/dev/mpt/mpt_cam.c
+++ b/sys/dev/mpt/mpt_cam.c
@@ -217,6 +217,7 @@ mpt_cam_attach(struct mpt_softc *mpt)
int maxq;
int error;
+ MPT_LOCK(mpt);
TAILQ_INIT(&mpt->request_timeout_list);
maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))?
mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt);
@@ -225,14 +226,16 @@ mpt_cam_attach(struct mpt_softc *mpt)
error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
&scsi_io_handler_id);
if (error != 0) {
- goto cleanup0;
+ MPT_UNLOCK(mpt);
+ goto cleanup;
}
handler.reply_handler = mpt_scsi_tmf_reply_handler;
error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
&scsi_tmf_handler_id);
if (error != 0) {
- goto cleanup0;
+ MPT_UNLOCK(mpt);
+ goto cleanup;
}
/*
@@ -244,11 +247,13 @@ mpt_cam_attach(struct mpt_softc *mpt)
error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
&fc_els_handler_id);
if (error != 0) {
- goto cleanup0;
+ MPT_UNLOCK(mpt);
+ goto cleanup;
}
if (mpt_add_els_buffers(mpt) == FALSE) {
error = ENOMEM;
- goto cleanup0;
+ MPT_UNLOCK(mpt);
+ goto cleanup;
}
maxq -= mpt->els_cmds_allocated;
}
@@ -263,7 +268,8 @@ mpt_cam_attach(struct mpt_softc *mpt)
error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
&mpt->scsi_tgt_handler_id);
if (error != 0) {
- goto cleanup0;
+ MPT_UNLOCK(mpt);
+ goto cleanup;
}
}
@@ -274,7 +280,8 @@ mpt_cam_attach(struct mpt_softc *mpt)
if (mpt->tmf_req == NULL) {
mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
error = ENOMEM;
- goto cleanup0;
+ MPT_UNLOCK(mpt);
+ goto cleanup;
}
/*
@@ -286,18 +293,18 @@ mpt_cam_attach(struct mpt_softc *mpt)
mpt->tmf_req->state = REQ_STATE_FREE;
maxq--;
+ /*
+ * The rest of this is CAM foo, for which we need to drop our lock
+ */
+ MPT_UNLOCK(mpt);
+
if (mpt_spawn_recovery_thread(mpt) != 0) {
mpt_prt(mpt, "Unable to spawn recovery thread!\n");
error = ENOMEM;
- goto cleanup0;
+ goto cleanup;
}
/*
- * The rest of this is CAM foo, for which we need to drop our lock
- */
- MPTLOCK_2_CAMLOCK(mpt);
-
- /*
* Create the device queue for our SIM(s).
*/
devq = cam_simq_alloc(maxq);
@@ -310,8 +317,8 @@ mpt_cam_attach(struct mpt_softc *mpt)
/*
* Construct our SIM entry.
*/
- mpt->sim = cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt,
- mpt->unit, &Giant, 1, maxq, devq);
+ mpt->sim =
+ mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
if (mpt->sim == NULL) {
mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
cam_simq_free(devq);
@@ -322,9 +329,11 @@ mpt_cam_attach(struct mpt_softc *mpt)
/*
* Register exactly this bus.
*/
+ MPT_LOCK(mpt);
if (xpt_bus_register(mpt->sim, 0) != CAM_SUCCESS) {
mpt_prt(mpt, "Bus registration Failed!\n");
error = ENOMEM;
+ MPT_UNLOCK(mpt);
goto cleanup;
}
@@ -332,23 +341,24 @@ mpt_cam_attach(struct mpt_softc *mpt)
CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
mpt_prt(mpt, "Unable to allocate Path!\n");
error = ENOMEM;
+ MPT_UNLOCK(mpt);
goto cleanup;
}
+ MPT_UNLOCK(mpt);
/*
* Only register a second bus for RAID physical
* devices if the controller supports RAID.
*/
if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
- CAMLOCK_2_MPTLOCK(mpt);
return (0);
}
/*
* Create a "bus" to export all hidden disks to CAM.
*/
- mpt->phydisk_sim = cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt,
- mpt->unit, &Giant, 1, maxq, devq);
+ mpt->phydisk_sim =
+ mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
if (mpt->phydisk_sim == NULL) {
mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
error = ENOMEM;
@@ -358,9 +368,11 @@ mpt_cam_attach(struct mpt_softc *mpt)
/*
* Register this bus.
*/
+ MPT_LOCK(mpt);
if (xpt_bus_register(mpt->phydisk_sim, 1) != CAM_SUCCESS) {
mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
error = ENOMEM;
+ MPT_UNLOCK(mpt);
goto cleanup;
}
@@ -369,15 +381,14 @@ mpt_cam_attach(struct mpt_softc *mpt)
CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
error = ENOMEM;
+ MPT_UNLOCK(mpt);
goto cleanup;
}
- CAMLOCK_2_MPTLOCK(mpt);
+ MPT_UNLOCK(mpt);
mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n");
return (0);
cleanup:
- CAMLOCK_2_MPTLOCK(mpt);
-cleanup0:
mpt_cam_detach(mpt);
return (error);
}
@@ -445,6 +456,7 @@ mpt_read_config_info_fc(struct mpt_softc *mpt)
mpt->mpt_fcport_page0.WWPN.Low,
mpt->mpt_fcport_speed);
#if __FreeBSD_version >= 500000
+ MPT_UNLOCK(mpt);
{
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
@@ -468,6 +480,7 @@ mpt_read_config_info_fc(struct mpt_softc *mpt)
"World Wide Port Name");
}
+ MPT_LOCK(mpt);
#endif
return (0);
}
@@ -800,29 +813,38 @@ mpt_set_initial_config_spi(struct mpt_softc *mpt)
int
mpt_cam_enable(struct mpt_softc *mpt)
{
+ int error;
+
+ MPT_LOCK(mpt);
+
+ error = EIO;
if (mpt->is_fc) {
if (mpt_read_config_info_fc(mpt)) {
- return (EIO);
+ goto out;
}
if (mpt_set_initial_config_fc(mpt)) {
- return (EIO);
+ goto out;
}
} else if (mpt->is_sas) {
if (mpt_read_config_info_sas(mpt)) {
- return (EIO);
+ goto out;
}
if (mpt_set_initial_config_sas(mpt)) {
- return (EIO);
+ goto out;
}
} else if (mpt->is_spi) {
if (mpt_read_config_info_spi(mpt)) {
- return (EIO);
+ goto out;
}
if (mpt_set_initial_config_spi(mpt)) {
- return (EIO);
+ goto out;
}
}
- return (0);
+ error = 0;
+
+out:
+ MPT_UNLOCK(mpt);
+ return (error);
}
void
@@ -850,6 +872,7 @@ mpt_cam_detach(struct mpt_softc *mpt)
{
mpt_handler_t handler;
+ MPT_LOCK(mpt);
mpt->ready = 0;
mpt_terminate_recovery_thread(mpt);
@@ -871,23 +894,20 @@ mpt_cam_detach(struct mpt_softc *mpt)
mpt_free_request(mpt, mpt->tmf_req);
mpt->tmf_req = NULL;
}
+ MPT_UNLOCK(mpt);
if (mpt->sim != NULL) {
- MPTLOCK_2_CAMLOCK(mpt);
xpt_free_path(mpt->path);
xpt_bus_deregister(cam_sim_path(mpt->sim));
cam_sim_free(mpt->sim, TRUE);
mpt->sim = NULL;
- CAMLOCK_2_MPTLOCK(mpt);
}
if (mpt->phydisk_sim != NULL) {
- MPTLOCK_2_CAMLOCK(mpt);
xpt_free_path(mpt->phydisk_path);
xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
cam_sim_free(mpt->phydisk_sim, TRUE);
mpt->phydisk_sim = NULL;
- CAMLOCK_2_MPTLOCK(mpt);
}
}
@@ -899,9 +919,7 @@ mpt_poll(struct cam_sim *sim)
struct mpt_softc *mpt;
mpt = (struct mpt_softc *)cam_sim_softc(sim);
- MPT_LOCK(mpt);
mpt_intr(mpt);
- MPT_UNLOCK(mpt);
}
/*
@@ -1307,11 +1325,10 @@ out:
ccb->ccb_h.status |= CAM_SIM_QUEUED;
if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
- ccb->ccb_h.timeout_ch =
- timeout(mpt_timeout, (caddr_t)ccb,
- (ccb->ccb_h.timeout * hz) / 1000);
+ mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
+ mpt_timeout, ccb);
} else {
- callout_handle_init(&ccb->ccb_h.timeout_ch);
+ mpt_req_timeout_init(req);
}
if (mpt->verbose > MPT_PRT_DEBUG) {
int nc = 0;
@@ -1709,11 +1726,10 @@ out:
ccb->ccb_h.status |= CAM_SIM_QUEUED;
if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
- ccb->ccb_h.timeout_ch =
- timeout(mpt_timeout, (caddr_t)ccb,
- (ccb->ccb_h.timeout * hz) / 1000);
+ mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
+ mpt_timeout, ccb);
} else {
- callout_handle_init(&ccb->ccb_h.timeout_ch);
+ mpt_req_timeout_init(req);
}
if (mpt->verbose > MPT_PRT_DEBUG) {
int nc = 0;
@@ -2281,7 +2297,7 @@ mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
}
tgt = scsi_req->TargetID;
- untimeout(mpt_timeout, ccb, ccb->ccb_h.timeout_ch);
+ mpt_req_untimeout(req, mpt_timeout, ccb);
ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
@@ -2904,8 +2920,8 @@ mpt_action(struct cam_sim *sim, union ccb *ccb)
CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));
mpt = (struct mpt_softc *)cam_sim_softc(sim);
- KASSERT(MPT_OWNED(mpt) == 0, ("mpt owned on entrance to mpt_action"));
raid_passthru = (sim == mpt->phydisk_sim);
+ MPT_LOCK_ASSERT(mpt);
tgt = ccb->ccb_h.target_id;
lun = ccb->ccb_h.target_lun;
@@ -3652,9 +3668,6 @@ mpt_recovery_thread(void *arg)
{
struct mpt_softc *mpt;
-#if __FreeBSD_version >= 500000
- mtx_lock(&Giant);
-#endif
mpt = (struct mpt_softc *)arg;
MPT_LOCK(mpt);
for (;;) {
@@ -3671,9 +3684,6 @@ mpt_recovery_thread(void *arg)
mpt->recovery_thread = NULL;
wakeup(&mpt->recovery_thread);
MPT_UNLOCK(mpt);
-#if __FreeBSD_version >= 500000
- mtx_unlock(&Giant);
-#endif
kthread_exit(0);
}
@@ -4613,7 +4623,7 @@ mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req,
req->serno, tgt->resid);
if (ccb) {
ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
- ccb->ccb_h.timeout_ch = timeout(mpt_timeout, ccb, 60 * hz);
+ mpt_req_timeout(req, 60 * hz, mpt_timeout, ccb);
}
mpt_send_cmd(mpt, req);
}
@@ -5028,7 +5038,7 @@ mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req,
}
tgt->ccb = NULL;
tgt->nxfers++;
- untimeout(mpt_timeout, ccb, ccb->ccb_h.timeout_ch);
+ mpt_req_untimeout(req, mpt_timeout, ccb);
mpt_lprt(mpt, MPT_PRT_DEBUG,
"TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n",
ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id);
@@ -5093,8 +5103,7 @@ mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req,
TGT_STATE_MOVING_DATA_AND_STATUS) {
tgt->nxfers++;
}
- untimeout(mpt_timeout, ccb,
- ccb->ccb_h.timeout_ch);
+ mpt_req_untimeout(req, mpt_timeout, ccb);
if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
ccb->ccb_h.status |= CAM_SENT_SENSE;
}
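With the SIMs now allocated against the driver's own mutex, the mpt_cam.c changes drop the MPTLOCK_2_CAMLOCK/CAMLOCK_2_MPTLOCK transitions and stop taking Giant in the recovery thread. A minimal sketch of the resulting worker-thread shape; shutdown_flag and work_wchan are assumed names, not actual softc fields:

/*
 * Illustrative only: the thread body runs entirely under the softc
 * mutex and never acquires Giant.
 */
static void
example_worker_thread(void *arg)
{
	struct mpt_softc *mpt = arg;

	MPT_LOCK(mpt);
	while (mpt->shutdown_flag == 0) {
		mpt_sleep(mpt, &mpt->work_wchan, PUSER, "mptwrk", 0);
		/* service queued work; msleep() reacquires the lock */
	}
	MPT_UNLOCK(mpt);
	kthread_exit(0);
}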
diff --git a/sys/dev/mpt/mpt_cam.h b/sys/dev/mpt/mpt_cam.h
index 65e272eddba3..f2faa4abdff8 100644
--- a/sys/dev/mpt/mpt_cam.h
+++ b/sys/dev/mpt/mpt_cam.h
@@ -143,4 +143,12 @@ mpt_wakeup_recovery_thread(struct mpt_softc *mpt)
wakeup(mpt);
}
+/************************** Version Compatibility *************************/
+#if __FreeBSD_version < 700031
+#define mpt_sim_alloc(a, b, c, mpt, e, f, g) \
+ cam_sim_alloc(a, b, c, mpt, (mpt)->unit, e, f, g)
+#else
+#define mpt_sim_alloc(a, b, c, mpt, e, f, g) \
+ cam_sim_alloc(a, b, c, mpt, (mpt)->unit, &(mpt)->mpt_lock, e, f, g)
+#endif
#endif /*_MPT_CAM_H_ */
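The new mpt_sim_alloc() wrapper papers over the cam_sim_alloc() signature change that added a per-SIM mutex argument in newer CAM. An illustrative expansion of the two cases (not additional driver code); sim, maxq, and devq are as in mpt_cam_attach():

#if __FreeBSD_version < 700031
	sim = cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, mpt->unit,
	    /*max_dev_transactions*/1, maxq, devq);
#else
	/* CAM now calls mpt_action()/mpt_poll() with mpt_lock held. */
	sim = cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, mpt->unit,
	    &mpt->mpt_lock, /*max_dev_transactions*/1, maxq, devq);
#endif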
diff --git a/sys/dev/mpt/mpt_pci.c b/sys/dev/mpt/mpt_pci.c
index d65c9c91d606..47c724b2e727 100644
--- a/sys/dev/mpt/mpt_pci.c
+++ b/sys/dev/mpt/mpt_pci.c
@@ -597,12 +597,9 @@ mpt_pci_attach(device_t dev)
/* Initialize the hardware */
if (mpt->disabled == 0) {
- MPT_LOCK(mpt);
if (mpt_attach(mpt) != 0) {
- MPT_UNLOCK(mpt);
goto bad;
}
- MPT_UNLOCK(mpt);
} else {
mpt_prt(mpt, "device disabled at user request\n");
goto bad;
@@ -613,12 +610,9 @@ mpt_pci_attach(device_t dev)
if (mpt->eh == NULL) {
mpt_prt(mpt, "shutdown event registration failed\n");
- MPT_LOCK(mpt);
(void) mpt_detach(mpt);
- MPT_UNLOCK(mpt);
goto bad;
}
- KASSERT(MPT_OWNED(mpt) == 0, ("leaving attach with device locked"));
return (0);
bad:
@@ -681,7 +675,6 @@ mpt_pci_detach(device_t dev)
mpt = (struct mpt_softc*)device_get_softc(dev);
if (mpt) {
- MPT_LOCK(mpt);
mpt_disable_ints(mpt);
mpt_detach(mpt);
mpt_reset(mpt, /*reinit*/FALSE);
@@ -691,7 +684,6 @@ mpt_pci_detach(device_t dev)
if (mpt->eh != NULL) {
EVENTHANDLER_DEREGISTER(shutdown_final, mpt->eh);
}
- MPT_UNLOCK(mpt);
}
return(0);
}
@@ -708,9 +700,7 @@ mpt_pci_shutdown(device_t dev)
mpt = (struct mpt_softc *)device_get_softc(dev);
if (mpt) {
int r;
- MPT_LOCK(mpt);
r = mpt_shutdown(mpt);
- MPT_UNLOCK(mpt);
return (r);
}
return(0);
diff --git a/sys/dev/mpt/mpt_raid.c b/sys/dev/mpt/mpt_raid.c
index 624baaf73ce1..192c9d411595 100644
--- a/sys/dev/mpt/mpt_raid.c
+++ b/sys/dev/mpt/mpt_raid.c
@@ -116,12 +116,14 @@ static timeout_t mpt_raid_timer;
static void mpt_enable_vol(struct mpt_softc *mpt,
struct mpt_raid_volume *mpt_vol, int enable);
#endif
-static void mpt_verify_mwce(struct mpt_softc *mpt,
- struct mpt_raid_volume *mpt_vol);
-static void mpt_adjust_queue_depth(struct mpt_softc *mpt,
- struct mpt_raid_volume *mpt_vol,
- struct cam_path *path);
-static void mpt_raid_sysctl_attach(struct mpt_softc *mpt);
+static void mpt_verify_mwce(struct mpt_softc *, struct mpt_raid_volume *);
+static void mpt_adjust_queue_depth(struct mpt_softc *, struct mpt_raid_volume *,
+ struct cam_path *);
+#if __FreeBSD_version < 500000
+#define mpt_raid_sysctl_attach(x) do { } while (0)
+#else
+static void mpt_raid_sysctl_attach(struct mpt_softc *);
+#endif
static uint32_t raid_handler_id = MPT_HANDLER_ID_NONE;
@@ -270,6 +272,13 @@ mpt_raid_attach(struct mpt_softc *mpt)
mpt_callout_init(&mpt->raid_timer);
+ error = mpt_spawn_raid_thread(mpt);
+ if (error != 0) {
+ mpt_prt(mpt, "Unable to spawn RAID thread!\n");
+ goto cleanup;
+ }
+
+ MPT_LOCK(mpt);
handler.reply_handler = mpt_raid_reply_handler;
error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
&raid_handler_id);
@@ -278,28 +287,22 @@ mpt_raid_attach(struct mpt_softc *mpt)
goto cleanup;
}
- error = mpt_spawn_raid_thread(mpt);
- if (error != 0) {
- mpt_prt(mpt, "Unable to spawn RAID thread!\n");
- goto cleanup;
- }
-
xpt_setup_ccb(&csa.ccb_h, mpt->path, 5);
csa.ccb_h.func_code = XPT_SASYNC_CB;
csa.event_enable = AC_FOUND_DEVICE;
csa.callback = mpt_raid_async;
csa.callback_arg = mpt;
- MPTLOCK_2_CAMLOCK(mpt);
xpt_action((union ccb *)&csa);
- CAMLOCK_2_MPTLOCK(mpt);
if (csa.ccb_h.status != CAM_REQ_CMP) {
mpt_prt(mpt, "mpt_raid_attach: Unable to register "
"CAM async handler.\n");
}
+ MPT_UNLOCK(mpt);
mpt_raid_sysctl_attach(mpt);
return (0);
cleanup:
+ MPT_UNLOCK(mpt);
mpt_raid_detach(mpt);
return (error);
}
@@ -317,6 +320,7 @@ mpt_raid_detach(struct mpt_softc *mpt)
mpt_handler_t handler;
callout_stop(&mpt->raid_timer);
+ MPT_LOCK(mpt);
mpt_terminate_raid_thread(mpt);
handler.reply_handler = mpt_raid_reply_handler;
@@ -327,9 +331,8 @@ mpt_raid_detach(struct mpt_softc *mpt)
csa.event_enable = 0;
csa.callback = mpt_raid_async;
csa.callback_arg = mpt;
- MPTLOCK_2_CAMLOCK(mpt);
xpt_action((union ccb *)&csa);
- CAMLOCK_2_MPTLOCK(mpt);
+ MPT_UNLOCK(mpt);
}
static void
@@ -620,12 +623,17 @@ mpt_spawn_raid_thread(struct mpt_softc *mpt)
* reject I/O to an ID we later determine is for a
* hidden physdisk.
*/
+ MPT_LOCK(mpt);
xpt_freeze_simq(mpt->phydisk_sim, 1);
+ MPT_UNLOCK(mpt);
error = mpt_kthread_create(mpt_raid_thread, mpt,
&mpt->raid_thread, /*flags*/0, /*altstack*/0,
"mpt_raid%d", mpt->unit);
- if (error != 0)
+ if (error != 0) {
+ MPT_LOCK(mpt);
xpt_release_simq(mpt->phydisk_sim, /*run_queue*/FALSE);
+ MPT_UNLOCK(mpt);
+ }
return (error);
}
@@ -658,9 +666,6 @@ mpt_raid_thread(void *arg)
struct mpt_softc *mpt;
int firstrun;
-#if __FreeBSD_version >= 500000
- mtx_lock(&Giant);
-#endif
mpt = (struct mpt_softc *)arg;
firstrun = 1;
MPT_LOCK(mpt);
@@ -717,9 +722,6 @@ mpt_raid_thread(void *arg)
mpt->raid_thread = NULL;
wakeup(&mpt->raid_thread);
MPT_UNLOCK(mpt);
-#if __FreeBSD_version >= 500000
- mtx_unlock(&Giant);
-#endif
kthread_exit(0);
}
@@ -756,8 +758,7 @@ mpt_raid_quiesce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
if (rv != 0)
return (CAM_REQ_CMP_ERR);
- ccb->ccb_h.timeout_ch =
- timeout(mpt_raid_quiesce_timeout, (caddr_t)ccb, 5 * hz);
+ mpt_req_timeout(req, 5 * hz, mpt_raid_quiesce_timeout, ccb);
#if 0
if (rv == ETIMEDOUT) {
mpt_disk_prt(mpt, mpt_disk, "mpt_raid_quiesce_disk: "
@@ -1605,6 +1606,7 @@ mpt_raid_free_mem(struct mpt_softc *mpt)
mpt->raid_max_disks = 0;
}
+#if __FreeBSD_version >= 500000
static int
mpt_raid_set_vol_resync_rate(struct mpt_softc *mpt, u_int rate)
{
@@ -1718,7 +1720,6 @@ mpt_raid_set_vol_mwce(struct mpt_softc *mpt, mpt_raid_mwce_t mwce)
MPT_UNLOCK(mpt);
return (0);
}
-
const char *mpt_vol_mwce_strs[] =
{
"On",
@@ -1807,7 +1808,6 @@ mpt_raid_sysctl_vol_queue_depth(SYSCTL_HANDLER_ARGS)
static void
mpt_raid_sysctl_attach(struct mpt_softc *mpt)
{
-#if __FreeBSD_version >= 500000
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
@@ -1829,5 +1829,5 @@ mpt_raid_sysctl_attach(struct mpt_softc *mpt)
"nonoptimal_volumes", CTLFLAG_RD,
&mpt->raid_nonopt_volumes, 0,
"number of nonoptimal volumes");
-#endif
}
+#endif