summaryrefslogtreecommitdiff
path: root/sys
diff options
context:
space:
mode:
authorDavid Gwynne <dlg@cvs.openbsd.org>2010-04-06 00:58:01 +0000
committerDavid Gwynne <dlg@cvs.openbsd.org>2010-04-06 00:58:01 +0000
commit84cd084901fdee8a8e3fae3a885b533c9a29b1d5 (patch)
treeb95204b31eb1a1af9f736150e4cd9386b87b48e0 /sys
parent79fb0239fb794c3a4a1eb6f7145f92d0a923b81c (diff)
implement a new mechanism for allocating resources on the bus.
instead of optimistically trying to use a resource by executing an xs and then failing when there's no room for it, this puts things that want to use the hardware on a runqueue. as resources become available on the bus then consumers on the runqueue are popped off and guaranteed access to the resource. the resources are generally "ccbs" in adapter drivers, so this abstracts a way for the midlayer to get access to them into something called iopools. it also provides a callback api for consumers of resources to use: the scsi_ioh api for things that want direct access to the ccbs, and the scsi_xsh api for things that want to issue a scsi_xfer on the bus. these apis have been modelled on the timeout api. scsi_xs_get and therefore scsi_scsi_cmd have been cut over to using these apis internally, so if they are allowed to sleep they can wait on the runqueue for a resource to become available and therefore guarantee that when executed on an adapter providing an iopool that they will succeed. ok krw@ beck@ marco@ tested by many including krw@ beck@ mk@ okan@ todd@
Diffstat (limited to 'sys')
-rw-r--r--sys/scsi/scsi_base.c452
-rw-r--r--sys/scsi/scsiconf.c33
-rw-r--r--sys/scsi/scsiconf.h85
3 files changed, 526 insertions, 44 deletions
diff --git a/sys/scsi/scsi_base.c b/sys/scsi/scsi_base.c
index b42febf6b21..939cc22d502 100644
--- a/sys/scsi/scsi_base.c
+++ b/sys/scsi/scsi_base.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: scsi_base.c,v 1.167 2010/03/23 01:57:20 krw Exp $ */
+/* $OpenBSD: scsi_base.c,v 1.168 2010/04/06 00:58:00 dlg Exp $ */
/* $NetBSD: scsi_base.c,v 1.43 1997/04/02 02:29:36 mycroft Exp $ */
/*
@@ -76,6 +76,31 @@ struct scsi_plug {
void scsi_plug_probe(void *, void *);
void scsi_plug_detach(void *, void *);
+struct scsi_xfer * scsi_xs_io(struct scsi_link *, void *, int);
+
+struct scsi_iohandler * scsi_ioh_deq(struct scsi_iopool *);
+void scsi_ioh_req(struct scsi_iopool *,
+ struct scsi_iohandler *);
+void scsi_ioh_runqueue(struct scsi_iopool *);
+
+void scsi_xsh_ioh(void *, void *);
+void scsi_xsh_runqueue(struct scsi_link *);
+struct scsi_xshandler * scsi_xsh_deq(struct scsi_link *);
+
+int scsi_link_open(struct scsi_link *);
+void scsi_link_close(struct scsi_link *);
+
+int scsi_sem_enter(struct mutex *, u_int *);
+int scsi_sem_leave(struct mutex *, u_int *);
+
+/* synchronous api for allocating an io. */
+struct scsi_io_mover {
+ struct mutex mtx;
+ void *io;
+};
+
+void scsi_io_get_done(void *, void *);
+
/*
* Called when a scsibus is attached to initialize global data.
*/
@@ -170,78 +195,415 @@ scsi_deinit()
return;
}
+int
+scsi_sem_enter(struct mutex *mtx, u_int *running)
+{
+ int rv = 1;
+
+ mtx_enter(mtx);
+ (*running)++;
+ if ((*running) > 1)
+ rv = 0;
+ mtx_leave(mtx);
+
+ return (rv);
+}
+
+int
+scsi_sem_leave(struct mutex *mtx, u_int *running)
+{
+ int rv = 1;
+
+ mtx_enter(mtx);
+ (*running)--;
+ if ((*running) > 0)
+ rv = 0;
+ mtx_leave(mtx);
+
+ return (rv);
+}
+
+void
+scsi_iopool_init(struct scsi_iopool *iopl, void *iocookie,
+ void *(*io_get)(void *), void (*io_put)(void *, void *))
+{
+ iopl->iocookie = iocookie;
+ iopl->io_get = io_get;
+ iopl->io_put = io_put;
+
+ TAILQ_INIT(&iopl->queue);
+ iopl->running = 0;
+ mtx_init(&iopl->mtx, IPL_BIO);
+}
+
+void *
+scsi_default_get(void *iocookie)
+{
+ return (iocookie);
+}
+
+void
+scsi_default_put(void *iocookie, void *io)
+{
+#ifdef DIAGNOSTIC
+ if (iocookie != io)
+ panic("unexpected opening returned");
+#endif
+}
+
/*
- * Get a scsi transfer structure for the caller. Charge the structure
- * to the device that is referenced by the sc_link structure. If the
- * sc_link structure has no 'credits' then the device already has the
- * maximum number or outstanding operations under way. In this stage,
- * wait on the structure so that when one is freed, we are awoken again
- * If the SCSI_NOSLEEP flag is set, then do not wait, but rather, return
- * a NULL pointer, signifying that no slots were available
- * Note in the link structure, that we are waiting on it.
+ * public interface to the ioh api.
*/
-struct scsi_xfer *
-scsi_xs_get(struct scsi_link *link, int flags)
+void
+scsi_ioh_set(struct scsi_iohandler *ioh, struct scsi_iopool *iopl,
+ void (*handler)(void *, void *), void *cookie)
{
- struct scsi_xfer *xs;
+ ioh->onq = 0;
+ ioh->pool = iopl;
+ ioh->handler = handler;
+ ioh->cookie = cookie;
+}
+
+void
+scsi_ioh_add(struct scsi_iohandler *ioh)
+{
+ struct scsi_iopool *iopl = ioh->pool;
+
+ mtx_enter(&iopl->mtx);
+ if (!ioh->onq) {
+ TAILQ_INSERT_TAIL(&iopl->queue, &ioh->entry, e);
+ ioh->onq = 1;
+ }
+ mtx_leave(&iopl->mtx);
+
+ /* lets get some io up in the air */
+ scsi_ioh_runqueue(iopl);
+}
+
+void
+scsi_ioh_del(struct scsi_iohandler *ioh)
+{
+ struct scsi_iopool *iopl = ioh->pool;
+
+ mtx_enter(&iopl->mtx);
+ if (ioh->onq) {
+ TAILQ_REMOVE(&iopl->queue, &ioh->entry, e);
+ ioh->onq = 0;
+ }
+ mtx_leave(&iopl->mtx);
+}
+
+/*
+ * internal iopool runqueue handling.
+ */
+
+struct scsi_iohandler *
+scsi_ioh_deq(struct scsi_iopool *iopl)
+{
+ struct scsi_iohandler *ioh = NULL;
+
+ mtx_enter(&iopl->mtx);
+ ioh = (struct scsi_iohandler *)TAILQ_FIRST(&iopl->queue);
+ if (ioh != NULL) {
+ TAILQ_REMOVE(&iopl->queue, &ioh->entry, e);
+ ioh->onq = 0;
+ }
+ mtx_leave(&iopl->mtx);
+
+ return (ioh);
+}
+
+void
+scsi_ioh_req(struct scsi_iopool *iopl, struct scsi_iohandler *ioh)
+{
+ mtx_enter(&iopl->mtx);
+ if (!ioh->onq) {
+ TAILQ_INSERT_HEAD(&iopl->queue, &ioh->entry, e);
+ ioh->onq = 1;
+ }
+ mtx_leave(&iopl->mtx);
+}
+
+void
+scsi_ioh_runqueue(struct scsi_iopool *iopl)
+{
+ struct scsi_iohandler *ioh;
+ void *io;
+
+ if (!scsi_sem_enter(&iopl->mtx, &iopl->running))
+ return;
+ do {
+ for (;;) {
+ ioh = scsi_ioh_deq(iopl);
+ if (ioh == NULL)
+ break;
+
+ io = iopl->io_get(iopl->iocookie);
+ if (io == NULL) {
+ scsi_ioh_req(iopl, ioh);
+ break;
+ }
+
+ ioh->handler(ioh->cookie, io);
+ }
+ } while (!scsi_sem_leave(&iopl->mtx, &iopl->running));
+}
+
+/*
+ * synchronous api for allocating an io.
+ */
+
+void *
+scsi_io_get(struct scsi_iopool *iopl, int flags)
+{
+ struct scsi_io_mover m = { MUTEX_INITIALIZER(IPL_BIO), NULL };
+ struct scsi_iohandler ioh;
+ void *io;
+
+ /* try and sneak an io off the backend immediately */
+ io = iopl->io_get(iopl->iocookie);
+ if (io != NULL)
+ return (io);
+ else if (ISSET(flags, SCSI_NOSLEEP))
+ return (NULL);
+
+ /* otherwise sleep until we get one */
+ scsi_ioh_set(&ioh, iopl, scsi_io_get_done, &m);
+ scsi_ioh_add(&ioh);
+
+ mtx_enter(&m.mtx);
+ while (m.io == NULL)
+ msleep(&m, &m.mtx, PRIBIO, "scsiio", 0);
+ mtx_leave(&m.mtx);
+
+ return (m.io);
+}
+
+void
+scsi_io_get_done(void *cookie, void *io)
+{
+ struct scsi_io_mover *m = cookie;
+
+ mtx_enter(&m->mtx);
+ m->io = io;
+ wakeup_one(m);
+ mtx_leave(&m->mtx);
+}
+
+void
+scsi_io_put(struct scsi_iopool *iopl, void *io)
+{
+ iopl->io_put(iopl->iocookie, io);
+ scsi_ioh_runqueue(iopl);
+}
+
+/*
+ * public interface to the xsh api.
+ */
+
+void
+scsi_xsh_set(struct scsi_xshandler *xsh, struct scsi_link *link,
+ void (*handler)(struct scsi_xfer *))
+{
+ scsi_ioh_set(&xsh->ioh, link->pool, scsi_xsh_ioh, xsh);
+
+ xsh->onq = 0;
+ xsh->link = link;
+ xsh->handler = handler;
+}
+
+void
+scsi_xsh_add(struct scsi_xshandler *xsh)
+{
+ struct scsi_link *link = xsh->link;
mtx_enter(&link->mtx);
- while (link->openings == 0) {
- if (ISSET(flags, SCSI_NOSLEEP)) {
- mtx_leave(&link->mtx);
+ if (!xsh->onq) {
+ TAILQ_INSERT_TAIL(&link->queue, &xsh->ioh.entry, e);
+ xsh->onq = 1;
+ }
+ mtx_leave(&link->mtx);
+
+ /* lets get some io up in the air */
+ scsi_xsh_runqueue(link);
+}
+
+void
+scsi_xsh_del(struct scsi_xshandler *xsh)
+{
+ struct scsi_link *link = xsh->link;
+
+ mtx_enter(&link->mtx);
+ if (xsh->onq) {
+ TAILQ_REMOVE(&link->queue, &xsh->ioh.entry, e);
+ xsh->onq = 0;
+ }
+ mtx_leave(&link->mtx);
+}
+
+/*
+ * internal xs runqueue handling.
+ */
+
+struct scsi_xshandler *
+scsi_xsh_deq(struct scsi_link *link)
+{
+ struct scsi_runq_entry *entry;
+ struct scsi_xshandler *xsh = NULL;
+
+ mtx_enter(&link->mtx);
+ if (link->openings && ((entry = TAILQ_FIRST(&link->queue)) != NULL)) {
+ TAILQ_REMOVE(&link->queue, entry, e);
+
+ xsh = (struct scsi_xshandler *)entry;
+ xsh->onq = 0;
+
+ link->openings--;
+ }
+ mtx_leave(&link->mtx);
+
+ return (xsh);
+}
+
+void
+scsi_xsh_runqueue(struct scsi_link *link)
+{
+ struct scsi_xshandler *xsh;
+
+ if (!scsi_sem_enter(&link->mtx, &link->running))
+ return;
+ do {
+ for (;;) {
+ xsh = scsi_xsh_deq(link);
+ if (xsh == NULL)
+ break;
+
+ scsi_ioh_add(&xsh->ioh);
+ }
+ } while (!scsi_sem_leave(&link->mtx, &link->running));
+}
+
+void
+scsi_xsh_ioh(void *cookie, void *io)
+{
+ struct scsi_xshandler *xsh = cookie;
+ struct scsi_xfer *xs;
+
+ xs = scsi_xs_io(xsh->link, io, SCSI_NOSLEEP);
+ if (xs == NULL) {
+ /*
+ * in this situation we should queue things waiting for an
+ * xs and then give them xses when they were supposed be to
+ * returned to the pool.
+ */
+
+ printf("scsi_xfer pool exhausted!\n");
+ scsi_xsh_add(xsh);
+ return;
+ }
+
+ xsh->handler(xs);
+}
+
+/*
+ * Get a scsi transfer structure for the caller.
+ * Go to the iopool backend for an "opening" and then attach an xs to it.
+ */
+
+struct scsi_xfer *
+scsi_xs_get(struct scsi_link *link, int flags)
+{
+ struct scsi_xshandler xsh;
+ struct scsi_io_mover m = { MUTEX_INITIALIZER(IPL_BIO), NULL };
+ void *io;
+
+ if (scsi_link_open(link)) {
+ io = scsi_io_get(link->pool, flags);
+ if (io == NULL) {
+ scsi_link_close(link);
return (NULL);
}
+ } else {
+ if (ISSET(flags, SCSI_NOSLEEP))
+ return (NULL);
- atomic_setbits_int(&link->state, SDEV_S_WAITING);
- msleep(link, &link->mtx, PRIBIO, "getxs", 0);
+ /* really custom xs handler to avoid scsi_xsh_ioh */
+ scsi_ioh_set(&xsh.ioh, link->pool, scsi_io_get_done, &m);
+ xsh.onq = 0;
+ xsh.link = link;
+ scsi_xsh_add(&xsh);
+
+ mtx_enter(&m.mtx);
+ while (m.io == NULL)
+ msleep(&m, &m.mtx, PRIBIO, "scsixs", 0);
+ mtx_leave(&m.mtx);
+
+ io = m.io;
}
- link->openings--;
+
+ return (scsi_xs_io(link, io, flags));
+}
+
+int
+scsi_link_open(struct scsi_link *link)
+{
+ int open = 0;
+
+ mtx_enter(&link->mtx);
+ if (link->openings) {
+ link->openings--;
+ open = 1;
+ }
+ mtx_leave(&link->mtx);
+
+ return (open);
+}
+
+void
+scsi_link_close(struct scsi_link *link)
+{
+ mtx_enter(&link->mtx);
+ link->openings++;
mtx_leave(&link->mtx);
- /* pool is shared, link mtx is not */
+ scsi_xsh_runqueue(link);
+}
+
+struct scsi_xfer *
+scsi_xs_io(struct scsi_link *link, void *io, int flags)
+{
+ struct scsi_xfer *xs;
+
xs = pool_get(&scsi_xfer_pool, PR_ZERO |
(ISSET(flags, SCSI_NOSLEEP) ? PR_NOWAIT : PR_WAITOK));
if (xs == NULL) {
- mtx_enter(&link->mtx);
- link->openings++;
- mtx_leave(&link->mtx);
+ scsi_io_put(link->pool, io);
+ scsi_link_close(link);
} else {
xs->flags = flags;
xs->sc_link = link;
xs->retries = SCSI_RETRIES;
xs->timeout = 10000;
xs->cmd = &xs->cmdstore;
+ xs->io = io;
}
return (xs);
}
-/*
- * Given a scsi_xfer struct, and a device (referenced through sc_link)
- * return the struct to the free pool and credit the device with it
- * If another process is waiting for an xs, do a wakeup, let it proceed
- */
void
scsi_xs_put(struct scsi_xfer *xs)
{
struct scsi_link *link = xs->sc_link;
- int start = 1;
+ void *io = xs->io;
pool_put(&scsi_xfer_pool, xs);
- mtx_enter(&link->mtx);
- link->openings++;
+ scsi_io_put(link->pool, io);
+ scsi_link_close(link);
- /* If someone is waiting for scsi_xfer, wake them up. */
- if (ISSET(link->state, SDEV_S_WAITING)) {
- atomic_clearbits_int(&link->state, SDEV_S_WAITING);
- wakeup(link);
- start = 0;
- }
- mtx_leave(&link->mtx);
-
- if (start && link->device->start)
+ if (link->device->start)
link->device->start(link->device_softc);
}
@@ -1915,6 +2277,18 @@ scsi_buf_requeue(struct buf *head, struct buf *bp, struct mutex *mtx)
mtx_leave(mtx);
}
+int
+scsi_buf_canqueue(struct buf *head, struct mutex *mtx)
+{
+ int rv;
+
+ mtx_enter(mtx);
+ rv = (head->b_actf != NULL);
+ mtx_leave(mtx);
+
+ return (rv);
+}
+
void
scsi_buf_killqueue(struct buf *head, struct mutex *mtx)
{
diff --git a/sys/scsi/scsiconf.c b/sys/scsi/scsiconf.c
index ef5cb8030ec..81b64cff7c5 100644
--- a/sys/scsi/scsiconf.c
+++ b/sys/scsi/scsiconf.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: scsiconf.c,v 1.154 2010/01/01 14:28:59 miod Exp $ */
+/* $OpenBSD: scsiconf.c,v 1.155 2010/04/06 00:58:00 dlg Exp $ */
/* $NetBSD: scsiconf.c,v 1.57 1996/05/02 01:09:01 neil Exp $ */
/*
@@ -517,7 +517,7 @@ scsi_detach_lun(struct scsibus_softc *sc, int target, int lun, int flags)
if (((flags & DETACH_FORCE) == 0) && (link->flags & SDEV_OPEN))
return (EBUSY);
- /* detaching a device from scsibus is a three step process... */
+ /* detaching a device from scsibus is a four step process... */
/* 1. detach the device */
#if NMPATH > 0
@@ -530,11 +530,15 @@ scsi_detach_lun(struct scsibus_softc *sc, int target, int lun, int flags)
if (rv != 0)
return (rv);
- /* 2. free up its state in the adapter */
+ /* 2. if its using the openings io allocator, clean it up */
+ if (ISSET(link->flags, SDEV_OWN_IOPL))
+ free(link->pool, M_DEVBUF);
+
+ /* 3. free up its state in the adapter */
if (alink->adapter->dev_free != NULL)
alink->adapter->dev_free(link);
- /* 3. free up its state in the midlayer */
+ /* 4. free up its state in the midlayer */
if (link->id != NULL)
devid_free(link->id);
free(link, M_DEVBUF);
@@ -859,6 +863,7 @@ scsi_probedev(struct scsibus_softc *scsi, int target, int lun)
sc_link->lun = lun;
sc_link->device = &probe_switch;
mtx_init(&sc_link->mtx, IPL_BIO);
+ TAILQ_INIT(&sc_link->queue);
inqbuf = &sc_link->inqdata;
SC_DEBUG(sc_link, SDEV_DB2, ("scsi_link created.\n"));
@@ -872,6 +877,23 @@ scsi_probedev(struct scsibus_softc *scsi, int target, int lun)
}
/*
+ * If we havent been given an io pool by now then fall back to
+ * using sc_link->openings.
+ */
+ if (sc_link->pool == NULL) {
+ sc_link->pool = malloc(sizeof(*sc_link->pool),
+ M_DEVBUF, M_NOWAIT);
+ if (sc_link->pool == NULL) {
+ rslt = ENOMEM;
+ goto bad;
+ }
+ scsi_iopool_init(sc_link->pool, sc_link,
+ scsi_default_get, scsi_default_put);
+
+ SET(sc_link->flags, SDEV_OWN_IOPL);
+ }
+
+ /*
* Tell drivers that are paying attention to avoid sync/wide/tags until
* INQUIRY data has been processed and the quirks information is
* complete. Some drivers set bits in quirks before we get here, so
@@ -1034,6 +1056,9 @@ free_devid:
if (sc_link->id)
devid_free(sc_link->id);
bad:
+ if (ISSET(sc_link->flags, SDEV_OWN_IOPL))
+ free(sc_link->pool, M_DEVBUF);
+
if (scsi->adapter_link->adapter->dev_free != NULL)
scsi->adapter_link->adapter->dev_free(sc_link);
free:
diff --git a/sys/scsi/scsiconf.h b/sys/scsi/scsiconf.h
index 7a9a9b05f1d..72257b98703 100644
--- a/sys/scsi/scsiconf.h
+++ b/sys/scsi/scsiconf.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: scsiconf.h,v 1.120 2010/03/23 01:57:20 krw Exp $ */
+/* $OpenBSD: scsiconf.h,v 1.121 2010/04/06 00:58:00 dlg Exp $ */
/* $NetBSD: scsiconf.h,v 1.35 1997/04/02 02:29:38 mycroft Exp $ */
/*
@@ -327,6 +327,52 @@ struct scsi_device {
};
/*
+ *
+ */
+
+struct scsi_runq_entry {
+ TAILQ_ENTRY(scsi_runq_entry) e;
+};
+TAILQ_HEAD(scsi_runq, scsi_runq_entry);
+
+struct scsi_iopool;
+
+struct scsi_iohandler {
+ struct scsi_runq_entry entry; /* must be first */
+ u_int onq;
+
+ struct scsi_iopool *pool;
+ void (*handler)(void *, void *);
+ void *cookie;
+};
+
+struct scsi_iopool {
+ /* access to the IOs */
+ void *iocookie;
+ void *(*io_get)(void *);
+ void (*io_put)(void *, void *);
+
+ /* the runqueue */
+ struct scsi_runq queue;
+ /* runqueue semaphore */
+ u_int running;
+ /* protection for the runqueue and its semaphore */
+ struct mutex mtx;
+};
+
+/*
+ *
+ */
+
+struct scsi_xshandler {
+ struct scsi_iohandler ioh; /* must be first */
+ u_int onq;
+
+ struct scsi_link *link;
+ void (*handler)(struct scsi_xfer *);
+};
+
+/*
* This structure describes the connection between an adapter driver and
* a device driver, and is used by each to call services provided by
* the other, and to allow generic scsi glue code to call these services
@@ -356,6 +402,7 @@ struct scsi_link {
#define SDEV_2NDBUS 0x0400 /* device is a 'second' bus device */
#define SDEV_UMASS 0x0800 /* device is UMASS SCSI */
#define SDEV_VIRTUAL 0x1000 /* device is virtualised on the hba */
+#define SDEV_OWN_IOPL 0x2000 /* scsibus */
u_int16_t quirks; /* per-device oddities */
#define SDEV_AUTOSAVE 0x0001 /* do implicit SAVEDATAPOINTER on disconnect */
#define SDEV_NOSYNC 0x0002 /* does not grok SDTR */
@@ -375,6 +422,11 @@ struct scsi_link {
struct scsi_inquiry_data inqdata; /* copy of INQUIRY data from probe */
struct devid *id;
struct mutex mtx;
+
+ struct scsi_runq queue;
+ u_int running;
+
+ struct scsi_iopool *pool;
};
int scsiprint(void *, const char *);
@@ -452,6 +504,8 @@ struct scsi_xfer {
struct timeout stimeout;
void *cookie;
void (*done)(struct scsi_xfer *);
+
+ void *io; /* adapter io resource */
};
/*
@@ -551,6 +605,7 @@ int scsi_interpret_sense(struct scsi_xfer *);
void scsi_buf_enqueue(struct buf *, struct buf *, struct mutex *);
struct buf *scsi_buf_dequeue(struct buf *, struct mutex *);
void scsi_buf_requeue(struct buf *, struct buf *, struct mutex *);
+int scsi_buf_canqueue(struct buf *, struct mutex *);
void scsi_buf_killqueue(struct buf *, struct mutex *);
void scsi_xs_show(struct scsi_xfer *);
@@ -581,6 +636,34 @@ int scsi_xs_sync(struct scsi_xfer *);
void scsi_xs_put(struct scsi_xfer *);
/*
+ * iopool stuff
+ */
+void scsi_iopool_init(struct scsi_iopool *, void *,
+ void *(*)(void *), void (*)(void *, void *));
+
+void * scsi_io_get(struct scsi_iopool *, int);
+void scsi_io_put(struct scsi_iopool *, void *);
+
+/*
+ * default io allocator.
+ */
+void * scsi_default_get(void *);
+void scsi_default_put(void *, void *);
+
+/*
+ * io handler interface
+ */
+void scsi_ioh_set(struct scsi_iohandler *, struct scsi_iopool *,
+ void (*)(void *, void *), void *);
+void scsi_ioh_add(struct scsi_iohandler *);
+void scsi_ioh_del(struct scsi_iohandler *);
+
+void scsi_xsh_set(struct scsi_xshandler *, struct scsi_link *,
+ void (*)(struct scsi_xfer *));
+void scsi_xsh_add(struct scsi_xshandler *);
+void scsi_xsh_del(struct scsi_xshandler *);
+
+/*
* Entrypoints for multipathing
*/
int mpath_path_attach(struct scsi_link *);