author     Kenneth D. Merry <ken@FreeBSD.org>    2018-03-30 15:28:25 +0000
committer  Kenneth D. Merry <ken@FreeBSD.org>    2018-03-30 15:28:25 +0000
commit     ef270ab1b656486947b3b4aaec9bc0a6b5d6af21 (patch)
tree       a6d661ed5f29d6a96dca0df8da5b91c151df9249
parent     9c9f60dbd646aa928eb7f3f664d07bf077814c47 (diff)
-rw-r--r--  share/man/man4/Makefile  1
-rw-r--r--  share/man/man4/ocs_fc.4  194
-rw-r--r--  sys/amd64/conf/GENERIC  1
-rw-r--r--  sys/conf/files  21
-rw-r--r--  sys/dev/ocs_fc/ocs.h  261
-rw-r--r--  sys/dev/ocs_fc/ocs_cam.c  2640
-rw-r--r--  sys/dev/ocs_fc/ocs_cam.h  122
-rw-r--r--  sys/dev/ocs_fc/ocs_common.h  424
-rw-r--r--  sys/dev/ocs_fc/ocs_ddump.c  881
-rw-r--r--  sys/dev/ocs_fc/ocs_ddump.h  60
-rw-r--r--  sys/dev/ocs_fc/ocs_device.c  1929
-rw-r--r--  sys/dev/ocs_fc/ocs_device.h  133
-rw-r--r--  sys/dev/ocs_fc/ocs_domain.c  1585
-rw-r--r--  sys/dev/ocs_fc/ocs_domain.h  91
-rw-r--r--  sys/dev/ocs_fc/ocs_drv_fc.h  212
-rw-r--r--  sys/dev/ocs_fc/ocs_els.c  2777
-rw-r--r--  sys/dev/ocs_fc/ocs_els.h  110
-rw-r--r--  sys/dev/ocs_fc/ocs_fabric.c  2046
-rw-r--r--  sys/dev/ocs_fc/ocs_fabric.h  82
-rw-r--r--  sys/dev/ocs_fc/ocs_fcp.h  747
-rw-r--r--  sys/dev/ocs_fc/ocs_hw.c  12551
-rw-r--r--  sys/dev/ocs_fc/ocs_hw.h  1547
-rw-r--r--  sys/dev/ocs_fc/ocs_hw_queues.c  2602
-rw-r--r--  sys/dev/ocs_fc/ocs_hw_queues.h  97
-rw-r--r--  sys/dev/ocs_fc/ocs_io.c  491
-rw-r--r--  sys/dev/ocs_fc/ocs_io.h  196
-rw-r--r--  sys/dev/ocs_fc/ocs_ioctl.c  1253
-rw-r--r--  sys/dev/ocs_fc/ocs_ioctl.h  368
-rw-r--r--  sys/dev/ocs_fc/ocs_list.h  449
-rw-r--r--  sys/dev/ocs_fc/ocs_mgmt.c  2923
-rw-r--r--  sys/dev/ocs_fc/ocs_mgmt.h  121
-rw-r--r--  sys/dev/ocs_fc/ocs_node.c  2376
-rw-r--r--  sys/dev/ocs_fc/ocs_node.h  240
-rw-r--r--  sys/dev/ocs_fc/ocs_os.c  1046
-rw-r--r--  sys/dev/ocs_fc/ocs_os.h  1406
-rw-r--r--  sys/dev/ocs_fc/ocs_pci.c  963
-rw-r--r--  sys/dev/ocs_fc/ocs_scsi.c  2960
-rw-r--r--  sys/dev/ocs_fc/ocs_scsi.h  454
-rw-r--r--  sys/dev/ocs_fc/ocs_sm.c  207
-rw-r--r--  sys/dev/ocs_fc/ocs_sm.h  203
-rw-r--r--  sys/dev/ocs_fc/ocs_sport.c  1926
-rw-r--r--  sys/dev/ocs_fc/ocs_sport.h  113
-rw-r--r--  sys/dev/ocs_fc/ocs_stats.h  49
-rw-r--r--  sys/dev/ocs_fc/ocs_unsol.c  1399
-rw-r--r--  sys/dev/ocs_fc/ocs_unsol.h  53
-rw-r--r--  sys/dev/ocs_fc/ocs_utils.c  2826
-rw-r--r--  sys/dev/ocs_fc/ocs_utils.h  345
-rw-r--r--  sys/dev/ocs_fc/ocs_vpd.h  203
-rw-r--r--  sys/dev/ocs_fc/ocs_xport.c  1105
-rw-r--r--  sys/dev/ocs_fc/ocs_xport.h  213
-rw-r--r--  sys/dev/ocs_fc/sli4.c  8648
-rw-r--r--  sys/dev/ocs_fc/sli4.h  5609
-rw-r--r--  sys/dev/ocs_fc/version.h  84
-rw-r--r--  sys/modules/Makefile  2
-rw-r--r--  sys/modules/ocs_fc/Makefile  45
55 files changed, 69390 insertions, 0 deletions
diff --git a/share/man/man4/Makefile b/share/man/man4/Makefile
index 007ca6df5da8..1ea40b109e9b 100644
--- a/share/man/man4/Makefile
+++ b/share/man/man4/Makefile
@@ -402,6 +402,7 @@ MAN= aac.4 \
${_nvram2env.4} \
${_nxge.4} \
oce.4 \
+	ocs_fc.4 \
ohci.4 \
orm.4 \
ow.4 \
diff --git a/share/man/man4/ocs_fc.4 b/share/man/man4/ocs_fc.4
new file mode 100644
index 000000000000..a2f13c483930
--- /dev/null
+++ b/share/man/man4/ocs_fc.4
@@ -0,0 +1,194 @@
+.\" Copyright (c) 2017 Broadcom. All rights reserved.
+.\" The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions are met:
+.\"
+.\" 1. Redistributions of source code must retain the above copyright notice,
+.\" this list of conditions and the following disclaimer.
+.\"
+.\" 2. Redistributions in binary form must reproduce the above copyright notice,
+.\" this list of conditions and the following disclaimer in the documentation
+.\" and/or other materials provided with the distribution.
+.\"
+.\" 3. Neither the name of the copyright holder nor the names of its contributors
+.\" may be used to endorse or promote products derived from this software
+.\" without specific prior written permission.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+.\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+.\" LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+.\" CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+.\" SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+.\" INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+.\" CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+.\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+.\" POSSIBILITY OF SUCH DAMAGE.
+.\"
+.\" $FreeBSD$
+.\"
+.Dd March 30, 2018
+.Dt OCS_FC 4
+.Os
+.Sh NAME
+.Nm ocs_fc
+.Nd "Device driver for Emulex Fibre Channel Host Adapters"
+.Sh SYNOPSIS
+To compile this driver into the kernel, add this line to the
+kernel configuration file:
+.Bd -ragged -offset indent
+.Cd "device ocs_fc"
+.Ed
+.Pp
+To load the driver as a module at boot, add this line to
+.Xr loader.conf 5 :
+.Bd -literal -offset indent
+ocs_fc_load="YES"
+.Ed
+.Sh DESCRIPTION
+The
+.Nm
+driver provides access to Fibre Channel SCSI devices.
+.Pp
+The
+.Nm
+driver supports initiator and target modes.
+Support is available for Arbitrated Loop, Point-to-Point,
+and Fabric connections.
+FC-Tape is highly recommended for connections to tape drives that support
+it.
+FC-Tape includes four elements from the T-10 FCP-4 specification:
+.Bl -bullet -offset indent
+.It
+Precise Delivery of Commands
+.It
+Confirmed Completion of FCP I/O Operations
+.It
+Retransmission of Unsuccessfully Transmitted IUs
+.It
+Task Retry Identification
+.El
+.Pp
+Together these features allow for link level error recovery with tape
+devices.
+Without link level error recovery, an initiator cannot, for instance, tell whether a tape write
+command that has timed out resulted in all, part, or none of the data going to
+the tape drive.
+FC-Tape is automatically enabled when both the controller and target support it.
+.Sh HARDWARE
+The
+.Nm
+driver supports these Fibre Channel adapters:
+.Bl -tag -width xxxxxx -offset indent
+.It Emulex 16/8G FC GEN 5 HBAs
+.Bd -literal -offset indent
+LPe15004 FC Host Bus Adapters
+LPe160XX FC Host Bus Adapters
+.Ed
+.It Emulex 32/16G FC GEN 6 HBAs
+.Bd -literal -offset indent
+LPe3100X FC Host Bus Adapters
+LPe3200X FC Host Bus Adapters
+.Ed
+.El
+.Sh UPDATING FIRMWARE
+Adapter firmware updates are persistent.
+.Pp
+Firmware can be updated by following these steps:
+.Bl -enum
+.It
+Copy this code to a
+.Pa Makefile :
+.Bd -literal -offset indent
+KMOD=ocsflash
+FIRMWS=imagename.grp:ocsflash
+\&.include <bsd.kmod.mk>
+.Ed
+.It
+Replace
+.Pa imagename
+with the name of the GRP file.
+.It
+Copy the
+.Pa Makefile
+and GRP file to a local directory.
+.It
+Execute
+.Cm make
+and copy the generated
+.Pa ocsflash.ko
+file to
+.Pa /lib/modules
+.It
+.Cm sysctl dev.ocs_fc.<N>.fw_upgrade=ocsflash
+.It
+Check the kernel messages for the status of the operation.
+.It
+Reboot the machine.
+.El
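[Editor's note: the fw_upgrade sysctl in the last steps can also be driven from a
small C program rather than the sysctl(8) command line. A minimal sketch, assuming
unit 0 and the ocsflash module name from the Makefile above (both are assumptions
for illustration):

    /* fwtrig.c: hypothetical helper that triggers the firmware upgrade
     * by writing the module name to the dev.ocs_fc.0.fw_upgrade sysctl. */
    #include <sys/types.h>
    #include <sys/sysctl.h>
    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
            const char *mod = "ocsflash";   /* KMOD name from the Makefile */

            /* Equivalent to: sysctl dev.ocs_fc.0.fw_upgrade=ocsflash */
            if (sysctlbyname("dev.ocs_fc.0.fw_upgrade", NULL, NULL,
                mod, strlen(mod)) == -1) {
                    perror("sysctlbyname");
                    return (1);
            }
            return (0);
    }
]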
+.Pp
+.Sh BOOT OPTIONS
+Options are controlled by setting values in
+.Pa /boot/device.hints .
+.Pp
+They are:
+.Bl -tag -width indent
+.It Va hint.ocs_fc.N.initiator
+Enable initiator functionality.
+Default 1 (enabled), 0 to disable.
+.It Va hint.ocs_fc.N.target
+Enable target functionality.
+Default 1 (enabled), 0 to disable.
+.It Va hint.ocs_fc.N.topology
+Topology: 0 for Auto, 1 for NPort only, 2 for Loop only.
+.It Va hint.ocs_fc.N.speed
+Link speed in megabits per second.
+Possible values include:
+0 (auto-speed negotiation, the default), 4000 (4GFC), 8000 (8GFC), and 16000 (16GFC).
+.El
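[Editor's note: for illustration, a hypothetical /boot/device.hints fragment that
configures the first adapter as a target-only port, loop topology, at 8GFC (the
unit number and values are assumptions, not recommendations):

    hint.ocs_fc.0.initiator="0"
    hint.ocs_fc.0.target="1"
    hint.ocs_fc.0.topology="2"
    hint.ocs_fc.0.speed="8000"
]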
+.Sh SYSCTL OPTIONS
+.Bl -tag -width indent
+.It Va dev.ocs_fc.N.port_state
+Port state (read/write).
+Valid values are
+.Li online
+and
+.Li offline .
+.It Va dev.ocs_fc.N.wwpn
+World Wide Port Name (read/write).
+.It Va dev.ocs_fc.N.wwnn
+World Wide Node Name (read/write).
+.It Va dev.ocs_fc.N.fwrev
+Firmware revision (read-only).
+.It Va dev.ocs_fc.N.sn
+Adapter serial number (read-only).
+.It Va dev.ocs_fc.N.configured_speed
+Configured Port Speed (read/write).
+Valid values are:
+0 (auto-speed negotiation, the default), 4000 (4GFC), 8000 (8GFC), and 16000 (16GFC).
+.It Va dev.ocs_fc.N.configured_topology
+Configured Port Topology (read/write).
+Valid values are:
+0 for Auto, 1 for NPort, 2 for Loop.
+.It Va dev.ocs_fc.N.current_speed
+Current Port Speed (read-only).
+.It Va dev.ocs_fc.N.current_topology
+Current Port Topology (read-only).
+.El
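[Editor's note: read-only values such as the firmware revision can be fetched
through the same interface; a minimal sketch in C, again assuming unit 0:

    /* fwrev.c: hypothetical example reading dev.ocs_fc.0.fwrev. */
    #include <sys/types.h>
    #include <sys/sysctl.h>
    #include <stdio.h>

    int
    main(void)
    {
            char buf[64];
            size_t len = sizeof(buf);

            if (sysctlbyname("dev.ocs_fc.0.fwrev", buf, &len, NULL, 0) == -1) {
                    perror("sysctlbyname");
                    return (1);
            }
            printf("firmware revision: %.*s\n", (int)len, buf);
            return (0);
    }

The shell equivalent is: sysctl dev.ocs_fc.0.fwrev]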
+.Sh SUPPORT
+For general information and support,
+visit the Broadcom website at
+.Pa http://www.broadcom.com/
+or contact the driver team by email at
+.Pa ocs-driver-team.pdl@broadcom.com .
+.Sh SEE ALSO
+.Xr ifconfig 8
+.Sh AUTHORS
+.An -nosplit
+The
+.Nm
+driver was written by
+.An Broadcom.
diff --git a/sys/amd64/conf/GENERIC b/sys/amd64/conf/GENERIC
index d1219b7c75bb..c57b7783c657 100644
--- a/sys/amd64/conf/GENERIC
+++ b/sys/amd64/conf/GENERIC
@@ -141,6 +141,7 @@ device adw # Advansys wide SCSI adapters
device aic # Adaptec 15[012]x SCSI adapters, AIC-6[23]60.
device bt # Buslogic/Mylex MultiMaster SCSI adapters
device isci # Intel C600 SAS controller
+device ocs_fc # Emulex FC adapters
# ATA/SCSI peripherals
device scbus # SCSI bus (required for ATA/SCSI)
diff --git a/sys/conf/files b/sys/conf/files
index 0a0b6ac831cb..1c344e2d41b3 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -2578,6 +2578,27 @@ dev/oce/oce_mbox.c optional oce pci
dev/oce/oce_queue.c optional oce pci
dev/oce/oce_sysctl.c optional oce pci
dev/oce/oce_util.c optional oce pci
+dev/ocs_fc/ocs_pci.c optional ocs_fc pci
+dev/ocs_fc/ocs_ioctl.c optional ocs_fc pci
+dev/ocs_fc/ocs_os.c optional ocs_fc pci
+dev/ocs_fc/ocs_utils.c optional ocs_fc pci
+dev/ocs_fc/ocs_hw.c optional ocs_fc pci
+dev/ocs_fc/ocs_hw_queues.c optional ocs_fc pci
+dev/ocs_fc/sli4.c optional ocs_fc pci
+dev/ocs_fc/ocs_sm.c optional ocs_fc pci
+dev/ocs_fc/ocs_device.c optional ocs_fc pci
+dev/ocs_fc/ocs_xport.c optional ocs_fc pci
+dev/ocs_fc/ocs_domain.c optional ocs_fc pci
+dev/ocs_fc/ocs_sport.c optional ocs_fc pci
+dev/ocs_fc/ocs_els.c optional ocs_fc pci
+dev/ocs_fc/ocs_fabric.c optional ocs_fc pci
+dev/ocs_fc/ocs_io.c optional ocs_fc pci
+dev/ocs_fc/ocs_node.c optional ocs_fc pci
+dev/ocs_fc/ocs_scsi.c optional ocs_fc pci
+dev/ocs_fc/ocs_unsol.c optional ocs_fc pci
+dev/ocs_fc/ocs_ddump.c optional ocs_fc pci
+dev/ocs_fc/ocs_mgmt.c optional ocs_fc pci
+dev/ocs_fc/ocs_cam.c optional ocs_fc pci
dev/ofw/ofw_bus_if.m optional fdt
dev/ofw/ofw_bus_subr.c optional fdt
dev/ofw/ofw_cpu.c optional fdt
diff --git a/sys/dev/ocs_fc/ocs.h b/sys/dev/ocs_fc/ocs.h
new file mode 100644
index 000000000000..69db2b481363
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs.h
@@ -0,0 +1,261 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/**
+ * @file
+ * OCS BSD driver common include file
+ */
+
+
+#if !defined(__OCS_H__)
+#define __OCS_H__
+
+#include "ocs_os.h"
+#include "ocs_utils.h"
+
+#include "ocs_hw.h"
+#include "ocs_scsi.h"
+#include "ocs_io.h"
+
+#include "version.h"
+
+#define DRV_NAME "ocs_fc"
+#define DRV_VERSION \
+ STR_BE_MAJOR "." STR_BE_MINOR "." STR_BE_BUILD "." STR_BE_BRANCH
+
+/**
+ * @brief Interrupt context
+ */
+typedef struct ocs_intr_ctx_s {
+ uint32_t vec; /** Zero based interrupt vector */
+ void *softc; /** software context for interrupt */
+ char name[64]; /** label for this context */
+} ocs_intr_ctx_t;
+
+typedef struct ocs_fcport_s {
+ struct cam_sim *sim;
+ struct cam_path *path;
+ uint32_t role;
+
+ ocs_tgt_resource_t targ_rsrc_wildcard;
+ ocs_tgt_resource_t targ_rsrc[OCS_MAX_LUN];
+ ocs_vport_spec_t *vport;
+} ocs_fcport;
+
+#define FCPORT(ocs, chan) (&((ocs_fcport *)(ocs)->fcports)[(chan)])
+
+/**
+ * @brief Driver's context
+ */
+
+struct ocs_softc {
+
+ device_t dev;
+ struct cdev *cdev;
+
+ ocs_pci_reg_t reg[PCI_MAX_BAR];
+
+ uint32_t instance_index;
+ const char *desc;
+
+ uint32_t irqid;
+ struct resource *irq;
+ void *tag;
+
+ ocs_intr_ctx_t intr_ctx;
+ uint32_t n_vec;
+
+ bus_dma_tag_t dmat; /** Parent DMA tag */
+ bus_dma_tag_t buf_dmat;/** IO buffer DMA tag */
+ char display_name[OCS_DISPLAY_NAME_LENGTH];
+ uint16_t pci_vendor;
+ uint16_t pci_device;
+ uint16_t pci_subsystem_vendor;
+ uint16_t pci_subsystem_device;
+ char businfo[16];
+ const char *driver_version;
+ const char *fw_version;
+ const char *model;
+
+ ocs_hw_t hw;
+
+ ocs_rlock_t lock; /**< device wide lock */
+
+ ocs_xport_e ocs_xport;
+ ocs_xport_t *xport; /**< pointer to transport object */
+ ocs_domain_t *domain;
+ ocs_list_t domain_list;
+ uint32_t domain_instance_count;
+ void (*domain_list_empty_cb)(ocs_t *ocs, void *arg);
+ void *domain_list_empty_cb_arg;
+
+ uint8_t enable_ini;
+ uint8_t enable_tgt;
+ uint8_t fc_type;
+ int ctrlmask;
+ uint8_t explicit_buffer_list;
+ uint8_t external_loopback;
+ uint8_t skip_hw_teardown;
+ int speed;
+ int topology;
+ int ethernet_license;
+ int num_scsi_ios;
+ uint8_t enable_hlm;
+ uint32_t hlm_group_size;
+ uint32_t max_isr_time_msec; /*>> Maximum ISR time */
+ uint32_t auto_xfer_rdy_size; /*>> Max sized write to use auto xfer rdy*/
+ uint8_t esoc;
+ int logmask;
+ char *hw_war_version;
+ uint32_t num_vports;
+ uint32_t target_io_timer_sec;
+ uint32_t hw_bounce;
+ uint8_t rq_threads;
+ uint8_t rq_selection_policy;
+ uint8_t rr_quanta;
+ char *filter_def;
+ uint32_t max_remote_nodes;
+
+ /*
+ * tgt_rscn_delay - delay in kicking off RSCN processing
+ * (nameserver queries) after receiving an RSCN on the target.
+ * This prevents thrashing of nameserver requests due to a huge burst of
+ * RSCNs received in a short period of time.
+ * Note: this is only valid when target RSCN handling is enabled -- see
+ * ctrlmask.
+ */
+ time_t tgt_rscn_delay_msec; /*>> minimum target RSCN delay */
+
+ /*
+ * tgt_rscn_period - determines maximum frequency when processing
+ * back-to-back RSCNs; e.g. if this value is 30, there will never be
+ * any more than 1 RSCN handling per 30s window. This prevents
+	 * initiators on a faulty link that generate many RSCNs from causing
+	 * the target to continually query the nameserver.
+ * Note: This is only valid when target RSCN handling is enabled
+ */
+ time_t tgt_rscn_period_msec; /*>> minimum target RSCN period */
+
+ uint32_t enable_task_set_full;
+ uint32_t io_in_use;
+ uint32_t io_high_watermark; /**< used to send task set full */
+ struct mtx sim_lock;
+ uint32_t config_tgt:1, /**< Configured to support target mode */
+ config_ini:1; /**< Configured to support initiator mode */
+
+
+ uint32_t nodedb_mask; /**< Node debugging mask */
+
+ char modeldesc[64];
+ char serialnum[64];
+ char fwrev[64];
+ char sli_intf[9];
+
+ ocs_ramlog_t *ramlog;
+ ocs_textbuf_t ddump_saved;
+
+ ocs_mgmt_functions_t *mgmt_functions;
+ ocs_mgmt_functions_t *tgt_mgmt_functions;
+ ocs_mgmt_functions_t *ini_mgmt_functions;
+
+ ocs_err_injection_e err_injection;
+ uint32_t cmd_err_inject;
+ time_t delay_value_msec;
+
+ bool attached;
+ struct mtx dbg_lock;
+
+ struct cam_devq *devq;
+ ocs_fcport *fcports;
+
+ void* tgt_ocs;
+};
+
+static inline void
+ocs_device_lock_init(ocs_t *ocs)
+{
+ ocs_rlock_init(ocs, &ocs->lock, "ocsdevicelock");
+}
+
+static inline int32_t
+ocs_device_lock_try(ocs_t *ocs)
+{
+ return ocs_rlock_try(&ocs->lock);
+}
+
+static inline void
+ocs_device_lock(ocs_t *ocs)
+{
+ ocs_rlock_acquire(&ocs->lock);
+}
+
+static inline void
+ocs_device_unlock(ocs_t *ocs)
+{
+ ocs_rlock_release(&ocs->lock);
+}
+
+static inline void
+ocs_device_lock_free(ocs_t *ocs)
+{
+ ocs_rlock_free(&ocs->lock);
+}
+
+extern int32_t ocs_device_detach(ocs_t *ocs);
+
+extern int32_t ocs_device_attach(ocs_t *ocs);
+
+#define ocs_is_initiator_enabled() (ocs->enable_ini)
+#define ocs_is_target_enabled() (ocs->enable_tgt)
+
+#include "ocs_xport.h"
+#include "ocs_domain.h"
+#include "ocs_sport.h"
+#include "ocs_node.h"
+#include "ocs_unsol.h"
+#include "ocs_scsi.h"
+#include "ocs_ioctl.h"
+
+static inline ocs_io_t *
+ocs_io_alloc(ocs_t *ocs)
+{
+ return ocs_io_pool_io_alloc(ocs->xport->io_pool);
+}
+
+static inline void
+ocs_io_free(ocs_t *ocs, ocs_io_t *io)
+{
+ ocs_io_pool_io_free(ocs->xport->io_pool, io);
+}
+
+#endif /* __OCS_H__ */
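[Editor's note: the inline helpers above pair naturally. FCPORT() indexes the
per-channel ocs_fcport array (channel 0 is the physical port; ocs_cam_attach()
in ocs_cam.c below registers num_vports + 1 channels), and
ocs_io_alloc()/ocs_io_free() borrow and return IO contexts from the transport's
pool. A hypothetical usage sketch, not part of the commit; the locking shown is
illustrative only:

    /* Assumes "ocs.h" is included and ocs points at an attached device. */
    static void
    example_io_usage(ocs_t *ocs)
    {
            ocs_fcport *fcp = FCPORT(ocs, 0);  /* channel 0: physical port */
            ocs_io_t *io;

            ocs_device_lock(ocs);
            io = ocs_io_alloc(ocs);            /* borrow an IO from the pool */
            if (io != NULL) {
                    /* ... fill in and submit the IO here ... */
                    ocs_io_free(ocs, io);      /* return it to the pool */
            }
            ocs_device_unlock(ocs);
            (void)fcp;                         /* unused in this sketch */
    }
]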
diff --git a/sys/dev/ocs_fc/ocs_cam.c b/sys/dev/ocs_fc/ocs_cam.c
new file mode 100644
index 000000000000..d5e2e7578255
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs_cam.c
@@ -0,0 +1,2640 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/**
+ * @defgroup scsi_api_target SCSI Target API
+ * @defgroup scsi_api_initiator SCSI Initiator API
+ * @defgroup cam_api Common Access Method (CAM) API
+ * @defgroup cam_io CAM IO
+ */
+
+/**
+ * @file
+ * Provides CAM functionality.
+ */
+
+#include "ocs.h"
+#include "ocs_scsi.h"
+#include "ocs_device.h"
+
+/* Default IO timeout value for initiators is 30 seconds */
+#define OCS_CAM_IO_TIMEOUT 30
+
+typedef struct {
+ ocs_scsi_sgl_t *sgl;
+ uint32_t sgl_max;
+ uint32_t sgl_count;
+ int32_t rc;
+} ocs_dmamap_load_arg_t;
+
+static void ocs_action(struct cam_sim *, union ccb *);
+static void ocs_poll(struct cam_sim *);
+
+static ocs_tgt_resource_t *ocs_tgt_resource_get(ocs_fcport *,
+ struct ccb_hdr *, uint32_t *);
+static int32_t ocs_tgt_resource_abort(struct ocs_softc *, ocs_tgt_resource_t *);
+static uint32_t ocs_abort_initiator_io(struct ocs_softc *ocs, union ccb *accb);
+static void ocs_abort_inot(struct ocs_softc *ocs, union ccb *ccb);
+static void ocs_abort_atio(struct ocs_softc *ocs, union ccb *ccb);
+static int32_t ocs_target_tmf_cb(ocs_io_t *, ocs_scsi_io_status_e, uint32_t, void *);
+static int32_t ocs_io_abort_cb(ocs_io_t *, ocs_scsi_io_status_e, uint32_t, void *);
+static int32_t ocs_task_set_full_or_busy(ocs_io_t *io);
+static int32_t ocs_initiator_tmf_cb(ocs_io_t *, ocs_scsi_io_status_e,
+ ocs_scsi_cmd_resp_t *, uint32_t, void *);
+static uint32_t
+ocs_fcp_change_role(struct ocs_softc *ocs, ocs_fcport *fcp, uint32_t new_role);
+
+static inline ocs_io_t *ocs_scsi_find_io(struct ocs_softc *ocs, uint32_t tag)
+{
+
+ return ocs_io_get_instance(ocs, tag);
+}
+
+static inline void ocs_target_io_free(ocs_io_t *io)
+{
+ io->tgt_io.state = OCS_CAM_IO_FREE;
+ io->tgt_io.flags = 0;
+ io->tgt_io.app = NULL;
+ ocs_scsi_io_complete(io);
+	if (io->ocs->io_in_use != 0)
+ atomic_subtract_acq_32(&io->ocs->io_in_use, 1);
+}
+
+static int32_t
+ocs_attach_port(ocs_t *ocs, int chan)
+{
+
+ struct cam_sim *sim = NULL;
+ struct cam_path *path = NULL;
+ uint32_t max_io = ocs_scsi_get_property(ocs, OCS_SCSI_MAX_IOS);
+ ocs_fcport *fcp = FCPORT(ocs, chan);
+
+ if (NULL == (sim = cam_sim_alloc(ocs_action, ocs_poll,
+ device_get_name(ocs->dev), ocs,
+ device_get_unit(ocs->dev), &ocs->sim_lock,
+ max_io, max_io, ocs->devq))) {
+ device_printf(ocs->dev, "Can't allocate SIM\n");
+ return 1;
+ }
+
+ mtx_lock(&ocs->sim_lock);
+ if (CAM_SUCCESS != xpt_bus_register(sim, ocs->dev, chan)) {
+		device_printf(ocs->dev, "Can't register bus %d\n", chan);
+ mtx_unlock(&ocs->sim_lock);
+ cam_sim_free(sim, FALSE);
+ return 1;
+ }
+ mtx_unlock(&ocs->sim_lock);
+
+	if (CAM_REQ_CMP != xpt_create_path(&path, NULL, cam_sim_path(sim),
+	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)) {
+		device_printf(ocs->dev, "Can't create path\n");
+		/* Reacquire the SIM lock; it was dropped above and must be
+		 * held across the bus deregistration. */
+		mtx_lock(&ocs->sim_lock);
+		xpt_bus_deregister(cam_sim_path(sim));
+		mtx_unlock(&ocs->sim_lock);
+		cam_sim_free(sim, FALSE);
+		return 1;
+	}
+
+ fcp->sim = sim;
+ fcp->path = path;
+
+ return 0;
+
+}
+
+static int32_t
+ocs_detach_port(ocs_t *ocs, int32_t chan)
+{
+ ocs_fcport *fcp = NULL;
+ struct cam_sim *sim = NULL;
+ struct cam_path *path = NULL;
+ fcp = FCPORT(ocs, chan);
+
+ sim = fcp->sim;
+ path = fcp->path;
+
+ if (fcp->sim) {
+ mtx_lock(&ocs->sim_lock);
+ ocs_tgt_resource_abort(ocs, &fcp->targ_rsrc_wildcard);
+ if (path) {
+ xpt_async(AC_LOST_DEVICE, path, NULL);
+ xpt_free_path(path);
+ fcp->path = NULL;
+ }
+ xpt_bus_deregister(cam_sim_path(sim));
+
+ cam_sim_free(sim, FALSE);
+ fcp->sim = NULL;
+ mtx_unlock(&ocs->sim_lock);
+ }
+
+ return 0;
+}
+
+int32_t
+ocs_cam_attach(ocs_t *ocs)
+{
+ struct cam_devq *devq = NULL;
+ int i = 0;
+ uint32_t max_io = ocs_scsi_get_property(ocs, OCS_SCSI_MAX_IOS);
+
+ if (NULL == (devq = cam_simq_alloc(max_io))) {
+ device_printf(ocs->dev, "Can't allocate SIMQ\n");
+ return -1;
+ }
+
+ ocs->devq = devq;
+
+ if (mtx_initialized(&ocs->sim_lock) == 0) {
+ mtx_init(&ocs->sim_lock, "ocs_sim_lock", NULL, MTX_DEF);
+ }
+
+ for (i = 0; i < (ocs->num_vports + 1); i++) {
+ if (ocs_attach_port(ocs, i)) {
+ ocs_log_err(ocs, "Attach port failed for chan: %d\n", i);
+ goto detach_port;
+ }
+ }
+
+ ocs->io_high_watermark = max_io;
+ ocs->io_in_use = 0;
+ return 0;
+
+detach_port:
+ while (--i >= 0) {
+ ocs_detach_port(ocs, i);
+ }
+
+ cam_simq_free(ocs->devq);
+
+ if (mtx_initialized(&ocs->sim_lock))
+ mtx_destroy(&ocs->sim_lock);
+
+ return 1;
+}
+
+int32_t
+ocs_cam_detach(ocs_t *ocs)
+{
+ int i = 0;
+
+ for (i = (ocs->num_vports); i >= 0; i--) {
+ ocs_detach_port(ocs, i);
+ }
+
+ cam_simq_free(ocs->devq);
+
+ if (mtx_initialized(&ocs->sim_lock))
+ mtx_destroy(&ocs->sim_lock);
+
+ return 0;
+}
+
+/***************************************************************************
+ * Functions required by SCSI base driver API
+ */
+
+/**
+ * @ingroup scsi_api_target
+ * @brief Attach driver to the BSD SCSI layer (a.k.a. CAM)
+ *
+ * Allocates and initializes CAM-related resources and attaches to CAM
+ *
+ * @param ocs the driver instance's software context
+ *
+ * @return 0 on success, non-zero otherwise
+ */
+int32_t
+ocs_scsi_tgt_new_device(ocs_t *ocs)
+{
+ ocs->enable_task_set_full = ocs_scsi_get_property(ocs,
+ OCS_SCSI_ENABLE_TASK_SET_FULL);
+ ocs_log_debug(ocs, "task set full processing is %s\n",
+ ocs->enable_task_set_full ? "enabled" : "disabled");
+
+ return 0;
+}
+
+/**
+ * @ingroup scsi_api_target
+ * @brief Tears down target members of ocs structure.
+ *
+ * Called by OS code when device is removed.
+ *
+ * @param ocs pointer to ocs
+ *
+ * @return returns 0 for success, a negative error code value for failure.
+ */
+int32_t
+ocs_scsi_tgt_del_device(ocs_t *ocs)
+{
+
+ return 0;
+}
+
+/**
+ * @ingroup scsi_api_target
+ * @brief accept new domain notification
+ *
+ * Called by the base driver when a new domain is discovered. A target-server
+ * will use this call to prepare for new remote node notifications
+ * arising from ocs_scsi_new_initiator().
+ *
+ * The domain context has an element <b>ocs_scsi_tgt_domain_t tgt_domain</b>
+ * which is declared by the target-server code and is used for target-server
+ * private data.
+ *
+ * This function will only be called if the base-driver has been enabled for
+ * target capability.
+ *
+ * Note that this call is made to target-server backends,
+ * the ocs_scsi_ini_new_domain() function is called to initiator-client backends.
+ *
+ * @param domain pointer to domain
+ *
+ * @return returns 0 for success, a negative error code value for failure.
+ */
+int32_t
+ocs_scsi_tgt_new_domain(ocs_domain_t *domain)
+{
+ return 0;
+}
+
+/**
+ * @ingroup scsi_api_target
+ * @brief accept domain lost notification
+ *
+ * Called by base-driver when a domain goes away. A target-server will
+ * use this call to clean up all domain scoped resources.
+ *
+ * Note that this call is made to target-server backends,
+ * the ocs_scsi_ini_del_domain() function is called to initiator-client backends.
+ *
+ * @param domain pointer to domain
+ *
+ * @return none
+ */
+void
+ocs_scsi_tgt_del_domain(ocs_domain_t *domain)
+{
+}
+
+
+/**
+ * @ingroup scsi_api_target
+ * @brief accept new sli port (sport) notification
+ *
+ * Called by the base driver when a new sport is discovered. A target-server
+ * will use this call to prepare for new remote node notifications
+ * arising from ocs_scsi_new_initiator().
+ *
+ * The domain context has an element <b>ocs_scsi_tgt_sport_t tgt_sport</b>
+ * which is declared by the target-server code and is used for
+ * target-server private data.
+ *
+ * This function will only be called if the base-driver has been enabled for
+ * target capability.
+ *
+ * Note that this call is made to target-server backends,
+ * the ocs_scsi_ini_new_sport() function is called for initiator-client backends.
+ *
+ * @param sport pointer to SLI port
+ *
+ * @return returns 0 for success, a negative error code value for failure.
+ */
+int32_t
+ocs_scsi_tgt_new_sport(ocs_sport_t *sport)
+{
+ ocs_t *ocs = sport->ocs;
+
+	if (!sport->is_vport) {
+ sport->tgt_data = FCPORT(ocs, 0);
+ }
+
+ return 0;
+}
+
+/**
+ * @ingroup scsi_api_target
+ * @brief accept SLI port gone notification
+ *
+ * Called by base-driver when a sport goes away. A target-server will
+ * use this call to clean up all sport scoped resources.
+ *
+ * Note that this call is made to target-server backends,
+ * the ocs_scsi_ini_del_sport() is called to initiator-client backends.
+ *
+ * @param sport pointer to SLI port
+ *
+ * @return none
+ */
+void
+ocs_scsi_tgt_del_sport(ocs_sport_t *sport)
+{
+ return;
+}
+
+/**
+ * @ingroup scsi_api_target
+ * @brief receive notification of a new SCSI initiator node
+ *
+ * Sent by the base driver to notify a target-server of the presence of a new
+ * remote initiator. The target-server may use this call to prepare for
+ * inbound IO from this node.
+ *
+ * The ocs_node_t structure has an element of type ocs_scsi_tgt_node_t named
+ * tgt_node that is declared and used by a target-server for private
+ * information.
+ *
+ * This function is only called if the target capability is enabled in the
+ * driver.
+ *
+ * @param node pointer to new remote initiator node
+ *
+ * @return returns 0 for success, a negative error code value for failure.
+ *
+ * @note
+ */
+int32_t
+ocs_scsi_new_initiator(ocs_node_t *node)
+{
+ ocs_t *ocs = node->ocs;
+ struct ac_contract ac;
+ struct ac_device_changed *adc;
+
+ ocs_fcport *fcp = NULL;
+
+ fcp = node->sport->tgt_data;
+ if (fcp == NULL) {
+ ocs_log_err(ocs, "FCP is NULL \n");
+ return 1;
+ }
+
+ /*
+ * Update the IO watermark by decrementing it by the
+ * number of IOs reserved for each initiator.
+ */
+ atomic_subtract_acq_32(&ocs->io_high_watermark, OCS_RSVD_INI_IO);
+
+ ac.contract_number = AC_CONTRACT_DEV_CHG;
+ adc = (struct ac_device_changed *) ac.contract_data;
+ adc->wwpn = ocs_node_get_wwpn(node);
+ adc->port = node->rnode.fc_id;
+ adc->target = node->instance_index;
+ adc->arrived = 1;
+ xpt_async(AC_CONTRACT, fcp->path, &ac);
+
+ return 0;
+}
+
+/**
+ * @ingroup scsi_api_target
+ * @brief validate new initiator
+ *
+ * Sent by the base driver to validate a remote initiator. The target-server
+ * returns TRUE if this initiator should be accepted.
+ *
+ * This function is only called if the target capability is enabled in the
+ * driver.
+ *
+ * @param node pointer to remote initiator node to validate
+ *
+ * @return TRUE if initiator should be accepted, FALSE if it should be rejected
+ *
+ * @note
+ */
+
+int32_t
+ocs_scsi_validate_initiator(ocs_node_t *node)
+{
+ return 1;
+}
+
+/**
+ * @ingroup scsi_api_target
+ * @brief Delete a SCSI initiator node
+ *
+ * Sent by base driver to notify a target-server that a remote initiator
+ * is now gone. The base driver will have terminated all outstanding IOs
+ * and the target-server will receive appropriate completions.
+ *
+ * This function is only called if the base driver is enabled for
+ * target capability.
+ *
+ * @param node pointer to the node being deleted
+ * @param reason Reason why initiator is gone.
+ *
+ * @return OCS_SCSI_CALL_COMPLETE to indicate that all work was completed
+ *
+ * @note
+ */
+int32_t
+ocs_scsi_del_initiator(ocs_node_t *node, ocs_scsi_del_initiator_reason_e reason)
+{
+ ocs_t *ocs = node->ocs;
+
+ struct ac_contract ac;
+ struct ac_device_changed *adc;
+ ocs_fcport *fcp = NULL;
+
+ fcp = node->sport->tgt_data;
+ if (fcp == NULL) {
+ ocs_log_err(ocs, "FCP is NULL \n");
+ return 1;
+ }
+
+ ac.contract_number = AC_CONTRACT_DEV_CHG;
+ adc = (struct ac_device_changed *) ac.contract_data;
+ adc->wwpn = ocs_node_get_wwpn(node);
+ adc->port = node->rnode.fc_id;
+ adc->target = node->instance_index;
+ adc->arrived = 0;
+ xpt_async(AC_CONTRACT, fcp->path, &ac);
+
+
+ if (reason == OCS_SCSI_INITIATOR_MISSING) {
+ return OCS_SCSI_CALL_COMPLETE;
+ }
+
+ /*
+ * Update the IO watermark by incrementing it by the
+ * number of IOs reserved for each initiator.
+ */
+ atomic_add_acq_32(&ocs->io_high_watermark, OCS_RSVD_INI_IO);
+
+ return OCS_SCSI_CALL_COMPLETE;
+}
+
+/**
+ * @ingroup scsi_api_target
+ * @brief receive FCP SCSI Command
+ *
+ * Called by the base driver when a new SCSI command has been received. The
+ * target-server will process the command, and issue data and/or response phase
+ * requests to the base driver.
+ *
+ * The IO context (ocs_io_t) structure has an element of type
+ * ocs_scsi_tgt_io_t named tgt_io that is declared and used by
+ * a target-server for private information.
+ *
+ * @param io pointer to IO context
+ * @param lun LUN for this IO
+ * @param cdb pointer to SCSI CDB
+ * @param cdb_len length of CDB in bytes
+ * @param flags command flags
+ *
+ * @return returns 0 for success, a negative error code value for failure.
+ */
+int32_t ocs_scsi_recv_cmd(ocs_io_t *io, uint64_t lun, uint8_t *cdb,
+ uint32_t cdb_len, uint32_t flags)
+{
+ ocs_t *ocs = io->ocs;
+ struct ccb_accept_tio *atio = NULL;
+ ocs_node_t *node = io->node;
+ ocs_tgt_resource_t *trsrc = NULL;
+ int32_t rc = -1;
+ ocs_fcport *fcp = NULL;
+
+ fcp = node->sport->tgt_data;
+ if (fcp == NULL) {
+ ocs_log_err(ocs, "FCP is NULL \n");
+ return 1;
+ }
+
+ atomic_add_acq_32(&ocs->io_in_use, 1);
+
+ /* set target io timeout */
+ io->timeout = ocs->target_io_timer_sec;
+
+ if (ocs->enable_task_set_full &&
+ (ocs->io_in_use >= ocs->io_high_watermark)) {
+ return ocs_task_set_full_or_busy(io);
+ } else {
+ atomic_store_rel_32(&io->node->tgt_node.busy_sent, FALSE);
+ }
+
+ if ((lun < OCS_MAX_LUN) && fcp->targ_rsrc[lun].enabled) {
+ trsrc = &fcp->targ_rsrc[lun];
+ } else if (fcp->targ_rsrc_wildcard.enabled) {
+ trsrc = &fcp->targ_rsrc_wildcard;
+ }
+
+ if (trsrc) {
+ atio = (struct ccb_accept_tio *)STAILQ_FIRST(&trsrc->atio);
+ }
+
+ if (atio) {
+
+ STAILQ_REMOVE_HEAD(&trsrc->atio, sim_links.stqe);
+
+ atio->ccb_h.status = CAM_CDB_RECVD;
+ atio->ccb_h.target_lun = lun;
+ atio->sense_len = 0;
+
+ atio->init_id = node->instance_index;
+ atio->tag_id = io->tag;
+ atio->ccb_h.ccb_io_ptr = io;
+
+ if (flags & OCS_SCSI_CMD_SIMPLE)
+ atio->tag_action = MSG_SIMPLE_Q_TAG;
+ else if (flags & FCP_TASK_ATTR_HEAD_OF_QUEUE)
+ atio->tag_action = MSG_HEAD_OF_Q_TAG;
+ else if (flags & FCP_TASK_ATTR_ORDERED)
+ atio->tag_action = MSG_ORDERED_Q_TAG;
+ else
+ atio->tag_action = 0;
+
+ atio->cdb_len = cdb_len;
+ ocs_memcpy(atio->cdb_io.cdb_bytes, cdb, cdb_len);
+
+ io->tgt_io.flags = 0;
+ io->tgt_io.state = OCS_CAM_IO_COMMAND;
+ io->tgt_io.lun = lun;
+
+ xpt_done((union ccb *)atio);
+
+ rc = 0;
+ } else {
+ device_printf(
+ ocs->dev, "%s: no ATIO for LUN %lx (en=%s) OX_ID %#x\n",
+ __func__, (unsigned long)lun,
+ trsrc ? (trsrc->enabled ? "T" : "F") : "X",
+ be16toh(io->init_task_tag));
+
+ io->tgt_io.state = OCS_CAM_IO_MAX;
+ ocs_target_io_free(io);
+ }
+
+ return rc;
+}
+
+/**
+ * @ingroup scsi_api_target
+ * @brief receive FCP SCSI Command with first burst data.
+ *
+ * Receive a new FCP SCSI command from the base driver with first burst data.
+ *
+ * @param io pointer to IO context
+ * @param lun LUN for this IO
+ * @param cdb pointer to SCSI CDB
+ * @param cdb_len length of CDB in bytes
+ * @param flags command flags
+ * @param first_burst_buffers first burst buffers
+ * @param first_burst_buffer_count The number of bytes received in the first burst
+ *
+ * @return returns 0 for success, a negative error code value for failure.
+ */
+int32_t ocs_scsi_recv_cmd_first_burst(ocs_io_t *io, uint64_t lun, uint8_t *cdb,
+ uint32_t cdb_len, uint32_t flags,
+ ocs_dma_t first_burst_buffers[],
+ uint32_t first_burst_buffer_count)
+{
+ return -1;
+}
+
+/**
+ * @ingroup scsi_api_target
+ * @brief receive a TMF command IO
+ *
+ * Called by the base driver when a SCSI TMF command has been received. The
+ * target-server will process the command, aborting commands as needed, and post
+ * a response using ocs_scsi_send_resp()
+ *
+ * The IO context (ocs_io_t) structure has an element of type ocs_scsi_tgt_io_t named
+ * tgt_io that is declared and used by a target-server for private information.
+ *
+ * If the target-server walks the node's active_ios linked list and starts IO
+ * abort processing, the code <b>must</b> be sure not to abort the IO passed into the
+ * ocs_scsi_recv_tmf() command.
+ *
+ * @param tmfio pointer to IO context
+ * @param lun logical unit value
+ * @param cmd command request
+ * @param abortio pointer to IO object to abort for TASK_ABORT (NULL for all other TMF)
+ * @param flags flags
+ *
+ * @return returns 0 for success, a negative error code value for failure.
+ */
+int32_t ocs_scsi_recv_tmf(ocs_io_t *tmfio, uint64_t lun, ocs_scsi_tmf_cmd_e cmd,
+ ocs_io_t *abortio, uint32_t flags)
+{
+ ocs_t *ocs = tmfio->ocs;
+ ocs_node_t *node = tmfio->node;
+ ocs_tgt_resource_t *trsrc = NULL;
+ struct ccb_immediate_notify *inot = NULL;
+ int32_t rc = -1;
+ ocs_fcport *fcp = NULL;
+
+ fcp = node->sport->tgt_data;
+ if (fcp == NULL) {
+ ocs_log_err(ocs, "FCP is NULL \n");
+ return 1;
+ }
+
+ if ((lun < OCS_MAX_LUN) && fcp->targ_rsrc[lun].enabled) {
+ trsrc = &fcp->targ_rsrc[lun];
+ } else if (fcp->targ_rsrc_wildcard.enabled) {
+ trsrc = &fcp->targ_rsrc_wildcard;
+ }
+
+ device_printf(tmfio->ocs->dev, "%s: io=%p cmd=%#x LU=%lx en=%s\n",
+ __func__, tmfio, cmd, (unsigned long)lun,
+ trsrc ? (trsrc->enabled ? "T" : "F") : "X");
+ if (trsrc) {
+ inot = (struct ccb_immediate_notify *)STAILQ_FIRST(&trsrc->inot);
+ }
+
+ if (!inot) {
+ device_printf(
+ ocs->dev, "%s: no INOT for LUN %llx (en=%s) OX_ID %#x\n",
+ __func__, (unsigned long long)lun, trsrc ? (trsrc->enabled ? "T" : "F") : "X",
+ be16toh(tmfio->init_task_tag));
+
+ if (abortio) {
+ ocs_scsi_io_complete(abortio);
+ }
+ ocs_scsi_io_complete(tmfio);
+ goto ocs_scsi_recv_tmf_out;
+ }
+
+
+ tmfio->tgt_io.app = abortio;
+
+ STAILQ_REMOVE_HEAD(&trsrc->inot, sim_links.stqe);
+
+ inot->tag_id = tmfio->tag;
+ inot->seq_id = tmfio->tag;
+
+ if ((lun < OCS_MAX_LUN) && fcp->targ_rsrc[lun].enabled) {
+ inot->initiator_id = node->instance_index;
+ } else {
+ inot->initiator_id = CAM_TARGET_WILDCARD;
+ }
+
+ inot->ccb_h.status = CAM_MESSAGE_RECV;
+ inot->ccb_h.target_lun = lun;
+
+ switch (cmd) {
+ case OCS_SCSI_TMF_ABORT_TASK:
+ inot->arg = MSG_ABORT_TASK;
+ inot->seq_id = abortio->tag;
+ device_printf(ocs->dev, "%s: ABTS IO.%#x st=%#x\n",
+ __func__, abortio->tag, abortio->tgt_io.state);
+ abortio->tgt_io.flags |= OCS_CAM_IO_F_ABORT_RECV;
+ abortio->tgt_io.flags |= OCS_CAM_IO_F_ABORT_NOTIFY;
+ break;
+ case OCS_SCSI_TMF_QUERY_TASK_SET:
+ device_printf(ocs->dev,
+ "%s: OCS_SCSI_TMF_QUERY_TASK_SET not supported\n",
+ __func__);
+ STAILQ_INSERT_TAIL(&trsrc->inot, &inot->ccb_h, sim_links.stqe);
+ ocs_scsi_io_complete(tmfio);
+ goto ocs_scsi_recv_tmf_out;
+ break;
+ case OCS_SCSI_TMF_ABORT_TASK_SET:
+ inot->arg = MSG_ABORT_TASK_SET;
+ break;
+ case OCS_SCSI_TMF_CLEAR_TASK_SET:
+ inot->arg = MSG_CLEAR_TASK_SET;
+ break;
+ case OCS_SCSI_TMF_QUERY_ASYNCHRONOUS_EVENT:
+ inot->arg = MSG_QUERY_ASYNC_EVENT;
+ break;
+ case OCS_SCSI_TMF_LOGICAL_UNIT_RESET:
+ inot->arg = MSG_LOGICAL_UNIT_RESET;
+ break;
+ case OCS_SCSI_TMF_CLEAR_ACA:
+ inot->arg = MSG_CLEAR_ACA;
+ break;
+ case OCS_SCSI_TMF_TARGET_RESET:
+ inot->arg = MSG_TARGET_RESET;
+ break;
+ default:
+ device_printf(ocs->dev, "%s: unsupported TMF %#x\n",
+ __func__, cmd);
+ STAILQ_INSERT_TAIL(&trsrc->inot, &inot->ccb_h, sim_links.stqe);
+ goto ocs_scsi_recv_tmf_out;
+ }
+
+ rc = 0;
+
+ xpt_print(inot->ccb_h.path, "%s: func=%#x stat=%#x id=%#x lun=%#x"
+ " flags=%#x tag=%#x seq=%#x ini=%#x arg=%#x\n",
+ __func__, inot->ccb_h.func_code, inot->ccb_h.status,
+ inot->ccb_h.target_id,
+ (unsigned int)inot->ccb_h.target_lun, inot->ccb_h.flags,
+ inot->tag_id, inot->seq_id, inot->initiator_id,
+ inot->arg);
+ xpt_done((union ccb *)inot);
+
+ if (abortio) {
+ abortio->tgt_io.flags |= OCS_CAM_IO_F_ABORT_DEV;
+ rc = ocs_scsi_tgt_abort_io(abortio, ocs_io_abort_cb, tmfio);
+ }
+
+ocs_scsi_recv_tmf_out:
+ return rc;
+}
+
+/**
+ * @ingroup scsi_api_initiator
+ * @brief Initializes any initiator fields on the ocs structure.
+ *
+ * Called by OS initialization code when a new device is discovered.
+ *
+ * @param ocs pointer to ocs
+ *
+ * @return returns 0 for success, a negative error code value for failure.
+ */
+int32_t
+ocs_scsi_ini_new_device(ocs_t *ocs)
+{
+
+ return 0;
+}
+
+/**
+ * @ingroup scsi_api_initiator
+ * @brief Tears down initiator members of ocs structure.
+ *
+ * Called by OS code when device is removed.
+ *
+ * @param ocs pointer to ocs
+ *
+ * @return returns 0 for success, a negative error code value for failure.
+ */
+
+int32_t
+ocs_scsi_ini_del_device(ocs_t *ocs)
+{
+
+ return 0;
+}
+
+
+/**
+ * @ingroup scsi_api_initiator
+ * @brief accept new domain notification
+ *
+ * Called by the base driver when a new domain is discovered. An initiator-client
+ * will use this call to prepare for new remote node notifications
+ * arising from ocs_scsi_new_target().
+ *
+ * The domain context has the element <b>ocs_scsi_ini_domain_t ini_domain</b>
+ * which is declared by the initiator-client code and is used for
+ * initiator-client private data.
+ *
+ * This function will only be called if the base-driver has been enabled for
+ * initiator capability.
+ *
+ * Note that this call is made to initiator-client backends,
+ * the ocs_scsi_tgt_new_domain() function is called to target-server backends.
+ *
+ * @param domain pointer to domain
+ *
+ * @return returns 0 for success, a negative error code value for failure.
+ */
+int32_t
+ocs_scsi_ini_new_domain(ocs_domain_t *domain)
+{
+ return 0;
+}
+
+/**
+ * @ingroup scsi_api_initiator
+ * @brief accept domain lost notification
+ *
+ * Called by base-driver when a domain goes away. An initiator-client will
+ * use this call to clean up all domain scoped resources.
+ *
+ * This function will only be called if the base-driver has been enabled for
+ * initiator capability.
+ *
+ * Note that this call is made to initiator-client backends,
+ * the ocs_scsi_tgt_del_domain() function is called to target-server backends.
+ *
+ * @param domain pointer to domain
+ *
+ * @return none
+ */
+void
+ocs_scsi_ini_del_domain(ocs_domain_t *domain)
+{
+}
+
+/**
+ * @ingroup scsi_api_initiator
+ * @brief accept new sli port notification
+ *
+ * Called by the base driver when a new sli port (sport) is discovered.
+ * An initiator-client will use this call to prepare for new remote node
+ * notifications arising from ocs_scsi_new_target().
+ *
+ * This function will only be called if the base-driver has been enabled for
+ * initiator capability.
+ *
+ * Note that this call is made to initiator-client backends,
+ * the ocs_scsi_tgt_new_sport() function is called for target-server backends.
+ *
+ * @param sport pointer to sport
+ *
+ * @return returns 0 for success, a negative error code value for failure.
+ */
+int32_t
+ocs_scsi_ini_new_sport(ocs_sport_t *sport)
+{
+ ocs_t *ocs = sport->ocs;
+
+	if (!sport->is_vport) {
+ sport->tgt_data = FCPORT(ocs, 0);
+ }
+
+ return 0;
+}
+
+/**
+ * @ingroup scsi_api_initiator
+ * @brief accept sli port gone notification
+ *
+ * Called by the base-driver when a sport goes away. An initiator-client will
+ * use this call to clean up all sport scoped resources.
+ *
+ * Note that this call is made to initiator-client backends,
+ * the ocs_scsi_tgt_del_sport() function is called for target-server backends.
+ *
+ * @param sport pointer to SLI port
+ *
+ * @return none
+ */
+void
+ocs_scsi_ini_del_sport(ocs_sport_t *sport)
+{
+}
+
+void
+ocs_scsi_sport_deleted(ocs_sport_t *sport)
+{
+ ocs_t *ocs = sport->ocs;
+ ocs_fcport *fcp = NULL;
+
+ ocs_xport_stats_t value;
+
+ if (!sport->is_vport) {
+ return;
+ }
+
+ fcp = sport->tgt_data;
+
+ ocs_xport_status(ocs->xport, OCS_XPORT_PORT_STATUS, &value);
+
+ if (value.value == 0) {
+ ocs_log_debug(ocs, "PORT offline,.. skipping\n");
+ return;
+ }
+
+	if (fcp->role != KNOB_ROLE_NONE) {
+		if (fcp->vport->sport != NULL) {
+			ocs_log_debug(ocs, "sport is not NULL, skipping\n");
+ return;
+ }
+
+ ocs_sport_vport_alloc(ocs->domain, fcp->vport);
+ return;
+ }
+
+}
+
+/**
+ * @ingroup scsi_api_initiator
+ * @brief receive notification of a new SCSI target node
+ *
+ * Sent by the base driver to notify an initiator-client of the presence of a
+ * new remote target. The initiator-client may use this call to prepare to
+ * issue IO to this node.
+ *
+ * This function is only called if the base driver is enabled for
+ * initiator capability.
+ *
+ * @param node pointer to new remote target node
+ *
+ * @return returns 0 for success, a negative error code value for failure.
+ *
+ * @note
+ */
+int32_t
+ocs_scsi_new_target(ocs_node_t *node)
+{
+ struct ocs_softc *ocs = node->ocs;
+ union ccb *ccb = NULL;
+ ocs_fcport *fcp = NULL;
+
+ fcp = node->sport->tgt_data;
+ if (fcp == NULL) {
+ printf("%s:FCP is NULL \n", __func__);
+ return 0;
+ }
+
+ if (NULL == (ccb = xpt_alloc_ccb_nowait())) {
+ device_printf(ocs->dev, "%s: ccb allocation failed\n", __func__);
+ return -1;
+ }
+
+ if (CAM_REQ_CMP != xpt_create_path(&ccb->ccb_h.path, xpt_periph,
+ cam_sim_path(fcp->sim),
+ node->instance_index, CAM_LUN_WILDCARD)) {
+ device_printf(
+ ocs->dev, "%s: target path creation failed\n", __func__);
+ xpt_free_ccb(ccb);
+ return -1;
+ }
+
+ xpt_rescan(ccb);
+
+ return 0;
+}
+
+/**
+ * @ingroup scsi_api_initiator
+ * @brief Delete a SCSI target node
+ *
+ * Sent by the base driver to notify an initiator-client that a remote target
+ * is now gone. The base driver will have terminated all outstanding IOs
+ * and the initiator-client will receive appropriate completions.
+ *
+ * The ocs_node_t structure has an element of type ocs_scsi_ini_node_t named
+ * ini_node that is declared and used by an initiator-client for private
+ * information.
+ *
+ * This function is only called if the base driver is enabled for
+ * initiator capability.
+ *
+ * @param node pointer to the node being deleted
+ * @param reason reason for deleting the target
+ *
+ * @return Returns OCS_SCSI_CALL_ASYNC if target delete is queued for async
+ * completion and OCS_SCSI_CALL_COMPLETE if call completed or error.
+ *
+ * @note
+ */
+int32_t
+ocs_scsi_del_target(ocs_node_t *node, ocs_scsi_del_target_reason_e reason)
+{
+ struct ocs_softc *ocs = node->ocs;
+ struct cam_path *cpath = NULL;
+ ocs_fcport *fcp = NULL;
+
+ fcp = node->sport->tgt_data;
+ if (fcp == NULL) {
+ ocs_log_err(ocs,"FCP is NULL \n");
+ return 0;
+ }
+
+ if (!fcp->sim) {
+ device_printf(ocs->dev, "%s: calling with NULL sim\n", __func__);
+ return OCS_SCSI_CALL_COMPLETE;
+ }
+
+ if (CAM_REQ_CMP == xpt_create_path(&cpath, NULL, cam_sim_path(fcp->sim),
+ node->instance_index, CAM_LUN_WILDCARD)) {
+ xpt_async(AC_LOST_DEVICE, cpath, NULL);
+
+ xpt_free_path(cpath);
+ }
+ return OCS_SCSI_CALL_COMPLETE;
+}
+
+/**
+ * @brief Initialize SCSI IO
+ *
+ * Initialize SCSI IO, this function is called once per IO during IO pool
+ * allocation so that the target server may initialize any of its own private
+ * data.
+ *
+ * @param io pointer to SCSI IO object
+ *
+ * @return returns 0 for success, a negative error code value for failure.
+ */
+int32_t
+ocs_scsi_tgt_io_init(ocs_io_t *io)
+{
+ return 0;
+}
+
+/**
+ * @brief Uninitialize SCSI IO
+ *
+ * Uninitialize target server private data in a SCSI io object
+ *
+ * @param io pointer to SCSI IO object
+ *
+ * @return returns 0 for success, a negative error code value for failure.
+ */
+int32_t
+ocs_scsi_tgt_io_exit(ocs_io_t *io)
+{
+ return 0;
+}
+
+/**
+ * @brief Initialize SCSI IO
+ *
+ * Initialize SCSI IO, this function is called once per IO during IO pool
+ * allocation so that the initiator client may initialize any of its own private
+ * data.
+ *
+ * @param io pointer to SCSI IO object
+ *
+ * @return returns 0 for success, a negative error code value for failure.
+ */
+int32_t
+ocs_scsi_ini_io_init(ocs_io_t *io)
+{
+ return 0;
+}
+
+/**
+ * @brief Uninitialize SCSI IO
+ *
+ * Uninitialize initiator client private data in a SCSI io object
+ *
+ * @param io pointer to SCSI IO object
+ *
+ * @return returns 0 for success, a negative error code value for failure.
+ */
+int32_t
+ocs_scsi_ini_io_exit(ocs_io_t *io)
+{
+ return 0;
+}
+/*
+ * End of functions required by SCSI base driver API
+ ***************************************************************************/
+
+static __inline void
+ocs_set_ccb_status(union ccb *ccb, cam_status status)
+{
+ ccb->ccb_h.status &= ~CAM_STATUS_MASK;
+ ccb->ccb_h.status |= status;
+}
+
+static int32_t
+ocs_task_set_full_or_busy_cb(ocs_io_t *io, ocs_scsi_io_status_e scsi_status,
+ uint32_t flags, void *arg)
+{
+
+ ocs_target_io_free(io);
+
+ return 0;
+}
+
+/**
+ * @brief send SCSI task set full or busy status
+ *
+ * A SCSI task set full or busy response is sent depending on whether
+ * another IO is already active on the LUN.
+ *
+ * @param io pointer to IO context
+ *
+ * @return returns 0 for success, a negative error code value for failure.
+ */
+
+static int32_t
+ocs_task_set_full_or_busy(ocs_io_t *io)
+{
+ ocs_scsi_cmd_resp_t rsp = { 0 };
+ ocs_t *ocs = io->ocs;
+
+ /*
+ * If there is another command for the LUN, then send task set full,
+ * if this is the first one, then send the busy status.
+ *
+ * if 'busy sent' is FALSE, set it to TRUE and send BUSY
+ * otherwise send FULL
+ */
+ if (atomic_cmpset_acq_32(&io->node->tgt_node.busy_sent, FALSE, TRUE)) {
+ rsp.scsi_status = SCSI_STATUS_BUSY; /* Busy */
+ printf("%s: busy [%s] tag=%x iiu=%d ihw=%d\n", __func__,
+ io->node->display_name, io->tag,
+ io->ocs->io_in_use, io->ocs->io_high_watermark);
+ } else {
+ rsp.scsi_status = SCSI_STATUS_TASK_SET_FULL; /* Task set full */
+ printf("%s: full tag=%x iiu=%d\n", __func__, io->tag,
+ io->ocs->io_in_use);
+ }
+
+ /* Log a message here indicating a busy or task set full state */
+ if (OCS_LOG_ENABLE_Q_FULL_BUSY_MSG(ocs)) {
+ /* Log Task Set Full */
+ if (rsp.scsi_status == SCSI_STATUS_TASK_SET_FULL) {
+ /* Task Set Full Message */
+ ocs_log_info(ocs, "OCS CAM TASK SET FULL. Tasks >= %d\n",
+ ocs->io_high_watermark);
+ }
+ else if (rsp.scsi_status == SCSI_STATUS_BUSY) {
+ /* Log Busy Message */
+ ocs_log_info(ocs, "OCS CAM SCSI BUSY\n");
+ }
+ }
+
+ /* Send the response */
+ return
+ ocs_scsi_send_resp(io, 0, &rsp, ocs_task_set_full_or_busy_cb, NULL);
+}
+
+/**
+ * @ingroup cam_io
+ * @brief Process target IO completions
+ *
+ * @param io
+ * @param scsi_status did the IO complete successfully
+ * @param flags
+ * @param arg application specific pointer provided in the call to ocs_target_io()
+ *
+ * @todo
+ */
+static int32_t ocs_scsi_target_io_cb(ocs_io_t *io,
+ ocs_scsi_io_status_e scsi_status,
+ uint32_t flags, void *arg)
+{
+ union ccb *ccb = arg;
+ struct ccb_scsiio *csio = &ccb->csio;
+ struct ocs_softc *ocs = csio->ccb_h.ccb_ocs_ptr;
+ uint32_t cam_dir = ccb->ccb_h.flags & CAM_DIR_MASK;
+ uint32_t io_is_done =
+ (ccb->ccb_h.flags & CAM_SEND_STATUS) == CAM_SEND_STATUS;
+
+ ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
+
+ if (CAM_DIR_NONE != cam_dir) {
+ bus_dmasync_op_t op;
+
+ if (CAM_DIR_IN == cam_dir) {
+ op = BUS_DMASYNC_POSTREAD;
+ } else {
+ op = BUS_DMASYNC_POSTWRITE;
+ }
+ /* Synchronize the DMA memory with the CPU and free the mapping */
+ bus_dmamap_sync(ocs->buf_dmat, io->tgt_io.dmap, op);
+ if (io->tgt_io.flags & OCS_CAM_IO_F_DMAPPED) {
+ bus_dmamap_unload(ocs->buf_dmat, io->tgt_io.dmap);
+ }
+ }
+
+ if (io->tgt_io.sendresp) {
+ io->tgt_io.sendresp = 0;
+ ocs_scsi_cmd_resp_t resp = { 0 };
+ io->tgt_io.state = OCS_CAM_IO_RESP;
+ resp.scsi_status = scsi_status;
+ if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
+ resp.sense_data = (uint8_t *)&csio->sense_data;
+ resp.sense_data_length = csio->sense_len;
+ }
+ resp.residual = io->exp_xfer_len - io->transferred;
+
+ return ocs_scsi_send_resp(io, 0, &resp, ocs_scsi_target_io_cb, ccb);
+ }
+
+ switch (scsi_status) {
+ case OCS_SCSI_STATUS_GOOD:
+ ocs_set_ccb_status(ccb, CAM_REQ_CMP);
+ break;
+ case OCS_SCSI_STATUS_ABORTED:
+ ocs_set_ccb_status(ccb, CAM_REQ_ABORTED);
+ break;
+ default:
+ ocs_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
+ }
+
+ if (io_is_done) {
+ if ((io->tgt_io.flags & OCS_CAM_IO_F_ABORT_NOTIFY) == 0) {
+ ocs_target_io_free(io);
+ }
+ } else {
+ io->tgt_io.state = OCS_CAM_IO_DATA_DONE;
+ /*device_printf(ocs->dev, "%s: CTIO state=%d tag=%#x\n",
+ __func__, io->tgt_io.state, io->tag);*/
+ }
+
+ xpt_done(ccb);
+
+ return 0;
+}
+
+/**
+ * @note 1. Since the CCB is assigned to the ocs_io_t on an XPT_CONT_TARGET_IO
+ * action, if an initiator aborts a command prior to the SIM receiving
+ * a CTIO, the IO's CCB will be NULL.
+ */
+static int32_t
+ocs_io_abort_cb(ocs_io_t *io, ocs_scsi_io_status_e scsi_status, uint32_t flags, void *arg)
+{
+ struct ocs_softc *ocs = NULL;
+ ocs_io_t *tmfio = arg;
+ ocs_scsi_tmf_resp_e tmf_resp = OCS_SCSI_TMF_FUNCTION_COMPLETE;
+ int32_t rc = 0;
+
+ ocs = io->ocs;
+
+ io->tgt_io.flags &= ~OCS_CAM_IO_F_ABORT_DEV;
+
+ /* A good status indicates the IO was aborted and will be completed in
+ * the IO's completion handler. Handle the other cases here. */
+ switch (scsi_status) {
+ case OCS_SCSI_STATUS_GOOD:
+ break;
+ case OCS_SCSI_STATUS_NO_IO:
+ break;
+ default:
+ device_printf(ocs->dev, "%s: unhandled status %d\n",
+ __func__, scsi_status);
+ tmf_resp = OCS_SCSI_TMF_FUNCTION_REJECTED;
+ rc = -1;
+ }
+
+ ocs_scsi_send_tmf_resp(tmfio, tmf_resp, NULL, ocs_target_tmf_cb, NULL);
+
+ return rc;
+}
+
+/**
+ * @ingroup cam_io
+ * @brief Process initiator IO completions
+ *
+ * @param io
+ * @param scsi_status did the IO complete successfully
+ * @param rsp pointer to response buffer
+ * @param flags
+ * @param arg application specific pointer provided in the call to ocs_target_io()
+ *
+ * @todo
+ */
+static int32_t ocs_scsi_initiator_io_cb(ocs_io_t *io,
+ ocs_scsi_io_status_e scsi_status,
+ ocs_scsi_cmd_resp_t *rsp,
+ uint32_t flags, void *arg)
+{
+ union ccb *ccb = arg;
+ struct ccb_scsiio *csio = &ccb->csio;
+ struct ocs_softc *ocs = csio->ccb_h.ccb_ocs_ptr;
+ uint32_t cam_dir = ccb->ccb_h.flags & CAM_DIR_MASK;
+	cam_status ccb_status = CAM_REQ_CMP_ERR;
+
+ if (CAM_DIR_NONE != cam_dir) {
+ bus_dmasync_op_t op;
+
+ if (CAM_DIR_IN == cam_dir) {
+ op = BUS_DMASYNC_POSTREAD;
+ } else {
+ op = BUS_DMASYNC_POSTWRITE;
+ }
+ /* Synchronize the DMA memory with the CPU and free the mapping */
+ bus_dmamap_sync(ocs->buf_dmat, io->tgt_io.dmap, op);
+ if (io->tgt_io.flags & OCS_CAM_IO_F_DMAPPED) {
+ bus_dmamap_unload(ocs->buf_dmat, io->tgt_io.dmap);
+ }
+ }
+
+ if (scsi_status == OCS_SCSI_STATUS_CHECK_RESPONSE) {
+ csio->scsi_status = rsp->scsi_status;
+ if (SCSI_STATUS_OK != rsp->scsi_status) {
+ ccb_status = CAM_SCSI_STATUS_ERROR;
+ }
+
+ csio->resid = rsp->residual;
+ if (rsp->residual > 0) {
+ uint32_t length = rsp->response_wire_length;
+ /* underflow */
+ if (csio->dxfer_len == (length + csio->resid)) {
+ ccb_status = CAM_REQ_CMP;
+ }
+ } else if (rsp->residual < 0) {
+ ccb_status = CAM_DATA_RUN_ERR;
+ }
+
+ if ((rsp->sense_data_length) &&
+ !(ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR))) {
+ uint32_t sense_len = 0;
+
+ ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
+ if (rsp->sense_data_length < csio->sense_len) {
+ csio->sense_resid =
+ csio->sense_len - rsp->sense_data_length;
+ sense_len = rsp->sense_data_length;
+ } else {
+ csio->sense_resid = 0;
+ sense_len = csio->sense_len;
+ }
+ ocs_memcpy(&csio->sense_data, rsp->sense_data, sense_len);
+ }
+ } else if (scsi_status != OCS_SCSI_STATUS_GOOD) {
+ ccb_status = CAM_REQ_CMP_ERR;
+ } else {
+ ccb_status = CAM_REQ_CMP;
+ }
+
+ ocs_set_ccb_status(ccb, ccb_status);
+
+ ocs_scsi_io_free(io);
+
+ csio->ccb_h.ccb_io_ptr = NULL;
+ csio->ccb_h.ccb_ocs_ptr = NULL;
+ ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
+
+ xpt_done(ccb);
+
+ return 0;
+}
+
+/**
+ * @brief Load scatter-gather list entries into an IO
+ *
+ * This routine is the bus_dma callback used when loading the CCB's data
+ * buffers in ocs_build_scsi_sgl(). Although the routine does not return
+ * success/fail, callers can check the rc member of the ocs_dmamap_load_arg_t
+ * to determine whether the mapping failed (negative on failure).
+ *
+ * @param arg pointer to the ocs_dmamap_load_arg_t for this mapping
+ * @param seg DMA address/length pairs
+ * @param nseg number of DMA address/length pairs
+ * @param error any errors while mapping the IO
+ */
+static void
+ocs_scsi_dmamap_load(void *arg, bus_dma_segment_t *seg, int nseg, int error)
+{
+ ocs_dmamap_load_arg_t *sglarg = (ocs_dmamap_load_arg_t*) arg;
+
+ if (error) {
+ printf("%s: seg=%p nseg=%d error=%d\n",
+ __func__, seg, nseg, error);
+ sglarg->rc = -1;
+ } else {
+ uint32_t i = 0;
+ uint32_t c = 0;
+
+ if ((sglarg->sgl_count + nseg) > sglarg->sgl_max) {
+ printf("%s: sgl_count=%d nseg=%d max=%d\n", __func__,
+ sglarg->sgl_count, nseg, sglarg->sgl_max);
+ sglarg->rc = -2;
+ return;
+ }
+
+ for (i = 0, c = sglarg->sgl_count; i < nseg; i++, c++) {
+ sglarg->sgl[c].addr = seg[i].ds_addr;
+ sglarg->sgl[c].len = seg[i].ds_len;
+ }
+
+ sglarg->sgl_count = c;
+
+ sglarg->rc = 0;
+ }
+}
+
+/**
+ * @brief Build a scatter-gather list from a CAM CCB
+ *
+ * @param ocs the driver instance's software context
+ * @param ccb pointer to the CCB
+ * @param io pointer to the previously allocated IO object
+ * @param sgl pointer to SGL
+ * @param sgl_max number of entries in sgl
+ *
+ * @return 0 on success, non-zero otherwise
+ */
+static int32_t
+ocs_build_scsi_sgl(struct ocs_softc *ocs, union ccb *ccb, ocs_io_t *io,
+ ocs_scsi_sgl_t *sgl, uint32_t sgl_max)
+{
+ ocs_dmamap_load_arg_t dmaarg;
+ int32_t err = 0;
+
+ if (!ocs || !ccb || !io || !sgl) {
+ printf("%s: bad param o=%p c=%p i=%p s=%p\n", __func__,
+ ocs, ccb, io, sgl);
+ return -1;
+ }
+
+ io->tgt_io.flags &= ~OCS_CAM_IO_F_DMAPPED;
+
+ dmaarg.sgl = sgl;
+ dmaarg.sgl_count = 0;
+ dmaarg.sgl_max = sgl_max;
+ dmaarg.rc = 0;
+
+ err = bus_dmamap_load_ccb(ocs->buf_dmat, io->tgt_io.dmap, ccb,
+ ocs_scsi_dmamap_load, &dmaarg, 0);
+
+ if (err || dmaarg.rc) {
+ device_printf(
+ ocs->dev, "%s: bus_dmamap_load_ccb error (%d %d)\n",
+ __func__, err, dmaarg.rc);
+ return -1;
+ }
+
+ io->tgt_io.flags |= OCS_CAM_IO_F_DMAPPED;
+ return dmaarg.sgl_count;
+}
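+
+/*
+ * Usage sketch (illustrative only, compiled out): callers map the CCB data
+ * buffer into an on-stack SGL and dispatch on the returned entry count, as
+ * the target and initiator paths below do.  All names are from this file.
+ */
+#if 0
+ ocs_scsi_sgl_t sgl[OCS_FC_MAX_SGL];
+ int32_t sgl_count;
+
+ sgl_count = ocs_build_scsi_sgl(ocs, ccb, io, sgl, ARRAY_SIZE(sgl));
+ if (sgl_count <= 0) {
+ /* mapping failed; complete the CCB with an error status */
+ }
+#endif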
+
+/**
+ * @ingroup cam_io
+ * @brief Send a target IO
+ *
+ * @param ocs the driver instance's software context
+ * @param ccb pointer to the CCB
+ *
+ * @return 0 on success, non-zero otherwise
+ */
+static int32_t
+ocs_target_io(struct ocs_softc *ocs, union ccb *ccb)
+{
+ struct ccb_scsiio *csio = &ccb->csio;
+ ocs_io_t *io = NULL;
+ uint32_t cam_dir = ccb->ccb_h.flags & CAM_DIR_MASK;
+ bool sendstatus = ccb->ccb_h.flags & CAM_SEND_STATUS;
+ uint32_t xferlen = csio->dxfer_len;
+ int32_t rc = 0;
+
+ io = ocs_scsi_find_io(ocs, csio->tag_id);
+ if (io == NULL) {
+ ocs_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
+ panic("bad tag value");
+ return 1;
+ }
+
+ /* Received an ABORT TASK for this IO */
+ if (io->tgt_io.flags & OCS_CAM_IO_F_ABORT_RECV) {
+ /*device_printf(ocs->dev,
+ "%s: XPT_CONT_TARGET_IO state=%d tag=%#x xid=%#x flags=%#x\n",
+ __func__, io->tgt_io.state, io->tag, io->init_task_tag,
+ io->tgt_io.flags);*/
+ io->tgt_io.flags |= OCS_CAM_IO_F_ABORT_CAM;
+
+ if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
+ ocs_set_ccb_status(ccb, CAM_REQ_CMP);
+ ocs_target_io_free(io);
+ return 1;
+ }
+
+ ocs_set_ccb_status(ccb, CAM_REQ_ABORTED);
+
+ return 1;
+ }
+
+ io->tgt_io.app = ccb;
+
+ ocs_set_ccb_status(ccb, CAM_REQ_INPROG);
+ ccb->ccb_h.status |= CAM_SIM_QUEUED;
+
+ csio->ccb_h.ccb_ocs_ptr = ocs;
+ csio->ccb_h.ccb_io_ptr = io;
+
+ if (sendstatus && (xferlen == 0)) {
+ ocs_scsi_cmd_resp_t resp = { 0 };
+
+ ocs_assert(ccb->ccb_h.flags & CAM_SEND_STATUS, -1);
+
+ io->tgt_io.state = OCS_CAM_IO_RESP;
+
+ resp.scsi_status = csio->scsi_status;
+
+ if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
+ resp.sense_data = (uint8_t *)&csio->sense_data;
+ resp.sense_data_length = csio->sense_len;
+ }
+
+ resp.residual = io->exp_xfer_len - io->transferred;
+ rc = ocs_scsi_send_resp(io, 0, &resp, ocs_scsi_target_io_cb, ccb);
+
+ } else if (xferlen != 0) {
+ ocs_scsi_sgl_t sgl[OCS_FC_MAX_SGL];
+ int32_t sgl_count = 0;
+
+ io->tgt_io.state = OCS_CAM_IO_DATA;
+
+ if (sendstatus)
+ io->tgt_io.sendresp = 1;
+
+ sgl_count = ocs_build_scsi_sgl(ocs, ccb, io, sgl, ARRAY_SIZE(sgl));
+ if (sgl_count > 0) {
+ if (cam_dir == CAM_DIR_IN) {
+ rc = ocs_scsi_send_rd_data(io, 0, NULL, sgl,
+ sgl_count, csio->dxfer_len,
+ ocs_scsi_target_io_cb, ccb);
+ } else if (cam_dir == CAM_DIR_OUT) {
+ rc = ocs_scsi_recv_wr_data(io, 0, NULL, sgl,
+ sgl_count, csio->dxfer_len,
+ ocs_scsi_target_io_cb, ccb);
+ } else {
+ device_printf(ocs->dev, "%s:"
+ " unknown CAM direction %#x\n",
+ __func__, cam_dir);
+ ocs_set_ccb_status(ccb, CAM_REQ_INVALID);
+ rc = 1;
+ }
+ } else {
+ device_printf(ocs->dev, "%s: building SGL failed\n",
+ __func__);
+ ocs_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
+ rc = 1;
+ }
+ } else {
+ device_printf(ocs->dev, "%s: Wrong value xfer and sendstatus"
+ " are 0 \n", __func__);
+ ocs_set_ccb_status(ccb, CAM_REQ_INVALID);
+ rc = 1;
+
+ }
+
+ if (rc) {
+ ocs_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
+ ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
+ io->tgt_io.state = OCS_CAM_IO_DATA_DONE;
+ device_printf(ocs->dev, "%s: CTIO state=%d tag=%#x\n",
+ __func__, io->tgt_io.state, io->tag);
+ if (sendstatus && (xferlen == 0)) {
+ ocs_target_io_free(io);
+ }
+ }
+
+ return rc;
+}
+
+static int32_t
+ocs_target_tmf_cb(ocs_io_t *io, ocs_scsi_io_status_e scsi_status, uint32_t flags,
+ void *arg)
+{
+
+ /*device_printf(io->ocs->dev, "%s: tag=%x io=%p s=%#x\n",
+ __func__, io->tag, io, scsi_status);*/
+ ocs_scsi_io_complete(io);
+
+ return 0;
+}
+
+/**
+ * @ingroup cam_io
+ * @brief Send an initiator IO
+ *
+ * @param ocs the driver instance's software context
+ * @param ccb pointer to the CCB
+ *
+ * @return 0 on success, non-zero otherwise
+ */
+static int32_t
+ocs_initiator_io(struct ocs_softc *ocs, union ccb *ccb)
+{
+ int32_t rc;
+ struct ccb_scsiio *csio = &ccb->csio;
+ struct ccb_hdr *ccb_h = &csio->ccb_h;
+ ocs_node_t *node = NULL;
+ ocs_io_t *io = NULL;
+ ocs_scsi_sgl_t sgl[OCS_FC_MAX_SGL];
+ int32_t sgl_count;
+
+ node = ocs_node_get_instance(ocs, ccb_h->target_id);
+ if (node == NULL) {
+ device_printf(ocs->dev, "%s: no device %d\n", __func__,
+ ccb_h->target_id);
+ return CAM_DEV_NOT_THERE;
+ }
+
+ if (!node->targ) {
+ device_printf(ocs->dev, "%s: not target device %d\n", __func__,
+ ccb_h->target_id);
+ return CAM_DEV_NOT_THERE;
+ }
+
+ io = ocs_scsi_io_alloc(node, OCS_SCSI_IO_ROLE_ORIGINATOR);
+ if (io == NULL) {
+ device_printf(ocs->dev, "%s: unable to alloc IO\n", __func__);
+ return -1;
+ }
+
+ /* Even though this is the initiator path, use the target structure, as
+ * ocs_build_scsi_sgl only references the tgt_io part of an ocs_io_t. */
+ io->tgt_io.app = ccb;
+
+ csio->ccb_h.ccb_ocs_ptr = ocs;
+ csio->ccb_h.ccb_io_ptr = io;
+
+ sgl_count = ocs_build_scsi_sgl(ocs, ccb, io, sgl, ARRAY_SIZE(sgl));
+ if (sgl_count < 0) {
+ ocs_scsi_io_free(io);
+ device_printf(ocs->dev, "%s: building SGL failed\n", __func__);
+ return -1;
+ }
+
+ if (ccb->ccb_h.timeout == CAM_TIME_INFINITY) {
+ io->timeout = 0;
+ } else if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) {
+ io->timeout = OCS_CAM_IO_TIMEOUT;
+ } else {
+ io->timeout = ccb->ccb_h.timeout;
+ }
+
+ switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
+ case CAM_DIR_NONE:
+ rc = ocs_scsi_send_nodata_io(node, io, ccb_h->target_lun,
+ ccb->ccb_h.flags & CAM_CDB_POINTER ?
+ csio->cdb_io.cdb_ptr: csio->cdb_io.cdb_bytes,
+ csio->cdb_len,
+ ocs_scsi_initiator_io_cb, ccb);
+ break;
+ case CAM_DIR_IN:
+ rc = ocs_scsi_send_rd_io(node, io, ccb_h->target_lun,
+ ccb->ccb_h.flags & CAM_CDB_POINTER ?
+ csio->cdb_io.cdb_ptr: csio->cdb_io.cdb_bytes,
+ csio->cdb_len,
+ NULL,
+ sgl, sgl_count, csio->dxfer_len,
+ ocs_scsi_initiator_io_cb, ccb);
+ break;
+ case CAM_DIR_OUT:
+ rc = ocs_scsi_send_wr_io(node, io, ccb_h->target_lun,
+ ccb->ccb_h.flags & CAM_CDB_POINTER ?
+ csio->cdb_io.cdb_ptr: csio->cdb_io.cdb_bytes,
+ csio->cdb_len,
+ NULL,
+ sgl, sgl_count, csio->dxfer_len,
+ ocs_scsi_initiator_io_cb, ccb);
+ break;
+ default:
+ panic("%s invalid data direction %08x\n", __func__,
+ ccb->ccb_h.flags);
+ break;
+ }
+
+ return rc;
+}
+
+static uint32_t
+ocs_fcp_change_role(struct ocs_softc *ocs, ocs_fcport *fcp, uint32_t new_role)
+{
+
+ uint32_t rc = 0, was = 0, i = 0;
+ ocs_vport_spec_t *vport = fcp->vport;
+
+ for (was = 0, i = 0; i < (ocs->num_vports + 1); i++) {
+ if (FCPORT(ocs, i)->role != KNOB_ROLE_NONE)
+ was++;
+ }
+
+ /* Physical port */
+ if ((was == 0) || (vport == NULL)) {
+ fcp->role = new_role;
+ if (vport == NULL) {
+ ocs->enable_ini = (new_role & KNOB_ROLE_INITIATOR)? 1:0;
+ ocs->enable_tgt = (new_role & KNOB_ROLE_TARGET)? 1:0;
+ } else {
+ vport->enable_ini = (new_role & KNOB_ROLE_INITIATOR)? 1:0;
+ vport->enable_tgt = (new_role & KNOB_ROLE_TARGET)? 1:0;
+ }
+
+ rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_OFFLINE);
+ if (rc) {
+ ocs_log_debug(ocs, "port offline failed : %d\n", rc);
+ }
+
+ rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_ONLINE);
+ if (rc) {
+ ocs_log_debug(ocs, "port online failed : %d\n", rc);
+ }
+
+ return 0;
+ }
+
+ if (fcp->role != KNOB_ROLE_NONE) {
+ fcp->role = new_role;
+ vport->enable_ini = (new_role & KNOB_ROLE_INITIATOR)? 1:0;
+ vport->enable_tgt = (new_role & KNOB_ROLE_TARGET)? 1:0;
+ /* A new sport will be created in the sport-deleted callback */
+ return ocs_sport_vport_del(ocs, ocs->domain, vport->wwpn, vport->wwnn);
+ }
+
+ fcp->role = new_role;
+
+ vport->enable_ini = (new_role & KNOB_ROLE_INITIATOR)? 1:0;
+ vport->enable_tgt = (new_role & KNOB_ROLE_TARGET)? 1:0;
+
+ if (fcp->role != KNOB_ROLE_NONE) {
+ return ocs_sport_vport_alloc(ocs->domain, vport);
+ }
+
+ return (0);
+}
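+
+/*
+ * Summary sketch (inferred from the logic above, not normative): a role
+ * change on the physical port, or the first role assignment on any port,
+ * bounces the link via OCS_XPORT_PORT_OFFLINE/ONLINE; changing the role of
+ * an active vport deletes it and lets the sport-deleted callback recreate
+ * it; assigning a role to an idle vport simply allocates it.  For example:
+ *
+ * ocs_fcp_change_role(ocs, FCPORT(ocs, bus), KNOB_ROLE_BOTH);
+ */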
+
+/**
+ * @ingroup cam_api
+ * @brief Process CAM actions
+ *
+ * The driver supplies this routine to the CAM during initialization; it
+ * is the main entry point for processing CAM Control Blocks (CCBs).
+ *
+ * @param sim pointer to the SCSI Interface Module
+ * @param ccb CAM control block
+ *
+ * @todo
+ * - populate path inquiry data via info retrieved from SLI port
+ */
+static void
+ocs_action(struct cam_sim *sim, union ccb *ccb)
+{
+ struct ocs_softc *ocs = (struct ocs_softc *)cam_sim_softc(sim);
+ struct ccb_hdr *ccb_h = &ccb->ccb_h;
+
+ int32_t rc, bus;
+ bus = cam_sim_bus(sim);
+
+ switch (ccb_h->func_code) {
+ case XPT_SCSI_IO:
+
+ if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
+ if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
+ ccb->ccb_h.status = CAM_REQ_INVALID;
+ xpt_done(ccb);
+ break;
+ }
+ }
+
+ rc = ocs_initiator_io(ocs, ccb);
+ if (0 == rc) {
+ ocs_set_ccb_status(ccb, CAM_REQ_INPROG | CAM_SIM_QUEUED);
+ break;
+ } else {
+ ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
+ if (rc > 0) {
+ ocs_set_ccb_status(ccb, rc);
+ } else {
+ ocs_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
+ }
+ }
+ xpt_done(ccb);
+ break;
+ case XPT_PATH_INQ:
+ {
+ struct ccb_pathinq *cpi = &ccb->cpi;
+ struct ccb_pathinq_settings_fc *fc = &cpi->xport_specific.fc;
+
+ uint64_t wwn = 0;
+ ocs_xport_stats_t value;
+
+ cpi->version_num = 1;
+
+ cpi->protocol = PROTO_SCSI;
+ cpi->protocol_version = SCSI_REV_SPC;
+
+ if (ocs->ocs_xport == OCS_XPORT_FC) {
+ cpi->transport = XPORT_FC;
+ } else {
+ cpi->transport = XPORT_UNKNOWN;
+ }
+
+ cpi->transport_version = 0;
+
+ /* Set the transport parameters of the SIM */
+ ocs_xport_status(ocs->xport, OCS_XPORT_LINK_SPEED, &value);
+ fc->bitrate = value.value * 1000; /* speed in Mbps */
+
+ wwn = *((uint64_t *)ocs_scsi_get_property_ptr(ocs, OCS_SCSI_WWPN));
+ fc->wwpn = be64toh(wwn);
+
+ wwn = *((uint64_t *)ocs_scsi_get_property_ptr(ocs, OCS_SCSI_WWNN));
+ fc->wwnn = be64toh(wwn);
+
+ if (ocs->domain && ocs->domain->attached) {
+ fc->port = ocs->domain->sport->fc_id;
+ }
+
+ if (ocs->config_tgt) {
+ cpi->target_sprt =
+ PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
+ }
+
+ cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
+ cpi->hba_misc |= PIM_EXTLUNS | PIM_NOSCAN;
+
+ cpi->hba_inquiry = PI_TAG_ABLE;
+ cpi->max_target = OCS_MAX_TARGETS;
+ cpi->initiator_id = ocs->max_remote_nodes + 1;
+
+ if (!ocs->enable_ini) {
+ cpi->hba_misc |= PIM_NOINITIATOR;
+ }
+
+ cpi->max_lun = OCS_MAX_LUN;
+ cpi->bus_id = cam_sim_bus(sim);
+
+ /* Need to supply a base transfer speed prior to link up.
+ * Worst case, this would be 1Gbps FC. */
+ cpi->base_transfer_speed = 1 * 1000 * 1000;
+
+ /* Calculate the maximum IO size supported.
+ * Worst case would be an OS page per SGL entry. */
+ cpi->maxio = PAGE_SIZE *
+ (ocs_scsi_get_property(ocs, OCS_SCSI_MAX_SGL) - 1);
+
+ strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
+ strncpy(cpi->hba_vid, "Emulex", HBA_IDLEN);
+ strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
+ cpi->unit_number = cam_sim_unit(sim);
+
+ cpi->ccb_h.status = CAM_REQ_CMP;
+ xpt_done(ccb);
+ break;
+ }
+ case XPT_GET_TRAN_SETTINGS:
+ {
+ struct ccb_trans_settings *cts = &ccb->cts;
+ struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
+ struct ccb_trans_settings_fc *fc = &cts->xport_specific.fc;
+ ocs_xport_stats_t value;
+ ocs_node_t *fnode = NULL;
+
+ if (ocs->ocs_xport != OCS_XPORT_FC) {
+ ocs_set_ccb_status(ccb, CAM_REQ_INVALID);
+ xpt_done(ccb);
+ break;
+ }
+
+ fnode = ocs_node_get_instance(ocs, cts->ccb_h.target_id);
+ if (fnode == NULL) {
+ ocs_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
+ xpt_done(ccb);
+ break;
+ }
+
+ cts->protocol = PROTO_SCSI;
+ cts->protocol_version = SCSI_REV_SPC2;
+ cts->transport = XPORT_FC;
+ cts->transport_version = 2;
+
+ scsi->valid = CTS_SCSI_VALID_TQ;
+ scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
+
+ /* speed in Mbps */
+ ocs_xport_status(ocs->xport, OCS_XPORT_LINK_SPEED, &value);
+ fc->bitrate = value.value * 100;
+
+ fc->wwpn = ocs_node_get_wwpn(fnode);
+
+ fc->wwnn = ocs_node_get_wwnn(fnode);
+
+ fc->port = fnode->rnode.fc_id;
+
+ fc->valid = CTS_FC_VALID_SPEED |
+ CTS_FC_VALID_WWPN |
+ CTS_FC_VALID_WWNN |
+ CTS_FC_VALID_PORT;
+
+ ocs_set_ccb_status(ccb, CAM_REQ_CMP);
+ xpt_done(ccb);
+ break;
+ }
+ case XPT_SET_TRAN_SETTINGS:
+ ocs_set_ccb_status(ccb, CAM_REQ_CMP);
+ xpt_done(ccb);
+ break;
+
+ case XPT_CALC_GEOMETRY:
+ cam_calc_geometry(&ccb->ccg, TRUE);
+ xpt_done(ccb);
+ break;
+
+ case XPT_GET_SIM_KNOB:
+ {
+ struct ccb_sim_knob *knob = &ccb->knob;
+ uint64_t wwn = 0;
+ ocs_fcport *fcp = FCPORT(ocs, bus);
+
+ if (ocs->ocs_xport != OCS_XPORT_FC) {
+ ocs_set_ccb_status(ccb, CAM_REQ_INVALID);
+ xpt_done(ccb);
+ break;
+ }
+
+ if (bus == 0) {
+ wwn = *((uint64_t *)ocs_scsi_get_property_ptr(ocs,
+ OCS_SCSI_WWNN));
+ knob->xport_specific.fc.wwnn = be64toh(wwn);
+
+ wwn = *((uint64_t *)ocs_scsi_get_property_ptr(ocs,
+ OCS_SCSI_WWPN));
+ knob->xport_specific.fc.wwpn = be64toh(wwn);
+ } else {
+ knob->xport_specific.fc.wwnn = fcp->vport->wwnn;
+ knob->xport_specific.fc.wwpn = fcp->vport->wwpn;
+ }
+
+ knob->xport_specific.fc.role = fcp->role;
+ knob->xport_specific.fc.valid = KNOB_VALID_ADDRESS |
+ KNOB_VALID_ROLE;
+
+ ocs_set_ccb_status(ccb, CAM_REQ_CMP);
+ xpt_done(ccb);
+ break;
+ }
+ case XPT_SET_SIM_KNOB:
+ {
+ struct ccb_sim_knob *knob = &ccb->knob;
+ bool role_changed = FALSE;
+ ocs_fcport *fcp = FCPORT(ocs, bus);
+
+ if (ocs->ocs_xport != OCS_XPORT_FC) {
+ ocs_set_ccb_status(ccb, CAM_REQ_INVALID);
+ xpt_done(ccb);
+ break;
+ }
+
+ if (knob->xport_specific.fc.valid & KNOB_VALID_ADDRESS) {
+ device_printf(ocs->dev,
+ "%s: XPT_SET_SIM_KNOB wwnn=%llx wwpn=%llx\n",
+ __func__,
+ (unsigned long long)knob->xport_specific.fc.wwnn,
+ (unsigned long long)knob->xport_specific.fc.wwpn);
+ }
+
+ if (knob->xport_specific.fc.valid & KNOB_VALID_ROLE) {
+ switch (knob->xport_specific.fc.role) {
+ case KNOB_ROLE_NONE:
+ if (fcp->role != KNOB_ROLE_NONE) {
+ role_changed = TRUE;
+ }
+ break;
+ case KNOB_ROLE_TARGET:
+ if (fcp->role != KNOB_ROLE_TARGET) {
+ role_changed = TRUE;
+ }
+ break;
+ case KNOB_ROLE_INITIATOR:
+ if (fcp->role != KNOB_ROLE_INITIATOR) {
+ role_changed = TRUE;
+ }
+ break;
+ case KNOB_ROLE_BOTH:
+ if (fcp->role != KNOB_ROLE_BOTH) {
+ role_changed = TRUE;
+ }
+ break;
+ default:
+ device_printf(ocs->dev,
+ "%s: XPT_SET_SIM_KNOB unsupported role: %d\n",
+ __func__, knob->xport_specific.fc.role);
+ }
+
+ if (role_changed) {
+ device_printf(ocs->dev,
+ "BUS:%d XPT_SET_SIM_KNOB old_role: %d new_role: %d\n",
+ bus, fcp->role, knob->xport_specific.fc.role);
+
+ ocs_fcp_change_role(ocs, fcp, knob->xport_specific.fc.role);
+ }
+ }
+
+ ocs_set_ccb_status(ccb, CAM_REQ_CMP);
+ xpt_done(ccb);
+ break;
+ }
+ case XPT_ABORT:
+ {
+ union ccb *accb = ccb->cab.abort_ccb;
+
+ switch (accb->ccb_h.func_code) {
+ case XPT_ACCEPT_TARGET_IO:
+ ocs_abort_atio(ocs, ccb);
+ break;
+ case XPT_IMMEDIATE_NOTIFY:
+ ocs_abort_inot(ocs, ccb);
+ break;
+ case XPT_SCSI_IO:
+ rc = ocs_abort_initiator_io(ocs, accb);
+ if (rc) {
+ ccb->ccb_h.status = CAM_UA_ABORT;
+ } else {
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ }
+
+ break;
+ default:
+ printf("abort of unknown func %#x\n",
+ accb->ccb_h.func_code);
+ ccb->ccb_h.status = CAM_REQ_INVALID;
+ break;
+ }
+ break;
+ }
+ case XPT_RESET_BUS:
+ if (ocs_xport_control(ocs->xport, OCS_XPORT_PORT_OFFLINE) == 0) {
+ ocs_xport_control(ocs->xport, OCS_XPORT_PORT_ONLINE);
+
+ ocs_set_ccb_status(ccb, CAM_REQ_CMP);
+ } else {
+ ocs_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
+ }
+ xpt_done(ccb);
+ break;
+ case XPT_RESET_DEV:
+ {
+ ocs_node_t *node = NULL;
+ ocs_io_t *io = NULL;
+ int32_t rc = 0;
+
+ node = ocs_node_get_instance(ocs, ccb_h->target_id);
+ if (node == NULL) {
+ device_printf(ocs->dev, "%s: no device %d\n",
+ __func__, ccb_h->target_id);
+ ocs_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
+ xpt_done(ccb);
+ break;
+ }
+
+ io = ocs_scsi_io_alloc(node, OCS_SCSI_IO_ROLE_ORIGINATOR);
+ if (io == NULL) {
+ device_printf(ocs->dev, "%s: unable to alloc IO\n",
+ __func__);
+ ocs_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
+ xpt_done(ccb);
+ break;
+ }
+
+ rc = ocs_scsi_send_tmf(node, io, NULL, ccb_h->target_lun,
+ OCS_SCSI_TMF_LOGICAL_UNIT_RESET,
+ NULL, 0, 0, /* sgl, sgl_count, length */
+ ocs_initiator_tmf_cb, NULL/*arg*/);
+
+ if (rc) {
+ ocs_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
+ } else {
+ ocs_set_ccb_status(ccb, CAM_REQ_CMP);
+ }
+
+ if (node->fcp2device) {
+ ocs_reset_crn(node, ccb_h->target_lun);
+ }
+
+ xpt_done(ccb);
+ break;
+ }
+ case XPT_EN_LUN: /* target support */
+ {
+ ocs_tgt_resource_t *trsrc = NULL;
+ uint32_t status = 0;
+ ocs_fcport *fcp = FCPORT(ocs, bus);
+
+ device_printf(ocs->dev, "XPT_EN_LUN %sable %d:%d\n",
+ ccb->cel.enable ? "en" : "dis",
+ ccb->ccb_h.target_id,
+ (unsigned int)ccb->ccb_h.target_lun);
+
+ trsrc = ocs_tgt_resource_get(fcp, &ccb->ccb_h, &status);
+ if (trsrc) {
+ trsrc->enabled = ccb->cel.enable;
+
+ /* Abort all ATIO/INOT on LUN disable */
+ if (trsrc->enabled == FALSE) {
+ ocs_tgt_resource_abort(ocs, trsrc);
+ } else {
+ STAILQ_INIT(&trsrc->atio);
+ STAILQ_INIT(&trsrc->inot);
+ }
+ status = CAM_REQ_CMP;
+ }
+
+ ocs_set_ccb_status(ccb, status);
+ xpt_done(ccb);
+ break;
+ }
+ /*
+ * The flow of target IOs in CAM is:
+ * - CAM supplies a number of CCBs to the driver used for received
+ * commands.
+ * - when the driver receives a command, it copies the relevant
+ * information to the CCB and returns it to the CAM using xpt_done()
+ * - after the target server processes the request, it creates
+ * a new CCB containing information on how to continue the IO and
+ * passes that to the driver
+ * - the driver processes the "continue IO" (a.k.a. CTIO) CCB
+ * - once the IO completes, the driver returns the CTIO to the CAM
+ * using xpt_done()
+ */
+ case XPT_ACCEPT_TARGET_IO: /* used to inform the upper layer of a
+ received CDB (a.k.a. ATIO) */
+ case XPT_IMMEDIATE_NOTIFY: /* used to inform the upper layer of
+ another event (a.k.a. INOT) */
+ {
+ ocs_tgt_resource_t *trsrc = NULL;
+ uint32_t status = 0;
+ ocs_fcport *fcp = FCPORT(ocs, bus);
+
+ /*printf("XPT_%s %p\n", ccb_h->func_code == XPT_ACCEPT_TARGET_IO ?
+ "ACCEPT_TARGET_IO" : "IMMEDIATE_NOTIFY", ccb);*/
+ trsrc = ocs_tgt_resource_get(fcp, &ccb->ccb_h, &status);
+ if (trsrc == NULL) {
+ ocs_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
+ xpt_done(ccb);
+ break;
+ }
+
+ if (XPT_ACCEPT_TARGET_IO == ccb->ccb_h.func_code) {
+ struct ccb_accept_tio *atio = NULL;
+
+ atio = (struct ccb_accept_tio *)ccb;
+ atio->init_id = 0x0badbeef;
+ atio->tag_id = 0xdeadc0de;
+
+ STAILQ_INSERT_TAIL(&trsrc->atio, &ccb->ccb_h,
+ sim_links.stqe);
+ } else {
+ STAILQ_INSERT_TAIL(&trsrc->inot, &ccb->ccb_h,
+ sim_links.stqe);
+ }
+ ccb->ccb_h.ccb_io_ptr = NULL;
+ ccb->ccb_h.ccb_ocs_ptr = ocs;
+ ocs_set_ccb_status(ccb, CAM_REQ_INPROG);
+ /*
+ * These actions give resources to the target driver.
+ * If we didn't return here, this function would call
+ * xpt_done(), signaling to the upper layers that an
+ * IO or other event had arrived.
+ */
+ break;
+ }
+ case XPT_NOTIFY_ACKNOWLEDGE:
+ {
+ ocs_io_t *io = NULL;
+ ocs_io_t *abortio = NULL;
+
+ /* Get the IO reference for this tag */
+ io = ocs_scsi_find_io(ocs, ccb->cna2.tag_id);
+ if (io == NULL) {
+ device_printf(ocs->dev,
+ "%s: XPT_NOTIFY_ACKNOWLEDGE no IO with tag %#x\n",
+ __func__, ccb->cna2.tag_id);
+ ocs_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
+ xpt_done(ccb);
+ break;
+ }
+
+ abortio = io->tgt_io.app;
+ if (abortio) {
+ abortio->tgt_io.flags &= ~OCS_CAM_IO_F_ABORT_NOTIFY;
+ device_printf(ocs->dev,
+ "%s: XPT_NOTIFY_ACK state=%d tag=%#x xid=%#x"
+ " flags=%#x\n", __func__, abortio->tgt_io.state,
+ abortio->tag, abortio->init_task_tag,
+ abortio->tgt_io.flags);
+ /* TMF response was sent in abort callback */
+ } else {
+ ocs_scsi_send_tmf_resp(io,
+ OCS_SCSI_TMF_FUNCTION_COMPLETE,
+ NULL, ocs_target_tmf_cb, NULL);
+ }
+
+ ocs_set_ccb_status(ccb, CAM_REQ_CMP);
+ xpt_done(ccb);
+ break;
+ }
+ case XPT_CONT_TARGET_IO: /* continue target IO, sending data/response (a.k.a. CTIO) */
+ if (ocs_target_io(ocs, ccb)) {
+ device_printf(ocs->dev,
+ "XPT_CONT_TARGET_IO failed flags=%x tag=%#x\n",
+ ccb->ccb_h.flags, ccb->csio.tag_id);
+ xpt_done(ccb);
+ }
+ break;
+ default:
+ device_printf(ocs->dev, "unhandled func_code = %#x\n",
+ ccb_h->func_code);
+ ccb_h->status = CAM_REQ_INVALID;
+ xpt_done(ccb);
+ break;
+ }
+}
+
+/**
+ * @ingroup cam_api
+ * @brief Process events
+ *
+ * @param sim pointer to the SCSI Interface Module
+ *
+ */
+static void
+ocs_poll(struct cam_sim *sim)
+{
+ printf("%s\n", __func__);
+}
+
+static int32_t
+ocs_initiator_tmf_cb(ocs_io_t *io, ocs_scsi_io_status_e scsi_status,
+ ocs_scsi_cmd_resp_t *rsp, uint32_t flags, void *arg)
+{
+ int32_t rc = 0;
+
+ switch (scsi_status) {
+ case OCS_SCSI_STATUS_GOOD:
+ case OCS_SCSI_STATUS_NO_IO:
+ break;
+ case OCS_SCSI_STATUS_CHECK_RESPONSE:
+ if (rsp->response_data_length == 0) {
+ ocs_log_test(io->ocs, "check response without data?!?\n");
+ rc = -1;
+ break;
+ }
+
+ if (rsp->response_data[3] != 0) {
+ ocs_log_test(io->ocs, "TMF status %08x\n",
+ be32toh(*((uint32_t *)rsp->response_data)));
+ rc = -1;
+ break;
+ }
+ break;
+ default:
+ ocs_log_test(io->ocs, "status=%#x\n", scsi_status);
+ rc = -1;
+ }
+
+ ocs_scsi_io_free(io);
+
+ return rc;
+}
+
+/**
+ * @brief Look up a target resource structure
+ *
+ * Arbitrarily supports
+ * - wildcard target ID + wildcard LUN
+ * - any non-wildcard target ID + non-wildcard LUN (below OCS_MAX_LUN)
+ *
+ * @param fcp pointer to the FC port
+ * @param ccb_h pointer to the CCB header
+ * @param status returned status value
+ *
+ * @return pointer to the target resource, or NULL if none is available
+ * (e.g. if the LUN is invalid)
+ */
+static ocs_tgt_resource_t *
+ocs_tgt_resource_get(ocs_fcport *fcp, struct ccb_hdr *ccb_h, uint32_t *status)
+{
+ target_id_t tid = ccb_h->target_id;
+ lun_id_t lun = ccb_h->target_lun;
+
+ if (CAM_TARGET_WILDCARD == tid) {
+ if (CAM_LUN_WILDCARD != lun) {
+ *status = CAM_LUN_INVALID;
+ return NULL;
+ }
+ return &fcp->targ_rsrc_wildcard;
+ } else {
+ if (lun < OCS_MAX_LUN) {
+ return &fcp->targ_rsrc[lun];
+ } else {
+ *status = CAM_LUN_INVALID;
+ return NULL;
+ }
+ }
+}
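+
+/*
+ * Worked examples of the mapping above (illustrative only):
+ *
+ * target_id           target_lun        result
+ * CAM_TARGET_WILDCARD CAM_LUN_WILDCARD  &fcp->targ_rsrc_wildcard
+ * CAM_TARGET_WILDCARD 5                 NULL, *status = CAM_LUN_INVALID
+ * 0                   5                 &fcp->targ_rsrc[5]
+ * 0                   >= OCS_MAX_LUN    NULL, *status = CAM_LUN_INVALID
+ */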
+
+static int32_t
+ocs_tgt_resource_abort(struct ocs_softc *ocs, ocs_tgt_resource_t *trsrc)
+{
+ union ccb *ccb = NULL;
+ uint32_t count;
+
+ count = 0;
+ do {
+ ccb = (union ccb *)STAILQ_FIRST(&trsrc->atio);
+ if (ccb) {
+ STAILQ_REMOVE_HEAD(&trsrc->atio, sim_links.stqe);
+ ccb->ccb_h.status = CAM_REQ_ABORTED;
+ xpt_done(ccb);
+ count++;
+ }
+ } while (ccb);
+
+ count = 0;
+ do {
+ ccb = (union ccb *)STAILQ_FIRST(&trsrc->inot);
+ if (ccb) {
+ STAILQ_REMOVE_HEAD(&trsrc->inot, sim_links.stqe);
+ ccb->ccb_h.status = CAM_REQ_ABORTED;
+ xpt_done(ccb);
+ count++;
+ }
+ } while (ccb);
+
+ return 0;
+}
+
+static void
+ocs_abort_atio(struct ocs_softc *ocs, union ccb *ccb)
+{
+
+ ocs_io_t *aio = NULL;
+ ocs_tgt_resource_t *trsrc = NULL;
+ uint32_t status = CAM_REQ_INVALID;
+ struct ccb_hdr *cur = NULL;
+ union ccb *accb = ccb->cab.abort_ccb;
+
+ int bus = cam_sim_bus(xpt_path_sim((ccb)->ccb_h.path));
+ ocs_fcport *fcp = FCPORT(ocs, bus);
+
+ trsrc = ocs_tgt_resource_get(fcp, &accb->ccb_h, &status);
+ if (trsrc != NULL) {
+ STAILQ_FOREACH(cur, &trsrc->atio, sim_links.stqe) {
+ if (cur != &accb->ccb_h)
+ continue;
+
+ STAILQ_REMOVE(&trsrc->atio, cur, ccb_hdr,
+ sim_links.stqe);
+ accb->ccb_h.status = CAM_REQ_ABORTED;
+ xpt_done(accb);
+ ocs_set_ccb_status(ccb, CAM_REQ_CMP);
+ return;
+ }
+ }
+
+ /* if the ATIO has a valid IO pointer, CAM is telling
+ * the driver that the ATIO (which represents the entire
+ * exchange) has been aborted. */
+
+ aio = accb->ccb_h.ccb_io_ptr;
+ if (aio == NULL) {
+ ccb->ccb_h.status = CAM_UA_ABORT;
+ return;
+ }
+
+ device_printf(ocs->dev,
+ "%s: XPT_ABORT ATIO state=%d tag=%#x"
+ " xid=%#x flags=%#x\n", __func__,
+ aio->tgt_io.state, aio->tag,
+ aio->init_task_tag, aio->tgt_io.flags);
+ /* Expectations are:
+ * - abort task was received
+ * - already aborted IO in the DEVICE
+ * - already received NOTIFY ACKNOWLEDGE */
+
+ if ((aio->tgt_io.flags & OCS_CAM_IO_F_ABORT_RECV) == 0) {
+ device_printf(ocs->dev, "%s: abort not received or io completed \n", __func__);
+ ocs_set_ccb_status(ccb, CAM_REQ_CMP);
+ return;
+ }
+
+ aio->tgt_io.flags |= OCS_CAM_IO_F_ABORT_CAM;
+ ocs_target_io_free(aio);
+ ocs_set_ccb_status(ccb, CAM_REQ_CMP);
+
+ return;
+}
+
+static void
+ocs_abort_inot(struct ocs_softc *ocs, union ccb *ccb)
+{
+ ocs_tgt_resource_t *trsrc = NULL;
+ uint32_t status = CAM_REQ_INVALID;
+ struct ccb_hdr *cur = NULL;
+ union ccb *accb = ccb->cab.abort_ccb;
+
+ int bus = cam_sim_bus(xpt_path_sim((ccb)->ccb_h.path));
+ ocs_fcport *fcp = FCPORT(ocs, bus);
+
+ trsrc = ocs_tgt_resource_get(fcp, &accb->ccb_h, &status);
+ if (trsrc) {
+ STAILQ_FOREACH(cur, &trsrc->inot, sim_links.stqe) {
+ if (cur != &accb->ccb_h)
+ continue;
+
+ STAILQ_REMOVE(&trsrc->inot, cur, ccb_hdr,
+ sim_links.stqe);
+ accb->ccb_h.status = CAM_REQ_ABORTED;
+ xpt_done(accb);
+ ocs_set_ccb_status(ccb, CAM_REQ_CMP);
+ return;
+ }
+ }
+
+ ocs_set_ccb_status(ccb, CAM_UA_ABORT);
+ return;
+}
+
+static uint32_t
+ocs_abort_initiator_io(struct ocs_softc *ocs, union ccb *accb)
+{
+
+ ocs_node_t *node = NULL;
+ ocs_io_t *io = NULL;
+ int32_t rc = 0;
+ struct ccb_scsiio *csio = &accb->csio;
+
+ node = ocs_node_get_instance(ocs, accb->ccb_h.target_id);
+ if (node == NULL) {
+ device_printf(ocs->dev, "%s: no device %d\n",
+ __func__, accb->ccb_h.target_id);
+ ocs_set_ccb_status(accb, CAM_DEV_NOT_THERE);
+ xpt_done(accb);
+ return (-1);
+ }
+
+ io = ocs_scsi_io_alloc(node, OCS_SCSI_IO_ROLE_ORIGINATOR);
+ if (io == NULL) {
+ device_printf(ocs->dev,
+ "%s: unable to alloc IO\n", __func__);
+ ocs_set_ccb_status(accb, CAM_REQ_CMP_ERR);
+ xpt_done(accb);
+ return (-1);
+ }
+
+ rc = ocs_scsi_send_tmf(node, io,
+ (ocs_io_t *)csio->ccb_h.ccb_io_ptr,
+ accb->ccb_h.target_lun,
+ OCS_SCSI_TMF_ABORT_TASK,
+ NULL, 0, 0,
+ ocs_initiator_tmf_cb, NULL/*arg*/);
+
+ return rc;
+}
+
+void
+ocs_scsi_ini_ddump(ocs_textbuf_t *textbuf, ocs_scsi_ddump_type_e type, void *obj)
+{
+ switch(type) {
+ case OCS_SCSI_DDUMP_DEVICE: {
+ //ocs_t *ocs = obj;
+ break;
+ }
+ case OCS_SCSI_DDUMP_DOMAIN: {
+ //ocs_domain_t *domain = obj;
+ break;
+ }
+ case OCS_SCSI_DDUMP_SPORT: {
+ //ocs_sport_t *sport = obj;
+ break;
+ }
+ case OCS_SCSI_DDUMP_NODE: {
+ //ocs_node_t *node = obj;
+ break;
+ }
+ case OCS_SCSI_DDUMP_IO: {
+ //ocs_io_t *io = obj;
+ break;
+ }
+ default: {
+ break;
+ }
+ }
+}
+
+void
+ocs_scsi_tgt_ddump(ocs_textbuf_t *textbuf, ocs_scsi_ddump_type_e type, void *obj)
+{
+ switch(type) {
+ case OCS_SCSI_DDUMP_DEVICE: {
+ //ocs_t *ocs = obj;
+ break;
+ }
+ case OCS_SCSI_DDUMP_DOMAIN: {
+ //ocs_domain_t *domain = obj;
+ break;
+ }
+ case OCS_SCSI_DDUMP_SPORT: {
+ //ocs_sport_t *sport = obj;
+ break;
+ }
+ case OCS_SCSI_DDUMP_NODE: {
+ //ocs_node_t *node = obj;
+ break;
+ }
+ case OCS_SCSI_DDUMP_IO: {
+ ocs_io_t *io = obj;
+ char *state_str = NULL;
+
+ switch (io->tgt_io.state) {
+ case OCS_CAM_IO_FREE:
+ state_str = "FREE";
+ break;
+ case OCS_CAM_IO_COMMAND:
+ state_str = "COMMAND";
+ break;
+ case OCS_CAM_IO_DATA:
+ state_str = "DATA";
+ break;
+ case OCS_CAM_IO_DATA_DONE:
+ state_str = "DATA_DONE";
+ break;
+ case OCS_CAM_IO_RESP:
+ state_str = "RESP";
+ break;
+ default:
+ state_str = "xxx BAD xxx";
+ }
+ ocs_ddump_value(textbuf, "cam_st", "%s", state_str);
+ if (io->tgt_io.app) {
+ ocs_ddump_value(textbuf, "cam_flags", "%#x",
+ ((union ccb *)(io->tgt_io.app))->ccb_h.flags);
+ ocs_ddump_value(textbuf, "cam_status", "%#x",
+ ((union ccb *)(io->tgt_io.app))->ccb_h.status);
+ }
+
+ break;
+ }
+ default: {
+ break;
+ }
+ }
+}
+
+int32_t ocs_scsi_get_block_vaddr(ocs_io_t *io, uint64_t blocknumber,
+ ocs_scsi_vaddr_len_t addrlen[],
+ uint32_t max_addrlen, void **dif_vaddr)
+{
+ return -1;
+}
+
+uint32_t
+ocs_get_crn(ocs_node_t *node, uint8_t *crn, uint64_t lun)
+{
+ uint32_t idx;
+ struct ocs_lun_crn *lcrn = NULL;
+ idx = lun % OCS_MAX_LUN;
+
+ lcrn = node->ini_node.lun_crn[idx];
+
+ if (lcrn == NULL) {
+ lcrn = ocs_malloc(node->ocs, sizeof(struct ocs_lun_crn),
+ M_ZERO|M_NOWAIT);
+ if (lcrn == NULL) {
+ return (1);
+ }
+
+ lcrn->lun = lun;
+ node->ini_node.lun_crn[idx] = lcrn;
+ }
+
+ if (lcrn->lun != lun) {
+ return (1);
+ }
+
+ if (lcrn->crnseed == 0)
+ lcrn->crnseed = 1;
+
+ *crn = lcrn->crnseed++;
+ return (0);
+}
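+
+/*
+ * Worked example (illustrative only): CRNs for a LUN are issued as
+ * 1, 2, ... 255; because crnseed is a uint8_t that is bumped back to 1
+ * whenever it wraps to 0, the sequence then repeats from 1.  A logical
+ * unit reset restarts the sequence via ocs_reset_crn():
+ *
+ * uint8_t crn;
+ * ocs_get_crn(node, &crn, lun);   returns 0, crn == 1
+ * ocs_get_crn(node, &crn, lun);   returns 0, crn == 2
+ * ocs_reset_crn(node, lun);
+ * ocs_get_crn(node, &crn, lun);   returns 0, crn == 1 again
+ */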
+
+void
+ocs_del_crn(ocs_node_t *node)
+{
+ uint32_t i;
+ struct ocs_lun_crn *lcrn = NULL;
+
+ for(i = 0; i < OCS_MAX_LUN; i++) {
+ lcrn = node->ini_node.lun_crn[i];
+ if (lcrn) {
+ ocs_free(node->ocs, lcrn, sizeof(*lcrn));
+ }
+ }
+
+ return;
+}
+
+void
+ocs_reset_crn(ocs_node_t *node, uint64_t lun)
+{
+ uint32_t idx;
+ struct ocs_lun_crn *lcrn = NULL;
+ idx = lun % OCS_MAX_LUN;
+
+ lcrn = node->ini_node.lun_crn[idx];
+ if (lcrn)
+ lcrn->crnseed = 0;
+
+ return;
+}
diff --git a/sys/dev/ocs_fc/ocs_cam.h b/sys/dev/ocs_fc/ocs_cam.h
new file mode 100644
index 000000000000..2de549fe8f6e
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs_cam.h
@@ -0,0 +1,122 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef __OCS_CAM_H__
+#define __OCS_CAM_H__
+
+#include <cam/cam.h>
+#include <cam/cam_sim.h>
+#include <cam/cam_ccb.h>
+#include <cam/cam_periph.h>
+#include <cam/cam_xpt_sim.h>
+
+#include <cam/scsi/scsi_message.h>
+
+
+#define ccb_ocs_ptr spriv_ptr0
+#define ccb_io_ptr spriv_ptr1
+
+typedef STAILQ_HEAD(ocs_hdr_list_s, ccb_hdr) ocs_hdr_list_t;
+
+typedef struct ocs_tgt_resource_s {
+ ocs_hdr_list_t atio;
+ ocs_hdr_list_t inot;
+ uint8_t enabled;
+
+ lun_id_t lun;
+} ocs_tgt_resource_t;
+
+/* Common SCSI Domain structure declarations */
+
+typedef struct {
+} ocs_scsi_tgt_domain_t;
+
+typedef struct {
+} ocs_scsi_ini_domain_t;
+
+/* Common SCSI SLI port structure declarations */
+
+typedef struct {
+} ocs_scsi_tgt_sport_t;
+
+typedef struct {
+} ocs_scsi_ini_sport_t;
+
+/* Common IO structure declarations */
+
+typedef enum {
+ OCS_CAM_IO_FREE, /* IO unused (SIM) */
+ OCS_CAM_IO_COMMAND, /* ATIO returned to BE (CTL) */
+ OCS_CAM_IO_DATA, /* data phase (SIM) */
+ OCS_CAM_IO_DATA_DONE, /* CTIO returned to BE (CTL) */
+ OCS_CAM_IO_RESP, /* send response (SIM) */
+ OCS_CAM_IO_MAX
+} ocs_cam_io_state_t;
+
+typedef struct {
+ bus_dmamap_t dmap;
+ uint64_t lun; /* target_lun */
+ void *app; /**< application-specific pointer */
+ ocs_cam_io_state_t state;
+ bool sendresp;
+ uint32_t flags;
+#define OCS_CAM_IO_F_DMAPPED BIT(0) /* associated buffer bus_dmamap'd */
+#define OCS_CAM_IO_F_ABORT_RECV BIT(1) /* received ABORT TASK */
+#define OCS_CAM_IO_F_ABORT_DEV BIT(2) /* abort WQE pending */
+#define OCS_CAM_IO_F_ABORT_TMF BIT(3) /* TMF response sent */
+#define OCS_CAM_IO_F_ABORT_NOTIFY BIT(4) /* XPT_NOTIFY sent to CTL */
+#define OCS_CAM_IO_F_ABORT_CAM BIT(5) /* received ABORT or CTIO from CAM */
+} ocs_scsi_tgt_io_t;
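+
+/*
+ * Assumed flag progression for an aborted target IO (inferred from the
+ * usage in ocs_cam.c, not a normative contract): OCS_CAM_IO_F_ABORT_RECV
+ * is set when an ABORT TASK arrives from the wire, ABORT_NOTIFY while an
+ * XPT_NOTIFY is outstanding to CTL, and ABORT_CAM once CAM hands the
+ * exchange back (via XPT_ABORT or a CTIO); the IO is freed only after
+ * both the wire side and CAM have let go.
+ */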
+
+typedef struct {
+} ocs_scsi_ini_io_t;
+
+struct ocs_lun_crn {
+ uint64_t lun; /* target_lun */
+ uint8_t crnseed; /* next command reference number */
+};
+
+/* Common NODE structure declarations */
+typedef struct {
+ struct ocs_lun_crn *lun_crn[OCS_MAX_LUN];
+} ocs_scsi_ini_node_t;
+
+typedef struct {
+ uint32_t busy_sent;
+} ocs_scsi_tgt_node_t;
+
+extern int32_t ocs_cam_attach(ocs_t *ocs);
+extern int32_t ocs_cam_detach(ocs_t *ocs);
+
+#endif /* __OCS_CAM_H__ */
+
diff --git a/sys/dev/ocs_fc/ocs_common.h b/sys/dev/ocs_fc/ocs_common.h
new file mode 100644
index 000000000000..0d34a2d14704
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs_common.h
@@ -0,0 +1,424 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/**
+ * @file
+ * Contains declarations shared between the alex layer and HW/SLI4
+ */
+
+
+#if !defined(__OCS_COMMON_H__)
+#define __OCS_COMMON_H__
+
+#include "ocs_sm.h"
+#include "ocs_utils.h"
+
+#define OCS_CTRLMASK_XPORT_DISABLE_AUTORSP_TSEND (1U << 0)
+#define OCS_CTRLMASK_XPORT_DISABLE_AUTORSP_TRECEIVE (1U << 1)
+#define OCS_CTRLMASK_XPORT_ENABLE_TARGET_RSCN (1U << 3)
+#define OCS_CTRLMASK_TGT_ALWAYS_VERIFY_DIF (1U << 4)
+#define OCS_CTRLMASK_TGT_SET_DIF_REF_TAG_CRC (1U << 5)
+#define OCS_CTRLMASK_TEST_CHAINED_SGLS (1U << 6)
+#define OCS_CTRLMASK_ISCSI_ISNS_ENABLE (1U << 7)
+#define OCS_CTRLMASK_ENABLE_FABRIC_EMULATION (1U << 8)
+#define OCS_CTRLMASK_INHIBIT_INITIATOR (1U << 9)
+#define OCS_CTRLMASK_CRASH_RESET (1U << 10)
+
+#define enable_target_rscn(ocs) \
+ ((ocs->ctrlmask & OCS_CTRLMASK_XPORT_ENABLE_TARGET_RSCN) != 0)
+
+/* Used for error injection testing. */
+typedef enum {
+ NO_ERR_INJECT = 0,
+ INJECT_DROP_CMD,
+ INJECT_FREE_DROPPED,
+ INJECT_DROP_DATA,
+ INJECT_DROP_RESP,
+ INJECT_DELAY_CMD,
+} ocs_err_injection_e;
+
+#define MAX_OCS_DEVICES 64
+
+typedef enum {OCS_XPORT_FC, OCS_XPORT_ISCSI} ocs_xport_e;
+
+#define OCS_SERVICE_PARMS_LENGTH 0x74
+#define OCS_DISPLAY_NAME_LENGTH 32
+#define OCS_DISPLAY_BUS_INFO_LENGTH 16
+
+#define OCS_WWN_LENGTH 32
+
+typedef struct ocs_hw_s ocs_hw_t;
+typedef struct ocs_domain_s ocs_domain_t;
+typedef struct ocs_sli_port_s ocs_sli_port_t;
+typedef struct ocs_sli_port_s ocs_sport_t;
+typedef struct ocs_remote_node_s ocs_remote_node_t;
+typedef struct ocs_remote_node_group_s ocs_remote_node_group_t;
+typedef struct ocs_node_s ocs_node_t;
+typedef struct ocs_io_s ocs_io_t;
+typedef struct ocs_xport_s ocs_xport_t;
+typedef struct ocs_node_cb_s ocs_node_cb_t;
+typedef struct ocs_ns_s ocs_ns_t;
+
+/* Node group data structure */
+typedef struct ocs_node_group_dir_s ocs_node_group_dir_t;
+
+#include "ocs_cam.h"
+
+/*--------------------------------------------------
+ * Shared HW/SLI objects
+ *
+ * Several objects used by the HW/SLI layers are communal; part of the
+ * object is for the sole use of the lower layers, but implementations
+ * are free to add their own fields if desired.
+ */
+
+/**
+ * @brief Description of discovered Fabric Domain
+ *
+ * @note Not all fields are valid for all media (FC/Ethernet).
+ */
+typedef struct ocs_domain_record_s {
+ uint32_t index; /**< FCF table index (used in REG_FCFI) */
+ uint32_t priority; /**< FCF reported priority */
+ uint8_t address[6]; /**< Switch MAC/FC address */
+ uint8_t wwn[8]; /**< Switch WWN */
+ union {
+ uint8_t vlan[512]; /**< bitmap of valid VLAN IDs */
+ uint8_t loop[128]; /**< FC-AL position map */
+ } map;
+ uint32_t speed; /**< link speed */
+ uint32_t fc_id; /**< our port's fc_id */
+ uint32_t is_fc:1, /**< Connection medium is native FC */
+ is_ethernet:1, /**< Connection medium is ethernet (FCoE) */
+ is_loop:1, /**< Topology is FC-AL */
+ is_nport:1, /**< Topology is N-PORT */
+ :28;
+} ocs_domain_record_t;
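+
+/*
+ * Illustrative sketch: map.vlan is a 512-byte bitmap covering the 4096
+ * possible VLAN IDs.  Assuming LSB-first bit order within each byte (an
+ * assumption, not specified by this header), membership of VLAN id vid
+ * would be tested as:
+ *
+ * if (drec->map.vlan[vid / 8] & (1 << (vid % 8)))
+ *         vid is a valid VLAN for this domain;
+ */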
+
+/**
+ * @brief Node group directory entry
+ */
+struct ocs_node_group_dir_s {
+ uint32_t instance_index; /**< instance index */
+ ocs_sport_t *sport; /**< pointer to sport */
+ uint8_t service_params[OCS_SERVICE_PARMS_LENGTH]; /**< Login parameters */
+ ocs_list_link_t link; /**< linked list link */
+ ocs_list_t node_group_list; /**< linked list of node groups */
+ uint32_t node_group_list_count; /**< current number of elements on the node group list */
+ uint32_t next_idx; /**< index of the next node group in the list */
+};
+
+typedef enum {
+ OCS_SPORT_TOPOLOGY_UNKNOWN=0,
+ OCS_SPORT_TOPOLOGY_FABRIC,
+ OCS_SPORT_TOPOLOGY_P2P,
+ OCS_SPORT_TOPOLOGY_LOOP,
+} ocs_sport_topology_e;
+
+/**
+ * @brief SLI Port object
+ *
+ * The SLI Port object represents the connection between the driver and the
+ * FC/FCoE domain. In some topologies / hardware, it is possible to have
+ * multiple connections to the domain via different WWN. Each would require
+ * a separate SLI port object.
+ */
+struct ocs_sli_port_s {
+
+ ocs_t *ocs; /**< pointer to ocs */
+ uint32_t tgt_id; /**< target id */
+ uint32_t index; /**< ??? */
+ uint32_t instance_index;
+ char display_name[OCS_DISPLAY_NAME_LENGTH]; /**< sport display name */
+ ocs_domain_t *domain; /**< current fabric domain */
+ uint32_t is_vport:1; /**< this SPORT is a virtual port */
+ uint64_t wwpn; /**< WWPN from HW (host endian) */
+ uint64_t wwnn; /**< WWNN from HW (host endian) */
+ ocs_list_t node_list; /**< list of nodes */
+ ocs_scsi_ini_sport_t ini_sport; /**< initiator backend private sport data */
+ ocs_scsi_tgt_sport_t tgt_sport; /**< target backend private sport data */
+ void *tgt_data; /**< target backend private pointer */
+ void *ini_data; /**< initiator backend private pointer */
+ ocs_mgmt_functions_t *mgmt_functions;
+
+ /*
+ * Members private to HW/SLI
+ */
+ ocs_sm_ctx_t ctx; /**< state machine context */
+ ocs_hw_t *hw; /**< pointer to HW */
+ uint32_t indicator; /**< VPI */
+ uint32_t fc_id; /**< FC address */
+ ocs_dma_t dma; /**< memory for Service Parameters */
+
+ uint8_t wwnn_str[OCS_WWN_LENGTH]; /**< WWN (ASCII) */
+ uint64_t sli_wwpn; /**< WWPN (wire endian) */
+ uint64_t sli_wwnn; /**< WWNN (wire endian) */
+ uint32_t sm_free_req_pending:1; /**< Free request received while waiting for attach response */
+
+ /*
+ * Implementation specific fields allowed here
+ */
+ ocs_sm_ctx_t sm; /**< sport context state machine */
+ sparse_vector_t lookup; /**< fc_id to node lookup object */
+ ocs_list_link_t link;
+ uint32_t enable_ini:1, /**< SCSI initiator enabled for this node */
+ enable_tgt:1, /**< SCSI target enabled for this node */
+ enable_rscn:1, /**< This SPORT will be expecting RSCN */
+ shutting_down:1, /**< sport in process of shutting down */
+ p2p_winner:1; /**< TRUE if we're the point-to-point winner */
+ ocs_sport_topology_e topology; /**< topology: fabric/p2p/unknown */
+ uint8_t service_params[OCS_SERVICE_PARMS_LENGTH]; /**< Login parameters */
+ uint32_t p2p_remote_port_id; /**< Remote node's port id for p2p */
+ uint32_t p2p_port_id; /**< our port's id */
+
+ /* List of remote node group directory entries (used by high login mode) */
+ ocs_lock_t node_group_lock;
+ uint32_t node_group_dir_next_instance; /**< HLM next node group directory instance value */
+ uint32_t node_group_next_instance; /**< HLM next node group instance value */
+ ocs_list_t node_group_dir_list;
+};
+
+/**
+ * @brief Fibre Channel domain object
+ *
+ * This object is a container for the various SLI components needed
+ * to connect to the domain of a FC or FCoE switch
+ */
+struct ocs_domain_s {
+
+ ocs_t *ocs; /**< pointer back to ocs */
+ uint32_t instance_index; /**< unique instance index value */
+ char display_name[OCS_DISPLAY_NAME_LENGTH]; /**< Node display name */
+ ocs_list_t sport_list; /**< linked list of SLI ports */
+ ocs_scsi_ini_domain_t ini_domain; /**< initiator backend private domain data */
+ ocs_scsi_tgt_domain_t tgt_domain; /**< target backend private domain data */
+ ocs_mgmt_functions_t *mgmt_functions;
+
+ /* Declarations private to HW/SLI */
+ ocs_hw_t *hw; /**< pointer to HW */
+ ocs_sm_ctx_t sm; /**< state machine context */
+ uint32_t fcf; /**< FC Forwarder table index */
+ uint32_t fcf_indicator; /**< FCFI */
+ uint32_t vlan_id; /**< VLAN tag for this domain */
+ uint32_t indicator; /**< VFI */
+ ocs_dma_t dma; /**< memory for Service Parameters */
+ uint32_t req_rediscover_fcf:1; /**< TRUE if FCF rediscovery is needed (in
+ * response to a Vlink Clear async event) */
+
+ /* Declarations private to FC transport */
+ uint64_t fcf_wwn; /**< WWN for FCF/switch */
+ ocs_list_link_t link;
+ ocs_sm_ctx_t drvsm; /**< driver domain sm context */
+ uint32_t attached:1, /**< set true after attach completes */
+ is_fc:1, /**< is FC */
+ is_loop:1, /**< is loop topology */
+ is_nlport:1, /**< is public loop */
+ domain_found_pending:1, /**< A domain found is pending, drec is updated */
+ req_domain_free:1, /**< True if domain object should be free'd */
+ req_accept_frames:1, /**< set in domain state machine to enable frames */
+ domain_notify_pend:1; /**< Set in domain SM to avoid duplicate node event post */
+ ocs_domain_record_t pending_drec; /**< Pending drec if a domain found is pending */
+ uint8_t service_params[OCS_SERVICE_PARMS_LENGTH]; /**< any sports service parameters */
+ uint8_t flogi_service_params[OCS_SERVICE_PARMS_LENGTH]; /**< Fabric/P2p service parameters from FLOGI */
+ uint8_t femul_enable; /**< TRUE if Fabric Emulation mode is enabled */
+
+ /* Declarations shared with back-ends */
+ sparse_vector_t lookup; /**< d_id to node lookup object */
+ ocs_lock_t lookup_lock;
+
+ ocs_sli_port_t *sport; /**< Pointer to first (physical) SLI port (also at the head of sport_list) */
+ uint32_t sport_instance_count; /**< count of sport instances */
+
+ /* Fabric Emulation */
+ ocs_bitmap_t *portid_pool;
+ ocs_ns_t *ocs_ns; /**< directory (name) services data */
+};
+
+/**
+ * @brief Remote Node object
+ *
+ * This object represents a connection between the SLI port and another
+ * Nx_Port on the fabric. Note this can be either a well known port such
+ * as a F_Port (i.e. ff:ff:fe) or another N_Port.
+ */
+struct ocs_remote_node_s {
+ /*
+ * Members private to HW/SLI
+ */
+ uint32_t indicator; /**< RPI */
+ uint32_t index;
+ uint32_t fc_id; /**< FC address */
+
+ uint32_t attached:1, /**< true if attached */
+ node_group:1, /**< true if in node group */
+ free_group:1; /**< true if the node group should be free'd */
+
+ ocs_sli_port_t *sport; /**< associated SLI port */
+
+ /*
+ * Implementation specific fields allowed here
+ */
+ void *node; /**< associated node */
+};
+
+struct ocs_remote_node_group_s {
+ /*
+ * Members private to HW/SLI
+ */
+ uint32_t indicator; /**< RPI */
+ uint32_t index;
+
+ /*
+ * Implementation specific fields allowed here
+ */
+
+
+ uint32_t instance_index; /**< instance index */
+ ocs_node_group_dir_t *node_group_dir; /**< pointer to the node group directory */
+ ocs_list_link_t link; /**< linked list link */
+};
+
+typedef enum {
+ OCS_NODE_SHUTDOWN_DEFAULT = 0,
+ OCS_NODE_SHUTDOWN_EXPLICIT_LOGO,
+ OCS_NODE_SHUTDOWN_IMPLICIT_LOGO,
+} ocs_node_shutd_rsn_e;
+
+typedef enum {
+ OCS_NODE_SEND_LS_ACC_NONE = 0,
+ OCS_NODE_SEND_LS_ACC_PLOGI,
+ OCS_NODE_SEND_LS_ACC_PRLI,
+} ocs_node_send_ls_acc_e;
+
+/**
+ * @brief FC Node object
+ *
+ */
+struct ocs_node_s {
+
+ ocs_t *ocs; /**< pointer back to ocs structure */
+ uint32_t instance_index; /**< unique instance index value */
+ char display_name[OCS_DISPLAY_NAME_LENGTH]; /**< Node display name */
+ ocs_sport_t *sport;
+ uint32_t hold_frames:1; /**< hold incoming frames if true */
+ ocs_rlock_t lock; /**< node wide lock */
+ ocs_lock_t active_ios_lock; /**< active SCSI and XPORT I/O's for this node */
+ ocs_list_t active_ios; /**< active I/O's for this node */
+ uint32_t max_wr_xfer_size; /**< Max write IO size per phase for the transport */
+ ocs_scsi_ini_node_t ini_node; /**< backend initiator private node data */
+ ocs_scsi_tgt_node_t tgt_node; /**< backend target private node data */
+ ocs_mgmt_functions_t *mgmt_functions;
+
+ /* Declarations private to HW/SLI */
+ ocs_remote_node_t rnode; /**< Remote node */
+
+ /* Declarations private to FC transport */
+ ocs_sm_ctx_t sm; /**< state machine context */
+ uint32_t evtdepth; /**< current event posting nesting depth */
+ uint32_t req_free:1, /**< this node is to be free'd */
+ attached:1, /**< node is attached (REGLOGIN complete) */
+ fcp_enabled:1, /**< node is enabled to handle FCP */
+ rscn_pending:1, /**< for name server node RSCN is pending */
+ send_plogi:1, /**< if initiator, send PLOGI at node initialization */
+ send_plogi_acc:1,/**< send PLOGI accept, upon completion of node attach */
+ io_alloc_enabled:1, /**< TRUE if ocs_scsi_io_alloc() and ocs_els_io_alloc() are enabled */
+ sent_prli:1; /**< if initiator, sent prli. */
+ ocs_node_send_ls_acc_e send_ls_acc; /**< type of LS acc to send */
+ ocs_io_t *ls_acc_io; /**< SCSI IO for LS acc */
+ uint32_t ls_acc_oxid; /**< OX_ID for pending accept */
+ uint32_t ls_acc_did; /**< D_ID for pending accept */
+ ocs_node_shutd_rsn_e shutdown_reason;/**< reason for node shutdown */
+ ocs_dma_t sparm_dma_buf; /**< service parameters buffer */
+ uint8_t service_params[OCS_SERVICE_PARMS_LENGTH]; /**< plogi/acc frame from remote device */
+ ocs_lock_t pend_frames_lock; /**< lock for inbound pending frames list */
+ ocs_list_t pend_frames; /**< inbound pending frames list */
+ uint32_t pend_frames_processed; /**< count of frames processed in hold frames interval */
+ uint32_t ox_id_in_use; /**< used to verify one-at-a-time use of ox_id */
+ uint32_t els_retries_remaining; /**< for ELS, number of retries remaining */
+ uint32_t els_req_cnt; /**< number of outstanding ELS requests */
+ uint32_t els_cmpl_cnt; /**< number of outstanding ELS completions */
+ uint32_t abort_cnt; /**< Abort counter for debugging purpose */
+
+ char current_state_name[OCS_DISPLAY_NAME_LENGTH]; /**< current node state */
+ char prev_state_name[OCS_DISPLAY_NAME_LENGTH]; /**< previous node state */
+ ocs_sm_event_t current_evt; /**< current event */
+ ocs_sm_event_t prev_evt; /**< previous event */
+ uint32_t targ:1, /**< node is target capable */
+ init:1, /**< node is initiator capable */
+ refound:1, /**< Handle node refound case when node is being deleted */
+ fcp2device:1, /* FCP2 device */
+ reserved:4,
+ fc_type:8;
+ ocs_list_t els_io_pend_list; /**< list of pending (not yet processed) ELS IOs */
+ ocs_list_t els_io_active_list; /**< list of active (processed) ELS IOs */
+
+ ocs_sm_function_t nodedb_state; /**< Node debugging, saved state */
+
+ ocs_timer_t gidpt_delay_timer; /**< GIDPT delay timer */
+ time_t time_last_gidpt_msec; /**< Start time of last target RSCN GIDPT */
+
+ /* WWN */
+ char wwnn[OCS_WWN_LENGTH]; /**< remote port WWN (uses iSCSI naming) */
+ char wwpn[OCS_WWN_LENGTH]; /**< remote port WWN (uses iSCSI naming) */
+
+ /* Statistics */
+ uint32_t chained_io_count; /**< count of IOs with chained SGL's */
+
+ ocs_list_link_t link; /**< node list link */
+
+ ocs_remote_node_group_t *node_group; /**< pointer to node group (if HLM enabled) */
+};
+
+/**
+ * @brief Virtual port specification
+ *
+ * Collection of the information required to restore a virtual port across
+ * link events
+ */
+
+typedef struct ocs_vport_spec_s ocs_vport_spec_t;
+struct ocs_vport_spec_s {
+ uint32_t domain_instance; /**< instance index of this domain for the sport */
+ uint64_t wwnn; /**< node name */
+ uint64_t wwpn; /**< port name */
+ uint32_t fc_id; /**< port id */
+ uint32_t enable_tgt:1, /**< port is a target */
+ enable_ini:1; /**< port is an initiator */
+ ocs_list_link_t link; /**< link */
+ void *tgt_data; /**< target backend pointer */
+ void *ini_data; /**< initiator backend pointer */
+ ocs_sport_t *sport; /**< Used to match record after attaching for update */
+};
+
+
+#endif /* __OCS_COMMON_H__*/
diff --git a/sys/dev/ocs_fc/ocs_ddump.c b/sys/dev/ocs_fc/ocs_ddump.c
new file mode 100644
index 000000000000..4d452307480a
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs_ddump.c
@@ -0,0 +1,881 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/**
+ * @file
+ * generate FC ddump
+ *
+ */
+
+#include "ocs.h"
+#include "ocs_ddump.h"
+
+#define DEFAULT_SAVED_DUMP_SIZE (4*1024*1024)
+
+void hw_queue_ddump(ocs_textbuf_t *textbuf, ocs_hw_t *hw);
+
+/**
+ * @brief Generate sli4 queue ddump
+ *
+ * Generates sli4 queue ddump data
+ *
+ * @param textbuf pointer to text buffer
+ * @param name name of SLI4 queue
+ * @param hw pointer to HW context
+ * @param q pointer to SLI4 queues array
+ * @param q_count count of SLI4 queues
+ * @param qentries number of SLI4 queue entries to dump
+ *
+ * @return none
+ */
+
+static void
+ocs_ddump_sli4_queue(ocs_textbuf_t *textbuf, const char *name, ocs_hw_t *hw, sli4_queue_t *q, uint32_t q_count, uint32_t qentries)
+{
+ uint32_t i;
+
+ for (i = 0; i < q_count; i ++, q ++) {
+ ocs_ddump_section(textbuf, name, i);
+ ocs_ddump_value(textbuf, "index", "%d", q->index);
+ ocs_ddump_value(textbuf, "size", "%d", q->size);
+ ocs_ddump_value(textbuf, "length", "%d", q->length);
+ ocs_ddump_value(textbuf, "n_posted", "%d", q->n_posted);
+ ocs_ddump_value(textbuf, "id", "%d", q->id);
+ ocs_ddump_value(textbuf, "type", "%d", q->type);
+ ocs_ddump_value(textbuf, "proc_limit", "%d", q->proc_limit);
+ ocs_ddump_value(textbuf, "posted_limit", "%d", q->posted_limit);
+ ocs_ddump_value(textbuf, "max_num_processed", "%d", q->max_num_processed);
+ ocs_ddump_value(textbuf, "max_process_time", "%ld", (unsigned long)q->max_process_time);
+ ocs_ddump_value(textbuf, "virt_addr", "%p", q->dma.virt);
+ ocs_ddump_value(textbuf, "phys_addr", "%lx", (unsigned long)q->dma.phys);
+
+ /* queue-specific information */
+ switch (q->type) {
+ case SLI_QTYPE_MQ:
+ ocs_ddump_value(textbuf, "r_idx", "%d", q->u.r_idx);
+ break;
+ case SLI_QTYPE_CQ:
+ ocs_ddump_value(textbuf, "is_mq", "%d", q->u.flag.is_mq);
+ break;
+ case SLI_QTYPE_WQ:
+ break;
+ case SLI_QTYPE_RQ: {
+ uint32_t i;
+ uint32_t j;
+ uint32_t rqe_count = 0;
+ hw_rq_t *rq;
+
+ ocs_ddump_value(textbuf, "is_hdr", "%d", q->u.flag.is_hdr);
+ ocs_ddump_value(textbuf, "rq_batch", "%d", q->u.flag.rq_batch);
+
+ /* loop through RQ tracker to see how many RQEs were produced */
+ for (i = 0; i < hw->hw_rq_count; i++) {
+ rq = hw->hw_rq[i];
+ for (j = 0; j < rq->entry_count; j++) {
+ if (rq->rq_tracker[j] != NULL) {
+ rqe_count++;
+ }
+ }
+ }
+ ocs_ddump_value(textbuf, "rqes_produced", "%d", rqe_count);
+ break;
+ }
+ }
+ ocs_ddump_queue_entries(textbuf, q->dma.virt, q->size, q->length,
+ ((q->type == SLI_QTYPE_MQ) ? q->u.r_idx : q->index),
+ qentries);
+ ocs_ddump_endsection(textbuf, name, i);
+ }
+}
+
+
+/**
+ * @brief Generate SLI4 queue configuration ddump
+ *
+ * Generates ddump data for the configuration limits of one SLI4 queue type
+ *
+ * @param textbuf pointer to text buffer
+ * @param sli4 pointer to SLI context
+ * @param qtype SLI4 queue type
+ *
+ * @return none
+ */
+
+static void
+ocs_ddump_sli_q_fields(ocs_textbuf_t *textbuf, sli4_t *sli4, sli4_qtype_e qtype)
+{
+ char *q_desc;
+
+ switch(qtype) {
+ case SLI_QTYPE_EQ: q_desc = "EQ"; break;
+ case SLI_QTYPE_CQ: q_desc = "CQ"; break;
+ case SLI_QTYPE_MQ: q_desc = "MQ"; break;
+ case SLI_QTYPE_WQ: q_desc = "WQ"; break;
+ case SLI_QTYPE_RQ: q_desc = "RQ"; break;
+ default: q_desc = "unknown"; break;
+ }
+
+ ocs_ddump_section(textbuf, q_desc, qtype);
+
+ ocs_ddump_value(textbuf, "max_qcount", "%d", sli4->config.max_qcount[qtype]);
+ ocs_ddump_value(textbuf, "max_qentries", "%d", sli4->config.max_qentries[qtype]);
+ ocs_ddump_value(textbuf, "qpage_count", "%d", sli4->config.qpage_count[qtype]);
+ ocs_ddump_endsection(textbuf, q_desc, qtype);
+}
+
+
+/**
+ * @brief Generate SLI4 ddump
+ *
+ * Generates sli4 ddump
+ *
+ * @param textbuf pointer to text buffer
+ * @param sli4 pointer to SLI context
+ *
+ * @return none
+ */
+
+static void
+ocs_ddump_sli(ocs_textbuf_t *textbuf, sli4_t *sli4)
+{
+ sli4_sgl_chaining_params_t *cparams = &sli4->config.sgl_chaining_params;
+ const char *p;
+
+ ocs_ddump_section(textbuf, "sli4", 0);
+
+ ocs_ddump_value(textbuf, "sli_rev", "%d", sli4->sli_rev);
+ ocs_ddump_value(textbuf, "sli_family", "%d", sli4->sli_family);
+ ocs_ddump_value(textbuf, "if_type", "%d", sli4->if_type);
+
+ switch(sli4->asic_type) {
+ case SLI4_ASIC_TYPE_BE3: p = "BE3"; break;
+ case SLI4_ASIC_TYPE_SKYHAWK: p = "Skyhawk"; break;
+ case SLI4_ASIC_TYPE_LANCER: p = "Lancer"; break;
+ case SLI4_ASIC_TYPE_LANCERG6: p = "LancerG6"; break;
+ default: p = "unknown"; break;
+ }
+ ocs_ddump_value(textbuf, "asic_type", "%s", p);
+
+ switch(sli4->asic_rev) {
+ case SLI4_ASIC_REV_FPGA: p = "fpga"; break;
+ case SLI4_ASIC_REV_A0: p = "A0"; break;
+ case SLI4_ASIC_REV_A1: p = "A1"; break;
+ case SLI4_ASIC_REV_A2: p = "A2"; break;
+ case SLI4_ASIC_REV_A3: p = "A3"; break;
+ case SLI4_ASIC_REV_B0: p = "B0"; break;
+ case SLI4_ASIC_REV_C0: p = "C0"; break;
+ case SLI4_ASIC_REV_D0: p = "D0"; break;
+ default: p = "unknown"; break;
+ }
+ ocs_ddump_value(textbuf, "asic_rev", "%s", p);
+
+ ocs_ddump_value(textbuf, "e_d_tov", "%d", sli4->config.e_d_tov);
+ ocs_ddump_value(textbuf, "r_a_tov", "%d", sli4->config.r_a_tov);
+ ocs_ddump_value(textbuf, "link_module_type", "%d", sli4->config.link_module_type);
+ ocs_ddump_value(textbuf, "rq_batch", "%d", sli4->config.rq_batch);
+ ocs_ddump_value(textbuf, "topology", "%d", sli4->config.topology);
+ ocs_ddump_value(textbuf, "wwpn", "%02x%02x%02x%02x%02x%02x%02x%02x",
+ sli4->config.wwpn[0],
+ sli4->config.wwpn[1],
+ sli4->config.wwpn[2],
+ sli4->config.wwpn[3],
+ sli4->config.wwpn[4],
+ sli4->config.wwpn[5],
+ sli4->config.wwpn[6],
+ sli4->config.wwpn[7]);
+ ocs_ddump_value(textbuf, "wwnn", "%02x%02x%02x%02x%02x%02x%02x%02x",
+ sli4->config.wwnn[0],
+ sli4->config.wwnn[1],
+ sli4->config.wwnn[2],
+ sli4->config.wwnn[3],
+ sli4->config.wwnn[4],
+ sli4->config.wwnn[5],
+ sli4->config.wwnn[6],
+ sli4->config.wwnn[7]);
+ ocs_ddump_value(textbuf, "fw_rev0", "%d", sli4->config.fw_rev[0]);
+ ocs_ddump_value(textbuf, "fw_rev1", "%d", sli4->config.fw_rev[1]);
+ ocs_ddump_value(textbuf, "fw_name0", "%s", (char*)sli4->config.fw_name[0]);
+ ocs_ddump_value(textbuf, "fw_name1", "%s", (char*)sli4->config.fw_name[1]);
+ ocs_ddump_value(textbuf, "hw_rev0", "%x", sli4->config.hw_rev[0]);
+ ocs_ddump_value(textbuf, "hw_rev1", "%x", sli4->config.hw_rev[1]);
+ ocs_ddump_value(textbuf, "hw_rev2", "%x", sli4->config.hw_rev[2]);
+ ocs_ddump_value(textbuf, "sge_supported_length", "%x", sli4->config.sge_supported_length);
+ ocs_ddump_value(textbuf, "sgl_page_sizes", "%x", sli4->config.sgl_page_sizes);
+ ocs_ddump_value(textbuf, "max_sgl_pages", "%x", sli4->config.max_sgl_pages);
+ ocs_ddump_value(textbuf, "high_login_mode", "%x", sli4->config.high_login_mode);
+ ocs_ddump_value(textbuf, "sgl_pre_registered", "%x", sli4->config.sgl_pre_registered);
+ ocs_ddump_value(textbuf, "sgl_pre_registration_required", "%x", sli4->config.sgl_pre_registration_required);
+
+ ocs_ddump_value(textbuf, "sgl_chaining_capable", "%x", cparams->chaining_capable);
+ ocs_ddump_value(textbuf, "frag_num_field_offset", "%x", cparams->frag_num_field_offset);
+ ocs_ddump_value(textbuf, "frag_num_field_mask", "%016llx", (unsigned long long)cparams->frag_num_field_mask);
+ ocs_ddump_value(textbuf, "sgl_index_field_offset", "%x", cparams->sgl_index_field_offset);
+ ocs_ddump_value(textbuf, "sgl_index_field_mask", "%016llx", (unsigned long long)cparams->sgl_index_field_mask);
+ ocs_ddump_value(textbuf, "chain_sge_initial_value_lo", "%x", cparams->chain_sge_initial_value_lo);
+ ocs_ddump_value(textbuf, "chain_sge_initial_value_hi", "%x", cparams->chain_sge_initial_value_hi);
+
+ ocs_ddump_value(textbuf, "max_vfi", "%d", sli_get_max_rsrc(sli4, SLI_RSRC_FCOE_VFI));
+ ocs_ddump_value(textbuf, "max_vpi", "%d", sli_get_max_rsrc(sli4, SLI_RSRC_FCOE_VPI));
+ ocs_ddump_value(textbuf, "max_rpi", "%d", sli_get_max_rsrc(sli4, SLI_RSRC_FCOE_RPI));
+ ocs_ddump_value(textbuf, "max_xri", "%d", sli_get_max_rsrc(sli4, SLI_RSRC_FCOE_XRI));
+ ocs_ddump_value(textbuf, "max_fcfi", "%d", sli_get_max_rsrc(sli4, SLI_RSRC_FCOE_FCFI));
+
+ ocs_ddump_sli_q_fields(textbuf, sli4, SLI_QTYPE_EQ);
+ ocs_ddump_sli_q_fields(textbuf, sli4, SLI_QTYPE_CQ);
+ ocs_ddump_sli_q_fields(textbuf, sli4, SLI_QTYPE_MQ);
+ ocs_ddump_sli_q_fields(textbuf, sli4, SLI_QTYPE_WQ);
+ ocs_ddump_sli_q_fields(textbuf, sli4, SLI_QTYPE_RQ);
+
+ ocs_ddump_endsection(textbuf, "sli4", 0);
+}
+
+
+/**
+ * @brief Dump HW IO
+ *
+ * Dumps the state of an HW IO object.
+ *
+ * @param textbuf pointer to text buffer
+ * @param io pointer to HW IO object
+ *
+ * @return none
+ */
+
+static void
+ocs_ddump_hw_io(ocs_textbuf_t *textbuf, ocs_hw_io_t *io)
+{
+ ocs_assert(io);
+
+ ocs_ddump_section(textbuf, "hw_io", io->indicator);
+
+ ocs_ddump_value(textbuf, "state", "%d", io->state);
+ ocs_ddump_value(textbuf, "xri", "0x%x", io->indicator);
+ ocs_ddump_value(textbuf, "tag", "0x%x", io->reqtag);
+ ocs_ddump_value(textbuf, "abort_reqtag", "0x%x", io->abort_reqtag);
+ ocs_ddump_value(textbuf, "ref_count", "%d", ocs_ref_read_count(&io->ref));
+
+ /* just to make it obvious, display the abort-in-progress flag */
+ ocs_ddump_value(textbuf, "abort", "0x%x", io->abort_in_progress);
+ ocs_ddump_value(textbuf, "wq_index", "%d", (io->wq == NULL ? 0xffff : io->wq->instance));
+ ocs_ddump_value(textbuf, "type", "%d", io->type);
+ ocs_ddump_value(textbuf, "xbusy", "%d", io->xbusy);
+ ocs_ddump_value(textbuf, "active_wqe_link", "%d", ocs_list_on_list(&io->wqe_link));
+ ocs_ddump_value(textbuf, "def_sgl_count", "%d", io->def_sgl_count);
+ ocs_ddump_value(textbuf, "n_sge", "%d", io->n_sge);
+ ocs_ddump_value(textbuf, "has_ovfl_sgl", "%s", (io->ovfl_sgl != NULL ? "TRUE" : "FALSE"));
+ ocs_ddump_value(textbuf, "has_ovfl_io", "%s", (io->ovfl_io != NULL ? "TRUE" : "FALSE"));
+
+ ocs_ddump_endsection(textbuf, "hw_io", io->indicator);
+}
+
+#if defined(OCS_DEBUG_QUEUE_HISTORY)
+
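+/*
+ * Note on the entry layout (a descriptive sketch inferred from the parser
+ * below; ocs_q_hist_ftr_t and the ocs_queue_history_* writers are
+ * authoritative): entries are read newest-word-first. The footer word
+ * (entry type plus a mask of captured words) comes first, followed by two
+ * timestamp words when timestamps are enabled, an optional qid/index word
+ * for WQE/CWQE/CXABT entries, and finally the data words selected by the
+ * footer mask. A zero mask terminates the walk.
+ */
+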
+/**
+ * @brief Generate queue history ddump
+ *
+ * @param textbuf pointer to text buffer
+ * @param q_hist Pointer to queue history object.
+ */
+static void
+ocs_ddump_queue_history(ocs_textbuf_t *textbuf, ocs_hw_q_hist_t *q_hist)
+{
+ uint32_t x;
+
+ ocs_ddump_section(textbuf, "q_hist", 0);
+ ocs_ddump_value(textbuf, "count", "%ld", OCS_Q_HIST_SIZE);
+ ocs_ddump_value(textbuf, "index", "%d", q_hist->q_hist_index);
+
+ if (q_hist->q_hist == NULL) {
+ ocs_ddump_section(textbuf, "history", 0);
+ ocs_textbuf_printf(textbuf, "No history available\n");
+ ocs_ddump_endsection(textbuf, "history", 0);
+ ocs_ddump_endsection(textbuf, "q_hist", 0);
+ return;
+ }
+
+ /* start from last entry and go backwards */
+ ocs_textbuf_printf(textbuf, "<history>\n");
+ ocs_textbuf_printf(textbuf, "(newest first):\n");
+
+ ocs_lock(&q_hist->q_hist_lock);
+ x = ocs_queue_history_prev_index(q_hist->q_hist_index);
+ do {
+ int i;
+ ocs_q_hist_ftr_t ftr;
+ uint32_t mask;
+
+ /* footer's mask indicates what words were captured */
+ ftr.word = q_hist->q_hist[x];
+ mask = ftr.s.mask;
+ i = 0;
+
+ /* if we've encountered a mask of 0, must be done */
+ if (mask == 0) {
+ break;
+ }
+
+ /* display entry type */
+ ocs_textbuf_printf(textbuf, "%s:\n",
+ ocs_queue_history_type_name(ftr.s.type));
+
+ if (ocs_queue_history_timestamp_enabled()) {
+ uint64_t tsc_value;
+ x = ocs_queue_history_prev_index(x);
+ tsc_value = ((q_hist->q_hist[x]) & 0x00000000FFFFFFFFull);
+ x = ocs_queue_history_prev_index(x);
+ tsc_value |= (((uint64_t)q_hist->q_hist[x] << 32) & 0xFFFFFFFF00000000ull);
+ ocs_textbuf_printf(textbuf, " t: %" PRIu64 "\n", tsc_value);
+ }
+
+ if (ocs_queue_history_q_info_enabled()) {
+ if (ftr.s.type == OCS_Q_HIST_TYPE_CWQE ||
+ ftr.s.type == OCS_Q_HIST_TYPE_CXABT ||
+ ftr.s.type == OCS_Q_HIST_TYPE_WQE) {
+ x = ocs_queue_history_prev_index(x);
+ ocs_textbuf_printf(textbuf, " qid=0x%x idx=0x%x\n",
+ ((q_hist->q_hist[x] >> 16) & 0xFFFF),
+ ((q_hist->q_hist[x] >> 0) & 0xFFFF));
+ }
+ }
+
+ while (mask) {
+ if ((mask & 1) && (x != q_hist->q_hist_index)){
+ /* get next word */
+ x = ocs_queue_history_prev_index(x);
+ ocs_textbuf_printf(textbuf, " [%d]=%x\n",
+ i, q_hist->q_hist[x]);
+ }
+ mask = (mask >> 1UL);
+ i++;
+ }
+
+ /* go backwards to next element */
+ x = ocs_queue_history_prev_index(x);
+ } while (x != ocs_queue_history_prev_index(q_hist->q_hist_index));
+ ocs_unlock(&q_hist->q_hist_lock);
+
+ ocs_textbuf_printf(textbuf, "</history>\n");
+ ocs_ddump_endsection(textbuf, "q_hist", 0);
+}
+#endif
+
+/**
+ * @brief Generate hw ddump
+ *
+ * Generates hw ddump
+ *
+ * @param textbuf pointer to text buffer
+ * @param hw pointer to HW context
+ * @param flags ddump flags
+ * @param qentries number of qentries to dump
+ *
+ * @return none
+ */
+
+static void
+ocs_ddump_hw(ocs_textbuf_t *textbuf, ocs_hw_t *hw, uint32_t flags, uint32_t qentries)
+{
+ ocs_t *ocs = hw->os;
+ uint32_t cnt = 0;
+ ocs_hw_io_t *io = NULL;
+ uint32_t i;
+ uint32_t j;
+ uint32_t max_rpi = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI);
+
+ ocs_assert(ocs);
+
+ ocs_ddump_section(textbuf, "hw", ocs->instance_index);
+
+ /* device specific information */
+ switch(hw->sli.if_type) {
+ case 0:
+ ocs_ddump_value(textbuf, "uerr_mask_hi", "%08x",
+ sli_reg_read(&hw->sli, SLI4_REG_UERR_MASK_HI));
+ ocs_ddump_value(textbuf, "uerr_mask_lo", "%08x",
+ sli_reg_read(&hw->sli, SLI4_REG_UERR_MASK_LO));
+ ocs_ddump_value(textbuf, "uerr_status_hi", "%08x",
+ sli_reg_read(&hw->sli, SLI4_REG_UERR_STATUS_HI));
+ ocs_ddump_value(textbuf, "uerr_status_lo", "%08x",
+ sli_reg_read(&hw->sli, SLI4_REG_UERR_STATUS_LO));
+ break;
+ case 2:
+ ocs_ddump_value(textbuf, "sliport_status", "%08x",
+ sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_STATUS));
+ ocs_ddump_value(textbuf, "sliport_error1", "%08x",
+ sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR1));
+ ocs_ddump_value(textbuf, "sliport_error2", "%08x",
+ sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR2));
+ break;
+ }
+
+ ocs_ddump_value(textbuf, "link_status", "%d", hw->link.status);
+ ocs_ddump_value(textbuf, "link_speed", "%d", hw->link.speed);
+ ocs_ddump_value(textbuf, "link_topology", "%d", hw->link.topology);
+ ocs_ddump_value(textbuf, "state", "%d", hw->state);
+ ocs_ddump_value(textbuf, "io_alloc_failed_count", "%d", ocs_atomic_read(&hw->io_alloc_failed_count));
+ ocs_ddump_value(textbuf, "n_io", "%d", hw->config.n_io);
+
+ ocs_ddump_value(textbuf, "queue_topology", "%s", hw->config.queue_topology);
+ ocs_ddump_value(textbuf, "rq_selection_policy", "%d", hw->config.rq_selection_policy);
+ ocs_ddump_value(textbuf, "rr_quanta", "%d", hw->config.rr_quanta);
+ for (i = 0; i < ARRAY_SIZE(hw->config.filter_def); i++) {
+ ocs_ddump_value(textbuf, "filter_def", "%08X", hw->config.filter_def[i]);
+ }
+ ocs_ddump_value(textbuf, "n_eq", "%d", hw->eq_count);
+ ocs_ddump_value(textbuf, "n_cq", "%d", hw->cq_count);
+ ocs_ddump_value(textbuf, "n_mq", "%d", hw->mq_count);
+ ocs_ddump_value(textbuf, "n_rq", "%d", hw->rq_count);
+ ocs_ddump_value(textbuf, "n_wq", "%d", hw->wq_count);
+ ocs_ddump_value(textbuf, "n_sgl", "%d", hw->config.n_sgl);
+
+ ocs_ddump_sli(textbuf, &hw->sli);
+
+ ocs_ddump_sli4_queue(textbuf, "wq", hw, hw->wq, hw->wq_count,
+ ((flags & OCS_DDUMP_FLAGS_WQES) ? qentries : 0));
+ ocs_ddump_sli4_queue(textbuf, "rq", hw, hw->rq, hw->rq_count,
+ ((flags & OCS_DDUMP_FLAGS_RQES) ? qentries : 0));
+ ocs_ddump_sli4_queue(textbuf, "mq", hw, hw->mq, hw->mq_count,
+ ((flags & OCS_DDUMP_FLAGS_MQES) ? qentries : 0));
+ ocs_ddump_sli4_queue(textbuf, "cq", hw, hw->cq, hw->cq_count,
+ ((flags & OCS_DDUMP_FLAGS_CQES) ? qentries : 0));
+ ocs_ddump_sli4_queue(textbuf, "eq", hw, hw->eq, hw->eq_count,
+ ((flags & OCS_DDUMP_FLAGS_EQES) ? qentries : 0));
+
+ /* dump the IO quarantine list */
+ for (i = 0; i < hw->wq_count; i++) {
+ ocs_ddump_section(textbuf, "io_quarantine", i);
+ ocs_ddump_value(textbuf, "quarantine_index", "%d", hw->hw_wq[i]->quarantine_info.quarantine_index);
+ for (j = 0; j < OCS_HW_QUARANTINE_QUEUE_DEPTH; j++) {
+ if (hw->hw_wq[i]->quarantine_info.quarantine_ios[j] != NULL) {
+ ocs_ddump_hw_io(textbuf, hw->hw_wq[i]->quarantine_info.quarantine_ios[j]);
+ }
+ }
+ ocs_ddump_endsection(textbuf, "io_quarantine", i);
+ }
+
+ ocs_ddump_section(textbuf, "workaround", ocs->instance_index);
+ ocs_ddump_value(textbuf, "fwrev", "%08llx", (unsigned long long)hw->workaround.fwrev);
+ ocs_ddump_endsection(textbuf, "workaround", ocs->instance_index);
+
+ ocs_lock(&hw->io_lock);
+ ocs_ddump_section(textbuf, "io_inuse", ocs->instance_index);
+ ocs_list_foreach(&hw->io_inuse, io) {
+ ocs_ddump_hw_io(textbuf, io);
+ }
+ ocs_ddump_endsection(textbuf, "io_inuse", ocs->instance_index);
+
+ ocs_ddump_section(textbuf, "io_wait_free", ocs->instance_index);
+ ocs_list_foreach(&hw->io_wait_free, io) {
+ ocs_ddump_hw_io(textbuf, io);
+ }
+ ocs_ddump_endsection(textbuf, "io_wait_free", ocs->instance_index);
+ ocs_ddump_section(textbuf, "io_free", ocs->instance_index);
+ ocs_list_foreach(&hw->io_free, io) {
+ if (io->xbusy) {
+ /* only display free ios if they're active */
+ ocs_ddump_hw_io(textbuf, io);
+ }
+ cnt++;
+ }
+ ocs_ddump_endsection(textbuf, "io_free", ocs->instance_index);
+ ocs_ddump_value(textbuf, "ios_free", "%d", cnt);
+
+ ocs_ddump_value(textbuf, "sec_hio_wait_count", "%d", hw->sec_hio_wait_count);
+ ocs_unlock(&hw->io_lock);
+
+ /* now check the IOs not in a list; i.e. sequence coalescing xris */
+ ocs_ddump_section(textbuf, "port_owned_ios", ocs->instance_index);
+ for (i = 0; i < hw->config.n_io; i++) {
+ io = hw->io[i];
+ if (!io)
+ continue;
+
+ if (ocs_hw_is_xri_port_owned(hw, io->indicator)) {
+ if (ocs_ref_read_count(&io->ref)) {
+ /* only display port-owned ios if they're active */
+ ocs_ddump_hw_io(textbuf, io);
+ }
+ }
+ }
+ ocs_ddump_endsection(textbuf, "port_owned_ios", ocs->instance_index);
+
+ ocs_textbuf_printf(textbuf, "<rpi_ref>");
+ for (i = 0; i < max_rpi; i++) {
+ if (ocs_atomic_read(&hw->rpi_ref[i].rpi_attached) ||
+ ocs_atomic_read(&hw->rpi_ref[i].rpi_count) ) {
+ ocs_textbuf_printf(textbuf, "[%d] att=%d cnt=%d\n", i,
+ ocs_atomic_read(&hw->rpi_ref[i].rpi_attached),
+ ocs_atomic_read(&hw->rpi_ref[i].rpi_count));
+ }
+ }
+ ocs_textbuf_printf(textbuf, "</rpi_ref>");
+
+ for (i = 0; i < hw->wq_count; i++) {
+ ocs_ddump_value(textbuf, "wq_submit", "%d", hw->tcmd_wq_submit[i]);
+ }
+ for (i = 0; i < hw->wq_count; i++) {
+ ocs_ddump_value(textbuf, "wq_complete", "%d", hw->tcmd_wq_complete[i]);
+ }
+
+ hw_queue_ddump(textbuf, hw);
+
+ ocs_ddump_endsection(textbuf, "hw", ocs->instance_index);
+
+}
+
+void
+hw_queue_ddump(ocs_textbuf_t *textbuf, ocs_hw_t *hw)
+{
+ hw_eq_t *eq;
+ hw_cq_t *cq;
+ hw_q_t *q;
+ hw_mq_t *mq;
+ hw_wq_t *wq;
+ hw_rq_t *rq;
+
+ ocs_ddump_section(textbuf, "hw_queue", 0);
+ ocs_list_foreach(&hw->eq_list, eq) {
+ ocs_ddump_section(textbuf, "eq", eq->instance);
+ ocs_ddump_value(textbuf, "queue-id", "%d", eq->queue->id);
+ OCS_STAT(ocs_ddump_value(textbuf, "use_count", "%d", eq->use_count));
+ ocs_list_foreach(&eq->cq_list, cq) {
+ ocs_ddump_section(textbuf, "cq", cq->instance);
+ ocs_ddump_value(textbuf, "queue-id", "%d", cq->queue->id);
+ OCS_STAT(ocs_ddump_value(textbuf, "use_count", "%d", cq->use_count));
+ ocs_list_foreach(&cq->q_list, q) {
+ switch(q->type) {
+ case SLI_QTYPE_MQ:
+ mq = (hw_mq_t *) q;
+ ocs_ddump_section(textbuf, "mq", mq->instance);
+ ocs_ddump_value(textbuf, "queue-id", "%d", mq->queue->id);
+ OCS_STAT(ocs_ddump_value(textbuf, "use_count", "%d", mq->use_count));
+ ocs_ddump_endsection(textbuf, "mq", mq->instance);
+ break;
+ case SLI_QTYPE_WQ:
+ wq = (hw_wq_t *) q;
+ ocs_ddump_section(textbuf, "wq", wq->instance);
+ ocs_ddump_value(textbuf, "queue-id", "%d", wq->queue->id);
+ OCS_STAT(ocs_ddump_value(textbuf, "use_count", "%d", wq->use_count));
+ ocs_ddump_value(textbuf, "wqec_count", "%d", wq->wqec_count);
+ ocs_ddump_value(textbuf, "free_count", "%d", wq->free_count);
+ OCS_STAT(ocs_ddump_value(textbuf, "wq_pending_count", "%d",
+ wq->wq_pending_count));
+ ocs_ddump_endsection(textbuf, "wq", wq->instance);
+ break;
+ case SLI_QTYPE_RQ:
+ rq = (hw_rq_t *) q;
+ ocs_ddump_section(textbuf, "rq", rq->instance);
+ OCS_STAT(ocs_ddump_value(textbuf, "use_count", "%d", rq->use_count));
+ ocs_ddump_value(textbuf, "filter_mask", "%d", rq->filter_mask);
+ if (rq->hdr != NULL) {
+ ocs_ddump_value(textbuf, "hdr-id", "%d", rq->hdr->id);
+ OCS_STAT(ocs_ddump_value(textbuf, "hdr_use_count", "%d", rq->hdr_use_count));
+ }
+ if (rq->first_burst != NULL) {
+ OCS_STAT(ocs_ddump_value(textbuf, "fb-id", "%d", rq->first_burst->id));
+ OCS_STAT(ocs_ddump_value(textbuf, "fb_use_count", "%d", rq->fb_use_count));
+ }
+ if (rq->data != NULL) {
+ OCS_STAT(ocs_ddump_value(textbuf, "payload-id", "%d", rq->data->id));
+ OCS_STAT(ocs_ddump_value(textbuf, "payload_use_count", "%d", rq->payload_use_count));
+ }
+ ocs_ddump_endsection(textbuf, "rq", rq->instance);
+ break;
+ default:
+ break;
+ }
+ }
+ ocs_ddump_endsection(textbuf, "cq", cq->instance);
+ }
+ ocs_ddump_endsection(textbuf, "eq", eq->instance);
+ }
+ ocs_ddump_endsection(textbuf, "hw_queue", 0);
+}
+
+/**
+ * @brief Initiate ddump
+ *
+ * Traverses the ocs/domain/port/node/io data structures to generate a driver
+ * dump.
+ *
+ * @param ocs pointer to device context
+ * @param textbuf pointer to text buffer
+ * @param flags ddump flags
+ * @param qentries number of queue entries to dump
+ *
+ * @return Returns 0 on success, or a negative value on failure.
+ */
+
+int
+ocs_ddump(ocs_t *ocs, ocs_textbuf_t *textbuf, uint32_t flags, uint32_t qentries)
+{
+ ocs_xport_t *xport = ocs->xport;
+ ocs_domain_t *domain;
+ uint32_t instance;
+ ocs_vport_spec_t *vport;
+ ocs_io_t *io;
+ int retval = 0;
+ uint32_t i;
+
+ ocs_ddump_startfile(textbuf);
+
+ ocs_ddump_section(textbuf, "ocs", ocs->instance_index);
+
+ ocs_ddump_section(textbuf, "ocs_os", ocs->instance_index);
+#ifdef OCS_ENABLE_NUMA_SUPPORT
+ ocs_ddump_value(textbuf, "numa_node", "%d", ocs->ocs_os.numa_node);
+#endif
+ ocs_ddump_endsection(textbuf, "ocs_os", ocs->instance_index);
+
+ ocs_ddump_value(textbuf, "drv_name", "%s", DRV_NAME);
+ ocs_ddump_value(textbuf, "drv_version", "%s", DRV_VERSION);
+ ocs_ddump_value(textbuf, "display_name", "%s", ocs->display_name);
+ ocs_ddump_value(textbuf, "enable_ini", "%d", ocs->enable_ini);
+ ocs_ddump_value(textbuf, "enable_tgt", "%d", ocs->enable_tgt);
+ ocs_ddump_value(textbuf, "nodes_count", "%d", xport->nodes_count);
+ ocs_ddump_value(textbuf, "enable_hlm", "%d", ocs->enable_hlm);
+ ocs_ddump_value(textbuf, "hlm_group_size", "%d", ocs->hlm_group_size);
+ ocs_ddump_value(textbuf, "auto_xfer_rdy_size", "%d", ocs->auto_xfer_rdy_size);
+ ocs_ddump_value(textbuf, "io_alloc_failed_count", "%d", ocs_atomic_read(&xport->io_alloc_failed_count));
+ ocs_ddump_value(textbuf, "io_active_count", "%d", ocs_atomic_read(&xport->io_active_count));
+ ocs_ddump_value(textbuf, "io_pending_count", "%d", ocs_atomic_read(&xport->io_pending_count));
+ ocs_ddump_value(textbuf, "io_total_alloc", "%d", ocs_atomic_read(&xport->io_total_alloc));
+ ocs_ddump_value(textbuf, "io_total_free", "%d", ocs_atomic_read(&xport->io_total_free));
+ ocs_ddump_value(textbuf, "io_total_pending", "%d", ocs_atomic_read(&xport->io_total_pending));
+ ocs_ddump_value(textbuf, "io_pending_recursing", "%d", ocs_atomic_read(&xport->io_pending_recursing));
+ ocs_ddump_value(textbuf, "max_isr_time_msec", "%d", ocs->max_isr_time_msec);
+ for (i = 0; i < SLI4_MAX_FCFI; i++) {
+ ocs_lock(&xport->fcfi[i].pend_frames_lock);
+ if (!ocs_list_empty(&xport->fcfi[i].pend_frames)) {
+ ocs_hw_sequence_t *frame;
+ ocs_ddump_section(textbuf, "pending_frames", i);
+ ocs_ddump_value(textbuf, "hold_frames", "%d", xport->fcfi[i].hold_frames);
+ ocs_list_foreach(&xport->fcfi[i].pend_frames, frame) {
+ fc_header_t *hdr;
+ char buf[128];
+
+ hdr = frame->header->dma.virt;
+ ocs_snprintf(buf, sizeof(buf), "%02x/%04x/%04x len %zu",
+ hdr->r_ctl, ocs_be16toh(hdr->ox_id), ocs_be16toh(hdr->rx_id),
+ frame->payload->dma.len);
+ ocs_ddump_value(textbuf, "frame", "%s", buf);
+ }
+ ocs_ddump_endsection(textbuf, "pending_frames", i);
+ }
+ ocs_unlock(&xport->fcfi[i].pend_frames_lock);
+ }
+
+ ocs_lock(&xport->io_pending_lock);
+ ocs_ddump_section(textbuf, "io_pending_list", ocs->instance_index);
+ ocs_list_foreach(&xport->io_pending_list, io) {
+ ocs_ddump_io(textbuf, io);
+ }
+ ocs_ddump_endsection(textbuf, "io_pending_list", ocs->instance_index);
+ ocs_unlock(&xport->io_pending_lock);
+
+#if defined(ENABLE_LOCK_DEBUG)
+ /* Dump the lock list */
+ ocs_ddump_section(textbuf, "locks", 0);
+ ocs_lock(&ocs->ocs_os.locklist_lock); {
+ ocs_lock_t *l;
+ uint32_t idx = 0;
+ ocs_list_foreach(&ocs->ocs_os.locklist, l) {
+ ocs_ddump_section(textbuf, "lock", idx);
+ ocs_ddump_value(textbuf, "name", "%s", l->name);
+ ocs_ddump_value(textbuf, "inuse", "%d", l->inuse);
+ ocs_ddump_value(textbuf, "caller", "%p", l->caller[0]);
+ ocs_ddump_value(textbuf, "pid", "%08x", l->pid.l);
+ ocs_ddump_endsection(textbuf, "lock", idx);
+ idx++;
+ }
+ } ocs_unlock(&ocs->ocs_os.locklist_lock);
+ ocs_ddump_endsection(textbuf, "locks", 0);
+#endif
+
+ /* Dump any pending vports */
+ if (ocs_device_lock_try(ocs) != TRUE) {
+ /* Didn't get the lock */
+ return -1;
+ }
+ instance = 0;
+ ocs_list_foreach(&xport->vport_list, vport) {
+ ocs_ddump_section(textbuf, "vport_spec", instance);
+ ocs_ddump_value(textbuf, "domain_instance", "%d", vport->domain_instance);
+ ocs_ddump_value(textbuf, "wwnn", "%llx", (unsigned long long)vport->wwnn);
+ ocs_ddump_value(textbuf, "wwpn", "%llx", (unsigned long long)vport->wwpn);
+ ocs_ddump_value(textbuf, "fc_id", "0x%x", vport->fc_id);
+ ocs_ddump_value(textbuf, "enable_tgt", "%d", vport->enable_tgt);
+ ocs_ddump_value(textbuf, "enable_ini", "%d" PRIx64, vport->enable_ini);
+ ocs_ddump_endsection(textbuf, "vport_spec", instance ++);
+ }
+ ocs_device_unlock(ocs);
+
+ /* Dump target and initiator private data */
+ ocs_scsi_ini_ddump(textbuf, OCS_SCSI_DDUMP_DEVICE, ocs);
+ ocs_scsi_tgt_ddump(textbuf, OCS_SCSI_DDUMP_DEVICE, ocs);
+
+ ocs_ddump_hw(textbuf, &ocs->hw, flags, qentries);
+
+ if (ocs_device_lock_try(ocs) != TRUE) {
+ /* Didn't get the lock */
+ return -1;
+ }
+ /* Here the device lock is held */
+ ocs_list_foreach(&ocs->domain_list, domain) {
+ retval = ocs_ddump_domain(textbuf, domain);
+ if (retval != 0) {
+ break;
+ }
+ }
+
+ /* Dump ramlog */
+ ocs_ddump_ramlog(textbuf, ocs->ramlog);
+ ocs_device_unlock(ocs);
+
+#if !defined(OCS_DEBUG_QUEUE_HISTORY)
+ ocs_ddump_section(textbuf, "q_hist", ocs->instance_index);
+ ocs_textbuf_printf(textbuf, "<history>\n");
+ ocs_textbuf_printf(textbuf, "No history available\n");
+ ocs_textbuf_printf(textbuf, "</history>\n");
+ ocs_ddump_endsection(textbuf, "q_hist", ocs->instance_index);
+#else
+ ocs_ddump_queue_history(textbuf, &ocs->hw.q_hist);
+#endif
+
+#if defined(OCS_DEBUG_MEMORY)
+ ocs_memory_allocated_ddump(textbuf);
+#endif
+
+ ocs_ddump_endsection(textbuf, "ocs", ocs->instance_index);
+
+ ocs_ddump_endfile(textbuf);
+
+ return retval;
+}
+
+/**
+ * @brief Capture and save ddump
+ *
+ * Captures a ddump of the current state and saves it in the
+ * ocs_t structure. The goal of this function is to save a ddump
+ * as soon as an issue is encountered. The saved ddump will be
+ * kept until the user reads it.
+ *
+ * @param ocs pointer to device context
+ * @param flags ddump flags
+ * @param qentries number of queue entries to dump
+ *
+ * @return 0 if the ddump was saved; > 0 if one already exists;
+ * < 0 on error
+ */
+
+int32_t
+ocs_save_ddump(ocs_t *ocs, uint32_t flags, uint32_t qentries)
+{
+ if (ocs_textbuf_get_written(&ocs->ddump_saved) > 0) {
+ ocs_log_debug(ocs, "Saved ddump already exists\n");
+ return 1;
+ }
+
+ if (!ocs_textbuf_initialized(&ocs->ddump_saved)) {
+ ocs_log_err(ocs, "Saved ddump not allocated\n");
+ return -1;
+ }
+
+ ocs_log_debug(ocs, "Saving ddump\n");
+ ocs_ddump(ocs, &ocs->ddump_saved, flags, qentries);
+ ocs_log_debug(ocs, "Saved ddump: %d bytes written\n", ocs_textbuf_get_written(&ocs->ddump_saved));
+ return 0;
+}
+
+/**
+ * @brief Capture and save ddump for all OCS instances
+ *
+ * Calls ocs_save_ddump() for each OCS instance.
+ *
+ * @param flags ddump flags
+ * @param qentries number of queue entries to dump
+ * @param alloc_flag allocate dump buffer if not already allocated
+ *
+ * @return 0 if the ddump was saved; > 0 if one already exists;
+ * < 0 on error
+ */
+
+int32_t
+ocs_save_ddump_all(uint32_t flags, uint32_t qentries, uint32_t alloc_flag)
+{
+ ocs_t *ocs;
+ uint32_t i;
+ int32_t rc = 0;
+
+ for (i = 0; (ocs = ocs_get_instance(i)) != NULL; i++) {
+ if (alloc_flag && (!ocs_textbuf_initialized(&ocs->ddump_saved))) {
+ rc = ocs_textbuf_alloc(ocs, &ocs->ddump_saved, DEFAULT_SAVED_DUMP_SIZE);
+ if (rc) {
+ break;
+ }
+ }
+
+ rc = ocs_save_ddump(ocs, flags, qentries);
+ if (rc < 0) {
+ break;
+ }
+ }
+ return rc;
+}
+
+/**
+ * @brief Clear saved ddump
+ *
+ * Clears the saved ddump to make room for the next one.
+ *
+ * @param ocs pointer to device context
+ *
+ * @return 0 if the ddump was cleared; > 0 if no saved ddump was found
+ */
+
+int32_t
+ocs_clear_saved_ddump(ocs_t *ocs)
+{
+ /* if there's a saved ddump, reset the textbuf to clear it */
+ if (ocs_textbuf_get_written(&ocs->ddump_saved)) {
+ ocs_log_debug(ocs, "saved ddump cleared\n");
+ ocs_textbuf_reset(&ocs->ddump_saved);
+ return 0;
+ } else {
+ ocs_log_debug(ocs, "no saved ddump found\n");
+ return 1;
+ }
+}
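+
+/*
+ * Illustrative sketch (intentionally not compiled): how an error path might
+ * capture a saved ddump with queue entries for every queue type, and clear
+ * it after it has been read. The function name and the 32-entry depth are
+ * examples only, not part of the driver.
+ */
+#if 0
+static void
+ocs_example_ddump_on_error(ocs_t *ocs)
+{
+ uint32_t flags = OCS_DDUMP_FLAGS_WQES | OCS_DDUMP_FLAGS_CQES |
+ OCS_DDUMP_FLAGS_MQES | OCS_DDUMP_FLAGS_RQES |
+ OCS_DDUMP_FLAGS_EQES;
+
+ /* returns 0 if saved, > 0 if a saved ddump already exists */
+ if (ocs_save_ddump(ocs, flags, 32) == 0) {
+ /* ... later, once the user has read ocs->ddump_saved ... */
+ ocs_clear_saved_ddump(ocs);
+ }
+}
+#endif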
+
diff --git a/sys/dev/ocs_fc/ocs_ddump.h b/sys/dev/ocs_fc/ocs_ddump.h
new file mode 100644
index 000000000000..253a58b18f51
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs_ddump.h
@@ -0,0 +1,60 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#if !defined(__OCS_DDUMP_H__)
+#define __OCS_DDUMP_H__
+
+#include "ocs_utils.h"
+
+#define OCS_DDUMP_FLAGS_WQES (1U << 0)
+#define OCS_DDUMP_FLAGS_CQES (1U << 1)
+#define OCS_DDUMP_FLAGS_MQES (1U << 2)
+#define OCS_DDUMP_FLAGS_RQES (1U << 3)
+#define OCS_DDUMP_FLAGS_EQES (1U << 4)
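+
+/*
+ * These flags may be OR'd together in the "flags" argument of ocs_ddump()
+ * and ocs_save_ddump() to select which queue types have their entries
+ * included in the dump, e.g. (OCS_DDUMP_FLAGS_WQES | OCS_DDUMP_FLAGS_CQES).
+ */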
+
+extern int ocs_ddump(ocs_t *ocs, ocs_textbuf_t *textbuf, uint32_t flags, uint32_t qentries);
+
+extern void ocs_ddump_startfile(ocs_textbuf_t *textbuf);
+extern void ocs_ddump_endfile(ocs_textbuf_t *textbuf);
+extern void ocs_ddump_section(ocs_textbuf_t *textbuf, const char *name, uint32_t instance);
+extern void ocs_ddump_endsection(ocs_textbuf_t *textbuf, const char *name, uint32_t instance);
+__attribute__((format(printf,3,4)))
+extern void ocs_ddump_value(ocs_textbuf_t *textbuf, const char *name, const char *fmt, ...);
+extern void ocs_ddump_buffer(ocs_textbuf_t *textbuf, const char *name, uint32_t instance, void *buffer, uint32_t size);
+extern int32_t ocs_save_ddump(ocs_t *ocs, uint32_t flags, uint32_t qentries);
+extern int32_t ocs_get_saved_ddump(ocs_t *ocs, ocs_textbuf_t *textbuf);
+extern int32_t ocs_save_ddump_all(uint32_t flags, uint32_t qentries, uint32_t alloc_flag);
+extern int32_t ocs_clear_saved_ddump(ocs_t *ocs);
+extern void ocs_ddump_queue_entries(ocs_textbuf_t *textbuf, void *q_addr, uint32_t size, uint32_t length, int32_t index, uint32_t qentries);
+
+#endif
diff --git a/sys/dev/ocs_fc/ocs_device.c b/sys/dev/ocs_fc/ocs_device.c
new file mode 100644
index 000000000000..a157ba801d22
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs_device.c
@@ -0,0 +1,1929 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/**
+ * @file
+ * Implement remote device state machine for target and initiator.
+ */
+
+/*!
+@defgroup device_sm Node State Machine: Remote Device States
+*/
+
+#include "ocs.h"
+#include "ocs_device.h"
+#include "ocs_fabric.h"
+#include "ocs_els.h"
+
+static void *__ocs_d_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+static void *__ocs_d_wait_del_node(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+static void *__ocs_d_wait_del_ini_tgt(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+static int32_t ocs_process_abts(ocs_io_t *io, fc_header_t *hdr);
+
+/**
+ * @ingroup device_sm
+ * @brief Send response to PRLI.
+ *
+ * <h3 class="desc">Description</h3>
+ * For device nodes, this function sends a PRLI response.
+ *
+ * @param io Pointer to a SCSI IO object.
+ * @param ox_id OX_ID of PRLI
+ *
+ * @return Returns None.
+ */
+
+void
+ocs_d_send_prli_rsp(ocs_io_t *io, uint16_t ox_id)
+{
+ ocs_t *ocs = io->ocs;
+ ocs_node_t *node = io->node;
+
+ /* If the back-end doesn't support the fc-type, we send an LS_RJT */
+ if (ocs->fc_type != node->fc_type) {
+ node_printf(node, "PRLI rejected by target-server, fc-type not supported\n");
+ ocs_send_ls_rjt(io, ox_id, FC_REASON_UNABLE_TO_PERFORM,
+ FC_EXPL_REQUEST_NOT_SUPPORTED, 0, NULL, NULL);
+ node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
+ ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
+ return;
+ }
+
+ /* If the back-end doesn't want to talk to this initiator, we send an LS_RJT */
+ if (node->sport->enable_tgt && (ocs_scsi_validate_initiator(node) == 0)) {
+ node_printf(node, "PRLI rejected by target-server\n");
+ ocs_send_ls_rjt(io, ox_id, FC_REASON_UNABLE_TO_PERFORM,
+ FC_EXPL_NO_ADDITIONAL, 0, NULL, NULL);
+ node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
+ ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
+ } else {
+ /*sm: process PRLI payload, send PRLI acc */
+ ocs_send_prli_acc(io, ox_id, ocs->fc_type, NULL, NULL);
+
+ /* Immediately go to ready state to avoid window where we're
+ * waiting for the PRLI LS_ACC to complete while holding FCP_CMNDs
+ */
+ ocs_node_transition(node, __ocs_d_device_ready, NULL);
+ }
+}
+
+/**
+ * @ingroup device_sm
+ * @brief Device node state machine: Initiate node shutdown
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_d_initiate_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_ENTER: {
+ int32_t rc = OCS_SCSI_CALL_COMPLETE; /* assume no wait needed */
+
+ ocs_scsi_io_alloc_disable(node);
+
+ /* make necessary delete upcall(s) */
+ if (node->init && !node->targ) {
+ ocs_log_debug(node->ocs,
+ "[%s] delete (initiator) WWPN %s WWNN %s\n",
+ node->display_name, node->wwpn, node->wwnn);
+ ocs_node_transition(node, __ocs_d_wait_del_node, NULL);
+ if (node->sport->enable_tgt) {
+ rc = ocs_scsi_del_initiator(node,
+ OCS_SCSI_INITIATOR_DELETED);
+ }
+ if (rc == OCS_SCSI_CALL_COMPLETE) {
+ ocs_node_post_event(node,
+ OCS_EVT_NODE_DEL_INI_COMPLETE, NULL);
+ }
+ } else if (node->targ && !node->init) {
+ ocs_log_debug(node->ocs,
+ "[%s] delete (target) WWPN %s WWNN %s\n",
+ node->display_name, node->wwpn, node->wwnn);
+ ocs_node_transition(node, __ocs_d_wait_del_node, NULL);
+ if (node->sport->enable_ini) {
+ rc = ocs_scsi_del_target(node,
+ OCS_SCSI_TARGET_DELETED);
+ }
+ if (rc == OCS_SCSI_CALL_COMPLETE) {
+ ocs_node_post_event(node,
+ OCS_EVT_NODE_DEL_TGT_COMPLETE, NULL);
+ }
+ } else if (node->init && node->targ) {
+ ocs_log_debug(node->ocs,
+ "[%s] delete (initiator+target) WWPN %s WWNN %s\n",
+ node->display_name, node->wwpn, node->wwnn);
+ ocs_node_transition(node, __ocs_d_wait_del_ini_tgt, NULL);
+ if (node->sport->enable_tgt) {
+ rc = ocs_scsi_del_initiator(node,
+ OCS_SCSI_INITIATOR_DELETED);
+ }
+ if (rc == OCS_SCSI_CALL_COMPLETE) {
+ ocs_node_post_event(node,
+ OCS_EVT_NODE_DEL_INI_COMPLETE, NULL);
+ }
+ rc = OCS_SCSI_CALL_COMPLETE; /* assume no wait needed */
+ if (node->sport->enable_ini) {
+ rc = ocs_scsi_del_target(node,
+ OCS_SCSI_TARGET_DELETED);
+ }
+ if (rc == OCS_SCSI_CALL_COMPLETE) {
+ ocs_node_post_event(node,
+ OCS_EVT_NODE_DEL_TGT_COMPLETE, NULL);
+ }
+ }
+
+ /* we've initiated the upcalls as needed, now kick off the node
+ * detach to precipitate the aborting of outstanding exchanges
+ * associated with said node
+ *
+ * Beware: if we've made upcall(s), we've already transitioned
+ * to a new state by the time we execute this.
+ * TODO: consider doing this before the upcalls...
+ */
+ if (node->attached) {
+ /* issue hw node free; don't care if succeeds right away
+ * or sometime later, will check node->attached later in
+ * shutdown process
+ */
+ rc = ocs_hw_node_detach(&ocs->hw, &node->rnode);
+ if (node->rnode.free_group) {
+ ocs_remote_node_group_free(node->node_group);
+ node->node_group = NULL;
+ node->rnode.free_group = FALSE;
+ }
+ if (rc != OCS_HW_RTN_SUCCESS &&
+ rc != OCS_HW_RTN_SUCCESS_SYNC) {
+ node_printf(node,
+ "Failed freeing HW node, rc=%d\n", rc);
+ }
+ }
+
+ /* if neither initiator nor target, proceed to cleanup */
+ if (!node->init && !node->targ){
+ /*
+ * node has either been detached or is in the process
+ * of being detached, call common node's initiate
+ * cleanup function.
+ */
+ ocs_node_initiate_cleanup(node);
+ }
+ break;
+ }
+ case OCS_EVT_ALL_CHILD_NODES_FREE:
+ /* Ignore, this can happen if an ELS is aborted,
+ * while in a delay/retry state */
+ break;
+ default:
+ __ocs_d_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+ return NULL;
+}
+
+/**
+ * @ingroup device_sm
+ * @brief Device node state machine: Common device event handler.
+ *
+ * <h3 class="desc">Description</h3>
+ * For device nodes, this event handler manages default and common events.
+ *
+ * @param funcname Function name text.
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+static void *
+__ocs_d_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ ocs_node_t *node = NULL;
+ ocs_t *ocs = NULL;
+ ocs_assert(ctx, NULL);
+ node = ctx->app;
+ ocs_assert(node, NULL);
+ ocs = node->ocs;
+ ocs_assert(ocs, NULL);
+
+ switch(evt) {
+
+ /* Handle shutdown events */
+ case OCS_EVT_SHUTDOWN:
+ ocs_log_debug(ocs, "[%s] %-20s %-20s\n", node->display_name, funcname, ocs_sm_event_name(evt));
+ node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
+ ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
+ break;
+ case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO:
+ ocs_log_debug(ocs, "[%s] %-20s %-20s\n", node->display_name, funcname, ocs_sm_event_name(evt));
+ node->shutdown_reason = OCS_NODE_SHUTDOWN_EXPLICIT_LOGO;
+ ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
+ break;
+ case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO:
+ ocs_log_debug(ocs, "[%s] %-20s %-20s\n", node->display_name, funcname, ocs_sm_event_name(evt));
+ node->shutdown_reason = OCS_NODE_SHUTDOWN_IMPLICIT_LOGO;
+ ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
+ break;
+
+ default:
+ /* call default event handler common to all nodes */
+ __ocs_node_common(funcname, ctx, evt, arg);
+ break;
+ }
+ return NULL;
+}
+
+/**
+ * @ingroup device_sm
+ * @brief Device node state machine: Wait for a domain-attach completion in loop topology.
+ *
+ * <h3 class="desc">Description</h3>
+ * State waits for a domain-attach completion while in loop topology.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_d_wait_loop(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_ENTER:
+ ocs_node_hold_frames(node);
+ break;
+
+ case OCS_EVT_EXIT:
+ ocs_node_accept_frames(node);
+ break;
+
+ case OCS_EVT_DOMAIN_ATTACH_OK: {
+ /* send PLOGI automatically if initiator */
+ ocs_node_init_device(node, TRUE);
+ break;
+ }
+ default:
+ __ocs_d_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+
+/**
+ * @ingroup device_sm
+ * @brief state: Wait for node resume event.
+ *
+ * State is entered when a node in I+T mode sends delete initiator/target calls
+ * to the target-server and initiator-client, and must wait for both to complete.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_d_wait_del_ini_tgt(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_ENTER:
+ ocs_node_hold_frames(node);
+ /* Fall through */
+
+ case OCS_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
+ case OCS_EVT_ALL_CHILD_NODES_FREE:
+ /* These are expected events. */
+ break;
+
+ case OCS_EVT_NODE_DEL_INI_COMPLETE:
+ case OCS_EVT_NODE_DEL_TGT_COMPLETE:
+ ocs_node_transition(node, __ocs_d_wait_del_node, NULL);
+ break;
+
+ case OCS_EVT_EXIT:
+ ocs_node_accept_frames(node);
+ break;
+
+ case OCS_EVT_SRRS_ELS_REQ_FAIL:
+ /* Can happen as ELS IOs complete */
+ ocs_assert(node->els_req_cnt, NULL);
+ node->els_req_cnt--;
+ break;
+
+ /* ignore shutdown events as we're already in shutdown path */
+ case OCS_EVT_SHUTDOWN:
+ /* have default shutdown event take precedence */
+ node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
+ /* fall through */
+ case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO:
+ case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO:
+ node_printf(node, "%s received\n", ocs_sm_event_name(evt));
+ break;
+ case OCS_EVT_DOMAIN_ATTACH_OK:
+ /* don't care about domain_attach_ok */
+ break;
+ default:
+ __ocs_d_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+
+/**
+ * @ingroup device_sm
+ * @brief state: Wait for node resume event.
+ *
+ * State is entered when a node sends a delete initiator/target call to the
+ * target-server/initiator-client and needs to wait for that work to complete.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_d_wait_del_node(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_ENTER:
+ ocs_node_hold_frames(node);
+ /* Fall through */
+
+ case OCS_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
+ case OCS_EVT_ALL_CHILD_NODES_FREE:
+ /* These are expected events. */
+ break;
+
+ case OCS_EVT_NODE_DEL_INI_COMPLETE:
+ case OCS_EVT_NODE_DEL_TGT_COMPLETE:
+ /*
+ * node has either been detached or is in the process of being detached,
+ * call common node's initiate cleanup function
+ */
+ ocs_node_initiate_cleanup(node);
+ break;
+
+ case OCS_EVT_EXIT:
+ ocs_node_accept_frames(node);
+ break;
+
+ case OCS_EVT_SRRS_ELS_REQ_FAIL:
+ /* Can happen as ELS IOs complete */
+ ocs_assert(node->els_req_cnt, NULL);
+ node->els_req_cnt--;
+ break;
+
+ /* ignore shutdown events as we're already in shutdown path */
+ case OCS_EVT_SHUTDOWN:
+ /* have default shutdown event take precedence */
+ node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
+ /* fall through */
+ case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO:
+ case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO:
+ node_printf(node, "%s received\n", ocs_sm_event_name(evt));
+ break;
+ case OCS_EVT_DOMAIN_ATTACH_OK:
+ /* don't care about domain_attach_ok */
+ break;
+ default:
+ __ocs_d_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+
+/**
+ * @brief Save the OX_ID for sending LS_ACC sometime later.
+ *
+ * <h3 class="desc">Description</h3>
+ * When deferring the response to an ELS request, the OX_ID of the request
+ * is saved using this function.
+ *
+ * @param io Pointer to a SCSI IO object.
+ * @param hdr Pointer to the FC header.
+ * @param ls Defines the type of LS_ACC to send later: OCS_NODE_SEND_LS_ACC_PLOGI
+ * for a PLOGI, or OCS_NODE_SEND_LS_ACC_PRLI for a PRLI.
+ *
+ * @return None.
+ */
+
+void
+ocs_send_ls_acc_after_attach(ocs_io_t *io, fc_header_t *hdr, ocs_node_send_ls_acc_e ls)
+{
+ ocs_node_t *node = io->node;
+ uint16_t ox_id = ocs_be16toh(hdr->ox_id);
+
+ ocs_assert(node->send_ls_acc == OCS_NODE_SEND_LS_ACC_NONE);
+
+ node->ls_acc_oxid = ox_id;
+ node->send_ls_acc = ls;
+ node->ls_acc_io = io;
+ node->ls_acc_did = fc_be24toh(hdr->d_id);
+}
+
+/**
+ * @brief Process the PRLI payload.
+ *
+ * <h3 class="desc">Description</h3>
+ * The PRLI payload is processed; the initiator/target capabilities of the
+ * remote node are extracted and saved in the node object.
+ *
+ * @param node Pointer to the node object.
+ * @param prli Pointer to the PRLI payload.
+ *
+ * @return None.
+ */
+
+void
+ocs_process_prli_payload(ocs_node_t *node, fc_prli_payload_t *prli)
+{
+ node->init = (ocs_be16toh(prli->service_params) & FC_PRLI_INITIATOR_FUNCTION) != 0;
+ node->targ = (ocs_be16toh(prli->service_params) & FC_PRLI_TARGET_FUNCTION) != 0;
+ node->fcp2device = (ocs_be16toh(prli->service_params) & FC_PRLI_RETRY) != 0;
+ node->fc_type = prli->type;
+}
+
+/**
+ * @brief Process the ABTS.
+ *
+ * <h3 class="desc">Description</h3>
+ * Common code to process a received ABTS. If an active IO can be found
+ * that matches the OX_ID of the ABTS request, a call is made to the
+ * backend. Otherwise, a BA_ACC is returned to the initiator.
+ *
+ * @param io Pointer to a SCSI IO object.
+ * @param hdr Pointer to the FC header.
+ *
+ * @return Returns 0 on success, or a negative error value on failure.
+ */
+
+static int32_t
+ocs_process_abts(ocs_io_t *io, fc_header_t *hdr)
+{
+ ocs_node_t *node = io->node;
+ ocs_t *ocs = node->ocs;
+ uint16_t ox_id = ocs_be16toh(hdr->ox_id);
+ uint16_t rx_id = ocs_be16toh(hdr->rx_id);
+ ocs_io_t *abortio;
+
+ abortio = ocs_io_find_tgt_io(ocs, node, ox_id, rx_id);
+
+ /* If an IO was found, attempt to take a reference on it */
+ if (abortio != NULL && (ocs_ref_get_unless_zero(&abortio->ref) != 0)) {
+
+ /* Got a reference on the IO. Hold it until backend is notified below */
+ node_printf(node, "Abort request: ox_id [%04x] rx_id [%04x]\n",
+ ox_id, rx_id);
+
+ /*
+ * Save the ox_id for the ABTS as the init_task_tag in our manufactured
+ * TMF IO object
+ */
+ io->display_name = "abts";
+ io->init_task_tag = ox_id;
+ /* don't set tgt_task_tag, don't want to confuse with XRI */
+
+ /*
+ * Save the rx_id from the ABTS as it is needed for the BLS response,
+ * regardless of the IO context's rx_id
+ */
+ io->abort_rx_id = rx_id;
+
+ /* Call target server command abort */
+ io->tmf_cmd = OCS_SCSI_TMF_ABORT_TASK;
+ ocs_scsi_recv_tmf(io, abortio->tgt_io.lun, OCS_SCSI_TMF_ABORT_TASK, abortio, 0);
+
+ /*
+ * Backend will have taken an additional reference on the IO if needed;
+ * done with current reference.
+ */
+ ocs_ref_put(&abortio->ref); /* ocs_ref_get(): same function */
+ } else {
+ /*
+ * Either the IO was not found, or it was freed between finding it
+ * and attempting to get the reference.
+ */
+ node_printf(node, "Abort request: ox_id [%04x], IO not found (exists=%d)\n",
+ ox_id, (abortio != NULL));
+
+ /* Send a BA_ACC */
+ ocs_bls_send_acc_hdr(io, hdr);
+ }
+ return 0;
+}
+
+/**
+ * @ingroup device_sm
+ * @brief Device node state machine: Wait for the PLOGI accept to complete.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_d_wait_plogi_acc_cmpl(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_ENTER:
+ ocs_node_hold_frames(node);
+ break;
+
+ case OCS_EVT_EXIT:
+ ocs_node_accept_frames(node);
+ break;
+
+ case OCS_EVT_SRRS_ELS_CMPL_FAIL:
+ ocs_assert(node->els_cmpl_cnt, NULL);
+ node->els_cmpl_cnt--;
+ node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
+ ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
+ break;
+
+ case OCS_EVT_SRRS_ELS_CMPL_OK: /* PLOGI ACC completions */
+ ocs_assert(node->els_cmpl_cnt, NULL);
+ node->els_cmpl_cnt--;
+ ocs_node_transition(node, __ocs_d_port_logged_in, NULL);
+ break;
+
+ default:
+ __ocs_d_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+/**
+ * @ingroup device_sm
+ * @brief Device node state machine: Wait for the LOGO response.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_d_wait_logo_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_ENTER:
+ /* TODO: may want to remove this
+ * if we want to know about PLOGI */
+ ocs_node_hold_frames(node);
+ break;
+
+ case OCS_EVT_EXIT:
+ ocs_node_accept_frames(node);
+ break;
+
+ case OCS_EVT_SRRS_ELS_REQ_OK:
+ case OCS_EVT_SRRS_ELS_REQ_RJT:
+ case OCS_EVT_SRRS_ELS_REQ_FAIL:
+ /* LOGO response received, send shutdown */
+ if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_LOGO, __ocs_d_common, __func__)) {
+ return NULL;
+ }
+ ocs_assert(node->els_req_cnt, NULL);
+ node->els_req_cnt--;
+ node_printf(node, "LOGO sent (evt=%s), shutdown node\n", ocs_sm_event_name(evt));
+ /* sm: post explicit logout */
+ ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
+ break;
+
+ /* TODO: PLOGI: abort LOGO and process PLOGI? (SHUTDOWN_EXPLICIT/IMPLICIT_LOGO?) */
+
+ default:
+ __ocs_d_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+ return NULL;
+}
+
+
+/**
+ * @ingroup device_sm
+ * @brief Device node state machine: Wait for the PRLO response.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_d_wait_prlo_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_ENTER:
+ ocs_node_hold_frames(node);
+ break;
+
+ case OCS_EVT_EXIT:
+ ocs_node_accept_frames(node);
+ break;
+
+ case OCS_EVT_SRRS_ELS_REQ_OK:
+ case OCS_EVT_SRRS_ELS_REQ_RJT:
+ case OCS_EVT_SRRS_ELS_REQ_FAIL:
+ if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PRLO, __ocs_d_common, __func__)) {
+ return NULL;
+ }
+ ocs_assert(node->els_req_cnt, NULL);
+ node->els_req_cnt--;
+ node_printf(node, "PRLO sent (evt=%s)\n", ocs_sm_event_name(evt));
+ ocs_node_transition(node, __ocs_d_port_logged_in, NULL);
+ break;
+
+ default:
+ __ocs_node_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+ return NULL;
+}
+
+
+/**
+ * @brief Initialize device node.
+ *
+ * Initialize device node. Records whether a PLOGI should be sent on entry,
+ * then transitions to __ocs_d_init (or to __ocs_node_paused if new nodes are paused).
+ *
+ * @param node Pointer to the node object.
+ * @param send_plogi Boolean indicating to send PLOGI command or not.
+ *
+ * @return none
+ */
+
+void
+ocs_node_init_device(ocs_node_t *node, int send_plogi)
+{
+ node->send_plogi = send_plogi;
+ if ((node->ocs->nodedb_mask & OCS_NODEDB_PAUSE_NEW_NODES) && !FC_ADDR_IS_DOMAIN_CTRL(node->rnode.fc_id)) {
+ node->nodedb_state = __ocs_d_init;
+ ocs_node_transition(node, __ocs_node_paused, NULL);
+ } else {
+ ocs_node_transition(node, __ocs_d_init, NULL);
+ }
+}
+
+/**
+ * @ingroup device_sm
+ * @brief Device node state machine: Initial node state for an initiator or a target.
+ *
+ * <h3 class="desc">Description</h3>
+ * This state is entered when a node is instantiated, either having been
+ * discovered from a name services query, or having received a PLOGI/FLOGI.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ * - OCS_EVT_ENTER: (uint8_t *) - 1 to send a PLOGI on
+ * entry (initiator-only); 0 indicates a PLOGI is
+ * not sent on entry (initiator-only). Not applicable for a target.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_d_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ int32_t rc;
+ ocs_node_cb_t *cbdata = arg;
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_ENTER:
+ /* check if we need to send PLOGI */
+ if (node->send_plogi) {
+ /* only send if we have initiator capability, and domain is attached */
+ if (node->sport->enable_ini && node->sport->domain->attached) {
+ ocs_send_plogi(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT,
+ OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
+ ocs_node_transition(node, __ocs_d_wait_plogi_rsp, NULL);
+ } else {
+ node_printf(node, "not sending plogi sport.ini=%d, domain attached=%d\n",
+ node->sport->enable_ini, node->sport->domain->attached);
+ }
+ }
+ break;
+ case OCS_EVT_PLOGI_RCVD: {
+ /* T, or I+T */
+ fc_header_t *hdr = cbdata->header->dma.virt;
+ uint32_t d_id = fc_be24toh(hdr->d_id);
+
+ ocs_node_save_sparms(node, cbdata->payload->dma.virt);
+ ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PLOGI);
+
+ /* domain already attached */
+ if (node->sport->domain->attached) {
+ rc = ocs_node_attach(node);
+ ocs_node_transition(node, __ocs_d_wait_node_attach, NULL);
+ if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
+ ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
+ }
+ break;
+ }
+
+ /* domain not attached; several possibilities: */
+ switch (node->sport->topology) {
+ case OCS_SPORT_TOPOLOGY_P2P:
+ /* we're not attached and sport is p2p, need to attach */
+ ocs_domain_attach(node->sport->domain, d_id);
+ ocs_node_transition(node, __ocs_d_wait_domain_attach, NULL);
+ break;
+ case OCS_SPORT_TOPOLOGY_FABRIC:
+ /* we're not attached and sport is fabric, domain attach should have
+ * already been requested as part of the fabric state machine, wait for it
+ */
+ ocs_node_transition(node, __ocs_d_wait_domain_attach, NULL);
+ break;
+ case OCS_SPORT_TOPOLOGY_UNKNOWN:
+ /* Two possibilities:
+ * 1. received a PLOGI before our FLOGI has completed (possible since
+ * completion comes in on another CQ), thus we don't know what we're
+ * connected to yet; transition to a state to wait for the fabric
+ * node to tell us;
+ * 2. PLOGI received before link went down and we haven't performed
+ * domain attach yet.
+ * Note: we cannot distinguish between 1. and 2. so have to assume PLOGI
+ * was received after link back up.
+ */
+ node_printf(node, "received PLOGI, with unknown topology did=0x%x\n", d_id);
+ ocs_node_transition(node, __ocs_d_wait_topology_notify, NULL);
+ break;
+ default:
+ node_printf(node, "received PLOGI, with unexpectd topology %d\n",
+ node->sport->topology);
+ ocs_assert(FALSE, NULL);
+ break;
+ }
+ break;
+ }
+
+ case OCS_EVT_FDISC_RCVD: {
+ __ocs_d_common(__func__, ctx, evt, arg);
+ break;
+ }
+
+ case OCS_EVT_FLOGI_RCVD: {
+ fc_header_t *hdr = cbdata->header->dma.virt;
+
+ /* this better be coming from an NPort */
+ ocs_assert(ocs_rnode_is_nport(cbdata->payload->dma.virt), NULL);
+
+ /* sm: save sparams, send FLOGI acc */
+ ocs_domain_save_sparms(node->sport->domain, cbdata->payload->dma.virt);
+
+ /* send FC LS_ACC response, override s_id */
+ ocs_fabric_set_topology(node, OCS_SPORT_TOPOLOGY_P2P);
+ ocs_send_flogi_p2p_acc(cbdata->io, ocs_be16toh(hdr->ox_id), fc_be24toh(hdr->d_id), NULL, NULL);
+ if (ocs_p2p_setup(node->sport)) {
+ node_printf(node, "p2p setup failed, shutting down node\n");
+ ocs_node_post_event(node, OCS_EVT_SHUTDOWN, NULL);
+ } else {
+ ocs_node_transition(node, __ocs_p2p_wait_flogi_acc_cmpl, NULL);
+ }
+
+ break;
+ }
+
+ case OCS_EVT_LOGO_RCVD: {
+ fc_header_t *hdr = cbdata->header->dma.virt;
+
+ if (!node->sport->domain->attached) {
+ /* most likely a frame left over from before a link down; drop and
+ * shut node down w/ "explicit logout" so pending frames are processed */
+ node_printf(node, "%s domain not attached, dropping\n", ocs_sm_event_name(evt));
+ ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
+ break;
+ }
+ ocs_send_logo_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
+ ocs_node_transition(node, __ocs_d_wait_logo_acc_cmpl, NULL);
+ break;
+ }
+
+ case OCS_EVT_PRLI_RCVD:
+ case OCS_EVT_PRLO_RCVD:
+ case OCS_EVT_PDISC_RCVD:
+ case OCS_EVT_ADISC_RCVD:
+ case OCS_EVT_RSCN_RCVD: {
+ fc_header_t *hdr = cbdata->header->dma.virt;
+ if (!node->sport->domain->attached) {
+ /* most likely a frame left over from before a link down; drop and
+ * shut node down w/ "explicit logout" so pending frames are processed */
+ node_printf(node, "%s domain not attached, dropping\n", ocs_sm_event_name(evt));
+ ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
+ break;
+ }
+ node_printf(node, "%s received, sending reject\n", ocs_sm_event_name(evt));
+ ocs_send_ls_rjt(cbdata->io, ocs_be16toh(hdr->ox_id),
+ FC_REASON_UNABLE_TO_PERFORM, FC_EXPL_NPORT_LOGIN_REQUIRED, 0,
+ NULL, NULL);
+
+ break;
+ }
+
+ case OCS_EVT_FCP_CMD_RCVD: {
+ /* note: problem, we're now expecting an ELS REQ completion
+ * from both the LOGO and PLOGI */
+ if (!node->sport->domain->attached) {
+ /* most likely a frame left over from before a link down; drop and
+ * shut node down w/ "explicit logout" so pending frames are processed */
+ node_printf(node, "%s domain not attached, dropping\n", ocs_sm_event_name(evt));
+ ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
+ break;
+ }
+
+ /* Send LOGO */
+ node_printf(node, "FCP_CMND received, send LOGO\n");
+ if (ocs_send_logo(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT, 0, NULL, NULL) == NULL) {
+ /* failed to send LOGO, go ahead and cleanup node anyways */
+ node_printf(node, "Failed to send LOGO\n");
+ ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
+ } else {
+ /* sent LOGO, wait for response */
+ ocs_node_transition(node, __ocs_d_wait_logo_rsp, NULL);
+ }
+ break;
+ }
+ case OCS_EVT_DOMAIN_ATTACH_OK:
+ /* don't care about domain_attach_ok */
+ break;
+
+ default:
+ __ocs_d_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+/**
+ * @ingroup device_sm
+ * @brief Device node state machine: Wait on a response for a sent PLOGI.
+ *
+ * <h3 class="desc">Description</h3>
+ * State is entered when an initiator-capable node has sent
+ * a PLOGI and is waiting for a response.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_d_wait_plogi_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ int32_t rc;
+ ocs_node_cb_t *cbdata = arg;
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_PLOGI_RCVD: {
+ /* T, or I+T */
+ /* received PLOGI with svc parms, go ahead and attach node
+ * when PLOGI that was sent ultimately completes, it'll be a no-op
+ */
+
+ /* TODO: there is an outstanding PLOGI sent, can we set a flag
+ * to indicate that we don't want to retry it if it times out? */
+ ocs_node_save_sparms(node, cbdata->payload->dma.virt);
+ ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PLOGI);
+ /* sm: domain->attached / ocs_node_attach */
+ rc = ocs_node_attach(node);
+ ocs_node_transition(node, __ocs_d_wait_node_attach, NULL);
+ if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
+ ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
+ }
+ break;
+ }
+
+ case OCS_EVT_PRLI_RCVD:
+ /* I, or I+T */
+ /* A PLOGI was sent and, before its completion was seen, a PRLI
+ * was received from the remote node (WCQEs and RCQEs come in on
+ * different queues, so order of processing cannot be assumed).
+ * Save the OX_ID so the PRLI LS_ACC can be sent after the attach,
+ * and continue to wait for the PLOGI response.
+ */
+ ocs_process_prli_payload(node, cbdata->payload->dma.virt);
+ if (ocs->fc_type == node->fc_type) {
+ ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PRLI);
+ ocs_node_transition(node, __ocs_d_wait_plogi_rsp_recvd_prli, NULL);
+ } else {
+			/* TODO: this needs to be looked at. What do we do here? */
+ }
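+		/* (The fc_type comparison above guards against a PRLI for an
+		 * FC-4 type other than the one this driver services, typically
+		 * FCP, whose FC-4 type code is 0x08.) */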
+ break;
+
+	/* TODO: this needs to be looked at; we could very well be logged in */
+	case OCS_EVT_LOGO_RCVD: /* why don't we do a shutdown here? */
+ case OCS_EVT_PRLO_RCVD:
+ case OCS_EVT_PDISC_RCVD:
+ case OCS_EVT_FDISC_RCVD:
+ case OCS_EVT_ADISC_RCVD:
+ case OCS_EVT_RSCN_RCVD:
+ case OCS_EVT_SCR_RCVD: {
+ fc_header_t *hdr = cbdata->header->dma.virt;
+ node_printf(node, "%s received, sending reject\n", ocs_sm_event_name(evt));
+ ocs_send_ls_rjt(cbdata->io, ocs_be16toh(hdr->ox_id),
+ FC_REASON_UNABLE_TO_PERFORM, FC_EXPL_NPORT_LOGIN_REQUIRED, 0,
+ NULL, NULL);
+
+ break;
+ }
+
+ case OCS_EVT_SRRS_ELS_REQ_OK: /* PLOGI response received */
+ /* Completion from PLOGI sent */
+ if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_d_common, __func__)) {
+ return NULL;
+ }
+ ocs_assert(node->els_req_cnt, NULL);
+ node->els_req_cnt--;
+ /* sm: save sparams, ocs_node_attach */
+ ocs_node_save_sparms(node, cbdata->els->els_rsp.virt);
+ ocs_display_sparams(node->display_name, "plogi rcvd resp", 0, NULL,
+ ((uint8_t*)cbdata->els->els_rsp.virt) + 4);
+ rc = ocs_node_attach(node);
+ ocs_node_transition(node, __ocs_d_wait_node_attach, NULL);
+ if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
+ ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
+ }
+ break;
+
+ case OCS_EVT_SRRS_ELS_REQ_FAIL: /* PLOGI response received */
+ /* PLOGI failed, shutdown the node */
+ if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_d_common, __func__)) {
+ return NULL;
+ }
+ ocs_assert(node->els_req_cnt, NULL);
+ node->els_req_cnt--;
+ ocs_node_post_event(node, OCS_EVT_SHUTDOWN, NULL);
+ break;
+
+ case OCS_EVT_SRRS_ELS_REQ_RJT: /* Our PLOGI was rejected, this is ok in some cases */
+ if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_d_common, __func__)) {
+ return NULL;
+ }
+ ocs_assert(node->els_req_cnt, NULL);
+ node->els_req_cnt--;
+ break;
+
+ case OCS_EVT_FCP_CMD_RCVD: {
+		/* not logged in yet and a PLOGI is outstanding, so don't send a LOGO;
+		 * just drop the frame
+		 */
+ node_printf(node, "FCP_CMND received, drop\n");
+ break;
+ }
+
+ default:
+ __ocs_d_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+/**
+ * @ingroup device_sm
+ * @brief Device node state machine: Wait on a response for a sent
+ * PLOGI after a PRLI has been received.
+ *
+ * <h3 class="desc">Description</h3>
+ * State is entered when an initiator-capable node has sent
+ * a PLOGI and is waiting for a response. Before receiving the
+ * response, a PRLI was received, implying that the PLOGI was
+ * successful.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_d_wait_plogi_rsp_recvd_prli(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ int32_t rc;
+ ocs_node_cb_t *cbdata = arg;
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_ENTER:
+		/*
+		 * Since we've received a PRLI, we have a port login and just
+		 * need to wait for the PLOGI response to do the node attach;
+		 * then we can send the LS_ACC for the PRLI. During this time
+		 * we may receive FCP_CMNDs (which is possible since we've
+		 * already sent a PRLI and our peer may have accepted it).
+		 * We are not waiting on any other unsolicited frames to
+		 * continue with the login process, so it will not hurt to
+		 * hold frames here.
+		 */
+ ocs_node_hold_frames(node);
+ break;
+
+ case OCS_EVT_EXIT:
+ ocs_node_accept_frames(node);
+ break;
+
+ case OCS_EVT_SRRS_ELS_REQ_OK: /* PLOGI response received */
+ /* Completion from PLOGI sent */
+ if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_d_common, __func__)) {
+ return NULL;
+ }
+ ocs_assert(node->els_req_cnt, NULL);
+ node->els_req_cnt--;
+ /* sm: save sparams, ocs_node_attach */
+ ocs_node_save_sparms(node, cbdata->els->els_rsp.virt);
+ ocs_display_sparams(node->display_name, "plogi rcvd resp", 0, NULL,
+ ((uint8_t*)cbdata->els->els_rsp.virt) + 4);
+ rc = ocs_node_attach(node);
+ ocs_node_transition(node, __ocs_d_wait_node_attach, NULL);
+ if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
+ ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
+ }
+ break;
+
+ case OCS_EVT_SRRS_ELS_REQ_FAIL: /* PLOGI response received */
+ case OCS_EVT_SRRS_ELS_REQ_RJT:
+ /* PLOGI failed, shutdown the node */
+ if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_d_common, __func__)) {
+ return NULL;
+ }
+ ocs_assert(node->els_req_cnt, NULL);
+ node->els_req_cnt--;
+ ocs_node_post_event(node, OCS_EVT_SHUTDOWN, NULL);
+ break;
+
+ default:
+ __ocs_d_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+/**
+ * @ingroup device_sm
+ * @brief Device node state machine: Wait for a domain attach.
+ *
+ * <h3 class="desc">Description</h3>
+ * Waits for a domain-attach complete ok event.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_d_wait_domain_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ int32_t rc;
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_ENTER:
+ ocs_node_hold_frames(node);
+ break;
+
+ case OCS_EVT_EXIT:
+ ocs_node_accept_frames(node);
+ break;
+
+ case OCS_EVT_DOMAIN_ATTACH_OK:
+ ocs_assert(node->sport->domain->attached, NULL);
+ /* sm: ocs_node_attach */
+ rc = ocs_node_attach(node);
+ ocs_node_transition(node, __ocs_d_wait_node_attach, NULL);
+ if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
+ ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
+ }
+ break;
+
+ default:
+ __ocs_d_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+ return NULL;
+}
+
+/**
+ * @ingroup device_sm
+ * @brief Device node state machine: Wait for a topology
+ * notification.
+ *
+ * <h3 class="desc">Description</h3>
+ * Waits for topology notification from fabric node, then
+ * attaches domain and node.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_d_wait_topology_notify(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ int32_t rc;
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_ENTER:
+ ocs_node_hold_frames(node);
+ break;
+
+ case OCS_EVT_EXIT:
+ ocs_node_accept_frames(node);
+ break;
+
+ case OCS_EVT_SPORT_TOPOLOGY_NOTIFY: {
+ ocs_sport_topology_e topology = (ocs_sport_topology_e)arg;
+ ocs_assert(!node->sport->domain->attached, NULL);
+ ocs_assert(node->send_ls_acc == OCS_NODE_SEND_LS_ACC_PLOGI, NULL);
+ node_printf(node, "topology notification, topology=%d\n", topology);
+
+ /* At the time the PLOGI was received, the topology was unknown,
+ * so we didn't know which node would perform the domain attach:
+ * 1. The node from which the PLOGI was sent (p2p) or
+ * 2. The node to which the FLOGI was sent (fabric).
+ */
+ if (topology == OCS_SPORT_TOPOLOGY_P2P) {
+ /* if this is p2p, need to attach to the domain using the
+ * d_id from the PLOGI received
+ */
+ ocs_domain_attach(node->sport->domain, node->ls_acc_did);
+ }
+ /* else, if this is fabric, the domain attach should be performed
+ * by the fabric node (node sending FLOGI); just wait for attach
+ * to complete
+ */
+
+ ocs_node_transition(node, __ocs_d_wait_domain_attach, NULL);
+ break;
+ }
+ case OCS_EVT_DOMAIN_ATTACH_OK:
+ ocs_assert(node->sport->domain->attached, NULL);
+ node_printf(node, "domain attach ok\n");
+		/* sm: ocs_node_attach */
+ rc = ocs_node_attach(node);
+ ocs_node_transition(node, __ocs_d_wait_node_attach, NULL);
+ if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
+ ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
+ }
+ break;
+
+ default:
+ __ocs_d_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+ return NULL;
+}
+
+/**
+ * @ingroup device_sm
+ * @brief Device node state machine: Wait for a node attach when found by a remote node.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_d_wait_node_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_ENTER:
+ ocs_node_hold_frames(node);
+ break;
+
+ case OCS_EVT_EXIT:
+ ocs_node_accept_frames(node);
+ break;
+
+ case OCS_EVT_NODE_ATTACH_OK:
+ node->attached = TRUE;
+ switch (node->send_ls_acc) {
+ case OCS_NODE_SEND_LS_ACC_PLOGI: {
+ /* sm: send_plogi_acc is set / send PLOGI acc */
+ /* Normal case for T, or I+T */
+ ocs_send_plogi_acc(node->ls_acc_io, node->ls_acc_oxid, NULL, NULL);
+ ocs_node_transition(node, __ocs_d_wait_plogi_acc_cmpl, NULL);
+ node->send_ls_acc = OCS_NODE_SEND_LS_ACC_NONE;
+ node->ls_acc_io = NULL;
+ break;
+ }
+ case OCS_NODE_SEND_LS_ACC_PRLI: {
+ ocs_d_send_prli_rsp(node->ls_acc_io, node->ls_acc_oxid);
+ node->send_ls_acc = OCS_NODE_SEND_LS_ACC_NONE;
+ node->ls_acc_io = NULL;
+ break;
+ }
+ case OCS_NODE_SEND_LS_ACC_NONE:
+ default:
+			/* Normal case for I */
+			/* sm: send_plogi_acc is not set / no LS_ACC to send */
+ ocs_node_transition(node, __ocs_d_port_logged_in, NULL);
+ break;
+ }
+ break;
+
+ case OCS_EVT_NODE_ATTACH_FAIL:
+ /* node attach failed, shutdown the node */
+ node->attached = FALSE;
+ node_printf(node, "node attach failed\n");
+ node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
+ ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
+ break;
+
+ /* Handle shutdown events */
+ case OCS_EVT_SHUTDOWN:
+ node_printf(node, "%s received\n", ocs_sm_event_name(evt));
+ node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
+ ocs_node_transition(node, __ocs_d_wait_attach_evt_shutdown, NULL);
+ break;
+ case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO:
+ node_printf(node, "%s received\n", ocs_sm_event_name(evt));
+ node->shutdown_reason = OCS_NODE_SHUTDOWN_EXPLICIT_LOGO;
+ ocs_node_transition(node, __ocs_d_wait_attach_evt_shutdown, NULL);
+ break;
+ case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO:
+ node_printf(node, "%s received\n", ocs_sm_event_name(evt));
+ node->shutdown_reason = OCS_NODE_SHUTDOWN_IMPLICIT_LOGO;
+ ocs_node_transition(node, __ocs_d_wait_attach_evt_shutdown, NULL);
+ break;
+ default:
+ __ocs_d_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+/**
+ * @ingroup device_sm
+ * @brief Device node state machine: Wait for a node/domain
+ * attach, then shut down the node.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_d_wait_attach_evt_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_ENTER:
+ ocs_node_hold_frames(node);
+ break;
+
+ case OCS_EVT_EXIT:
+ ocs_node_accept_frames(node);
+ break;
+
+ /* wait for any of these attach events and then shutdown */
+ case OCS_EVT_NODE_ATTACH_OK:
+ node->attached = TRUE;
+ node_printf(node, "Attach evt=%s, proceed to shutdown\n", ocs_sm_event_name(evt));
+ ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
+ break;
+
+ case OCS_EVT_NODE_ATTACH_FAIL:
+ /* node attach failed, shutdown the node */
+ node->attached = FALSE;
+ node_printf(node, "Attach evt=%s, proceed to shutdown\n", ocs_sm_event_name(evt));
+ ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
+ break;
+
+ /* ignore shutdown events as we're already in shutdown path */
+ case OCS_EVT_SHUTDOWN:
+ /* have default shutdown event take precedence */
+ node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
+ /* fall through */
+ case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO:
+ case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO:
+ node_printf(node, "%s received\n", ocs_sm_event_name(evt));
+ break;
+
+ default:
+ __ocs_d_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+/**
+ * @ingroup device_sm
+ * @brief Device node state machine: Port is logged in.
+ *
+ * <h3 class="desc">Description</h3>
+ * This state is entered when a remote port has completed port login (PLOGI).
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process
+ * @param arg Per event optional argument
+ *
+ * @return Returns NULL.
+ */
+void *
+__ocs_d_port_logged_in(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ ocs_node_cb_t *cbdata = arg;
+ std_node_state_decl();
+
+ node_sm_trace();
+
+	/* TODO: I+T: what if the PLOGI response has not yet been received? */
+
+ switch(evt) {
+ case OCS_EVT_ENTER:
+ /* Normal case for I or I+T */
+ if (node->sport->enable_ini && !FC_ADDR_IS_DOMAIN_CTRL(node->rnode.fc_id)
+ && !node->sent_prli) {
+ /* sm: if enable_ini / send PRLI */
+ ocs_send_prli(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
+ node->sent_prli = TRUE;
+ /* can now expect ELS_REQ_OK/FAIL/RJT */
+ }
+ break;
+
+ case OCS_EVT_FCP_CMD_RCVD: {
+		/* For target functionality, send a PRLO and drop the command frame. */
+ if (node->sport->enable_tgt) {
+ if (ocs_send_prlo(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT,
+ OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL)) {
+ ocs_node_transition(node, __ocs_d_wait_prlo_rsp, NULL);
+ }
+ }
+ break;
+ }
+
+ case OCS_EVT_PRLI_RCVD: {
+ fc_header_t *hdr = cbdata->header->dma.virt;
+
+ /* Normal for T or I+T */
+
+ ocs_process_prli_payload(node, cbdata->payload->dma.virt);
+ ocs_d_send_prli_rsp(cbdata->io, ocs_be16toh(hdr->ox_id));
+ break;
+ }
+
+ case OCS_EVT_SRRS_ELS_REQ_OK: { /* PRLI response */
+ /* Normal case for I or I+T */
+ if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PRLI, __ocs_d_common, __func__)) {
+ return NULL;
+ }
+ ocs_assert(node->els_req_cnt, NULL);
+ node->els_req_cnt--;
+ /* sm: process PRLI payload */
+ ocs_process_prli_payload(node, cbdata->els->els_rsp.virt);
+ ocs_node_transition(node, __ocs_d_device_ready, NULL);
+ break;
+ }
+
+ case OCS_EVT_SRRS_ELS_REQ_FAIL: { /* PRLI response failed */
+ /* I, I+T, assume some link failure, shutdown node */
+ if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PRLI, __ocs_d_common, __func__)) {
+ return NULL;
+ }
+ ocs_assert(node->els_req_cnt, NULL);
+ node->els_req_cnt--;
+ ocs_node_post_event(node, OCS_EVT_SHUTDOWN, NULL);
+ break;
+ }
+
+ case OCS_EVT_SRRS_ELS_REQ_RJT: {/* PRLI rejected by remote */
+ /* Normal for I, I+T (connected to an I) */
+		/* The remote node doesn't want to be a target; stay here and wait for a PRLI
+		 * from the remote node if it really wants to connect to us as a target */
+ if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PRLI, __ocs_d_common, __func__)) {
+ return NULL;
+ }
+ ocs_assert(node->els_req_cnt, NULL);
+ node->els_req_cnt--;
+ break;
+ }
+
+ case OCS_EVT_SRRS_ELS_CMPL_OK: {
+ /* Normal T, I+T, target-server rejected the process login */
+		/* This would be received only in the case where we sent LS_RJT for the PRLI, so
+		 * do nothing. (note: as T only, we could shut down the node)
+		 */
+ ocs_assert(node->els_cmpl_cnt, NULL);
+ node->els_cmpl_cnt--;
+ break;
+ }
+
+ case OCS_EVT_PLOGI_RCVD: {
+ /* sm: save sparams, set send_plogi_acc, post implicit logout
+ * Save plogi parameters */
+ ocs_node_save_sparms(node, cbdata->payload->dma.virt);
+ ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PLOGI);
+
+ /* Restart node attach with new service parameters, and send ACC */
+ ocs_node_post_event(node, OCS_EVT_SHUTDOWN_IMPLICIT_LOGO, NULL);
+ break;
+ }
+
+ case OCS_EVT_LOGO_RCVD: {
+ /* I, T, I+T */
+ fc_header_t *hdr = cbdata->header->dma.virt;
+ node_printf(node, "%s received attached=%d\n", ocs_sm_event_name(evt), node->attached);
+ ocs_send_logo_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
+ ocs_node_transition(node, __ocs_d_wait_logo_acc_cmpl, NULL);
+ break;
+ }
+
+ default:
+ __ocs_d_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+/**
+ * @ingroup device_sm
+ * @brief Device node state machine: Wait for a LOGO accept.
+ *
+ * <h3 class="desc">Description</h3>
+ * Waits for a LOGO accept completion.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process
+ * @param arg Per event optional argument
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_d_wait_logo_acc_cmpl(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_ENTER:
+ ocs_node_hold_frames(node);
+ break;
+
+ case OCS_EVT_EXIT:
+ ocs_node_accept_frames(node);
+ break;
+
+ case OCS_EVT_SRRS_ELS_CMPL_OK:
+ case OCS_EVT_SRRS_ELS_CMPL_FAIL:
+ /* sm: / post explicit logout */
+ ocs_assert(node->els_cmpl_cnt, NULL);
+ node->els_cmpl_cnt--;
+ ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
+ break;
+ default:
+ __ocs_d_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+/**
+ * @ingroup device_sm
+ * @brief Device node state machine: Device is ready.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_d_device_ready(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ ocs_node_cb_t *cbdata = arg;
+ std_node_state_decl();
+
+ if (evt != OCS_EVT_FCP_CMD_RCVD) {
+ node_sm_trace();
+ }
+
+ switch(evt) {
+ case OCS_EVT_ENTER:
+ node->fcp_enabled = TRUE;
+ if (node->init) {
+ device_printf(ocs->dev, "[%s] found (initiator) WWPN %s WWNN %s\n", node->display_name,
+ node->wwpn, node->wwnn);
+ if (node->sport->enable_tgt)
+ ocs_scsi_new_initiator(node);
+ }
+ if (node->targ) {
+ device_printf(ocs->dev, "[%s] found (target) WWPN %s WWNN %s\n", node->display_name,
+ node->wwpn, node->wwnn);
+ if (node->sport->enable_ini)
+ ocs_scsi_new_target(node);
+ }
+ break;
+
+ case OCS_EVT_EXIT:
+ node->fcp_enabled = FALSE;
+ break;
+
+ case OCS_EVT_PLOGI_RCVD: {
+ /* sm: save sparams, set send_plogi_acc, post implicit logout
+ * Save plogi parameters */
+ ocs_node_save_sparms(node, cbdata->payload->dma.virt);
+ ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PLOGI);
+
+ /* Restart node attach with new service parameters, and send ACC */
+ ocs_node_post_event(node, OCS_EVT_SHUTDOWN_IMPLICIT_LOGO, NULL);
+ break;
+ }
+
+ case OCS_EVT_PDISC_RCVD: {
+ fc_header_t *hdr = cbdata->header->dma.virt;
+ ocs_send_plogi_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
+ break;
+ }
+
+ case OCS_EVT_PRLI_RCVD: {
+ /* T, I+T: remote initiator is slow to get started */
+ fc_header_t *hdr = cbdata->header->dma.virt;
+
+ ocs_process_prli_payload(node, cbdata->payload->dma.virt);
+
+ /* sm: send PRLI acc/reject */
+ if (ocs->fc_type == node->fc_type)
+ ocs_send_prli_acc(cbdata->io, ocs_be16toh(hdr->ox_id), ocs->fc_type, NULL, NULL);
+ else
+ ocs_send_ls_rjt(cbdata->io, ocs_be16toh(hdr->ox_id), FC_REASON_UNABLE_TO_PERFORM,
+ FC_EXPL_REQUEST_NOT_SUPPORTED, 0, NULL, NULL);
+ break;
+ }
+
+ case OCS_EVT_PRLO_RCVD: {
+ fc_header_t *hdr = cbdata->header->dma.virt;
+ fc_prlo_payload_t *prlo = cbdata->payload->dma.virt;
+
+ /* sm: send PRLO acc/reject */
+ if (ocs->fc_type == prlo->type)
+ ocs_send_prlo_acc(cbdata->io, ocs_be16toh(hdr->ox_id), ocs->fc_type, NULL, NULL);
+ else
+ ocs_send_ls_rjt(cbdata->io, ocs_be16toh(hdr->ox_id), FC_REASON_UNABLE_TO_PERFORM,
+ FC_EXPL_REQUEST_NOT_SUPPORTED, 0, NULL, NULL);
+		/* TODO: need implicit logout */
+ break;
+ }
+
+ case OCS_EVT_LOGO_RCVD: {
+ fc_header_t *hdr = cbdata->header->dma.virt;
+ node_printf(node, "%s received attached=%d\n", ocs_sm_event_name(evt), node->attached);
+ ocs_send_logo_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
+ ocs_node_transition(node, __ocs_d_wait_logo_acc_cmpl, NULL);
+ break;
+ }
+
+ case OCS_EVT_ADISC_RCVD: {
+ fc_header_t *hdr = cbdata->header->dma.virt;
+ ocs_send_adisc_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
+ break;
+ }
+
+ case OCS_EVT_RRQ_RCVD: {
+ fc_header_t *hdr = cbdata->header->dma.virt;
+ /* Send LS_ACC */
+ ocs_send_ls_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
+ break;
+ }
+
+ case OCS_EVT_ABTS_RCVD:
+ ocs_process_abts(cbdata->io, cbdata->header->dma.virt);
+ break;
+
+ case OCS_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
+ break;
+
+ case OCS_EVT_NODE_REFOUND:
+ break;
+
+ case OCS_EVT_NODE_MISSING:
+ if (node->sport->enable_rscn) {
+ ocs_node_transition(node, __ocs_d_device_gone, NULL);
+ }
+ break;
+
+ case OCS_EVT_SRRS_ELS_CMPL_OK:
+ /* T, or I+T, PRLI accept completed ok */
+ ocs_assert(node->els_cmpl_cnt, NULL);
+ node->els_cmpl_cnt--;
+ break;
+
+ case OCS_EVT_SRRS_ELS_CMPL_FAIL:
+ /* T, or I+T, PRLI accept failed to complete */
+ ocs_assert(node->els_cmpl_cnt, NULL);
+ node->els_cmpl_cnt--;
+ node_printf(node, "Failed to send PRLI LS_ACC\n");
+ break;
+
+ default:
+ __ocs_d_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+/**
+ * @ingroup device_sm
+ * @brief Device node state machine: Node is gone (absent from GID_PT).
+ *
+ * <h3 class="desc">Description</h3>
+ * State entered when a node is detected as being gone (absent from GID_PT).
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process
+ * @param arg Per event optional argument
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_d_device_gone(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ int32_t rc = OCS_SCSI_CALL_COMPLETE;
+ int32_t rc_2 = OCS_SCSI_CALL_COMPLETE;
+ ocs_node_cb_t *cbdata = arg;
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_ENTER: {
+ const char *labels[] = {"none", "initiator", "target", "initiator+target"};
+
+ device_printf(ocs->dev, "[%s] missing (%s) WWPN %s WWNN %s\n", node->display_name,
+ labels[(node->targ << 1) | (node->init)], node->wwpn, node->wwnn);
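+		/* Example: with node->targ = 1 and node->init = 1, the label
+		 * index is (1 << 1) | 1 = 3, selecting "initiator+target". */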
+
+ switch(ocs_node_get_enable(node)) {
+ case OCS_NODE_ENABLE_T_TO_T:
+ case OCS_NODE_ENABLE_I_TO_T:
+ case OCS_NODE_ENABLE_IT_TO_T:
+ rc = ocs_scsi_del_target(node, OCS_SCSI_TARGET_MISSING);
+ break;
+
+ case OCS_NODE_ENABLE_T_TO_I:
+ case OCS_NODE_ENABLE_I_TO_I:
+ case OCS_NODE_ENABLE_IT_TO_I:
+ rc = ocs_scsi_del_initiator(node, OCS_SCSI_INITIATOR_MISSING);
+ break;
+
+ case OCS_NODE_ENABLE_T_TO_IT:
+ rc = ocs_scsi_del_initiator(node, OCS_SCSI_INITIATOR_MISSING);
+ break;
+
+ case OCS_NODE_ENABLE_I_TO_IT:
+ rc = ocs_scsi_del_target(node, OCS_SCSI_TARGET_MISSING);
+ break;
+
+ case OCS_NODE_ENABLE_IT_TO_IT:
+ rc = ocs_scsi_del_initiator(node, OCS_SCSI_INITIATOR_MISSING);
+ rc_2 = ocs_scsi_del_target(node, OCS_SCSI_TARGET_MISSING);
+ break;
+
+ default:
+ rc = OCS_SCSI_CALL_COMPLETE;
+ break;
+
+ }
+
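+		/* (Example: for OCS_NODE_ENABLE_IT_TO_IT the node was removed
+		 * from both roles above, so OCS_EVT_SHUTDOWN is posted below
+		 * only if both SCSI-layer calls returned
+		 * OCS_SCSI_CALL_COMPLETE.) */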
+ if ((rc == OCS_SCSI_CALL_COMPLETE) && (rc_2 == OCS_SCSI_CALL_COMPLETE)) {
+ ocs_node_post_event(node, OCS_EVT_SHUTDOWN, NULL);
+ }
+
+ break;
+ }
+ case OCS_EVT_NODE_REFOUND:
+	/* two approaches: reauthenticate with PLOGI/PRLI, or with ADISC */
+
+ /* reauthenticate with PLOGI/PRLI */
+ /* ocs_node_transition(node, __ocs_d_discovered, NULL); */
+
+ /* reauthenticate with ADISC
+ * sm: send ADISC */
+ ocs_send_adisc(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
+ ocs_node_transition(node, __ocs_d_wait_adisc_rsp, NULL);
+ break;
+
+ case OCS_EVT_PLOGI_RCVD: {
+ /* sm: save sparams, set send_plogi_acc, post implicit logout
+ * Save plogi parameters */
+ ocs_node_save_sparms(node, cbdata->payload->dma.virt);
+ ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PLOGI);
+
+ /* Restart node attach with new service parameters, and send ACC */
+ ocs_node_post_event(node, OCS_EVT_SHUTDOWN_IMPLICIT_LOGO, NULL);
+ break;
+ }
+
+ case OCS_EVT_FCP_CMD_RCVD: {
+		/* most likely a stale frame (received prior to link down); an attempt
+		 * to send a LOGO would probably time out and eat up 20s, so drop the FCP_CMND
+		 */
+ node_printf(node, "FCP_CMND received, drop\n");
+ break;
+ }
+ case OCS_EVT_LOGO_RCVD: {
+ /* I, T, I+T */
+ fc_header_t *hdr = cbdata->header->dma.virt;
+ node_printf(node, "%s received attached=%d\n", ocs_sm_event_name(evt), node->attached);
+ /* sm: send LOGO acc */
+ ocs_send_logo_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
+ ocs_node_transition(node, __ocs_d_wait_logo_acc_cmpl, NULL);
+ break;
+ }
+ default:
+ __ocs_d_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+/**
+ * @ingroup device_sm
+ * @brief Device node state machine: Wait for the ADISC response.
+ *
+ * <h3 class="desc">Description</h3>
+ * Waits for the ADISC response from the remote node.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_d_wait_adisc_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ ocs_node_cb_t *cbdata = arg;
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_SRRS_ELS_REQ_OK:
+ if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_ADISC, __ocs_d_common, __func__)) {
+ return NULL;
+ }
+ ocs_assert(node->els_req_cnt, NULL);
+ node->els_req_cnt--;
+ ocs_node_transition(node, __ocs_d_device_ready, NULL);
+ break;
+
+ case OCS_EVT_SRRS_ELS_REQ_RJT:
+		/* received an LS_RJT; in this case, send a shutdown (explicit logout)
+		 * event, which will unregister the node, and start over with PLOGI
+		 */
+ if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_ADISC, __ocs_d_common, __func__)) {
+ return NULL;
+ }
+ ocs_assert(node->els_req_cnt, NULL);
+ node->els_req_cnt--;
+		/* sm: post explicit logout */
+ ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
+ break;
+
+ case OCS_EVT_LOGO_RCVD: {
+ /* In this case, we have the equivalent of an LS_RJT for the ADISC,
+ * so we need to abort the ADISC, and re-login with PLOGI
+ */
+		/* sm: request abort, send LOGO acc */
+ fc_header_t *hdr = cbdata->header->dma.virt;
+ node_printf(node, "%s received attached=%d\n", ocs_sm_event_name(evt), node->attached);
+ ocs_send_logo_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
+ ocs_node_transition(node, __ocs_d_wait_logo_acc_cmpl, NULL);
+ break;
+ }
+ default:
+ __ocs_d_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+
+ return NULL;
+}
diff --git a/sys/dev/ocs_fc/ocs_device.h b/sys/dev/ocs_fc/ocs_device.h
new file mode 100644
index 000000000000..337737aeb944
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs_device.h
@@ -0,0 +1,133 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/**
+ * @file
+ * Node state machine functions for remote device node sm
+ */
+
+#if !defined(__OCS_DEVICE_H__)
+#define __OCS_DEVICE_H__
+
+/***************************************************************************
+ * Receive queue configuration
+ */
+
+#ifndef OCS_FC_RQ_SIZE_DEFAULT
+#define OCS_FC_RQ_SIZE_DEFAULT 1024
+#endif
+
+/***************************************************************************
+ * IO Configuration
+ */
+
+/**
+ * @brief Defines the number of SGLs allocated on each IO object
+ */
+#ifndef OCS_FC_MAX_SGL
+#define OCS_FC_MAX_SGL 128
+#endif
+
+/***************************************************************************
+ * DIF Configuration
+ */
+
+/**
+ * @brief Defines the DIF seed value used for the CRC calculation.
+ */
+#ifndef OCS_FC_DIF_SEED
+#define OCS_FC_DIF_SEED 0
+#endif
+
+/***************************************************************************
+ * Timeouts
+ */
+#ifndef OCS_FC_ELS_SEND_DEFAULT_TIMEOUT
+#define OCS_FC_ELS_SEND_DEFAULT_TIMEOUT 0
+#endif
+
+#ifndef OCS_FC_ELS_CT_SEND_DEFAULT_TIMEOUT
+#define OCS_FC_ELS_CT_SEND_DEFAULT_TIMEOUT 5
+#endif
+
+#ifndef OCS_FC_ELS_DEFAULT_RETRIES
+#define OCS_FC_ELS_DEFAULT_RETRIES 3
+#endif
+
+#ifndef OCS_FC_FLOGI_TIMEOUT_SEC
+#define OCS_FC_FLOGI_TIMEOUT_SEC 5 /* shorter than default */
+#endif
+
+#ifndef OCS_FC_DOMAIN_SHUTDOWN_TIMEOUT_USEC
+#define OCS_FC_DOMAIN_SHUTDOWN_TIMEOUT_USEC 30000000 /* 30 seconds */
+#endif
+
+/***************************************************************************
+ * Watermark
+ */
+#ifndef OCS_WATERMARK_HIGH_PCT
+#define OCS_WATERMARK_HIGH_PCT 90
+#endif
+#ifndef OCS_WATERMARK_LOW_PCT
+#define OCS_WATERMARK_LOW_PCT 80
+#endif
+#ifndef OCS_IO_WATERMARK_PER_INITIATOR
+#define OCS_IO_WATERMARK_PER_INITIATOR 8
+#endif
+
+extern void ocs_node_init_device(ocs_node_t *node, int send_plogi);
+extern void ocs_process_prli_payload(ocs_node_t *node, fc_prli_payload_t *prli);
+extern void ocs_d_send_prli_rsp(ocs_io_t *io, uint16_t ox_id);
+extern void ocs_send_ls_acc_after_attach(ocs_io_t *io, fc_header_t *hdr, ocs_node_send_ls_acc_e ls);
+
+extern void *__ocs_d_wait_loop(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_d_wait_plogi_acc_cmpl(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_d_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_d_wait_plogi_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_d_wait_plogi_rsp_recvd_prli(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_d_wait_domain_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_d_wait_topology_notify(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_d_wait_node_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_d_wait_attach_evt_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_d_initiate_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_d_port_logged_in(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_d_wait_logo_acc_cmpl(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_d_device_ready(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_d_device_gone(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_d_wait_adisc_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_d_wait_logo_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_d_wait_prlo_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+
+#endif /* __OCS_DEVICE_H__ */
diff --git a/sys/dev/ocs_fc/ocs_domain.c b/sys/dev/ocs_fc/ocs_domain.c
new file mode 100644
index 000000000000..fe5489343d68
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs_domain.c
@@ -0,0 +1,1585 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/**
+ * @file
+ * Handles the domain object callback from the HW.
+ */
+
+/*!
+@defgroup domain_sm Domain State Machine: States
+*/
+
+#include "ocs.h"
+
+#include "ocs_fabric.h"
+#include "ocs_device.h"
+
+#define domain_sm_trace(domain) \
+ do { \
+ if (OCS_LOG_ENABLE_DOMAIN_SM_TRACE(domain->ocs)) \
+ ocs_log_info(domain->ocs, "[domain] %-20s %-20s\n", __func__, ocs_sm_event_name(evt)); \
+ } while (0)
+
+#define domain_trace(domain, fmt, ...) \
+ do { \
+ if (OCS_LOG_ENABLE_DOMAIN_SM_TRACE(domain ? domain->ocs : NULL)) \
+ ocs_log_info(domain ? domain->ocs : NULL, fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define domain_printf(domain, fmt, ...) \
+ do { \
+ ocs_log_info(domain ? domain->ocs : NULL, fmt, ##__VA_ARGS__); \
+ } while (0)
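+/*
+ * Example (hypothetical values): with domain SM tracing enabled,
+ * domain_sm_trace() logs one line per state/event pair, e.g.
+ *
+ *     [domain] __ocs_domain_init    OCS_EVT_DOMAIN_FOUND
+ *
+ * which makes the domain state machine's flow easy to follow in the log.
+ */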
+
+void ocs_mgmt_domain_list(ocs_textbuf_t *textbuf, void *domain);
+void ocs_mgmt_domain_get_all(ocs_textbuf_t *textbuf, void *domain);
+int ocs_mgmt_domain_get(ocs_textbuf_t *textbuf, char *parent, char *name, void *domain);
+int ocs_mgmt_domain_set(char *parent, char *name, char *value, void *domain);
+int ocs_mgmt_domain_exec(char *parent, char *action, void *arg_in, uint32_t arg_in_length,
+ void *arg_out, uint32_t arg_out_length, void *domain);
+
+static ocs_mgmt_functions_t domain_mgmt_functions = {
+ .get_list_handler = ocs_mgmt_domain_list,
+ .get_handler = ocs_mgmt_domain_get,
+ .get_all_handler = ocs_mgmt_domain_get_all,
+ .set_handler = ocs_mgmt_domain_set,
+ .exec_handler = ocs_mgmt_domain_exec,
+};
+
+/**
+ * @brief Accept domain callback events from the HW.
+ *
+ * <h3 class="desc">Description</h3>
+ * HW calls this function with various domain-related events.
+ *
+ * @param arg Application-specified argument.
+ * @param event Domain event.
+ * @param data Event specific data.
+ *
+ * @return Returns 0 on success; or a negative error value on failure.
+ */
+
+int32_t
+ocs_domain_cb(void *arg, ocs_hw_domain_event_e event, void *data)
+{
+ ocs_t *ocs = arg;
+ ocs_domain_t *domain = NULL;
+ int32_t rc = 0;
+
+ ocs_assert(data, -1);
+
+ if (event != OCS_HW_DOMAIN_FOUND) {
+ domain = data;
+ }
+
+ switch (event) {
+ case OCS_HW_DOMAIN_FOUND: {
+ uint64_t fcf_wwn = 0;
+ ocs_domain_record_t *drec = data;
+ ocs_assert(drec, -1);
+
+ /* extract the fcf_wwn */
+ fcf_wwn = ocs_be64toh(*((uint64_t*)drec->wwn));
+
+ /* lookup domain, or allocate a new one if one doesn't exist already */
+ domain = ocs_domain_find(ocs, fcf_wwn);
+ if (domain == NULL) {
+ domain = ocs_domain_alloc(ocs, fcf_wwn);
+ if (domain == NULL) {
+ ocs_log_err(ocs, "ocs_domain_alloc() failed\n");
+ rc = -1;
+ break;
+ }
+ ocs_sm_transition(&domain->drvsm, __ocs_domain_init, NULL);
+ }
+ ocs_domain_post_event(domain, OCS_EVT_DOMAIN_FOUND, drec);
+ break;
+ }
+
+ case OCS_HW_DOMAIN_LOST:
+ domain_trace(domain, "OCS_HW_DOMAIN_LOST:\n");
+ ocs_domain_hold_frames(domain);
+ ocs_domain_post_event(domain, OCS_EVT_DOMAIN_LOST, NULL);
+ break;
+
+ case OCS_HW_DOMAIN_ALLOC_OK: {
+ domain_trace(domain, "OCS_HW_DOMAIN_ALLOC_OK:\n");
+ domain->instance_index = 0;
+ ocs_domain_post_event(domain, OCS_EVT_DOMAIN_ALLOC_OK, NULL);
+ break;
+ }
+
+ case OCS_HW_DOMAIN_ALLOC_FAIL:
+ domain_trace(domain, "OCS_HW_DOMAIN_ALLOC_FAIL:\n");
+ ocs_domain_post_event(domain, OCS_EVT_DOMAIN_ALLOC_FAIL, NULL);
+ break;
+
+ case OCS_HW_DOMAIN_ATTACH_OK:
+ domain_trace(domain, "OCS_HW_DOMAIN_ATTACH_OK:\n");
+ ocs_domain_post_event(domain, OCS_EVT_DOMAIN_ATTACH_OK, NULL);
+ break;
+
+ case OCS_HW_DOMAIN_ATTACH_FAIL:
+ domain_trace(domain, "OCS_HW_DOMAIN_ATTACH_FAIL:\n");
+ ocs_domain_post_event(domain, OCS_EVT_DOMAIN_ATTACH_FAIL, NULL);
+ break;
+
+ case OCS_HW_DOMAIN_FREE_OK:
+ domain_trace(domain, "OCS_HW_DOMAIN_FREE_OK:\n");
+ ocs_domain_post_event(domain, OCS_EVT_DOMAIN_FREE_OK, NULL);
+ break;
+
+ case OCS_HW_DOMAIN_FREE_FAIL:
+ domain_trace(domain, "OCS_HW_DOMAIN_FREE_FAIL:\n");
+ ocs_domain_post_event(domain, OCS_EVT_DOMAIN_FREE_FAIL, NULL);
+ break;
+
+ default:
+ ocs_log_warn(ocs, "unsupported event %#x\n", event);
+ }
+
+ return rc;
+}
+
+
+/**
+ * @brief Find the domain, given its FCF_WWN.
+ *
+ * <h3 class="desc">Description</h3>
+ * Search the domain_list to find a matching domain object.
+ *
+ * @param ocs Pointer to the OCS device.
+ * @param fcf_wwn FCF WWN to find.
+ *
+ * @return Returns the pointer to the domain if found; or NULL otherwise.
+ */
+
+ocs_domain_t *
+ocs_domain_find(ocs_t *ocs, uint64_t fcf_wwn)
+{
+ ocs_domain_t *domain = NULL;
+
+ /* Check to see if this domain is already allocated */
+ ocs_device_lock(ocs);
+ ocs_list_foreach(&ocs->domain_list, domain) {
+ if (fcf_wwn == domain->fcf_wwn) {
+ break;
+ }
+ }
+ ocs_device_unlock(ocs);
+ return domain;
+}
+
+/**
+ * @brief Allocate a domain object.
+ *
+ * <h3 class="desc">Description</h3>
+ * A domain object is allocated and initialized. It is associated with the
+ * \c ocs argument.
+ *
+ * @param ocs Pointer to the OCS device.
+ * @param fcf_wwn FCF WWN of the domain.
+ *
+ * @return Returns a pointer to the ocs_domain_t object; or NULL.
+ */
+
+ocs_domain_t *
+ocs_domain_alloc(ocs_t *ocs, uint64_t fcf_wwn)
+{
+ ocs_domain_t *domain;
+
+ ocs_assert(ocs, NULL);
+
+ domain = ocs_malloc(ocs, sizeof(*domain), OCS_M_NOWAIT | OCS_M_ZERO);
+ if (domain) {
+
+ domain->ocs = ocs;
+ domain->instance_index = ocs->domain_instance_count++;
+ domain->drvsm.app = domain;
+ ocs_domain_lock_init(domain);
+ ocs_lock_init(ocs, &domain->lookup_lock, "Domain lookup[%d]", domain->instance_index);
+
+ /* Allocate a sparse vector for sport FC_ID's */
+ domain->lookup = spv_new(ocs);
+ if (domain->lookup == NULL) {
+ ocs_log_err(ocs, "spv_new() failed\n");
+ ocs_free(ocs, domain, sizeof(*domain));
+ return NULL;
+ }
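+		/* (FC_IDs are 24-bit values; the sparse vector keeps this
+		 * FC_ID-to-sport lookup from needing a fully populated
+		 * 2^24-entry table per domain.) */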
+
+ ocs_list_init(&domain->sport_list, ocs_sport_t, link);
+ domain->fcf_wwn = fcf_wwn;
+ ocs_log_debug(ocs, "Domain allocated: wwn %016" PRIX64 "\n", domain->fcf_wwn);
+ domain->femul_enable = (ocs->ctrlmask & OCS_CTRLMASK_ENABLE_FABRIC_EMULATION) != 0;
+
+ ocs_device_lock(ocs);
+ /* if this is the first domain, then assign it as the "root" domain */
+ if (ocs_list_empty(&ocs->domain_list)) {
+ ocs->domain = domain;
+ }
+ ocs_list_add_tail(&ocs->domain_list, domain);
+ ocs_device_unlock(ocs);
+
+ domain->mgmt_functions = &domain_mgmt_functions;
+ } else {
+ ocs_log_err(ocs, "domain allocation failed\n");
+ }
+
+ return domain;
+}
+
+/**
+ * @brief Free a domain object.
+ *
+ * <h3 class="desc">Description</h3>
+ * The domain object is freed.
+ *
+ * @param domain Domain object to free.
+ *
+ * @return None.
+ */
+
+void
+ocs_domain_free(ocs_domain_t *domain)
+{
+ ocs_t *ocs;
+
+ ocs_assert(domain);
+ ocs_assert(domain->ocs);
+
+ /* Hold frames to clear the domain pointer from the xport lookup */
+ ocs_domain_hold_frames(domain);
+
+ ocs = domain->ocs;
+
+ ocs_log_debug(ocs, "Domain free: wwn %016" PRIX64 "\n", domain->fcf_wwn);
+
+ spv_del(domain->lookup);
+ domain->lookup = NULL;
+
+ ocs_device_lock(ocs);
+ ocs_list_remove(&ocs->domain_list, domain);
+ if (domain == ocs->domain) {
+ /* set global domain to the new head */
+ ocs->domain = ocs_list_get_head(&ocs->domain_list);
+ if (ocs->domain) {
+ ocs_log_debug(ocs, "setting new domain, old=%p new=%p\n",
+ domain, ocs->domain);
+ }
+ }
+
+ if (ocs_list_empty(&ocs->domain_list) && ocs->domain_list_empty_cb ) {
+ (*ocs->domain_list_empty_cb)(ocs, ocs->domain_list_empty_cb_arg);
+ }
+ ocs_device_unlock(ocs);
+
+ ocs_lock_free(&domain->lookup_lock);
+
+ ocs_free(ocs, domain, sizeof(*domain));
+}
+
+/**
+ * @brief Free memory resources of a domain object.
+ *
+ * <h3 class="desc">Description</h3>
+ * After the domain object is freed, its child objects are also freed.
+ *
+ * @param domain Pointer to a domain object.
+ *
+ * @return None.
+ */
+
+void
+ocs_domain_force_free(ocs_domain_t *domain)
+{
+ ocs_sport_t *sport;
+ ocs_sport_t *next;
+
+ /* Shutdown domain sm */
+ ocs_sm_disable(&domain->drvsm);
+
+ ocs_scsi_notify_domain_force_free(domain);
+
+ ocs_domain_lock(domain);
+ ocs_list_foreach_safe(&domain->sport_list, sport, next) {
+ ocs_sport_force_free(sport);
+ }
+ ocs_domain_unlock(domain);
+ ocs_hw_domain_force_free(&domain->ocs->hw, domain);
+ ocs_domain_free(domain);
+}
+
+/**
+ * @brief Register a callback when the domain_list goes empty.
+ *
+ * <h3 class="desc">Description</h3>
+ * A function callback may be registered when the domain_list goes empty.
+ *
+ * @param ocs Pointer to a device object.
+ * @param callback Callback function.
+ * @param arg Callback argument.
+ *
+ * @return None.
+ */
+
+void
+ocs_register_domain_list_empty_cb(ocs_t *ocs, void (*callback)(ocs_t *ocs, void *arg), void *arg)
+{
+ ocs_device_lock(ocs);
+ ocs->domain_list_empty_cb = callback;
+ ocs->domain_list_empty_cb_arg = arg;
+ if (ocs_list_empty(&ocs->domain_list) && callback) {
+ (*callback)(ocs, arg);
+ }
+ ocs_device_unlock(ocs);
+}
+
+/**
+ * @brief Return a pointer to the domain, given the instance index.
+ *
+ * <h3 class="desc">Description</h3>
+ * A pointer to the domain context, given by the index, is returned.
+ *
+ * @param ocs Pointer to the driver instance context.
+ * @param index Instance index.
+ *
+ * @return Returns a pointer to the domain; or NULL.
+ */
+
+ocs_domain_t *
+ocs_domain_get_instance(ocs_t *ocs, uint32_t index)
+{
+ ocs_domain_t *domain = NULL;
+
+ if (index >= OCS_MAX_DOMAINS) {
+ ocs_log_err(ocs, "invalid index: %d\n", index);
+ return NULL;
+ }
+ ocs_device_lock(ocs);
+ ocs_list_foreach(&ocs->domain_list, domain) {
+ if (domain->instance_index == index) {
+ break;
+ }
+ }
+ ocs_device_unlock(ocs);
+ return domain;
+}
+
+/**
+ * @ingroup domain_sm
+ * @brief Domain state machine: Common event handler.
+ *
+ * <h3 class="desc">Description</h3>
+ * Common/shared events are handled here for the domain state machine.
+ *
+ * @param funcname Function name text.
+ * @param ctx Domain state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+static void *
+__ocs_domain_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ ocs_domain_t *domain = ctx->app;
+
+ switch(evt) {
+ case OCS_EVT_ENTER:
+ case OCS_EVT_REENTER:
+ case OCS_EVT_EXIT:
+ case OCS_EVT_ALL_CHILD_NODES_FREE:
+		/* this can arise if an FLOGI fails on the SPORT, and the SPORT is shut down */
+ break;
+ default:
+ ocs_log_warn(domain->ocs, "%-20s %-20s not handled\n", funcname, ocs_sm_event_name(evt));
+ break;
+ }
+
+ return NULL;
+}
+
+/**
+ * @ingroup domain_sm
+ * @brief Domain state machine: Common shutdown.
+ *
+ * <h3 class="desc">Description</h3>
+ * Handles common shutdown events.
+ *
+ * @param funcname Function name text.
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+static void *
+__ocs_domain_common_shutdown(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ ocs_domain_t *domain = ctx->app;
+
+ switch(evt) {
+ case OCS_EVT_ENTER:
+ case OCS_EVT_REENTER:
+ case OCS_EVT_EXIT:
+ break;
+ case OCS_EVT_DOMAIN_FOUND:
+ ocs_assert(arg, NULL);
+ /* sm: / save drec, mark domain_found_pending */
+ ocs_memcpy(&domain->pending_drec, arg, sizeof(domain->pending_drec));
+ domain->domain_found_pending = TRUE;
+ break;
+ case OCS_EVT_DOMAIN_LOST:
+		/* the saved drec is no longer valid
+		 * sm: unmark domain_found_pending */
+ domain->domain_found_pending = FALSE;
+ break;
+
+ default:
+ ocs_log_warn(domain->ocs, "%-20s %-20s not handled\n", funcname, ocs_sm_event_name(evt));
+ break;
+ }
+
+ return NULL;
+}
+
+#define std_domain_state_decl(...) \
+ ocs_domain_t *domain = NULL; \
+ ocs_t *ocs = NULL; \
+ \
+ ocs_assert(ctx, NULL); \
+ ocs_assert(ctx->app, NULL); \
+ domain = ctx->app; \
+ ocs_assert(domain->ocs, NULL); \
+ ocs = domain->ocs; \
+ ocs_assert(ocs->xport, NULL);
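+/*
+ * Usage sketch for the macro above (the handler name is hypothetical; the
+ * real handlers below follow this same pattern):
+ *
+ *     void *
+ *     __ocs_domain_example_state(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+ *     {
+ *         std_domain_state_decl();
+ *
+ *         domain_sm_trace(domain);
+ *
+ *         switch(evt) {
+ *         default:
+ *             __ocs_domain_common(__func__, ctx, evt, arg);
+ *             break;
+ *         }
+ *         return NULL;
+ *     }
+ *
+ * The macro declares and validates "domain" and "ocs" before any event is
+ * handled, returning NULL if the state machine context is inconsistent.
+ */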
+
+/**
+ * @ingroup domain_sm
+ * @brief Domain state machine: Initial state.
+ *
+ * <h3 class="desc">Description</h3>
+ * The initial state for a domain. Each domain is initialized to
+ * this state at start of day (SOD).
+ *
+ * @param ctx Domain state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_domain_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_domain_state_decl();
+
+ domain_sm_trace(domain);
+
+ switch(evt) {
+ case OCS_EVT_ENTER:
+ domain->attached = 0;
+ break;
+
+ case OCS_EVT_DOMAIN_FOUND: {
+ int32_t vlan = 0;
+ uint32_t i;
+ ocs_domain_record_t *drec = arg;
+ ocs_sport_t *sport;
+
+ uint64_t my_wwnn = ocs->xport->req_wwnn;
+ uint64_t my_wwpn = ocs->xport->req_wwpn;
+ uint64_t be_wwpn;
+
+		/* For now, the user must specify both the port name and node name, or we let
+		 * the firmware pick both (same as for vports).
+		 * TODO: do we want to allow setting only the port name or only the node name?
+		 */
+ if ((my_wwpn == 0) || (my_wwnn == 0)) {
+			ocs_log_debug(ocs, "using default hardware WWN configuration\n");
+ my_wwpn = ocs_get_wwn(&ocs->hw, OCS_HW_WWN_PORT);
+ my_wwnn = ocs_get_wwn(&ocs->hw, OCS_HW_WWN_NODE);
+ }
+
+ ocs_log_debug(ocs, "Creating base sport using WWPN %016" PRIx64 " WWNN %016" PRIx64 "\n",
+ my_wwpn, my_wwnn);
+
+ /* Allocate a sport and transition to __ocs_sport_allocated */
+ sport = ocs_sport_alloc(domain, my_wwpn, my_wwnn, UINT32_MAX, ocs->enable_ini, ocs->enable_tgt);
+
+ if (sport == NULL) {
+ ocs_log_err(ocs, "ocs_sport_alloc() failed\n");
+ break;
+ }
+ ocs_sm_transition(&sport->sm, __ocs_sport_allocated, NULL);
+
+ /* If domain is ethernet, then fetch the vlan id value */
+ if (drec->is_ethernet) {
+ vlan = ocs_bitmap_search((void *)drec->map.vlan, TRUE, 512 * 8);
+ if (vlan < 0) {
+ ocs_log_err(ocs, "no VLAN id available (FCF=%d)\n",
+ drec->index);
+ break;
+ }
+ }
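+		/* (The search above covers 512 * 8 = 4096 bits, one per
+		 * possible 12-bit VLAN ID.) */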
+
+ be_wwpn = ocs_htobe64(sport->wwpn);
+
+ /* allocate ocs_sli_port_t object for local port
+ * Note: drec->fc_id is ALPA from read_topology only if loop
+ */
+ if (ocs_hw_port_alloc(&ocs->hw, sport, NULL, (uint8_t *)&be_wwpn)) {
+ ocs_log_err(ocs, "Can't allocate port\n");
+ ocs_sport_free(sport);
+ break;
+ }
+
+ /* initialize domain object */
+ domain->is_loop = drec->is_loop;
+ domain->is_fc = drec->is_fc;
+
+ /*
+ * If the loop position map includes ALPA == 0, then we are in a public loop (NL_PORT)
+ * Note that the first element of the loopmap[] contains the count of elements, and if
+ * ALPA == 0 is present, it will occupy the first location after the count.
+ */
+ domain->is_nlport = drec->map.loop[1] == 0x00;
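+		/* Illustrative example: a position map of
+		 * loop[] = { 3, 0x00, 0xE8, 0xEF } holds three ALPAs, and
+		 * loop[1] == 0x00 means ALPA 0 is present, so is_nlport is set
+		 * and the public-loop (NL_PORT) path is taken. */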
+
+ if (domain->is_loop) {
+ ocs_log_debug(ocs, "%s fc_id=%#x speed=%d\n",
+ drec->is_loop ? (domain->is_nlport ? "public-loop" : "loop") : "other",
+ drec->fc_id, drec->speed);
+
+ sport->fc_id = drec->fc_id;
+ sport->topology = OCS_SPORT_TOPOLOGY_LOOP;
+ ocs_snprintf(sport->display_name, sizeof(sport->display_name), "s%06x", drec->fc_id);
+
+ if (ocs->enable_ini) {
+ uint32_t count = drec->map.loop[0];
+ ocs_log_debug(ocs, "%d position map entries\n", count);
+ for (i = 1; i <= count; i++) {
+ if (drec->map.loop[i] != drec->fc_id) {
+ ocs_node_t *node;
+
+ ocs_log_debug(ocs, "%#x -> %#x\n",
+ drec->fc_id, drec->map.loop[i]);
+ node = ocs_node_alloc(sport, drec->map.loop[i], FALSE, TRUE);
+ if (node == NULL) {
+ ocs_log_err(ocs, "ocs_node_alloc() failed\n");
+ break;
+ }
+ ocs_node_transition(node, __ocs_d_wait_loop, NULL);
+ }
+ }
+ }
+ }
+
+ /* Initiate HW domain alloc */
+ if (ocs_hw_domain_alloc(&ocs->hw, domain, drec->index, vlan)) {
+ ocs_log_err(ocs, "Failed to initiate HW domain allocation\n");
+ break;
+ }
+ ocs_sm_transition(ctx, __ocs_domain_wait_alloc, arg);
+ break;
+ }
+ default:
+ __ocs_domain_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+/**
+ * @ingroup domain_sm
+ * @brief Domain state machine: Wait for the domain allocation to complete.
+ *
+ * <h3 class="desc">Description</h3>
+ * Waits for the domain state to be allocated. After the HW domain
+ * allocation process has been initiated, this state waits for
+ * that process to complete (i.e. a domain-alloc-ok event).
+ *
+ * @param ctx Domain state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_domain_wait_alloc(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ ocs_sport_t *sport;
+ std_domain_state_decl();
+
+ domain_sm_trace(domain);
+
+ switch(evt) {
+ case OCS_EVT_DOMAIN_ALLOC_OK: {
+ char prop_buf[32];
+ uint64_t wwn_bump = 0;
+ fc_plogi_payload_t *sp;
+
+ if (ocs_get_property("wwn_bump", prop_buf, sizeof(prop_buf)) == 0) {
+ wwn_bump = ocs_strtoull(prop_buf, 0, 0);
+ }
+
+ sport = domain->sport;
+ ocs_assert(sport, NULL);
+ sp = (fc_plogi_payload_t*) sport->service_params;
+
+ /* Save the domain service parameters */
+ ocs_memcpy(domain->service_params + 4, domain->dma.virt, sizeof(fc_plogi_payload_t) - 4);
+ ocs_memcpy(sport->service_params + 4, domain->dma.virt, sizeof(fc_plogi_payload_t) - 4);
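+		/* (The "+ 4" offsets skip the first word of the payload, which
+		 * carries the ELS command code rather than service-parameter
+		 * data.) */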
+
+		/* If we're in fabric emulation mode, the FLOGI service parameters have not been set up yet,
+		 * so we need some reasonable BB credit value
+		 */
+ if (domain->femul_enable) {
+ ocs_memcpy(domain->flogi_service_params + 4, domain->service_params + 4, sizeof(fc_plogi_payload_t) - 4);
+ }
+
+ /* Update the sport's service parameters, user might have specified non-default names */
+ sp->port_name_hi = ocs_htobe32((uint32_t) (sport->wwpn >> 32ll));
+ sp->port_name_lo = ocs_htobe32((uint32_t) sport->wwpn);
+ sp->node_name_hi = ocs_htobe32((uint32_t) (sport->wwnn >> 32ll));
+ sp->node_name_lo = ocs_htobe32((uint32_t) sport->wwnn);
+
+ if (wwn_bump) {
+ sp->port_name_lo = ocs_htobe32(ocs_be32toh(sp->port_name_lo) ^ ((uint32_t)(wwn_bump)));
+ sp->port_name_hi = ocs_htobe32(ocs_be32toh(sp->port_name_hi) ^ ((uint32_t)(wwn_bump >> 32)));
+ sp->node_name_lo = ocs_htobe32(ocs_be32toh(sp->node_name_lo) ^ ((uint32_t)(wwn_bump)));
+ sp->node_name_hi = ocs_htobe32(ocs_be32toh(sp->node_name_hi) ^ ((uint32_t)(wwn_bump >> 32)));
+ ocs_log_info(ocs, "Overriding WWN\n");
+ }
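+		/* Hypothetical example: wwn_bump = 0x1 XORs only the low-order
+		 * bit, so a WWPN/WWNN ending in ...00 becomes ...01 while all
+		 * other bits are preserved. */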
+
+ /* Take the loop topology path, unless we are an NL_PORT (public loop) */
+ if (domain->is_loop && !domain->is_nlport) {
+ /*
+ * For loop, we already have our FC ID and don't need fabric login.
+ * Transition to the allocated state and post an event to attach to
+ * the domain. Note that this breaks the normal action/transition
+ * pattern here to avoid a race with the domain attach callback.
+ */
+ /* sm: is_loop / domain_attach */
+ ocs_sm_transition(ctx, __ocs_domain_allocated, NULL);
+ __ocs_domain_attach_internal(domain, sport->fc_id);
+ break;
+ } else {
+ ocs_node_t *node;
+
+ /* alloc fabric node, send FLOGI */
+ node = ocs_node_find(sport, FC_ADDR_FABRIC);
+ if (node) {
+				ocs_log_err(ocs, "Fabric Controller node already exists\n");
+ break;
+ }
+ node = ocs_node_alloc(sport, FC_ADDR_FABRIC, FALSE, FALSE);
+ if (!node) {
+ ocs_log_err(ocs, "Error: ocs_node_alloc() failed\n");
+ } else {
+ if (ocs->nodedb_mask & OCS_NODEDB_PAUSE_FABRIC_LOGIN) {
+ ocs_node_pause(node, __ocs_fabric_init);
+ } else {
+ ocs_node_transition(node, __ocs_fabric_init, NULL);
+ }
+ }
+ /* Accept frames */
+ domain->req_accept_frames = 1;
+ }
+ /* sm: start fabric logins */
+ ocs_sm_transition(ctx, __ocs_domain_allocated, NULL);
+ break;
+ }
+
+ case OCS_EVT_DOMAIN_ALLOC_FAIL:
+ /* TODO: hw/device reset */
+		ocs_log_err(ocs, "%s received while waiting for DOMAIN_ALLOC_OK; shutting down domain\n",
+ ocs_sm_event_name(evt));
+ domain->req_domain_free = 1;
+ break;
+
+ case OCS_EVT_DOMAIN_FOUND:
+ /* Should not happen */
+ ocs_assert(evt, NULL);
+ break;
+
+ case OCS_EVT_DOMAIN_LOST:
+ ocs_log_debug(ocs, "%s received while waiting for ocs_hw_domain_alloc() to complete\n", ocs_sm_event_name(evt));
+ ocs_sm_transition(ctx, __ocs_domain_wait_domain_lost, NULL);
+ break;
+
+ default:
+ __ocs_domain_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+/**
+ * @ingroup domain_sm
+ * @brief Domain state machine: Wait for the domain attach request.
+ *
+ * <h3 class="desc">Description</h3>
+ * In this state, the domain has been allocated and is waiting for a domain attach request.
+ * The attach request comes from a node instance completing the fabric login,
+ * or from a point-to-point negotiation and login.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_domain_allocated(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ int32_t rc = 0;
+ std_domain_state_decl();
+
+ domain_sm_trace(domain);
+
+ switch(evt) {
+ case OCS_EVT_DOMAIN_REQ_ATTACH: {
+ uint32_t fc_id;
+
+ ocs_assert(arg, NULL);
+
+ fc_id = *((uint32_t*)arg);
+ ocs_log_debug(ocs, "Requesting hw domain attach fc_id x%x\n", fc_id);
+ /* Update sport lookup */
+ ocs_lock(&domain->lookup_lock);
+ spv_set(domain->lookup, fc_id, domain->sport);
+ ocs_unlock(&domain->lookup_lock);
+
+ /* Update display name for the sport */
+ ocs_node_fcid_display(fc_id, domain->sport->display_name, sizeof(domain->sport->display_name));
+
+ /* Issue domain attach call */
+ rc = ocs_hw_domain_attach(&ocs->hw, domain, fc_id);
+ if (rc) {
+ ocs_log_err(ocs, "ocs_hw_domain_attach failed: %d\n", rc);
+ return NULL;
+ }
+ /* sm: / domain_attach */
+ ocs_sm_transition(ctx, __ocs_domain_wait_attach, NULL);
+ break;
+ }
+
+ case OCS_EVT_DOMAIN_FOUND:
+ /* Should not happen */
+ ocs_assert(evt, NULL);
+ break;
+
+ case OCS_EVT_DOMAIN_LOST: {
+ int32_t rc;
+ ocs_log_debug(ocs, "%s received while waiting for OCS_EVT_DOMAIN_REQ_ATTACH\n",
+ ocs_sm_event_name(evt));
+ ocs_domain_lock(domain);
+ if (!ocs_list_empty(&domain->sport_list)) {
+ /* if there are sports, transition to wait state and
+ * send shutdown to each sport */
+ ocs_sport_t *sport = NULL;
+ ocs_sport_t *sport_next = NULL;
+ ocs_sm_transition(ctx, __ocs_domain_wait_sports_free, NULL);
+ ocs_list_foreach_safe(&domain->sport_list, sport, sport_next) {
+ ocs_sm_post_event(&sport->sm, OCS_EVT_SHUTDOWN, NULL);
+ }
+ ocs_domain_unlock(domain);
+ } else {
+ ocs_domain_unlock(domain);
+ /* no sports exist, free domain */
+ ocs_sm_transition(ctx, __ocs_domain_wait_shutdown, NULL);
+ rc = ocs_hw_domain_free(&ocs->hw, domain);
+ if (rc) {
+ ocs_log_err(ocs, "ocs_hw_domain_free() failed: %d\n", rc);
+ /* TODO: hw/device reset needed */
+ }
+ }
+
+ break;
+ }
+
+ default:
+ __ocs_domain_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+/**
+ * @ingroup domain_sm
+ * @brief Domain state machine: Wait for the HW domain attach to complete.
+ *
+ * <h3 class="desc">Description</h3>
+ * Waits for the HW domain attach to complete. Forwards attach ok event to the
+ * fabric node state machine.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_domain_wait_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_domain_state_decl();
+
+ domain_sm_trace(domain);
+
+ switch(evt) {
+ case OCS_EVT_DOMAIN_ATTACH_OK: {
+ ocs_node_t *node = NULL;
+ ocs_node_t *next_node = NULL;
+ ocs_sport_t *sport;
+ ocs_sport_t *next_sport;
+
+ /* Mark as attached */
+ domain->attached = 1;
+
+ /* Register with SCSI API */
+ if (ocs->enable_tgt)
+ ocs_scsi_tgt_new_domain(domain);
+ if (ocs->enable_ini)
+ ocs_scsi_ini_new_domain(domain);
+
+ /* Transition to ready */
+ /* sm: / forward event to all sports and nodes */
+ ocs_sm_transition(ctx, __ocs_domain_ready, NULL);
+
+ /* We have an FCFI, so we can accept frames */
+ domain->req_accept_frames = 1;
+ /* Set domain notify pending state to avoid duplicate domain event post */
+ domain->domain_notify_pend = 1;
+
+ /* Notify all nodes that the domain attach request has completed.
+ * Note: the sport will have already received notification of the sport attach
+ * as a result of the HW's port attach.
+ */
+ ocs_domain_lock(domain);
+ ocs_list_foreach_safe(&domain->sport_list, sport, next_sport) {
+ ocs_sport_lock(sport);
+ ocs_list_foreach_safe(&sport->node_list, node, next_node) {
+ ocs_node_post_event(node, OCS_EVT_DOMAIN_ATTACH_OK, NULL);
+ }
+ ocs_sport_unlock(sport);
+ }
+ ocs_domain_unlock(domain);
+ domain->domain_notify_pend = 0;
+ break;
+ }
+
+ case OCS_EVT_DOMAIN_ATTACH_FAIL:
+ ocs_log_debug(ocs, "%s received while waiting for hw attach to complete\n", ocs_sm_event_name(evt));
+ /* TODO: hw/device reset */
+ break;
+
+ case OCS_EVT_DOMAIN_FOUND:
+ /* Should not happen */
+ ocs_assert(evt, NULL);
+ break;
+
+ case OCS_EVT_DOMAIN_LOST:
+ /* Domain lost while waiting for an attach to complete, go to a state that waits for
+ * the domain attach to complete, then handle domain lost
+ */
+ ocs_sm_transition(ctx, __ocs_domain_wait_domain_lost, NULL);
+ break;
+
+ case OCS_EVT_DOMAIN_REQ_ATTACH:
+ /* In P2P we can get an attach request from the other FLOGI path, so drop this one */
+ break;
+
+ default:
+ __ocs_domain_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+/**
+ * @ingroup domain_sm
+ * @brief Domain state machine: Ready state.
+ *
+ * <h3 class="desc">Description</h3>
+ * This is a domain ready state. It waits for a domain-lost event, and initiates shutdown.
+ *
+ * @param ctx Domain state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_domain_ready(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_domain_state_decl();
+
+ domain_sm_trace(domain);
+
+ switch(evt) {
+ case OCS_EVT_ENTER: {
+
+ /* start any pending vports */
+ if (ocs_vport_start(domain)) {
+ ocs_log_debug(domain->ocs, "ocs_vport_start() did not start all vports\n");
+ }
+ break;
+ }
+ case OCS_EVT_DOMAIN_LOST: {
+ int32_t rc;
+ ocs_domain_lock(domain);
+ if (!ocs_list_empty(&domain->sport_list)) {
+ /* if there are sports, transition to wait state and send
+ * shutdown to each sport */
+ ocs_sport_t *sport = NULL;
+ ocs_sport_t *sport_next = NULL;
+ ocs_sm_transition(ctx, __ocs_domain_wait_sports_free, NULL);
+ ocs_list_foreach_safe(&domain->sport_list, sport, sport_next) {
+ ocs_sm_post_event(&sport->sm, OCS_EVT_SHUTDOWN, NULL);
+ }
+ ocs_domain_unlock(domain);
+ } else {
+ ocs_domain_unlock(domain);
+ /* no sports exist, free domain */
+ ocs_sm_transition(ctx, __ocs_domain_wait_shutdown, NULL);
+ rc = ocs_hw_domain_free(&ocs->hw, domain);
+ if (rc) {
+ ocs_log_err(ocs, "ocs_hw_domain_free() failed: %d\n", rc);
+ /* TODO: hw/device reset needed */
+ }
+ }
+ break;
+ }
+
+ case OCS_EVT_DOMAIN_FOUND:
+ /* Should not happen */
+ ocs_assert(evt, NULL);
+ break;
+
+ case OCS_EVT_DOMAIN_REQ_ATTACH: {
+ /* can happen during p2p */
+ uint32_t fc_id;
+
+ ocs_assert(arg, NULL);
+ fc_id = *((uint32_t*)arg);
+
+ /* Assume that the domain is attached */
+ ocs_assert(domain->attached, NULL);
+
+ /* Verify that the requested FC_ID is the same as the one we're working with */
+ ocs_assert(domain->sport->fc_id == fc_id, NULL);
+ break;
+ }
+
+ default:
+ __ocs_domain_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+/**
+ * @ingroup domain_sm
+ * @brief Domain state machine: Wait for the sports to be freed prior to the domain shutdown.
+ *
+ * <h3 class="desc">Description</h3>
+ * Waits for all of the domain's sports to be freed, then initiates the domain shutdown.
+ *
+ * @param ctx Domain state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_domain_wait_sports_free(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_domain_state_decl();
+
+ domain_sm_trace(domain);
+
+ switch(evt) {
+ case OCS_EVT_ALL_CHILD_NODES_FREE: {
+ int32_t rc;
+
+ /* sm: ocs_hw_domain_free */
+ ocs_sm_transition(ctx, __ocs_domain_wait_shutdown, NULL);
+
+ /* Request ocs_hw_domain_free and wait for completion */
+ rc = ocs_hw_domain_free(&ocs->hw, domain);
+ if (rc) {
+ ocs_log_err(ocs, "ocs_hw_domain_free() failed: %d\n", rc);
+ }
+ break;
+ }
+ default:
+ __ocs_domain_common_shutdown(__func__, ctx, evt, arg);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+/**
+ * @ingroup domain_sm
+ * @brief Domain state machine: Complete the domain shutdown.
+ *
+ * <h3 class="desc">Description</h3>
+ * Waits for a HW domain free to complete.
+ *
+ * @param ctx Domain state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_domain_wait_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_domain_state_decl();
+
+ domain_sm_trace(domain);
+
+ switch(evt) {
+ case OCS_EVT_DOMAIN_FREE_OK: {
+ if (ocs->enable_ini)
+ ocs_scsi_ini_del_domain(domain);
+ if (ocs->enable_tgt)
+ ocs_scsi_tgt_del_domain(domain);
+
+ /* sm: domain_free */
+ if (domain->domain_found_pending) {
+ /* save fcf_wwn and drec from this domain, free current domain and allocate
+ * a new one with the same fcf_wwn
+ * TODO: could use a SLI-4 "re-register VPI" operation here
+ */
+ uint64_t fcf_wwn = domain->fcf_wwn;
+ ocs_domain_record_t drec = domain->pending_drec;
+
+ ocs_log_debug(ocs, "Reallocating domain\n");
+ domain->req_domain_free = 1;
+ domain = ocs_domain_alloc(ocs, fcf_wwn);
+
+ if (domain == NULL) {
+ ocs_log_err(ocs, "ocs_domain_alloc() failed\n");
+ /* TODO: hw/device reset needed */
+ return NULL;
+ }
+ /*
+ * Got a new domain; at this point, there are at least two domains.
+ * Once the req_domain_free flag is processed, the associated (old)
+ * domain will be removed.
+ */
+ ocs_sm_transition(&domain->drvsm, __ocs_domain_init, NULL);
+ ocs_sm_post_event(&domain->drvsm, OCS_EVT_DOMAIN_FOUND, &drec);
+ } else {
+ domain->req_domain_free = 1;
+ }
+ break;
+ }
+
+ default:
+ __ocs_domain_common_shutdown(__func__, ctx, evt, arg);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+/**
+ * @ingroup domain_sm
+ * @brief Domain state machine: Wait for the domain alloc/attach completion
+ * after receiving a domain lost.
+ *
+ * <h3 class="desc">Description</h3>
+ * This state is entered when receiving a domain lost while waiting for a domain alloc
+ * or a domain attach to complete.
+ *
+ * @param ctx Domain state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_domain_wait_domain_lost(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_domain_state_decl();
+
+ domain_sm_trace(domain);
+
+ switch(evt) {
+ case OCS_EVT_DOMAIN_ALLOC_OK:
+ case OCS_EVT_DOMAIN_ATTACH_OK: {
+ int32_t rc;
+ ocs_domain_lock(domain);
+ if (!ocs_list_empty(&domain->sport_list)) {
+ /* if there are sports, transition to wait state and send
+ * shutdown to each sport */
+ ocs_sport_t *sport = NULL;
+ ocs_sport_t *sport_next = NULL;
+ ocs_sm_transition(ctx, __ocs_domain_wait_sports_free, NULL);
+ ocs_list_foreach_safe(&domain->sport_list, sport, sport_next) {
+ ocs_sm_post_event(&sport->sm, OCS_EVT_SHUTDOWN, NULL);
+ }
+ ocs_domain_unlock(domain);
+ } else {
+ ocs_domain_unlock(domain);
+ /* no sports exist, free domain */
+ ocs_sm_transition(ctx, __ocs_domain_wait_shutdown, NULL);
+ rc = ocs_hw_domain_free(&ocs->hw, domain);
+ if (rc) {
+ ocs_log_err(ocs, "ocs_hw_domain_free() failed: %d\n", rc);
+ /* TODO: hw/device reset needed */
+ }
+ }
+ break;
+ }
+ case OCS_EVT_DOMAIN_ALLOC_FAIL:
+ case OCS_EVT_DOMAIN_ATTACH_FAIL:
+ ocs_log_err(ocs, "[domain] %-20s: failed\n", ocs_sm_event_name(evt));
+ /* TODO: hw/device reset needed */
+ break;
+
+ default:
+ __ocs_domain_common_shutdown(__func__, ctx, evt, arg);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+/**
+ * @brief Save the port's service parameters.
+ *
+ * <h3 class="desc">Description</h3>
+ * Service parameters from the fabric FLOGI are saved in the domain's
+ * flogi_service_params array.
+ *
+ * @param domain Pointer to the domain.
+ * @param payload Service parameters to save.
+ *
+ * @return None.
+ */
+
+void
+ocs_domain_save_sparms(ocs_domain_t *domain, void *payload)
+{
+ ocs_memcpy(domain->flogi_service_params, payload, sizeof (fc_plogi_payload_t));
+}
+
+/**
+ * @brief Initiator domain attach (internal call only).
+ *
+ * Assumes that the domain SM lock is already held.
+ *
+ * <h3 class="desc">Description</h3>
+ * The HW domain attach function is started.
+ *
+ * @param domain Pointer to the domain object.
+ * @param s_id FC_ID with which to register this domain.
+ *
+ * @return None.
+ */
+
+void
+__ocs_domain_attach_internal(ocs_domain_t *domain, uint32_t s_id)
+{
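+ /* Copy the saved FLOGI service parameters (minus the 4-byte ELS command
+ * code word) into the domain's DMA buffer.
+ */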
+ ocs_memcpy(domain->dma.virt, ((uint8_t*)domain->flogi_service_params)+4, sizeof (fc_plogi_payload_t) - 4);
+ (void)ocs_sm_post_event(&domain->drvsm, OCS_EVT_DOMAIN_REQ_ATTACH, &s_id);
+}
+
+/**
+ * @brief Initiator domain attach.
+ *
+ * <h3 class="desc">Description</h3>
+ * The HW domain attach function is started.
+ *
+ * @param domain Pointer to the domain object.
+ * @param s_id FC_ID with which to register this domain.
+ *
+ * @return None.
+ */
+
+void
+ocs_domain_attach(ocs_domain_t *domain, uint32_t s_id)
+{
+ __ocs_domain_attach_internal(domain, s_id);
+}
+
+int
+ocs_domain_post_event(ocs_domain_t *domain, ocs_sm_event_t event, void *arg)
+{
+ int rc;
+ int accept_frames;
+ int req_domain_free;
+
+ rc = ocs_sm_post_event(&domain->drvsm, event, arg);
+
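+ /* The state machine handler may have latched requests on the domain
+ * (req_domain_free / req_accept_frames). Snapshot and clear them here so
+ * the corresponding actions run only after the SM call has unwound.
+ */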
+ req_domain_free = domain->req_domain_free;
+ domain->req_domain_free = 0;
+
+ accept_frames = domain->req_accept_frames;
+ domain->req_accept_frames = 0;
+
+ if (accept_frames) {
+ ocs_domain_accept_frames(domain);
+ }
+
+ if (req_domain_free) {
+ ocs_domain_free(domain);
+ }
+
+ return rc;
+}
+
+/**
+ * @brief Return the WWN as a uint64_t.
+ *
+ * <h3 class="desc">Description</h3>
+ * Calls the HW property function for the WWNN or WWPN, and returns the value
+ * as a uint64_t.
+ *
+ * @param hw Pointer to the HW object.
+ * @param prop HW property.
+ *
+ * @return Returns the requested value as a uint64_t.
+ */
+
+uint64_t
+ocs_get_wwn(ocs_hw_t *hw, ocs_hw_property_e prop)
+{
+ uint8_t *p = ocs_hw_get_ptr(hw, prop);
+ uint64_t value = 0;
+
+ if (p) {
+ uint32_t i;
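+ /* Fold the 8-byte big-endian WWN into a 64-bit value, MSB first */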
+ for (i = 0; i < sizeof(value); i++) {
+ value = (value << 8) | p[i];
+ }
+ }
+ return value;
+}
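+
+/*
+ * A minimal usage sketch (hedged; OCS_HW_WWN_PORT is the property name
+ * assumed to be used elsewhere in this driver):
+ *
+ *	uint64_t wwpn = ocs_get_wwn(&ocs->hw, OCS_HW_WWN_PORT);
+ *
+ * A return value of zero indicates the property pointer was unavailable.
+ */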
+
+/**
+ * @brief Generate a domain ddump.
+ *
+ * <h3 class="desc">Description</h3>
+ * Generates a domain ddump.
+ *
+ * @param textbuf Pointer to the text buffer.
+ * @param domain Pointer to the domain context.
+ *
+ * @return Returns 0 on success, or a negative value on failure.
+ */
+
+int
+ocs_ddump_domain(ocs_textbuf_t *textbuf, ocs_domain_t *domain)
+{
+ ocs_sport_t *sport;
+ int retval = 0;
+
+ ocs_ddump_section(textbuf, "domain", domain->instance_index);
+ ocs_ddump_value(textbuf, "display_name", "%s", domain->display_name);
+
+ ocs_ddump_value(textbuf, "fcf", "%#x", domain->fcf);
+ ocs_ddump_value(textbuf, "fcf_indicator", "%#x", domain->fcf_indicator);
+ ocs_ddump_value(textbuf, "vlan_id", "%#x", domain->vlan_id);
+ ocs_ddump_value(textbuf, "indicator", "%#x", domain->indicator);
+ ocs_ddump_value(textbuf, "attached", "%d", domain->attached);
+ ocs_ddump_value(textbuf, "is_loop", "%d", domain->is_loop);
+ ocs_ddump_value(textbuf, "is_nlport", "%d", domain->is_nlport);
+
+ ocs_scsi_ini_ddump(textbuf, OCS_SCSI_DDUMP_DOMAIN, domain);
+ ocs_scsi_tgt_ddump(textbuf, OCS_SCSI_DDUMP_DOMAIN, domain);
+
+ ocs_display_sparams(NULL, "domain_sparms", 1, textbuf, domain->dma.virt);
+
+ if (ocs_domain_lock_try(domain) != TRUE) {
+ /* Didn't get the lock */
+ return -1;
+ }
+ ocs_list_foreach(&domain->sport_list, sport) {
+ retval = ocs_ddump_sport(textbuf, sport);
+ if (retval != 0) {
+ break;
+ }
+ }
+
+#if defined(ENABLE_FABRIC_EMULATION)
+ ocs_ddump_ns(textbuf, domain->ocs_ns);
+#endif
+
+ ocs_domain_unlock(domain);
+
+ ocs_ddump_endsection(textbuf, "domain", domain->instance_index);
+
+ return retval;
+}
+
+
+void
+ocs_mgmt_domain_list(ocs_textbuf_t *textbuf, void *object)
+{
+ ocs_sport_t *sport;
+ ocs_domain_t *domain = (ocs_domain_t *)object;
+
+ ocs_mgmt_start_section(textbuf, "domain", domain->instance_index);
+
+ /* Add my status values to textbuf */
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "fcf");
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "fcf_indicator");
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "vlan_id");
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "indicator");
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "attached");
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "is_loop");
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "display_name");
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "num_sports");
+#if defined(ENABLE_FABRIC_EMULATION)
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RW, "femul_enable");
+#endif
+
+ if (ocs_domain_lock_try(domain) == TRUE) {
+ /* If we get here, then we are holding the domain lock */
+ ocs_list_foreach(&domain->sport_list, sport) {
+ if ((sport->mgmt_functions) && (sport->mgmt_functions->get_list_handler)) {
+ sport->mgmt_functions->get_list_handler(textbuf, sport);
+ }
+ }
+ ocs_domain_unlock(domain);
+ }
+
+ ocs_mgmt_end_section(textbuf, "domain", domain->instance_index);
+}
+
+int
+ocs_mgmt_domain_get(ocs_textbuf_t *textbuf, char *parent, char *name, void *object)
+{
+ ocs_sport_t *sport;
+ ocs_domain_t *domain = (ocs_domain_t *)object;
+ char qualifier[80];
+ int retval = -1;
+
+ ocs_mgmt_start_section(textbuf, "domain", domain->instance_index);
+
+ snprintf(qualifier, sizeof(qualifier), "%s/domain[%d]", parent, domain->instance_index);
+
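+ /* For example, with parent "/sys" (illustrative only) and instance 0, the
+ * qualifier is "/sys/domain[0]", and a request named "/sys/domain[0]/fcf"
+ * yields the unqualified name "fcf" below.
+ */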
+ /* If it doesn't start with my qualifier I don't know what to do with it */
+ if (ocs_strncmp(name, qualifier, strlen(qualifier)) == 0) {
+ char *unqualified_name = name + strlen(qualifier) +1;
+
+ /* See if it's a value I can supply */
+ if (ocs_strcmp(unqualified_name, "display_name") == 0) {
+ ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "display_name", domain->display_name);
+ retval = 0;
+ } else if (ocs_strcmp(unqualified_name, "fcf") == 0) {
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "fcf", "%#x", domain->fcf);
+ retval = 0;
+ } else if (ocs_strcmp(unqualified_name, "fcf_indicator") == 0) {
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "fcf_indicator", "%#x", domain->fcf_indicator);
+ retval = 0;
+ } else if (ocs_strcmp(unqualified_name, "vlan_id") == 0) {
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "vlan_id", "%#x", domain->vlan_id);
+ retval = 0;
+ } else if (ocs_strcmp(unqualified_name, "indicator") == 0) {
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "indicator", "%#x", domain->indicator);
+ retval = 0;
+ } else if (ocs_strcmp(unqualified_name, "attached") == 0) {
+ ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "attached", domain->attached);
+ retval = 0;
+ } else if (ocs_strcmp(unqualified_name, "is_loop") == 0) {
+ ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "is_loop", domain->is_loop);
+ retval = 0;
+ } else if (ocs_strcmp(unqualified_name, "is_nlport") == 0) {
+ ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "is_nlport", domain->is_nlport);
+ retval = 0;
+#if defined(ENABLE_FABRIC_EMULATION)
+ } else if (ocs_strcmp(unqualified_name, "femul_enable") == 0) {
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "femul_enable", "%d", domain->femul_enable);
+ retval = 0;
+#endif
+ } else if (ocs_strcmp(unqualified_name, "num_sports") == 0) {
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "num_sports", "%d", domain->sport_instance_count);
+ retval = 0;
+ } else {
+ /* If I didn't know the value of this status, pass the request to each of my children */
+
+ ocs_domain_lock(domain);
+ ocs_list_foreach(&domain->sport_list, sport) {
+ if ((sport->mgmt_functions) && (sport->mgmt_functions->get_handler)) {
+ retval = sport->mgmt_functions->get_handler(textbuf, qualifier, name, sport);
+ }
+
+ if (retval == 0) {
+ break;
+ }
+
+ }
+ ocs_domain_unlock(domain);
+ }
+ }
+
+ ocs_mgmt_end_section(textbuf, "domain", domain->instance_index);
+ return retval;
+}
+
+void
+ocs_mgmt_domain_get_all(ocs_textbuf_t *textbuf, void *object)
+{
+ ocs_sport_t *sport;
+ ocs_domain_t *domain = (ocs_domain_t *)object;
+
+ ocs_mgmt_start_section(textbuf, "domain", domain->instance_index);
+
+ ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "display_name", domain->display_name);
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "fcf", "%#x", domain->fcf);
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "fcf_indicator", "%#x", domain->fcf_indicator);
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "vlan_id", "%#x", domain->vlan_id);
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "indicator", "%#x", domain->indicator);
+ ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "attached", domain->attached);
+ ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "is_loop", domain->is_loop);
+ ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "is_nlport", domain->is_nlport);
+#if defined(ENABLE_FABRIC_EMULATION)
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "femul_enable", "%d", domain->femul_enable);
+#endif
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "num_sports", "%d", domain->sport_instance_count);
+
+ ocs_domain_lock(domain);
+ ocs_list_foreach(&domain->sport_list, sport) {
+ if ((sport->mgmt_functions) && (sport->mgmt_functions->get_all_handler)) {
+ sport->mgmt_functions->get_all_handler(textbuf, sport);
+ }
+ }
+ ocs_domain_unlock(domain);
+
+ ocs_mgmt_end_section(textbuf, "domain", domain->instance_index);
+}
+
+int
+ocs_mgmt_domain_set(char *parent, char *name, char *value, void *object)
+{
+ ocs_sport_t *sport;
+ ocs_domain_t *domain = (ocs_domain_t *)object;
+ char qualifier[80];
+ int retval = -1;
+
+ snprintf(qualifier, sizeof(qualifier), "%s/domain[%d]", parent, domain->instance_index);
+
+ /* If it doesn't start with my qualifier I don't know what to do with it */
+ if (ocs_strncmp(name, qualifier, strlen(qualifier)) == 0) {
+
+ /* See if it's a value I can supply */
+
+ /* if (ocs_strcmp(unqualified_name, "display_name") == 0) {
+
+ } else */
+ {
+ /* If I didn't know the value of this status, pass the request to each of my children */
+ ocs_domain_lock(domain);
+ ocs_list_foreach(&domain->sport_list, sport) {
+ if ((sport->mgmt_functions) && (sport->mgmt_functions->set_handler)) {
+ retval = sport->mgmt_functions->set_handler(qualifier, name, value, sport);
+ }
+
+ if (retval == 0) {
+ break;
+ }
+
+ }
+ ocs_domain_unlock(domain);
+ }
+ }
+
+ return retval;
+}
+
+int
+ocs_mgmt_domain_exec(char *parent, char *action, void *arg_in, uint32_t arg_in_length,
+ void *arg_out, uint32_t arg_out_length, void *object)
+{
+ ocs_sport_t *sport;
+ ocs_domain_t *domain = (ocs_domain_t *)object;
+ char qualifier[80];
+ int retval = -1;
+
+ snprintf(qualifier, sizeof(qualifier), "%s.domain%d", parent, domain->instance_index);
+
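+ /* Note: exec actions use a dotted qualifier ("parent.domainN"), unlike the
+ * bracketed "parent/domain[N]" form used by the get/set handlers above.
+ */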
+ /* If it doesn't start with my qualifier I don't know what to do with it */
+ if (ocs_strncmp(action, qualifier, strlen(qualifier)) == 0) {
+
+ {
+ /* If I didn't know how to do this action, pass the request to each of my children */
+ ocs_domain_lock(domain);
+ ocs_list_foreach(&domain->sport_list, sport) {
+ if ((sport->mgmt_functions) && (sport->mgmt_functions->exec_handler)) {
+ retval = sport->mgmt_functions->exec_handler(qualifier, action, arg_in, arg_in_length, arg_out, arg_out_length, sport);
+ }
+
+ if (retval == 0) {
+ break;
+ }
+
+ }
+ ocs_domain_unlock(domain);
+ }
+ }
+
+ return retval;
+}
diff --git a/sys/dev/ocs_fc/ocs_domain.h b/sys/dev/ocs_fc/ocs_domain.h
new file mode 100644
index 000000000000..1522a3c22687
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs_domain.h
@@ -0,0 +1,91 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/**
+ * @file
+ * Declare driver's domain handler exported interface
+ */
+
+#if !defined(__OCS_DOMAIN_H__)
+#define __OCS_DOMAIN_H__
+extern int32_t ocs_domain_init(ocs_t *ocs, ocs_domain_t *domain);
+extern ocs_domain_t *ocs_domain_find(ocs_t *ocs, uint64_t fcf_wwn);
+extern ocs_domain_t *ocs_domain_alloc(ocs_t *ocs, uint64_t fcf_wwn);
+extern void ocs_domain_free(ocs_domain_t *domain);
+extern void ocs_domain_force_free(ocs_domain_t *domain);
+extern void ocs_register_domain_list_empty_cb(ocs_t *ocs, void (*callback)(ocs_t *ocs, void *arg), void *arg);
+extern uint64_t ocs_get_wwn(ocs_hw_t *hw, ocs_hw_property_e prop);
+
+static inline void
+ocs_domain_lock_init(ocs_domain_t *domain)
+{
+}
+
+static inline int32_t
+ocs_domain_lock_try(ocs_domain_t *domain)
+{
+ /* Use the device wide lock */
+ return ocs_device_lock_try(domain->ocs);
+}
+
+static inline void
+ocs_domain_lock(ocs_domain_t *domain)
+{
+ /* Use the device wide lock */
+ ocs_device_lock(domain->ocs);
+}
+
+static inline void
+ocs_domain_unlock(ocs_domain_t *domain)
+{
+ /* Use the device wide lock */
+ ocs_device_unlock(domain->ocs);
+}
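+
+/*
+ * Note: the domain lock wrappers above all delegate to the device-wide lock,
+ * so taking any domain lock serializes against every domain on the device.
+ */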
+
+extern int32_t ocs_domain_cb(void *arg, ocs_hw_domain_event_e event, void *data);
+extern void *__ocs_domain_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_domain_wait_alloc(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_domain_allocated(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_domain_wait_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_domain_ready(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_domain_wait_sports_free(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_domain_wait_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_domain_wait_domain_lost(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+
+extern void ocs_domain_save_sparms(ocs_domain_t *domain, void *payload);
+extern void ocs_domain_attach(ocs_domain_t *domain, uint32_t s_id);
+extern int ocs_domain_post_event(ocs_domain_t *domain, ocs_sm_event_t event, void *arg);
+
+extern int ocs_ddump_domain(ocs_textbuf_t *textbuf, ocs_domain_t *domain);
+extern void __ocs_domain_attach_internal(ocs_domain_t *domain, uint32_t s_id);
+#endif
diff --git a/sys/dev/ocs_fc/ocs_drv_fc.h b/sys/dev/ocs_fc/ocs_drv_fc.h
new file mode 100644
index 000000000000..95684e30354f
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs_drv_fc.h
@@ -0,0 +1,212 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/**
+ * @file
+ * OCS FC driver common include file
+ */
+
+
+#if !defined(__OCS_DRV_FC_H__)
+#define __OCS_DRV_FC_H__
+
+#define OCS_INCLUDE_FC
+
+#include "ocs_os.h"
+#include "ocs_debug.h"
+#include "ocs_common.h"
+#include "ocs_hw.h"
+#include "ocs_io.h"
+#include "ocs_pm.h"
+#include "ocs_xport.h"
+#include "ocs_stats.h"
+
+struct ocs_s {
+
+ ocs_os_t ocs_os;
+ char display_name[OCS_DISPLAY_NAME_LENGTH];
+ ocs_rlock_t lock; /*>> Device wide lock */
+ ocs_list_t domain_list; /*>> linked list of virtual fabric objects */
+ ocs_io_pool_t *io_pool; /**< pointer to IO pool */
+ ocs_ramlog_t *ramlog;
+ ocs_drv_t drv_ocs;
+ ocs_scsi_tgt_t tgt_ocs;
+ ocs_scsi_ini_t ini_ocs;
+ ocs_xport_e ocs_xport;
+ ocs_xport_t *xport; /*>> Pointer to transport object */
+ bool enable_ini;
+ bool enable_tgt;
+ uint8_t fc_type;
+ int ctrlmask;
+ int logmask;
+ uint32_t max_isr_time_msec; /*>> Maximum ISR time */
+ char *hw_war_version;
+ ocs_pm_context_t pm_context; /*<< power management context */
+ ocs_mgmt_functions_t *mgmt_functions;
+ ocs_mgmt_functions_t *tgt_mgmt_functions;
+ ocs_mgmt_functions_t *ini_mgmt_functions;
+ ocs_err_injection_e err_injection; /**< for error injection testing */
+ uint32_t cmd_err_inject; /**< specific cmd to inject error into */
+ time_t delay_value_msec; /**< for injecting delays */
+
+ const char *desc;
+ uint32_t instance_index;
+ uint16_t pci_vendor;
+ uint16_t pci_device;
+ uint16_t pci_subsystem_vendor;
+ uint16_t pci_subsystem_device;
+ char businfo[OCS_DISPLAY_BUS_INFO_LENGTH];
+
+ const char *model;
+ const char *driver_version;
+ const char *fw_version;
+
+ ocs_hw_t hw;
+
+ ocs_domain_t *domain; /*>> pointer to first (physical) domain (also on domain_list) */
+ uint32_t domain_instance_count; /*>> domain instance count */
+ void (*domain_list_empty_cb)(ocs_t *ocs, void *arg); /*>> domain list empty callback */
+ void *domain_list_empty_cb_arg; /*>> domain list empty callback argument */
+
+ bool explicit_buffer_list;
+ bool external_loopback;
+ uint32_t num_vports;
+ uint32_t hw_bounce;
+ uint32_t rq_threads;
+ uint32_t rq_selection_policy;
+ uint32_t rr_quanta;
+ char *filter_def;
+ uint32_t max_remote_nodes;
+
+ bool soft_wwn_enable;
+
+ /*
+ * tgt_rscn_delay - delay in kicking off RSCN processing (nameserver queries)
+ * after receiving an RSCN on the target. This prevents thrashing of nameserver
+ * requests due to a huge burst of RSCNs received in a short period of time.
+ * Note: this is only valid when target RSCN handling is enabled -- see ctrlmask.
+ */
+ time_t tgt_rscn_delay_msec; /*>> minimum target RSCN delay */
+
+ /*
+ * tgt_rscn_period - determines the maximum frequency of back-to-back RSCN
+ * processing; e.g. if this value is 30, no more than one RSCN is handled per
+ * 30-second window. This prevents initiators on a faulty link that generate
+ * many RSCNs from causing the target to continually query the nameserver.
+ * Note: this is only valid when target RSCN handling is enabled.
+ */
+ time_t tgt_rscn_period_msec; /*>> minimum target RSCN period */
+
+ /*
+ * Target IO timer value:
+ * Zero: target command timeout disabled.
+ * Non-zero: Timeout value, in seconds, for target commands
+ */
+ uint32_t target_io_timer_sec;
+
+ int speed;
+ int topology;
+ int ethernet_license;
+ int num_scsi_ios;
+ bool enable_hlm; /*>> high login mode is enabled */
+ uint32_t hlm_group_size; /*>> RPI count for high login mode */
+ char *wwn_bump;
+ uint32_t nodedb_mask; /*>> Node debugging mask */
+
+ uint32_t auto_xfer_rdy_size; /*>> Maximum sized write to use auto xfer rdy */
+ bool esoc;
+ uint8_t ocs_req_fw_upgrade;
+
+ ocs_textbuf_t ddump_saved;
+
+};
+
+#define ocs_is_fc_initiator_enabled() (ocs->enable_ini)
+#define ocs_is_fc_target_enabled() (ocs->enable_tgt)
+
+static inline void
+ocs_device_lock_init(ocs_t *ocs)
+{
+ ocs_rlock_init(ocs, &ocs->lock, "ocsdevicelock");
+}
+static inline void
+ocs_device_lock_free(ocs_t *ocs)
+{
+ ocs_rlock_free(&ocs->lock);
+}
+static inline int32_t
+ocs_device_lock_try(ocs_t *ocs)
+{
+ return ocs_rlock_try(&ocs->lock);
+}
+static inline void
+ocs_device_lock(ocs_t *ocs)
+{
+ ocs_rlock_acquire(&ocs->lock);
+}
+static inline void
+ocs_device_unlock(ocs_t *ocs)
+{
+ ocs_rlock_release(&ocs->lock);
+}
+
+extern ocs_t *ocs_get_instance(uint32_t index);
+extern int32_t ocs_get_bus_dev_func(ocs_t *ocs, uint8_t* bus, uint8_t* dev, uint8_t* func);
+
+static inline ocs_io_t *
+ocs_io_alloc(ocs_t *ocs)
+{
+ return ocs_io_pool_io_alloc(ocs->xport->io_pool);
+}
+
+static inline void
+ocs_io_free(ocs_t *ocs, ocs_io_t *io)
+{
+ ocs_io_pool_io_free(ocs->xport->io_pool, io);
+}
+
+extern void ocs_stop_event_processing(ocs_os_t *ocs_os);
+extern int32_t ocs_start_event_processing(ocs_os_t *ocs_os);
+
+#include "ocs_domain.h"
+#include "ocs_sport.h"
+#include "ocs_node.h"
+#include "ocs_io.h"
+#include "ocs_unsol.h"
+#include "ocs_scsi.h"
+
+#include "ocs_ioctl.h"
+#include "ocs_elxu.h"
+
+#endif
diff --git a/sys/dev/ocs_fc/ocs_els.c b/sys/dev/ocs_fc/ocs_els.c
new file mode 100644
index 000000000000..6f19eaf15947
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs_els.c
@@ -0,0 +1,2777 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/**
+ * @file
+ * Functions to build and send ELS/CT/BLS commands and responses.
+ */
+
+/*!
+@defgroup els_api ELS/BLS/CT Command and Response Functions
+*/
+
+#include "ocs.h"
+#include "ocs_els.h"
+#include "ocs_scsi.h"
+#include "ocs_device.h"
+
+#define ELS_IOFMT "[i:%04x t:%04x h:%04x]"
+#define ELS_IOFMT_ARGS(els) els->init_task_tag, els->tgt_task_tag, els->hw_tag
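+/* ELS_IOFMT renders as "[i:<init_task_tag> t:<tgt_task_tag> h:<hw_tag>]" in ELS log messages */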
+
+#define node_els_trace() \
+ do { \
+ if (OCS_LOG_ENABLE_ELS_TRACE(ocs)) \
+ ocs_log_info(ocs, "[%s] %-20s\n", node->display_name, __func__); \
+ } while (0)
+
+#define els_io_printf(els, fmt, ...) \
+ ocs_log_debug(els->node->ocs, "[%s]" ELS_IOFMT " %-8s " fmt, els->node->display_name, ELS_IOFMT_ARGS(els), els->display_name, ##__VA_ARGS__);
+
+static int32_t ocs_els_send(ocs_io_t *els, uint32_t reqlen, uint32_t timeout_sec, ocs_hw_srrs_cb_t cb);
+static int32_t ocs_els_send_rsp(ocs_io_t *els, uint32_t rsplen);
+static int32_t ocs_els_acc_cb(ocs_hw_io_t *hio, ocs_remote_node_t *rnode, uint32_t length, int32_t status, uint32_t ext_status, void *arg);
+static ocs_io_t *ocs_bls_send_acc(ocs_io_t *io, uint32_t s_id, uint16_t ox_id, uint16_t rx_id);
+static int32_t ocs_bls_send_acc_cb(ocs_hw_io_t *hio, ocs_remote_node_t *rnode, uint32_t length,
+ int32_t status, uint32_t ext_status, void *app);
+static void ocs_io_transition(ocs_io_t *els, ocs_sm_function_t state, void *data);
+static ocs_io_t *ocs_els_abort_io(ocs_io_t *els, int send_abts);
+static void _ocs_els_io_free(void *arg);
+static void ocs_els_delay_timer_cb(void *arg);
+
+
+/**
+ * @ingroup els_api
+ * @brief ELS state machine transition wrapper.
+ *
+ * <h3 class="desc">Description</h3>
+ * This function is the transition wrapper for the ELS state machine. It grabs
+ * the node lock prior to making the transition to protect
+ * against multiple threads accessing a particular ELS. For example,
+ * one thread transitioning from __els_init to
+ * __ocs_els_wait_resp and another thread (tasklet) handling the
+ * completion of that ELS request.
+ *
+ * @param els Pointer to the IO context.
+ * @param state State to transition to.
+ * @param data Data to pass in with the transition.
+ *
+ * @return None.
+ */
+static void
+ocs_io_transition(ocs_io_t *els, ocs_sm_function_t state, void *data)
+{
+ /* protect ELS events with node lock */
+ ocs_node_t *node = els->node;
+ ocs_node_lock(node);
+ ocs_sm_transition(&els->els_sm, state, data);
+ ocs_node_unlock(node);
+}
+
+/**
+ * @ingroup els_api
+ * @brief ELS state machine post event wrapper.
+ *
+ * <h3 class="desc">Description</h3>
+ * Post an event wrapper for the ELS state machine. This function grabs
+ * the node lock prior to posting the event.
+ *
+ * @param els Pointer to the IO context.
+ * @param evt Event to process.
+ * @param data Data to pass in with the transition.
+ *
+ * @return None.
+ */
+void
+ocs_els_post_event(ocs_io_t *els, ocs_sm_event_t evt, void *data)
+{
+ /* protect ELS events with node lock */
+ ocs_node_t *node = els->node;
+ ocs_node_lock(node);
+ els->els_evtdepth ++;
+ ocs_sm_post_event(&els->els_sm, evt, data);
+ els->els_evtdepth --;
+ ocs_node_unlock(node);
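+ /* If the ELS was marked for free while events were being processed,
+ * release it now that the outermost (non-nested) post has unwound.
+ */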
+ if (els->els_evtdepth == 0 && els->els_req_free) {
+ ocs_els_io_free(els);
+ }
+}
+
+/**
+ * @ingroup els_api
+ * @brief Allocate an IO structure for an ELS IO context.
+ *
+ * <h3 class="desc">Description</h3>
+ * Allocate an IO for an ELS context. Uses OCS_ELS_RSP_LEN as response size.
+ *
+ * @param node Node to associate the ELS IO with.
+ * @param reqlen Length of the ELS request.
+ * @param role Role of the ELS (originator/responder).
+ *
+ * @return Returns a pointer to the allocated IO structure, or NULL on failure.
+ */
+
+ocs_io_t *
+ocs_els_io_alloc(ocs_node_t *node, uint32_t reqlen, ocs_els_role_e role)
+{
+ return ocs_els_io_alloc_size(node, reqlen, OCS_ELS_RSP_LEN, role);
+}
+
+/**
+ * @ingroup els_api
+ * @brief Allocate an IO structure for an ELS IO context.
+ *
+ * <h3 class="desc">Description</h3>
+ * Allocate an IO for an ELS context, allowing the caller to specify the size of the response.
+ *
+ * @param node Node to associate the ELS IO with.
+ * @param reqlen Length of the ELS request.
+ * @param rsplen Length of the ELS response.
+ * @param role Role of the ELS (originator/responder).
+ *
+ * @return Returns a pointer to the allocated IO structure, or NULL on failure.
+ */
+
+ocs_io_t *
+ocs_els_io_alloc_size(ocs_node_t *node, uint32_t reqlen, uint32_t rsplen, ocs_els_role_e role)
+{
+ ocs_t *ocs;
+ ocs_xport_t *xport;
+ ocs_io_t *els;
+ ocs_assert(node, NULL);
+ ocs_assert(node->ocs, NULL);
+ ocs = node->ocs;
+ ocs_assert(ocs->xport, NULL);
+ xport = ocs->xport;
+
+ ocs_lock(&node->active_ios_lock);
+ if (!node->io_alloc_enabled) {
+ ocs_log_debug(ocs, "called with io_alloc_enabled = FALSE\n");
+ ocs_unlock(&node->active_ios_lock);
+ return NULL;
+ }
+
+ els = ocs_io_alloc(ocs);
+ if (els == NULL) {
+ ocs_atomic_add_return(&xport->io_alloc_failed_count, 1);
+ ocs_unlock(&node->active_ios_lock);
+ return NULL;
+ }
+
+ /* initialize refcount */
+ ocs_ref_init(&els->ref, _ocs_els_io_free, els);
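+ /* _ocs_els_io_free() runs when the last reference is released */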
+
+ switch (role) {
+ case OCS_ELS_ROLE_ORIGINATOR:
+ els->cmd_ini = TRUE;
+ els->cmd_tgt = FALSE;
+ break;
+ case OCS_ELS_ROLE_RESPONDER:
+ els->cmd_ini = FALSE;
+ els->cmd_tgt = TRUE;
+ break;
+ }
+
+ /* IO should not have an associated HW IO yet. Assigned below. */
+ if (els->hio != NULL) {
+ ocs_log_err(ocs, "assertion failed. HIO is not null\n");
+ ocs_io_free(ocs, els);
+ ocs_unlock(&node->active_ios_lock);
+ return NULL;
+ }
+
+ /* populate generic io fields */
+ els->ocs = ocs;
+ els->node = node;
+
+ /* set type and ELS-specific fields */
+ els->io_type = OCS_IO_TYPE_ELS;
+ els->display_name = "pending";
+
+ if (reqlen > OCS_ELS_REQ_LEN) {
+ ocs_log_err(ocs, "ELS command request len greater than allocated\n");
+ ocs_io_free(ocs, els);
+ ocs_unlock(&node->active_ios_lock);
+ return NULL;
+ }
+
+ if (rsplen > OCS_ELS_GID_PT_RSP_LEN) {
+ ocs_log_err(ocs, "ELS command response len: %d "
+ "greater than allocated\n", rsplen);
+ ocs_io_free(ocs, els);
+ ocs_unlock(&node->active_ios_lock);
+ return NULL;
+ }
+
+ els->els_req.size = reqlen;
+ els->els_rsp.size = rsplen;
+
+ if (els != NULL) {
+ ocs_memset(&els->els_sm, 0, sizeof(els->els_sm));
+ els->els_sm.app = els;
+
+ /* initialize fields */
+ els->els_retries_remaining = OCS_FC_ELS_DEFAULT_RETRIES;
+ els->els_evtdepth = 0;
+ els->els_pend = 0;
+ els->els_active = 0;
+
+ /* add els structure to ELS IO list */
+ ocs_list_add_tail(&node->els_io_pend_list, els);
+ els->els_pend = 1;
+ }
+ ocs_unlock(&node->active_ios_lock);
+ return els;
+}
+
+/**
+ * @ingroup els_api
+ * @brief Free IO structure for an ELS IO context.
+ *
+ * <h3 class="desc">Description</h3>
+ * Free the IO for an ELS IO context.
+ *
+ * @param els ELS IO structure for which IO is allocated
+ *
+ * @return None
+ */
+
+void
+ocs_els_io_free(ocs_io_t *els)
+{
+ ocs_ref_put(&els->ref);
+}
+
+/**
+ * @ingroup els_api
+ * @brief Free IO structure for an ELS IO context.
+ *
+ * <h3 class="desc">Description</h3>
+ * Free the IO for an ELS IO context.
+ *
+ * @param arg ELS IO structure for which IO is allocated
+ *
+ * @return None
+ */
+
+static void
+_ocs_els_io_free(void *arg)
+{
+ ocs_io_t *els = (ocs_io_t *)arg;
+ ocs_t *ocs;
+ ocs_node_t *node;
+ int send_empty_event = FALSE;
+
+ ocs_assert(els);
+ ocs_assert(els->node);
+ ocs_assert(els->node->ocs);
+
+ node = els->node;
+ ocs = node->ocs;
+
+ ocs_lock(&node->active_ios_lock);
+ if (els->els_active) {
+ /* if active, remove from active list and check empty */
+ ocs_list_remove(&node->els_io_active_list, els);
+ /* Send list empty event if the IO allocator is disabled, and the list is empty
+ * If node->io_alloc_enabled was not checked, the event would be posted continually
+ */
+ send_empty_event = (!node->io_alloc_enabled) && ocs_list_empty(&node->els_io_active_list);
+ els->els_active = 0;
+ } else if (els->els_pend) {
+ /* if pending, remove from pending list; node shutdown isn't
+ * gated off the pending list (only the active list), so no
+ * need to check if pending list is empty
+ */
+ ocs_list_remove(&node->els_io_pend_list, els);
+ els->els_pend = 0;
+ } else {
+ ocs_log_err(ocs, "assertion failed: neither els->els_pend nor els->els_active set\n");
+ ocs_unlock(&node->active_ios_lock);
+ return;
+ }
+
+ ocs_unlock(&node->active_ios_lock);
+
+ ocs_io_free(ocs, els);
+
+ if (send_empty_event) {
+ ocs_node_post_event(node, OCS_EVT_ALL_CHILD_NODES_FREE, NULL);
+ }
+
+ ocs_scsi_check_pending(ocs);
+}
+
+/**
+ * @ingroup els_api
+ * @brief Make an ELS IO active.
+ *
+ * @param els Pointer to the IO context to make active.
+ *
+ * @return None.
+ */
+
+static void
+ocs_els_make_active(ocs_io_t *els)
+{
+ ocs_node_t *node = els->node;
+
+ /* move ELS from pending list to active list */
+ ocs_lock(&node->active_ios_lock);
+ if (els->els_pend) {
+ if (els->els_active) {
+ ocs_log_err(node->ocs, "assertion failed: both els->els_pend and els->els_active set\n");
+ ocs_unlock(&node->active_ios_lock);
+ return;
+ } else {
+
+ /* remove from pending list */
+ ocs_list_remove(&node->els_io_pend_list, els);
+ els->els_pend = 0;
+
+ /* add els structure to ELS IO list */
+ ocs_list_add_tail(&node->els_io_active_list, els);
+ els->els_active = 1;
+ }
+ } else {
+ /* must be retrying; make sure it's already active */
+ if (!els->els_active) {
+ ocs_log_err(node->ocs, "assertion failed: neither els->els_pend nor els->els_active set\n");
+ }
+ }
+ ocs_unlock(&node->active_ios_lock);
+}
+
+/**
+ * @ingroup els_api
+ * @brief Send the ELS command.
+ *
+ * <h3 class="desc">Description</h3>
+ * The command, given by the \c els IO context, is sent to the node that the IO was
+ * configured with, using ocs_hw_srrs_send(). Upon completion,
+ * the \c cb callback is invoked,
+ * with the application-specific argument set to the \c els IO context.
+ *
+ * @param els Pointer to the IO context.
+ * @param reqlen Byte count in the payload to send.
+ * @param timeout_sec Command timeout, in seconds (0 -> 2*R_A_TOV).
+ * @param cb Completion callback.
+ *
+ * @return Returns 0 on success; or a negative error code value on failure.
+ */
+
+static int32_t
+ocs_els_send(ocs_io_t *els, uint32_t reqlen, uint32_t timeout_sec, ocs_hw_srrs_cb_t cb)
+{
+ ocs_node_t *node = els->node;
+
+ /* update ELS request counter */
+ node->els_req_cnt++;
+
+ /* move ELS from pending list to active list */
+ ocs_els_make_active(els);
+
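+ /* Note: the ELS timeout is carried in els->iparam.els.timeout (set by the
+ * caller); the timeout_sec parameter is not consumed directly here.
+ */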
+ els->wire_len = reqlen;
+ return ocs_scsi_io_dispatch(els, cb);
+}
+
+/**
+ * @ingroup els_api
+ * @brief Send the ELS response.
+ *
+ * <h3 class="desc">Description</h3>
+ * The ELS response, given by the \c els IO context, is sent to the node
+ * that the IO was configured with, using ocs_hw_srrs_send().
+ *
+ * @param els Pointer to the IO context.
+ * @param rsplen Byte count in the payload to send.
+ *
+ * @return Returns 0 on success; or a negative error value on failure.
+ */
+
+static int32_t
+ocs_els_send_rsp(ocs_io_t *els, uint32_t rsplen)
+{
+ ocs_node_t *node = els->node;
+
+ /* increment ELS completion counter */
+ node->els_cmpl_cnt++;
+
+ /* move ELS from pending list to active list */
+ ocs_els_make_active(els);
+
+ els->wire_len = rsplen;
+ return ocs_scsi_io_dispatch(els, ocs_els_acc_cb);
+}
+
+/**
+ * @ingroup els_api
+ * @brief Handle ELS IO request completions.
+ *
+ * <h3 class="desc">Description</h3>
+ * This callback is used for several ELS send operations.
+ *
+ * @param hio Pointer to the HW IO context that completed.
+ * @param rnode Pointer to the remote node.
+ * @param length Length of the returned payload data.
+ * @param status Status of the completion.
+ * @param ext_status Extended status of the completion.
+ * @param arg Application-specific argument (generally a pointer to the ELS IO context).
+ *
+ * @return Returns 0 on success; or a negative error value on failure.
+ */
+
+static int32_t
+ocs_els_req_cb(ocs_hw_io_t *hio, ocs_remote_node_t *rnode, uint32_t length, int32_t status, uint32_t ext_status, void *arg)
+{
+ ocs_io_t *els;
+ ocs_node_t *node;
+ ocs_t *ocs;
+ ocs_node_cb_t cbdata;
+ ocs_io_t *io;
+
+ ocs_assert(arg, -1);
+ io = arg;
+ els = io;
+ ocs_assert(els, -1);
+ ocs_assert(els->node, -1);
+ node = els->node;
+ ocs_assert(node->ocs, -1);
+ ocs = node->ocs;
+
+ ocs_assert(io->hio, -1);
+ ocs_assert(hio == io->hio, -1);
+
+ if (status != 0) {
+ els_io_printf(els, "status x%x ext x%x\n", status, ext_status);
+ }
+
+ /* set the response len element of els->rsp */
+ els->els_rsp.len = length;
+
+ cbdata.status = status;
+ cbdata.ext_status = ext_status;
+ cbdata.header = NULL;
+ cbdata.els = els;
+
+ /* FW returns the number of bytes received on the link in
+ * the WCQE, not the amount placed in the buffer; use this info to
+ * check if there was an overrun.
+ */
+ if (length > els->els_rsp.size) {
+ ocs_log_warn(ocs, "ELS response returned len=%d > buflen=%zu\n",
+ length, els->els_rsp.size);
+ ocs_els_post_event(els, OCS_EVT_SRRS_ELS_REQ_FAIL, &cbdata);
+ return 0;
+ }
+
+ /* Post event to ELS IO object */
+ switch (status) {
+ case SLI4_FC_WCQE_STATUS_SUCCESS:
+ ocs_els_post_event(els, OCS_EVT_SRRS_ELS_REQ_OK, &cbdata);
+ break;
+
+ case SLI4_FC_WCQE_STATUS_LS_RJT:
+ ocs_els_post_event(els, OCS_EVT_SRRS_ELS_REQ_RJT, &cbdata);
+ break;
+
+ case SLI4_FC_WCQE_STATUS_LOCAL_REJECT:
+ switch (ext_status) {
+ case SLI4_FC_LOCAL_REJECT_SEQUENCE_TIMEOUT:
+ ocs_els_post_event(els, OCS_EVT_ELS_REQ_TIMEOUT, &cbdata);
+ break;
+ case SLI4_FC_LOCAL_REJECT_ABORT_REQUESTED:
+ ocs_els_post_event(els, OCS_EVT_ELS_REQ_ABORTED, &cbdata);
+ break;
+ default:
+ ocs_els_post_event(els, OCS_EVT_SRRS_ELS_REQ_FAIL, &cbdata);
+ break;
+ }
+ break;
+ default:
+ ocs_log_warn(ocs, "els req complete: failed status x%x, ext_status x%x\n", status, ext_status);
+ ocs_els_post_event(els, OCS_EVT_SRRS_ELS_REQ_FAIL, &cbdata);
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * @ingroup els_api
+ * @brief Handle ELS IO accept/response completions.
+ *
+ * <h3 class="desc">Description</h3>
+ * This callback is used for several ELS send operations.
+ *
+ * @param hio Pointer to the HW IO context that completed.
+ * @param rnode Pointer to the remote node.
+ * @param length Length of the returned payload data.
+ * @param status Status of the completion.
+ * @param ext_status Extended status of the completion.
+ * @param arg Application-specific argument (generally a pointer to the ELS IO context).
+ *
+ * @return Returns 0 on success; or a negative error value on failure.
+ */
+
+static int32_t
+ocs_els_acc_cb(ocs_hw_io_t *hio, ocs_remote_node_t *rnode, uint32_t length, int32_t status, uint32_t ext_status, void *arg)
+{
+ ocs_io_t *els;
+ ocs_node_t *node;
+ ocs_t *ocs;
+ ocs_node_cb_t cbdata;
+ ocs_io_t *io;
+
+ ocs_assert(arg, -1);
+ io = arg;
+ els = io;
+ ocs_assert(els, -1);
+ ocs_assert(els->node, -1);
+ node = els->node;
+ ocs_assert(node->ocs, -1);
+ ocs = node->ocs;
+
+ ocs_assert(io->hio, -1);
+ ocs_assert(hio == io->hio, -1);
+
+ cbdata.status = status;
+ cbdata.ext_status = ext_status;
+ cbdata.header = NULL;
+ cbdata.els = els;
+
+ /* Post node event */
+ switch (status) {
+ case SLI4_FC_WCQE_STATUS_SUCCESS:
+ ocs_node_post_event(node, OCS_EVT_SRRS_ELS_CMPL_OK, &cbdata);
+ break;
+
+ default:
+ ocs_log_warn(ocs, "[%s] %-8s failed status x%x, ext_status x%x\n",
+ node->display_name, els->display_name, status, ext_status);
+ ocs_log_warn(ocs, "els acc complete: failed status x%x, ext_status x%x\n", status, ext_status);
+ ocs_node_post_event(node, OCS_EVT_SRRS_ELS_CMPL_FAIL, &cbdata);
+ break;
+ }
+
+ /* If this IO has a callback, invoke it */
+ if (els->els_callback) {
+ (*els->els_callback)(node, &cbdata, els->els_callback_arg);
+ }
+
+ ocs_els_io_free(els);
+
+ return 0;
+}
+
+/**
+ * @ingroup els_api
+ * @brief Format and send a PLOGI ELS command.
+ *
+ * <h3 class="desc">Description</h3>
+ * Construct a PLOGI payload using the domain SLI port service parameters,
+ * and send to the \c node.
+ *
+ * @param node Node to which the PLOGI is sent.
+ * @param timeout_sec Command timeout, in seconds.
+ * @param retries Number of times to retry errors before reporting a failure.
+ * @param cb Callback function.
+ * @param cbarg Callback function argument.
+ *
+ * @return Returns pointer to IO object, or NULL if error.
+ */
+
+ocs_io_t *
+ocs_send_plogi(ocs_node_t *node, uint32_t timeout_sec, uint32_t retries,
+ void (*cb)(ocs_node_t *node, ocs_node_cb_t *cbdata, void *arg), void *cbarg)
+{
+ ocs_io_t *els;
+ ocs_t *ocs = node->ocs;
+ fc_plogi_payload_t *plogi;
+
+ node_els_trace();
+
+ els = ocs_els_io_alloc(node, sizeof(*plogi), OCS_ELS_ROLE_ORIGINATOR);
+ if (els == NULL) {
+ ocs_log_err(ocs, "IO alloc failed\n");
+ } else {
+ els->els_timeout_sec = timeout_sec;
+ els->els_retries_remaining = retries;
+ els->els_callback = cb;
+ els->els_callback_arg = cbarg;
+ els->display_name = "plogi";
+
+ /* Build PLOGI request */
+ plogi = els->els_req.virt;
+
+ ocs_memcpy(plogi, node->sport->service_params, sizeof(*plogi));
+
+ plogi->command_code = FC_ELS_CMD_PLOGI;
+ plogi->resv1 = 0;
+
+ ocs_display_sparams(node->display_name, "plogi send req", 0, NULL, plogi->common_service_parameters);
+
+ els->hio_type = OCS_HW_ELS_REQ;
+ els->iparam.els.timeout = timeout_sec;
+
+ ocs_io_transition(els, __ocs_els_init, NULL);
+
+ }
+ return els;
+}
+
+/**
+ * @ingroup els_api
+ * @brief Format and send a FLOGI ELS command.
+ *
+ * <h3 class="desc">Description</h3>
+ * Construct an FLOGI payload, and send to the \c node.
+ *
+ * @param node Node to which the FLOGI is sent.
+ * @param timeout_sec Command timeout, in seconds.
+ * @param retries Number of times to retry errors before reporting a failure.
+ * @param cb Callback function.
+ * @param cbarg Callback function argument.
+ *
+ * @return Returns pointer to IO object, or NULL if error.
+ */
+
+ocs_io_t *
+ocs_send_flogi(ocs_node_t *node, uint32_t timeout_sec, uint32_t retries,
+ els_cb_t cb, void *cbarg)
+{
+ ocs_io_t *els;
+ ocs_t *ocs;
+ fc_plogi_payload_t *flogi;
+
+ ocs_assert(node, NULL);
+ ocs_assert(node->ocs, NULL);
+ ocs_assert(node->sport, NULL);
+ ocs = node->ocs;
+
+ node_els_trace();
+
+ els = ocs_els_io_alloc(node, sizeof(*flogi), OCS_ELS_ROLE_ORIGINATOR);
+ if (els == NULL) {
+ ocs_log_err(ocs, "IO alloc failed\n");
+ } else {
+ els->els_timeout_sec = timeout_sec;
+ els->els_retries_remaining = retries;
+ els->els_callback = cb;
+ els->els_callback_arg = cbarg;
+ els->display_name = "flogi";
+
+ /* Build FLOGI request */
+ flogi = els->els_req.virt;
+
+ ocs_memcpy(flogi, node->sport->service_params, sizeof(*flogi));
+ flogi->command_code = FC_ELS_CMD_FLOGI;
+ flogi->resv1 = 0;
+
+ /* Priority tagging support */
+ flogi->common_service_parameters[1] |= ocs_htobe32(1U << 23);
+
+ ocs_display_sparams(node->display_name, "flogi send req", 0, NULL, flogi->common_service_parameters);
+
+ els->hio_type = OCS_HW_ELS_REQ;
+ els->iparam.els.timeout = timeout_sec;
+ ocs_io_transition(els, __ocs_els_init, NULL);
+ }
+ return els;
+}
+
+/**
+ * @ingroup els_api
+ * @brief Format and send a FDISC ELS command.
+ *
+ * <h3 class="desc">Description</h3>
+ * Construct an FDISC payload, and send to the \c node.
+ *
+ * @param node Node to which the FDISC is sent.
+ * @param timeout_sec Command timeout, in seconds.
+ * @param retries Number of times to retry errors before reporting a failure.
+ * @param cb Callback function.
+ * @param cbarg Callback function argument.
+ *
+ * @return Returns pointer to IO object, or NULL if error.
+ */
+
+ocs_io_t *
+ocs_send_fdisc(ocs_node_t *node, uint32_t timeout_sec, uint32_t retries,
+ els_cb_t cb, void *cbarg)
+{
+ ocs_io_t *els;
+ ocs_t *ocs;
+ fc_plogi_payload_t *fdisc;
+
+ ocs_assert(node, NULL);
+ ocs_assert(node->ocs, NULL);
+ ocs = node->ocs;
+
+ node_els_trace();
+
+ els = ocs_els_io_alloc(node, sizeof(*fdisc), OCS_ELS_ROLE_ORIGINATOR);
+ if (els == NULL) {
+ ocs_log_err(ocs, "IO alloc failed\n");
+ } else {
+ els->els_timeout_sec = timeout_sec;
+ els->els_retries_remaining = retries;
+ els->els_callback = cb;
+ els->els_callback_arg = cbarg;
+ els->display_name = "fdisc";
+
+ /* Build FDISC request */
+ fdisc = els->els_req.virt;
+
+ ocs_memcpy(fdisc, node->sport->service_params, sizeof(*fdisc));
+ fdisc->command_code = FC_ELS_CMD_FDISC;
+ fdisc->resv1 = 0;
+
+ ocs_display_sparams(node->display_name, "fdisc send req", 0, NULL, fdisc->common_service_parameters);
+
+ els->hio_type = OCS_HW_ELS_REQ;
+ els->iparam.els.timeout = timeout_sec;
+ ocs_io_transition(els, __ocs_els_init, NULL);
+ }
+ return els;
+}
+
+/**
+ * @ingroup els_api
+ * @brief Send a PRLI ELS command.
+ *
+ * <h3 class="desc">Description</h3>
+ * Construct a PRLI ELS command, and send to the \c node.
+ *
+ * @param node Node to which the PRLI is sent.
+ * @param timeout_sec Command timeout, in seconds.
+ * @param retries Number of times to retry errors before reporting a failure.
+ * @param cb Callback function.
+ * @param cbarg Callback function argument.
+ *
+ * @return Returns pointer to IO object, or NULL if error.
+ */
+
+ocs_io_t *
+ocs_send_prli(ocs_node_t *node, uint32_t timeout_sec, uint32_t retries,
+ els_cb_t cb, void *cbarg)
+{
+ ocs_t *ocs = node->ocs;
+ ocs_io_t *els;
+ fc_prli_payload_t *prli;
+
+ node_els_trace();
+
+ els = ocs_els_io_alloc(node, sizeof(*prli), OCS_ELS_ROLE_ORIGINATOR);
+ if (els == NULL) {
+ ocs_log_err(ocs, "IO alloc failed\n");
+ } else {
+ els->els_timeout_sec = timeout_sec;
+ els->els_retries_remaining = retries;
+ els->els_callback = cb;
+ els->els_callback_arg = cbarg;
+ els->display_name = "prli";
+
+ /* Build PRLI request */
+ prli = els->els_req.virt;
+
+ ocs_memset(prli, 0, sizeof(*prli));
+
+ prli->command_code = FC_ELS_CMD_PRLI;
+ prli->page_length = 16;
+ prli->payload_length = ocs_htobe16(sizeof(fc_prli_payload_t));
+ prli->type = FC_TYPE_FCP;
+ prli->type_ext = 0;
+ prli->flags = ocs_htobe16(FC_PRLI_ESTABLISH_IMAGE_PAIR);
+ prli->service_params = ocs_htobe16(FC_PRLI_READ_XRDY_DISABLED |
+ (node->sport->enable_ini ? FC_PRLI_INITIATOR_FUNCTION : 0) |
+ (node->sport->enable_tgt ? FC_PRLI_TARGET_FUNCTION : 0));
+
+ /* For Tape Drive support */
+ prli->service_params |= ocs_htobe16(FC_PRLI_CONFIRMED_COMPLETION | FC_PRLI_RETRY |
+ FC_PRLI_TASK_RETRY_ID_REQ| FC_PRLI_REC_SUPPORT);
+
+ els->hio_type = OCS_HW_ELS_REQ;
+ els->iparam.els.timeout = timeout_sec;
+ ocs_io_transition(els, __ocs_els_init, NULL);
+ }
+
+ return els;
+}
+
+/**
+ * @ingroup els_api
+ * @brief Send a PRLO ELS command.
+ *
+ * <h3 class="desc">Description</h3>
+ * Construct a PRLO ELS command, and send to the \c node.
+ *
+ * @param node Node to which the PRLO is sent.
+ * @param timeout_sec Command timeout, in seconds.
+ * @param retries Number of times to retry errors before reporting a failure.
+ * @param cb Callback function.
+ * @param cbarg Callback function argument.
+ *
+ * @return Returns pointer to IO object, or NULL if error.
+ */
+
+ocs_io_t *
+ocs_send_prlo(ocs_node_t *node, uint32_t timeout_sec, uint32_t retries,
+ els_cb_t cb, void *cbarg)
+{
+ ocs_t *ocs = node->ocs;
+ ocs_io_t *els;
+ fc_prlo_payload_t *prlo;
+
+ node_els_trace();
+
+ els = ocs_els_io_alloc(node, sizeof(*prlo), OCS_ELS_ROLE_ORIGINATOR);
+ if (els == NULL) {
+ ocs_log_err(ocs, "IO alloc failed\n");
+ } else {
+ els->els_timeout_sec = timeout_sec;
+ els->els_retries_remaining = retries;
+ els->els_callback = cb;
+ els->els_callback_arg = cbarg;
+ els->display_name = "prlo";
+
+ /* Build PRLO request */
+ prlo = els->els_req.virt;
+
+ ocs_memset(prlo, 0, sizeof(*prlo));
+ prlo->command_code = FC_ELS_CMD_PRLO;
+ prlo->page_length = 16;
+ prlo->payload_length = ocs_htobe16(sizeof(fc_prlo_payload_t));
+ prlo->type = FC_TYPE_FCP;
+ prlo->type_ext = 0;
+
+ els->hio_type = OCS_HW_ELS_REQ;
+ els->iparam.els.timeout = timeout_sec;
+ ocs_io_transition(els, __ocs_els_init, NULL);
+ }
+ return els;
+}
+
+/**
+ * @ingroup els_api
+ * @brief Send a LOGO ELS command.
+ *
+ * <h3 class="desc">Description</h3>
+ * Format a LOGO, and send to the \c node.
+ *
+ * @param node Node to which the LOGO is sent.
+ * @param timeout_sec Command timeout, in seconds.
+ * @param retries Number of times to retry errors before reporting a failure.
+ * @param cb Callback function.
+ * @param cbarg Callback function argument.
+ *
+ * @return Returns pointer to IO object, or NULL if error.
+ */
+
+ocs_io_t *
+ocs_send_logo(ocs_node_t *node, uint32_t timeout_sec, uint32_t retries,
+ els_cb_t cb, void *cbarg)
+{
+ ocs_io_t *els;
+ ocs_t *ocs;
+ fc_logo_payload_t *logo;
+ fc_plogi_payload_t *sparams;
+
+ ocs = node->ocs;
+
+ node_els_trace();
+
+ sparams = (fc_plogi_payload_t*) node->sport->service_params;
+
+ els = ocs_els_io_alloc(node, sizeof(*logo), OCS_ELS_ROLE_ORIGINATOR);
+ if (els == NULL) {
+ ocs_log_err(ocs, "IO alloc failed\n");
+ } else {
+ els->els_timeout_sec = timeout_sec;
+ els->els_retries_remaining = retries;
+ els->els_callback = cb;
+ els->els_callback_arg = cbarg;
+ els->display_name = "logo";
+
+ /* Build LOGO request */
+
+ logo = els->els_req.virt;
+
+ ocs_memset(logo, 0, sizeof(*logo));
+ logo->command_code = FC_ELS_CMD_LOGO;
+ logo->resv1 = 0;
+ logo->port_id = fc_htobe24(node->rnode.sport->fc_id);
+ logo->port_name_hi = sparams->port_name_hi;
+ logo->port_name_lo = sparams->port_name_lo;
+
+ els->hio_type = OCS_HW_ELS_REQ;
+ els->iparam.els.timeout = timeout_sec;
+ ocs_io_transition(els, __ocs_els_init, NULL);
+ }
+ return els;
+}
+
+/**
+ * @ingroup els_api
+ * @brief Send an ADISC ELS command.
+ *
+ * <h3 class="desc">Description</h3>
+ * Construct an ADISC ELS command, and send to the \c node.
+ *
+ * @param node Node to which the ADISC is sent.
+ * @param timeout_sec Command timeout, in seconds.
+ * @param retries Number of times to retry errors before reporting a failure.
+ * @param cb Callback function.
+ * @param cbarg Callback function argument.
+ *
+ * @return Returns pointer to IO object, or NULL if error.
+ */
+
+ocs_io_t *
+ocs_send_adisc(ocs_node_t *node, uint32_t timeout_sec, uint32_t retries,
+ els_cb_t cb, void *cbarg)
+{
+ ocs_io_t *els;
+ ocs_t *ocs;
+ fc_adisc_payload_t *adisc;
+ fc_plogi_payload_t *sparams;
+ ocs_sport_t *sport = node->sport;
+
+ ocs = node->ocs;
+
+ node_els_trace();
+
+ sparams = (fc_plogi_payload_t*) node->sport->service_params;
+
+ els = ocs_els_io_alloc(node, sizeof(*adisc), OCS_ELS_ROLE_ORIGINATOR);
+ if (els == NULL) {
+ ocs_log_err(ocs, "IO alloc failed\n");
+ } else {
+ els->els_timeout_sec = timeout_sec;
+ els->els_retries_remaining = retries;
+ els->els_callback = cb;
+ els->els_callback_arg = cbarg;
+ els->display_name = "adisc";
+
+ /* Build ADISC request */
+
+ adisc = els->els_req.virt;
+
+ ocs_memset(adisc, 0, sizeof(*adisc));
+ adisc->command_code = FC_ELS_CMD_ADISC;
+ adisc->hard_address = fc_htobe24(sport->fc_id);
+ adisc->port_name_hi = sparams->port_name_hi;
+ adisc->port_name_lo = sparams->port_name_lo;
+ adisc->node_name_hi = sparams->node_name_hi;
+ adisc->node_name_lo = sparams->node_name_lo;
+ adisc->port_id = fc_htobe24(node->rnode.sport->fc_id);
+
+ els->hio_type = OCS_HW_ELS_REQ;
+ els->iparam.els.timeout = timeout_sec;
+ ocs_io_transition(els, __ocs_els_init, NULL);
+ }
+ return els;
+}
+
+/**
+ * @ingroup els_api
+ * @brief Send a PDISC ELS command.
+ *
+ * <h3 class="desc">Description</h3>
+ * Construct a PDISC ELS command, and send to the \c node.
+ *
+ * @param node Node to which the PDISC is sent.
+ * @param timeout_sec Command timeout, in seconds.
+ * @param retries Number of times to retry errors before reporting a failure.
+ * @param cb Callback function.
+ * @param cbarg Callback function argument.
+ *
+ * @return Returns pointer to IO object, or NULL if error.
+ */
+
+ocs_io_t *
+ocs_send_pdisc(ocs_node_t *node, uint32_t timeout_sec, uint32_t retries,
+ els_cb_t cb, void *cbarg)
+{
+ ocs_io_t *els;
+ ocs_t *ocs = node->ocs;
+ fc_plogi_payload_t *pdisc;
+
+ node_els_trace();
+
+ els = ocs_els_io_alloc(node, sizeof(*pdisc), OCS_ELS_ROLE_ORIGINATOR);
+ if (els == NULL) {
+ ocs_log_err(ocs, "IO alloc failed\n");
+ } else {
+ els->els_timeout_sec = timeout_sec;
+ els->els_retries_remaining = retries;
+ els->els_callback = cb;
+ els->els_callback_arg = cbarg;
+ els->display_name = "pdisc";
+
+ pdisc = els->els_req.virt;
+
+ ocs_memcpy(pdisc, node->sport->service_params, sizeof(*pdisc));
+
+ pdisc->command_code = FC_ELS_CMD_PDISC;
+ pdisc->resv1 = 0;
+
+ els->hio_type = OCS_HW_ELS_REQ;
+ els->iparam.els.timeout = timeout_sec;
+ ocs_io_transition(els, __ocs_els_init, NULL);
+ }
+ return els;
+}
+
+/**
+ * @ingroup els_api
+ * @brief Send an SCR ELS command.
+ *
+ * <h3 class="desc">Description</h3>
+ * Format an SCR, and send to the \c node.
+ *
+ * @param node Node to which the SCR is sent.
+ * @param timeout_sec Command timeout, in seconds.
+ * @param retries Number of times to retry errors before reporting a failure.
+ * @param cb Callback function.
+ * @param cbarg Callback function argument.
+ *
+ * @return Returns pointer to IO object, or NULL if error.
+ */
+
+ocs_io_t *
+ocs_send_scr(ocs_node_t *node, uint32_t timeout_sec, uint32_t retries,
+ els_cb_t cb, void *cbarg)
+{
+ ocs_io_t *els;
+ ocs_t *ocs = node->ocs;
+ fc_scr_payload_t *req;
+
+ node_els_trace();
+
+ els = ocs_els_io_alloc(node, sizeof(*req), OCS_ELS_ROLE_ORIGINATOR);
+ if (els == NULL) {
+ ocs_log_err(ocs, "IO alloc failed\n");
+ } else {
+ els->els_timeout_sec = timeout_sec;
+ els->els_retries_remaining = retries;
+ els->els_callback = cb;
+ els->els_callback_arg = cbarg;
+ els->display_name = "scr";
+
+ req = els->els_req.virt;
+
+ ocs_memset(req, 0, sizeof(*req));
+ req->command_code = FC_ELS_CMD_SCR;
+ req->function = FC_SCR_REG_FULL;
+
+ els->hio_type = OCS_HW_ELS_REQ;
+ els->iparam.els.timeout = timeout_sec;
+ ocs_io_transition(els, __ocs_els_init, NULL);
+ }
+ return els;
+}
+
+/**
+ * @ingroup els_api
+ * @brief Send an RRQ ELS command.
+ *
+ * <h3 class="desc">Description</h3>
+ * Format an RRQ, and send to the \c node.
+ *
+ * @param node Node to which the RRQ is sent.
+ * @param timeout_sec Command timeout, in seconds.
+ * @param retries Number of times to retry errors before reporting a failure.
+ * @param cb Callback function.
+ * @param cbarg Callback function argument.
+ *
+ * @return Returns pointer to IO object, or NULL if error.
+ */
+
+ocs_io_t *
+ocs_send_rrq(ocs_node_t *node, uint32_t timeout_sec, uint32_t retries,
+ els_cb_t cb, void *cbarg)
+{
+ ocs_io_t *els;
+ ocs_t *ocs = node->ocs;
+ fc_scr_payload_t *req;
+
+ node_els_trace();
+
+ els = ocs_els_io_alloc(node, sizeof(*req), OCS_ELS_ROLE_ORIGINATOR);
+ if (els == NULL) {
+ ocs_log_err(ocs, "IO alloc failed\n");
+ } else {
+ els->els_timeout_sec = timeout_sec;
+ els->els_retries_remaining = retries;
+ els->els_callback = cb;
+ els->els_callback_arg = cbarg;
+ els->display_name = "scr";
+
+ req = els->els_req.virt;
+
+ ocs_memset(req, 0, sizeof(*req));
+ req->command_code = FC_ELS_CMD_RRQ;
+ req->function = FC_SCR_REG_FULL;
+
+ els->hio_type = OCS_HW_ELS_REQ;
+ els->iparam.els.timeout = timeout_sec;
+ ocs_io_transition(els, __ocs_els_init, NULL);
+ }
+ return els;
+}
+
+/**
+ * @ingroup els_api
+ * @brief Send an RSCN ELS command.
+ *
+ * <h3 class="desc">Description</h3>
+ * Format an RSCN, and send to the \c node.
+ *
+ * @param node Node to which the RSCN is sent.
+ * @param timeout_sec Command timeout, in seconds.
+ * @param retries Number of times to retry errors before reporting a failure.
+ * @param port_ids Pointer to port IDs
+ * @param port_ids_count Count of port IDs
+ * @param cb Callback function.
+ * @param cbarg Callback function argument.
+ *
+ * @return Returns pointer to IO object, or NULL if error.
+ */
+ocs_io_t *
+ocs_send_rscn(ocs_node_t *node, uint32_t timeout_sec, uint32_t retries,
+ void *port_ids, uint32_t port_ids_count, els_cb_t cb, void *cbarg)
+{
+ ocs_io_t *els;
+ ocs_t *ocs = node->ocs;
+ fc_rscn_payload_t *req;
+ uint32_t payload_length = sizeof(fc_rscn_affected_port_id_page_t)*(port_ids_count - 1) +
+ sizeof(fc_rscn_payload_t);
+
+ node_els_trace();
+
+ els = ocs_els_io_alloc(node, payload_length, OCS_ELS_ROLE_ORIGINATOR);
+ if (els == NULL) {
+ ocs_log_err(ocs, "IO alloc failed\n");
+ } else {
+ els->els_timeout_sec = timeout_sec;
+ els->els_retries_remaining = retries;
+ els->els_callback = cb;
+ els->els_callback_arg = cbarg;
+ els->display_name = "rscn";
+
+ req = els->els_req.virt;
+
+ req->command_code = FC_ELS_CMD_RSCN;
+ req->page_length = sizeof(fc_rscn_affected_port_id_page_t);
+ req->payload_length = ocs_htobe16(sizeof(*req) +
+ sizeof(fc_rscn_affected_port_id_page_t)*(port_ids_count-1));
+
+ els->hio_type = OCS_HW_ELS_REQ;
+ els->iparam.els.timeout = timeout_sec;
+
+ /* copy in the payload */
+ ocs_memcpy(req->port_list, port_ids, port_ids_count*sizeof(fc_rscn_affected_port_id_page_t));
+
+ /* Submit the request */
+ ocs_io_transition(els, __ocs_els_init, NULL);
+ }
+ return els;
+}
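+
+/*
+ * Example (illustrative; the variable and callback names are hypothetical):
+ * sending an RSCN for a single affected port ID. The caller supplies an
+ * array of affected-port-ID pages, which ocs_send_rscn() copies verbatim
+ * into the payload after the RSCN header.
+ *
+ *     fc_rscn_affected_port_id_page_t pages[1];
+ *
+ *     ocs_memset(pages, 0, sizeof(pages));
+ *     ... fill in the affected port ID fields of pages[0] per FC-LS ...
+ *     if (ocs_send_rscn(node, 30, 3, pages, 1, my_rscn_cb, NULL) == NULL) {
+ *         ocs_log_err(node->ocs, "ocs_send_rscn failed\n");
+ *     }
+ */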
+
+/**
+ * @brief Send an LS_RJT ELS response.
+ *
+ * <h3 class="desc">Description</h3>
+ * Send an LS_RJT ELS response.
+ *
+ * @param io Pointer to a SCSI IO object.
+ * @param ox_id Originator exchange ID being responded to.
+ * @param reason_code Reason code value for LS_RJT.
+ * @param reason_code_expl Reason code explanation value for LS_RJT.
+ * @param vendor_unique Vendor-unique value for LS_RJT.
+ * @param cb Callback function.
+ * @param cbarg Callback function argument.
+ *
+ * @return Returns pointer to IO object, or NULL if error.
+ */
+
+ocs_io_t *
+ocs_send_ls_rjt(ocs_io_t *io, uint32_t ox_id, uint32_t reason_code, uint32_t reason_code_expl,
+ uint32_t vendor_unique, els_cb_t cb, void *cbarg)
+{
+ ocs_node_t *node = io->node;
+ int32_t rc;
+ ocs_t *ocs = node->ocs;
+ fc_ls_rjt_payload_t *rjt;
+
+ node_els_trace();
+
+ io->els_callback = cb;
+ io->els_callback_arg = cbarg;
+ io->display_name = "ls_rjt";
+ io->init_task_tag = ox_id;
+
+ ocs_memset(&io->iparam, 0, sizeof(io->iparam));
+ io->iparam.els.ox_id = ox_id;
+
+ rjt = io->els_req.virt;
+ ocs_memset(rjt, 0, sizeof(*rjt));
+
+ rjt->command_code = FC_ELS_CMD_RJT;
+ rjt->reason_code = reason_code;
+ rjt->reason_code_exp = reason_code_expl;
+
+ io->hio_type = OCS_HW_ELS_RSP;
+ if ((rc = ocs_els_send_rsp(io, sizeof(*rjt)))) {
+ ocs_els_io_free(io);
+ io = NULL;
+ }
+
+ return io;
+}
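+
+/*
+ * Example (illustrative): rejecting an unsupported inbound ELS from an
+ * unsolicited-frame handler, where hdr points at the received FC header.
+ * The reason/explanation codes shown are assumed to be defined in
+ * ocs_fcp.h.
+ *
+ *     ocs_send_ls_rjt(io, ocs_be16toh(hdr->ox_id),
+ *         FC_REASON_COMMAND_NOT_SUPPORTED, FC_EXPL_NO_ADDITIONAL,
+ *         0, NULL, NULL);
+ */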
+
+/**
+ * @ingroup els_api
+ * @brief Send a PLOGI accept response.
+ *
+ * <h3 class="desc">Description</h3>
+ * Construct a PLOGI LS_ACC, and send to the \c node, using the originator exchange ID
+ * \c ox_id.
+ *
+ * @param io Pointer to a SCSI IO object.
+ * @param ox_id Originator exchange ID being responded to.
+ * @param cb Callback function.
+ * @param cbarg Callback function argument.
+ *
+ * @return Returns pointer to IO object, or NULL if error.
+ */
+ocs_io_t *
+ocs_send_plogi_acc(ocs_io_t *io, uint32_t ox_id, els_cb_t cb, void *cbarg)
+{
+ ocs_node_t *node = io->node;
+ int32_t rc;
+ ocs_t *ocs = node->ocs;
+ fc_plogi_payload_t *plogi;
+ fc_plogi_payload_t *req = (fc_plogi_payload_t *)node->service_params;
+
+ node_els_trace();
+
+ io->els_callback = cb;
+ io->els_callback_arg = cbarg;
+ io->display_name = "plog_acc";
+ io->init_task_tag = ox_id;
+
+ ocs_memset(&io->iparam, 0, sizeof(io->iparam));
+ io->iparam.els.ox_id = ox_id;
+
+ plogi = io->els_req.virt;
+
+ /* copy our port's service parameters to payload */
+ ocs_memcpy(plogi, node->sport->service_params, sizeof(*plogi));
+ plogi->command_code = FC_ELS_CMD_ACC;
+ plogi->resv1 = 0;
+
+ /* Set Application header support bit if requested */
+ if (req->common_service_parameters[1] & ocs_htobe32(1U << 24)) {
+ plogi->common_service_parameters[1] |= ocs_htobe32(1U << 24);
+ }
+
+ /* Priority tagging support. */
+ if (req->common_service_parameters[1] & ocs_htobe32(1U << 23)) {
+ plogi->common_service_parameters[1] |= ocs_htobe32(1U << 23);
+ }
+
+ ocs_display_sparams(node->display_name, "plogi send resp", 0, NULL, plogi->common_service_parameters);
+
+ io->hio_type = OCS_HW_ELS_RSP;
+ if ((rc = ocs_els_send_rsp(io, sizeof(*plogi)))) {
+ ocs_els_io_free(io);
+ io = NULL;
+ }
+ return io;
+}
+
+/**
+ * @ingroup els_api
+ * @brief Send an FLOGI accept response for point-to-point negotiation.
+ *
+ * <h3 class="desc">Description</h3>
+ * Construct an FLOGI accept response, and send to the \c node using the originator
+ * exchange id \c ox_id. The \c s_id is used for the response frame source FC ID.
+ *
+ * @param io Pointer to a SCSI IO object.
+ * @param ox_id Originator exchange ID for the response.
+ * @param s_id Source FC ID to be used in the response frame.
+ * @param cb Callback function.
+ * @param cbarg Callback function argument.
+ *
+ * @return Returns pointer to IO object, or NULL if error.
+ */
+ocs_io_t *
+ocs_send_flogi_p2p_acc(ocs_io_t *io, uint32_t ox_id, uint32_t s_id, els_cb_t cb, void *cbarg)
+{
+ ocs_node_t *node = io->node;
+ int32_t rc;
+ ocs_t *ocs = node->ocs;
+ fc_plogi_payload_t *flogi;
+
+ node_els_trace();
+
+ io->els_callback = cb;
+ io->els_callback_arg = cbarg;
+ io->display_name = "flogi_p2p_acc";
+ io->init_task_tag = ox_id;
+
+ ocs_memset(&io->iparam, 0, sizeof(io->iparam));
+ io->iparam.els_sid.ox_id = ox_id;
+ io->iparam.els_sid.s_id = s_id;
+
+ flogi = io->els_req.virt;
+
+ /* copy our port's service parameters to payload */
+ ocs_memcpy(flogi, node->sport->service_params, sizeof(*flogi));
+ flogi->command_code = FC_ELS_CMD_ACC;
+ flogi->resv1 = 0;
+ ocs_memset(flogi->class1_service_parameters, 0, sizeof(flogi->class1_service_parameters));
+ ocs_memset(flogi->class2_service_parameters, 0, sizeof(flogi->class2_service_parameters));
+ ocs_memset(flogi->class3_service_parameters, 0, sizeof(flogi->class3_service_parameters));
+ ocs_memset(flogi->class4_service_parameters, 0, sizeof(flogi->class4_service_parameters));
+
+ io->hio_type = OCS_HW_ELS_RSP_SID;
+ if ((rc = ocs_els_send_rsp(io, sizeof(*flogi)))) {
+ ocs_els_io_free(io);
+ io = NULL;
+ }
+
+ return io;
+}
+
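+/**
+ * @ingroup els_api
+ * @brief Send an FLOGI accept response.
+ *
+ * <h3 class="desc">Description</h3>
+ * Construct an FLOGI LS_ACC, and send to the \c node using the originator
+ * exchange ID \c ox_id. The response frame source FC ID is our SLI port
+ * (sport) FC ID.
+ *
+ * @param io Pointer to a SCSI IO object.
+ * @param ox_id Originator exchange ID.
+ * @param is_fport If nonzero, set the F_PORT and Multiple N_PORT_ID Assignment
+ * bits in the common service parameters.
+ * @param cb Callback function.
+ * @param cbarg Callback function argument.
+ *
+ * @return Returns pointer to IO object, or NULL if error.
+ */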
+ocs_io_t *
+ocs_send_flogi_acc(ocs_io_t *io, uint32_t ox_id, uint32_t is_fport, els_cb_t cb, void *cbarg)
+{
+ ocs_node_t *node = io->node;
+ int32_t rc;
+ ocs_t *ocs = node->ocs;
+ fc_plogi_payload_t *flogi;
+
+ node_els_trace();
+
+ io->els_callback = cb;
+ io->els_callback_arg = cbarg;
+ io->display_name = "flogi_acc";
+ io->init_task_tag = ox_id;
+
+ ocs_memset(&io->iparam, 0, sizeof(io->iparam));
+ io->iparam.els_sid.ox_id = ox_id;
+ io->iparam.els_sid.s_id = io->node->sport->fc_id;
+
+ flogi = io->els_req.virt;
+
+ /* copy our port's service parameters to payload */
+ ocs_memcpy(flogi, node->sport->service_params, sizeof(*flogi));
+
+ /* Set F_port */
+ if (is_fport) {
+ /* Set F_PORT and Multiple N_PORT_ID Assignment */
+ flogi->common_service_parameters[1] |= ocs_htobe32(3U << 28);
+ }
+
+ flogi->command_code = FC_ELS_CMD_ACC;
+ flogi->resv1 = 0;
+
+ ocs_display_sparams(node->display_name, "flogi send resp", 0, NULL, flogi->common_service_parameters);
+
+ ocs_memset(flogi->class1_service_parameters, 0, sizeof(flogi->class1_service_parameters));
+ ocs_memset(flogi->class2_service_parameters, 0, sizeof(flogi->class2_service_parameters));
+ ocs_memset(flogi->class3_service_parameters, 0, sizeof(flogi->class3_service_parameters));
+ ocs_memset(flogi->class4_service_parameters, 0, sizeof(flogi->class4_service_parameters));
+
+ io->hio_type = OCS_HW_ELS_RSP_SID;
+ if ((rc = ocs_els_send_rsp(io, sizeof(*flogi)))) {
+ ocs_els_io_free(io);
+ io = NULL;
+ }
+
+ return io;
+}
+
+/**
+ * @ingroup els_api
+ * @brief Send a PRLI accept response.
+ *
+ * <h3 class="desc">Description</h3>
+ * Construct a PRLI LS_ACC response, and send to the \c node, using the originator
+ * exchange ID \c ox_id.
+ *
+ * @param io Pointer to a SCSI IO object.
+ * @param ox_id Originator exchange ID.
+ * @param cb Callback function.
+ * @param cbarg Callback function argument.
+ *
+ * @return Returns pointer to IO object, or NULL if error.
+ */
+
+ocs_io_t *
+ocs_send_prli_acc(ocs_io_t *io, uint32_t ox_id, uint8_t fc_type, els_cb_t cb, void *cbarg)
+{
+ ocs_node_t *node = io->node;
+ int32_t rc;
+ ocs_t *ocs = node->ocs;
+ fc_prli_payload_t *prli;
+
+ node_els_trace();
+
+ io->els_callback = cb;
+ io->els_callback_arg = cbarg;
+ io->display_name = "prli_acc";
+ io->init_task_tag = ox_id;
+
+ ocs_memset(&io->iparam, 0, sizeof(io->iparam));
+ io->iparam.els.ox_id = ox_id;
+
+ prli = io->els_req.virt;
+ ocs_memset(prli, 0, sizeof(*prli));
+
+ prli->command_code = FC_ELS_CMD_ACC;
+ prli->page_length = 16;
+ prli->payload_length = ocs_htobe16(sizeof(fc_prli_payload_t));
+ prli->type = fc_type;
+ prli->type_ext = 0;
+ prli->flags = ocs_htobe16(FC_PRLI_ESTABLISH_IMAGE_PAIR | FC_PRLI_REQUEST_EXECUTED);
+
+ prli->service_params = ocs_htobe16(FC_PRLI_READ_XRDY_DISABLED |
+ (node->sport->enable_ini ? FC_PRLI_INITIATOR_FUNCTION : 0) |
+ (node->sport->enable_tgt ? FC_PRLI_TARGET_FUNCTION : 0));
+
+ io->hio_type = OCS_HW_ELS_RSP;
+ if ((rc = ocs_els_send_rsp(io, sizeof(*prli)))) {
+ ocs_els_io_free(io);
+ io = NULL;
+ }
+
+ return io;
+}
+
+/**
+ * @ingroup els_api
+ * @brief Send a PRLO accept response.
+ *
+ * <h3 class="desc">Description</h3>
+ * Construct a PRLO LS_ACC response, and send to the \c node, using the originator
+ * exchange ID \c ox_id.
+ *
+ * @param io Pointer to a SCSI IO object.
+ * @param ox_id Originator exchange ID.
+ * @param cb Callback function.
+ * @param cbarg Callback function argument.
+ *
+ * @return Returns pointer to IO object, or NULL if error.
+ */
+
+ocs_io_t *
+ocs_send_prlo_acc(ocs_io_t *io, uint32_t ox_id, uint8_t fc_type, els_cb_t cb, void *cbarg)
+{
+ ocs_node_t *node = io->node;
+ int32_t rc;
+ ocs_t *ocs = node->ocs;
+ fc_prlo_acc_payload_t *prlo_acc;
+
+ node_els_trace();
+
+ io->els_callback = cb;
+ io->els_callback_arg = cbarg;
+ io->display_name = "prlo_acc";
+ io->init_task_tag = ox_id;
+
+ ocs_memset(&io->iparam, 0, sizeof(io->iparam));
+ io->iparam.els.ox_id = ox_id;
+
+ prlo_acc = io->els_req.virt;
+ ocs_memset(prlo_acc, 0, sizeof(*prlo_acc));
+
+ prlo_acc->command_code = FC_ELS_CMD_ACC;
+ prlo_acc->page_length = 16;
+ prlo_acc->payload_length = ocs_htobe16(sizeof(fc_prlo_acc_payload_t));
+ prlo_acc->type = fc_type;
+ prlo_acc->type_ext = 0;
+ prlo_acc->response_code = FC_PRLO_REQUEST_EXECUTED;
+
+ io->hio_type = OCS_HW_ELS_RSP;
+ if ((rc = ocs_els_send_rsp(io, sizeof(*prlo_acc)))) {
+ ocs_els_io_free(io);
+ io = NULL;
+ }
+
+ return io;
+}
+
+/**
+ * @ingroup els_api
+ * @brief Send a generic LS_ACC response without a payload.
+ *
+ * <h3 class="desc">Description</h3>
+ * A generic LS_ACC response is sent to the \c node using the originator exchange ID
+ * \c ox_id.
+ *
+ * @param io Pointer to a SCSI IO object.
+ * @param ox_id Originator exchange ID.
+ * @param cb Callback function.
+ * @param cbarg Callback function argument.
+ *
+ * @return Returns pointer to IO object, or NULL if error.
+ */
+ocs_io_t *
+ocs_send_ls_acc(ocs_io_t *io, uint32_t ox_id, els_cb_t cb, void *cbarg)
+{
+ ocs_node_t *node = io->node;
+ int32_t rc;
+ ocs_t *ocs = node->ocs;
+ fc_acc_payload_t *acc;
+
+ node_els_trace();
+
+ io->els_callback = cb;
+ io->els_callback_arg = cbarg;
+ io->display_name = "ls_acc";
+ io->init_task_tag = ox_id;
+
+ ocs_memset(&io->iparam, 0, sizeof(io->iparam));
+ io->iparam.els.ox_id = ox_id;
+
+ acc = io->els_req.virt;
+ ocs_memset(acc, 0, sizeof(*acc));
+
+ acc->command_code = FC_ELS_CMD_ACC;
+
+ io->hio_type = OCS_HW_ELS_RSP;
+ if ((rc = ocs_els_send_rsp(io, sizeof(*acc)))) {
+ ocs_els_io_free(io);
+ io = NULL;
+ }
+
+ return io;
+}
+
+/**
+ * @ingroup els_api
+ * @brief Send a LOGO accept response.
+ *
+ * <h3 class="desc">Description</h3>
+ * Construct a LOGO LS_ACC response, and send to the \c node, using the originator
+ * exchange ID \c ox_id.
+ *
+ * @param io Pointer to a SCSI IO object.
+ * @param ox_id Originator exchange ID.
+ * @param cb Callback function.
+ * @param cbarg Callback function argument.
+ *
+ * @return Returns pointer to IO object, or NULL if error.
+ */
+ocs_io_t *
+ocs_send_logo_acc(ocs_io_t *io, uint32_t ox_id, els_cb_t cb, void *cbarg)
+{
+ ocs_node_t *node = io->node;
+ int32_t rc;
+ ocs_t *ocs = node->ocs;
+ fc_acc_payload_t *logo;
+
+ node_els_trace();
+
+ io->els_callback = cb;
+ io->els_callback_arg = cbarg;
+ io->display_name = "logo_acc";
+ io->init_task_tag = ox_id;
+
+ ocs_memset(&io->iparam, 0, sizeof(io->iparam));
+ io->iparam.els.ox_id = ox_id;
+
+ logo = io->els_req.virt;
+ ocs_memset(logo, 0, sizeof(*logo));
+
+ logo->command_code = FC_ELS_CMD_ACC;
+ logo->resv1 = 0;
+
+ io->hio_type = OCS_HW_ELS_RSP;
+ if ((rc = ocs_els_send_rsp(io, sizeof(*logo)))) {
+ ocs_els_io_free(io);
+ io = NULL;
+ }
+
+ return io;
+}
+
+/**
+ * @ingroup els_api
+ * @brief Send an ADISC accept response.
+ *
+ * <h3 class="desc">Description</h3>
+ * Construct an ADISC LS_ACC, and send to the \c node, using the originator
+ * exchange ID \c ox_id.
+ *
+ * @param io Pointer to a SCSI IO object.
+ * @param ox_id Originator exchange ID.
+ * @param cb Callback function.
+ * @param cbarg Callback function argument.
+ *
+ * @return Returns pointer to IO object, or NULL if error.
+ */
+
+ocs_io_t *
+ocs_send_adisc_acc(ocs_io_t *io, uint32_t ox_id, els_cb_t cb, void *cbarg)
+{
+ ocs_node_t *node = io->node;
+ int32_t rc;
+ fc_adisc_payload_t *adisc;
+ fc_plogi_payload_t *sparams;
+ ocs_t *ocs;
+
+ ocs_assert(node, NULL);
+ ocs_assert(node->ocs, NULL);
+ ocs = node->ocs;
+
+ node_els_trace();
+
+ io->els_callback = cb;
+ io->els_callback_arg = cbarg;
+ io->display_name = "adisc_acc";
+ io->init_task_tag = ox_id;
+
+ /* Go ahead and send the ELS_ACC */
+ ocs_memset(&io->iparam, 0, sizeof(io->iparam));
+ io->iparam.els.ox_id = ox_id;
+
+ sparams = (fc_plogi_payload_t*) node->sport->service_params;
+ adisc = io->els_req.virt;
+ ocs_memset(adisc, 0, sizeof(fc_adisc_payload_t));
+ adisc->command_code = FC_ELS_CMD_ACC;
+ adisc->hard_address = 0;
+ adisc->port_name_hi = sparams->port_name_hi;
+ adisc->port_name_lo = sparams->port_name_lo;
+ adisc->node_name_hi = sparams->node_name_hi;
+ adisc->node_name_lo = sparams->node_name_lo;
+ adisc->port_id = fc_htobe24(node->rnode.sport->fc_id);
+
+ io->hio_type = OCS_HW_ELS_RSP;
+ if ((rc = ocs_els_send_rsp(io, sizeof(*adisc)))) {
+ ocs_els_io_free(io);
+ io = NULL;
+ }
+
+ return io;
+}
+
+/**
+ * @ingroup els_api
+ * @brief Send an RFTID CT request.
+ *
+ * <h3 class="desc">Description</h3>
+ * Construct an RFTID CT request, and send to the \c node.
+ *
+ * @param node Node to which the RFTID request is sent.
+ * @param timeout_sec Time, in seconds, to wait before timing out the ELS.
+ * @param retries Number of times to retry errors before reporting a failure.
+ * @param cb Callback function.
+ * @param cbarg Callback function argument.
+ *
+ * @return Returns pointer to IO object, or NULL if error.
+ */
+ocs_io_t *
+ocs_ns_send_rftid(ocs_node_t *node, uint32_t timeout_sec, uint32_t retries,
+ els_cb_t cb, void *cbarg)
+{
+ ocs_io_t *els;
+ ocs_t *ocs = node->ocs;
+ fcct_rftid_req_t *rftid;
+
+ node_els_trace();
+
+ els = ocs_els_io_alloc(node, sizeof(*rftid), OCS_ELS_ROLE_ORIGINATOR);
+ if (els == NULL) {
+ ocs_log_err(ocs, "IO alloc failed\n");
+ } else {
+
+ els->iparam.fc_ct.r_ctl = FC_RCTL_ELS;
+ els->iparam.fc_ct.type = FC_TYPE_GS;
+ els->iparam.fc_ct.df_ctl = 0;
+ els->iparam.fc_ct.timeout = timeout_sec;
+
+ els->els_callback = cb;
+ els->els_callback_arg = cbarg;
+ els->display_name = "rftid";
+
+ rftid = els->els_req.virt;
+
+ ocs_memset(rftid, 0, sizeof(*rftid));
+ fcct_build_req_header(&rftid->hdr, FC_GS_NAMESERVER_RFT_ID, (OCS_ELS_RSP_LEN - sizeof(rftid->hdr)));
+ rftid->port_id = ocs_htobe32(node->rnode.sport->fc_id);
+ rftid->fc4_types[FC_GS_TYPE_WORD(FC_TYPE_FCP)] = ocs_htobe32(1 << FC_GS_TYPE_BIT(FC_TYPE_FCP));
+
+ els->hio_type = OCS_HW_FC_CT;
+
+ ocs_io_transition(els, __ocs_els_init, NULL);
+ }
+ return els;
+}
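+
+/*
+ * Illustrative note: RFT_ID is normally the first name server registration
+ * step after PLOGI to the directory server; RFF_ID (below) follows to
+ * register FC-4 features. A hypothetical sequence, with error handling and
+ * callback bodies elided:
+ *
+ *     ocs_ns_send_rftid(node, 30, 3, my_rftid_cb, NULL);
+ *     ... then, from my_rftid_cb on success ...
+ *     ocs_ns_send_rffid(node, 30, 3, my_rffid_cb, NULL);
+ */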
+
+/**
+ * @ingroup els_api
+ * @brief Send an RFFID CT request.
+ *
+ * <h3 class="desc">Description</h3>
+ * Construct an RFFID CT request, and send to the \c node.
+ *
+ * @param node Node to which the RFFID request is sent.
+ * @param timeout_sec Time, in seconds, to wait before timing out the ELS.
+ * @param retries Number of times to retry errors before reporting a failure.
+ * @param cb Callback function.
+ * @param cbarg Callback function argument.
+ *
+ * @return Returns pointer to IO object, or NULL if error.
+ */
+ocs_io_t *
+ocs_ns_send_rffid(ocs_node_t *node, uint32_t timeout_sec, uint32_t retries,
+ els_cb_t cb, void *cbarg)
+{
+ ocs_io_t *els;
+ ocs_t *ocs = node->ocs;
+ fcct_rffid_req_t *rffid;
+
+ node_els_trace();
+
+ els = ocs_els_io_alloc(node, sizeof(*rffid), OCS_ELS_ROLE_ORIGINATOR);
+ if (els == NULL) {
+ ocs_log_err(ocs, "IO alloc failed\n");
+ } else {
+
+ els->iparam.fc_ct.r_ctl = FC_RCTL_ELS;
+ els->iparam.fc_ct.type = FC_TYPE_GS;
+ els->iparam.fc_ct.df_ctl = 0;
+ els->iparam.fc_ct.timeout = timeout_sec;
+
+ els->els_callback = cb;
+ els->els_callback_arg = cbarg;
+ els->display_name = "rffid";
+
+ rffid = els->els_req.virt;
+
+ ocs_memset(rffid, 0, sizeof(*rffid));
+
+ fcct_build_req_header(&rffid->hdr, FC_GS_NAMESERVER_RFF_ID, (OCS_ELS_RSP_LEN - sizeof(rffid->hdr)));
+ rffid->port_id = ocs_htobe32(node->rnode.sport->fc_id);
+ if (node->sport->enable_ini) {
+ rffid->fc4_feature_bits |= FC4_FEATURE_INITIATOR;
+ }
+ if (node->sport->enable_tgt) {
+ rffid->fc4_feature_bits |= FC4_FEATURE_TARGET;
+ }
+ rffid->type = FC_TYPE_FCP;
+
+ els->hio_type = OCS_HW_FC_CT;
+
+ ocs_io_transition(els, __ocs_els_init, NULL);
+ }
+ return els;
+}
+
+/**
+ * @ingroup els_api
+ * @brief Send a GIDPT CT request.
+ *
+ * <h3 class="desc">Description</h3>
+ * Construct a GIDPT CT request, and send to the \c node.
+ *
+ * @param node Node to which the GIDPT request is sent.
+ * @param timeout_sec Time, in seconds, to wait before timing out the ELS.
+ * @param retries Number of times to retry errors before reporting a failure.
+ * @param cb Callback function.
+ * @param cbarg Callback function argument.
+ *
+ * @return Returns pointer to IO object, or NULL if error.
+ */
+
+ocs_io_t *
+ocs_ns_send_gidpt(ocs_node_t *node, uint32_t timeout_sec, uint32_t retries,
+ els_cb_t cb, void *cbarg)
+{
+ ocs_io_t *els;
+ ocs_t *ocs = node->ocs;
+ fcct_gidpt_req_t *gidpt;
+
+ node_els_trace();
+
+ els = ocs_els_io_alloc_size(node, sizeof(*gidpt), OCS_ELS_GID_PT_RSP_LEN, OCS_ELS_ROLE_ORIGINATOR);
+ if (els == NULL) {
+ ocs_log_err(ocs, "IO alloc failed\n");
+ } else {
+
+ els->iparam.fc_ct.r_ctl = FC_RCTL_ELS;
+ els->iparam.fc_ct.type = FC_TYPE_GS;
+ els->iparam.fc_ct.df_ctl = 0;
+ els->iparam.fc_ct.timeout = timeout_sec;
+
+ els->els_callback = cb;
+ els->els_callback_arg = cbarg;
+ els->display_name = "gidpt";
+
+ gidpt = els->els_req.virt;
+
+ ocs_memset(gidpt, 0, sizeof(*gidpt));
+ fcct_build_req_header(&gidpt->hdr, FC_GS_NAMESERVER_GID_PT, (OCS_ELS_GID_PT_RSP_LEN - sizeof(gidpt->hdr)) );
+ gidpt->domain_id_scope = 0;
+ gidpt->area_id_scope = 0;
+ gidpt->port_type = 0x7f;
+
+ els->hio_type = OCS_HW_FC_CT;
+
+ ocs_io_transition(els, __ocs_els_init, NULL);
+ }
+ return els;
+}
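+
+/*
+ * Illustrative note: GID_PT asks the name server for the port IDs of all
+ * ports matching the given port type (0x7f requests all Nx_Ports). On
+ * success, the accept payload lands in els->els_rsp.virt (sized
+ * OCS_ELS_GID_PT_RSP_LEN), and the caller's callback is expected to walk
+ * the returned port ID list and reconcile it with its known remote nodes.
+ */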
+
+/**
+ * @ingroup els_api
+ * @brief Send a BA_ACC given the request's FC header
+ *
+ * <h3 class="desc">Description</h3>
+ * Using the S_ID/D_ID from the request's FC header, generate a BA_ACC.
+ *
+ * @param io Pointer to a SCSI IO object.
+ * @param hdr Pointer to the FC header.
+ *
+ * @return Returns pointer to IO object, or NULL if error.
+ */
+
+ocs_io_t *
+ocs_bls_send_acc_hdr(ocs_io_t *io, fc_header_t *hdr)
+{
+ uint16_t ox_id = ocs_be16toh(hdr->ox_id);
+ uint16_t rx_id = ocs_be16toh(hdr->rx_id);
+ uint32_t d_id = fc_be24toh(hdr->d_id);
+
+ return ocs_bls_send_acc(io, d_id, ox_id, rx_id);
+}
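+
+/*
+ * Example (illustrative): accepting an inbound ABTS from an
+ * unsolicited-frame handler, where hdr points at the ABTS FC header and
+ * io is a newly allocated IO for the response.
+ *
+ *     if (ocs_bls_send_acc_hdr(io, hdr) == NULL) {
+ *         ... handle the dispatch failure ...
+ *     }
+ */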
+
+/**
+ * @ingroup els_api
+ * @brief Send a BLS BA_ACC response.
+ *
+ * <h3 class="desc">Description</h3>
+ * Construct a BLS BA_ACC response, and send to the \c node.
+ *
+ * @param io Pointer to a SCSI IO object.
+ * @param s_id S_ID to use for the response. If UINT32_MAX, then use our SLI port
+ * (sport) S_ID.
+ * @param ox_id Originator exchange ID.
+ * @param rx_id Responder exchange ID.
+ *
+ * @return Returns pointer to IO object, or NULL if error.
+ */
+
+static ocs_io_t *
+ocs_bls_send_acc(ocs_io_t *io, uint32_t s_id, uint16_t ox_id, uint16_t rx_id)
+{
+ ocs_node_t *node = io->node;
+ int32_t rc;
+ fc_ba_acc_payload_t *acc;
+ ocs_t *ocs;
+
+ ocs_assert(node, NULL);
+ ocs_assert(node->ocs, NULL);
+ ocs = node->ocs;
+
+ if (node->rnode.sport->fc_id == s_id) {
+ s_id = UINT32_MAX;
+ }
+
+ /* fill out generic fields */
+ io->ocs = ocs;
+ io->node = node;
+ io->cmd_tgt = TRUE;
+
+ /* fill out BLS Response-specific fields */
+ io->io_type = OCS_IO_TYPE_BLS_RESP;
+ io->display_name = "ba_acc";
+ io->hio_type = OCS_HW_BLS_ACC_SID;
+ io->init_task_tag = ox_id;
+
+ /* fill out iparam fields */
+ ocs_memset(&io->iparam, 0, sizeof(io->iparam));
+ io->iparam.bls_sid.s_id = s_id;
+ io->iparam.bls_sid.ox_id = ox_id;
+ io->iparam.bls_sid.rx_id = rx_id;
+
+ acc = (void *)io->iparam.bls_sid.payload;
+
+ ocs_memset(io->iparam.bls_sid.payload, 0, sizeof(io->iparam.bls_sid.payload));
+ acc->ox_id = io->iparam.bls_sid.ox_id;
+ acc->rx_id = io->iparam.bls_sid.rx_id;
+ acc->high_seq_cnt = UINT16_MAX;
+
+ if ((rc = ocs_scsi_io_dispatch(io, ocs_bls_send_acc_cb))) {
+ ocs_log_err(ocs, "ocs_scsi_io_dispatch() failed: %d\n", rc);
+ ocs_scsi_io_free(io);
+ io = NULL;
+ }
+ return io;
+}
+
+/**
+ * @brief Handle the BLS accept completion.
+ *
+ * <h3 class="desc">Description</h3>
+ * Upon completion of sending a BA_ACC, this callback is invoked by the HW.
+ *
+ * @param hio Pointer to the HW IO object.
+ * @param rnode Pointer to the HW remote node.
+ * @param length Length of the response payload, in bytes.
+ * @param status Completion status.
+ * @param ext_status Extended completion status.
+ * @param app Callback private argument.
+ *
+ * @return Returns 0 on success; or a negative error value on failure.
+ */
+
+static int32_t
+ocs_bls_send_acc_cb(ocs_hw_io_t *hio, ocs_remote_node_t *rnode, uint32_t length, int32_t status, uint32_t ext_status, void *app)
+{
+ ocs_io_t *io = app;
+
+ ocs_assert(io, -1);
+
+ ocs_scsi_io_free(io);
+ return 0;
+}
+
+/**
+ * @brief ELS abort callback.
+ *
+ * <h3 class="desc">Description</h3>
+ * This callback is invoked by the HW when an ELS IO is aborted.
+ *
+ * @param hio Pointer to the HW IO object.
+ * @param rnode Pointer to the HW remote node.
+ * @param length Length of the response payload, in bytes.
+ * @param status Completion status.
+ * @param ext_status Extended completion status.
+ * @param app Callback private argument.
+ *
+ * @return Returns 0 on success; or a negative error value on failure.
+ */
+
+static int32_t
+ocs_els_abort_cb(ocs_hw_io_t *hio, ocs_remote_node_t *rnode, uint32_t length, int32_t status, uint32_t ext_status, void *app)
+{
+ ocs_io_t *els;
+ ocs_io_t *abort_io = NULL; /* IO structure used to abort ELS */
+ ocs_t *ocs;
+
+ ocs_assert(app, -1);
+ abort_io = app;
+ els = abort_io->io_to_abort;
+ ocs_assert(els->node, -1);
+ ocs_assert(els->node->ocs, -1);
+
+ ocs = els->node->ocs;
+
+ if (status != 0) {
+ ocs_log_warn(ocs, "status x%x ext x%x\n", status, ext_status);
+ }
+
+ /* now free the abort IO */
+ ocs_io_free(ocs, abort_io);
+
+ /* send completion event to indicate abort process is complete
+ * Note: The ELS SM will already be receiving ELS_REQ_OK/FAIL/RJT/ABORTED
+ */
+ ocs_els_post_event(els, OCS_EVT_ELS_ABORT_CMPL, NULL);
+
+ /* done with ELS IO to abort */
+ ocs_ref_put(&els->ref); /* ocs_ref_get(): ocs_els_abort_io() */
+ return 0;
+}
+
+/**
+ * @brief Abort an ELS IO.
+ *
+ * <h3 class="desc">Description</h3>
+ * The ELS IO is aborted by making a HW abort IO request,
+ * optionally requesting that an ABTS is sent.
+ *
+ * \b Note: This function allocates a HW IO, and associates the HW IO
+ * with the ELS IO that it is aborting. It does not associate
+ * the HW IO with the node directly, like for ELS requests. The
+ * abort completion is propagated up to the node once the
+ * original WQE and the abort WQE are complete (the original WQE
+ * completion is not propagated up to node).
+ *
+ * @param els Pointer to the ELS IO.
+ * @param send_abts Boolean to indicate if hardware will automatically generate an ABTS.
+ *
+ * @return Returns pointer to Abort IO object, or NULL if error.
+ */
+
+static ocs_io_t *
+ocs_els_abort_io(ocs_io_t *els, int send_abts)
+{
+ ocs_t *ocs;
+ ocs_xport_t *xport;
+ int32_t rc;
+ ocs_io_t *abort_io = NULL;
+
+ ocs_assert(els, NULL);
+ ocs_assert(els->node, NULL);
+ ocs_assert(els->node->ocs, NULL);
+
+ ocs = els->node->ocs;
+ ocs_assert(ocs->xport, NULL);
+ xport = ocs->xport;
+
+ /* take a reference on IO being aborted */
+ if ((ocs_ref_get_unless_zero(&els->ref) == 0)) {
+ /* command no longer active */
+ ocs_log_debug(ocs, "els no longer active\n");
+ return NULL;
+ }
+
+ /* allocate IO structure to send abort */
+ abort_io = ocs_io_alloc(ocs);
+ if (abort_io == NULL) {
+ ocs_atomic_add_return(&xport->io_alloc_failed_count, 1);
+ } else {
+ ocs_assert(abort_io->hio == NULL, NULL);
+
+ /* set generic fields */
+ abort_io->ocs = ocs;
+ abort_io->node = els->node;
+ abort_io->cmd_ini = TRUE;
+
+ /* set type and ABORT-specific fields */
+ abort_io->io_type = OCS_IO_TYPE_ABORT;
+ abort_io->display_name = "abort_els";
+ abort_io->io_to_abort = els;
+ abort_io->send_abts = send_abts;
+
+ /* now dispatch IO */
+ if ((rc = ocs_scsi_io_dispatch_abort(abort_io, ocs_els_abort_cb))) {
+ ocs_log_err(ocs, "ocs_scsi_io_dispatch failed: %d\n", rc);
+ ocs_io_free(ocs, abort_io);
+ abort_io = NULL;
+ }
+ }
+
+ /* if something failed, put reference on ELS to abort */
+ if (abort_io == NULL) {
+ ocs_ref_put(&els->ref); /* ocs_ref_get(): same function */
+ }
+ return abort_io;
+}
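+
+/*
+ * Note: code outside this function does not call ocs_els_abort_io()
+ * directly; it posts OCS_EVT_ABORT_ELS to the ELS state machine (see
+ * __ocs_els_wait_resp() below), which invokes the abort on its behalf:
+ *
+ *     ocs_els_post_event(els, OCS_EVT_ABORT_ELS, NULL);
+ */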
+
+
+/*
+ * ELS IO State Machine
+ */
+
+#define std_els_state_decl(...) \
+ ocs_io_t *els = NULL; \
+ ocs_node_t *node = NULL; \
+ ocs_t *ocs = NULL; \
+ ocs_assert(ctx != NULL, NULL); \
+ els = ctx->app; \
+ ocs_assert(els != NULL, NULL); \
+ node = els->node; \
+ ocs_assert(node != NULL, NULL); \
+ ocs = node->ocs; \
+ ocs_assert(ocs != NULL, NULL);
+
+#define els_sm_trace(...) \
+ do { \
+ if (OCS_LOG_ENABLE_ELS_TRACE(ocs)) \
+ ocs_log_info(ocs, "[%s] %-8s %-20s %-20s\n", node->display_name, els->display_name, \
+ __func__, ocs_sm_event_name(evt)); \
+ } while (0)
+
+
+/**
+ * @brief Cleanup an ELS IO
+ *
+ * <h3 class="desc">Description</h3>
+ * Cleans up an ELS IO by posting the requested event to the owning node object;
+ * invoking the callback, if one is provided; and then freeing the
+ * ELS IO object.
+ *
+ * @param els Pointer to the ELS IO.
+ * @param node_evt Node SM event to post.
+ * @param arg Node SM event argument.
+ *
+ * @return None.
+ */
+
+void
+ocs_els_io_cleanup(ocs_io_t *els, ocs_sm_event_t node_evt, void *arg)
+{
+ ocs_assert(els);
+
+ /* We don't want any further events (e.g. abort requests from the
+ * node state machine), so disable the state machine.
+ */
+ ocs_sm_disable(&els->els_sm);
+ ocs_node_post_event(els->node, node_evt, arg);
+
+ /* If this IO has a callback, invoke it */
+ if (els->els_callback) {
+ (*els->els_callback)(els->node, arg, els->els_callback_arg);
+ }
+ els->els_req_free = 1;
+}
+
+
+/**
+ * @brief Common event handler for the ELS IO state machine.
+ *
+ * <h3 class="desc">Description</h3>
+ * Provide handler for events for which default actions are desired.
+ *
+ * @param funcname Name of the calling function (for logging).
+ * @param ctx Remote node SM context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_els_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_els_state_decl();
+
+ switch(evt) {
+ case OCS_EVT_ENTER:
+ case OCS_EVT_REENTER:
+ case OCS_EVT_EXIT:
+ break;
+
+ /* If ELS_REQ_FAIL is not handled in state, then we'll terminate this ELS and
+ * pass the event to the node
+ */
+ case OCS_EVT_SRRS_ELS_REQ_FAIL:
+ ocs_log_warn(els->node->ocs, "[%s] %-20s %-20s not handled - terminating ELS\n", node->display_name, funcname,
+ ocs_sm_event_name(evt));
+ ocs_els_io_cleanup(els, OCS_EVT_SRRS_ELS_REQ_FAIL, arg);
+ break;
+ default:
+ ocs_log_warn(els->node->ocs, "[%s] %-20s %-20s not handled\n", node->display_name, funcname,
+ ocs_sm_event_name(evt));
+ break;
+ }
+ return NULL;
+}
+
+/**
+ * @brief Initial ELS IO state
+ *
+ * <h3 class="desc">Description</h3>
+ * This is the initial ELS IO state. Upon entry, the requested ELS/CT is submitted to
+ * the hardware.
+ *
+ * @param ctx Remote node SM context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_els_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ int32_t rc = 0;
+ std_els_state_decl();
+
+ els_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_ENTER: {
+ rc = ocs_els_send(els, els->els_req.size, els->els_timeout_sec, ocs_els_req_cb);
+ if (rc) {
+ ocs_node_cb_t cbdata;
+ cbdata.status = cbdata.ext_status = (~0);
+ cbdata.els = els;
+ ocs_log_err(ocs, "ocs_els_send failed: %d\n", rc);
+ ocs_els_io_cleanup(els, OCS_EVT_SRRS_ELS_REQ_FAIL, &cbdata);
+ } else {
+ ocs_io_transition(els, __ocs_els_wait_resp, NULL);
+ }
+ break;
+ }
+ default:
+ __ocs_els_common(__func__, ctx, evt, arg);
+ break;
+ }
+
+ return NULL;
+}
+
+/**
+ * @brief Wait for the ELS request to complete.
+ *
+ * <h3 class="desc">Description</h3>
+ * This is the ELS IO state that waits for the submitted ELS event to complete.
+ * If an error completion event is received, the requested ELS is aborted.
+ *
+ * @param ctx Remote node SM context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_els_wait_resp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ ocs_io_t *io;
+ std_els_state_decl();
+
+ els_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_SRRS_ELS_REQ_OK: {
+ ocs_els_io_cleanup(els, OCS_EVT_SRRS_ELS_REQ_OK, arg);
+ break;
+ }
+
+ case OCS_EVT_SRRS_ELS_REQ_FAIL: {
+ ocs_els_io_cleanup(els, OCS_EVT_SRRS_ELS_REQ_FAIL, arg);
+ break;
+ }
+
+ case OCS_EVT_ELS_REQ_TIMEOUT: {
+ els_io_printf(els, "Timed out, retry (%d tries remaining)\n",
+ els->els_retries_remaining-1);
+ ocs_io_transition(els, __ocs_els_retry, NULL);
+ break;
+ }
+
+ case OCS_EVT_SRRS_ELS_REQ_RJT: {
+ ocs_node_cb_t *cbdata = arg;
+ uint32_t reason_code = (cbdata->ext_status >> 16) & 0xff;
+
+ /* delay and retry if reason code is Logical Busy */
+ switch (reason_code) {
+ case FC_REASON_LOGICAL_BUSY:
+ els->node->els_req_cnt--;
+ els_io_printf(els, "LS_RJT Logical Busy response, delay and retry\n");
+ ocs_io_transition(els, __ocs_els_delay_retry, NULL);
+ break;
+ default:
+ ocs_els_io_cleanup(els, evt, arg);
+ break;
+ }
+ break;
+ }
+
+ case OCS_EVT_ABORT_ELS: {
+ /* request to abort this ELS without an ABTS */
+ els_io_printf(els, "ELS abort requested\n");
+ els->els_retries_remaining = 0; /* Set retries to zero, we are done */
+ io = ocs_els_abort_io(els, FALSE);
+ if (io == NULL) {
+ ocs_log_err(ocs, "ocs_els_send failed\n");
+ ocs_els_io_cleanup(els, OCS_EVT_SRRS_ELS_REQ_FAIL, arg);
+ } else {
+ ocs_io_transition(els, __ocs_els_aborting, NULL);
+ }
+ break;
+ }
+
+ default:
+ __ocs_els_common(__func__, ctx, evt, arg);
+ break;
+ }
+ return NULL;
+}
+
+/**
+ * @brief Wait for the ELS IO abort request to complete, and retry the ELS.
+ *
+ * <h3 class="desc">Description</h3>
+ * This state is entered when waiting for an abort of an ELS
+ * request to complete so the request can be retried.
+ *
+ * @param ctx Remote node SM context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_els_retry(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ int32_t rc = 0;
+ std_els_state_decl();
+
+ els_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_ENTER: {
+ /* handle event for ABORT_XRI WQE;
+ * once the abort is complete, retry if any retries remain.
+ * We don't need to wait for an OCS_EVT_SRRS_ELS_REQ_* event,
+ * because we got here by receiving OCS_EVT_ELS_REQ_TIMEOUT.
+ */
+ ocs_node_cb_t node_cbdata;
+ node_cbdata.status = node_cbdata.ext_status = (~0);
+ node_cbdata.els = els;
+ if (els->els_retries_remaining && --els->els_retries_remaining) {
+ /* Use a different XRI for the retry (we would like a new OX_ID),
+ * so free the HW IO (dispatch will allocate a new one). It's an
+ * optimization to only free the HW IO here and not the ocs_io_t:
+ * freeing the ocs_io_t object would require copying all the necessary
+ * info from the old ocs_io_t object to the new one, and allocating
+ * a new ocs_io_t could fail.
+ */
+ ocs_assert(els->hio, NULL);
+ ocs_hw_io_free(&ocs->hw, els->hio);
+ els->hio = NULL;
+
+ /* result isn't propagated up to node sm, need to decrement req cnt */
+ ocs_assert(els->node->els_req_cnt, NULL);
+ els->node->els_req_cnt--;
+ rc = ocs_els_send(els, els->els_req.size, els->els_timeout_sec, ocs_els_req_cb);
+ if (rc) {
+ ocs_log_err(ocs, "ocs_els_send failed: %d\n", rc);
+ ocs_els_io_cleanup(els, OCS_EVT_SRRS_ELS_REQ_FAIL, &node_cbdata);
+ } else {
+ ocs_io_transition(els, __ocs_els_wait_resp, NULL);
+ }
+ } else {
+ els_io_printf(els, "Retries exhausted\n");
+ ocs_els_io_cleanup(els, OCS_EVT_SRRS_ELS_REQ_FAIL, &node_cbdata);
+ }
+ break;
+ }
+
+ default:
+ __ocs_els_common(__func__, ctx, evt, arg);
+ break;
+ }
+ return NULL;
+}
+
+/**
+ * @brief Wait for a retry timer to expire after having received an abort request.
+ *
+ * <h3 class="desc">Description</h3>
+ * This state is entered when waiting for a timer event, after having received
+ * an abort request, to avoid a race condition with the timer handler.
+ *
+ * @param ctx Remote node SM context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+void *
+__ocs_els_aborted_delay_retry(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_els_state_decl();
+
+ els_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_ENTER:
+ /* mod/resched the timer for a short duration */
+ ocs_mod_timer(&els->delay_timer, 1);
+ break;
+ case OCS_EVT_TIMER_EXPIRED:
+ /* Cancel the timer, skip post node event, and free the io */
+ node->els_req_cnt++;
+ ocs_els_io_cleanup(els, OCS_EVT_SRRS_ELS_REQ_FAIL, arg);
+ break;
+ default:
+ __ocs_els_common(__func__, ctx, evt, arg);
+ break;
+ }
+ return NULL;
+}
+
+/**
+ * @brief Wait for a retry timer to expire.
+ *
+ * <h3 class="desc">Description</h3>
+ * This state is entered when waiting for a timer event, so that
+ * the ELS request can be retried.
+ *
+ * @param ctx Remote node SM context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+void *
+__ocs_els_delay_retry(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_els_state_decl();
+
+ els_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_ENTER:
+ ocs_setup_timer(ocs, &els->delay_timer, ocs_els_delay_timer_cb, els, 5000);
+ break;
+ case OCS_EVT_TIMER_EXPIRED:
+ /* Retry delay timer expired; retry the ELS request. Free the HW IO so
+ * that a new OX_ID is used.
+ */
+ if (els->hio != NULL) {
+ ocs_hw_io_free(&ocs->hw, els->hio);
+ els->hio = NULL;
+ }
+ ocs_io_transition(els, __ocs_els_init, NULL);
+ break;
+ case OCS_EVT_ABORT_ELS:
+ ocs_io_transition(els, __ocs_els_aborted_delay_retry, NULL);
+ break;
+ default:
+ __ocs_els_common(__func__, ctx, evt, arg);
+ break;
+ }
+ return NULL;
+}
+
+/**
+ * @brief Wait for the ELS IO abort request to complete.
+ *
+ * <h3 class="desc">Description</h3>
+ * This state is entered after we abort an ELS WQE and are
+ * waiting for either the original ELS WQE request or the abort
+ * to complete.
+ *
+ * @param ctx Remote node SM context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_els_aborting(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_els_state_decl();
+
+ els_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_SRRS_ELS_REQ_OK:
+ case OCS_EVT_SRRS_ELS_REQ_FAIL:
+ case OCS_EVT_SRRS_ELS_REQ_RJT:
+ case OCS_EVT_ELS_REQ_TIMEOUT:
+ case OCS_EVT_ELS_REQ_ABORTED: {
+ /* completion for ELS received first, transition to wait for abort cmpl */
+ els_io_printf(els, "request cmpl evt=%s\n", ocs_sm_event_name(evt));
+ ocs_io_transition(els, __ocs_els_aborting_wait_abort_cmpl, NULL);
+ break;
+ }
+ case OCS_EVT_ELS_ABORT_CMPL: {
+ /* completion for abort was received first, transition to wait for req cmpl */
+ els_io_printf(els, "abort cmpl evt=%s\n", ocs_sm_event_name(evt));
+ ocs_io_transition(els, __ocs_els_aborting_wait_req_cmpl, NULL);
+ break;
+ }
+ case OCS_EVT_ABORT_ELS:
+ /* nothing we can do but wait */
+ break;
+
+ default:
+ __ocs_els_common(__func__, ctx, evt, arg);
+ break;
+ }
+ return NULL;
+}
+
+/**
+ * @brief Clean up an ELS IO after an abort.
+ *
+ * @param els Pointer to the ELS IO to clean up.
+ *
+ * @return None.
+ */
+
+static void
+ocs_els_abort_cleanup(ocs_io_t *els)
+{
+ /* handle event for ABORT_WQE
+ * whatever state ELS happened to be in, propagate aborted event up
+ * to node state machine in lieu of OCS_EVT_SRRS_ELS_* event
+ */
+ ocs_node_cb_t cbdata;
+ cbdata.status = cbdata.ext_status = 0;
+ cbdata.els = els;
+ els_io_printf(els, "Request aborted\n");
+ ocs_els_io_cleanup(els, OCS_EVT_ELS_REQ_ABORTED, &cbdata);
+}
+
+/**
+ * @brief Wait for the ELS IO abort request to complete.
+ *
+ * <h3 class="desc">Description</h3>
+ * This state is entered after we abort an ELS WQE, we received
+ * the abort completion first and are waiting for the original
+ * ELS WQE request to complete.
+ *
+ * @param ctx Remote node SM context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_els_aborting_wait_req_cmpl(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_els_state_decl();
+
+ els_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_SRRS_ELS_REQ_OK:
+ case OCS_EVT_SRRS_ELS_REQ_FAIL:
+ case OCS_EVT_SRRS_ELS_REQ_RJT:
+ case OCS_EVT_ELS_REQ_TIMEOUT:
+ case OCS_EVT_ELS_REQ_ABORTED: {
+ /* completion for ELS that was aborted */
+ ocs_els_abort_cleanup(els);
+ break;
+ }
+ case OCS_EVT_ABORT_ELS:
+ /* nothing we can do but wait */
+ break;
+
+ default:
+ __ocs_els_common(__func__, ctx, evt, arg);
+ break;
+ }
+ return NULL;
+}
+
+/**
+ * @brief Wait for the ELS IO abort request to complete.
+ *
+ * <h3 class="desc">Description</h3>
+ * This state is entered after we abort an ELS WQE, we received
+ * the original ELS WQE request completion first and are waiting
+ * for the abort to complete.
+ *
+ * @param ctx Remote node SM context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_els_aborting_wait_abort_cmpl(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_els_state_decl();
+
+ els_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_ELS_ABORT_CMPL: {
+ ocs_els_abort_cleanup(els);
+ break;
+ }
+ case OCS_EVT_ABORT_ELS:
+ /* nothing we can do but wait */
+ break;
+
+ default:
+ __ocs_els_common(__func__, ctx, evt, arg);
+ break;
+ }
+ return NULL;
+}
+
+/**
+ * @brief Generate ELS context ddump data.
+ *
+ * <h3 class="desc">Description</h3>
+ * Generate the ddump data for an ELS context.
+ *
+ * @param textbuf Pointer to the text buffer.
+ * @param els Pointer to the ELS context.
+ *
+ * @return None.
+ */
+
+void
+ocs_ddump_els(ocs_textbuf_t *textbuf, ocs_io_t *els)
+{
+ ocs_ddump_section(textbuf, "els", -1);
+ ocs_ddump_value(textbuf, "req_free", "%d", els->els_req_free);
+ ocs_ddump_value(textbuf, "evtdepth", "%d", els->els_evtdepth);
+ ocs_ddump_value(textbuf, "pend", "%d", els->els_pend);
+ ocs_ddump_value(textbuf, "active", "%d", els->els_active);
+ ocs_ddump_io(textbuf, els);
+ ocs_ddump_endsection(textbuf, "els", -1);
+}
+
+
+/**
+ * @brief Return TRUE if the given ELS list is empty (while taking the proper locks).
+ *
+ * Test if the given ELS list is empty while holding the node->active_ios_lock.
+ *
+ * @param node Pointer to the node object.
+ * @param list Pointer to the list.
+ *
+ * @return Returns TRUE if the given ELS list is empty.
+ */
+
+int32_t
+ocs_els_io_list_empty(ocs_node_t *node, ocs_list_t *list)
+{
+ int empty;
+ ocs_lock(&node->active_ios_lock);
+ empty = ocs_list_empty(list);
+ ocs_unlock(&node->active_ios_lock);
+ return empty;
+}
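+
+/*
+ * Example (illustrative; the list name shown is one plausible caller):
+ *
+ *     if (ocs_els_io_list_empty(node, &node->els_io_active_list)) {
+ *         ... no ELS IOs outstanding, safe to continue shutdown ...
+ *     }
+ */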
+
+/**
+ * @brief Handle CT send response completion.
+ *
+ * Called when the CT response completes; frees the IO.
+ *
+ * @param hio Pointer to the HW IO context that completed.
+ * @param rnode Pointer to the remote node.
+ * @param length Length of the returned payload data.
+ * @param status Status of the completion.
+ * @param ext_status Extended status of the completion.
+ * @param arg Application-specific argument (generally a pointer to the ELS IO context).
+ *
+ * @return Returns 0.
+ */
+static int32_t
+ocs_ct_acc_cb(ocs_hw_io_t *hio, ocs_remote_node_t *rnode, uint32_t length, int32_t status, uint32_t ext_status, void *arg)
+{
+ ocs_io_t *io = arg;
+
+ ocs_els_io_free(io);
+
+ return 0;
+}
+
+/**
+ * @brief Send a CT response.
+ *
+ * Sends a CT response frame with a payload.
+ *
+ * @param io Pointer to the IO context.
+ * @param ox_id Originator exchange ID.
+ * @param ct_hdr Pointer to the CT IU.
+ * @param cmd_rsp_code CT response code.
+ * @param reason_code Reason code.
+ * @param reason_code_explanation Reason code explanation.
+ *
+ * @return Returns 0 on success, or a negative error code value on failure.
+ */
+int32_t
+ocs_send_ct_rsp(ocs_io_t *io, uint32_t ox_id, fcct_iu_header_t *ct_hdr, uint32_t cmd_rsp_code, uint32_t reason_code, uint32_t reason_code_explanation)
+{
+ fcct_iu_header_t *rsp = io->els_rsp.virt;
+
+ io->io_type = OCS_IO_TYPE_CT_RESP;
+
+ *rsp = *ct_hdr;
+
+ fcct_build_req_header(rsp, cmd_rsp_code, 0);
+ rsp->reason_code = reason_code;
+ rsp->reason_code_explanation = reason_code_explanation;
+
+ io->display_name = "ct response";
+ io->init_task_tag = ox_id;
+ io->wire_len += sizeof(*rsp);
+
+ ocs_memset(&io->iparam, 0, sizeof(io->iparam));
+
+ io->hio_type = OCS_HW_FC_CT_RSP;
+ io->iparam.fc_ct_rsp.ox_id = ocs_htobe16(ox_id);
+ io->iparam.fc_ct_rsp.r_ctl = 3;
+ io->iparam.fc_ct_rsp.type = FC_TYPE_GS;
+ io->iparam.fc_ct_rsp.df_ctl = 0;
+ io->iparam.fc_ct_rsp.timeout = 5;
+
+ if (ocs_scsi_io_dispatch(io, ocs_ct_acc_cb) < 0) {
+ ocs_els_io_free(io);
+ return -1;
+ }
+ return 0;
+}
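+
+/*
+ * Example (illustrative): rejecting an unhandled CT request from the
+ * unsolicited-frame path, where ct_hdr points at the received CT IU. The
+ * command/reason codes shown are assumed to be defined in ocs_fcp.h.
+ *
+ *     ocs_send_ct_rsp(io, ox_id, ct_hdr, FCCT_HDR_CMDRSP_REJECT,
+ *         FCCT_COMMAND_NOT_SUPPORTED, 0);
+ */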
+
+
+/**
+ * @brief Handle the delay retry timeout.
+ *
+ * Callback invoked when the delay retry timer expires.
+ *
+ * @param arg Pointer to the ELS IO object.
+ *
+ * @return None.
+ */
+static void
+ocs_els_delay_timer_cb(void *arg)
+{
+ ocs_io_t *els = arg;
+ ocs_node_t *node = els->node;
+
+ /*
+ * There is a potential deadlock here because Linux executes timers
+ * in a soft IRQ context. The lock may already be held by the interrupt
+ * thread. Handle this case by attempting to take the node lock, and
+ * rescheduling the timer if we fail to acquire the lock.
+ *
+ * Note: This code relies on the fact that the node lock is recursive.
+ */
+ if (ocs_node_lock_try(node)) {
+ ocs_els_post_event(els, OCS_EVT_TIMER_EXPIRED, NULL);
+ ocs_node_unlock(node);
+ } else {
+ ocs_setup_timer(els->ocs, &els->delay_timer, ocs_els_delay_timer_cb, els, 1);
+ }
+}
diff --git a/sys/dev/ocs_fc/ocs_els.h b/sys/dev/ocs_fc/ocs_els.h
new file mode 100644
index 000000000000..b320c3fb0577
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs_els.h
@@ -0,0 +1,110 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/**
+ * @file
+ * Declarations for the interface exported by ocs_els.
+ */
+
+#if !defined(__OCS_ELS_H__)
+#define __OCS_ELS_H__
+#include "ocs.h"
+
+#define OCS_ELS_RSP_LEN 1024
+#define OCS_ELS_GID_PT_RSP_LEN 8096 /* Enough for 2K remote target nodes */
+
+#define OCS_ELS_REQ_LEN 116 /* Max request length */
+
+typedef enum {
+ OCS_ELS_ROLE_ORIGINATOR,
+ OCS_ELS_ROLE_RESPONDER,
+} ocs_els_role_e;
+
+extern ocs_io_t *ocs_els_io_alloc(ocs_node_t *node, uint32_t reqlen, ocs_els_role_e role);
+extern ocs_io_t *ocs_els_io_alloc_size(ocs_node_t *node, uint32_t reqlen, uint32_t rsplen, ocs_els_role_e role);
+extern void ocs_els_io_free(ocs_io_t *els);
+
+/* ELS command send */
+typedef void (*els_cb_t)(ocs_node_t *node, ocs_node_cb_t *cbdata, void *arg);
+extern ocs_io_t *ocs_send_plogi(ocs_node_t *node, uint32_t timeout_sec, uint32_t retries, els_cb_t cb, void *cbarg);
+extern ocs_io_t *ocs_send_flogi(ocs_node_t *node, uint32_t timeout_sec, uint32_t retries, els_cb_t cb, void *cbarg);
+extern ocs_io_t *ocs_send_fdisc(ocs_node_t *node, uint32_t timeout_sec, uint32_t retries, els_cb_t cb, void *cbarg);
+extern ocs_io_t *ocs_send_prli(ocs_node_t *node, uint32_t timeout_sec, uint32_t retries, els_cb_t cb, void *cbarg);
+extern ocs_io_t *ocs_send_prlo(ocs_node_t *node, uint32_t timeout_sec, uint32_t retries, els_cb_t cb, void *cbarg);
+extern ocs_io_t *ocs_send_logo(ocs_node_t *node, uint32_t timeout_sec, uint32_t retries, els_cb_t cb, void *cbarg);
+extern ocs_io_t *ocs_send_adisc(ocs_node_t *node, uint32_t timeout_sec, uint32_t retries, els_cb_t cb, void *cbarg);
+extern ocs_io_t *ocs_send_pdisc(ocs_node_t *node, uint32_t timeout_sec, uint32_t retries, els_cb_t cb, void *cbarg);
+extern ocs_io_t *ocs_send_scr(ocs_node_t *node, uint32_t timeout_sec, uint32_t retries, els_cb_t cb, void *cbarg);
+extern ocs_io_t *ocs_send_rrq(ocs_node_t *node, uint32_t timeout_sec, uint32_t retries, els_cb_t cb, void *cbarg);
+extern ocs_io_t *ocs_ns_send_rftid(ocs_node_t *node, uint32_t timeout_sec, uint32_t retries, els_cb_t cb, void *cbarg);
+extern ocs_io_t *ocs_ns_send_rffid(ocs_node_t *node, uint32_t timeout_sec, uint32_t retries, els_cb_t cb, void *cbarg);
+extern ocs_io_t *ocs_ns_send_gidpt(ocs_node_t *node, uint32_t timeout_sec, uint32_t retries, els_cb_t cb, void *cbarg);
+extern ocs_io_t *ocs_send_rscn(ocs_node_t *node, uint32_t timeout_sec, uint32_t retries,
+ void *port_ids, uint32_t port_ids_count, els_cb_t cb, void *cbarg);
+extern void ocs_els_io_cleanup(ocs_io_t *els, ocs_sm_event_t node_evt, void *arg);
+
+/* ELS acc send */
+extern ocs_io_t *ocs_send_ls_acc(ocs_io_t *io, uint32_t ox_id, els_cb_t cb, void *cbarg);
+extern ocs_io_t *ocs_send_ls_rjt(ocs_io_t *io, uint32_t ox_id, uint32_t reason_code, uint32_t reason_code_expl,
+ uint32_t vendor_unique, els_cb_t cb, void *cbarg);
+extern ocs_io_t *ocs_send_flogi_p2p_acc(ocs_io_t *io, uint32_t ox_id, uint32_t s_id, els_cb_t cb, void *cbarg);
+extern ocs_io_t *ocs_send_flogi_acc(ocs_io_t *io, uint32_t ox_id, uint32_t is_fport, els_cb_t cb, void *cbarg);
+extern ocs_io_t *ocs_send_plogi_acc(ocs_io_t *io, uint32_t ox_id, els_cb_t cb, void *cbarg);
+extern ocs_io_t *ocs_send_prli_acc(ocs_io_t *io, uint32_t ox_id, uint8_t fc_type, els_cb_t cb, void *cbarg);
+extern ocs_io_t *ocs_send_logo_acc(ocs_io_t *io, uint32_t ox_id, els_cb_t cb, void *cbarg);
+extern ocs_io_t *ocs_send_prlo_acc(ocs_io_t *io, uint32_t ox_id, uint8_t fc_type, els_cb_t cb, void *cbarg);
+extern ocs_io_t *ocs_send_adisc_acc(ocs_io_t *io, uint32_t ox_id, els_cb_t cb, void *cbarg);
+extern void ocs_ddump_els(ocs_textbuf_t *textbuf, ocs_io_t *els);
+
+/* BLS acc send */
+extern ocs_io_t *ocs_bls_send_acc_hdr(ocs_io_t *io, fc_header_t *hdr);
+
+/* ELS IO state machine */
+extern void ocs_els_post_event(ocs_io_t *els, ocs_sm_event_t evt, void *data);
+extern void *__ocs_els_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_els_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_els_wait_resp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_els_aborting(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_els_aborting_wait_req_cmpl(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_els_aborting_wait_abort_cmpl(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_els_retry(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_els_aborted_delay_retry(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_els_delay_retry(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+
+/* Misc */
+extern int32_t ocs_els_io_list_empty(ocs_node_t *node, ocs_list_t *list);
+
+/* CT */
+extern int32_t ocs_send_ct_rsp(ocs_io_t *io, uint32_t ox_id, fcct_iu_header_t *ct_hdr, uint32_t cmd_rsp_code, uint32_t reason_code, uint32_t reason_code_explanation);
+
+#endif
diff --git a/sys/dev/ocs_fc/ocs_fabric.c b/sys/dev/ocs_fc/ocs_fabric.c
new file mode 100644
index 000000000000..162dd558fc6d
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs_fabric.c
@@ -0,0 +1,2046 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/**
+ * @file
+ *
+ * This file implements remote node state machines for:
+ * - Fabric logins.
+ * - Fabric controller events.
+ * - Name/directory services interaction.
+ * - Point-to-point logins.
+ */
+
+/*!
+@defgroup fabric_sm Node State Machine: Fabric States
+@defgroup ns_sm Node State Machine: Name/Directory Services States
+@defgroup p2p_sm Node State Machine: Point-to-Point Node States
+*/
+
+#include "ocs.h"
+#include "ocs_fabric.h"
+#include "ocs_els.h"
+#include "ocs_device.h"
+
+static void ocs_fabric_initiate_shutdown(ocs_node_t *node);
+static void * __ocs_fabric_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+static int32_t ocs_start_ns_node(ocs_sport_t *sport);
+static int32_t ocs_start_fabctl_node(ocs_sport_t *sport);
+static int32_t ocs_process_gidpt_payload(ocs_node_t *node, fcct_gidpt_acc_t *gidpt, uint32_t gidpt_len);
+static void ocs_process_rscn(ocs_node_t *node, ocs_node_cb_t *cbdata);
+static uint64_t ocs_get_wwpn(fc_plogi_payload_t *sp);
+static void gidpt_delay_timer_cb(void *arg);
+
+/**
+ * @ingroup fabric_sm
+ * @brief Fabric node state machine: Initial state.
+ *
+ * @par Description
+ * Send an FLOGI to a well-known fabric.
+ *
+ * @param ctx Remote node sm context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+void *
+__ocs_fabric_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_REENTER: /* not sure why we're getting these ... */
+ ocs_log_debug(node->ocs, ">>> reenter !!\n");
+ /* fall through */
+ case OCS_EVT_ENTER:
+ /* sm: / send FLOGI */
+ ocs_send_flogi(node, OCS_FC_FLOGI_TIMEOUT_SEC, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
+ ocs_node_transition(node, __ocs_fabric_flogi_wait_rsp, NULL);
+ break;
+
+ default:
+ __ocs_fabric_common(__func__, ctx, evt, arg);
+ break;
+ }
+
+ return NULL;
+}
+
+/**
+ * @ingroup fabric_sm
+ * @brief Set sport topology.
+ *
+ * @par Description
+ * Sets the topology on the node's associated sport.
+ *
+ * @param node Pointer to the node for which the topology is set.
+ * @param topology Topology to set.
+ *
+ * @return None.
+ */
+void
+ocs_fabric_set_topology(ocs_node_t *node, ocs_sport_topology_e topology)
+{
+ node->sport->topology = topology;
+}
+
+/**
+ * @ingroup fabric_sm
+ * @brief Notify nodes of the sport topology.
+ *
+ * @par Description
+ * Posts a topology-notification event to every other node in the sport.
+ *
+ * @param node Pointer to the node whose sport topology is broadcast.
+ *
+ * @return None.
+ */
+void
+ocs_fabric_notify_topology(ocs_node_t *node)
+{
+ ocs_node_t *tmp_node;
+ ocs_node_t *next;
+ ocs_sport_topology_e topology = node->sport->topology;
+
+ /* now loop through the nodes in the sport and send topology notification */
+ ocs_sport_lock(node->sport);
+ ocs_list_foreach_safe(&node->sport->node_list, tmp_node, next) {
+ if (tmp_node != node) {
+ ocs_node_post_event(tmp_node, OCS_EVT_SPORT_TOPOLOGY_NOTIFY, (void *)topology);
+ }
+ }
+ ocs_sport_unlock(node->sport);
+}
+
+/**
+ * @ingroup fabric_sm
+ * @brief Fabric node state machine: Wait for an FLOGI response.
+ *
+ * @par Description
+ * Wait for an FLOGI response event.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_fabric_flogi_wait_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ ocs_node_cb_t *cbdata = arg;
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_SRRS_ELS_REQ_OK: {
+
+ if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_FLOGI, __ocs_fabric_common, __func__)) {
+ return NULL;
+ }
+ ocs_assert(node->els_req_cnt, NULL);
+ node->els_req_cnt--;
+
+ ocs_domain_save_sparms(node->sport->domain, cbdata->els->els_rsp.virt);
+
+ ocs_display_sparams(node->display_name, "flogi rcvd resp", 0, NULL,
+ ((uint8_t*)cbdata->els->els_rsp.virt) + 4);
+
+ /* Check to see if the fabric is an F_PORT or an N_PORT */
+ if (ocs_rnode_is_nport(cbdata->els->els_rsp.virt)) {
+ /* sm: if nport and p2p_winner / ocs_domain_attach */
+ ocs_fabric_set_topology(node, OCS_SPORT_TOPOLOGY_P2P);
+ if (ocs_p2p_setup(node->sport)) {
+ node_printf(node, "p2p setup failed, shutting down node\n");
+ node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
+ ocs_fabric_initiate_shutdown(node);
+ } else {
+ if (node->sport->p2p_winner) {
+ ocs_node_transition(node, __ocs_p2p_wait_domain_attach, NULL);
+ if (!node->sport->domain->attached) {
+ node_printf(node, "p2p winner, domain not attached\n");
+ ocs_domain_attach(node->sport->domain, node->sport->p2p_port_id);
+ } else {
+ /* already attached, just send ATTACH_OK */
+ node_printf(node, "p2p winner, domain already attached\n");
+ ocs_node_post_event(node, OCS_EVT_DOMAIN_ATTACH_OK, NULL);
+ }
+ } else {
+ /* peer is p2p winner; PLOGI will be received on the
+ * remote SID=1 node; this node has served its purpose
+ */
+ node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
+ ocs_fabric_initiate_shutdown(node);
+ }
+ }
+ } else {
+ /* sm: if not nport / ocs_domain_attach */
+ /* ext_status has the fc_id, attach domain */
+ ocs_fabric_set_topology(node, OCS_SPORT_TOPOLOGY_FABRIC);
+ ocs_fabric_notify_topology(node);
+ ocs_assert(!node->sport->domain->attached, NULL);
+ ocs_domain_attach(node->sport->domain, cbdata->ext_status);
+ ocs_node_transition(node, __ocs_fabric_wait_domain_attach, NULL);
+ }
+
+ break;
+ }
+
+ case OCS_EVT_ELS_REQ_ABORTED:
+ case OCS_EVT_SRRS_ELS_REQ_RJT:
+ case OCS_EVT_SRRS_ELS_REQ_FAIL: {
+ ocs_sport_t *sport = node->sport;
+ /*
+ * With these errors there is no recovery, so shut down the sport,
+ * leaving the link up and the domain ready.
+ */
+ if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_FLOGI, __ocs_fabric_common, __func__)) {
+ return NULL;
+ }
+ node_printf(node, "FLOGI failed evt=%s, shutting down sport [%s]\n", ocs_sm_event_name(evt),
+ sport->display_name);
+ ocs_assert(node->els_req_cnt, NULL);
+ node->els_req_cnt--;
+ ocs_sm_post_event(&sport->sm, OCS_EVT_SHUTDOWN, NULL);
+ break;
+ }
+
+ default:
+ __ocs_fabric_common(__func__, ctx, evt, arg);
+ break;
+ }
+
+ return NULL;
+}
+
+/**
+ * @ingroup fabric_sm
+ * @brief Fabric node state machine: Initial state for a virtual port.
+ *
+ * @par Description
+ * State entered when a virtual port is created. Send FDISC.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+void *
+__ocs_vport_fabric_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_ENTER:
+ /* sm: send FDISC */
+ ocs_send_fdisc(node, OCS_FC_FLOGI_TIMEOUT_SEC, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
+ ocs_node_transition(node, __ocs_fabric_fdisc_wait_rsp, NULL);
+ break;
+
+ default:
+ __ocs_fabric_common(__func__, ctx, evt, arg);
+ break;
+ }
+
+ return NULL;
+}
+
+/**
+ * @ingroup fabric_sm
+ * @brief Fabric node state machine: Wait for an FDISC response
+ *
+ * @par Description
+ * Used for a virtual port. Waits for an FDISC response. If OK, issue a HW port attach.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+void *
+__ocs_fabric_fdisc_wait_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ ocs_node_cb_t *cbdata = arg;
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_SRRS_ELS_REQ_OK: {
+ /* fc_id is in ext_status */
+ if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_FDISC, __ocs_fabric_common, __func__)) {
+ return NULL;
+ }
+
+ ocs_display_sparams(node->display_name, "fdisc rcvd resp", 0, NULL,
+ ((uint8_t*)cbdata->els->els_rsp.virt) + 4);
+
+ ocs_assert(node->els_req_cnt, NULL);
+ node->els_req_cnt--;
+ /* sm: ocs_sport_attach */
+ ocs_sport_attach(node->sport, cbdata->ext_status);
+ ocs_node_transition(node, __ocs_fabric_wait_domain_attach, NULL);
+ break;
+ }
+
+ case OCS_EVT_SRRS_ELS_REQ_RJT:
+ case OCS_EVT_SRRS_ELS_REQ_FAIL: {
+ if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_FDISC, __ocs_fabric_common, __func__)) {
+ return NULL;
+ }
+ ocs_assert(node->els_req_cnt, NULL);
+ node->els_req_cnt--;
+ ocs_log_err(ocs, "FDISC failed, shutting down sport\n");
+ /* sm: shutdown sport */
+ ocs_sm_post_event(&node->sport->sm, OCS_EVT_SHUTDOWN, NULL);
+ break;
+ }
+
+ default:
+ __ocs_fabric_common(__func__, ctx, evt, arg);
+ break;
+ }
+
+ return NULL;
+}
+
+/**
+ * @ingroup fabric_sm
+ * @brief Fabric node state machine: Wait for a domain/sport attach event.
+ *
+ * @par Description
+ * Waits for a domain/sport attach event.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+void *
+__ocs_fabric_wait_domain_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_ENTER:
+ ocs_node_hold_frames(node);
+ break;
+
+ case OCS_EVT_EXIT:
+ ocs_node_accept_frames(node);
+ break;
+ case OCS_EVT_DOMAIN_ATTACH_OK:
+ case OCS_EVT_SPORT_ATTACH_OK: {
+ int rc;
+
+ rc = ocs_start_ns_node(node->sport);
+ if (rc)
+ return NULL;
+
+ /* sm: if enable_rscn / start fabctl node
+ * Instantiate the fabric controller (sends SCR) */
+ if (node->sport->enable_rscn) {
+ rc = ocs_start_fabctl_node(node->sport);
+ if (rc)
+ return NULL;
+ }
+ ocs_node_transition(node, __ocs_fabric_idle, NULL);
+ break;
+ }
+ default:
+ __ocs_fabric_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+/**
+ * @ingroup fabric_sm
+ * @brief Fabric node state machine: Fabric node is idle.
+ *
+ * @par Description
+ * Wait for fabric node events.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+void *
+__ocs_fabric_idle(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_DOMAIN_ATTACH_OK:
+ break;
+ default:
+ __ocs_fabric_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+/**
+ * @ingroup ns_sm
+ * @brief Name services node state machine: Initialize.
+ *
+ * @par Description
+ * A PLOGI is sent to the well-known name/directory services node.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+void *
+__ocs_ns_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_ENTER:
+ /* sm: send PLOGI */
+ ocs_send_plogi(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
+ ocs_node_transition(node, __ocs_ns_plogi_wait_rsp, NULL);
+ break;
+ default:
+ __ocs_fabric_common(__func__, ctx, evt, arg);
+ break;
+ }
+
+ return NULL;
+}
+
+/**
+ * @ingroup ns_sm
+ * @brief Name services node state machine: Wait for a PLOGI response.
+ *
+ * @par Description
+ * Waits for the response to the PLOGI sent to the name services node, then
+ * issues a node attach request to the HW.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+void *
+__ocs_ns_plogi_wait_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ int32_t rc;
+ ocs_node_cb_t *cbdata = arg;
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_SRRS_ELS_REQ_OK: {
+ /* Save service parameters */
+ if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_fabric_common, __func__)) {
+ return NULL;
+ }
+ ocs_assert(node->els_req_cnt, NULL);
+ node->els_req_cnt--;
+ /* sm: save sparams, ocs_node_attach */
+ ocs_node_save_sparms(node, cbdata->els->els_rsp.virt);
+ ocs_display_sparams(node->display_name, "plogi rcvd resp", 0, NULL,
+ ((uint8_t*)cbdata->els->els_rsp.virt) + 4);
+ rc = ocs_node_attach(node);
+ ocs_node_transition(node, __ocs_ns_wait_node_attach, NULL);
+ if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
+ ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
+ }
+ break;
+ }
+ default:
+ __ocs_fabric_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+/**
+ * @ingroup ns_sm
+ * @brief Name services node state machine: Wait for a node attach completion.
+ *
+ * @par Description
+ * Waits for a node attach completion, then issues an RFTID name services
+ * request.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+void *
+__ocs_ns_wait_node_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_ENTER:
+ ocs_node_hold_frames(node);
+ break;
+
+ case OCS_EVT_EXIT:
+ ocs_node_accept_frames(node);
+ break;
+
+ case OCS_EVT_NODE_ATTACH_OK:
+ node->attached = TRUE;
+ /* sm: send RFTID */
+ ocs_ns_send_rftid(node, OCS_FC_ELS_CT_SEND_DEFAULT_TIMEOUT,
+ OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
+ ocs_node_transition(node, __ocs_ns_rftid_wait_rsp, NULL);
+ break;
+
+ case OCS_EVT_NODE_ATTACH_FAIL:
+ /* node attach failed, shutdown the node */
+ node->attached = FALSE;
+ node_printf(node, "Node attach failed\n");
+ node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
+ ocs_fabric_initiate_shutdown(node);
+ break;
+
+ case OCS_EVT_SHUTDOWN:
+ node_printf(node, "Shutdown event received\n");
+ node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
+ ocs_node_transition(node, __ocs_fabric_wait_attach_evt_shutdown, NULL);
+ break;
+
+ /* if an RSCN is received, just ignore it; we haven't sent
+ * GID_PT yet (the ACC is sent by the fabctl node) */
+ case OCS_EVT_RSCN_RCVD:
+ break;
+
+ default:
+ __ocs_fabric_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+/**
+ * @ingroup ns_sm
+ * @brief Wait for a domain/sport/node attach completion, then
+ * shutdown.
+ *
+ * @par Description
+ * Waits for a domain/sport/node attach completion, then shuts
+ * node down.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+void *
+__ocs_fabric_wait_attach_evt_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_ENTER:
+ ocs_node_hold_frames(node);
+ break;
+
+ case OCS_EVT_EXIT:
+ ocs_node_accept_frames(node);
+ break;
+
+ /* wait for any of these attach events and then shutdown */
+ case OCS_EVT_NODE_ATTACH_OK:
+ node->attached = TRUE;
+ node_printf(node, "Attach evt=%s, proceed to shutdown\n", ocs_sm_event_name(evt));
+ ocs_fabric_initiate_shutdown(node);
+ break;
+
+ case OCS_EVT_NODE_ATTACH_FAIL:
+ node->attached = FALSE;
+ node_printf(node, "Attach evt=%s, proceed to shutdown\n", ocs_sm_event_name(evt));
+ ocs_fabric_initiate_shutdown(node);
+ break;
+
+ /* ignore shutdown event as we're already in shutdown path */
+ case OCS_EVT_SHUTDOWN:
+ node_printf(node, "Shutdown event received\n");
+ break;
+
+ default:
+ __ocs_fabric_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+/**
+ * @ingroup ns_sm
+ * @brief Name services node state machine: Wait for an RFTID response event.
+ *
+ * @par Description
+ * Waits for an RFTID response event, then issues an RFFID name services
+ * request.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+void *
+__ocs_ns_rftid_wait_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_SRRS_ELS_REQ_OK:
+ if (node_check_ns_req(ctx, evt, arg, FC_GS_NAMESERVER_RFT_ID, __ocs_fabric_common, __func__)) {
+ return NULL;
+ }
+ ocs_assert(node->els_req_cnt, NULL);
+ node->els_req_cnt--;
+ /* sm: send RFFID */
+ ocs_ns_send_rffid(node, OCS_FC_ELS_CT_SEND_DEFAULT_TIMEOUT,
+ OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
+ ocs_node_transition(node, __ocs_ns_rffid_wait_rsp, NULL);
+ break;
+
+ /* if an RSCN is received, just ignore it; we haven't sent
+ * GID_PT yet (the ACC is sent by the fabctl node) */
+ case OCS_EVT_RSCN_RCVD:
+ break;
+
+ default:
+ __ocs_fabric_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+/**
+ * @ingroup ns_sm
+ * @brief Name services node state machine: Wait for an RFFID response event.
+ *
+ * @par Description
+ * Waits for an RFFID response event; if RSCN processing is enabled
+ * (enable_rscn), a GIDPT name services request is issued; otherwise,
+ * the node transitions to idle.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+void *
+__ocs_ns_rffid_wait_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_SRRS_ELS_REQ_OK: {
+ if (node_check_ns_req(ctx, evt, arg, FC_GS_NAMESERVER_RFF_ID, __ocs_fabric_common, __func__)) {
+ return NULL;
+ }
+ ocs_assert(node->els_req_cnt, NULL);
+ node->els_req_cnt--;
+ if (node->sport->enable_rscn) {
+ /* sm: if enable_rscn / send GIDPT */
+ ocs_ns_send_gidpt(node, OCS_FC_ELS_CT_SEND_DEFAULT_TIMEOUT,
+ OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
+ ocs_node_transition(node, __ocs_ns_gidpt_wait_rsp, NULL);
+ } else {
+ /* if 'T' only, we're done, go to idle */
+ ocs_node_transition(node, __ocs_ns_idle, NULL);
+ }
+ break;
+ }
+ /* if an RSCN is received, just ignore it; we haven't sent
+ * GID_PT yet (the ACC is sent by the fabctl node) */
+ case OCS_EVT_RSCN_RCVD:
+ break;
+
+ default:
+ __ocs_fabric_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+/**
+ * @ingroup ns_sm
+ * @brief Name services node state machine: Wait for a GIDPT response.
+ *
+ * @par Description
+ * Wait for a GIDPT response from the name server. Process the FC_IDs that are
+ * reported by creating new remote ports, as needed.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+void *
+__ocs_ns_gidpt_wait_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ ocs_node_cb_t *cbdata = arg;
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_SRRS_ELS_REQ_OK: {
+ if (node_check_ns_req(ctx, evt, arg, FC_GS_NAMESERVER_GID_PT, __ocs_fabric_common, __func__)) {
+ return NULL;
+ }
+ ocs_assert(node->els_req_cnt, NULL);
+ node->els_req_cnt--;
+ /* sm: / process GIDPT payload */
+ ocs_process_gidpt_payload(node, cbdata->els->els_rsp.virt, cbdata->els->els_rsp.len);
+ /* TODO: should we logout at this point or just go idle */
+ ocs_node_transition(node, __ocs_ns_idle, NULL);
+ break;
+ }
+
+ case OCS_EVT_SRRS_ELS_REQ_FAIL: {
+ /* not much we can do; will retry with the next RSCN */
+ node_printf(node, "GID_PT failed to complete\n");
+ ocs_assert(node->els_req_cnt, NULL);
+ node->els_req_cnt--;
+ ocs_node_transition(node, __ocs_ns_idle, NULL);
+ break;
+ }
+
+ /* if receive RSCN here, queue up another discovery processing */
+ case OCS_EVT_RSCN_RCVD: {
+ node_printf(node, "RSCN received during GID_PT processing\n");
+ node->rscn_pending = 1;
+ break;
+ }
+
+ default:
+ __ocs_fabric_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+
+/**
+ * @ingroup ns_sm
+ * @brief Name services node state machine: Idle state.
+ *
+ * @par Description
+ * Idle. Waits for RSCN received events (posted from the fabric controller),
+ * then restarts the GIDPT name services query and processing.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+void *
+__ocs_ns_idle(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_ENTER:
+ if (!node->rscn_pending) {
+ break;
+ }
+ node_printf(node, "RSCN pending, restart discovery\n");
+ node->rscn_pending = 0;
+
+ /* fall through */
+
+ case OCS_EVT_RSCN_RCVD: {
+ /* sm: / send GIDPT
+ * If target RSCN processing is enabled, and this is target only
+ * (not initiator), and tgt_rscn_delay is non-zero,
+ * then we delay issuing the GID_PT
+ */
+ if ((ocs->tgt_rscn_delay_msec != 0) && !node->sport->enable_ini && node->sport->enable_tgt &&
+ enable_target_rscn(ocs)) {
+ ocs_node_transition(node, __ocs_ns_gidpt_delay, NULL);
+ } else {
+ ocs_ns_send_gidpt(node, OCS_FC_ELS_CT_SEND_DEFAULT_TIMEOUT,
+ OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
+ ocs_node_transition(node, __ocs_ns_gidpt_wait_rsp, NULL);
+ }
+ break;
+ }
+
+ default:
+ __ocs_fabric_common(__func__, ctx, evt, arg);
+ break;
+ }
+
+ return NULL;
+}
+
+/**
+ * @brief Handle GIDPT delay timer callback
+ *
+ * @par Description
+ * Post an OCS_EVT_GIDPT_DELAY_EXPIRED event to the passed-in node.
+ *
+ * @param arg Pointer to node.
+ *
+ * @return None.
+ */
+static void
+gidpt_delay_timer_cb(void *arg)
+{
+ ocs_node_t *node = arg;
+ int32_t rc;
+
+ ocs_del_timer(&node->gidpt_delay_timer);
+ rc = ocs_xport_control(node->ocs->xport, OCS_XPORT_POST_NODE_EVENT, node, OCS_EVT_GIDPT_DELAY_EXPIRED, NULL);
+ if (rc) {
+ ocs_log_err(node->ocs, "ocs_xport_control(OCS_XPORT_POST_NODE_EVENT) failed: %d\n", rc);
+ }
+}
+
+/**
+ * @ingroup ns_sm
+ * @brief Name services node state machine: Delayed GIDPT.
+ *
+ * @par Description
+ * Waiting for GIDPT delay to expire before submitting GIDPT to name server.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+void *
+__ocs_ns_gidpt_delay(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_ENTER: {
+ time_t delay_msec;
+
+ ocs_assert(ocs->tgt_rscn_delay_msec != 0, NULL);
+
+ /*
+ * Compute the delay time: default to tgt_rscn_delay; if the time
+ * since the last GIDPT is less than tgt_rscn_period, use
+ * tgt_rscn_period instead.
+ */
+ delay_msec = ocs->tgt_rscn_delay_msec;
+ if ((ocs_msectime() - node->time_last_gidpt_msec) < ocs->tgt_rscn_period_msec) {
+ delay_msec = ocs->tgt_rscn_period_msec;
+ }
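+
+ /*
+ * Illustrative values (assumed, not driver defaults): with
+ * tgt_rscn_delay_msec == 500 and tgt_rscn_period_msec == 10000, an
+ * RSCN arriving 2 s after the last GID_PT is delayed 10 s, while one
+ * arriving 15 s after it is delayed only 500 ms.
+ */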
+
+ ocs_setup_timer(ocs, &node->gidpt_delay_timer, gidpt_delay_timer_cb, node, delay_msec);
+
+ break;
+ }
+
+ case OCS_EVT_GIDPT_DELAY_EXPIRED:
+ node->time_last_gidpt_msec = ocs_msectime();
+ ocs_ns_send_gidpt(node, OCS_FC_ELS_CT_SEND_DEFAULT_TIMEOUT,
+ OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
+ ocs_node_transition(node, __ocs_ns_gidpt_wait_rsp, NULL);
+ break;
+
+ case OCS_EVT_RSCN_RCVD: {
+ ocs_log_debug(ocs, "RSCN received while in GIDPT delay - no action\n");
+ break;
+ }
+
+ default:
+ __ocs_fabric_common(__func__, ctx, evt, arg);
+ break;
+ }
+
+ return NULL;
+}
+
+/**
+ * @ingroup fabric_sm
+ * @brief Fabric controller node state machine: Initial state.
+ *
+ * @par Description
+ * Issue a PLOGI to a well-known fabric controller address.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+void *
+__ocs_fabctl_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ ocs_node_t *node = ctx->app;
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_ENTER:
+ /* no need to log in to the fabric controller, just send SCR */
+ ocs_send_scr(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
+ ocs_node_transition(node, __ocs_fabctl_wait_scr_rsp, NULL);
+ break;
+
+ case OCS_EVT_NODE_ATTACH_OK:
+ node->attached = TRUE;
+ break;
+
+ default:
+ __ocs_fabric_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+/**
+ * @ingroup fabric_sm
+ * @brief Fabric controller node state machine: Wait for a node attach request
+ * to complete.
+ *
+ * @par Description
+ * Wait for a node attach to complete. If successful, issue an SCR
+ * to the fabric controller, subscribing to all RSCN events.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ *
+ */
+void *
+__ocs_fabctl_wait_node_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_ENTER:
+ ocs_node_hold_frames(node);
+ break;
+
+ case OCS_EVT_EXIT:
+ ocs_node_accept_frames(node);
+ break;
+
+ case OCS_EVT_NODE_ATTACH_OK:
+ node->attached = TRUE;
+ /* sm: / send SCR */
+ ocs_send_scr(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
+ ocs_node_transition(node, __ocs_fabctl_wait_scr_rsp, NULL);
+ break;
+
+ case OCS_EVT_NODE_ATTACH_FAIL:
+ /* node attach failed, shutdown the node */
+ node->attached = FALSE;
+ node_printf(node, "Node attach failed\n");
+ node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
+ ocs_fabric_initiate_shutdown(node);
+ break;
+
+ case OCS_EVT_SHUTDOWN:
+ node_printf(node, "Shutdown event received\n");
+ node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
+ ocs_node_transition(node, __ocs_fabric_wait_attach_evt_shutdown, NULL);
+ break;
+
+ default:
+ __ocs_fabric_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+/**
+ * @ingroup fabric_sm
+ * @brief Fabric controller node state machine: Wait for an SCR response from the
+ * fabric controller.
+ *
+ * @par Description
+ * Waits for an SCR response from the fabric controller.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+void *
+__ocs_fabctl_wait_scr_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_SRRS_ELS_REQ_OK:
+ if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_SCR, __ocs_fabric_common, __func__)) {
+ return NULL;
+ }
+ ocs_assert(node->els_req_cnt, NULL);
+ node->els_req_cnt--;
+ ocs_node_transition(node, __ocs_fabctl_ready, NULL);
+ break;
+
+ default:
+ __ocs_fabric_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+/**
+ * @ingroup fabric_sm
+ * @brief Fabric controller node state machine: Ready.
+ *
+ * @par Description
+ * In this state, the fabric controller sends an RSCN, which is received
+ * by this node and forwarded to the name services node object, and an
+ * LS_ACC is sent in response.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_fabctl_ready(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ ocs_node_cb_t *cbdata = arg;
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_RSCN_RCVD: {
+ fc_header_t *hdr = cbdata->header->dma.virt;
+
+ /* sm: / process RSCN (forward to name services node),
+ * send LS_ACC */
+ ocs_process_rscn(node, cbdata);
+ ocs_send_ls_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
+ ocs_node_transition(node, __ocs_fabctl_wait_ls_acc_cmpl, NULL);
+ break;
+ }
+
+ default:
+ __ocs_fabric_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+/**
+ * @ingroup fabric_sm
+ * @brief Fabric controller node state machine: Wait for LS_ACC completion.
+ *
+ * @par Description
+ * Waits for the completion of the LS_ACC sent in response to the RSCN.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_fabctl_wait_ls_acc_cmpl(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_ENTER:
+ ocs_node_hold_frames(node);
+ break;
+
+ case OCS_EVT_EXIT:
+ ocs_node_accept_frames(node);
+ break;
+
+ case OCS_EVT_SRRS_ELS_CMPL_OK:
+ ocs_assert(node->els_cmpl_cnt, NULL);
+ node->els_cmpl_cnt--;
+ ocs_node_transition(node, __ocs_fabctl_ready, NULL);
+ break;
+
+ default:
+ __ocs_fabric_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+/**
+ * @ingroup fabric_sm
+ * @brief Initiate fabric node shutdown.
+ *
+ * @param node Node for which shutdown is initiated.
+ *
+ * @return None.
+ */
+
+static void
+ocs_fabric_initiate_shutdown(ocs_node_t *node)
+{
+ ocs_hw_rtn_e rc;
+ ocs_t *ocs = node->ocs;
+ ocs_scsi_io_alloc_disable(node);
+
+ if (node->attached) {
+ /* issue HW node free; we don't care whether it succeeds right away
+ * or some time later, as node->attached is checked again later in
+ * the shutdown process
+ */
+ rc = ocs_hw_node_detach(&ocs->hw, &node->rnode);
+ if (node->rnode.free_group) {
+ ocs_remote_node_group_free(node->node_group);
+ node->node_group = NULL;
+ node->rnode.free_group = FALSE;
+ }
+ if (rc != OCS_HW_RTN_SUCCESS && rc != OCS_HW_RTN_SUCCESS_SYNC) {
+ node_printf(node, "Failed freeing HW node, rc=%d\n", rc);
+ }
+ }
+ /*
+ * node has either been detached or is in the process of being detached,
+ * call common node's initiate cleanup function
+ */
+ ocs_node_initiate_cleanup(node);
+}
+
+/**
+ * @ingroup fabric_sm
+ * @brief Fabric node state machine: Handle the common fabric node events.
+ *
+ * @param funcname Function name text.
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+static void *
+__ocs_fabric_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ ocs_node_t *node = NULL;
+ ocs_assert(ctx, NULL);
+ ocs_assert(ctx->app, NULL);
+ node = ctx->app;
+
+ switch(evt) {
+ case OCS_EVT_DOMAIN_ATTACH_OK:
+ break;
+ case OCS_EVT_SHUTDOWN:
+ node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
+ ocs_fabric_initiate_shutdown(node);
+ break;
+
+ default:
+ /* call default event handler common to all nodes */
+ __ocs_node_common(funcname, ctx, evt, arg);
+ break;
+ }
+ return NULL;
+}
+
+/**
+ * @brief Return TRUE if the remote node is an NPORT.
+ *
+ * @par Description
+ * Examines the common service parameters; per FC-LS, bit 28 of the second
+ * word is the F_Port indicator. Returns TRUE (N_Port) when that bit is clear.
+ *
+ * @param remote_sparms Remote node service parameters.
+ *
+ * @return Returns TRUE if NPORT.
+ */
+
+int32_t
+ocs_rnode_is_nport(fc_plogi_payload_t *remote_sparms)
+{
+ return (ocs_be32toh(remote_sparms->common_service_parameters[1]) & (1U << 28)) == 0;
+}
+
+/**
+ * @brief Return the node's WWPN as a uint64_t.
+ *
+ * @par Description
+ * The WWPN is computed from service parameters, and returned as a uint64_t.
+ *
+ * @param sp Pointer to service parameters.
+ *
+ * @return Returns WWPN.
+ *
+ */
+
+static uint64_t
+ocs_get_wwpn(fc_plogi_payload_t *sp)
+{
+ return (((uint64_t)ocs_be32toh(sp->port_name_hi) << 32ll) | (ocs_be32toh(sp->port_name_lo)));
+}
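+
+/*
+ * Worked example (illustrative values): with port_name_hi == 0x21000024
+ * and port_name_lo == 0xff7fe0f8 after byte swapping, ocs_get_wwpn()
+ * returns 0x21000024ff7fe0f8.
+ */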
+
+/**
+ * @brief Return TRUE if the remote node is the point-to-point winner.
+ *
+ * @par Description
+ * Compares WWPNs. Returns TRUE if the remote node's WWPN is numerically
+ * higher than the local node's WWPN.
+ *
+ * @param sport Pointer to the sport object.
+ *
+ * @return
+ * - 0, if the remote node is the loser.
+ * - 1, if the remote node is the winner.
+ * - (-1), if remote node is neither the loser nor the winner
+ * (WWPNs match)
+ */
+
+static int32_t
+ocs_rnode_is_winner(ocs_sport_t *sport)
+{
+ fc_plogi_payload_t *remote_sparms = (fc_plogi_payload_t*) sport->domain->flogi_service_params;
+ uint64_t remote_wwpn = ocs_get_wwpn(remote_sparms);
+ uint64_t local_wwpn = sport->wwpn;
+ char prop_buf[32];
+ uint64_t wwn_bump = 0;
+
+ if (ocs_get_property("wwn_bump", prop_buf, sizeof(prop_buf)) == 0) {
+ wwn_bump = ocs_strtoull(prop_buf, 0, 0);
+ }
+ local_wwpn ^= wwn_bump;
+
+ ocs_log_debug(sport->ocs, "r: %08x %08x\n", ocs_be32toh(remote_sparms->port_name_hi), ocs_be32toh(remote_sparms->port_name_lo));
+ ocs_log_debug(sport->ocs, "l: %08x %08x\n", (uint32_t) (local_wwpn >> 32ll), (uint32_t) local_wwpn);
+
+ if (remote_wwpn == local_wwpn) {
+ ocs_log_warn(sport->ocs, "WWPN of remote node [%08x %08x] matches local WWPN\n",
+ (uint32_t) (local_wwpn >> 32ll), (uint32_t) local_wwpn);
+ return (-1);
+ }
+
+ return (remote_wwpn > local_wwpn);
+}
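+
+/*
+ * For example (assumed WWPNs): a local WWPN of 0x20000024ff000001 and a
+ * remote WWPN of 0x20000024ff000002 returns 1 (remote wins); swapping the
+ * two returns 0 (local wins); identical WWPNs return -1, which
+ * ocs_p2p_setup() treats as an error unless external loopback is enabled.
+ */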
+
+/**
+ * @ingroup p2p_sm
+ * @brief Point-to-point state machine: Wait for the domain attach to complete.
+ *
+ * @par Description
+ * Once the domain attach has completed, a PLOGI is sent (if we're the
+ * winning point-to-point node).
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_p2p_wait_domain_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_ENTER:
+ ocs_node_hold_frames(node);
+ break;
+
+ case OCS_EVT_EXIT:
+ ocs_node_accept_frames(node);
+ break;
+
+ case OCS_EVT_DOMAIN_ATTACH_OK: {
+ ocs_sport_t *sport = node->sport;
+ ocs_node_t *rnode;
+
+ /* this transient node (SID=0 (recv'd FLOGI) or DID=fabric (sent FLOGI))
+ * is the p2p winner and will use a separate node to send PLOGI to the peer
+ */
+ ocs_assert (node->sport->p2p_winner, NULL);
+
+ rnode = ocs_node_find(sport, node->sport->p2p_remote_port_id);
+ if (rnode != NULL) {
+ /* the "other" transient p2p node has already kicked off the
+ * new node from which PLOGI is sent */
+ node_printf(node, "Node with fc_id x%x already exists\n", rnode->rnode.fc_id);
+ ocs_assert (rnode != node, NULL);
+ } else {
+ /* create new node (SID=1, DID=2) from which to send PLOGI */
+ rnode = ocs_node_alloc(sport, sport->p2p_remote_port_id, FALSE, FALSE);
+ if (rnode == NULL) {
+ ocs_log_err(ocs, "node alloc failed\n");
+ return NULL;
+ }
+
+ ocs_fabric_notify_topology(node);
+ /* sm: allocate p2p remote node */
+ ocs_node_transition(rnode, __ocs_p2p_rnode_init, NULL);
+ }
+
+ /* the transient node (SID=0 or DID=fabric) has served its purpose */
+ if (node->rnode.fc_id == 0) {
+ /* if this is the SID=0 node, move to the init state in case peer
+ * has restarted FLOGI discovery and FLOGI is pending
+ */
+ /* don't send PLOGI on ocs_d_init entry */
+ ocs_node_init_device(node, FALSE);
+ } else {
+ /* if this is the DID=fabric node (we initiated FLOGI), shut it down */
+ node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
+ ocs_fabric_initiate_shutdown(node);
+ }
+ break;
+ }
+
+ default:
+ __ocs_fabric_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+/**
+ * @ingroup p2p_sm
+ * @brief Point-to-point state machine: Remote node initialization state.
+ *
+ * @par Description
+ * This state is entered after winning point-to-point, and the remote node
+ * is instantiated.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_p2p_rnode_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ ocs_node_cb_t *cbdata = arg;
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_ENTER:
+ /* sm: / send PLOGI */
+ ocs_send_plogi(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
+ ocs_node_transition(node, __ocs_p2p_wait_plogi_rsp, NULL);
+ break;
+
+ case OCS_EVT_ABTS_RCVD:
+ /* sm: send BA_ACC */
+ ocs_bls_send_acc_hdr(cbdata->io, cbdata->header->dma.virt);
+ break;
+
+ default:
+ __ocs_fabric_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+/**
+ * @ingroup p2p_sm
+ * @brief Point-to-point node state machine: Wait for the FLOGI accept completion.
+ *
+ * @par Description
+ * Wait for the FLOGI accept completion.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_p2p_wait_flogi_acc_cmpl(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ ocs_node_cb_t *cbdata = arg;
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_ENTER:
+ ocs_node_hold_frames(node);
+ break;
+
+ case OCS_EVT_EXIT:
+ ocs_node_accept_frames(node);
+ break;
+
+ case OCS_EVT_SRRS_ELS_CMPL_OK:
+ ocs_assert(node->els_cmpl_cnt, NULL);
+ node->els_cmpl_cnt--;
+
+ /* sm: if p2p_winner / domain_attach */
+ if (node->sport->p2p_winner) {
+ ocs_node_transition(node, __ocs_p2p_wait_domain_attach, NULL);
+ if (node->sport->domain->attached &&
+ !(node->sport->domain->domain_notify_pend)) {
+ node_printf(node, "Domain already attached\n");
+ ocs_node_post_event(node, OCS_EVT_DOMAIN_ATTACH_OK, NULL);
+ }
+ } else {
+ /* this node has served its purpose; we'll expect a PLOGI on a separate
+ * node (remote SID=0x1); return this node to init state in case peer
+ * restarts discovery -- it may already have (pending frames may exist).
+ */
+ /* don't send PLOGI on ocs_d_init entry */
+ ocs_node_init_device(node, FALSE);
+ }
+ break;
+
+ case OCS_EVT_SRRS_ELS_CMPL_FAIL:
+ /* LS_ACC failed, possibly due to link down; shut down the node and
+ * wait for FLOGI discovery to restart */
+ node_printf(node, "FLOGI LS_ACC failed, shutting down\n");
+ ocs_assert(node->els_cmpl_cnt, NULL);
+ node->els_cmpl_cnt--;
+ node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
+ ocs_fabric_initiate_shutdown(node);
+ break;
+
+ case OCS_EVT_ABTS_RCVD: {
+ /* sm: / send BA_ACC */
+ ocs_bls_send_acc_hdr(cbdata->io, cbdata->header->dma.virt);
+ break;
+ }
+
+ default:
+ __ocs_fabric_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+
+/**
+ * @ingroup p2p_sm
+ * @brief Point-to-point node state machine: Wait for a PLOGI response
+ * as a point-to-point winner.
+ *
+ * @par Description
+ * Wait for a PLOGI response from the remote node as a point-to-point winner.
+ * Submit node attach request to the HW.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_p2p_wait_plogi_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ int32_t rc;
+ ocs_node_cb_t *cbdata = arg;
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_SRRS_ELS_REQ_OK: {
+ if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_fabric_common, __func__)) {
+ return NULL;
+ }
+ ocs_assert(node->els_req_cnt, NULL);
+ node->els_req_cnt--;
+ /* sm: / save sparams, ocs_node_attach */
+ ocs_node_save_sparms(node, cbdata->els->els_rsp.virt);
+ rc = ocs_node_attach(node);
+ ocs_node_transition(node, __ocs_p2p_wait_node_attach, NULL);
+ if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
+ ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
+ }
+ break;
+ }
+ case OCS_EVT_SRRS_ELS_REQ_FAIL: {
+ if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_fabric_common, __func__)) {
+ return NULL;
+ }
+ node_printf(node, "PLOGI failed, shutting down\n");
+ ocs_assert(node->els_req_cnt, NULL);
+ node->els_req_cnt--;
+ node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
+ ocs_fabric_initiate_shutdown(node);
+ break;
+ }
+
+ case OCS_EVT_PLOGI_RCVD: {
+ fc_header_t *hdr = cbdata->header->dma.virt;
+ /* if we're in external loopback mode, just send LS_ACC */
+ if (node->ocs->external_loopback) {
+ ocs_send_plogi_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
+ break;
+ } else {
+ /* if this isn't external loopback, pass to default handler */
+ __ocs_fabric_common(__func__, ctx, evt, arg);
+ }
+ break;
+ }
+ case OCS_EVT_PRLI_RCVD:
+ /* I, or I+T */
+ /* We sent a PLOGI and, before its completion was seen, received a
+ * PRLI from the remote node (WCQEs and RCQEs come in on
+ * different queues, so the order of processing cannot be assumed).
+ * Save the OX_ID so the PRLI response can be sent after the
+ * attach, and continue to wait for the PLOGI response.
+ */
+ ocs_process_prli_payload(node, cbdata->payload->dma.virt);
+ ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PRLI);
+ ocs_node_transition(node, __ocs_p2p_wait_plogi_rsp_recvd_prli, NULL);
+ break;
+ default:
+ __ocs_fabric_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+/**
+ * @ingroup p2p_sm
+ * @brief Point-to-point node state machine: Waiting on a response for a
+ * sent PLOGI.
+ *
+ * @par Description
+ * State is entered when the point-to-point winner has sent
+ * a PLOGI and is waiting for a response. Before receiving the
+ * response, a PRLI was received, implying that the PLOGI was
+ * successful.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_p2p_wait_plogi_rsp_recvd_prli(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ int32_t rc;
+ ocs_node_cb_t *cbdata = arg;
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_ENTER:
+ /*
+ * Since we've received a PRLI, we have a port login and just
+ * need to wait for the PLOGI response to do the node attach;
+ * then we can send the LS_ACC for the PRLI. During this time we
+ * may receive FCP_CMNDs, which is possible since we've already
+ * sent a PRLI and our peer may have accepted it. We are not
+ * waiting on any other unsolicited frames to continue with the
+ * login process, so it will not hurt to hold frames here.
+ */
+ ocs_node_hold_frames(node);
+ break;
+
+ case OCS_EVT_EXIT:
+ ocs_node_accept_frames(node);
+ break;
+
+ case OCS_EVT_SRRS_ELS_REQ_OK: /* PLOGI response received */
+ /* Completion from PLOGI sent */
+ if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_fabric_common, __func__)) {
+ return NULL;
+ }
+ ocs_assert(node->els_req_cnt, NULL);
+ node->els_req_cnt--;
+ /* sm: / save sparams, ocs_node_attach */
+ ocs_node_save_sparms(node, cbdata->els->els_rsp.virt);
+ ocs_display_sparams(node->display_name, "plogi rcvd resp", 0, NULL,
+ ((uint8_t*)cbdata->els->els_rsp.virt) + 4);
+ rc = ocs_node_attach(node);
+ ocs_node_transition(node, __ocs_p2p_wait_node_attach, NULL);
+ if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
+ ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
+ }
+ break;
+
+ case OCS_EVT_SRRS_ELS_REQ_FAIL: /* PLOGI response received */
+ case OCS_EVT_SRRS_ELS_REQ_RJT:
+ /* PLOGI failed, shutdown the node */
+ if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_fabric_common, __func__)) {
+ return NULL;
+ }
+ ocs_assert(node->els_req_cnt, NULL);
+ node->els_req_cnt--;
+ node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
+ ocs_fabric_initiate_shutdown(node);
+ break;
+
+ default:
+ __ocs_fabric_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+/**
+ * @ingroup p2p_sm
+ * @brief Point-to-point node state machine: Wait for a point-to-point node attach
+ * to complete.
+ *
+ * @par Description
+ * Waits for the point-to-point node attach to complete.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_p2p_wait_node_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ ocs_node_cb_t *cbdata = arg;
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_ENTER:
+ ocs_node_hold_frames(node);
+ break;
+
+ case OCS_EVT_EXIT:
+ ocs_node_accept_frames(node);
+ break;
+
+ case OCS_EVT_NODE_ATTACH_OK:
+ node->attached = TRUE;
+ switch (node->send_ls_acc) {
+ case OCS_NODE_SEND_LS_ACC_PRLI: {
+ ocs_d_send_prli_rsp(node->ls_acc_io, node->ls_acc_oxid);
+ node->send_ls_acc = OCS_NODE_SEND_LS_ACC_NONE;
+ node->ls_acc_io = NULL;
+ break;
+ }
+ case OCS_NODE_SEND_LS_ACC_PLOGI: /* Can't happen in P2P */
+ case OCS_NODE_SEND_LS_ACC_NONE:
+ default:
+ /* Normal case for I */
+ /* sm: send_plogi_acc is not set / send PLOGI acc */
+ ocs_node_transition(node, __ocs_d_port_logged_in, NULL);
+ break;
+ }
+ break;
+
+ case OCS_EVT_NODE_ATTACH_FAIL:
+ /* node attach failed, shutdown the node */
+ node->attached = FALSE;
+ node_printf(node, "Node attach failed\n");
+ node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
+ ocs_fabric_initiate_shutdown(node);
+ break;
+
+ case OCS_EVT_SHUTDOWN:
+ node_printf(node, "%s received\n", ocs_sm_event_name(evt));
+ node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
+ ocs_node_transition(node, __ocs_fabric_wait_attach_evt_shutdown, NULL);
+ break;
+ case OCS_EVT_PRLI_RCVD:
+ node_printf(node, "%s: PRLI received before node is attached\n", ocs_sm_event_name(evt));
+ ocs_process_prli_payload(node, cbdata->payload->dma.virt);
+ ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PRLI);
+ break;
+ default:
+ __ocs_fabric_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+/**
+ * @brief Start up the name services node.
+ *
+ * @par Description
+ * Allocates and starts up the name services node.
+ *
+ * @param sport Pointer to the sport structure.
+ *
+ * @return Returns 0 on success, or a negative error value on failure.
+ */
+
+static int32_t
+ocs_start_ns_node(ocs_sport_t *sport)
+{
+ ocs_node_t *ns;
+
+ /* Instantiate a name services node */
+ ns = ocs_node_find(sport, FC_ADDR_NAMESERVER);
+ if (ns == NULL) {
+ ns = ocs_node_alloc(sport, FC_ADDR_NAMESERVER, FALSE, FALSE);
+ if (ns == NULL) {
+ return -1;
+ }
+ }
+ /* TODO: for a found ns node, should we be transitioning from here?
+ * This breaks the rule that transitions occur only (1) from within the
+ * state machine or (2) immediately after alloc.
+ */
+ if (ns->ocs->nodedb_mask & OCS_NODEDB_PAUSE_NAMESERVER) {
+ ocs_node_pause(ns, __ocs_ns_init);
+ } else {
+ ocs_node_transition(ns, __ocs_ns_init, NULL);
+ }
+ return 0;
+}
+
+/**
+ * @brief Start up the fabric controller node.
+ *
+ * @par Description
+ * Allocates and starts up the fabric controller node.
+ *
+ * @param sport Pointer to the sport structure.
+ *
+ * @return Returns 0 on success, or a negative error value on failure.
+ */
+
+static int32_t
+ocs_start_fabctl_node(ocs_sport_t *sport)
+{
+ ocs_node_t *fabctl;
+
+ fabctl = ocs_node_find(sport, FC_ADDR_CONTROLLER);
+ if (fabctl == NULL) {
+ fabctl = ocs_node_alloc(sport, FC_ADDR_CONTROLLER, FALSE, FALSE);
+ if (fabctl == NULL) {
+ return -1;
+ }
+ }
+ /* TODO: for a found fabctl node, should we be transitioning from here?
+ * This breaks the rule that transitions occur only (1) from within the
+ * state machine or (2) immediately after alloc.
+ */
+ ocs_node_transition(fabctl, __ocs_fabctl_init, NULL);
+ return 0;
+}
+
+/**
+ * @brief Process the GIDPT payload.
+ *
+ * @par Description
+ * The GIDPT payload is parsed, and new nodes are created, as needed.
+ *
+ * @param node Pointer to the node structure.
+ * @param gidpt Pointer to the GIDPT payload.
+ * @param gidpt_len Payload length
+ *
+ * @return Returns 0 on success, or a negative error value on failure.
+ */
+
+static int32_t
+ocs_process_gidpt_payload(ocs_node_t *node, fcct_gidpt_acc_t *gidpt, uint32_t gidpt_len)
+{
+ uint32_t i;
+ uint32_t j;
+ ocs_node_t *newnode;
+ ocs_sport_t *sport = node->sport;
+ ocs_t *ocs = node->ocs;
+ uint32_t port_id;
+ uint32_t port_count;
+ ocs_node_t *n;
+ ocs_node_t **active_nodes;
+ uint32_t portlist_count;
+ uint16_t residual;
+
+ residual = ocs_be16toh(gidpt->hdr.max_residual_size);
+
+ if (residual != 0) {
+ ocs_log_debug(node->ocs, "residual is %u words\n", residual);
+ }
+
+ if (ocs_be16toh(gidpt->hdr.cmd_rsp_code) == FCCT_HDR_CMDRSP_REJECT) {
+ node_printf(node, "GIDPT request failed: rsn x%x rsn_expl x%x\n",
+ gidpt->hdr.reason_code, gidpt->hdr.reason_code_explanation);
+ return -1;
+ }
+
+ portlist_count = (gidpt_len - sizeof(fcct_iu_header_t)) / sizeof(gidpt->port_list);
+
+ /* Count the number of nodes */
+ port_count = 0;
+ ocs_sport_lock(sport);
+ ocs_list_foreach(&sport->node_list, n) {
+ port_count++;
+ }
+
+ /* Allocate a buffer for all nodes */
+ active_nodes = ocs_malloc(node->ocs, port_count * sizeof(*active_nodes), OCS_M_NOWAIT | OCS_M_ZERO);
+ if (active_nodes == NULL) {
+ node_printf(node, "ocs_malloc failed\n");
+ ocs_sport_unlock(sport);
+ return -1;
+ }
+
+ /* Fill buffer with fc_id of active nodes */
+ i = 0;
+ ocs_list_foreach(&sport->node_list, n) {
+ port_id = n->rnode.fc_id;
+ switch (port_id) {
+ case FC_ADDR_FABRIC:
+ case FC_ADDR_CONTROLLER:
+ case FC_ADDR_NAMESERVER:
+ break;
+ default:
+ if (!FC_ADDR_IS_DOMAIN_CTRL(port_id)) {
+ active_nodes[i++] = n;
+ }
+ break;
+ }
+ }
+
+ /* update the active nodes buffer */
+ for (i = 0; i < portlist_count; i++) {
+ port_id = fc_be24toh(gidpt->port_list[i].port_id);
+
+ for (j = 0; j < port_count; j++) {
+ if ((active_nodes[j] != NULL) && (port_id == active_nodes[j]->rnode.fc_id)) {
+ active_nodes[j] = NULL;
+ }
+ }
+
+ if (gidpt->port_list[i].ctl & FCCT_GID_PT_LAST_ID)
+ break;
+ }
+
+ /* Those remaining in active_nodes[] are now gone! */
+ for (i = 0; i < port_count; i++) {
+ /* If we're an initiator and the remote node is a target, or we're
+ * a target with target RSCN enabled, post the node-missing event.
+ */
+ if (active_nodes[i] != NULL) {
+ if ((node->sport->enable_ini && active_nodes[i]->targ) ||
+ (node->sport->enable_tgt && enable_target_rscn(ocs))) {
+ ocs_node_post_event(active_nodes[i], OCS_EVT_NODE_MISSING, NULL);
+ } else {
+ node_printf(node, "GID_PT: skipping non-tgt port_id x%06x\n",
+ active_nodes[i]->rnode.fc_id);
+ }
+ }
+ }
+ ocs_free(ocs, active_nodes, port_count * sizeof(*active_nodes));
+
+ for (i = 0; i < portlist_count; i++) {
+ uint32_t port_id = fc_be24toh(gidpt->port_list[i].port_id);
+
+ /* node_printf(node, "GID_PT: port_id x%06x\n", port_id); */
+
+ /* Don't create a node for ourselves or the associated NPIV ports */
+ if (port_id != node->rnode.sport->fc_id && !ocs_sport_find(sport->domain, port_id)) {
+ newnode = ocs_node_find(sport, port_id);
+ if (newnode) {
+ /* TODO: what if node deleted here?? */
+ if (node->sport->enable_ini && newnode->targ) {
+ ocs_node_post_event(newnode, OCS_EVT_NODE_REFOUND, NULL);
+ }
+ /* original code sends ADISC, has notion of "refound" */
+ } else {
+ if (node->sport->enable_ini) {
+ newnode = ocs_node_alloc(sport, port_id, 0, 0);
+ if (newnode == NULL) {
+ ocs_log_err(ocs, "ocs_node_alloc() failed\n");
+ ocs_sport_unlock(sport);
+ return -1;
+ }
+ /* send PLOGI automatically if initiator */
+ ocs_node_init_device(newnode, TRUE);
+ }
+ }
+ }
+
+ if (gidpt->port_list[i].ctl & FCCT_GID_PT_LAST_ID) {
+ break;
+ }
+ }
+ ocs_sport_unlock(sport);
+ return 0;
+}
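+
+/*
+ * Worked example (hypothetical port IDs): if the sport currently knows
+ * remote nodes {0x010100, 0x010200} and GID_PT reports {0x010100, 0x010300},
+ * then 0x010200 is posted OCS_EVT_NODE_MISSING (subject to the ini/tgt
+ * checks above), 0x010100 is posted OCS_EVT_NODE_REFOUND if it is a target,
+ * and a node for 0x010300 is allocated when we are an initiator.
+ */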
+
+/**
+ * @brief Set up the domain point-to-point parameters.
+ *
+ * @par Description
+ * The remote node service parameters are examined, and various point-to-point
+ * variables are set.
+ *
+ * @param sport Pointer to the sport object.
+ *
+ * @return Returns 0 on success, or a negative error value on failure.
+ */
+
+int32_t
+ocs_p2p_setup(ocs_sport_t *sport)
+{
+ ocs_t *ocs = sport->ocs;
+ int32_t rnode_winner;
+ rnode_winner = ocs_rnode_is_winner(sport);
+
+ /* set sport flags to indicate p2p "winner" */
+ if (rnode_winner == 1) {
+ sport->p2p_remote_port_id = 0;
+ sport->p2p_port_id = 0;
+ sport->p2p_winner = FALSE;
+ } else if (rnode_winner == 0) {
+ sport->p2p_remote_port_id = 2;
+ sport->p2p_port_id = 1;
+ sport->p2p_winner = TRUE;
+ } else {
+ /* no winner; only okay if external loopback enabled */
+ if (sport->ocs->external_loopback) {
+ /*
+ * External loopback mode enabled; local sport and remote node
+ * will be registered with an N_Port ID of 1.
+ */
+ ocs_log_debug(ocs, "External loopback mode enabled\n");
+ sport->p2p_remote_port_id = 1;
+ sport->p2p_port_id = 1;
+ sport->p2p_winner = TRUE;
+ } else {
+ ocs_log_warn(ocs, "failed to determine p2p winner\n");
+ return rnode_winner;
+ }
+ }
+ return 0;
+}
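+
+/*
+ * For example (per the assignments above): when ocs_rnode_is_winner()
+ * returns 0, the local port is the winner and registers as Port_ID 1,
+ * sending PLOGI to the peer at Port_ID 2; when it returns 1, the peer is
+ * expected to do the same from its side; with matching WWPNs and external
+ * loopback enabled, both the local sport and the remote node use Port_ID 1.
+ */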
+
+/**
+ * @brief Process the FABCTL node RSCN.
+ *
+ * @par Description
+ * Processes the FABCTL node RSCN payload, simply passes the event to the name server.
+ *
+ * @param node Pointer to the node structure.
+ * @param cbdata Callback data to pass forward.
+ *
+ * @return None.
+ */
+
+static void
+ocs_process_rscn(ocs_node_t *node, ocs_node_cb_t *cbdata)
+{
+ ocs_t *ocs = node->ocs;
+ ocs_sport_t *sport = node->sport;
+ ocs_node_t *ns;
+
+ /* Forward this event to the name-services node */
+ ns = ocs_node_find(sport, FC_ADDR_NAMESERVER);
+ if (ns != NULL) {
+ ocs_node_post_event(ns, OCS_EVT_RSCN_RCVD, cbdata);
+ } else {
+ ocs_log_warn(ocs, "can't find name server node\n");
+ }
+}
diff --git a/sys/dev/ocs_fc/ocs_fabric.h b/sys/dev/ocs_fc/ocs_fabric.h
new file mode 100644
index 000000000000..57fad1390984
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs_fabric.h
@@ -0,0 +1,82 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/**
+ * @file
+ * Declarations for the interface exported by ocs_fabric
+ */
+
+
+#if !defined(__OCS_FABRIC_H__)
+#define __OCS_FABRIC_H__
+extern void *__ocs_fabric_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_fabric_flogi_wait_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_fabric_domain_attach_wait(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_fabric_wait_domain_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+
+extern void *__ocs_vport_fabric_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_fabric_fdisc_wait_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_fabric_wait_sport_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+
+extern void *__ocs_ns_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_ns_plogi_wait_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_ns_rftid_wait_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_ns_rffid_wait_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_ns_wait_node_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_fabric_wait_attach_evt_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_ns_logo_wait_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_ns_gidpt_wait_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_ns_idle(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_ns_gidpt_delay(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+
+extern void *__ocs_fabctl_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_fabctl_wait_node_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_fabctl_wait_scr_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_fabctl_ready(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_fabctl_wait_ls_acc_cmpl(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_fabric_idle(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+
+extern void *__ocs_p2p_rnode_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_p2p_domain_attach_wait(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_p2p_wait_flogi_acc_cmpl(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_p2p_wait_plogi_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_p2p_wait_plogi_rsp_recvd_prli(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_p2p_wait_domain_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_p2p_wait_node_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+
+extern int32_t ocs_rnode_is_nport(fc_plogi_payload_t *remote_sparms);
+extern int32_t ocs_p2p_setup(ocs_sport_t *sport);
+extern void ocs_fabric_set_topology(ocs_node_t *node, ocs_sport_topology_e topology);
+extern void ocs_fabric_notify_topology(ocs_node_t *node);
+
+#endif
diff --git a/sys/dev/ocs_fc/ocs_fcp.h b/sys/dev/ocs_fc/ocs_fcp.h
new file mode 100644
index 000000000000..8a95f596891c
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs_fcp.h
@@ -0,0 +1,747 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/**
+ * @file
+ * Define Fibre Channel types and structures.
+ */
+
+#ifndef _OCS_FCP_H
+#define _OCS_FCP_H
+
+#define FC_ELS_CMD_RJT 0x01
+#define FC_ELS_CMD_ACC 0x02
+#define FC_ELS_CMD_PLOGI 0x03
+#define FC_ELS_CMD_FLOGI 0x04
+#define FC_ELS_CMD_LOGO 0x05
+#define FC_ELS_CMD_RRQ 0x12
+#define FC_ELS_CMD_PRLI 0x20
+#define FC_ELS_CMD_PRLO 0x21
+#define FC_ELS_CMD_PDISC 0x50
+#define FC_ELS_CMD_FDISC 0x51
+#define FC_ELS_CMD_ADISC 0x52
+#define FC_ELS_CMD_RSCN 0x61
+#define FC_ELS_CMD_SCR 0x62
+
+#define FC_TYPE_BASIC_LINK 0
+#define FC_TYPE_FCP 0x08
+#define FC_TYPE_GS 0x20
+#define FC_TYPE_SW 0x22
+
+#define FC_ADDR_FABRIC 0xfffffe /** well known fabric address */
+#define FC_ADDR_CONTROLLER 0xfffffd /** well known fabric controller address */
+#define FC_ADDR_IS_DOMAIN_CTRL(x) (((x) & 0xffff00) == 0xfffc00) /** is well known domain controller */
+#define FC_ADDR_GET_DOMAIN_CTRL(x) ((x) & 0x0000ff) /** get domain controller number */
+#define FC_ADDR_NAMESERVER 0xfffffc /** well known directory server address */
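+
+/*
+ * Example (illustrative): 0xfffc01 is the well known address of the
+ * domain controller for domain 1, so FC_ADDR_IS_DOMAIN_CTRL(0xfffc01)
+ * is true and FC_ADDR_GET_DOMAIN_CTRL(0xfffc01) yields 0x01.
+ */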
+
+#define FC_GS_TYPE_ALIAS_SERVICE 0xf8
+#define FC_GS_TYPE_MANAGEMENT_SERVICE 0xfa
+#define FC_GS_TYPE_DIRECTORY_SERVICE 0xfc
+
+#define FC_GS_SUBTYPE_NAME_SERVER 0x02
+
+/**
+ * Generic Services FC Type Bit mask macros:
+ */
+#define FC_GS_TYPE_WORD(type) ((type) >> 5)
+#define FC_GS_TYPE_BIT(type) ((type) & 0x1f)
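+
+/*
+ * For example, FC-4 type FCP (FC_TYPE_FCP == 0x08) maps to word 0,
+ * bit 8 of the name server FC-4 types bitmap (e.g. the fc4_types[]
+ * array of fcct_rftid_req_t below), so a caller would set
+ * fc4_types[FC_GS_TYPE_WORD(FC_TYPE_FCP)] |= 1U << FC_GS_TYPE_BIT(FC_TYPE_FCP);
+ */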
+
+/**
+ * Generic Services Name Server Request Command codes:
+ */
+#define FC_GS_NAMESERVER_GPN_ID 0x0112
+#define FC_GS_NAMESERVER_GNN_ID 0x0113
+#define FC_GS_NAMESERVER_GFPN_ID 0x011c
+#define FC_GS_NAMESERVER_GFF_ID 0x011f
+#define FC_GS_NAMESERVER_GID_FT 0x0171
+#define FC_GS_NAMESERVER_GID_PT 0x01a1
+#define FC_GS_NAMESERVER_RHBA 0x0200
+#define FC_GS_NAMESERVER_RPA 0x0211
+#define FC_GS_NAMESERVER_RPN_ID 0x0212
+#define FC_GS_NAMESERVER_RNN_ID 0x0213
+#define FC_GS_NAMESERVER_RCS_ID 0x0214
+#define FC_GS_NAMESERVER_RFT_ID 0x0217
+#define FC_GS_NAMESERVER_RFF_ID 0x021f
+#define FC_GS_NAMESERVER_RSNN_NN 0x0239
+#define FC_GS_NAMESERVER_RSPN_ID 0x0218
+
+
+#define FC_GS_REVISION 0x03
+
+#define FC_GS_IO_PARAMS { .fc_ct.r_ctl = 0x02, \
+ .fc_ct.type = FC_TYPE_GS, \
+ .fc_ct.df_ctl = 0x00 }
+
+typedef struct fc_vft_header_s {
+ uint32_t :1,
+ vf_id:12,
+ priority:3,
+ e:1,
+ :1,
+ type:4,
+ ver:2,
+ r_ctl:8;
+ uint32_t :24,
+ hopct:8;
+} fc_vft_header_t;
+
+
+#if BYTE_ORDER == LITTLE_ENDIAN
+static inline uint32_t fc_be24toh(uint32_t x) { return (ocs_be32toh(x) >> 8); }
+#else
+/* no byte swap is needed on big-endian hosts */
+static inline uint32_t fc_be24toh(uint32_t x) { return (x >> 8); }
+#endif
+static inline uint32_t fc_htobe24(uint32_t x) { return fc_be24toh(x); }
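+
+/*
+ * The conversion simply reverses the three low-order bytes, so it is
+ * its own inverse for 24-bit values (top byte clear): for example,
+ * 0x00abcdef converts to 0x00efcdab and back again, which is why
+ * fc_htobe24() can just call fc_be24toh().
+ */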
+
+#define FC_SOFI3 0x2e
+#define FC_SOFn3 0x36
+#define FC_EOFN 0x41
+#define FC_EOFT 0x42
+
+/**
+ * @brief FC header in big-endian order
+ */
+typedef struct fc_header_s {
+ uint32_t info:4,
+ r_ctl:4,
+ d_id:24;
+ uint32_t cs_ctl:8,
+ s_id:24;
+ uint32_t type:8,
+ f_ctl:24;
+ uint32_t seq_id:8,
+ df_ctl:8,
+ seq_cnt:16;
+ uint32_t ox_id:16,
+ rx_id:16;
+ uint32_t parameter;
+} fc_header_t;
+
+
+/**
+ * @brief FC header in little-endian order
+ */
+typedef struct fc_header_le_s {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t d_id:24,
+ info:4,
+ r_ctl:4;
+ uint32_t s_id:24,
+ cs_ctl:8;
+ uint32_t f_ctl:24,
+ type:8;
+ uint32_t seq_cnt:16,
+ df_ctl:8,
+ seq_id:8;
+ uint32_t rx_id:16,
+ ox_id:16;
+ uint32_t parameter;
+#else
+#error big endian version not defined
+#endif
+} fc_header_le_t;
+
+/**
+ * @brief FC VM header in big-endian order
+ */
+typedef struct fc_vm_header_s {
+ uint32_t dst_vmid;
+ uint32_t src_vmid;
+ uint32_t rsvd0;
+ uint32_t rsvd1;
+} fc_vm_header_t;
+
+#define FC_DFCTL_DEVICE_HDR_16_MASK 0x1
+#define FC_DFCTL_NETWORK_HDR_MASK 0x20
+#define FC_DFCTL_ESP_HDR_MASK 0x40
+#define FC_DFCTL_NETWORK_HDR_SIZE 16
+#define FC_DFCTL_ESP_HDR_SIZE		0 /* FIXME */
+
+#define FC_RCTL_FC4_DATA 0
+#define FC_RCTL_ELS 2
+#define FC_RCTL_BLS 8
+
+#define FC_RCTL_INFO_UNCAT 0
+#define FC_RCTL_INFO_SOL_DATA 1
+#define FC_RCTL_INFO_UNSOL_CTRL 2
+#define FC_RCTL_INFO_SOL_CTRL 3
+#define FC_RCTL_INFO_UNSOL_DATA 4
+#define FC_RCTL_INFO_DATA_DESC 5
+#define FC_RCTL_INFO_UNSOL_CMD 6
+#define FC_RCTL_INFO_CMD_STATUS 7
+
+#define FC_FCTL_EXCHANGE_RESPONDER 0x800000
+#define FC_FCTL_SEQUENCE_CONTEXT 0x400000
+#define FC_FCTL_FIRST_SEQUENCE 0x200000
+#define FC_FCTL_LAST_SEQUENCE 0x100000
+#define FC_FCTL_END_SEQUENCE 0x080000
+#define FC_FCTL_END_CONNECTION 0x040000
+#define FC_FCTL_PRIORITY_ENABLE 0x020000
+#define FC_FCTL_SEQUENCE_INITIATIVE 0x010000
+#define FC_FCTL_FILL_DATA_BYTES_MASK 0x000003
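+
+/*
+ * For example, the final frame of a response sequence sent by the
+ * exchange responder would typically carry the F_CTL bits
+ * FC_FCTL_EXCHANGE_RESPONDER | FC_FCTL_LAST_SEQUENCE | FC_FCTL_END_SEQUENCE.
+ */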
+
+/**
+ * Common BLS definitions:
+ */
+#define FC_INFO_NOP 0x0
+#define FC_INFO_ABTS 0x1
+#define FC_INFO_RMC 0x2
+/* reserved 0x3 */
+#define FC_INFO_BA_ACC 0x4
+#define FC_INFO_BA_RJT 0x5
+#define FC_INFO_PRMT 0x6
+
+/* (FC-LS) LS_RJT Reason Codes */
+#define FC_REASON_INVALID_COMMAND_CODE 0x01
+#define FC_REASON_LOGICAL_ERROR 0x03
+#define FC_REASON_LOGICAL_BUSY 0x05
+#define FC_REASON_PROTOCOL_ERROR 0x07
+#define FC_REASON_UNABLE_TO_PERFORM 0x09
+#define FC_REASON_COMMAND_NOT_SUPPORTED 0x0b
+#define FC_REASON_COMMAND_IN_PROGRESS 0x0e
+#define FC_REASON_VENDOR_SPECIFIC 0xff
+
+/* (FC-LS) LS_RJT Reason Codes Explanations */
+#define FC_EXPL_NO_ADDITIONAL 0x00
+#define FC_EXPL_SPARAM_OPTIONS 0x01
+#define FC_EXPL_SPARAM_INITIATOR 0x03
+#define FC_EXPL_SPARAM_RECPIENT 0x05
+#define FC_EXPL_SPARM_DATA_SIZE 0x07
+#define FC_EXPL_SPARM_CONCURRENT 0x09
+#define FC_EXPL_SPARM_CREDIT 0x0b
+#define FC_EXPL_INV_PORT_NAME 0x0d
+#define FC_EXPL_INV_NODE_NAME 0x0e
+#define FC_EXPL_INV_COMMON_SPARAMS 0x0f
+#define FC_EXPL_INV_ASSOC_HEADER 0x11
+#define FC_EXPL_ASSOC_HDR_REQUIRED 0x13
+#define FC_EXPL_INV_ORIGINATOR_S_ID 0x15
+#define FC_EXPL_INV_X_ID_COMBINATION 0x17
+#define FC_EXPL_COMMAND_IN_PROGRESS 0x19
+#define FC_EXPL_NPORT_LOGIN_REQUIRED 0x1e
+#define FC_EXPL_N_PORT_ID 0x1f
+#define FC_EXPL_INSUFFICIENT_RESOURCES 0x29
+#define FC_EXPL_UNABLE_TO_SUPPLY_DATA 0x2a
+#define FC_EXPL_REQUEST_NOT_SUPPORTED 0x2c
+#define FC_EXPL_INV_PAYLOAD_LEN 0x1d
+#define FC_EXPL_INV_PORT_NODE_NAME 0x44
+#define FC_EXPL_LOGIN_EXT_NOT_SUPPORTED 0x46
+#define FC_EXPL_AUTH_REQUIRED 0x48
+#define FC_EXPL_SCAN_VALUE_NOT_ALLOWED 0x50
+#define FC_EXPL_SCAN_VALUE_NOT_SUPPORTED 0x51
+#define FC_EXPL_NO_RESOURCES_ASSIGNED 0x52
+#define FC_EXPL_MAC_ADDR_MODE_NOT_SUPPORTED 0x60
+#define FC_EXPL_MAC_ADDR_INCORRECTLY_FORMED 0x61
+#define FC_EXPL_VN2VN_PORT_NOT_IN_NEIGHBOR_SET 0x62
+
+#define FC_EXPL_INV_X_ID 0x03 /* invalid OX_ID - RX_ID combination */
+#define FC_EXPL_SEQUENCE_ABORTED 0x05
+
+typedef struct fc_ba_acc_payload_s {
+#define FC_SEQ_ID_VALID 0x80
+#define FC_SEQ_ID_INVALID 0x00
+ uint32_t seq_id_validity:8,
+ seq_id:8,
+ :16;
+ uint32_t ox_id:16,
+ rx_id:16;
+ uint32_t low_seq_cnt:16,
+ high_seq_cnt:16;
+} fc_ba_acc_payload_t;
+
+typedef struct fc_ba_rjt_payload_s {
+ uint32_t vendor_unique:8,
+ reason_explanation:8,
+ reason_code:8,
+ :8;
+} fc_ba_rjt_payload_t;
+
+typedef struct fc_els_gen_s {
+ uint32_t command_code: 8,
+ resv1: 24;
+} fc_els_gen_t;
+
+typedef struct fc_plogi_payload_s {
+ uint32_t command_code: 8,
+ resv1: 24;
+ uint32_t common_service_parameters[4];
+ uint32_t port_name_hi;
+ uint32_t port_name_lo;
+ uint32_t node_name_hi;
+ uint32_t node_name_lo;
+ uint32_t class1_service_parameters[4];
+ uint32_t class2_service_parameters[4];
+ uint32_t class3_service_parameters[4];
+ uint32_t class4_service_parameters[4];
+ uint32_t vendor_version_level[4];
+} fc_plogi_payload_t;
+
+typedef fc_plogi_payload_t fc_sparms_t;
+
+typedef struct fc_logo_payload_s {
+ uint32_t command_code: 8,
+ resv1:24;
+ uint32_t :8,
+ port_id:24;
+ uint32_t port_name_hi;
+ uint32_t port_name_lo;
+} fc_logo_payload_t;
+
+typedef struct fc_acc_payload_s {
+ uint32_t command_code: 8,
+ resv1:24;
+} fc_acc_payload_t;
+
+
+typedef struct fc_ls_rjt_payload_s {
+ uint32_t command_code:8,
+ resv1:24;
+ uint32_t resv2:8,
+ reason_code:8,
+ reason_code_exp:8,
+ vendor_unique:8;
+} fc_ls_rjt_payload_t;
+
+typedef struct fc_prli_payload_s {
+ uint32_t command_code:8,
+ page_length:8,
+ payload_length:16;
+ uint32_t type:8,
+ type_ext:8,
+ flags:16;
+ uint32_t originator_pa;
+ uint32_t responder_pa;
+ uint32_t :16,
+ service_params:16;
+} fc_prli_payload_t;
+
+typedef struct fc_prlo_payload_s {
+ uint32_t command_code:8,
+ page_length:8,
+ payload_length:16;
+ uint32_t type:8,
+ type_ext:8,
+ :16;
+ uint32_t :32;
+ uint32_t :32;
+ uint32_t :32;
+} fc_prlo_payload_t;
+
+typedef struct fc_prlo_acc_payload_s {
+ uint32_t command_code:8,
+ page_length:8,
+ payload_length:16;
+ uint32_t type:8,
+ type_ext:8,
+ :4,
+ response_code:4,
+ :8;
+ uint32_t :32;
+ uint32_t :32;
+ uint32_t :32;
+} fc_prlo_acc_payload_t;
+
+typedef struct fc_adisc_payload_s {
+ uint32_t command_code:8,
+ payload_length:24;
+ uint32_t :8,
+ hard_address:24;
+ uint32_t port_name_hi;
+ uint32_t port_name_lo;
+ uint32_t node_name_hi;
+ uint32_t node_name_lo;
+ uint32_t :8,
+ port_id:24;
+} fc_adisc_payload_t;
+
+/* PRLI flags */
+#define FC_PRLI_ORIGINATOR_PA_VALID 0x8000
+#define FC_PRLI_RESPONDER_PA_VALID 0x4000
+#define FC_PRLI_ESTABLISH_IMAGE_PAIR 0x2000
+#define FC_PRLI_SERVICE_PARAM_INVALID 0x0800
+#define FC_PRLI_REQUEST_EXECUTED 0x0100
+
+/* PRLI Service Parameters */
+#define FC_PRLI_REC_SUPPORT 0x0400
+#define FC_PRLI_TASK_RETRY_ID_REQ 0x0200
+#define FC_PRLI_RETRY 0x0100
+#define FC_PRLI_CONFIRMED_COMPLETION 0x0080
+#define FC_PRLI_DATA_OVERLAY 0x0040
+#define FC_PRLI_INITIATOR_FUNCTION 0x0020
+#define FC_PRLI_TARGET_FUNCTION 0x0010
+#define FC_PRLI_READ_XRDY_DISABLED 0x0002
+#define FC_PRLI_WRITE_XRDY_DISABLED 0x0001
+
+/* PRLO Logout flags */
+#define FC_PRLO_REQUEST_EXECUTED 0x0001
+
+typedef struct fc_scr_payload_s {
+ uint32_t command_code:8,
+ :24;
+ uint32_t :24,
+ function:8;
+} fc_scr_payload_t;
+
+#define FC_SCR_REG_FABRIC 1
+#define FC_SCR_REG_NPORT 2
+#define FC_SCR_REG_FULL 3
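+
+/*
+ * For example, an SCR payload with function = FC_SCR_REG_FULL registers
+ * the requester for both fabric-detected and N_Port-detected state
+ * change notifications.
+ */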
+
+typedef struct {
+ uint32_t :2,
+ rscn_event_qualifier:4,
+ address_format:2,
+ port_id:24;
+} fc_rscn_affected_port_id_page_t;
+
+typedef struct fc_rscn_payload_s {
+ uint32_t command_code:8,
+ page_length:8,
+ payload_length:16;
+ fc_rscn_affected_port_id_page_t port_list[1];
+} fc_rscn_payload_t;
+
+typedef struct fcct_iu_header_s {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t revision:8,
+ in_id:24;
+ uint32_t gs_type:8,
+ gs_subtype:8,
+ options:8,
+ resv1:8;
+ uint32_t cmd_rsp_code:16,
+ max_residual_size:16;
+ uint32_t fragment_id:8,
+ reason_code:8,
+ reason_code_explanation:8,
+ vendor_specific:8;
+#else
+#error big endian version not defined
+#endif
+} fcct_iu_header_t;
+
+#define FCCT_REJECT_INVALID_COMMAND_CODE 1
+#define FCCT_REJECT_INVALID_VERSION_LEVEL 2
+#define FCCT_LOGICAL_ERROR 3
+#define FCCT_INVALID_CT_IU_SIZE 4
+#define FCCT_LOGICAL_BUSY 5
+#define FCCT_PROTOCOL_ERROR 7
+#define FCCT_UNABLE_TO_PERFORM 9
+#define FCCT_COMMAND_NOT_SUPPORTED 0x0b
+#define FCCT_FABRIC_PORT_NAME_NOT_REGISTERED 0x0c
+#define FCCT_SERVER_NOT_AVAILABLE 0x0d
+#define FCCT_SESSION_COULD_NOT_BE_ESTABLISHED 0x0e
+#define FCCT_VENDOR_SPECIFIC_ERROR 0xff
+
+#define FCCT_NO_ADDITIONAL_EXPLANATION 0
+#define FCCT_AUTHORIZATION_EXCEPTION 0xf0
+#define FCCT_AUTHENTICATION_EXCEPTION 0xf1
+#define FCCT_DATA_BASE_FULL 0xf2
+#define FCCT_DATA_BASE_EMPTY 0xf3
+#define FCCT_PROCESSING_REQUEST 0xf4
+#define FCCT_UNABLE_TO_VERIFY_CONNECTION 0xf5
+#define FCCT_DEVICES_NOT_IN_COMMON_ZONE 0xf6
+
+typedef struct {
+ fcct_iu_header_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t port_id;
+ uint32_t fc4_types;
+#else
+#error big endian version not defined
+#endif
+} fcgs_rft_id_t;
+
+typedef struct {
+ fcct_iu_header_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t port_id;
+ uint32_t :16,
+ fc4_features:8,
+ type_code:8;
+#else
+#error big endian version not defined
+#endif
+} fcgs_rff_id_t;
+
+#pragma pack(1)
+typedef struct {
+ fcct_iu_header_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t port_id;
+ uint64_t port_name;
+#else
+#error big endian version not defined
+#endif
+} fcgs_rpn_id_t;
+#pragma pack()
+
+#pragma pack(1)
+typedef struct {
+ fcct_iu_header_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t port_id;
+ uint64_t node_name;
+#else
+#error big endian version not defined
+#endif
+} fcgs_rnn_id_t;
+#pragma pack()
+
+#define FCCT_CLASS_OF_SERVICE_F 0x1
+#define FCCT_CLASS_OF_SERVICE_2 0x4
+#define FCCT_CLASS_OF_SERVICE_3 0x8
+#pragma pack(1)
+typedef struct {
+ fcct_iu_header_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t port_id;
+ uint32_t class_of_srvc;
+#else
+#error big endian version not defined
+#endif
+} fcgs_rcs_id_t;
+#pragma pack()
+
+#pragma pack(1)
+typedef struct {
+ fcct_iu_header_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint64_t node_name;
+ uint8_t name_len;
+ char sym_node_name[1];
+/* TODO: need name length and symbolic name */
+#else
+#error big endian version not defined
+#endif
+} fcgs_rsnn_nn_t;
+#pragma pack()
+
+#define FCCT_HDR_CMDRSP_ACCEPT 0x8002
+#define FCCT_HDR_CMDRSP_REJECT 0x8001
+
+static inline void fcct_build_req_header(fcct_iu_header_t *hdr, uint16_t cmd, uint16_t max_size)
+{
+ /* use old rev (1) to accommodate older switches */
+ hdr->revision = 1;
+ hdr->in_id = 0;
+ hdr->gs_type = FC_GS_TYPE_DIRECTORY_SERVICE;
+ hdr->gs_subtype = FC_GS_SUBTYPE_NAME_SERVER;
+ hdr->options = 0;
+ hdr->resv1 = 0;
+ hdr->cmd_rsp_code = ocs_htobe16(cmd);
+ hdr->max_residual_size = ocs_htobe16(max_size/(sizeof(uint32_t))); /* words */
+ hdr->fragment_id = 0;
+ hdr->reason_code = 0;
+ hdr->reason_code_explanation = 0;
+ hdr->vendor_specific = 0;
+}
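+
+/*
+ * Illustrative usage (local variable names here are hypothetical):
+ * build the CT header of a GID_PT name server request, sizing
+ * max_residual_size from the caller's response buffer length:
+ *
+ *	fcct_gidpt_req_t *gidpt = cmd_buf;
+ *	fcct_build_req_header(&gidpt->hdr, FC_GS_NAMESERVER_GID_PT, rsp_len);
+ */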
+
+typedef struct fcct_rftid_req_s {
+ fcct_iu_header_t hdr;
+ uint32_t port_id;
+ uint32_t fc4_types[8];
+} fcct_rftid_req_t;
+
+#define FC4_FEATURE_TARGET (1U << 0)
+#define FC4_FEATURE_INITIATOR (1U << 1)
+
+typedef struct fcct_rffid_req_s {
+ fcct_iu_header_t hdr;
+ uint32_t port_id;
+ uint32_t :16,
+ fc4_feature_bits:8,
+ type:8;
+} fcct_rffid_req_t;
+
+typedef struct fcct_gnnid_req_s {
+ fcct_iu_header_t hdr;
+ uint32_t :8,
+ port_id:24;
+} fcct_gnnid_req_t;
+
+typedef struct fcct_gpnid_req_s {
+ fcct_iu_header_t hdr;
+ uint32_t :8,
+ port_id:24;
+} fcct_gpnid_req_t;
+
+typedef struct fcct_gffid_req_s {
+ fcct_iu_header_t hdr;
+ uint32_t :8,
+ port_id:24;
+} fcct_gffid_req_t;
+
+typedef struct fcct_gidft_req_s {
+ fcct_iu_header_t hdr;
+ uint32_t :8,
+ domain_id_scope:8,
+ area_id_scope:8,
+ type:8;
+} fcct_gidft_req_t;
+
+typedef struct fcct_gidpt_req_s {
+ fcct_iu_header_t hdr;
+ uint32_t port_type:8,
+ domain_id_scope:8,
+ area_id_scope:8,
+ flags:8;
+} fcct_gidpt_req_t;
+
+typedef struct fcct_gnnid_acc_s {
+ fcct_iu_header_t hdr;
+ uint64_t node_name;
+} fcct_gnnid_acc_t;
+
+typedef struct fcct_gpnid_acc_s {
+ fcct_iu_header_t hdr;
+ uint64_t port_name;
+} fcct_gpnid_acc_t;
+
+typedef struct fcct_gffid_acc_s {
+ fcct_iu_header_t hdr;
+ uint8_t fc4_feature_bits;
+} fcct_gffid_acc_t;
+
+typedef struct fcct_gidft_acc_s {
+ fcct_iu_header_t hdr;
+ struct {
+ uint32_t ctl:8,
+ port_id:24;
+ } port_list[1];
+} fcct_gidft_acc_t;
+
+typedef struct fcct_gidpt_acc_s {
+ fcct_iu_header_t hdr;
+ struct {
+ uint32_t ctl:8,
+ port_id:24;
+ } port_list[1];
+} fcct_gidpt_acc_t;
+
+#define FCCT_GID_PT_LAST_ID 0x80
+#define FCCT_GIDPT_ID_MASK 0x00ffffff
+
+typedef struct fcp_cmnd_iu_s {
+ uint8_t fcp_lun[8];
+ uint8_t command_reference_number;
+ uint8_t task_attribute:3,
+ command_priority:4,
+ :1;
+ uint8_t task_management_flags;
+ uint8_t wrdata:1,
+ rddata:1,
+ additional_fcp_cdb_length:6;
+ uint8_t fcp_cdb[16];
+	uint8_t fcp_cdb_and_dl[20];	/**< may contain up to 16 bytes of additional CDB, followed by fcp_dl */
+} fcp_cmnd_iu_t;
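+
+/*
+ * Note: additional_fcp_cdb_length is expressed in 4-byte words, so the
+ * big-endian fcp_dl field begins at
+ * fcp_cdb_and_dl[additional_fcp_cdb_length * 4]; with no additional
+ * CDB bytes it occupies the first four bytes of fcp_cdb_and_dl.
+ */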
+
+#define FCP_LUN_ADDRESS_METHOD_SHIFT 6
+#define FCP_LUN_ADDRESS_METHOD_MASK 0xc0
+#define FCP_LUN_ADDR_METHOD_PERIPHERAL 0x0
+#define FCP_LUN_ADDR_METHOD_FLAT 0x1
+#define FCP_LUN_ADDR_METHOD_LOGICAL 0x2
+#define FCP_LUN_ADDR_METHOD_EXTENDED 0x3
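+
+/*
+ * The addressing method occupies the two high-order bits of the first
+ * LUN byte; for example (illustrative):
+ *
+ *	method = (cmnd->fcp_lun[0] & FCP_LUN_ADDRESS_METHOD_MASK) >>
+ *	    FCP_LUN_ADDRESS_METHOD_SHIFT;
+ */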
+
+#define FCP_LUN_ADDR_SIMPLE_MAX 0xff
+#define FCP_LUN_ADDR_FLAT_MAX 0x3fff
+
+#define FCP_TASK_ATTR_SIMPLE 0x0
+#define FCP_TASK_ATTR_HEAD_OF_QUEUE 0x1
+#define FCP_TASK_ATTR_ORDERED 0x2
+#define FCP_TASK_ATTR_ACA 0x4
+#define FCP_TASK_ATTR_UNTAGGED 0x5
+
+#define FCP_QUERY_TASK_SET BIT(0)
+#define FCP_ABORT_TASK_SET BIT(1)
+#define FCP_CLEAR_TASK_SET BIT(2)
+#define FCP_QUERY_ASYNCHRONOUS_EVENT BIT(3)
+#define FCP_LOGICAL_UNIT_RESET BIT(4)
+#define FCP_TARGET_RESET BIT(5)
+#define FCP_CLEAR_ACA BIT(6)
+
+/* SPC-4 says that the maximum length of sense data is 252 bytes */
+#define FCP_MAX_SENSE_LEN 252
+#define FCP_MAX_RSP_LEN 8
+/*
+ * FCP_RSP buffer will either have sense or response data, but not both
+ * so pick the larger.
+ */
+#define FCP_MAX_RSP_INFO_LEN FCP_MAX_SENSE_LEN
+
+typedef struct fcp_rsp_iu_s {
+ uint8_t rsvd[8];
+ uint8_t status_qualifier[2];
+ uint8_t flags;
+ uint8_t scsi_status;
+ uint8_t fcp_resid[4];
+ uint8_t fcp_sns_len[4];
+ uint8_t fcp_rsp_len[4];
+ uint8_t data[FCP_MAX_RSP_INFO_LEN];
+} fcp_rsp_iu_t;
+
+/** Flag field defines: */
+#define FCP_RSP_LEN_VALID BIT(0)
+#define FCP_SNS_LEN_VALID BIT(1)
+#define FCP_RESID_OVER BIT(2)
+#define FCP_RESID_UNDER BIT(3)
+#define FCP_CONF_REQ BIT(4)
+#define FCP_BIDI_READ_RESID_OVER BIT(5)
+#define FCP_BIDI_READ_RESID_UNDER BIT(6)
+#define FCP_BIDI_RSP BIT(7)
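+
+/*
+ * For example, a target reports a short read by setting FCP_RESID_UNDER
+ * in flags, with fcp_resid (a big-endian byte array) holding the number
+ * of bytes that were not transferred.
+ */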
+
+/** Status values: */
+#define FCP_TMF_COMPLETE 0x00
+#define FCP_DATA_LENGTH_MISMATCH 0x01
+#define FCP_INVALID_FIELD 0x02
+#define FCP_DATA_RO_MISMATCH 0x03
+#define FCP_TMF_REJECTED 0x04
+#define FCP_TMF_FAILED 0x05
+#define FCP_TMF_SUCCEEDED 0x08
+#define FCP_TMF_INCORRECT_LUN 0x09
+
+/** FCP-4 Table 28, TMF response information: */
+typedef struct fc_rsp_info_s {
+ uint8_t addl_rsp_info[3];
+ uint8_t rsp_code;
+ uint32_t :32;
+} fcp_rsp_info_t;
+
+typedef struct fcp_xfer_rdy_iu_s {
+ uint8_t fcp_data_ro[4];
+ uint8_t fcp_burst_len[4];
+ uint8_t rsvd[4];
+} fcp_xfer_rdy_iu_t;
+
+#define MAX_ACC_REJECT_PAYLOAD (sizeof(fc_ls_rjt_payload_t) > sizeof(fc_acc_payload_t) ? sizeof(fc_ls_rjt_payload_t) : sizeof(fc_acc_payload_t))
+
+
+#endif /* !_OCS_FCP_H */
diff --git a/sys/dev/ocs_fc/ocs_hw.c b/sys/dev/ocs_fc/ocs_hw.c
new file mode 100644
index 000000000000..9cbb3f67e306
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs_hw.c
@@ -0,0 +1,12551 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/**
+ * @file
+ * Defines and implements the Hardware Abstraction Layer (HW).
+ * All interaction with the hardware is performed through the HW, which abstracts
+ * the details of the underlying SLI-4 implementation.
+ */
+
+/**
+ * @defgroup devInitShutdown Device Initialization and Shutdown
+ * @defgroup domain Domain Functions
+ * @defgroup port Port Functions
+ * @defgroup node Remote Node Functions
+ * @defgroup io IO Functions
+ * @defgroup interrupt Interrupt handling
+ * @defgroup os OS Required Functions
+ */
+
+#include "ocs.h"
+#include "ocs_os.h"
+#include "ocs_hw.h"
+#include "ocs_hw_queues.h"
+
+#define OCS_HW_MQ_DEPTH 128
+#define OCS_HW_READ_FCF_SIZE 4096
+#define OCS_HW_DEFAULT_AUTO_XFER_RDY_IOS 256
+#define OCS_HW_WQ_TIMER_PERIOD_MS 500
+
+/* values used for setting the auto xfer rdy parameters */
+#define OCS_HW_AUTO_XFER_RDY_BLK_SIZE_DEFAULT 0 /* 512 bytes */
+#define OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA_DEFAULT TRUE
+#define OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID_DEFAULT FALSE
+#define OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE_DEFAULT 0
+#define OCS_HW_REQUE_XRI_REGTAG 65534
+/* max command and response buffer lengths -- arbitrary at the moment */
+#define OCS_HW_DMTF_CLP_CMD_MAX 256
+#define OCS_HW_DMTF_CLP_RSP_MAX 256
+
+/* HW global data */
+ocs_hw_global_t hw_global;
+
+static void ocs_hw_queue_hash_add(ocs_queue_hash_t *, uint16_t, uint16_t);
+static void ocs_hw_adjust_wqs(ocs_hw_t *hw);
+static uint32_t ocs_hw_get_num_chutes(ocs_hw_t *hw);
+static int32_t ocs_hw_cb_link(void *, void *);
+static int32_t ocs_hw_cb_fip(void *, void *);
+static int32_t ocs_hw_command_process(ocs_hw_t *, int32_t, uint8_t *, size_t);
+static int32_t ocs_hw_mq_process(ocs_hw_t *, int32_t, sli4_queue_t *);
+static int32_t ocs_hw_cb_read_fcf(ocs_hw_t *, int32_t, uint8_t *, void *);
+static int32_t ocs_hw_cb_node_attach(ocs_hw_t *, int32_t, uint8_t *, void *);
+static int32_t ocs_hw_cb_node_free(ocs_hw_t *, int32_t, uint8_t *, void *);
+static int32_t ocs_hw_cb_node_free_all(ocs_hw_t *, int32_t, uint8_t *, void *);
+static ocs_hw_rtn_e ocs_hw_setup_io(ocs_hw_t *);
+static ocs_hw_rtn_e ocs_hw_init_io(ocs_hw_t *);
+static int32_t ocs_hw_flush(ocs_hw_t *);
+static int32_t ocs_hw_command_cancel(ocs_hw_t *);
+static int32_t ocs_hw_io_cancel(ocs_hw_t *);
+static void ocs_hw_io_quarantine(ocs_hw_t *hw, hw_wq_t *wq, ocs_hw_io_t *io);
+static void ocs_hw_io_restore_sgl(ocs_hw_t *, ocs_hw_io_t *);
+static int32_t ocs_hw_io_ini_sge(ocs_hw_t *, ocs_hw_io_t *, ocs_dma_t *, uint32_t, ocs_dma_t *);
+static ocs_hw_rtn_e ocs_hw_firmware_write_lancer(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, int last, ocs_hw_fw_cb_t cb, void *arg);
+static int32_t ocs_hw_cb_fw_write(ocs_hw_t *, int32_t, uint8_t *, void *);
+static int32_t ocs_hw_cb_sfp(ocs_hw_t *, int32_t, uint8_t *, void *);
+static int32_t ocs_hw_cb_temp(ocs_hw_t *, int32_t, uint8_t *, void *);
+static int32_t ocs_hw_cb_link_stat(ocs_hw_t *, int32_t, uint8_t *, void *);
+static int32_t ocs_hw_cb_host_stat(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg);
+static void ocs_hw_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg);
+static int32_t ocs_hw_clp_resp_get_value(ocs_hw_t *hw, const char *keyword, char *value, uint32_t value_len, const char *resp, uint32_t resp_len);
+typedef void (*ocs_hw_dmtf_clp_cb_t)(ocs_hw_t *hw, int32_t status, uint32_t result_len, void *arg);
+static ocs_hw_rtn_e ocs_hw_exec_dmtf_clp_cmd(ocs_hw_t *hw, ocs_dma_t *dma_cmd, ocs_dma_t *dma_resp, uint32_t opts, ocs_hw_dmtf_clp_cb_t cb, void *arg);
+static void ocs_hw_linkcfg_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint32_t result_len, void *arg);
+
+static int32_t __ocs_read_topology_cb(ocs_hw_t *, int32_t, uint8_t *, void *);
+static ocs_hw_rtn_e ocs_hw_get_linkcfg(ocs_hw_t *, uint32_t, ocs_hw_port_control_cb_t, void *);
+static ocs_hw_rtn_e ocs_hw_get_linkcfg_lancer(ocs_hw_t *, uint32_t, ocs_hw_port_control_cb_t, void *);
+static ocs_hw_rtn_e ocs_hw_get_linkcfg_skyhawk(ocs_hw_t *, uint32_t, ocs_hw_port_control_cb_t, void *);
+static ocs_hw_rtn_e ocs_hw_set_linkcfg(ocs_hw_t *, ocs_hw_linkcfg_e, uint32_t, ocs_hw_port_control_cb_t, void *);
+static ocs_hw_rtn_e ocs_hw_set_linkcfg_lancer(ocs_hw_t *, ocs_hw_linkcfg_e, uint32_t, ocs_hw_port_control_cb_t, void *);
+static ocs_hw_rtn_e ocs_hw_set_linkcfg_skyhawk(ocs_hw_t *, ocs_hw_linkcfg_e, uint32_t, ocs_hw_port_control_cb_t, void *);
+static void ocs_hw_init_linkcfg_cb(int32_t status, uintptr_t value, void *arg);
+static ocs_hw_rtn_e ocs_hw_set_eth_license(ocs_hw_t *hw, uint32_t license);
+static ocs_hw_rtn_e ocs_hw_set_dif_seed(ocs_hw_t *hw);
+static ocs_hw_rtn_e ocs_hw_set_dif_mode(ocs_hw_t *hw);
+static void ocs_hw_io_free_internal(void *arg);
+static void ocs_hw_io_free_port_owned(void *arg);
+static ocs_hw_rtn_e ocs_hw_config_auto_xfer_rdy_t10pi(ocs_hw_t *hw, uint8_t *buf);
+static ocs_hw_rtn_e ocs_hw_config_set_fdt_xfer_hint(ocs_hw_t *hw, uint32_t fdt_xfer_hint);
+static void ocs_hw_wq_process_abort(void *arg, uint8_t *cqe, int32_t status);
+static int32_t ocs_hw_config_mrq(ocs_hw_t *hw, uint8_t, uint16_t, uint16_t);
+static ocs_hw_rtn_e ocs_hw_config_watchdog_timer(ocs_hw_t *hw);
+static ocs_hw_rtn_e ocs_hw_config_sli_port_health_check(ocs_hw_t *hw, uint8_t query, uint8_t enable);
+
+/* HW domain database operations */
+static int32_t ocs_hw_domain_add(ocs_hw_t *, ocs_domain_t *);
+static int32_t ocs_hw_domain_del(ocs_hw_t *, ocs_domain_t *);
+
+
+/* Port state machine */
+static void *__ocs_hw_port_alloc_init(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
+static void *__ocs_hw_port_alloc_read_sparm64(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
+static void *__ocs_hw_port_alloc_init_vpi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
+static void *__ocs_hw_port_done(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
+static void *__ocs_hw_port_free_unreg_vpi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
+
+/* Domain state machine */
+static void *__ocs_hw_domain_init(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
+static void *__ocs_hw_domain_alloc_reg_fcfi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
+static void * __ocs_hw_domain_alloc_init_vfi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
+static void *__ocs_hw_domain_free_unreg_vfi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
+static void *__ocs_hw_domain_free_unreg_fcfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data);
+static int32_t __ocs_hw_domain_cb(ocs_hw_t *, int32_t, uint8_t *, void *);
+static int32_t __ocs_hw_port_cb(ocs_hw_t *, int32_t, uint8_t *, void *);
+static int32_t __ocs_hw_port_realloc_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg);
+
+/* BZ 161832 */
+static void ocs_hw_check_sec_hio_list(ocs_hw_t *hw);
+
+/* WQE timeouts */
+static void target_wqe_timer_cb(void *arg);
+static void shutdown_target_wqe_timer(ocs_hw_t *hw);
+
+static inline void
+ocs_hw_add_io_timed_wqe(ocs_hw_t *hw, ocs_hw_io_t *io)
+{
+ if (hw->config.emulate_tgt_wqe_timeout && io->tgt_wqe_timeout) {
+ /*
+ * Active WQE list currently only used for
+ * target WQE timeouts.
+ */
+ ocs_lock(&hw->io_lock);
+ ocs_list_add_tail(&hw->io_timed_wqe, io);
+ io->submit_ticks = ocs_get_os_ticks();
+ ocs_unlock(&hw->io_lock);
+ }
+}
+
+static inline void
+ocs_hw_remove_io_timed_wqe(ocs_hw_t *hw, ocs_hw_io_t *io)
+{
+ if (hw->config.emulate_tgt_wqe_timeout) {
+ /*
+ * If target wqe timeouts are enabled,
+ * remove from active wqe list.
+ */
+ ocs_lock(&hw->io_lock);
+ if (ocs_list_on_list(&io->wqe_link)) {
+ ocs_list_remove(&hw->io_timed_wqe, io);
+ }
+ ocs_unlock(&hw->io_lock);
+ }
+}
+
+static uint8_t ocs_hw_iotype_is_originator(uint16_t io_type)
+{
+ switch (io_type) {
+ case OCS_HW_IO_INITIATOR_READ:
+ case OCS_HW_IO_INITIATOR_WRITE:
+ case OCS_HW_IO_INITIATOR_NODATA:
+ case OCS_HW_FC_CT:
+ case OCS_HW_ELS_REQ:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static uint8_t ocs_hw_wcqe_abort_needed(uint16_t status, uint8_t ext, uint8_t xb)
+{
+ /* if exchange not active, nothing to abort */
+ if (!xb) {
+ return FALSE;
+ }
+ if (status == SLI4_FC_WCQE_STATUS_LOCAL_REJECT) {
+ switch (ext) {
+ /* exceptions where abort is not needed */
+ case SLI4_FC_LOCAL_REJECT_INVALID_RPI: /* lancer returns this after unreg_rpi */
+ case SLI4_FC_LOCAL_REJECT_ABORT_REQUESTED: /* abort already in progress */
+ return FALSE;
+ default:
+ break;
+ }
+ }
+ return TRUE;
+}
+
+/**
+ * @brief Determine the number of chutes on the device.
+ *
+ * @par Description
+ * Some devices require queue resources allocated per protocol processor
+ * (chute). This function returns the number of chutes on this device.
+ *
+ * @param hw Hardware context allocated by the caller.
+ *
+ * @return Returns the number of protocol processors (chutes) on the device.
+ */
+static uint32_t
+ocs_hw_get_num_chutes(ocs_hw_t *hw)
+{
+ uint32_t num_chutes = 1;
+
+ if (sli_get_is_dual_ulp_capable(&hw->sli) &&
+ sli_get_is_ulp_enabled(&hw->sli, 0) &&
+ sli_get_is_ulp_enabled(&hw->sli, 1)) {
+ num_chutes = 2;
+ }
+ return num_chutes;
+}
+
+static ocs_hw_rtn_e
+ocs_hw_link_event_init(ocs_hw_t *hw)
+{
+ if (hw == NULL) {
+		ocs_log_err(NULL, "bad parameter hw=%p\n", hw);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ hw->link.status = SLI_LINK_STATUS_MAX;
+ hw->link.topology = SLI_LINK_TOPO_NONE;
+ hw->link.medium = SLI_LINK_MEDIUM_MAX;
+ hw->link.speed = 0;
+ hw->link.loop_map = NULL;
+ hw->link.fc_id = UINT32_MAX;
+
+ return OCS_HW_RTN_SUCCESS;
+}
+
+/**
+ * @ingroup devInitShutdown
+ * @brief If this is physical port 0, then read the max dump size.
+ *
+ * @par Description
+ * Queries the FW for the maximum dump size
+ *
+ * @param hw Hardware context allocated by the caller.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+static ocs_hw_rtn_e
+ocs_hw_read_max_dump_size(ocs_hw_t *hw)
+{
+ uint8_t buf[SLI4_BMBX_SIZE];
+ uint8_t bus, dev, func;
+ int rc;
+
+ /* lancer only */
+ if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
+ ocs_log_debug(hw->os, "Function only supported for I/F type 2\n");
+ return OCS_HW_RTN_ERROR;
+ }
+
+ /*
+ * Make sure the FW is new enough to support this command. If the FW
+ * is too old, the FW will UE.
+ */
+ if (hw->workaround.disable_dump_loc) {
+ ocs_log_test(hw->os, "FW version is too old for this feature\n");
+ return OCS_HW_RTN_ERROR;
+ }
+
+	/* attempt to determine the dump size for function 0 only. */
+ ocs_get_bus_dev_func(hw->os, &bus, &dev, &func);
+ if (func == 0) {
+ if (sli_cmd_common_set_dump_location(&hw->sli, buf,
+ SLI4_BMBX_SIZE, 1, 0, NULL, 0)) {
+ sli4_res_common_set_dump_location_t *rsp =
+ (sli4_res_common_set_dump_location_t *)
+ (buf + offsetof(sli4_cmd_sli_config_t,
+ payload.embed));
+
+ rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
+ if (rc != OCS_HW_RTN_SUCCESS) {
+ ocs_log_test(hw->os, "set dump location command failed\n");
+ return rc;
+ } else {
+ hw->dump_size = rsp->buffer_length;
+ ocs_log_debug(hw->os, "Dump size %x\n", rsp->buffer_length);
+ }
+ }
+ }
+ return OCS_HW_RTN_SUCCESS;
+}
+
+/**
+ * @ingroup devInitShutdown
+ * @brief Set up the Hardware Abstraction Layer module.
+ *
+ * @par Description
+ * Calls set up to configure the hardware.
+ *
+ * @param hw Hardware context allocated by the caller.
+ * @param os Device abstraction.
+ * @param port_type Protocol type of port, such as FC and NIC.
+ *
+ * @todo Why is port_type a parameter?
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+ocs_hw_rtn_e
+ocs_hw_setup(ocs_hw_t *hw, ocs_os_handle_t os, sli4_port_type_e port_type)
+{
+ uint32_t i;
+ char prop_buf[32];
+
+ if (hw == NULL) {
+ ocs_log_err(os, "bad parameter(s) hw=%p\n", hw);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ if (hw->hw_setup_called) {
+		/* Set up run-time workarounds.
+		 * Run on every setup call, to allow for hw_war_version changes.
+ */
+ ocs_hw_workaround_setup(hw);
+ return OCS_HW_RTN_SUCCESS;
+ }
+
+ /*
+ * ocs_hw_init() relies on NULL pointers indicating that a structure
+ * needs allocation. If a structure is non-NULL, ocs_hw_init() won't
+ * free/realloc that memory
+ */
+ ocs_memset(hw, 0, sizeof(ocs_hw_t));
+
+ hw->hw_setup_called = TRUE;
+
+ hw->os = os;
+
+ ocs_lock_init(hw->os, &hw->cmd_lock, "HW_cmd_lock[%d]", ocs_instance(hw->os));
+ ocs_list_init(&hw->cmd_head, ocs_command_ctx_t, link);
+ ocs_list_init(&hw->cmd_pending, ocs_command_ctx_t, link);
+ hw->cmd_head_count = 0;
+
+ ocs_lock_init(hw->os, &hw->io_lock, "HW_io_lock[%d]", ocs_instance(hw->os));
+ ocs_lock_init(hw->os, &hw->io_abort_lock, "HW_io_abort_lock[%d]", ocs_instance(hw->os));
+
+ ocs_atomic_init(&hw->io_alloc_failed_count, 0);
+
+ hw->config.speed = FC_LINK_SPEED_AUTO_16_8_4;
+ hw->config.dif_seed = 0;
+ hw->config.auto_xfer_rdy_blk_size_chip = OCS_HW_AUTO_XFER_RDY_BLK_SIZE_DEFAULT;
+ hw->config.auto_xfer_rdy_ref_tag_is_lba = OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA_DEFAULT;
+ hw->config.auto_xfer_rdy_app_tag_valid = OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID_DEFAULT;
+ hw->config.auto_xfer_rdy_app_tag_value = OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE_DEFAULT;
+
+
+ if (sli_setup(&hw->sli, hw->os, port_type)) {
+ ocs_log_err(hw->os, "SLI setup failed\n");
+ return OCS_HW_RTN_ERROR;
+ }
+
+ ocs_memset(hw->domains, 0, sizeof(hw->domains));
+
+ ocs_memset(hw->fcf_index_fcfi, 0, sizeof(hw->fcf_index_fcfi));
+
+ ocs_hw_link_event_init(hw);
+
+ sli_callback(&hw->sli, SLI4_CB_LINK, ocs_hw_cb_link, hw);
+ sli_callback(&hw->sli, SLI4_CB_FIP, ocs_hw_cb_fip, hw);
+
+ /*
+	 * Set all the queue sizes to the maximum allowed. These values may
+	 * be changed later by the adjust and workaround functions.
+ */
+ for (i = 0; i < ARRAY_SIZE(hw->num_qentries); i++) {
+ hw->num_qentries[i] = sli_get_max_qentries(&hw->sli, i);
+ }
+
+ /*
+ * The RQ assignment for RQ pair mode.
+ */
+ hw->config.rq_default_buffer_size = OCS_HW_RQ_SIZE_PAYLOAD;
+ hw->config.n_io = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI);
+ if (ocs_get_property("auto_xfer_rdy_xri_cnt", prop_buf, sizeof(prop_buf)) == 0) {
+ hw->config.auto_xfer_rdy_xri_cnt = ocs_strtoul(prop_buf, 0, 0);
+ }
+
+ /* by default, enable initiator-only auto-ABTS emulation */
+ hw->config.i_only_aab = TRUE;
+
+ /* Setup run-time workarounds */
+ ocs_hw_workaround_setup(hw);
+
+ /* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */
+ if (hw->workaround.override_fcfi) {
+ hw->first_domain_idx = -1;
+ }
+
+ /* Must be done after the workaround setup */
+ if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) {
+ (void)ocs_hw_read_max_dump_size(hw);
+ }
+
+ /* calculate the number of WQs required. */
+ ocs_hw_adjust_wqs(hw);
+
+ /* Set the default dif mode */
+ if (! sli_is_dif_inline_capable(&hw->sli)) {
+ ocs_log_test(hw->os, "not inline capable, setting mode to separate\n");
+ hw->config.dif_mode = OCS_HW_DIF_MODE_SEPARATE;
+ }
+ /* Workaround: BZ 161832 */
+ if (hw->workaround.use_dif_sec_xri) {
+ ocs_list_init(&hw->sec_hio_wait_list, ocs_hw_io_t, link);
+ }
+
+ /*
+ * Figure out the starting and max ULP to spread the WQs across the
+ * ULPs.
+ */
+ if (sli_get_is_dual_ulp_capable(&hw->sli)) {
+ if (sli_get_is_ulp_enabled(&hw->sli, 0) &&
+ sli_get_is_ulp_enabled(&hw->sli, 1)) {
+ hw->ulp_start = 0;
+ hw->ulp_max = 1;
+ } else if (sli_get_is_ulp_enabled(&hw->sli, 0)) {
+ hw->ulp_start = 0;
+ hw->ulp_max = 0;
+ } else {
+ hw->ulp_start = 1;
+ hw->ulp_max = 1;
+ }
+ } else {
+ if (sli_get_is_ulp_enabled(&hw->sli, 0)) {
+ hw->ulp_start = 0;
+ hw->ulp_max = 0;
+ } else {
+ hw->ulp_start = 1;
+ hw->ulp_max = 1;
+ }
+ }
+ ocs_log_debug(hw->os, "ulp_start %d, ulp_max %d\n",
+ hw->ulp_start, hw->ulp_max);
+ hw->config.queue_topology = hw_global.queue_topology_string;
+
+ hw->qtop = ocs_hw_qtop_parse(hw, hw->config.queue_topology);
+
+ hw->config.n_eq = hw->qtop->entry_counts[QTOP_EQ];
+ hw->config.n_cq = hw->qtop->entry_counts[QTOP_CQ];
+ hw->config.n_rq = hw->qtop->entry_counts[QTOP_RQ];
+ hw->config.n_wq = hw->qtop->entry_counts[QTOP_WQ];
+ hw->config.n_mq = hw->qtop->entry_counts[QTOP_MQ];
+
+ /* Verify qtop configuration against driver supported configuration */
+ if (hw->config.n_rq > OCE_HW_MAX_NUM_MRQ_PAIRS) {
+ ocs_log_crit(hw->os, "Max supported MRQ pairs = %d\n",
+ OCE_HW_MAX_NUM_MRQ_PAIRS);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ if (hw->config.n_eq > OCS_HW_MAX_NUM_EQ) {
+ ocs_log_crit(hw->os, "Max supported EQs = %d\n",
+ OCS_HW_MAX_NUM_EQ);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ if (hw->config.n_cq > OCS_HW_MAX_NUM_CQ) {
+ ocs_log_crit(hw->os, "Max supported CQs = %d\n",
+ OCS_HW_MAX_NUM_CQ);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ if (hw->config.n_wq > OCS_HW_MAX_NUM_WQ) {
+ ocs_log_crit(hw->os, "Max supported WQs = %d\n",
+ OCS_HW_MAX_NUM_WQ);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ if (hw->config.n_mq > OCS_HW_MAX_NUM_MQ) {
+ ocs_log_crit(hw->os, "Max supported MQs = %d\n",
+ OCS_HW_MAX_NUM_MQ);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ return OCS_HW_RTN_SUCCESS;
+}
+
+/**
+ * @ingroup devInitShutdown
+ * @brief Allocate memory structures to prepare for the device operation.
+ *
+ * @par Description
+ * Allocates memory structures needed by the device and prepares the device
+ * for operation.
+ * @n @n @b Note: This function may be called more than once (for example, at
+ * initialization and then after a reset), but the size of the internal resources
+ * may not be changed without tearing down the HW (ocs_hw_teardown()).
+ *
+ * @param hw Hardware context allocated by the caller.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+ocs_hw_rtn_e
+ocs_hw_init(ocs_hw_t *hw)
+{
+ ocs_hw_rtn_e rc;
+ uint32_t i = 0;
+ uint8_t buf[SLI4_BMBX_SIZE];
+ uint32_t max_rpi;
+ int rem_count;
+ int written_size = 0;
+ uint32_t count;
+ char prop_buf[32];
+ uint32_t ramdisc_blocksize = 512;
+ uint32_t q_count = 0;
+ /*
+ * Make sure the command lists are empty. If this is start-of-day,
+ * they'll be empty since they were just initialized in ocs_hw_setup.
+ * If we've just gone through a reset, the command and command pending
+ * lists should have been cleaned up as part of the reset (ocs_hw_reset()).
+ */
+ ocs_lock(&hw->cmd_lock);
+ if (!ocs_list_empty(&hw->cmd_head)) {
+ ocs_log_test(hw->os, "command found on cmd list\n");
+ ocs_unlock(&hw->cmd_lock);
+ return OCS_HW_RTN_ERROR;
+ }
+ if (!ocs_list_empty(&hw->cmd_pending)) {
+ ocs_log_test(hw->os, "command found on pending list\n");
+ ocs_unlock(&hw->cmd_lock);
+ return OCS_HW_RTN_ERROR;
+ }
+ ocs_unlock(&hw->cmd_lock);
+
+	/* Free RQ buffers if previously allocated */
+ ocs_hw_rx_free(hw);
+
+ /*
+ * The IO queues must be initialized here for the reset case. The
+ * ocs_hw_init_io() function will re-add the IOs to the free list.
+ * The cmd_head list should be OK since we free all entries in
+ * ocs_hw_command_cancel() that is called in the ocs_hw_reset().
+ */
+
+ /* If we are in this function due to a reset, there may be stale items
+ * on lists that need to be removed. Clean them up.
+ */
+	rem_count = 0;
+ if (ocs_list_valid(&hw->io_wait_free)) {
+ while ((!ocs_list_empty(&hw->io_wait_free))) {
+ rem_count++;
+ ocs_list_remove_head(&hw->io_wait_free);
+ }
+ if (rem_count > 0) {
+ ocs_log_debug(hw->os, "removed %d items from io_wait_free list\n", rem_count);
+ }
+ }
+	rem_count = 0;
+ if (ocs_list_valid(&hw->io_inuse)) {
+ while ((!ocs_list_empty(&hw->io_inuse))) {
+ rem_count++;
+ ocs_list_remove_head(&hw->io_inuse);
+ }
+ if (rem_count > 0) {
+ ocs_log_debug(hw->os, "removed %d items from io_inuse list\n", rem_count);
+ }
+ }
+	rem_count = 0;
+ if (ocs_list_valid(&hw->io_free)) {
+ while ((!ocs_list_empty(&hw->io_free))) {
+ rem_count++;
+ ocs_list_remove_head(&hw->io_free);
+ }
+ if (rem_count > 0) {
+ ocs_log_debug(hw->os, "removed %d items from io_free list\n", rem_count);
+ }
+ }
+ if (ocs_list_valid(&hw->io_port_owned)) {
+ while ((!ocs_list_empty(&hw->io_port_owned))) {
+ ocs_list_remove_head(&hw->io_port_owned);
+ }
+ }
+ ocs_list_init(&hw->io_inuse, ocs_hw_io_t, link);
+ ocs_list_init(&hw->io_free, ocs_hw_io_t, link);
+ ocs_list_init(&hw->io_port_owned, ocs_hw_io_t, link);
+ ocs_list_init(&hw->io_wait_free, ocs_hw_io_t, link);
+ ocs_list_init(&hw->io_timed_wqe, ocs_hw_io_t, wqe_link);
+ ocs_list_init(&hw->io_port_dnrx, ocs_hw_io_t, dnrx_link);
+
+	/* If MRQ is not required, make sure we don't request the feature. */
+ if (hw->config.n_rq == 1) {
+ hw->sli.config.features.flag.mrqp = FALSE;
+ }
+
+ if (sli_init(&hw->sli)) {
+ ocs_log_err(hw->os, "SLI failed to initialize\n");
+ return OCS_HW_RTN_ERROR;
+ }
+
+ /*
+ * Enable the auto xfer rdy feature if requested.
+ */
+ hw->auto_xfer_rdy_enabled = FALSE;
+ if (sli_get_auto_xfer_rdy_capable(&hw->sli) &&
+ hw->config.auto_xfer_rdy_size > 0) {
+		if (hw->config.esoc) {
+ if (ocs_get_property("ramdisc_blocksize", prop_buf, sizeof(prop_buf)) == 0) {
+ ramdisc_blocksize = ocs_strtoul(prop_buf, 0, 0);
+ }
+ written_size = sli_cmd_config_auto_xfer_rdy_hp(&hw->sli, buf, SLI4_BMBX_SIZE, hw->config.auto_xfer_rdy_size, 1, ramdisc_blocksize);
+ } else {
+ written_size = sli_cmd_config_auto_xfer_rdy(&hw->sli, buf, SLI4_BMBX_SIZE, hw->config.auto_xfer_rdy_size);
+ }
+ if (written_size) {
+ rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
+ if (rc != OCS_HW_RTN_SUCCESS) {
+ ocs_log_err(hw->os, "config auto xfer rdy failed\n");
+ return rc;
+ }
+ }
+ hw->auto_xfer_rdy_enabled = TRUE;
+
+ if (hw->config.auto_xfer_rdy_t10_enable) {
+ rc = ocs_hw_config_auto_xfer_rdy_t10pi(hw, buf);
+ if (rc != OCS_HW_RTN_SUCCESS) {
+ ocs_log_err(hw->os, "set parameters auto xfer rdy T10 PI failed\n");
+ return rc;
+ }
+ }
+ }
+
+	if (hw->sliport_healthcheck) {
+		rc = ocs_hw_config_sli_port_health_check(hw, 0, 1);
+		if (rc != OCS_HW_RTN_SUCCESS) {
+			ocs_log_err(hw->os, "Enabling SLI port health check failed\n");
+ return rc;
+ }
+ }
+
+ /*
+ * Set FDT transfer hint, only works on Lancer
+ */
+ if ((hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) && (OCS_HW_FDT_XFER_HINT != 0)) {
+ /*
+ * Non-fatal error. In particular, we can disregard failure to set OCS_HW_FDT_XFER_HINT on
+	 * devices with legacy firmware that do not support the OCS_HW_FDT_XFER_HINT feature.
+ */
+ ocs_hw_config_set_fdt_xfer_hint(hw, OCS_HW_FDT_XFER_HINT);
+ }
+
+ /*
+ * Verify that we have not exceeded any queue sizes
+ */
+ q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_EQ),
+ OCS_HW_MAX_NUM_EQ);
+ if (hw->config.n_eq > q_count) {
+ ocs_log_err(hw->os, "requested %d EQ but %d allowed\n",
+ hw->config.n_eq, q_count);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_CQ),
+ OCS_HW_MAX_NUM_CQ);
+ if (hw->config.n_cq > q_count) {
+ ocs_log_err(hw->os, "requested %d CQ but %d allowed\n",
+ hw->config.n_cq, q_count);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_MQ),
+ OCS_HW_MAX_NUM_MQ);
+ if (hw->config.n_mq > q_count) {
+ ocs_log_err(hw->os, "requested %d MQ but %d allowed\n",
+ hw->config.n_mq, q_count);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_RQ),
+ OCS_HW_MAX_NUM_RQ);
+ if (hw->config.n_rq > q_count) {
+ ocs_log_err(hw->os, "requested %d RQ but %d allowed\n",
+ hw->config.n_rq, q_count);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_WQ),
+ OCS_HW_MAX_NUM_WQ);
+ if (hw->config.n_wq > q_count) {
+ ocs_log_err(hw->os, "requested %d WQ but %d allowed\n",
+ hw->config.n_wq, q_count);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ /* zero the hashes */
+ ocs_memset(hw->cq_hash, 0, sizeof(hw->cq_hash));
+ ocs_log_debug(hw->os, "Max CQs %d, hash size = %d\n",
+ OCS_HW_MAX_NUM_CQ, OCS_HW_Q_HASH_SIZE);
+
+ ocs_memset(hw->rq_hash, 0, sizeof(hw->rq_hash));
+ ocs_log_debug(hw->os, "Max RQs %d, hash size = %d\n",
+ OCS_HW_MAX_NUM_RQ, OCS_HW_Q_HASH_SIZE);
+
+ ocs_memset(hw->wq_hash, 0, sizeof(hw->wq_hash));
+ ocs_log_debug(hw->os, "Max WQs %d, hash size = %d\n",
+ OCS_HW_MAX_NUM_WQ, OCS_HW_Q_HASH_SIZE);
+
+
+ rc = ocs_hw_init_queues(hw, hw->qtop);
+ if (rc != OCS_HW_RTN_SUCCESS) {
+ return rc;
+ }
+
+ max_rpi = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI);
+ i = sli_fc_get_rpi_requirements(&hw->sli, max_rpi);
+ if (i) {
+ ocs_dma_t payload_memory;
+
+ rc = OCS_HW_RTN_ERROR;
+
+ if (hw->rnode_mem.size) {
+ ocs_dma_free(hw->os, &hw->rnode_mem);
+ }
+
+ if (ocs_dma_alloc(hw->os, &hw->rnode_mem, i, 4096)) {
+ ocs_log_err(hw->os, "remote node memory allocation fail\n");
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ payload_memory.size = 0;
+ if (sli_cmd_fcoe_post_hdr_templates(&hw->sli, buf, SLI4_BMBX_SIZE,
+ &hw->rnode_mem, UINT16_MAX, &payload_memory)) {
+ rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
+
+ if (payload_memory.size != 0) {
+ /* The command was non-embedded - need to free the dma buffer */
+ ocs_dma_free(hw->os, &payload_memory);
+ }
+ }
+
+ if (rc != OCS_HW_RTN_SUCCESS) {
+ ocs_log_err(hw->os, "header template registration failed\n");
+ return rc;
+ }
+ }
+
+ /* Allocate and post RQ buffers */
+ rc = ocs_hw_rx_allocate(hw);
+ if (rc) {
+ ocs_log_err(hw->os, "rx_allocate failed\n");
+ return rc;
+ }
+
+ /* Populate hw->seq_free_list */
+ if (hw->seq_pool == NULL) {
+ uint32_t count = 0;
+ uint32_t i;
+
+ /* Sum up the total number of RQ entries, to use to allocate the sequence object pool */
+ for (i = 0; i < hw->hw_rq_count; i++) {
+ count += hw->hw_rq[i]->entry_count;
+ }
+
+ hw->seq_pool = ocs_array_alloc(hw->os, sizeof(ocs_hw_sequence_t), count);
+ if (hw->seq_pool == NULL) {
+ ocs_log_err(hw->os, "malloc seq_pool failed\n");
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+ }
+
+	if (ocs_hw_rx_post(hw)) {
+ ocs_log_err(hw->os, "WARNING - error posting RQ buffers\n");
+ }
+
+ /* Allocate rpi_ref if not previously allocated */
+ if (hw->rpi_ref == NULL) {
+ hw->rpi_ref = ocs_malloc(hw->os, max_rpi * sizeof(*hw->rpi_ref),
+ OCS_M_ZERO | OCS_M_NOWAIT);
+ if (hw->rpi_ref == NULL) {
+ ocs_log_err(hw->os, "rpi_ref allocation failure (%d)\n", i);
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+ }
+
+ for (i = 0; i < max_rpi; i ++) {
+ ocs_atomic_init(&hw->rpi_ref[i].rpi_count, 0);
+ ocs_atomic_init(&hw->rpi_ref[i].rpi_attached, 0);
+ }
+
+ ocs_memset(hw->domains, 0, sizeof(hw->domains));
+
+ /* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */
+ if (hw->workaround.override_fcfi) {
+ hw->first_domain_idx = -1;
+ }
+
+ ocs_memset(hw->fcf_index_fcfi, 0, sizeof(hw->fcf_index_fcfi));
+
+ /* Register a FCFI to allow unsolicited frames to be routed to the driver */
+ if (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC) {
+
+ if (hw->hw_mrq_count) {
+ ocs_log_debug(hw->os, "using REG_FCFI MRQ\n");
+
+ rc = ocs_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_FCFI_MODE, 0, 0);
+ if (rc != OCS_HW_RTN_SUCCESS) {
+ ocs_log_err(hw->os, "REG_FCFI_MRQ FCFI registration failed\n");
+ return rc;
+ }
+
+ rc = ocs_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_MRQ_MODE, 0, 0);
+ if (rc != OCS_HW_RTN_SUCCESS) {
+ ocs_log_err(hw->os, "REG_FCFI_MRQ MRQ registration failed\n");
+ return rc;
+ }
+ } else {
+ sli4_cmd_rq_cfg_t rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG];
+
+ ocs_log_debug(hw->os, "using REG_FCFI standard\n");
+
+ /* Set the filter match/mask values from hw's filter_def values */
+ for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
+ rq_cfg[i].rq_id = 0xffff;
+ rq_cfg[i].r_ctl_mask = (uint8_t) hw->config.filter_def[i];
+ rq_cfg[i].r_ctl_match = (uint8_t) (hw->config.filter_def[i] >> 8);
+ rq_cfg[i].type_mask = (uint8_t) (hw->config.filter_def[i] >> 16);
+ rq_cfg[i].type_match = (uint8_t) (hw->config.filter_def[i] >> 24);
+ }
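+
+			/*
+			 * Each 32-bit filter_def packs the REG_FCFI filter
+			 * fields, low byte to high byte: r_ctl_mask,
+			 * r_ctl_match, type_mask, type_match. For example,
+			 * 0x08ff0002 decodes to r_ctl_mask 0x02, r_ctl_match
+			 * 0x00, type_mask 0xff and type_match 0x08 (FC-4
+			 * type FCP).
+			 */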
+
+ /*
+ * Update the rq_id's of the FCF configuration (don't update more than the number
+ * of rq_cfg elements)
+ */
+ for (i = 0; i < OCS_MIN(hw->hw_rq_count, SLI4_CMD_REG_FCFI_NUM_RQ_CFG); i++) {
+ hw_rq_t *rq = hw->hw_rq[i];
+ uint32_t j;
+ for (j = 0; j < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; j++) {
+ uint32_t mask = (rq->filter_mask != 0) ? rq->filter_mask : 1;
+ if (mask & (1U << j)) {
+ rq_cfg[j].rq_id = rq->hdr->id;
+ ocs_log_debug(hw->os, "REG_FCFI: filter[%d] %08X -> RQ[%d] id=%d\n",
+ j, hw->config.filter_def[j], i, rq->hdr->id);
+ }
+ }
+ }
+
+ rc = OCS_HW_RTN_ERROR;
+
+ if (sli_cmd_reg_fcfi(&hw->sli, buf, SLI4_BMBX_SIZE, 0, rq_cfg, 0)) {
+ rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
+ }
+
+ if (rc != OCS_HW_RTN_SUCCESS) {
+ ocs_log_err(hw->os, "FCFI registration failed\n");
+ return rc;
+ }
+ hw->fcf_indicator = ((sli4_cmd_reg_fcfi_t *)buf)->fcfi;
+ }
+
+ }
+
+ /*
+ * Allocate the WQ request tag pool, if not previously allocated (the request tag value is 16 bits,
+ * thus the pool allocation size of 64k)
+ */
+ rc = ocs_hw_reqtag_init(hw);
+ if (rc) {
+ ocs_log_err(hw->os, "ocs_pool_alloc hw_wq_callback_t failed: %d\n", rc);
+ return rc;
+ }
+
+ rc = ocs_hw_setup_io(hw);
+ if (rc) {
+ ocs_log_err(hw->os, "IO allocation failure\n");
+ return rc;
+ }
+
+ rc = ocs_hw_init_io(hw);
+ if (rc) {
+ ocs_log_err(hw->os, "IO initialization failure\n");
+ return rc;
+ }
+
+ ocs_queue_history_init(hw->os, &hw->q_hist);
+
+ /* get hw link config; polling, so callback will be called immediately */
+ hw->linkcfg = OCS_HW_LINKCFG_NA;
+ ocs_hw_get_linkcfg(hw, OCS_CMD_POLL, ocs_hw_init_linkcfg_cb, hw);
+
+ /* if lancer ethernet, ethernet ports need to be enabled */
+ if ((hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) &&
+ (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_ETHERNET)) {
+ if (ocs_hw_set_eth_license(hw, hw->eth_license)) {
+ /* log warning but continue */
+ ocs_log_err(hw->os, "Failed to set ethernet license\n");
+ }
+ }
+
+ /* Set the DIF seed - only for lancer right now */
+	if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) {
+		rc = ocs_hw_set_dif_seed(hw);
+		if (rc != OCS_HW_RTN_SUCCESS) {
+			ocs_log_err(hw->os, "Failed to set DIF seed value\n");
+			return rc;
+		}
+	}
+
+ /* Set the DIF mode - skyhawk only */
+ if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli) &&
+ sli_get_dif_capable(&hw->sli)) {
+ rc = ocs_hw_set_dif_mode(hw);
+ if (rc != OCS_HW_RTN_SUCCESS) {
+ ocs_log_err(hw->os, "Failed to set DIF mode value\n");
+ return rc;
+ }
+ }
+
+ /*
+	 * Arming the EQ allows interrupts to be generated when (for example) CQ completions write EQ entries.
+ */
+ for (i = 0; i < hw->eq_count; i++) {
+ sli_queue_arm(&hw->sli, &hw->eq[i], TRUE);
+ }
+
+ /*
+ * Initialize RQ hash
+ */
+ for (i = 0; i < hw->rq_count; i++) {
+ ocs_hw_queue_hash_add(hw->rq_hash, hw->rq[i].id, i);
+ }
+
+ /*
+ * Initialize WQ hash
+ */
+ for (i = 0; i < hw->wq_count; i++) {
+ ocs_hw_queue_hash_add(hw->wq_hash, hw->wq[i].id, i);
+ }
+
+ /*
+	 * Arming the CQ allows (for example) MQ completions to write CQ entries.
+ */
+ for (i = 0; i < hw->cq_count; i++) {
+ ocs_hw_queue_hash_add(hw->cq_hash, hw->cq[i].id, i);
+ sli_queue_arm(&hw->sli, &hw->cq[i], TRUE);
+ }
+
+ /* record the fact that the queues are functional */
+ hw->state = OCS_HW_STATE_ACTIVE;
+
+	/* Note: must be done after the IOs are set up and the state is active */
+ if (ocs_hw_rqpair_init(hw)) {
+ ocs_log_err(hw->os, "WARNING - error initializing RQ pair\n");
+ }
+
+ /* finally kick off periodic timer to check for timed out target WQEs */
+ if (hw->config.emulate_tgt_wqe_timeout) {
+ ocs_setup_timer(hw->os, &hw->wqe_timer, target_wqe_timer_cb, hw,
+ OCS_HW_WQ_TIMER_PERIOD_MS);
+ }
+
+ /*
+	 * Allocate HW IOs for send frame: one for each class 1 WQ, or if there
+	 * are none of those, one for WQ[0].
+ */
+ if ((count = ocs_varray_get_count(hw->wq_class_array[1])) > 0) {
+ for (i = 0; i < count; i++) {
+ hw_wq_t *wq = ocs_varray_iter_next(hw->wq_class_array[1]);
+ wq->send_frame_io = ocs_hw_io_alloc(hw);
+ if (wq->send_frame_io == NULL) {
+ ocs_log_err(hw->os, "ocs_hw_io_alloc for send_frame_io failed\n");
+ }
+ }
+ } else {
+ hw->hw_wq[0]->send_frame_io = ocs_hw_io_alloc(hw);
+ if (hw->hw_wq[0]->send_frame_io == NULL) {
+ ocs_log_err(hw->os, "ocs_hw_io_alloc for send_frame_io failed\n");
+ }
+ }
+
+	/* Initialize the send frame sequence id */
+ ocs_atomic_init(&hw->send_frame_seq_id, 0);
+
+ /* Initialize watchdog timer if enabled by user */
+ hw->expiration_logged = 0;
+	if (hw->watchdog_timeout) {
+		if ((hw->watchdog_timeout < 1) || (hw->watchdog_timeout > 65534)) {
+			ocs_log_err(hw->os, "watchdog_timeout out of range: valid range is 1 - 65534\n");
+		} else if (!ocs_hw_config_watchdog_timer(hw)) {
+			ocs_log_info(hw->os, "watchdog timer configured with timeout = %d seconds\n", hw->watchdog_timeout);
+ }
+ }
+
+ if (ocs_dma_alloc(hw->os, &hw->domain_dmem, 112, 4)) {
+ ocs_log_err(hw->os, "domain node memory allocation fail\n");
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ if (ocs_dma_alloc(hw->os, &hw->fcf_dmem, OCS_HW_READ_FCF_SIZE, OCS_HW_READ_FCF_SIZE)) {
+ ocs_log_err(hw->os, "domain fcf memory allocation fail\n");
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ if ((0 == hw->loop_map.size) && ocs_dma_alloc(hw->os, &hw->loop_map,
+ SLI4_MIN_LOOP_MAP_BYTES, 4)) {
+ ocs_log_err(hw->os, "Loop dma alloc failed size:%d \n", hw->loop_map.size);
+ }
+
+ return OCS_HW_RTN_SUCCESS;
+}
+
+/**
+ * @brief Configure Multi-RQ
+ *
+ * @param hw Hardware context allocated by the caller.
+ * @param mode 1 to set the MRQ filters; 0 to set the FCFI index.
+ * @param vlanid VLAN ID; only valid in mode 0.
+ * @param fcf_index FCF index; only valid in mode 0.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+static int32_t
+ocs_hw_config_mrq(ocs_hw_t *hw, uint8_t mode, uint16_t vlanid, uint16_t fcf_index)
+{
+ uint8_t buf[SLI4_BMBX_SIZE], mrq_bitmask = 0;
+ hw_rq_t *rq;
+ sli4_cmd_reg_fcfi_mrq_t *rsp = NULL;
+ uint32_t i, j;
+ sli4_cmd_rq_cfg_t rq_filter[SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG];
+ int32_t rc;
+
+ if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE) {
+ goto issue_cmd;
+ }
+
+ /* Set the filter match/mask values from hw's filter_def values */
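+	/*
+	 * (Illustrative example with a hypothetical value: filter_def[i] =
+	 * 0x11223344 unpacks to r_ctl_mask = 0x44, r_ctl_match = 0x33,
+	 * type_mask = 0x22 and type_match = 0x11.)
+	 */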
+ for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
+ rq_filter[i].rq_id = 0xffff;
+ rq_filter[i].r_ctl_mask = (uint8_t) hw->config.filter_def[i];
+ rq_filter[i].r_ctl_match = (uint8_t) (hw->config.filter_def[i] >> 8);
+ rq_filter[i].type_mask = (uint8_t) (hw->config.filter_def[i] >> 16);
+ rq_filter[i].type_match = (uint8_t) (hw->config.filter_def[i] >> 24);
+ }
+
+ /* Accumulate counts for each filter type used, build rq_ids[] list */
+ for (i = 0; i < hw->hw_rq_count; i++) {
+ rq = hw->hw_rq[i];
+ for (j = 0; j < SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG; j++) {
+ if (rq->filter_mask & (1U << j)) {
+ if (rq_filter[j].rq_id != 0xffff) {
+				/* Already used.  Bail out if this is not the RQ set case. */
+ if (!rq->is_mrq || (rq_filter[j].rq_id != rq->base_mrq_id)) {
+ ocs_log_err(hw->os, "Wrong queue topology.\n");
+ return OCS_HW_RTN_ERROR;
+ }
+ continue;
+ }
+
+ if (rq->is_mrq) {
+ rq_filter[j].rq_id = rq->base_mrq_id;
+ mrq_bitmask |= (1U << j);
+ } else {
+ rq_filter[j].rq_id = rq->hdr->id;
+ }
+ }
+ }
+ }
+
+issue_cmd:
+ /* Invoke REG_FCFI_MRQ */
+ rc = sli_cmd_reg_fcfi_mrq(&hw->sli,
+ buf, /* buf */
+ SLI4_BMBX_SIZE, /* size */
+				 mode,				/* mode */
+ fcf_index, /* fcf_index */
+ vlanid, /* vlan_id */
+ hw->config.rq_selection_policy, /* RQ selection policy*/
+ mrq_bitmask, /* MRQ bitmask */
+ hw->hw_mrq_count, /* num_mrqs */
+ rq_filter); /* RQ filter */
+ if (rc == 0) {
+ ocs_log_err(hw->os, "sli_cmd_reg_fcfi_mrq() failed: %d\n", rc);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
+
+ rsp = (sli4_cmd_reg_fcfi_mrq_t *)buf;
+
+ if ((rc != OCS_HW_RTN_SUCCESS) || (rsp->hdr.status)) {
+ ocs_log_err(hw->os, "FCFI MRQ registration failed. cmd = %x status = %x\n",
+ rsp->hdr.command, rsp->hdr.status);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE) {
+ hw->fcf_indicator = rsp->fcfi;
+ }
+ return 0;
+}
+
+/**
+ * @brief Callback function for getting linkcfg during HW initialization.
+ *
+ * @param status Status of the linkcfg get operation.
+ * @param value Link configuration enum to which the link configuration is set.
+ * @param arg Callback argument (ocs_hw_t *).
+ *
+ * @return None.
+ */
+static void
+ocs_hw_init_linkcfg_cb(int32_t status, uintptr_t value, void *arg)
+{
+ ocs_hw_t *hw = (ocs_hw_t *)arg;
+ if (status == 0) {
+ hw->linkcfg = (ocs_hw_linkcfg_e)value;
+ } else {
+ hw->linkcfg = OCS_HW_LINKCFG_NA;
+ }
+ ocs_log_debug(hw->os, "linkcfg=%d\n", hw->linkcfg);
+}
+
+/**
+ * @ingroup devInitShutdown
+ * @brief Tear down the Hardware Abstraction Layer module.
+ *
+ * @par Description
+ * Frees memory structures needed by the device, and shuts down the device. Does
+ * not free the HW context memory (which is done by the caller).
+ *
+ * @param hw Hardware context allocated by the caller.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+ocs_hw_rtn_e
+ocs_hw_teardown(ocs_hw_t *hw)
+{
+ uint32_t i = 0;
+	uint32_t iters = 10;	/* XXX */
+ uint32_t max_rpi;
+ uint32_t destroy_queues;
+ uint32_t free_memory;
+
+ if (!hw) {
+ ocs_log_err(NULL, "bad parameter(s) hw=%p\n", hw);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ destroy_queues = (hw->state == OCS_HW_STATE_ACTIVE);
+ free_memory = (hw->state != OCS_HW_STATE_UNINITIALIZED);
+
+ /* shutdown target wqe timer */
+ shutdown_target_wqe_timer(hw);
+
+ /* Cancel watchdog timer if enabled */
+	if (hw->watchdog_timeout) {
+ hw->watchdog_timeout = 0;
+ ocs_hw_config_watchdog_timer(hw);
+ }
+
+ /* Cancel Sliport Healthcheck */
+	if (hw->sliport_healthcheck) {
+ hw->sliport_healthcheck = 0;
+ ocs_hw_config_sli_port_health_check(hw, 0, 0);
+ }
+
+ if (hw->state != OCS_HW_STATE_QUEUES_ALLOCATED) {
+
+ hw->state = OCS_HW_STATE_TEARDOWN_IN_PROGRESS;
+
+ ocs_hw_flush(hw);
+
+ /* If there are outstanding commands, wait for them to complete */
+ while (!ocs_list_empty(&hw->cmd_head) && iters) {
+ ocs_udelay(10000);
+ ocs_hw_flush(hw);
+ iters--;
+ }
+
+ if (ocs_list_empty(&hw->cmd_head)) {
+ ocs_log_debug(hw->os, "All commands completed on MQ queue\n");
+ } else {
+ ocs_log_debug(hw->os, "Some commands still pending on MQ queue\n");
+ }
+
+ /* Cancel any remaining commands */
+ ocs_hw_command_cancel(hw);
+ } else {
+ hw->state = OCS_HW_STATE_TEARDOWN_IN_PROGRESS;
+ }
+
+ ocs_lock_free(&hw->cmd_lock);
+
+ /* Free unregistered RPI if workaround is in force */
+ if (hw->workaround.use_unregistered_rpi) {
+ sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, hw->workaround.unregistered_rid);
+ }
+
+ max_rpi = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI);
+ if (hw->rpi_ref) {
+ for (i = 0; i < max_rpi; i++) {
+ if (ocs_atomic_read(&hw->rpi_ref[i].rpi_count)) {
+ ocs_log_debug(hw->os, "non-zero ref [%d]=%d\n",
+ i, ocs_atomic_read(&hw->rpi_ref[i].rpi_count));
+ }
+ }
+ ocs_free(hw->os, hw->rpi_ref, max_rpi * sizeof(*hw->rpi_ref));
+ hw->rpi_ref = NULL;
+ }
+
+ ocs_dma_free(hw->os, &hw->rnode_mem);
+
+ if (hw->io) {
+ for (i = 0; i < hw->config.n_io; i++) {
+ if (hw->io[i] && (hw->io[i]->sgl != NULL) &&
+ (hw->io[i]->sgl->virt != NULL)) {
+			if (hw->io[i]->is_port_owned) {
+ ocs_lock_free(&hw->io[i]->axr_lock);
+ }
+ ocs_dma_free(hw->os, hw->io[i]->sgl);
+ }
+ ocs_free(hw->os, hw->io[i], sizeof(ocs_hw_io_t));
+ hw->io[i] = NULL;
+ }
+ ocs_free(hw->os, hw->wqe_buffs, hw->config.n_io * hw->sli.config.wqe_size);
+ hw->wqe_buffs = NULL;
+ ocs_free(hw->os, hw->io, hw->config.n_io * sizeof(ocs_hw_io_t *));
+ hw->io = NULL;
+ }
+
+ ocs_dma_free(hw->os, &hw->xfer_rdy);
+ ocs_dma_free(hw->os, &hw->dump_sges);
+ ocs_dma_free(hw->os, &hw->loop_map);
+
+ ocs_lock_free(&hw->io_lock);
+ ocs_lock_free(&hw->io_abort_lock);
+
+ for (i = 0; i < hw->wq_count; i++) {
+ sli_queue_free(&hw->sli, &hw->wq[i], destroy_queues, free_memory);
+ }
+
+ for (i = 0; i < hw->rq_count; i++) {
+ sli_queue_free(&hw->sli, &hw->rq[i], destroy_queues, free_memory);
+ }
+
+ for (i = 0; i < hw->mq_count; i++) {
+ sli_queue_free(&hw->sli, &hw->mq[i], destroy_queues, free_memory);
+ }
+
+ for (i = 0; i < hw->cq_count; i++) {
+ sli_queue_free(&hw->sli, &hw->cq[i], destroy_queues, free_memory);
+ }
+
+ for (i = 0; i < hw->eq_count; i++) {
+ sli_queue_free(&hw->sli, &hw->eq[i], destroy_queues, free_memory);
+ }
+
+ ocs_hw_qtop_free(hw->qtop);
+
+ /* Free rq buffers */
+ ocs_hw_rx_free(hw);
+
+ hw_queue_teardown(hw);
+
+ ocs_hw_rqpair_teardown(hw);
+
+ if (sli_teardown(&hw->sli)) {
+ ocs_log_err(hw->os, "SLI teardown failed\n");
+ }
+
+ ocs_queue_history_free(&hw->q_hist);
+
+ /* record the fact that the queues are non-functional */
+ hw->state = OCS_HW_STATE_UNINITIALIZED;
+
+ /* free sequence free pool */
+ ocs_array_free(hw->seq_pool);
+ hw->seq_pool = NULL;
+
+ /* free hw_wq_callback pool */
+ ocs_pool_free(hw->wq_reqtag_pool);
+
+ ocs_dma_free(hw->os, &hw->domain_dmem);
+ ocs_dma_free(hw->os, &hw->fcf_dmem);
+ /* Mark HW setup as not having been called */
+ hw->hw_setup_called = FALSE;
+
+ return OCS_HW_RTN_SUCCESS;
+}
+
+ocs_hw_rtn_e
+ocs_hw_reset(ocs_hw_t *hw, ocs_hw_reset_e reset)
+{
+ uint32_t i;
+ ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
+ uint32_t iters;
+ ocs_hw_state_e prev_state = hw->state;
+
+ if (hw->state != OCS_HW_STATE_ACTIVE) {
+ ocs_log_test(hw->os, "HW state %d is not active\n", hw->state);
+ }
+
+ hw->state = OCS_HW_STATE_RESET_IN_PROGRESS;
+
+ /* shutdown target wqe timer */
+ shutdown_target_wqe_timer(hw);
+
+ ocs_hw_flush(hw);
+
+ /*
+	 * If a mailbox command requiring a DMA is outstanding (e.g. SFP/DDM),
+	 * then the FW will UE when the reset is issued.  So attempt to complete
+	 * all mailbox commands first.
+ */
+ iters = 10;
+ while (!ocs_list_empty(&hw->cmd_head) && iters) {
+ ocs_udelay(10000);
+ ocs_hw_flush(hw);
+ iters--;
+ }
+
+ if (ocs_list_empty(&hw->cmd_head)) {
+ ocs_log_debug(hw->os, "All commands completed on MQ queue\n");
+ } else {
+ ocs_log_debug(hw->os, "Some commands still pending on MQ queue\n");
+ }
+
+ /* Reset the chip */
+ switch(reset) {
+ case OCS_HW_RESET_FUNCTION:
+ ocs_log_debug(hw->os, "issuing function level reset\n");
+ if (sli_reset(&hw->sli)) {
+ ocs_log_err(hw->os, "sli_reset failed\n");
+ rc = OCS_HW_RTN_ERROR;
+ }
+ break;
+ case OCS_HW_RESET_FIRMWARE:
+ ocs_log_debug(hw->os, "issuing firmware reset\n");
+ if (sli_fw_reset(&hw->sli)) {
+ ocs_log_err(hw->os, "sli_soft_reset failed\n");
+ rc = OCS_HW_RTN_ERROR;
+ }
+ /*
+ * Because the FW reset leaves the FW in a non-running state,
+ * follow that with a regular reset.
+ */
+ ocs_log_debug(hw->os, "issuing function level reset\n");
+ if (sli_reset(&hw->sli)) {
+ ocs_log_err(hw->os, "sli_reset failed\n");
+ rc = OCS_HW_RTN_ERROR;
+ }
+ break;
+ default:
+ ocs_log_test(hw->os, "unknown reset type - no reset performed\n");
+ hw->state = prev_state;
+ return OCS_HW_RTN_ERROR;
+ }
+
+ /* Not safe to walk command/io lists unless they've been initialized */
+ if (prev_state != OCS_HW_STATE_UNINITIALIZED) {
+ ocs_hw_command_cancel(hw);
+
+ /* Clean up the inuse list, the free list and the wait free list */
+ ocs_hw_io_cancel(hw);
+
+ ocs_memset(hw->domains, 0, sizeof(hw->domains));
+ ocs_memset(hw->fcf_index_fcfi, 0, sizeof(hw->fcf_index_fcfi));
+
+ ocs_hw_link_event_init(hw);
+
+ ocs_lock(&hw->io_lock);
+ /* The io lists should be empty, but remove any that didn't get cleaned up. */
+ while (!ocs_list_empty(&hw->io_timed_wqe)) {
+ ocs_list_remove_head(&hw->io_timed_wqe);
+ }
+ /* Don't clean up the io_inuse list, the backend will do that when it finishes the IO */
+
+ while (!ocs_list_empty(&hw->io_free)) {
+ ocs_list_remove_head(&hw->io_free);
+ }
+ while (!ocs_list_empty(&hw->io_wait_free)) {
+ ocs_list_remove_head(&hw->io_wait_free);
+ }
+
+ /* Reset the request tag pool, the HW IO request tags are reassigned in ocs_hw_setup_io() */
+ ocs_hw_reqtag_reset(hw);
+
+ ocs_unlock(&hw->io_lock);
+ }
+
+ if (prev_state != OCS_HW_STATE_UNINITIALIZED) {
+ for (i = 0; i < hw->wq_count; i++) {
+ sli_queue_reset(&hw->sli, &hw->wq[i]);
+ }
+
+ for (i = 0; i < hw->rq_count; i++) {
+ sli_queue_reset(&hw->sli, &hw->rq[i]);
+ }
+
+ for (i = 0; i < hw->hw_rq_count; i++) {
+ hw_rq_t *rq = hw->hw_rq[i];
+ if (rq->rq_tracker != NULL) {
+ uint32_t j;
+
+ for (j = 0; j < rq->entry_count; j++) {
+ rq->rq_tracker[j] = NULL;
+ }
+ }
+ }
+
+ for (i = 0; i < hw->mq_count; i++) {
+ sli_queue_reset(&hw->sli, &hw->mq[i]);
+ }
+
+ for (i = 0; i < hw->cq_count; i++) {
+ sli_queue_reset(&hw->sli, &hw->cq[i]);
+ }
+
+ for (i = 0; i < hw->eq_count; i++) {
+ sli_queue_reset(&hw->sli, &hw->eq[i]);
+ }
+
+ /* Free rq buffers */
+ ocs_hw_rx_free(hw);
+
+ /* Teardown the HW queue topology */
+ hw_queue_teardown(hw);
+ } else {
+
+ /* Free rq buffers */
+ ocs_hw_rx_free(hw);
+ }
+
+ /*
+ * Re-apply the run-time workarounds after clearing the SLI config
+ * fields in sli_reset.
+ */
+ ocs_hw_workaround_setup(hw);
+ hw->state = OCS_HW_STATE_QUEUES_ALLOCATED;
+
+ return rc;
+}
+
+int32_t
+ocs_hw_get_num_eq(ocs_hw_t *hw)
+{
+ return hw->eq_count;
+}
+
+static int32_t
+ocs_hw_get_fw_timed_out(ocs_hw_t *hw)
+{
+	/*
+	 * The error values below are taken from LOWLEVEL_SET_WATCHDOG_TIMER_rev1.pdf.
+	 * No further explanation is given in the document.
+	 */
+ return (sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR1) == 0x2 &&
+ sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR2) == 0x10);
+}
+
+
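+/**
+ * @brief Read a scalar HW property.
+ *
+ * @par Example
+ * Illustrative query of the configured HW IO count:
+ * @code
+ * uint32_t n_io = 0;
+ * if (ocs_hw_get(hw, OCS_HW_N_IO, &n_io) == OCS_HW_RTN_SUCCESS) {
+ *         // n_io now holds hw->config.n_io
+ * }
+ * @endcode
+ *
+ * @param hw Hardware context.
+ * @param prop Property to read.
+ * @param value Pointer to the returned property value.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */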
+ocs_hw_rtn_e
+ocs_hw_get(ocs_hw_t *hw, ocs_hw_property_e prop, uint32_t *value)
+{
+ ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
+ int32_t tmp;
+
+ if (!value) {
+ return OCS_HW_RTN_ERROR;
+ }
+
+ *value = 0;
+
+ switch (prop) {
+ case OCS_HW_N_IO:
+ *value = hw->config.n_io;
+ break;
+ case OCS_HW_N_SGL:
+ *value = (hw->config.n_sgl - SLI4_SGE_MAX_RESERVED);
+ break;
+ case OCS_HW_MAX_IO:
+ *value = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI);
+ break;
+ case OCS_HW_MAX_NODES:
+ *value = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI);
+ break;
+ case OCS_HW_MAX_RQ_ENTRIES:
+ *value = hw->num_qentries[SLI_QTYPE_RQ];
+ break;
+ case OCS_HW_RQ_DEFAULT_BUFFER_SIZE:
+ *value = hw->config.rq_default_buffer_size;
+ break;
+ case OCS_HW_AUTO_XFER_RDY_CAPABLE:
+ *value = sli_get_auto_xfer_rdy_capable(&hw->sli);
+ break;
+ case OCS_HW_AUTO_XFER_RDY_XRI_CNT:
+ *value = hw->config.auto_xfer_rdy_xri_cnt;
+ break;
+ case OCS_HW_AUTO_XFER_RDY_SIZE:
+ *value = hw->config.auto_xfer_rdy_size;
+ break;
+ case OCS_HW_AUTO_XFER_RDY_BLK_SIZE:
+ switch (hw->config.auto_xfer_rdy_blk_size_chip) {
+ case 0:
+ *value = 512;
+ break;
+ case 1:
+ *value = 1024;
+ break;
+ case 2:
+ *value = 2048;
+ break;
+ case 3:
+ *value = 4096;
+ break;
+ case 4:
+ *value = 520;
+ break;
+ default:
+ *value = 0;
+ rc = OCS_HW_RTN_ERROR;
+ break;
+ }
+ break;
+ case OCS_HW_AUTO_XFER_RDY_T10_ENABLE:
+ *value = hw->config.auto_xfer_rdy_t10_enable;
+ break;
+ case OCS_HW_AUTO_XFER_RDY_P_TYPE:
+ *value = hw->config.auto_xfer_rdy_p_type;
+ break;
+ case OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA:
+ *value = hw->config.auto_xfer_rdy_ref_tag_is_lba;
+ break;
+ case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID:
+ *value = hw->config.auto_xfer_rdy_app_tag_valid;
+ break;
+ case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE:
+ *value = hw->config.auto_xfer_rdy_app_tag_value;
+ break;
+ case OCS_HW_MAX_SGE:
+ *value = sli_get_max_sge(&hw->sli);
+ break;
+ case OCS_HW_MAX_SGL:
+ *value = sli_get_max_sgl(&hw->sli);
+ break;
+ case OCS_HW_TOPOLOGY:
+ /*
+ * Infer link.status based on link.speed.
+ * Report OCS_HW_TOPOLOGY_NONE if the link is down.
+ */
+ if (hw->link.speed == 0) {
+ *value = OCS_HW_TOPOLOGY_NONE;
+ break;
+ }
+ switch (hw->link.topology) {
+ case SLI_LINK_TOPO_NPORT:
+ *value = OCS_HW_TOPOLOGY_NPORT;
+ break;
+ case SLI_LINK_TOPO_LOOP:
+ *value = OCS_HW_TOPOLOGY_LOOP;
+ break;
+ case SLI_LINK_TOPO_NONE:
+ *value = OCS_HW_TOPOLOGY_NONE;
+ break;
+ default:
+ ocs_log_test(hw->os, "unsupported topology %#x\n", hw->link.topology);
+ rc = OCS_HW_RTN_ERROR;
+ break;
+ }
+ break;
+ case OCS_HW_CONFIG_TOPOLOGY:
+ *value = hw->config.topology;
+ break;
+ case OCS_HW_LINK_SPEED:
+ *value = hw->link.speed;
+ break;
+ case OCS_HW_LINK_CONFIG_SPEED:
+ switch (hw->config.speed) {
+ case FC_LINK_SPEED_10G:
+ *value = 10000;
+ break;
+ case FC_LINK_SPEED_AUTO_16_8_4:
+ *value = 0;
+ break;
+ case FC_LINK_SPEED_2G:
+ *value = 2000;
+ break;
+ case FC_LINK_SPEED_4G:
+ *value = 4000;
+ break;
+ case FC_LINK_SPEED_8G:
+ *value = 8000;
+ break;
+ case FC_LINK_SPEED_16G:
+ *value = 16000;
+ break;
+ case FC_LINK_SPEED_32G:
+ *value = 32000;
+ break;
+ default:
+ ocs_log_test(hw->os, "unsupported speed %#x\n", hw->config.speed);
+ rc = OCS_HW_RTN_ERROR;
+ break;
+ }
+ break;
+ case OCS_HW_IF_TYPE:
+ *value = sli_get_if_type(&hw->sli);
+ break;
+ case OCS_HW_SLI_REV:
+ *value = sli_get_sli_rev(&hw->sli);
+ break;
+ case OCS_HW_SLI_FAMILY:
+ *value = sli_get_sli_family(&hw->sli);
+ break;
+ case OCS_HW_DIF_CAPABLE:
+ *value = sli_get_dif_capable(&hw->sli);
+ break;
+ case OCS_HW_DIF_SEED:
+ *value = hw->config.dif_seed;
+ break;
+ case OCS_HW_DIF_MODE:
+ *value = hw->config.dif_mode;
+ break;
+ case OCS_HW_DIF_MULTI_SEPARATE:
+ /* Lancer supports multiple DIF separates */
+ if (hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) {
+ *value = TRUE;
+ } else {
+ *value = FALSE;
+ }
+ break;
+ case OCS_HW_DUMP_MAX_SIZE:
+ *value = hw->dump_size;
+ break;
+ case OCS_HW_DUMP_READY:
+ *value = sli_dump_is_ready(&hw->sli);
+ break;
+ case OCS_HW_DUMP_PRESENT:
+ *value = sli_dump_is_present(&hw->sli);
+ break;
+ case OCS_HW_RESET_REQUIRED:
+ tmp = sli_reset_required(&hw->sli);
+		if (tmp < 0) {
+ rc = OCS_HW_RTN_ERROR;
+ } else {
+ *value = tmp;
+ }
+ break;
+ case OCS_HW_FW_ERROR:
+ *value = sli_fw_error_status(&hw->sli);
+ break;
+ case OCS_HW_FW_READY:
+ *value = sli_fw_ready(&hw->sli);
+ break;
+ case OCS_HW_FW_TIMED_OUT:
+ *value = ocs_hw_get_fw_timed_out(hw);
+ break;
+ case OCS_HW_HIGH_LOGIN_MODE:
+ *value = sli_get_hlm_capable(&hw->sli);
+ break;
+ case OCS_HW_PREREGISTER_SGL:
+ *value = sli_get_sgl_preregister_required(&hw->sli);
+ break;
+ case OCS_HW_HW_REV1:
+ *value = sli_get_hw_revision(&hw->sli, 0);
+ break;
+ case OCS_HW_HW_REV2:
+ *value = sli_get_hw_revision(&hw->sli, 1);
+ break;
+ case OCS_HW_HW_REV3:
+ *value = sli_get_hw_revision(&hw->sli, 2);
+ break;
+ case OCS_HW_LINKCFG:
+ *value = hw->linkcfg;
+ break;
+ case OCS_HW_ETH_LICENSE:
+ *value = hw->eth_license;
+ break;
+ case OCS_HW_LINK_MODULE_TYPE:
+ *value = sli_get_link_module_type(&hw->sli);
+ break;
+ case OCS_HW_NUM_CHUTES:
+ *value = ocs_hw_get_num_chutes(hw);
+ break;
+ case OCS_HW_DISABLE_AR_TGT_DIF:
+ *value = hw->workaround.disable_ar_tgt_dif;
+ break;
+ case OCS_HW_EMULATE_I_ONLY_AAB:
+ *value = hw->config.i_only_aab;
+ break;
+ case OCS_HW_EMULATE_TARGET_WQE_TIMEOUT:
+ *value = hw->config.emulate_tgt_wqe_timeout;
+ break;
+ case OCS_HW_VPD_LEN:
+ *value = sli_get_vpd_len(&hw->sli);
+ break;
+ case OCS_HW_SGL_CHAINING_CAPABLE:
+ *value = sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported;
+ break;
+ case OCS_HW_SGL_CHAINING_ALLOWED:
+ /*
+ * SGL Chaining is allowed in the following cases:
+ * 1. Lancer with host SGL Lists
+ * 2. Skyhawk with pre-registered SGL Lists
+ */
+ *value = FALSE;
+ if ((sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported) &&
+ !sli_get_sgl_preregister(&hw->sli) &&
+ SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) {
+ *value = TRUE;
+ }
+
+ if ((sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported) &&
+ sli_get_sgl_preregister(&hw->sli) &&
+ ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
+ (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli)))) {
+ *value = TRUE;
+ }
+ break;
+ case OCS_HW_SGL_CHAINING_HOST_ALLOCATED:
+ /* Only lancer supports host allocated SGL Chaining buffers. */
+ *value = ((sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported) &&
+ (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)));
+ break;
+ case OCS_HW_SEND_FRAME_CAPABLE:
+ if (hw->workaround.ignore_send_frame) {
+ *value = 0;
+ } else {
+ /* Only lancer is capable */
+ *value = sli_get_if_type(&hw->sli) == SLI4_IF_TYPE_LANCER_FC_ETH;
+ }
+ break;
+ case OCS_HW_RQ_SELECTION_POLICY:
+ *value = hw->config.rq_selection_policy;
+ break;
+ case OCS_HW_RR_QUANTA:
+ *value = hw->config.rr_quanta;
+ break;
+ case OCS_HW_MAX_VPORTS:
+		*value = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_VPI);
+		break;
+	default:
+ ocs_log_test(hw->os, "unsupported property %#x\n", prop);
+ rc = OCS_HW_RTN_ERROR;
+ }
+
+ return rc;
+}
+
+void *
+ocs_hw_get_ptr(ocs_hw_t *hw, ocs_hw_property_e prop)
+{
+ void *rc = NULL;
+
+ switch (prop) {
+ case OCS_HW_WWN_NODE:
+ rc = sli_get_wwn_node(&hw->sli);
+ break;
+ case OCS_HW_WWN_PORT:
+ rc = sli_get_wwn_port(&hw->sli);
+ break;
+ case OCS_HW_VPD:
+ /* make sure VPD length is non-zero */
+ if (sli_get_vpd_len(&hw->sli)) {
+ rc = sli_get_vpd(&hw->sli);
+ }
+ break;
+ case OCS_HW_FW_REV:
+ rc = sli_get_fw_name(&hw->sli, 0);
+ break;
+ case OCS_HW_FW_REV2:
+ rc = sli_get_fw_name(&hw->sli, 1);
+ break;
+ case OCS_HW_IPL:
+ rc = sli_get_ipl_name(&hw->sli);
+ break;
+ case OCS_HW_PORTNUM:
+ rc = sli_get_portnum(&hw->sli);
+ break;
+ case OCS_HW_BIOS_VERSION_STRING:
+ rc = sli_get_bios_version_string(&hw->sli);
+ break;
+ default:
+ ocs_log_test(hw->os, "unsupported property %#x\n", prop);
+ }
+
+ return rc;
+}
+
+
+
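+/**
+ * @brief Set a scalar HW property.
+ *
+ * @par Example
+ * Illustrative request for auto-negotiated FC link speed:
+ * @code
+ * ocs_hw_set(hw, OCS_HW_LINK_SPEED, 0);	// 0 selects auto-negotiation
+ * @endcode
+ *
+ * @param hw Hardware context.
+ * @param prop Property to set.
+ * @param value New value for the property.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */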
+ocs_hw_rtn_e
+ocs_hw_set(ocs_hw_t *hw, ocs_hw_property_e prop, uint32_t value)
+{
+ ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
+
+ switch (prop) {
+ case OCS_HW_N_IO:
+ if (value > sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI) ||
+ value == 0) {
+ ocs_log_test(hw->os, "IO value out of range %d vs %d\n",
+ value, sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI));
+ rc = OCS_HW_RTN_ERROR;
+ } else {
+ hw->config.n_io = value;
+ }
+ break;
+ case OCS_HW_N_SGL:
+ value += SLI4_SGE_MAX_RESERVED;
+ if (value > sli_get_max_sgl(&hw->sli)) {
+ ocs_log_test(hw->os, "SGL value out of range %d vs %d\n",
+ value, sli_get_max_sgl(&hw->sli));
+ rc = OCS_HW_RTN_ERROR;
+ } else {
+ hw->config.n_sgl = value;
+ }
+ break;
+ case OCS_HW_TOPOLOGY:
+ if ((sli_get_medium(&hw->sli) != SLI_LINK_MEDIUM_FC) &&
+ (value != OCS_HW_TOPOLOGY_AUTO)) {
+ ocs_log_test(hw->os, "unsupported topology=%#x medium=%#x\n",
+ value, sli_get_medium(&hw->sli));
+ rc = OCS_HW_RTN_ERROR;
+ break;
+ }
+
+ switch (value) {
+ case OCS_HW_TOPOLOGY_AUTO:
+ if (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC) {
+ sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FC);
+ } else {
+ sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FCOE);
+ }
+ break;
+ case OCS_HW_TOPOLOGY_NPORT:
+ sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FC_DA);
+ break;
+ case OCS_HW_TOPOLOGY_LOOP:
+ sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FC_AL);
+ break;
+ default:
+ ocs_log_test(hw->os, "unsupported topology %#x\n", value);
+ rc = OCS_HW_RTN_ERROR;
+ }
+ hw->config.topology = value;
+ break;
+ case OCS_HW_LINK_SPEED:
+ if (sli_get_medium(&hw->sli) != SLI_LINK_MEDIUM_FC) {
+ switch (value) {
+ case 0: /* Auto-speed negotiation */
+ case 10000: /* FCoE speed */
+ hw->config.speed = FC_LINK_SPEED_10G;
+ break;
+ default:
+ ocs_log_test(hw->os, "unsupported speed=%#x medium=%#x\n",
+ value, sli_get_medium(&hw->sli));
+ rc = OCS_HW_RTN_ERROR;
+ }
+ break;
+ }
+
+ switch (value) {
+ case 0: /* Auto-speed negotiation */
+ hw->config.speed = FC_LINK_SPEED_AUTO_16_8_4;
+ break;
+ case 2000: /* FC speeds */
+ hw->config.speed = FC_LINK_SPEED_2G;
+ break;
+ case 4000:
+ hw->config.speed = FC_LINK_SPEED_4G;
+ break;
+ case 8000:
+ hw->config.speed = FC_LINK_SPEED_8G;
+ break;
+ case 16000:
+ hw->config.speed = FC_LINK_SPEED_16G;
+ break;
+ case 32000:
+ hw->config.speed = FC_LINK_SPEED_32G;
+ break;
+ default:
+ ocs_log_test(hw->os, "unsupported speed %d\n", value);
+ rc = OCS_HW_RTN_ERROR;
+ }
+ break;
+ case OCS_HW_DIF_SEED:
+ /* Set the DIF seed - only for lancer right now */
+ if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
+ ocs_log_test(hw->os, "DIF seed not supported for this device\n");
+ rc = OCS_HW_RTN_ERROR;
+ } else {
+ hw->config.dif_seed = value;
+ }
+ break;
+ case OCS_HW_DIF_MODE:
+ switch (value) {
+ case OCS_HW_DIF_MODE_INLINE:
+ /*
+ * Make sure we support inline DIF.
+ *
+ * Note: Having both bits clear means that we have old
+ * FW that doesn't set the bits.
+ */
+ if (sli_is_dif_inline_capable(&hw->sli)) {
+ hw->config.dif_mode = value;
+ } else {
+ ocs_log_test(hw->os, "chip does not support DIF inline\n");
+ rc = OCS_HW_RTN_ERROR;
+ }
+ break;
+ case OCS_HW_DIF_MODE_SEPARATE:
+ /* Make sure we support DIF separates. */
+ if (sli_is_dif_separate_capable(&hw->sli)) {
+ hw->config.dif_mode = value;
+ } else {
+ ocs_log_test(hw->os, "chip does not support DIF separate\n");
+ rc = OCS_HW_RTN_ERROR;
+ }
+ }
+ break;
+ case OCS_HW_RQ_PROCESS_LIMIT: {
+ hw_rq_t *rq;
+ uint32_t i;
+
+ /* For each hw_rq object, set its parent CQ limit value */
+ for (i = 0; i < hw->hw_rq_count; i++) {
+ rq = hw->hw_rq[i];
+ hw->cq[rq->cq->instance].proc_limit = value;
+ }
+ break;
+ }
+ case OCS_HW_RQ_DEFAULT_BUFFER_SIZE:
+ hw->config.rq_default_buffer_size = value;
+ break;
+ case OCS_HW_AUTO_XFER_RDY_XRI_CNT:
+ hw->config.auto_xfer_rdy_xri_cnt = value;
+ break;
+ case OCS_HW_AUTO_XFER_RDY_SIZE:
+ hw->config.auto_xfer_rdy_size = value;
+ break;
+ case OCS_HW_AUTO_XFER_RDY_BLK_SIZE:
+ switch (value) {
+ case 512:
+ hw->config.auto_xfer_rdy_blk_size_chip = 0;
+ break;
+ case 1024:
+ hw->config.auto_xfer_rdy_blk_size_chip = 1;
+ break;
+ case 2048:
+ hw->config.auto_xfer_rdy_blk_size_chip = 2;
+ break;
+ case 4096:
+ hw->config.auto_xfer_rdy_blk_size_chip = 3;
+ break;
+ case 520:
+ hw->config.auto_xfer_rdy_blk_size_chip = 4;
+ break;
+ default:
+ ocs_log_err(hw->os, "Invalid block size %d\n",
+ value);
+ rc = OCS_HW_RTN_ERROR;
+ }
+ break;
+ case OCS_HW_AUTO_XFER_RDY_T10_ENABLE:
+ hw->config.auto_xfer_rdy_t10_enable = value;
+ break;
+ case OCS_HW_AUTO_XFER_RDY_P_TYPE:
+ hw->config.auto_xfer_rdy_p_type = value;
+ break;
+ case OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA:
+ hw->config.auto_xfer_rdy_ref_tag_is_lba = value;
+ break;
+ case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID:
+ hw->config.auto_xfer_rdy_app_tag_valid = value;
+ break;
+ case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE:
+ hw->config.auto_xfer_rdy_app_tag_value = value;
+ break;
+ case OCS_ESOC:
+		hw->config.esoc = value;
+		break;
+ case OCS_HW_HIGH_LOGIN_MODE:
+ rc = sli_set_hlm(&hw->sli, value);
+ break;
+ case OCS_HW_PREREGISTER_SGL:
+ rc = sli_set_sgl_preregister(&hw->sli, value);
+ break;
+ case OCS_HW_ETH_LICENSE:
+ hw->eth_license = value;
+ break;
+ case OCS_HW_EMULATE_I_ONLY_AAB:
+ hw->config.i_only_aab = value;
+ break;
+ case OCS_HW_EMULATE_TARGET_WQE_TIMEOUT:
+ hw->config.emulate_tgt_wqe_timeout = value;
+ break;
+ case OCS_HW_BOUNCE:
+ hw->config.bounce = value;
+ break;
+ case OCS_HW_RQ_SELECTION_POLICY:
+ hw->config.rq_selection_policy = value;
+ break;
+ case OCS_HW_RR_QUANTA:
+ hw->config.rr_quanta = value;
+ break;
+ default:
+ ocs_log_test(hw->os, "unsupported property %#x\n", prop);
+ rc = OCS_HW_RTN_ERROR;
+ }
+
+ return rc;
+}
+
+
+ocs_hw_rtn_e
+ocs_hw_set_ptr(ocs_hw_t *hw, ocs_hw_property_e prop, void *value)
+{
+ ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
+
+ switch (prop) {
+ case OCS_HW_WAR_VERSION:
+ hw->hw_war_version = value;
+ break;
+ case OCS_HW_FILTER_DEF: {
+ char *p = value;
+ uint32_t idx = 0;
+
+ for (idx = 0; idx < ARRAY_SIZE(hw->config.filter_def); idx++) {
+ hw->config.filter_def[idx] = 0;
+ }
+
+ for (idx = 0; (idx < ARRAY_SIZE(hw->config.filter_def)) && (p != NULL) && *p; ) {
+ hw->config.filter_def[idx++] = ocs_strtoul(p, 0, 0);
+ p = ocs_strchr(p, ',');
+ if (p != NULL) {
+ p++;
+ }
+ }
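+		/*
+		 * Illustrative example with a hypothetical value: the string
+		 * "1,2,3" sets filter_def[0..2] to 1, 2 and 3 and leaves the
+		 * remaining entries zero.
+		 */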
+
+ break;
+ }
+ default:
+ ocs_log_test(hw->os, "unsupported property %#x\n", prop);
+ rc = OCS_HW_RTN_ERROR;
+ break;
+ }
+ return rc;
+}
+/**
+ * @ingroup interrupt
+ * @brief Check for the events associated with the interrupt vector.
+ *
+ * @param hw Hardware context.
+ * @param vector Zero-based interrupt vector number.
+ *
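+ * @par Example
+ * Minimal polling sketch (illustrative only; the 1000 msec budget is an
+ * arbitrary example value):
+ * @code
+ * if (ocs_hw_event_check(hw, vector) == 0) {
+ *         // a return of 0 means the EQ has entries to process
+ *         ocs_hw_process(hw, vector, 1000);
+ * }
+ * @endcode
+ *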
+ * @return Returns 0 if the EQ has entries to process, a non-zero value if it
+ * is empty (the EQ is re-armed in that case), or -1 on a bad parameter.
+ */
+int32_t
+ocs_hw_event_check(ocs_hw_t *hw, uint32_t vector)
+{
+ int32_t rc = 0;
+
+ if (!hw) {
+ ocs_log_err(NULL, "HW context NULL?!?\n");
+ return -1;
+ }
+
+	if (vector >= hw->eq_count) {
+		ocs_log_err(hw->os, "vector %d out of range, max %d\n",
+			vector, hw->eq_count);
+ return -1;
+ }
+
+ /*
+ * The caller should disable interrupts if they wish to prevent us
+ * from processing during a shutdown. The following states are defined:
+ * OCS_HW_STATE_UNINITIALIZED - No queues allocated
+ * OCS_HW_STATE_QUEUES_ALLOCATED - The state after a chip reset,
+ * queues are cleared.
+ * OCS_HW_STATE_ACTIVE - Chip and queues are operational
+ * OCS_HW_STATE_RESET_IN_PROGRESS - reset, we still want completions
+ * OCS_HW_STATE_TEARDOWN_IN_PROGRESS - We still want mailbox
+ * completions.
+ */
+ if (hw->state != OCS_HW_STATE_UNINITIALIZED) {
+ rc = sli_queue_is_empty(&hw->sli, &hw->eq[vector]);
+
+ /* Re-arm queue if there are no entries */
+ if (rc != 0) {
+ sli_queue_arm(&hw->sli, &hw->eq[vector], TRUE);
+ }
+ }
+ return rc;
+}
+
+void
+ocs_hw_unsol_process_bounce(void *arg)
+{
+ ocs_hw_sequence_t *seq = arg;
+ ocs_hw_t *hw = seq->hw;
+
+ ocs_hw_assert(hw != NULL);
+ ocs_hw_assert(hw->callback.unsolicited != NULL);
+
+ hw->callback.unsolicited(hw->args.unsolicited, seq);
+}
+
+int32_t
+ocs_hw_process(ocs_hw_t *hw, uint32_t vector, uint32_t max_isr_time_msec)
+{
+ hw_eq_t *eq;
+ int32_t rc = 0;
+
+ CPUTRACE("");
+
+ /*
+ * The caller should disable interrupts if they wish to prevent us
+ * from processing during a shutdown. The following states are defined:
+ * OCS_HW_STATE_UNINITIALIZED - No queues allocated
+ * OCS_HW_STATE_QUEUES_ALLOCATED - The state after a chip reset,
+ * queues are cleared.
+ * OCS_HW_STATE_ACTIVE - Chip and queues are operational
+ * OCS_HW_STATE_RESET_IN_PROGRESS - reset, we still want completions
+ * OCS_HW_STATE_TEARDOWN_IN_PROGRESS - We still want mailbox
+ * completions.
+ */
+ if (hw->state == OCS_HW_STATE_UNINITIALIZED) {
+ return 0;
+ }
+
+ /* Get pointer to hw_eq_t */
+ eq = hw->hw_eq[vector];
+
+ OCS_STAT(eq->use_count++);
+
+ rc = ocs_hw_eq_process(hw, eq, max_isr_time_msec);
+
+ return rc;
+}
+
+/**
+ * @ingroup interrupt
+ * @brief Process events associated with an EQ.
+ *
+ * @par Description
+ * Loop termination:
+ * @n @n Without a mechanism to terminate the completion processing loop, it
+ * is possible under some workload conditions for the loop to never terminate
+ * (or at least take longer than the OS is happy to have an interrupt handler
+ * or kernel thread context hold a CPU without yielding).
+ * @n @n The approach taken here is to periodically check how much time
+ * we have been in this
+ * processing loop, and if we exceed a predetermined time (multiple seconds), the
+ * loop is terminated, and ocs_hw_process() returns.
+ *
+ * @param hw Hardware context.
+ * @param eq Pointer to HW EQ object.
+ * @param max_isr_time_msec Maximum time in msec to stay in this function.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+int32_t
+ocs_hw_eq_process(ocs_hw_t *hw, hw_eq_t *eq, uint32_t max_isr_time_msec)
+{
+ uint8_t eqe[sizeof(sli4_eqe_t)] = { 0 };
+ uint32_t done = FALSE;
+ uint32_t tcheck_count;
+ time_t tstart;
+ time_t telapsed;
+
+ tcheck_count = OCS_HW_TIMECHECK_ITERATIONS;
+ tstart = ocs_msectime();
+
+ CPUTRACE("");
+
+ while (!done && !sli_queue_read(&hw->sli, eq->queue, eqe)) {
+ uint16_t cq_id = 0;
+ int32_t rc;
+
+ rc = sli_eq_parse(&hw->sli, eqe, &cq_id);
+ if (unlikely(rc)) {
+ if (rc > 0) {
+ uint32_t i;
+
+ /*
+ * Received a sentinel EQE indicating the EQ is full.
+ * Process all CQs
+ */
+ for (i = 0; i < hw->cq_count; i++) {
+ ocs_hw_cq_process(hw, hw->hw_cq[i]);
+ }
+ continue;
+ } else {
+ return rc;
+ }
+ } else {
+ int32_t index = ocs_hw_queue_hash_find(hw->cq_hash, cq_id);
+ if (likely(index >= 0)) {
+ ocs_hw_cq_process(hw, hw->hw_cq[index]);
+ } else {
+ ocs_log_err(hw->os, "bad CQ_ID %#06x\n", cq_id);
+ }
+ }
+
+ if (eq->queue->n_posted > (eq->queue->posted_limit)) {
+ sli_queue_arm(&hw->sli, eq->queue, FALSE);
+ }
+
+ if (tcheck_count && (--tcheck_count == 0)) {
+ tcheck_count = OCS_HW_TIMECHECK_ITERATIONS;
+ telapsed = ocs_msectime() - tstart;
+ if (telapsed >= max_isr_time_msec) {
+ done = TRUE;
+ }
+ }
+ }
+ sli_queue_eq_arm(&hw->sli, eq->queue, TRUE);
+
+ return 0;
+}
+
+/**
+ * @brief Submit queued (pending) mbx commands.
+ *
+ * @par Description
+ * Submit queued mailbox commands.
+ * --- Assumes that hw->cmd_lock is held ---
+ *
+ * @param hw Hardware context.
+ *
+ * @return Returns 0 on success, or a negative error code value on failure.
+ */
+static int32_t
+ocs_hw_cmd_submit_pending(ocs_hw_t *hw)
+{
+ ocs_command_ctx_t *ctx;
+ int32_t rc = 0;
+
+ /* Assumes lock held */
+
+ /* Only submit MQE if there's room */
+ while (hw->cmd_head_count < (OCS_HW_MQ_DEPTH - 1)) {
+ ctx = ocs_list_remove_head(&hw->cmd_pending);
+ if (ctx == NULL) {
+ break;
+ }
+ ocs_list_add_tail(&hw->cmd_head, ctx);
+ hw->cmd_head_count++;
+ if (sli_queue_write(&hw->sli, hw->mq, ctx->buf) < 0) {
+ ocs_log_test(hw->os, "sli_queue_write failed: %d\n", rc);
+ rc = -1;
+ break;
+ }
+ }
+ return rc;
+}
+
+/**
+ * @ingroup io
+ * @brief Issue a SLI command.
+ *
+ * @par Description
+ * Send a mailbox command to the hardware, and either wait for a completion
+ * (OCS_CMD_POLL) or get an optional asynchronous completion (OCS_CMD_NOWAIT).
+ *
+ * @param hw Hardware context.
+ * @param cmd Buffer containing a formatted command and results.
+ * @param opts Command options:
+ * - OCS_CMD_POLL - Command executes synchronously and busy-waits for the completion.
+ * - OCS_CMD_NOWAIT - Command executes asynchronously. Uses callback.
+ * @param cb Function callback used for asynchronous mode. May be NULL.
+ * @n Prototype is <tt>(*cb)(void *arg, uint8_t *cmd)</tt>.
+ * @n @n @b Note: If the
+ * callback function pointer is NULL, the results of the command are silently
+ * discarded, allowing this pointer to exist solely on the stack.
+ * @param arg Argument passed to an asynchronous callback.
+ *
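+ * @par Example
+ * Minimal polled-mode sketch (illustrative; the caller must first format a
+ * valid mailbox command into the buffer, e.g. with one of the sli_cmd_*
+ * builders used elsewhere in this file):
+ * @code
+ * uint8_t buf[SLI4_BMBX_SIZE];
+ * // ... format a mailbox command into buf ...
+ * if (ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL) == OCS_HW_RTN_SUCCESS) {
+ *         // the completed response has been copied back into buf
+ * }
+ * @endcode
+ *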
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+ocs_hw_rtn_e
+ocs_hw_command(ocs_hw_t *hw, uint8_t *cmd, uint32_t opts, void *cb, void *arg)
+{
+ ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
+
+ /*
+ * If the chip is in an error state (UE'd) then reject this mailbox
+ * command.
+ */
+ if (sli_fw_error_status(&hw->sli) > 0) {
+ uint32_t err1 = sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR1);
+ uint32_t err2 = sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR2);
+ if (hw->expiration_logged == 0 && err1 == 0x2 && err2 == 0x10) {
+ hw->expiration_logged = 1;
+ ocs_log_crit(hw->os,"Emulex: Heartbeat expired after %d seconds\n",
+ hw->watchdog_timeout);
+ }
+ ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
+ ocs_log_crit(hw->os, "status=%#x error1=%#x error2=%#x\n",
+ sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_STATUS),
+ err1, err2);
+
+ return OCS_HW_RTN_ERROR;
+ }
+
+ if (OCS_CMD_POLL == opts) {
+
+ ocs_lock(&hw->cmd_lock);
+ if (hw->mq->length && !sli_queue_is_empty(&hw->sli, hw->mq)) {
+ /*
+ * Can't issue Boot-strap mailbox command with other
+ * mail-queue commands pending as this interaction is
+ * undefined
+ */
+ rc = OCS_HW_RTN_ERROR;
+ } else {
+ void *bmbx = hw->sli.bmbx.virt;
+
+ ocs_memset(bmbx, 0, SLI4_BMBX_SIZE);
+ ocs_memcpy(bmbx, cmd, SLI4_BMBX_SIZE);
+
+ if (sli_bmbx_command(&hw->sli) == 0) {
+ rc = OCS_HW_RTN_SUCCESS;
+ ocs_memcpy(cmd, bmbx, SLI4_BMBX_SIZE);
+ }
+ }
+ ocs_unlock(&hw->cmd_lock);
+ } else if (OCS_CMD_NOWAIT == opts) {
+ ocs_command_ctx_t *ctx = NULL;
+
+ ctx = ocs_malloc(hw->os, sizeof(ocs_command_ctx_t), OCS_M_ZERO | OCS_M_NOWAIT);
+ if (!ctx) {
+ ocs_log_err(hw->os, "can't allocate command context\n");
+ return OCS_HW_RTN_NO_RESOURCES;
+ }
+
+ if (hw->state != OCS_HW_STATE_ACTIVE) {
+ ocs_log_err(hw->os, "Can't send command, HW state=%d\n", hw->state);
+ ocs_free(hw->os, ctx, sizeof(*ctx));
+ return OCS_HW_RTN_ERROR;
+ }
+
+ if (cb) {
+ ctx->cb = cb;
+ ctx->arg = arg;
+ }
+ ctx->buf = cmd;
+ ctx->ctx = hw;
+
+ ocs_lock(&hw->cmd_lock);
+
+ /* Add to pending list */
+ ocs_list_add_tail(&hw->cmd_pending, ctx);
+
+ /* Submit as much of the pending list as we can */
+ if (ocs_hw_cmd_submit_pending(hw) == 0) {
+ rc = OCS_HW_RTN_SUCCESS;
+ }
+
+ ocs_unlock(&hw->cmd_lock);
+ }
+
+ return rc;
+}
+
+/**
+ * @ingroup devInitShutdown
+ * @brief Register a callback for the given event.
+ *
+ * @param hw Hardware context.
+ * @param which Event of interest.
+ * @param func Function to call when the event occurs.
+ * @param arg Argument passed to the callback function.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
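+ * @par Example
+ * Illustrative registration of a domain event handler (the handler and
+ * argument names are hypothetical):
+ * @code
+ * ocs_hw_callback(hw, OCS_HW_CB_DOMAIN, my_domain_cb, my_domain_arg);
+ * @endcode
+ *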
+ */
+ocs_hw_rtn_e
+ocs_hw_callback(ocs_hw_t *hw, ocs_hw_callback_e which, void *func, void *arg)
+{
+
+ if (!hw || !func || (which >= OCS_HW_CB_MAX)) {
+ ocs_log_err(NULL, "bad parameter hw=%p which=%#x func=%p\n",
+ hw, which, func);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ switch (which) {
+ case OCS_HW_CB_DOMAIN:
+ hw->callback.domain = func;
+ hw->args.domain = arg;
+ break;
+ case OCS_HW_CB_PORT:
+ hw->callback.port = func;
+ hw->args.port = arg;
+ break;
+ case OCS_HW_CB_UNSOLICITED:
+ hw->callback.unsolicited = func;
+ hw->args.unsolicited = arg;
+ break;
+ case OCS_HW_CB_REMOTE_NODE:
+ hw->callback.rnode = func;
+ hw->args.rnode = arg;
+ break;
+ case OCS_HW_CB_BOUNCE:
+ hw->callback.bounce = func;
+ hw->args.bounce = arg;
+ break;
+ default:
+ ocs_log_test(hw->os, "unknown callback %#x\n", which);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ return OCS_HW_RTN_SUCCESS;
+}
+
+/**
+ * @ingroup port
+ * @brief Allocate a port object.
+ *
+ * @par Description
+ * This function allocates a VPI object for the port and stores it in the
+ * indicator field of the port object.
+ *
+ * @param hw Hardware context.
+ * @param sport SLI port object used to connect to the domain.
+ * @param domain Domain object associated with this port (may be NULL).
+ * @param wwpn Port's WWPN in big-endian order, or NULL to use default.
+ *
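+ * @par Example
+ * Illustrative allocation on an existing domain using the default WWPN:
+ * @code
+ * if (ocs_hw_port_alloc(hw, sport, domain, NULL) != OCS_HW_RTN_SUCCESS) {
+ *         // handle the allocation failure
+ * }
+ * @endcode
+ *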
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+ocs_hw_rtn_e
+ocs_hw_port_alloc(ocs_hw_t *hw, ocs_sli_port_t *sport, ocs_domain_t *domain,
+ uint8_t *wwpn)
+{
+ uint8_t *cmd = NULL;
+ ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
+ uint32_t index;
+
+ sport->indicator = UINT32_MAX;
+ sport->hw = hw;
+ sport->ctx.app = sport;
+ sport->sm_free_req_pending = 0;
+
+ /*
+ * Check if the chip is in an error state (UE'd) before proceeding.
+ */
+ if (sli_fw_error_status(&hw->sli) > 0) {
+ ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
+ return OCS_HW_RTN_ERROR;
+ }
+
+ if (wwpn) {
+ ocs_memcpy(&sport->sli_wwpn, wwpn, sizeof(sport->sli_wwpn));
+ }
+
+ if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_VPI, &sport->indicator, &index)) {
+ ocs_log_err(hw->os, "FCOE_VPI allocation failure\n");
+ return OCS_HW_RTN_ERROR;
+ }
+
+ if (domain != NULL) {
+ ocs_sm_function_t next = NULL;
+
+ cmd = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
+ if (!cmd) {
+ ocs_log_err(hw->os, "command memory allocation failed\n");
+ rc = OCS_HW_RTN_NO_MEMORY;
+ goto ocs_hw_port_alloc_out;
+ }
+
+ /* If the WWPN is NULL, fetch the default WWPN and WWNN before
+ * initializing the VPI
+ */
+ if (!wwpn) {
+ next = __ocs_hw_port_alloc_read_sparm64;
+ } else {
+ next = __ocs_hw_port_alloc_init_vpi;
+ }
+
+ ocs_sm_transition(&sport->ctx, next, cmd);
+ } else if (!wwpn) {
+ /* This is the convention for the HW, not SLI */
+ ocs_log_test(hw->os, "need WWN for physical port\n");
+ rc = OCS_HW_RTN_ERROR;
+ } else {
+ /* domain NULL and wwpn non-NULL */
+ ocs_sm_transition(&sport->ctx, __ocs_hw_port_alloc_init, NULL);
+ }
+
+ocs_hw_port_alloc_out:
+ if (rc != OCS_HW_RTN_SUCCESS) {
+ ocs_free(hw->os, cmd, SLI4_BMBX_SIZE);
+
+ sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator);
+ }
+
+ return rc;
+}
+
+/**
+ * @ingroup port
+ * @brief Attach a physical/virtual SLI port to a domain.
+ *
+ * @par Description
+ * This function registers a previously-allocated VPI with the
+ * device.
+ *
+ * @param hw Hardware context.
+ * @param sport Pointer to the SLI port object.
+ * @param fc_id Fibre Channel ID to associate with this port.
+ *
+ * @return Returns OCS_HW_RTN_SUCCESS on success, or an error code on failure.
+ */
+ocs_hw_rtn_e
+ocs_hw_port_attach(ocs_hw_t *hw, ocs_sli_port_t *sport, uint32_t fc_id)
+{
+ uint8_t *buf = NULL;
+ ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
+
+ if (!hw || !sport) {
+ ocs_log_err(hw ? hw->os : NULL,
+ "bad parameter(s) hw=%p sport=%p\n", hw,
+ sport);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ /*
+ * Check if the chip is in an error state (UE'd) before proceeding.
+ */
+ if (sli_fw_error_status(&hw->sli) > 0) {
+ ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
+ return OCS_HW_RTN_ERROR;
+ }
+
+ buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
+ if (!buf) {
+ ocs_log_err(hw->os, "no buffer for command\n");
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ sport->fc_id = fc_id;
+ ocs_sm_post_event(&sport->ctx, OCS_EVT_HW_PORT_REQ_ATTACH, buf);
+ return rc;
+}
+
+/**
+ * @brief Called when the port control command completes.
+ *
+ * @par Description
+ * We only need to free the mailbox command buffer.
+ *
+ * @param hw Hardware context.
+ * @param status Status field from the mbox completion.
+ * @param mqe Mailbox response structure.
+ * @param arg Pointer to a callback function that signals the caller that the command is done.
+ *
+ * @return Returns 0.
+ */
+static int32_t
+ocs_hw_cb_port_control(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
+{
+ ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
+ return 0;
+}
+
+/**
+ * @ingroup port
+ * @brief Control a port (initialize, shutdown, or set link configuration).
+ *
+ * @par Description
+ * This function controls a port depending on the @c ctrl parameter:
+ * - @b OCS_HW_PORT_INIT -
+ * Issues the CONFIG_LINK and INIT_LINK commands for the specified port.
+ * The HW generates an OCS_HW_DOMAIN_FOUND event when the link comes up.
+ * .
+ * - @b OCS_HW_PORT_SHUTDOWN -
+ * Issues the DOWN_LINK command for the specified port.
+ * The HW generates an OCS_HW_DOMAIN_LOST event when the link is down.
+ * .
+ * - @b OCS_HW_PORT_SET_LINK_CONFIG -
+ * Sets the link configuration.
+ *
+ * @param hw Hardware context.
+ * @param ctrl Specifies the operation:
+ * - OCS_HW_PORT_INIT
+ * - OCS_HW_PORT_SHUTDOWN
+ * - OCS_HW_PORT_SET_LINK_CONFIG
+ *
+ * @param value Operation-specific value.
+ * - OCS_HW_PORT_INIT - Selective reset AL_PA
+ * - OCS_HW_PORT_SHUTDOWN - N/A
+ * - OCS_HW_PORT_SET_LINK_CONFIG - An enum #ocs_hw_linkcfg_e value.
+ *
+ * @param cb Callback function invoked when the operation completes.
+ * - OCS_HW_PORT_INIT/OCS_HW_PORT_SHUTDOWN - NULL (link events
+ * are handled by the OCS_HW_CB_DOMAIN callbacks).
+ * - OCS_HW_PORT_SET_LINK_CONFIG - Invoked after linkcfg mailbox command
+ * completes.
+ *
+ * @param arg Argument passed to the callback.
+ *  - OCS_HW_PORT_INIT/OCS_HW_PORT_SHUTDOWN - NULL (link events
+ * are handled by the OCS_HW_CB_DOMAIN callbacks).
+ *  - OCS_HW_PORT_SET_LINK_CONFIG - Passed to the linkcfg callback when
+ * it is invoked.
+ *
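+ * @par Example
+ * Illustrative link bring-up with no selective-reset AL_PA and no callback:
+ * @code
+ * ocs_hw_port_control(hw, OCS_HW_PORT_INIT, 0, NULL, NULL);
+ * @endcode
+ *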
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+ocs_hw_rtn_e
+ocs_hw_port_control(ocs_hw_t *hw, ocs_hw_port_e ctrl, uintptr_t value, ocs_hw_port_control_cb_t cb, void *arg)
+{
+ ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
+
+ switch (ctrl) {
+ case OCS_HW_PORT_INIT:
+ {
+ uint8_t *init_link;
+ uint32_t speed = 0;
+ uint8_t reset_alpa = 0;
+
+ if (SLI_LINK_MEDIUM_FC == sli_get_medium(&hw->sli)) {
+ uint8_t *cfg_link;
+
+ cfg_link = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
+ if (cfg_link == NULL) {
+ ocs_log_err(hw->os, "no buffer for command\n");
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ if (sli_cmd_config_link(&hw->sli, cfg_link, SLI4_BMBX_SIZE)) {
+ rc = ocs_hw_command(hw, cfg_link, OCS_CMD_NOWAIT,
+ ocs_hw_cb_port_control, NULL);
+ }
+
+ if (rc != OCS_HW_RTN_SUCCESS) {
+ ocs_free(hw->os, cfg_link, SLI4_BMBX_SIZE);
+ ocs_log_err(hw->os, "CONFIG_LINK failed\n");
+ break;
+ }
+ speed = hw->config.speed;
+ reset_alpa = (uint8_t)(value & 0xff);
+ } else {
+ speed = FC_LINK_SPEED_10G;
+ }
+
+ /*
+ * Bring link up, unless FW version is not supported
+ */
+ if (hw->workaround.fw_version_too_low) {
+ if (SLI4_IF_TYPE_LANCER_FC_ETH == hw->sli.if_type) {
+ ocs_log_err(hw->os, "Cannot bring up link. Please update firmware to %s or later (current version is %s)\n",
+ OCS_FW_VER_STR(OCS_MIN_FW_VER_LANCER), (char *) sli_get_fw_name(&hw->sli,0));
+ } else {
+ ocs_log_err(hw->os, "Cannot bring up link. Please update firmware to %s or later (current version is %s)\n",
+ OCS_FW_VER_STR(OCS_MIN_FW_VER_SKYHAWK), (char *) sli_get_fw_name(&hw->sli, 0));
+ }
+
+ return OCS_HW_RTN_ERROR;
+ }
+
+ rc = OCS_HW_RTN_ERROR;
+
+ /* Allocate a new buffer for the init_link command */
+ init_link = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
+ if (init_link == NULL) {
+ ocs_log_err(hw->os, "no buffer for command\n");
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ if (sli_cmd_init_link(&hw->sli, init_link, SLI4_BMBX_SIZE, speed, reset_alpa)) {
+ rc = ocs_hw_command(hw, init_link, OCS_CMD_NOWAIT,
+ ocs_hw_cb_port_control, NULL);
+ }
+ /* Free buffer on error, since no callback is coming */
+ if (rc != OCS_HW_RTN_SUCCESS) {
+ ocs_free(hw->os, init_link, SLI4_BMBX_SIZE);
+ ocs_log_err(hw->os, "INIT_LINK failed\n");
+ }
+ break;
+ }
+ case OCS_HW_PORT_SHUTDOWN:
+ {
+ uint8_t *down_link;
+
+ down_link = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
+ if (down_link == NULL) {
+ ocs_log_err(hw->os, "no buffer for command\n");
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+ if (sli_cmd_down_link(&hw->sli, down_link, SLI4_BMBX_SIZE)) {
+ rc = ocs_hw_command(hw, down_link, OCS_CMD_NOWAIT,
+ ocs_hw_cb_port_control, NULL);
+ }
+ /* Free buffer on error, since no callback is coming */
+ if (rc != OCS_HW_RTN_SUCCESS) {
+ ocs_free(hw->os, down_link, SLI4_BMBX_SIZE);
+ ocs_log_err(hw->os, "DOWN_LINK failed\n");
+ }
+ break;
+ }
+ case OCS_HW_PORT_SET_LINK_CONFIG:
+ rc = ocs_hw_set_linkcfg(hw, (ocs_hw_linkcfg_e)value, OCS_CMD_NOWAIT, cb, arg);
+ break;
+ default:
+ ocs_log_test(hw->os, "unhandled control %#x\n", ctrl);
+ break;
+ }
+
+ return rc;
+}
+
+
+/**
+ * @ingroup port
+ * @brief Free port resources.
+ *
+ * @par Description
+ * Issue the UNREG_VPI command to free the assigned VPI context.
+ *
+ * @param hw Hardware context.
+ * @param sport SLI port object used to connect to the domain.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+ocs_hw_rtn_e
+ocs_hw_port_free(ocs_hw_t *hw, ocs_sli_port_t *sport)
+{
+ ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
+
+ if (!hw || !sport) {
+ ocs_log_err(hw ? hw->os : NULL,
+ "bad parameter(s) hw=%p sport=%p\n", hw,
+ sport);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ /*
+ * Check if the chip is in an error state (UE'd) before proceeding.
+ */
+ if (sli_fw_error_status(&hw->sli) > 0) {
+ ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
+ return OCS_HW_RTN_ERROR;
+ }
+
+ ocs_sm_post_event(&sport->ctx, OCS_EVT_HW_PORT_REQ_FREE, NULL);
+ return rc;
+}
+
+/**
+ * @ingroup domain
+ * @brief Allocate a fabric domain object.
+ *
+ * @par Description
+ * This function starts a series of commands needed to connect to the domain, including
+ * - REG_FCFI
+ * - INIT_VFI
+ * - READ_SPARMS
+ * .
+ * @b Note: Not all SLI interface types use all of the above commands.
+ * @n @n Upon successful allocation, the HW generates a OCS_HW_DOMAIN_ALLOC_OK
+ * event. On failure, it generates a OCS_HW_DOMAIN_ALLOC_FAIL event.
+ *
+ * @param hw Hardware context.
+ * @param domain Pointer to the domain object.
+ * @param fcf FCF index.
+ * @param vlan VLAN ID.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+ocs_hw_rtn_e
+ocs_hw_domain_alloc(ocs_hw_t *hw, ocs_domain_t *domain, uint32_t fcf, uint32_t vlan)
+{
+ uint8_t *cmd = NULL;
+ uint32_t index;
+
+ if (!hw || !domain || !domain->sport) {
+ ocs_log_err(NULL, "bad parameter(s) hw=%p domain=%p sport=%p\n",
+ hw, domain, domain ? domain->sport : NULL);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ /*
+ * Check if the chip is in an error state (UE'd) before proceeding.
+ */
+ if (sli_fw_error_status(&hw->sli) > 0) {
+ ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
+ return OCS_HW_RTN_ERROR;
+ }
+
+ cmd = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
+ if (!cmd) {
+ ocs_log_err(hw->os, "command memory allocation failed\n");
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ domain->dma = hw->domain_dmem;
+
+ domain->hw = hw;
+ domain->sm.app = domain;
+ domain->fcf = fcf;
+ domain->fcf_indicator = UINT32_MAX;
+ domain->vlan_id = vlan;
+ domain->indicator = UINT32_MAX;
+
+ if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_VFI, &domain->indicator, &index)) {
+ ocs_log_err(hw->os, "FCOE_VFI allocation failure\n");
+
+ ocs_free(hw->os, cmd, SLI4_BMBX_SIZE);
+
+ return OCS_HW_RTN_ERROR;
+ }
+
+ ocs_sm_transition(&domain->sm, __ocs_hw_domain_init, cmd);
+ return OCS_HW_RTN_SUCCESS;
+}
+
+/**
+ * @ingroup domain
+ * @brief Attach a SLI port to a domain.
+ *
+ * @param hw Hardware context.
+ * @param domain Pointer to the domain object.
+ * @param fc_id Fibre Channel ID to associate with this port.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+ocs_hw_rtn_e
+ocs_hw_domain_attach(ocs_hw_t *hw, ocs_domain_t *domain, uint32_t fc_id)
+{
+ uint8_t *buf = NULL;
+ ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
+
+ if (!hw || !domain) {
+ ocs_log_err(hw ? hw->os : NULL,
+ "bad parameter(s) hw=%p domain=%p\n",
+ hw, domain);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ /*
+ * Check if the chip is in an error state (UE'd) before proceeding.
+ */
+ if (sli_fw_error_status(&hw->sli) > 0) {
+ ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
+ return OCS_HW_RTN_ERROR;
+ }
+
+ buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
+ if (!buf) {
+ ocs_log_err(hw->os, "no buffer for command\n");
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ domain->sport->fc_id = fc_id;
+ ocs_sm_post_event(&domain->sm, OCS_EVT_HW_DOMAIN_REQ_ATTACH, buf);
+ return rc;
+}
+
+/**
+ * @ingroup domain
+ * @brief Free a fabric domain object.
+ *
+ * @par Description
+ * Free both the driver and SLI port resources associated with the domain.
+ *
+ * @param hw Hardware context.
+ * @param domain Pointer to the domain object.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+ocs_hw_rtn_e
+ocs_hw_domain_free(ocs_hw_t *hw, ocs_domain_t *domain)
+{
+ ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
+
+ if (!hw || !domain) {
+ ocs_log_err(hw ? hw->os : NULL,
+ "bad parameter(s) hw=%p domain=%p\n",
+ hw, domain);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ /*
+ * Check if the chip is in an error state (UE'd) before proceeding.
+ */
+ if (sli_fw_error_status(&hw->sli) > 0) {
+ ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
+ return OCS_HW_RTN_ERROR;
+ }
+
+ ocs_sm_post_event(&domain->sm, OCS_EVT_HW_DOMAIN_REQ_FREE, NULL);
+ return rc;
+}
+
+/**
+ * @ingroup domain
+ * @brief Free a fabric domain object.
+ *
+ * @par Description
+ * Free the driver resources associated with the domain. The difference between
+ * this call and ocs_hw_domain_free() is that this call assumes resources no longer
+ * exist on the SLI port, due to a reset or after some error conditions.
+ *
+ * @param hw Hardware context.
+ * @param domain Pointer to the domain object.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+ocs_hw_rtn_e
+ocs_hw_domain_force_free(ocs_hw_t *hw, ocs_domain_t *domain)
+{
+ if (!hw || !domain) {
+ ocs_log_err(NULL, "bad parameter(s) hw=%p domain=%p\n", hw, domain);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI, domain->indicator);
+
+ return OCS_HW_RTN_SUCCESS;
+}
+
+/**
+ * @ingroup node
+ * @brief Allocate a remote node object.
+ *
+ * @param hw Hardware context.
+ * @param rnode Allocated remote node object to initialize.
+ * @param fc_addr FC address of the remote node.
+ * @param sport SLI port used to connect to remote node.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+ocs_hw_rtn_e
+ocs_hw_node_alloc(ocs_hw_t *hw, ocs_remote_node_t *rnode, uint32_t fc_addr,
+ ocs_sli_port_t *sport)
+{
+	/* Fail if the node's RPI indicator has already been allocated */
+ if (UINT32_MAX != rnode->indicator) {
+ ocs_log_err(hw->os, "FCOE_RPI allocation failure addr=%#x rpi=%#x\n",
+ fc_addr, rnode->indicator);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ /*
+ * Check if the chip is in an error state (UE'd) before proceeding.
+ */
+ if (sli_fw_error_status(&hw->sli) > 0) {
+ ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
+ return OCS_HW_RTN_ERROR;
+ }
+
+ /* NULL SLI port indicates an unallocated remote node */
+ rnode->sport = NULL;
+
+ if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_RPI, &rnode->indicator, &rnode->index)) {
+ ocs_log_err(hw->os, "FCOE_RPI allocation failure addr=%#x\n",
+ fc_addr);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ rnode->fc_id = fc_addr;
+ rnode->sport = sport;
+
+ return OCS_HW_RTN_SUCCESS;
+}
+
+/**
+ * @ingroup node
+ * @brief Update a remote node object with the remote port's service parameters.
+ *
+ * @param hw Hardware context.
+ * @param rnode Allocated remote node object to initialize.
+ * @param sparms DMA buffer containing the remote port's service parameters.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+ocs_hw_rtn_e
+ocs_hw_node_attach(ocs_hw_t *hw, ocs_remote_node_t *rnode, ocs_dma_t *sparms)
+{
+ ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
+ uint8_t *buf = NULL;
+ uint32_t count = 0;
+
+ if (!hw || !rnode || !sparms) {
+ ocs_log_err(NULL, "bad parameter(s) hw=%p rnode=%p sparms=%p\n",
+ hw, rnode, sparms);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ /*
+ * Check if the chip is in an error state (UE'd) before proceeding.
+ */
+ if (sli_fw_error_status(&hw->sli) > 0) {
+ ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
+ return OCS_HW_RTN_ERROR;
+ }
+
+ buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
+ if (!buf) {
+ ocs_log_err(hw->os, "no buffer for command\n");
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ /*
+ * If the attach count is non-zero, this RPI has already been registered.
+ * Otherwise, register the RPI
+ */
+ if (rnode->index == UINT32_MAX) {
+ ocs_log_err(NULL, "bad parameter rnode->index invalid\n");
+ ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
+ return OCS_HW_RTN_ERROR;
+ }
+ count = ocs_atomic_add_return(&hw->rpi_ref[rnode->index].rpi_count, 1);
+ if (count) {
+ /*
+ * Can't attach multiple FC_ID's to a node unless High Login
+ * Mode is enabled
+ */
+ if (sli_get_hlm(&hw->sli) == FALSE) {
+ ocs_log_test(hw->os, "attach to already attached node HLM=%d count=%d\n",
+ sli_get_hlm(&hw->sli), count);
+ rc = OCS_HW_RTN_SUCCESS;
+ } else {
+ rnode->node_group = TRUE;
+ rnode->attached = ocs_atomic_read(&hw->rpi_ref[rnode->index].rpi_attached);
+ rc = rnode->attached ? OCS_HW_RTN_SUCCESS_SYNC : OCS_HW_RTN_SUCCESS;
+ }
+ } else {
+ rnode->node_group = FALSE;
+
+ ocs_display_sparams("", "reg rpi", 0, NULL, sparms->virt);
+ if (sli_cmd_reg_rpi(&hw->sli, buf, SLI4_BMBX_SIZE, rnode->fc_id,
+ rnode->indicator, rnode->sport->indicator,
+ sparms, 0, (hw->auto_xfer_rdy_enabled && hw->config.auto_xfer_rdy_t10_enable))) {
+ rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT,
+ ocs_hw_cb_node_attach, rnode);
+ }
+ }
+
+ if (count || rc) {
+ if (rc < OCS_HW_RTN_SUCCESS) {
+ ocs_atomic_sub_return(&hw->rpi_ref[rnode->index].rpi_count, 1);
+ ocs_log_err(hw->os, "%s error\n", count ? "HLM" : "REG_RPI");
+ }
+ ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
+ }
+
+ return rc;
+}
+
+/**
+ * @ingroup node
+ * @brief Free a remote node resource.
+ *
+ * @param hw Hardware context.
+ * @param rnode Remote node object to free.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+ocs_hw_rtn_e
+ocs_hw_node_free_resources(ocs_hw_t *hw, ocs_remote_node_t *rnode)
+{
+ ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
+
+ if (!hw || !rnode) {
+ ocs_log_err(NULL, "bad parameter(s) hw=%p rnode=%p\n",
+ hw, rnode);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ if (rnode->sport) {
+ if (!rnode->attached) {
+ if (rnode->indicator != UINT32_MAX) {
+ if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, rnode->indicator)) {
+ ocs_log_err(hw->os, "FCOE_RPI free failure RPI %d addr=%#x\n",
+ rnode->indicator, rnode->fc_id);
+ rc = OCS_HW_RTN_ERROR;
+ } else {
+ rnode->node_group = FALSE;
+ rnode->indicator = UINT32_MAX;
+ rnode->index = UINT32_MAX;
+ rnode->free_group = FALSE;
+ }
+ }
+ } else {
+ ocs_log_err(hw->os, "Error: rnode is still attached\n");
+ rc = OCS_HW_RTN_ERROR;
+ }
+ }
+
+ return rc;
+}
+
+
+/**
+ * @ingroup node
+ * @brief Detach a remote node object and unregister its RPI.
+ *
+ * @param hw Hardware context.
+ * @param rnode Remote node object to free.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+ocs_hw_rtn_e
+ocs_hw_node_detach(ocs_hw_t *hw, ocs_remote_node_t *rnode)
+{
+ uint8_t *buf = NULL;
+ ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS_SYNC;
+ uint32_t index = UINT32_MAX;
+
+ if (!hw || !rnode) {
+ ocs_log_err(NULL, "bad parameter(s) hw=%p rnode=%p\n",
+ hw, rnode);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ /*
+ * Check if the chip is in an error state (UE'd) before proceeding.
+ */
+ if (sli_fw_error_status(&hw->sli) > 0) {
+ ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
+ return OCS_HW_RTN_ERROR;
+ }
+
+ index = rnode->index;
+
+ if (rnode->sport) {
+ uint32_t count = 0;
+ uint32_t fc_id;
+
+ if (!rnode->attached) {
+ return OCS_HW_RTN_SUCCESS_SYNC;
+ }
+
+ buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
+ if (!buf) {
+ ocs_log_err(hw->os, "no buffer for command\n");
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ count = ocs_atomic_sub_return(&hw->rpi_ref[index].rpi_count, 1);
+
+ if (count <= 1) {
+ /* There are no other references to this RPI
+ * so unregister it and free the resource. */
+ fc_id = UINT32_MAX;
+ rnode->node_group = FALSE;
+ rnode->free_group = TRUE;
+ } else {
+ if (sli_get_hlm(&hw->sli) == FALSE) {
+ ocs_log_test(hw->os, "Invalid count with HLM disabled, count=%d\n",
+ count);
+ }
+ fc_id = rnode->fc_id & 0x00ffffff;
+ }
+
+ rc = OCS_HW_RTN_ERROR;
+
+ if (sli_cmd_unreg_rpi(&hw->sli, buf, SLI4_BMBX_SIZE, rnode->indicator,
+ SLI_RSRC_FCOE_RPI, fc_id)) {
+ rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_node_free, rnode);
+ }
+
+ if (rc != OCS_HW_RTN_SUCCESS) {
+ ocs_log_err(hw->os, "UNREG_RPI failed\n");
+ ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
+ rc = OCS_HW_RTN_ERROR;
+ }
+ }
+
+ return rc;
+}
+
+/**
+ * @ingroup node
+ * @brief Free all remote node objects.
+ *
+ * @param hw Hardware context.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+ocs_hw_rtn_e
+ocs_hw_node_free_all(ocs_hw_t *hw)
+{
+ uint8_t *buf = NULL;
+ ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
+
+ if (!hw) {
+ ocs_log_err(NULL, "bad parameter hw=%p\n", hw);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ /*
+ * Check if the chip is in an error state (UE'd) before proceeding.
+ */
+ if (sli_fw_error_status(&hw->sli) > 0) {
+ ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
+ return OCS_HW_RTN_ERROR;
+ }
+
+ buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
+ if (!buf) {
+ ocs_log_err(hw->os, "no buffer for command\n");
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ if (sli_cmd_unreg_rpi(&hw->sli, buf, SLI4_BMBX_SIZE, 0xffff,
+ SLI_RSRC_FCOE_FCFI, UINT32_MAX)) {
+ rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_node_free_all,
+ NULL);
+ }
+
+ if (rc != OCS_HW_RTN_SUCCESS) {
+ ocs_log_err(hw->os, "UNREG_RPI failed\n");
+ ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
+ rc = OCS_HW_RTN_ERROR;
+ }
+
+ return rc;
+}
+
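+/**
+ * @ingroup node
+ * @brief Allocate a remote node group object.
+ *
+ * @par Description
+ * Allocates an FCOE_RPI resource that can be shared by multiple remote
+ * nodes when High Login Mode is enabled.
+ *
+ * @param hw Hardware context.
+ * @param ngroup Remote node group object to allocate.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */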
+ocs_hw_rtn_e
+ocs_hw_node_group_alloc(ocs_hw_t *hw, ocs_remote_node_group_t *ngroup)
+{
+ if (!hw || !ngroup) {
+ ocs_log_err(NULL, "bad parameter hw=%p ngroup=%p\n",
+ hw, ngroup);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_RPI, &ngroup->indicator,
+ &ngroup->index)) {
+ ocs_log_err(hw->os, "FCOE_RPI allocation failure addr=%#x\n",
+ ngroup->indicator);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ return OCS_HW_RTN_SUCCESS;
+}
+
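+/**
+ * @ingroup node
+ * @brief Attach a remote node to a node group.
+ *
+ * @par Description
+ * Frees the node's own FCOE_RPI resource and adopts the group's RPI
+ * indicator and index. The node must not already be attached.
+ *
+ * @param hw Hardware context.
+ * @param ngroup Remote node group object.
+ * @param rnode Remote node object to attach.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */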
+ocs_hw_rtn_e
+ocs_hw_node_group_attach(ocs_hw_t *hw, ocs_remote_node_group_t *ngroup, ocs_remote_node_t *rnode)
+{
+ if (!hw || !ngroup || !rnode) {
+ ocs_log_err(NULL, "bad parameter hw=%p ngroup=%p rnode=%p\n",
+ hw, ngroup, rnode);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ if (rnode->attached) {
+ ocs_log_err(hw->os, "node already attached RPI=%#x addr=%#x\n",
+ rnode->indicator, rnode->fc_id);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, rnode->indicator)) {
+ ocs_log_err(hw->os, "FCOE_RPI free failure RPI=%#x\n",
+ rnode->indicator);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ rnode->indicator = ngroup->indicator;
+ rnode->index = ngroup->index;
+
+ return OCS_HW_RTN_SUCCESS;
+}
+
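+/**
+ * @ingroup node
+ * @brief Free a remote node group object.
+ *
+ * @par Description
+ * If the group's RPI reference count is non-zero, the FCOE_RPI resource is
+ * released and the count reset; the group's indicator and index are then
+ * invalidated.
+ *
+ * @param hw Hardware context.
+ * @param ngroup Remote node group object to free.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */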
+ocs_hw_rtn_e
+ocs_hw_node_group_free(ocs_hw_t *hw, ocs_remote_node_group_t *ngroup)
+{
+ int ref;
+
+ if (!hw || !ngroup) {
+ ocs_log_err(NULL, "bad parameter hw=%p ngroup=%p\n",
+ hw, ngroup);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ ref = ocs_atomic_read(&hw->rpi_ref[ngroup->index].rpi_count);
+ if (ref) {
+ /* The reference count is unexpectedly non-zero; release the RPI resource anyway */
+ ocs_log_debug(hw->os, "node group reference=%d (RPI=%#x)\n",
+ ref, ngroup->indicator);
+
+ if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, ngroup->indicator)) {
+ ocs_log_err(hw->os, "FCOE_RPI free failure RPI=%#x\n",
+ ngroup->indicator);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ ocs_atomic_set(&hw->rpi_ref[ngroup->index].rpi_count, 0);
+ }
+
+ ngroup->indicator = UINT32_MAX;
+ ngroup->index = UINT32_MAX;
+
+ return OCS_HW_RTN_SUCCESS;
+}
+
+/**
+ * @brief Initialize IO fields on each free call.
+ *
+ * @n @b Note: This is done on each free call (as opposed to each
+ * alloc call) because port-owned XRIs are not
+ * allocated with ocs_hw_io_alloc() but are freed with this
+ * function.
+ *
+ * @param io Pointer to HW IO.
+ */
+static inline void
+ocs_hw_init_free_io(ocs_hw_io_t *io)
+{
+ /*
+ * Set io->done to NULL, to avoid any callbacks, should
+ * a completion be received for one of these IOs
+ */
+ io->done = NULL;
+ io->abort_done = NULL;
+ io->status_saved = 0;
+ io->abort_in_progress = FALSE;
+ io->port_owned_abort_count = 0;
+ io->rnode = NULL;
+ io->type = 0xFFFF;
+ io->wq = NULL;
+ io->ul_io = NULL;
+ io->tgt_wqe_timeout = 0;
+}
+
+/**
+ * @ingroup io
+ * @brief Lockless allocate a HW IO object.
+ *
+ * @par Description
+ * Assume that hw->ocs_lock is held. This function is only used if
+ * use_dif_sec_xri workaround is being used.
+ *
+ * @param hw Hardware context.
+ *
+ * @return Returns a pointer to an object on success, or NULL on failure.
+ */
+static inline ocs_hw_io_t *
+_ocs_hw_io_alloc(ocs_hw_t *hw)
+{
+ ocs_hw_io_t *io = NULL;
+
+ if (NULL != (io = ocs_list_remove_head(&hw->io_free))) {
+ ocs_list_add_tail(&hw->io_inuse, io);
+ io->state = OCS_HW_IO_STATE_INUSE;
+ io->quarantine = FALSE;
+ io->quarantine_first_phase = TRUE;
+ io->abort_reqtag = UINT32_MAX;
+ ocs_ref_init(&io->ref, ocs_hw_io_free_internal, io);
+ } else {
+ ocs_atomic_add_return(&hw->io_alloc_failed_count, 1);
+ }
+
+ return io;
+}
+
+/**
+ * @ingroup io
+ * @brief Allocate a HW IO object.
+ *
+ * @par Description
+ * @n @b Note: This function applies to non-port owned XRIs
+ * only.
+ *
+ * @param hw Hardware context.
+ *
+ * @return Returns a pointer to an object on success, or NULL on failure.
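+ *
+ * @par Example
+ * A minimal allocate/free sketch, assuming @c hw is an initialized
+ * hardware context (error handling elided):
+ * @code
+ * ocs_hw_io_t *io = ocs_hw_io_alloc(hw);
+ * if (io != NULL) {
+ *         // ... load the SGL and submit via ocs_hw_io_send() ...
+ *         ocs_hw_io_free(hw, io);  // drop the reference taken by alloc
+ * }
+ * @endcode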
+ */
+ocs_hw_io_t *
+ocs_hw_io_alloc(ocs_hw_t *hw)
+{
+ ocs_hw_io_t *io = NULL;
+
+ ocs_lock(&hw->io_lock);
+ io = _ocs_hw_io_alloc(hw);
+ ocs_unlock(&hw->io_lock);
+
+ return io;
+}
+
+/**
+ * @ingroup io
+ * @brief Allocate/Activate a port owned HW IO object.
+ *
+ * @par Description
+ * This function is called by the transport layer when an XRI is
+ * allocated by the SLI-Port. This will "activate" the HW IO
+ * associated with the XRI received from the SLI-Port to mirror
+ * the state of the XRI.
+ * @n @n @b Note: This function applies to port owned XRIs only.
+ *
+ * @param hw Hardware context.
+ * @param io Pointer HW IO to activate/allocate.
+ *
+ * @return Returns a pointer to an object on success, or NULL on failure.
+ */
+ocs_hw_io_t *
+ocs_hw_io_activate_port_owned(ocs_hw_t *hw, ocs_hw_io_t *io)
+{
+ if (ocs_ref_read_count(&io->ref) > 0) {
+ ocs_log_err(hw->os, "Bad parameter: refcount > 0\n");
+ return NULL;
+ }
+
+ if (io->wq != NULL) {
+ ocs_log_err(hw->os, "XRI %x already in use\n", io->indicator);
+ return NULL;
+ }
+
+ ocs_ref_init(&io->ref, ocs_hw_io_free_port_owned, io);
+ io->xbusy = TRUE;
+
+ return io;
+}
+
+/**
+ * @ingroup io
+ * @brief When an IO is freed, depending on the exchange busy flag, and other
+ * workarounds, move it to the correct list.
+ *
+ * @par Description
+ * @n @b Note: Assumes that the hw->io_lock is held and the item has been removed
+ * from the busy or wait_free list.
+ *
+ * @param hw Hardware context.
+ * @param io Pointer to the IO object to move.
+ */
+static void
+ocs_hw_io_free_move_correct_list(ocs_hw_t *hw, ocs_hw_io_t *io)
+{
+ if (io->xbusy) {
+ /* add to wait_free list and wait for XRI_ABORTED CQEs to clean up */
+ ocs_list_add_tail(&hw->io_wait_free, io);
+ io->state = OCS_HW_IO_STATE_WAIT_FREE;
+ } else {
+ /* IO not busy, add to free list */
+ ocs_list_add_tail(&hw->io_free, io);
+ io->state = OCS_HW_IO_STATE_FREE;
+ }
+
+ /* BZ 161832 workaround */
+ if (hw->workaround.use_dif_sec_xri) {
+ ocs_hw_check_sec_hio_list(hw);
+ }
+}
+
+/**
+ * @ingroup io
+ * @brief Free a HW IO object. Perform cleanup common to
+ * port and host-owned IOs.
+ *
+ * @param hw Hardware context.
+ * @param io Pointer to the HW IO object.
+ */
+static inline void
+ocs_hw_io_free_common(ocs_hw_t *hw, ocs_hw_io_t *io)
+{
+ /* initialize IO fields */
+ ocs_hw_init_free_io(io);
+
+ /* Restore default SGL */
+ ocs_hw_io_restore_sgl(hw, io);
+}
+
+/**
+ * @ingroup io
+ * @brief Free a HW IO object associated with a port-owned XRI.
+ *
+ * @param arg Pointer to the HW IO object.
+ */
+static void
+ocs_hw_io_free_port_owned(void *arg)
+{
+ ocs_hw_io_t *io = (ocs_hw_io_t *)arg;
+ ocs_hw_t *hw = io->hw;
+
+ /*
+ * For auto xfer rdy, if the dnrx bit is set, then add it to the list of XRIs
+ * waiting for buffers.
+ */
+ if (io->auto_xfer_rdy_dnrx) {
+ ocs_lock(&hw->io_lock);
+ /* take a reference because we still own the IO until the buffer is posted */
+ ocs_ref_init(&io->ref, ocs_hw_io_free_port_owned, io);
+ ocs_list_add_tail(&hw->io_port_dnrx, io);
+ ocs_unlock(&hw->io_lock);
+ }
+
+ /* perform common cleanup */
+ ocs_hw_io_free_common(hw, io);
+}
+
+/**
+ * @ingroup io
+ * @brief Free a previously-allocated HW IO object. Called when
+ * IO refcount goes to zero (host-owned IOs only).
+ *
+ * @param arg Pointer to the HW IO object.
+ */
+static void
+ocs_hw_io_free_internal(void *arg)
+{
+ ocs_hw_io_t *io = (ocs_hw_io_t *)arg;
+ ocs_hw_t *hw = io->hw;
+
+ /* perform common cleanup */
+ ocs_hw_io_free_common(hw, io);
+
+ ocs_lock(&hw->io_lock);
+ /* remove from in-use list */
+ ocs_list_remove(&hw->io_inuse, io);
+ ocs_hw_io_free_move_correct_list(hw, io);
+ ocs_unlock(&hw->io_lock);
+}
+
+/**
+ * @ingroup io
+ * @brief Free a previously-allocated HW IO object.
+ *
+ * @par Description
+ * @n @b Note: This function applies to port and host owned XRIs.
+ *
+ * @param hw Hardware context.
+ * @param io Pointer to the HW IO object.
+ *
+ * @return Returns a non-zero value if HW IO was freed, 0 if references
+ * on the IO still exist, or a negative value if an error occurred.
+ */
+int32_t
+ocs_hw_io_free(ocs_hw_t *hw, ocs_hw_io_t *io)
+{
+ /* just put refcount */
+ if (ocs_ref_read_count(&io->ref) <= 0) {
+ ocs_log_err(hw->os, "Bad parameter: refcount <= 0 xri=%x tag=%x\n",
+ io->indicator, io->reqtag);
+ return -1;
+ }
+
+ return ocs_ref_put(&io->ref); /* ocs_ref_get(): ocs_hw_io_alloc() */
+}
+
+/**
+ * @ingroup io
+ * @brief Check if given HW IO is in-use
+ *
+ * @par Description
+ * This function returns TRUE if the given HW IO has been
+ * allocated and is in-use, and FALSE otherwise. It applies to
+ * port and host owned XRIs.
+ *
+ * @param hw Hardware context.
+ * @param io Pointer to the HW IO object.
+ *
+ * @return TRUE if an IO is in use, or FALSE otherwise.
+ */
+uint8_t
+ocs_hw_io_inuse(ocs_hw_t *hw, ocs_hw_io_t *io)
+{
+ return (ocs_ref_read_count(&io->ref) > 0);
+}
+
+/**
+ * @brief Write a HW IO to a work queue.
+ *
+ * @par Description
+ * A HW IO is written to a work queue.
+ *
+ * @param wq Pointer to work queue.
+ * @param wqe Pointer to WQ entry.
+ *
+ * @n @b Note: Assumes the SLI-4 queue lock is held.
+ *
+ * @return Returns 0 on success, or a negative error code value on failure.
+ */
+static int32_t
+_hw_wq_write(hw_wq_t *wq, ocs_hw_wqe_t *wqe)
+{
+ int32_t rc;
+ int32_t queue_rc;
+
+ /* Periodically set the wqec bit to generate WQE-consumed completions */
+ if (wq->wqec_count) {
+ wq->wqec_count--;
+ }
+ if (wq->wqec_count == 0) {
+ sli4_generic_wqe_t *genwqe = (void*)wqe->wqebuf;
+ genwqe->wqec = 1;
+ wq->wqec_count = wq->wqec_set_count;
+ }
+
+ /* Decrement WQ free count */
+ wq->free_count--;
+
+ queue_rc = _sli_queue_write(&wq->hw->sli, wq->queue, wqe->wqebuf);
+
+ if (queue_rc < 0) {
+ rc = -1;
+ } else {
+ rc = 0;
+ ocs_queue_history_wq(&wq->hw->q_hist, (void *) wqe->wqebuf, wq->queue->id, queue_rc);
+ }
+
+ return rc;
+}
+
+/**
+ * @brief Write a HW IO to a work queue.
+ *
+ * @par Description
+ * A HW IO is written to a work queue.
+ *
+ * @param wq Pointer to work queue.
+ * @param wqe Pointer to WQ entry.
+ *
+ * @n @b Note: Takes the SLI-4 queue lock.
+ *
+ * @return Returns 0 on success, or a negative error code value on failure.
+ */
+int32_t
+hw_wq_write(hw_wq_t *wq, ocs_hw_wqe_t *wqe)
+{
+ int32_t rc = 0;
+
+ sli_queue_lock(wq->queue);
+ if ( ! ocs_list_empty(&wq->pending_list)) {
+ ocs_list_add_tail(&wq->pending_list, wqe);
+ OCS_STAT(wq->wq_pending_count++;)
+ while ((wq->free_count > 0) && ((wqe = ocs_list_remove_head(&wq->pending_list)) != NULL)) {
+ rc = _hw_wq_write(wq, wqe);
+ if (rc < 0) {
+ break;
+ }
+ if (wqe->abort_wqe_submit_needed) {
+ wqe->abort_wqe_submit_needed = 0;
+ sli_abort_wqe(&wq->hw->sli, wqe->wqebuf, wq->hw->sli.config.wqe_size, SLI_ABORT_XRI,
+ wqe->send_abts, wqe->id, 0, wqe->abort_reqtag, SLI4_CQ_DEFAULT);
+ ocs_list_add_tail(&wq->pending_list, wqe);
+ OCS_STAT(wq->wq_pending_count++;)
+ }
+ }
+ } else {
+ if (wq->free_count > 0) {
+ rc = _hw_wq_write(wq, wqe);
+ } else {
+ ocs_list_add_tail(&wq->pending_list, wqe);
+ OCS_STAT(wq->wq_pending_count++;)
+ }
+ }
+
+ sli_queue_unlock(wq->queue);
+
+ return rc;
+}
+
+/**
+ * @brief Update free count and submit any pending HW IOs
+ *
+ * @par Description
+ * The WQ free count is updated, and any pending HW IOs are submitted that
+ * will fit in the queue.
+ *
+ * @param wq Pointer to work queue.
+ * @param update_free_count Value added to the WQ's free count.
+ *
+ * @return None.
+ */
+static void
+hw_wq_submit_pending(hw_wq_t *wq, uint32_t update_free_count)
+{
+ ocs_hw_wqe_t *wqe;
+
+ sli_queue_lock(wq->queue);
+
+ /* Update free count with value passed in */
+ wq->free_count += update_free_count;
+
+ while ((wq->free_count > 0) && ((wqe = ocs_list_remove_head(&wq->pending_list)) != NULL)) {
+ _hw_wq_write(wq, wqe);
+
+ if (wqe->abort_wqe_submit_needed) {
+ wqe->abort_wqe_submit_needed = 0;
+ sli_abort_wqe(&wq->hw->sli, wqe->wqebuf, wq->hw->sli.config.wqe_size, SLI_ABORT_XRI,
+ wqe->send_abts, wqe->id, 0, wqe->abort_reqtag, SLI4_CQ_DEFAULT);
+ ocs_list_add_tail(&wq->pending_list, wqe);
+ OCS_STAT(wq->wq_pending_count++;)
+ }
+ }
+
+ sli_queue_unlock(wq->queue);
+}
+
+/**
+ * @brief Check for IOs waiting on the BZ 161832 workaround list.
+ *
+ * @par Description
+ * Checks hw->sec_hio_wait_list; if an IO is waiting for a secondary HW IO,
+ * try to allocate one and dispatch the IO.
+ *
+ * @n @b Note: hw->io_lock MUST be taken when called.
+ *
+ * @param hw pointer to HW object
+ *
+ * @return none
+ */
+static void
+ocs_hw_check_sec_hio_list(ocs_hw_t *hw)
+{
+ ocs_hw_io_t *io;
+ ocs_hw_io_t *sec_io;
+ int rc = 0;
+
+ while (!ocs_list_empty(&hw->sec_hio_wait_list)) {
+ uint16_t flags;
+
+ sec_io = _ocs_hw_io_alloc(hw);
+ if (sec_io == NULL) {
+ break;
+ }
+
+ io = ocs_list_remove_head(&hw->sec_hio_wait_list);
+ ocs_list_add_tail(&hw->io_inuse, io);
+ io->state = OCS_HW_IO_STATE_INUSE;
+ io->sec_hio = sec_io;
+
+ /* mark secondary XRI for second and subsequent data phase as quarantine */
+ if (io->xbusy) {
+ sec_io->quarantine = TRUE;
+ }
+
+ flags = io->sec_iparam.fcp_tgt.flags;
+ if (io->xbusy) {
+ flags |= SLI4_IO_CONTINUATION;
+ } else {
+ flags &= ~SLI4_IO_CONTINUATION;
+ }
+
+ io->tgt_wqe_timeout = io->sec_iparam.fcp_tgt.timeout;
+
+ /* Complete (continue) TRECV IO */
+ if (io->xbusy) {
+ if (sli_fcp_cont_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl,
+ io->first_data_sge,
+ io->sec_iparam.fcp_tgt.offset, io->sec_len, io->indicator, io->sec_hio->indicator,
+ io->reqtag, SLI4_CQ_DEFAULT,
+ io->sec_iparam.fcp_tgt.ox_id, io->rnode->indicator, io->rnode,
+ flags,
+ io->sec_iparam.fcp_tgt.dif_oper, io->sec_iparam.fcp_tgt.blk_size, io->sec_iparam.fcp_tgt.cs_ctl, io->sec_iparam.fcp_tgt.app_id)) {
+ ocs_log_test(hw->os, "TRECEIVE WQE error\n");
+ break;
+ }
+ } else {
+ if (sli_fcp_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl,
+ io->first_data_sge,
+ io->sec_iparam.fcp_tgt.offset, io->sec_len, io->indicator,
+ io->reqtag, SLI4_CQ_DEFAULT,
+ io->sec_iparam.fcp_tgt.ox_id, io->rnode->indicator, io->rnode,
+ flags,
+ io->sec_iparam.fcp_tgt.dif_oper, io->sec_iparam.fcp_tgt.blk_size,
+ io->sec_iparam.fcp_tgt.cs_ctl, io->sec_iparam.fcp_tgt.app_id)) {
+ ocs_log_test(hw->os, "TRECEIVE WQE error\n");
+ break;
+ }
+ }
+
+ if (io->wq == NULL) {
+ io->wq = ocs_hw_queue_next_wq(hw, io);
+ ocs_hw_assert(io->wq != NULL);
+ }
+ io->xbusy = TRUE;
+
+ /*
+ * Add IO to active io wqe list before submitting, in case the
+ * wcqe processing preempts this thread.
+ */
+ ocs_hw_add_io_timed_wqe(hw, io);
+ rc = hw_wq_write(io->wq, &io->wqe);
+ if (rc >= 0) {
+ /* non-negative return is success */
+ rc = 0;
+ } else {
+ /* failed to write wqe, remove from active wqe list */
+ ocs_log_err(hw->os, "sli_queue_write failed: %d\n", rc);
+ io->xbusy = FALSE;
+ ocs_hw_remove_io_timed_wqe(hw, io);
+ }
+ }
+}
+
+/**
+ * @ingroup io
+ * @brief Send a Single Request/Response Sequence (SRRS).
+ *
+ * @par Description
+ * This routine supports communication sequences consisting of a single
+ * request and single response between two endpoints. Examples include:
+ * - Sending an ELS request.
+ * - Sending an ELS response - To send an ELS response, the caller must provide
+ * the OX_ID from the received request.
+ * - Sending a FC Common Transport (FC-CT) request - To send a FC-CT request,
+ * the caller must provide the R_CTL, TYPE, and DF_CTL
+ * values to place in the FC frame header.
+ * .
+ * @n @b Note: The caller is expected to provide both send and receive
+ * buffers for requests. In the case of sending a response, no receive buffer
+ * is necessary and the caller may pass in a NULL pointer.
+ *
+ * @param hw Hardware context.
+ * @param type Type of sequence (ELS request/response, FC-CT).
+ * @param io Previously-allocated HW IO object.
+ * @param send DMA memory holding data to send (for example, ELS request, BLS response).
+ * @param len Length, in bytes, of data to send.
+ * @param receive Optional DMA memory to hold a response.
+ * @param rnode Destination of data (that is, a remote node).
+ * @param iparam IO parameters (ELS response and FC-CT).
+ * @param cb Function call upon completion of sending the data (may be NULL).
+ * @param arg Argument to pass to IO completion function.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
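+ *
+ * @par Example
+ * A hedged sketch of sending an ELS request; @c hw, @c io, @c rnode, the
+ * @c req / @c rsp DMA buffers, and the @c els_done_cb completion callback
+ * are assumed to be set up by the caller, and the timeout is illustrative:
+ * @code
+ * ocs_hw_io_param_t iparam = {0};
+ * iparam.els.timeout = 30;  // seconds (illustrative value)
+ * ocs_hw_rtn_e rc = ocs_hw_srrs_send(hw, OCS_HW_ELS_REQ, io, &req, req_len,
+ *                                    &rsp, rnode, &iparam, els_done_cb, NULL);
+ * @endcode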
+ */
+ocs_hw_rtn_e
+ocs_hw_srrs_send(ocs_hw_t *hw, ocs_hw_io_type_e type, ocs_hw_io_t *io,
+ ocs_dma_t *send, uint32_t len, ocs_dma_t *receive,
+ ocs_remote_node_t *rnode, ocs_hw_io_param_t *iparam,
+ ocs_hw_srrs_cb_t cb, void *arg)
+{
+ sli4_sge_t *sge = NULL;
+ ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
+ uint16_t local_flags = 0;
+
+ if (!hw || !io || !rnode || !iparam) {
+ ocs_log_err(NULL, "bad parm hw=%p io=%p send=%p receive=%p rnode=%p iparam=%p\n",
+ hw, io, send, receive, rnode, iparam);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ if (hw->state != OCS_HW_STATE_ACTIVE) {
+ ocs_log_test(hw->os, "cannot send SRRS, HW state=%d\n", hw->state);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ if (ocs_hw_is_xri_port_owned(hw, io->indicator)) {
+ /* We must set the XC bit for port owned XRIs */
+ local_flags |= SLI4_IO_CONTINUATION;
+ }
+ io->rnode = rnode;
+ io->type = type;
+ io->done = cb;
+ io->arg = arg;
+
+ sge = io->sgl->virt;
+
+ /* clear both SGEs */
+ ocs_memset(io->sgl->virt, 0, 2 * sizeof(sli4_sge_t));
+
+ if (send) {
+ sge[0].buffer_address_high = ocs_addr32_hi(send->phys);
+ sge[0].buffer_address_low = ocs_addr32_lo(send->phys);
+ sge[0].sge_type = SLI4_SGE_TYPE_DATA;
+ sge[0].buffer_length = len;
+ }
+
+ if ((OCS_HW_ELS_REQ == type) || (OCS_HW_FC_CT == type)) {
+ sge[1].buffer_address_high = ocs_addr32_hi(receive->phys);
+ sge[1].buffer_address_low = ocs_addr32_lo(receive->phys);
+ sge[1].sge_type = SLI4_SGE_TYPE_DATA;
+ sge[1].buffer_length = receive->size;
+ sge[1].last = TRUE;
+ } else {
+ sge[0].last = TRUE;
+ }
+
+ switch (type) {
+ case OCS_HW_ELS_REQ:
+ if ( (!send) || sli_els_request64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->sgl,
+ *((uint8_t *)(send->virt)), /* req_type */
+ len, receive->size,
+ iparam->els.timeout, io->indicator, io->reqtag, SLI4_CQ_DEFAULT, rnode)) {
+ ocs_log_err(hw->os, "REQ WQE error\n");
+ rc = OCS_HW_RTN_ERROR;
+ }
+ break;
+ case OCS_HW_ELS_RSP:
+ if ( (!send) || sli_xmit_els_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, send, len,
+ io->indicator, io->reqtag, SLI4_CQ_DEFAULT,
+ iparam->els.ox_id,
+ rnode, local_flags, UINT32_MAX)) {
+ ocs_log_err(hw->os, "RSP WQE error\n");
+ rc = OCS_HW_RTN_ERROR;
+ }
+ break;
+ case OCS_HW_ELS_RSP_SID:
+ if ( (!send) || sli_xmit_els_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, send, len,
+ io->indicator, io->reqtag, SLI4_CQ_DEFAULT,
+ iparam->els_sid.ox_id,
+ rnode, local_flags, iparam->els_sid.s_id)) {
+ ocs_log_err(hw->os, "RSP (SID) WQE error\n");
+ rc = OCS_HW_RTN_ERROR;
+ }
+ break;
+ case OCS_HW_FC_CT:
+ if ( (!send) || sli_gen_request64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->sgl, len,
+ receive->size, iparam->fc_ct.timeout, io->indicator,
+ io->reqtag, SLI4_CQ_DEFAULT, rnode, iparam->fc_ct.r_ctl,
+ iparam->fc_ct.type, iparam->fc_ct.df_ctl)) {
+ ocs_log_err(hw->os, "GEN WQE error\n");
+ rc = OCS_HW_RTN_ERROR;
+ }
+ break;
+ case OCS_HW_FC_CT_RSP:
+ if ( (!send) || sli_xmit_sequence64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->sgl, len,
+ iparam->fc_ct_rsp.timeout, iparam->fc_ct_rsp.ox_id, io->indicator,
+ io->reqtag, rnode, iparam->fc_ct_rsp.r_ctl,
+ iparam->fc_ct_rsp.type, iparam->fc_ct_rsp.df_ctl)) {
+ ocs_log_err(hw->os, "XMIT SEQ WQE error\n");
+ rc = OCS_HW_RTN_ERROR;
+ }
+ break;
+ case OCS_HW_BLS_ACC:
+ case OCS_HW_BLS_RJT:
+ {
+ sli_bls_payload_t bls;
+
+ if (OCS_HW_BLS_ACC == type) {
+ bls.type = SLI_BLS_ACC;
+ ocs_memcpy(&bls.u.acc, iparam->bls.payload, sizeof(bls.u.acc));
+ } else {
+ bls.type = SLI_BLS_RJT;
+ ocs_memcpy(&bls.u.rjt, iparam->bls.payload, sizeof(bls.u.rjt));
+ }
+
+ bls.ox_id = iparam->bls.ox_id;
+ bls.rx_id = iparam->bls.rx_id;
+
+ if (sli_xmit_bls_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &bls,
+ io->indicator, io->reqtag,
+ SLI4_CQ_DEFAULT,
+ rnode, UINT32_MAX)) {
+ ocs_log_err(hw->os, "XMIT_BLS_RSP64 WQE error\n");
+ rc = OCS_HW_RTN_ERROR;
+ }
+ break;
+ }
+ case OCS_HW_BLS_ACC_SID:
+ {
+ sli_bls_payload_t bls;
+
+ bls.type = SLI_BLS_ACC;
+ ocs_memcpy(&bls.u.acc, iparam->bls_sid.payload, sizeof(bls.u.acc));
+
+ bls.ox_id = iparam->bls_sid.ox_id;
+ bls.rx_id = iparam->bls_sid.rx_id;
+
+ if (sli_xmit_bls_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &bls,
+ io->indicator, io->reqtag,
+ SLI4_CQ_DEFAULT,
+ rnode, iparam->bls_sid.s_id)) {
+ ocs_log_err(hw->os, "XMIT_BLS_RSP64 WQE SID error\n");
+ rc = OCS_HW_RTN_ERROR;
+ }
+ break;
+ }
+ case OCS_HW_BCAST:
+ if ( (!send) || sli_xmit_bcast64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, send, len,
+ iparam->bcast.timeout, io->indicator, io->reqtag,
+ SLI4_CQ_DEFAULT, rnode,
+ iparam->bcast.r_ctl, iparam->bcast.type, iparam->bcast.df_ctl)) {
+ ocs_log_err(hw->os, "XMIT_BCAST64 WQE error\n");
+ rc = OCS_HW_RTN_ERROR;
+ }
+ break;
+ default:
+ ocs_log_err(hw->os, "bad SRRS type %#x\n", type);
+ rc = OCS_HW_RTN_ERROR;
+ }
+
+ if (OCS_HW_RTN_SUCCESS == rc) {
+ if (io->wq == NULL) {
+ io->wq = ocs_hw_queue_next_wq(hw, io);
+ ocs_hw_assert(io->wq != NULL);
+ }
+ io->xbusy = TRUE;
+
+ /*
+ * Add IO to active io wqe list before submitting, in case the
+ * wcqe processing preempts this thread.
+ */
+ OCS_STAT(io->wq->use_count++);
+ ocs_hw_add_io_timed_wqe(hw, io);
+ rc = hw_wq_write(io->wq, &io->wqe);
+ if (rc >= 0) {
+ /* non-negative return is success */
+ rc = 0;
+ } else {
+ /* failed to write wqe, remove from active wqe list */
+ ocs_log_err(hw->os, "sli_queue_write failed: %d\n", rc);
+ io->xbusy = FALSE;
+ ocs_hw_remove_io_timed_wqe(hw, io);
+ }
+ }
+
+ return rc;
+}
+
+/**
+ * @ingroup io
+ * @brief Send a read, write, or response IO.
+ *
+ * @par Description
+ * This routine supports sending a higher-level IO (for example, FCP) between two endpoints
+ * as a target or initiator. Examples include:
+ * - Sending read data and good response (target).
+ * - Sending a response (target with no data or after receiving write data).
+ * .
+ * This routine assumes all IOs use the SGL associated with the HW IO. Prior to
+ * calling this routine, the data should be loaded using ocs_hw_io_add_sge().
+ *
+ * @param hw Hardware context.
+ * @param type Type of IO (target read, target response, and so on).
+ * @param io Previously-allocated HW IO object.
+ * @param len Length, in bytes, of data to send.
+ * @param iparam IO parameters.
+ * @param rnode Destination of data (that is, a remote node).
+ * @param cb Function call upon completion of sending data (may be NULL).
+ * @param arg Argument to pass to IO completion function.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ *
+ * @todo
+ * - Support specifying a relative offset.
+ * - Use a WQ other than 0.
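+ *
+ * @par Example
+ * A hedged sketch of a target read data phase; the SGL is assumed to have
+ * been loaded with ocs_hw_io_init_sges()/ocs_hw_io_add_sge() first, and
+ * @c ox_id, the timeout, and @c io_done_cb are caller-supplied placeholders:
+ * @code
+ * ocs_hw_io_param_t iparam = {0};
+ * iparam.fcp_tgt.ox_id = ox_id;    // from the received FCP command
+ * iparam.fcp_tgt.offset = 0;
+ * iparam.fcp_tgt.timeout = 30;     // seconds (illustrative value)
+ * ocs_hw_rtn_e rc = ocs_hw_io_send(hw, OCS_HW_IO_TARGET_READ, io, xfer_len,
+ *                                  &iparam, rnode, io_done_cb, NULL);
+ * @endcode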
+ */
+ocs_hw_rtn_e
+ocs_hw_io_send(ocs_hw_t *hw, ocs_hw_io_type_e type, ocs_hw_io_t *io,
+ uint32_t len, ocs_hw_io_param_t *iparam, ocs_remote_node_t *rnode,
+ void *cb, void *arg)
+{
+ ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
+ uint32_t rpi;
+ uint8_t send_wqe = TRUE;
+
+ CPUTRACE("");
+
+ if (!hw || !io || !rnode || !iparam) {
+ ocs_log_err(NULL, "bad parm hw=%p io=%p iparam=%p rnode=%p\n",
+ hw, io, iparam, rnode);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ if (hw->state != OCS_HW_STATE_ACTIVE) {
+ ocs_log_err(hw->os, "cannot send IO, HW state=%d\n", hw->state);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ rpi = rnode->indicator;
+
+ if (hw->workaround.use_unregistered_rpi && (rpi == UINT32_MAX)) {
+ rpi = hw->workaround.unregistered_rid;
+ ocs_log_test(hw->os, "using unregistered RPI: %d\n", rpi);
+ }
+
+ /*
+ * Save state needed during later stages
+ */
+ io->rnode = rnode;
+ io->type = type;
+ io->done = cb;
+ io->arg = arg;
+
+ /*
+ * Format the work queue entry used to send the IO
+ */
+ switch (type) {
+ case OCS_HW_IO_INITIATOR_READ:
+ /*
+ * If use_dif_quarantine workaround is in effect, and dif_separates then mark the
+ * initiator read IO for quarantine
+ */
+ if (hw->workaround.use_dif_quarantine && (hw->config.dif_mode == OCS_HW_DIF_MODE_SEPARATE) &&
+ (iparam->fcp_tgt.dif_oper != OCS_HW_DIF_OPER_DISABLED)) {
+ io->quarantine = TRUE;
+ }
+
+ ocs_hw_io_ini_sge(hw, io, iparam->fcp_ini.cmnd, iparam->fcp_ini.cmnd_size,
+ iparam->fcp_ini.rsp);
+
+ if (sli_fcp_iread64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge, len,
+ io->indicator, io->reqtag, SLI4_CQ_DEFAULT, rpi, rnode,
+ iparam->fcp_ini.dif_oper, iparam->fcp_ini.blk_size,
+ iparam->fcp_ini.timeout)) {
+ ocs_log_err(hw->os, "IREAD WQE error\n");
+ rc = OCS_HW_RTN_ERROR;
+ }
+ break;
+ case OCS_HW_IO_INITIATOR_WRITE:
+ ocs_hw_io_ini_sge(hw, io, iparam->fcp_ini.cmnd, iparam->fcp_ini.cmnd_size,
+ iparam->fcp_ini.rsp);
+
+ if (sli_fcp_iwrite64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge,
+ len, iparam->fcp_ini.first_burst,
+ io->indicator, io->reqtag,
+ SLI4_CQ_DEFAULT, rpi, rnode,
+ iparam->fcp_ini.dif_oper, iparam->fcp_ini.blk_size,
+ iparam->fcp_ini.timeout)) {
+ ocs_log_err(hw->os, "IWRITE WQE error\n");
+ rc = OCS_HW_RTN_ERROR;
+ }
+ break;
+ case OCS_HW_IO_INITIATOR_NODATA:
+ ocs_hw_io_ini_sge(hw, io, iparam->fcp_ini.cmnd, iparam->fcp_ini.cmnd_size,
+ iparam->fcp_ini.rsp);
+
+ if (sli_fcp_icmnd64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl,
+ io->indicator, io->reqtag, SLI4_CQ_DEFAULT,
+ rpi, rnode, iparam->fcp_ini.timeout)) {
+ ocs_log_err(hw->os, "ICMND WQE error\n");
+ rc = OCS_HW_RTN_ERROR;
+ }
+ break;
+ case OCS_HW_IO_TARGET_WRITE: {
+ uint16_t flags = iparam->fcp_tgt.flags;
+ fcp_xfer_rdy_iu_t *xfer = io->xfer_rdy.virt;
+
+ /*
+ * Fill in the XFER_RDY for IF_TYPE 0 devices
+ */
+ *((uint32_t *)xfer->fcp_data_ro) = ocs_htobe32(iparam->fcp_tgt.offset);
+ *((uint32_t *)xfer->fcp_burst_len) = ocs_htobe32(len);
+ *((uint32_t *)xfer->rsvd) = 0;
+
+ if (io->xbusy) {
+ flags |= SLI4_IO_CONTINUATION;
+ } else {
+ flags &= ~SLI4_IO_CONTINUATION;
+ }
+
+ io->tgt_wqe_timeout = iparam->fcp_tgt.timeout;
+
+ /*
+ * If use_dif_quarantine workaround is in effect, and this is a DIF enabled IO
+ * then mark the target write IO for quarantine
+ */
+ if (hw->workaround.use_dif_quarantine && (hw->config.dif_mode == OCS_HW_DIF_MODE_SEPARATE) &&
+ (iparam->fcp_tgt.dif_oper != OCS_HW_DIF_OPER_DISABLED)) {
+ io->quarantine = TRUE;
+ }
+
+ /*
+ * BZ 161832 Workaround:
+ * Check for use_dif_sec_xri workaround. Note, even though the first data phase
+ * doesn't really need a secondary XRI, we allocate one anyway, as this avoids the
+ * potential for deadlock where all XRI's are allocated as primaries to IOs that
+ * are on hw->sec_hio_wait_list. If this secondary XRI is not for the first
+ * data phase, it is marked for quarantine.
+ */
+ if (hw->workaround.use_dif_sec_xri && (iparam->fcp_tgt.dif_oper != OCS_HW_DIF_OPER_DISABLED)) {
+
+ /*
+ * If we have allocated a chained SGL for skyhawk, then
+ * we can re-use this for the sec_hio.
+ */
+ if (io->ovfl_io != NULL) {
+ io->sec_hio = io->ovfl_io;
+ io->sec_hio->quarantine = TRUE;
+ } else {
+ io->sec_hio = ocs_hw_io_alloc(hw);
+ }
+ if (io->sec_hio == NULL) {
+ /* Failed to allocate, so save full request context and put
+ * this IO on the wait list
+ */
+ io->sec_iparam = *iparam;
+ io->sec_len = len;
+ ocs_lock(&hw->io_lock);
+ ocs_list_remove(&hw->io_inuse, io);
+ ocs_list_add_tail(&hw->sec_hio_wait_list, io);
+ io->state = OCS_HW_IO_STATE_WAIT_SEC_HIO;
+ hw->sec_hio_wait_count++;
+ ocs_unlock(&hw->io_lock);
+ send_wqe = FALSE;
+ /* Done */
+ break;
+ }
+ /* We quarantine the secondary IO if this is the second or subsequent data phase */
+ if (io->xbusy) {
+ io->sec_hio->quarantine = TRUE;
+ }
+ }
+
+ /*
+ * If not the first data phase, and io->sec_hio has been allocated, then issue
+ * FCP_CONT_TRECEIVE64 WQE, otherwise use the usual FCP_TRECEIVE64 WQE
+ */
+ if (io->xbusy && (io->sec_hio != NULL)) {
+ if (sli_fcp_cont_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge,
+ iparam->fcp_tgt.offset, len, io->indicator, io->sec_hio->indicator,
+ io->reqtag, SLI4_CQ_DEFAULT,
+ iparam->fcp_tgt.ox_id, rpi, rnode,
+ flags,
+ iparam->fcp_tgt.dif_oper, iparam->fcp_tgt.blk_size,
+ iparam->fcp_tgt.cs_ctl, iparam->fcp_tgt.app_id)) {
+ ocs_log_err(hw->os, "TRECEIVE WQE error\n");
+ rc = OCS_HW_RTN_ERROR;
+ }
+ } else {
+ if (sli_fcp_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge,
+ iparam->fcp_tgt.offset, len, io->indicator, io->reqtag,
+ SLI4_CQ_DEFAULT,
+ iparam->fcp_tgt.ox_id, rpi, rnode,
+ flags,
+ iparam->fcp_tgt.dif_oper, iparam->fcp_tgt.blk_size,
+ iparam->fcp_tgt.cs_ctl, iparam->fcp_tgt.app_id)) {
+ ocs_log_err(hw->os, "TRECEIVE WQE error\n");
+ rc = OCS_HW_RTN_ERROR;
+ }
+ }
+ break;
+ }
+ case OCS_HW_IO_TARGET_READ: {
+ uint16_t flags = iparam->fcp_tgt.flags;
+
+ if (io->xbusy) {
+ flags |= SLI4_IO_CONTINUATION;
+ } else {
+ flags &= ~SLI4_IO_CONTINUATION;
+ }
+
+ io->tgt_wqe_timeout = iparam->fcp_tgt.timeout;
+ if (sli_fcp_tsend64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge,
+ iparam->fcp_tgt.offset, len, io->indicator, io->reqtag,
+ SLI4_CQ_DEFAULT,
+ iparam->fcp_tgt.ox_id, rpi, rnode,
+ flags,
+ iparam->fcp_tgt.dif_oper,
+ iparam->fcp_tgt.blk_size,
+ iparam->fcp_tgt.cs_ctl,
+ iparam->fcp_tgt.app_id)) {
+ ocs_log_err(hw->os, "TSEND WQE error\n");
+ rc = OCS_HW_RTN_ERROR;
+ } else if (hw->workaround.retain_tsend_io_length) {
+ io->length = len;
+ }
+ break;
+ }
+ case OCS_HW_IO_TARGET_RSP: {
+ uint16_t flags = iparam->fcp_tgt.flags;
+
+ if (io->xbusy) {
+ flags |= SLI4_IO_CONTINUATION;
+ } else {
+ flags &= ~SLI4_IO_CONTINUATION;
+ }
+
+ /* post a new auto xfer ready buffer */
+ if (hw->auto_xfer_rdy_enabled && io->is_port_owned) {
+ if ((io->auto_xfer_rdy_dnrx = ocs_hw_rqpair_auto_xfer_rdy_buffer_post(hw, io, 1))) {
+ flags |= SLI4_IO_DNRX;
+ }
+ }
+
+ io->tgt_wqe_timeout = iparam->fcp_tgt.timeout;
+ if (sli_fcp_trsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size,
+ &io->def_sgl,
+ len,
+ io->indicator, io->reqtag,
+ SLI4_CQ_DEFAULT,
+ iparam->fcp_tgt.ox_id,
+ rpi, rnode,
+ flags, iparam->fcp_tgt.cs_ctl,
+ io->is_port_owned,
+ iparam->fcp_tgt.app_id)) {
+ ocs_log_err(hw->os, "TRSP WQE error\n");
+ rc = OCS_HW_RTN_ERROR;
+ }
+
+ break;
+ }
+ default:
+ ocs_log_err(hw->os, "unsupported IO type %#x\n", type);
+ rc = OCS_HW_RTN_ERROR;
+ }
+
+ if (send_wqe && (OCS_HW_RTN_SUCCESS == rc)) {
+ if (io->wq == NULL) {
+ io->wq = ocs_hw_queue_next_wq(hw, io);
+ ocs_hw_assert(io->wq != NULL);
+ }
+
+ io->xbusy = TRUE;
+
+ /*
+ * Add IO to active io wqe list before submitting, in case the
+ * wcqe processing preempts this thread.
+ */
+ OCS_STAT(hw->tcmd_wq_submit[io->wq->instance]++);
+ OCS_STAT(io->wq->use_count++);
+ ocs_hw_add_io_timed_wqe(hw, io);
+ rc = hw_wq_write(io->wq, &io->wqe);
+ if (rc >= 0) {
+ /* non-negative return is success */
+ rc = 0;
+ } else {
+ /* failed to write wqe, remove from active wqe list */
+ ocs_log_err(hw->os, "sli_queue_write failed: %d\n", rc);
+ io->xbusy = FALSE;
+ ocs_hw_remove_io_timed_wqe(hw, io);
+ }
+ }
+
+ return rc;
+}
+
+/**
+ * @brief Send a raw frame
+ *
+ * @par Description
+ * Using the SEND_FRAME_WQE, a frame consisting of header and payload is sent.
+ *
+ * @param hw Pointer to HW object.
+ * @param hdr Pointer to a little endian formatted FC header.
+ * @param sof Value to use as the frame SOF.
+ * @param eof Value to use as the frame EOF.
+ * @param payload Pointer to payload DMA buffer.
+ * @param ctx Pointer to caller provided send frame context.
+ * @param callback Callback function.
+ * @param arg Callback function argument.
+ *
+ * @return Returns 0 on success, or a negative error code value on failure.
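+ *
+ * @par Example
+ * A hedged sketch; @c hdr, @c payload, @c sof, @c eof, and the
+ * @c send_frame_done_cb callback are assumed to be prepared by the caller:
+ * @code
+ * // ctx is caller-allocated and must remain valid until the callback runs
+ * ocs_hw_rtn_e rc = ocs_hw_send_frame(hw, &hdr, sof, eof, &payload, ctx,
+ *                                     send_frame_done_cb, NULL);
+ * @endcode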
+ */
+ocs_hw_rtn_e
+ocs_hw_send_frame(ocs_hw_t *hw, fc_header_le_t *hdr, uint8_t sof, uint8_t eof, ocs_dma_t *payload,
+ ocs_hw_send_frame_context_t *ctx, void (*callback)(void *arg, uint8_t *cqe, int32_t status), void *arg)
+{
+ int32_t rc;
+ ocs_hw_wqe_t *wqe;
+ uint32_t xri;
+ hw_wq_t *wq;
+
+ wqe = &ctx->wqe;
+
+ /* populate the callback object */
+ ctx->hw = hw;
+
+ /* Fetch and populate request tag */
+ ctx->wqcb = ocs_hw_reqtag_alloc(hw, callback, arg);
+ if (ctx->wqcb == NULL) {
+ ocs_log_err(hw->os, "can't allocate request tag\n");
+ return OCS_HW_RTN_NO_RESOURCES;
+ }
+
+ /* Choose a work queue, first look for a class[1] wq, otherwise just use wq[0] */
+ wq = ocs_varray_iter_next(hw->wq_class_array[1]);
+ if (wq == NULL) {
+ wq = hw->hw_wq[0];
+ }
+
+ /* Set XRI and RX_ID in the header based on which WQ, and which send_frame_io we are using */
+ xri = wq->send_frame_io->indicator;
+
+ /* Build the send frame WQE */
+ rc = sli_send_frame_wqe(&hw->sli, wqe->wqebuf, hw->sli.config.wqe_size, sof, eof, (uint32_t*) hdr, payload,
+ payload->len, OCS_HW_SEND_FRAME_TIMEOUT, xri, ctx->wqcb->instance_index);
+ if (rc) {
+ ocs_log_err(hw->os, "sli_send_frame_wqe failed: %d\n", rc);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ /* Write to WQ */
+ rc = hw_wq_write(wq, wqe);
+ if (rc) {
+ ocs_log_err(hw->os, "hw_wq_write failed: %d\n", rc);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ OCS_STAT(wq->use_count++);
+
+ return rc ? OCS_HW_RTN_ERROR : OCS_HW_RTN_SUCCESS;
+}
+
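+/**
+ * @ingroup io
+ * @brief Register a temporary (overflow) SGL with an IO.
+ *
+ * @par Description
+ * Only valid when SGLs are not pre-registered; the registered SGL is used
+ * if the default SGL fills up (see ocs_hw_io_overflow_sgl()).
+ *
+ * @param hw Hardware context.
+ * @param io Previously-allocated HW IO object.
+ * @param sgl DMA memory holding the overflow SGL.
+ * @param sgl_count Number of entries the overflow SGL can hold.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */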
+ocs_hw_rtn_e
+ocs_hw_io_register_sgl(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_dma_t *sgl, uint32_t sgl_count)
+{
+ if (sli_get_sgl_preregister(&hw->sli)) {
+ ocs_log_err(hw->os, "can't use temporary SGL with pre-registered SGLs\n");
+ return OCS_HW_RTN_ERROR;
+ }
+ io->ovfl_sgl = sgl;
+ io->ovfl_sgl_count = sgl_count;
+ io->ovfl_io = NULL;
+
+ return OCS_HW_RTN_SUCCESS;
+}
+
+static void
+ocs_hw_io_restore_sgl(ocs_hw_t *hw, ocs_hw_io_t *io)
+{
+ /* Restore the default */
+ io->sgl = &io->def_sgl;
+ io->sgl_count = io->def_sgl_count;
+
+ /*
+ * For skyhawk, we need to free the IO allocated for the chained
+ * SGL. For all devices, clear the overflow fields on the IO.
+ *
+ * Note: For DIF IOs, we may be using the same XRI for the sec_hio and
+ * the chained SGLs. If so, then we clear the ovfl_io field
+ * when the sec_hio is freed.
+ */
+ if (io->ovfl_io != NULL) {
+ ocs_hw_io_free(hw, io->ovfl_io);
+ io->ovfl_io = NULL;
+ }
+
+ /* Clear the overflow SGL */
+ io->ovfl_sgl = NULL;
+ io->ovfl_sgl_count = 0;
+ io->ovfl_lsp = NULL;
+}
+
+/**
+ * @ingroup io
+ * @brief Initialize the scatter gather list entries of an IO.
+ *
+ * @param hw Hardware context.
+ * @param io Previously-allocated HW IO object.
+ * @param type Type of IO (target read, target response, and so on).
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
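+ *
+ * @par Example
+ * A minimal SGL-loading sketch for a target read; @c buf_phys and
+ * @c buf_len are placeholder caller values:
+ * @code
+ * ocs_hw_io_init_sges(hw, io, OCS_HW_IO_TARGET_READ);
+ * ocs_hw_io_add_sge(hw, io, buf_phys, buf_len);
+ * @endcode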
+ */
+ocs_hw_rtn_e
+ocs_hw_io_init_sges(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_hw_io_type_e type)
+{
+ sli4_sge_t *data = NULL;
+ uint32_t i = 0;
+ uint32_t skips = 0;
+
+ if (!hw || !io) {
+ ocs_log_err(hw ? hw->os : NULL, "bad parameter hw=%p io=%p\n",
+ hw, io);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ /* Clear / reset the scatter-gather list */
+ io->sgl = &io->def_sgl;
+ io->sgl_count = io->def_sgl_count;
+ io->first_data_sge = 0;
+
+ ocs_memset(io->sgl->virt, 0, 2 * sizeof(sli4_sge_t));
+ io->n_sge = 0;
+ io->sge_offset = 0;
+
+ io->type = type;
+
+ data = io->sgl->virt;
+
+ /*
+ * Some IO types have underlying hardware requirements on the order
+ * of SGEs. Process all special entries here.
+ */
+ switch (type) {
+ case OCS_HW_IO_INITIATOR_READ:
+ case OCS_HW_IO_INITIATOR_WRITE:
+ case OCS_HW_IO_INITIATOR_NODATA:
+ /*
+ * No skips; two special entries for initiator I/Os.
+ * The addresses and lengths are written later.
+ */
+ /* setup command pointer */
+ data->sge_type = SLI4_SGE_TYPE_DATA;
+ data++;
+
+ /* setup response pointer */
+ data->sge_type = SLI4_SGE_TYPE_DATA;
+
+ if (OCS_HW_IO_INITIATOR_NODATA == type) {
+ data->last = TRUE;
+ }
+ data++;
+
+ io->n_sge = 2;
+ break;
+ case OCS_HW_IO_TARGET_WRITE:
+#define OCS_TARGET_WRITE_SKIPS 2
+ skips = OCS_TARGET_WRITE_SKIPS;
+
+ /* populate host resident XFER_RDY buffer */
+ data->sge_type = SLI4_SGE_TYPE_DATA;
+ data->buffer_address_high = ocs_addr32_hi(io->xfer_rdy.phys);
+ data->buffer_address_low = ocs_addr32_lo(io->xfer_rdy.phys);
+ data->buffer_length = io->xfer_rdy.size;
+ data++;
+
+ skips--;
+
+ io->n_sge = 1;
+ break;
+ case OCS_HW_IO_TARGET_READ:
+ /*
+ * For FCP_TSEND64, the first 2 entries are SKIP SGE's
+ */
+#define OCS_TARGET_READ_SKIPS 2
+ skips = OCS_TARGET_READ_SKIPS;
+ break;
+ case OCS_HW_IO_TARGET_RSP:
+ /*
+ * No skips, etc. for FCP_TRSP64
+ */
+ break;
+ default:
+ ocs_log_err(hw->os, "unsupported IO type %#x\n", type);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ /*
+ * Write skip entries
+ */
+ for (i = 0; i < skips; i++) {
+ data->sge_type = SLI4_SGE_TYPE_SKIP;
+ data++;
+ }
+
+ io->n_sge += skips;
+
+ /*
+ * Set last
+ */
+ data->last = TRUE;
+
+ return OCS_HW_RTN_SUCCESS;
+}
+
+/**
+ * @ingroup io
+ * @brief Add a T10 PI seed scatter gather list entry.
+ *
+ * @param hw Hardware context.
+ * @param io Previously-allocated HW IO object.
+ * @param dif_info Pointer to T10 DIF fields, or NULL if no DIF.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
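+ *
+ * @par Example
+ * A hedged sketch enabling guard (CRC) insertion on the wire side; the
+ * @c blk_size_code value is a placeholder for the device's block-size
+ * encoding, and the operation chosen is illustrative only:
+ * @code
+ * ocs_hw_dif_info_t dif_info = {0};
+ * dif_info.dif_oper = OCS_HW_SGE_DIF_OP_IN_NODIF_OUT_CRC;  // insert CRC guard
+ * dif_info.blk_size = blk_size_code;  // placeholder block-size encoding
+ * ocs_hw_io_add_seed_sge(hw, io, &dif_info);
+ * @endcode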
+ */
+ocs_hw_rtn_e
+ocs_hw_io_add_seed_sge(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_hw_dif_info_t *dif_info)
+{
+ sli4_sge_t *data = NULL;
+ sli4_diseed_sge_t *dif_seed;
+
+ /* If no dif_info, or dif_oper is disabled, then just return success */
+ if ((dif_info == NULL) || (dif_info->dif_oper == OCS_HW_DIF_OPER_DISABLED)) {
+ return OCS_HW_RTN_SUCCESS;
+ }
+
+ if (!hw || !io) {
+ ocs_log_err(hw ? hw->os : NULL, "bad parameter hw=%p io=%p dif_info=%p\n",
+ hw, io, dif_info);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ data = io->sgl->virt;
+ data += io->n_sge;
+
+ /* If we are doing T10 DIF add the DIF Seed SGE */
+ ocs_memset(data, 0, sizeof(sli4_diseed_sge_t));
+ dif_seed = (sli4_diseed_sge_t *)data;
+ dif_seed->ref_tag_cmp = dif_info->ref_tag_cmp;
+ dif_seed->ref_tag_repl = dif_info->ref_tag_repl;
+ dif_seed->app_tag_repl = dif_info->app_tag_repl;
+ dif_seed->repl_app_tag = dif_info->repl_app_tag;
+ if (SLI4_IF_TYPE_LANCER_FC_ETH != hw->sli.if_type) {
+ dif_seed->atrt = dif_info->disable_app_ref_ffff;
+ dif_seed->at = dif_info->disable_app_ffff;
+ }
+ dif_seed->sge_type = SLI4_SGE_TYPE_DISEED;
+ /* Workaround for SKH (BZ157233) */
+ if (((io->type == OCS_HW_IO_TARGET_WRITE) || (io->type == OCS_HW_IO_INITIATOR_READ)) &&
+ (SLI4_IF_TYPE_LANCER_FC_ETH != hw->sli.if_type) && dif_info->dif_separate) {
+ dif_seed->sge_type = SLI4_SGE_TYPE_SKIP;
+ }
+
+ dif_seed->app_tag_cmp = dif_info->app_tag_cmp;
+ dif_seed->dif_blk_size = dif_info->blk_size;
+ dif_seed->auto_incr_ref_tag = dif_info->auto_incr_ref_tag;
+ dif_seed->check_app_tag = dif_info->check_app_tag;
+ dif_seed->check_ref_tag = dif_info->check_ref_tag;
+ dif_seed->check_crc = dif_info->check_guard;
+ dif_seed->new_ref_tag = dif_info->repl_ref_tag;
+
+ switch(dif_info->dif_oper) {
+ case OCS_HW_SGE_DIF_OP_IN_NODIF_OUT_CRC:
+ dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CRC;
+ dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CRC;
+ break;
+ case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_NODIF:
+ dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CRC_OUT_NODIF;
+ dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CRC_OUT_NODIF;
+ break;
+ case OCS_HW_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM:
+ dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM;
+ dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM;
+ break;
+ case OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF:
+ dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF;
+ dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF;
+ break;
+ case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CRC:
+ dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CRC;
+ dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CRC;
+ break;
+ case OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM:
+ dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM;
+ dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM;
+ break;
+ case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CHKSUM:
+ dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CHKSUM;
+ dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CHKSUM;
+ break;
+ case OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_CRC:
+ dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CRC;
+ dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CRC;
+ break;
+ case OCS_HW_SGE_DIF_OP_IN_RAW_OUT_RAW:
+ dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_RAW_OUT_RAW;
+ dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_RAW_OUT_RAW;
+ break;
+ default:
+ ocs_log_err(hw->os, "unsupported DIF operation %#x\n",
+ dif_info->dif_oper);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ /*
+ * Set last, clear previous last
+ */
+ data->last = TRUE;
+ if (io->n_sge) {
+ data[-1].last = FALSE;
+ }
+
+ io->n_sge++;
+
+ return OCS_HW_RTN_SUCCESS;
+}
+
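+/**
+ * @brief Switch an IO to its overflow SGL when the current SGL is full.
+ *
+ * @par Description
+ * On Skyhawk (BE3_SKH_PF/VF) with pre-registered SGLs, a second HW IO is
+ * allocated and its default SGL is chained to via a chained SGE; on other
+ * devices, a link SGE (LSP) pointing at the caller-registered overflow SGL
+ * is written over the last entry of the current SGL.
+ *
+ * @param hw Hardware context.
+ * @param io Previously-allocated HW IO object.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */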
+static ocs_hw_rtn_e
+ocs_hw_io_overflow_sgl(ocs_hw_t *hw, ocs_hw_io_t *io)
+{
+ sli4_lsp_sge_t *lsp;
+
+ /* fail if we're already pointing to the overflow SGL */
+ if (io->sgl == io->ovfl_sgl) {
+ return OCS_HW_RTN_ERROR;
+ }
+
+ /*
+ * For skyhawk, we can use another SGL to extend the SGL list. The
+ * Chained entry must not be in the first 4 entries.
+ *
+ * Note: For DIF enabled IOs, we will use the ovfl_io for the sec_hio.
+ */
+ if (sli_get_sgl_preregister(&hw->sli) &&
+ io->def_sgl_count > 4 &&
+ io->ovfl_io == NULL &&
+ ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
+ (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli)))) {
+ io->ovfl_io = ocs_hw_io_alloc(hw);
+ if (io->ovfl_io != NULL) {
+ /*
+ * Note: We can't call ocs_hw_io_register_sgl() here
+ * because it checks that SGLs are not pre-registered
+ * and for skyhawk, pre-registered SGLs are required.
+ */
+ io->ovfl_sgl = &io->ovfl_io->def_sgl;
+ io->ovfl_sgl_count = io->ovfl_io->def_sgl_count;
+ }
+ }
+
+ /* fail if we don't have an overflow SGL registered */
+ if (io->ovfl_sgl == NULL) {
+ return OCS_HW_RTN_ERROR;
+ }
+
+ /*
+ * Overflow: copy the last SGE of the current SGL to the first entry of the
+ * overflow SGL, then write a link SGE into that last location.
+ */
+
+ ((sli4_sge_t*)io->ovfl_sgl->virt)[0] = ((sli4_sge_t*)io->sgl->virt)[io->n_sge - 1];
+
+ lsp = &((sli4_lsp_sge_t*)io->sgl->virt)[io->n_sge - 1];
+ ocs_memset(lsp, 0, sizeof(*lsp));
+
+ if ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
+ (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli))) {
+ sli_skh_chain_sge_build(&hw->sli,
+ (sli4_sge_t*)lsp,
+ io->ovfl_io->indicator,
+ 0, /* frag_num */
+ 0); /* offset */
+ } else {
+ lsp->buffer_address_high = ocs_addr32_hi(io->ovfl_sgl->phys);
+ lsp->buffer_address_low = ocs_addr32_lo(io->ovfl_sgl->phys);
+ lsp->sge_type = SLI4_SGE_TYPE_LSP;
+ lsp->last = 0;
+ io->ovfl_lsp = lsp;
+ io->ovfl_lsp->segment_length = sizeof(sli4_sge_t);
+ }
+
+ /* Update the current SGL pointer, and n_sgl */
+ io->sgl = io->ovfl_sgl;
+ io->sgl_count = io->ovfl_sgl_count;
+ io->n_sge = 1;
+
+ return OCS_HW_RTN_SUCCESS;
+}
+
+/**
+ * @ingroup io
+ * @brief Add a scatter gather list entry to an IO.
+ *
+ * @param hw Hardware context.
+ * @param io Previously-allocated HW IO object.
+ * @param addr Physical address.
+ * @param length Length of memory pointed to by @c addr.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+ocs_hw_rtn_e
+ocs_hw_io_add_sge(ocs_hw_t *hw, ocs_hw_io_t *io, uintptr_t addr, uint32_t length)
+{
+ sli4_sge_t *data = NULL;
+
+ if (!hw || !io || !addr || !length) {
+ ocs_log_err(hw ? hw->os : NULL,
+ "bad parameter hw=%p io=%p addr=%lx length=%u\n",
+ hw, io, addr, length);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ if ((length != 0) && (io->n_sge + 1) > io->sgl_count) {
+ if (ocs_hw_io_overflow_sgl(hw, io) != OCS_HW_RTN_SUCCESS) {
+ ocs_log_err(hw->os, "SGL full (%d)\n", io->n_sge);
+ return OCS_HW_RTN_ERROR;
+ }
+ }
+
+ if (length > sli_get_max_sge(&hw->sli)) {
+ ocs_log_err(hw->os, "length of SGE %d bigger than allowed %d\n",
+ length, sli_get_max_sge(&hw->sli));
+ return OCS_HW_RTN_ERROR;
+ }
+
+ data = io->sgl->virt;
+ data += io->n_sge;
+
+ data->sge_type = SLI4_SGE_TYPE_DATA;
+ data->buffer_address_high = ocs_addr32_hi(addr);
+ data->buffer_address_low = ocs_addr32_lo(addr);
+ data->buffer_length = length;
+ data->data_offset = io->sge_offset;
+ /*
+ * Always assume this is the last entry and mark it as such.
+ * If this is not the first entry, unset the "last SGE"
+ * indication on the previous entry.
+ */
+ data->last = TRUE;
+ if (io->n_sge) {
+ data[-1].last = FALSE;
+ }
+
+ /* Set first_data_bde if not previously set */
+ if (io->first_data_sge == 0) {
+ io->first_data_sge = io->n_sge;
+ }
+
+ io->sge_offset += length;
+ io->n_sge++;
+
+ /* Update the linked segment length (only executed after overflow has begun) */
+ if (io->ovfl_lsp != NULL) {
+ io->ovfl_lsp->segment_length = io->n_sge * sizeof(sli4_sge_t);
+ }
+
+ return OCS_HW_RTN_SUCCESS;
+}
+
+/**
+ * @ingroup io
+ * @brief Add a T10 DIF scatter gather list entry to an IO.
+ *
+ * @param hw Hardware context.
+ * @param io Previously-allocated HW IO object.
+ * @param addr DIF physical address.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+ocs_hw_rtn_e
+ocs_hw_io_add_dif_sge(ocs_hw_t *hw, ocs_hw_io_t *io, uintptr_t addr)
+{
+ sli4_dif_sge_t *data = NULL;
+
+ if (!hw || !io || !addr) {
+ ocs_log_err(hw ? hw->os : NULL,
+ "bad parameter hw=%p io=%p addr=%lx\n",
+ hw, io, addr);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ if ((io->n_sge + 1) > hw->config.n_sgl) {
+ if (ocs_hw_io_overflow_sgl(hw, io) != OCS_HW_RTN_SUCCESS) {
+ ocs_log_err(hw->os, "SGL full (%d)\n", io->n_sge);
+ return OCS_HW_RTN_ERROR;
+ }
+ }
+
+ data = io->sgl->virt;
+ data += io->n_sge;
+
+ data->sge_type = SLI4_SGE_TYPE_DIF;
+ /* Workaround for SKH (BZ157233) */
+ if (((io->type == OCS_HW_IO_TARGET_WRITE) || (io->type == OCS_HW_IO_INITIATOR_READ)) &&
+ (SLI4_IF_TYPE_LANCER_FC_ETH != hw->sli.if_type)) {
+ data->sge_type = SLI4_SGE_TYPE_SKIP;
+ }
+
+ data->buffer_address_high = ocs_addr32_hi(addr);
+ data->buffer_address_low = ocs_addr32_lo(addr);
+
+ /*
+ * Always assume this is the last entry and mark it as such.
+ * If this is not the first entry, unset the "last SGE"
+ * indication on the previous entry.
+ */
+ data->last = TRUE;
+ if (io->n_sge) {
+ data[-1].last = FALSE;
+ }
+
+ io->n_sge++;
+
+ return OCS_HW_RTN_SUCCESS;
+}
+
+/**
+ * @ingroup io
+ * @brief Abort a previously-started IO.
+ *
+ * @param hw Hardware context.
+ * @param io_to_abort The IO to abort.
+ * @param send_abts Boolean to have the hardware automatically
+ * generate an ABTS.
+ * @param cb Function call upon completion of the abort (may be NULL).
+ * @param arg Argument to pass to abort completion function.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
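+ *
+ * @par Example
+ * A hedged sketch aborting an outstanding IO and requesting that the Port
+ * emit an ABTS (@c abort_done_cb is a caller-provided callback):
+ * @code
+ * ocs_hw_rtn_e rc = ocs_hw_io_abort(hw, io, TRUE, abort_done_cb, NULL);
+ * @endcode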
+ */
+ocs_hw_rtn_e
+ocs_hw_io_abort(ocs_hw_t *hw, ocs_hw_io_t *io_to_abort, uint32_t send_abts, void *cb, void *arg)
+{
+ sli4_abort_type_e atype = SLI_ABORT_MAX;
+ uint32_t id = 0, mask = 0;
+ ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
+ hw_wq_callback_t *wqcb;
+
+ if (!hw || !io_to_abort) {
+ ocs_log_err(hw ? hw->os : NULL,
+ "bad parameter hw=%p io=%p\n",
+ hw, io_to_abort);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ if (hw->state != OCS_HW_STATE_ACTIVE) {
+ ocs_log_err(hw->os, "cannot send IO abort, HW state=%d\n",
+ hw->state);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ /* take a reference on IO being aborted */
+ if (ocs_ref_get_unless_zero(&io_to_abort->ref) == 0) {
+ /* command no longer active */
+ ocs_log_test(hw ? hw->os : NULL,
+ "io not active xri=0x%x tag=0x%x\n",
+ io_to_abort->indicator, io_to_abort->reqtag);
+ return OCS_HW_RTN_IO_NOT_ACTIVE;
+ }
+
+ /* non-port owned XRI checks */
+ /* Must have a valid WQ reference */
+ if (io_to_abort->wq == NULL) {
+ ocs_log_test(hw->os, "io_to_abort xri=0x%x not active on WQ\n",
+ io_to_abort->indicator);
+ ocs_ref_put(&io_to_abort->ref); /* ocs_ref_get(): same function */
+ return OCS_HW_RTN_IO_NOT_ACTIVE;
+ }
+
+ /* Validation checks complete; now check to see if already being aborted */
+ ocs_lock(&hw->io_abort_lock);
+ if (io_to_abort->abort_in_progress) {
+ ocs_unlock(&hw->io_abort_lock);
+ ocs_ref_put(&io_to_abort->ref); /* ocs_ref_get(): same function */
+ ocs_log_debug(hw ? hw->os : NULL,
+ "io already being aborted xri=0x%x tag=0x%x\n",
+ io_to_abort->indicator, io_to_abort->reqtag);
+ return OCS_HW_RTN_IO_ABORT_IN_PROGRESS;
+ }
+
+ /*
+ * This IO is not already being aborted. Set flag so we won't try to
+ * abort it again. After all, we only have one abort_done callback.
+ */
+ io_to_abort->abort_in_progress = 1;
+ ocs_unlock(&hw->io_abort_lock);
+
+ /*
+ * If we got here, the possibilities are:
+ * - host owned xri
+ * - io_to_abort->wq_index != UINT32_MAX
+ * - submit ABORT_WQE to same WQ
+ * - port owned xri:
+ * - rxri: io_to_abort->wq_index == UINT32_MAX
+ * - submit ABORT_WQE to any WQ
+ * - non-rxri
+ * - io_to_abort->index != UINT32_MAX
+ * - submit ABORT_WQE to same WQ
+ * - io_to_abort->index == UINT32_MAX
+ * - submit ABORT_WQE to any WQ
+ */
+ io_to_abort->abort_done = cb;
+ io_to_abort->abort_arg = arg;
+
+ atype = SLI_ABORT_XRI;
+ id = io_to_abort->indicator;
+
+ /* Allocate a request tag for the abort portion of this IO */
+ wqcb = ocs_hw_reqtag_alloc(hw, ocs_hw_wq_process_abort, io_to_abort);
+ if (wqcb == NULL) {
+ ocs_log_err(hw->os, "can't allocate request tag\n");
+ ocs_lock(&hw->io_abort_lock);
+ io_to_abort->abort_in_progress = 0;
+ ocs_unlock(&hw->io_abort_lock);
+ ocs_ref_put(&io_to_abort->ref); /* ocs_ref_get(): same function */
+ return OCS_HW_RTN_NO_RESOURCES;
+ }
+ io_to_abort->abort_reqtag = wqcb->instance_index;
+
+ /*
+ * If the wqe is on the pending list, then set this wqe to be
+ * aborted when the IO's wqe is removed from the list.
+ */
+ if (io_to_abort->wq != NULL) {
+ sli_queue_lock(io_to_abort->wq->queue);
+ if (ocs_list_on_list(&io_to_abort->wqe.link)) {
+ io_to_abort->wqe.abort_wqe_submit_needed = 1;
+ io_to_abort->wqe.send_abts = send_abts;
+ io_to_abort->wqe.id = id;
+ io_to_abort->wqe.abort_reqtag = io_to_abort->abort_reqtag;
+ sli_queue_unlock(io_to_abort->wq->queue);
+ return 0;
+ }
+ sli_queue_unlock(io_to_abort->wq->queue);
+ }
+
+ if (sli_abort_wqe(&hw->sli, io_to_abort->wqe.wqebuf, hw->sli.config.wqe_size, atype, send_abts, id, mask,
+ io_to_abort->abort_reqtag, SLI4_CQ_DEFAULT)) {
+ ocs_log_err(hw->os, "ABORT WQE error\n");
+ io_to_abort->abort_reqtag = UINT32_MAX;
+ ocs_hw_reqtag_free(hw, wqcb);
+ rc = OCS_HW_RTN_ERROR;
+ }
+
+ if (OCS_HW_RTN_SUCCESS == rc) {
+ if (io_to_abort->wq == NULL) {
+ io_to_abort->wq = ocs_hw_queue_next_wq(hw, io_to_abort);
+ ocs_hw_assert(io_to_abort->wq != NULL);
+ }
+ /* ABORT_WQE does not actually utilize an XRI on the Port,
+ * therefore, keep xbusy as-is to track the exchange's state,
+ * not the ABORT_WQE's state
+ */
+ rc = hw_wq_write(io_to_abort->wq, &io_to_abort->wqe);
+ if (rc >= 0) {
+ /* non-negative return is success */
+ rc = 0;
+ /* can't abort an abort so skip adding to timed wqe list */
+ }
+ }
+
+ if (OCS_HW_RTN_SUCCESS != rc) {
+ ocs_lock(&hw->io_abort_lock);
+ io_to_abort->abort_in_progress = 0;
+ ocs_unlock(&hw->io_abort_lock);
+ ocs_ref_put(&io_to_abort->ref); /* ocs_ref_get(): same function */
+ }
+ return rc;
+}
+
+/**
+ * @ingroup io
+ * @brief Return the OX_ID/RX_ID of the IO.
+ *
+ * @param hw Hardware context.
+ * @param io HW IO object.
+ *
+ * @return Returns X_ID on success, or -1 on failure.
+ */
+int32_t
+ocs_hw_io_get_xid(ocs_hw_t *hw, ocs_hw_io_t *io)
+{
+ if (!hw || !io) {
+ ocs_log_err(hw ? hw->os : NULL,
+ "bad parameter hw=%p io=%p\n", hw, io);
+ return -1;
+ }
+
+ return io->indicator;
+}
+
+typedef struct ocs_hw_fw_write_cb_arg {
+ ocs_hw_fw_cb_t cb;
+ void *arg;
+} ocs_hw_fw_write_cb_arg_t;
+
+typedef struct ocs_hw_sfp_cb_arg {
+ ocs_hw_sfp_cb_t cb;
+ void *arg;
+ ocs_dma_t payload;
+} ocs_hw_sfp_cb_arg_t;
+
+typedef struct ocs_hw_temp_cb_arg {
+ ocs_hw_temp_cb_t cb;
+ void *arg;
+} ocs_hw_temp_cb_arg_t;
+
+typedef struct ocs_hw_link_stat_cb_arg {
+ ocs_hw_link_stat_cb_t cb;
+ void *arg;
+} ocs_hw_link_stat_cb_arg_t;
+
+typedef struct ocs_hw_host_stat_cb_arg {
+ ocs_hw_host_stat_cb_t cb;
+ void *arg;
+} ocs_hw_host_stat_cb_arg_t;
+
+typedef struct ocs_hw_dump_get_cb_arg {
+ ocs_hw_dump_get_cb_t cb;
+ void *arg;
+ void *mbox_cmd;
+} ocs_hw_dump_get_cb_arg_t;
+
+typedef struct ocs_hw_dump_clear_cb_arg {
+ ocs_hw_dump_clear_cb_t cb;
+ void *arg;
+ void *mbox_cmd;
+} ocs_hw_dump_clear_cb_arg_t;
+
+/**
+ * @brief Write a portion of a firmware image to the device.
+ *
+ * @par Description
+ * Calls the correct firmware write function based on the device type.
+ *
+ * @param hw Hardware context.
+ * @param dma DMA structure containing the firmware image chunk.
+ * @param size Size of the firmware image chunk.
+ * @param offset Offset, in bytes, from the beginning of the firmware image.
+ * @param last True if this is the last chunk of the image.
+ * Causes the image to be committed to flash.
+ * @param cb Pointer to a callback function that is called when the command completes.
+ * The callback function prototype is
+ * <tt>void cb(int32_t status, uint32_t bytes_written, uint32_t change_status, void *arg)</tt>.
+ * @param arg Pointer to be passed to the callback function.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
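+ *
+ * @par Example
+ * A hedged sketch of writing one chunk; the DMA buffer is assumed to
+ * already hold @c chunk_len bytes of the image at offset @c offset:
+ * @code
+ * ocs_hw_rtn_e rc = ocs_hw_firmware_write(hw, &chunk_dma, chunk_len, offset,
+ *                                         is_last_chunk, fw_write_done_cb, NULL);
+ * @endcode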
+ */
+ocs_hw_rtn_e
+ocs_hw_firmware_write(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, int last, ocs_hw_fw_cb_t cb, void *arg)
+{
+ if (hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) {
+ return ocs_hw_firmware_write_lancer(hw, dma, size, offset, last, cb, arg);
+ } else {
+ /* Write firmware_write for BE3/Skyhawk not supported */
+ return -1;
+ }
+}
+
+/**
+ * @brief Write a portion of a firmware image to the Emulex XE201 ASIC (Lancer).
+ *
+ * @par Description
+ * Creates a SLI_CONFIG mailbox command, fills it with the correct values to write a
+ * firmware image chunk, and then sends the command with ocs_hw_command(). On completion,
+ * the callback function ocs_hw_fw_write_cb() gets called to free the mailbox
+ * and to signal the caller that the write has completed.
+ *
+ * @param hw Hardware context.
+ * @param dma DMA structure containing the firmware image chunk.
+ * @param size Size of the firmware image chunk.
+ * @param offset Offset, in bytes, from the beginning of the firmware image.
+ * @param last True if this is the last chunk of the image. Causes the image to be committed to flash.
+ * @param cb Pointer to a callback function that is called when the command completes.
+ * The callback function prototype is
+ * <tt>void cb(int32_t status, uint32_t bytes_written, uint32_t change_status, void *arg)</tt>.
+ * @param arg Pointer to be passed to the callback function.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+ocs_hw_rtn_e
+ocs_hw_firmware_write_lancer(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, int last, ocs_hw_fw_cb_t cb, void *arg)
+{
+ ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
+ uint8_t *mbxdata;
+ ocs_hw_fw_write_cb_arg_t *cb_arg;
+ int noc = 0; /* No Commit bit - set to 1 for testing */
+
+ if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
+ ocs_log_test(hw->os, "Function only supported for I/F type 2\n");
+ return OCS_HW_RTN_ERROR;
+ }
+
+ mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
+ if (mbxdata == NULL) {
+ ocs_log_err(hw->os, "failed to malloc mbox\n");
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_fw_write_cb_arg_t), OCS_M_NOWAIT);
+ if (cb_arg == NULL) {
+ ocs_log_err(hw->os, "failed to malloc cb_arg\n");
+ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ cb_arg->cb = cb;
+ cb_arg->arg = arg;
+
+ if (sli_cmd_common_write_object(&hw->sli, mbxdata, SLI4_BMBX_SIZE, noc, last,
+ size, offset, "/prg/", dma)) {
+ rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_fw_write, cb_arg);
+ }
+
+ if (rc != OCS_HW_RTN_SUCCESS) {
+ ocs_log_test(hw->os, "COMMON_WRITE_OBJECT failed\n");
+ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+ ocs_free(hw->os, cb_arg, sizeof(ocs_hw_fw_write_cb_arg_t));
+ }
+
+	return rc;
+}
+
+/**
+ * @brief Called when the WRITE OBJECT command completes.
+ *
+ * @par Description
+ * Get the number of bytes actually written out of the response, free the mailbox
+ * that was malloc'd by ocs_hw_firmware_write_lancer(),
+ * then call the callback and pass the status, bytes written, and change status.
+ *
+ * @param hw Hardware context.
+ * @param status Status field from the mbox completion.
+ * @param mqe Mailbox response structure.
+ * @param arg Pointer to the callback argument (ocs_hw_fw_write_cb_arg_t).
+ * The callback function prototype is
+ * <tt>void cb(int32_t status, uint32_t bytes_written, uint32_t change_status, void *arg)</tt>.
+ *
+ * @return Returns 0.
+ */
+static int32_t
+ocs_hw_cb_fw_write(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
+{
+	sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
+ sli4_res_common_write_object_t* wr_obj_rsp = (sli4_res_common_write_object_t*) &(mbox_rsp->payload.embed);
+ ocs_hw_fw_write_cb_arg_t *cb_arg = arg;
+ uint32_t bytes_written;
+ uint16_t mbox_status;
+ uint32_t change_status;
+
+ bytes_written = wr_obj_rsp->actual_write_length;
+ mbox_status = mbox_rsp->hdr.status;
+ change_status = wr_obj_rsp->change_status;
+
+ ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
+
+ if (cb_arg) {
+ if (cb_arg->cb) {
+ if ((status == 0) && mbox_status) {
+ status = mbox_status;
+ }
+ cb_arg->cb(status, bytes_written, change_status, cb_arg->arg);
+ }
+
+ ocs_free(hw->os, cb_arg, sizeof(ocs_hw_fw_write_cb_arg_t));
+ }
+
+	return 0;
+}
+
+/**
+ * @brief Called when the READ_TRANSCEIVER_DATA command completes.
+ *
+ * @par Description
+ * Get the number of bytes read out of the response, free the mailbox that was malloc'd
+ * by ocs_hw_get_sfp(), then call the callback and pass the status and bytes read.
+ *
+ * @param hw Hardware context.
+ * @param status Status field from the mbox completion.
+ * @param mqe Mailbox response structure.
+ * @param arg Pointer to the callback argument (ocs_hw_sfp_cb_arg_t).
+ * The callback function prototype is defined by ocs_hw_sfp_cb_t and is
+ * invoked as <tt>cb(hw->os, status, bytes_read, page_data, arg)</tt>.
+ *
+ * @return Returns 0.
+ */
+static int32_t
+ocs_hw_cb_sfp(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
+{
+	ocs_hw_sfp_cb_arg_t *cb_arg = arg;
+ ocs_dma_t *payload = NULL;
+ sli4_res_common_read_transceiver_data_t* mbox_rsp = NULL;
+ uint32_t bytes_written;
+
+ if (cb_arg) {
+ payload = &(cb_arg->payload);
+ if (cb_arg->cb) {
+ mbox_rsp = (sli4_res_common_read_transceiver_data_t*) payload->virt;
+ bytes_written = mbox_rsp->hdr.response_length;
+ if ((status == 0) && mbox_rsp->hdr.status) {
+ status = mbox_rsp->hdr.status;
+ }
+ cb_arg->cb(hw->os, status, bytes_written, mbox_rsp->page_data, cb_arg->arg);
+ }
+
+ ocs_dma_free(hw->os, &cb_arg->payload);
+ ocs_free(hw->os, cb_arg, sizeof(ocs_hw_sfp_cb_arg_t));
+ }
+
+ ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
+ return 0;
+}
+
+/**
+ * @ingroup io
+ * @brief Function to retrieve the SFP information.
+ *
+ * @param hw Hardware context.
+ * @param page The page of SFP data to retrieve (0xa0 or 0xa2).
+ * @param cb Function to call upon completion of retrieving the data (may be NULL).
+ * @param arg Argument to pass to the completion callback.
+ *
+ * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY.
+ */
+ocs_hw_rtn_e
+ocs_hw_get_sfp(ocs_hw_t *hw, uint16_t page, ocs_hw_sfp_cb_t cb, void *arg)
+{
+ ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
+ ocs_hw_sfp_cb_arg_t *cb_arg;
+ uint8_t *mbxdata;
+
+ /* mbxdata holds the header of the command */
+ mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
+ if (mbxdata == NULL) {
+ ocs_log_err(hw->os, "failed to malloc mbox\n");
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ /* cb_arg holds the data that will be passed to the callback on completion */
+ cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_sfp_cb_arg_t), OCS_M_NOWAIT);
+ if (cb_arg == NULL) {
+ ocs_log_err(hw->os, "failed to malloc cb_arg\n");
+ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ cb_arg->cb = cb;
+ cb_arg->arg = arg;
+
+ /* payload holds the non-embedded portion */
+ if (ocs_dma_alloc(hw->os, &cb_arg->payload, sizeof(sli4_res_common_read_transceiver_data_t),
+ OCS_MIN_DMA_ALIGNMENT)) {
+ ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
+ ocs_free(hw->os, cb_arg, sizeof(ocs_hw_sfp_cb_arg_t));
+ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ /* Send the HW command */
+ if (sli_cmd_common_read_transceiver_data(&hw->sli, mbxdata, SLI4_BMBX_SIZE, page,
+ &cb_arg->payload)) {
+ rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_sfp, cb_arg);
+ }
+
+ if (rc != OCS_HW_RTN_SUCCESS) {
+ ocs_log_test(hw->os, "READ_TRANSCEIVER_DATA failed with status %d\n",
+ rc);
+ ocs_dma_free(hw->os, &cb_arg->payload);
+ ocs_free(hw->os, cb_arg, sizeof(ocs_hw_sfp_cb_arg_t));
+ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+ }
+
+ return rc;
+}
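+
+/*
+ * Illustrative sketch (not part of the driver): reading SFP page 0xa0.
+ * The callback parameters mirror how ocs_hw_cb_sfp() invokes the
+ * ocs_hw_sfp_cb_t callback, whose first argument receives hw->os; the
+ * names sfp_done and my_ctx are hypothetical.
+ *
+ *	static void
+ *	sfp_done(void *os, int32_t status, uint32_t bytes_read,
+ *		 uint32_t *data, void *arg)
+ *	{
+ *		if (status == 0) {
+ *			// data points at the transceiver page bytes,
+ *			// bytes_read of which are valid
+ *		}
+ *	}
+ *
+ *	ocs_hw_get_sfp(hw, 0xa0, sfp_done, my_ctx);
+ */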
+
+/**
+ * @brief Function to retrieve the temperature information.
+ *
+ * @param hw Hardware context.
+ * @param cb Function to call upon completion of retrieving the data (may be NULL).
+ * @param arg Argument to pass to the completion callback.
+ *
+ * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY.
+ */
+ocs_hw_rtn_e
+ocs_hw_get_temperature(ocs_hw_t *hw, ocs_hw_temp_cb_t cb, void *arg)
+{
+ ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
+ ocs_hw_temp_cb_arg_t *cb_arg;
+ uint8_t *mbxdata;
+
+ mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
+ if (mbxdata == NULL) {
+ ocs_log_err(hw->os, "failed to malloc mbox");
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_temp_cb_arg_t), OCS_M_NOWAIT);
+ if (cb_arg == NULL) {
+ ocs_log_err(hw->os, "failed to malloc cb_arg");
+ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ cb_arg->cb = cb;
+ cb_arg->arg = arg;
+
+ if (sli_cmd_dump_type4(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
+ SLI4_WKI_TAG_SAT_TEM)) {
+ rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_temp, cb_arg);
+ }
+
+ if (rc != OCS_HW_RTN_SUCCESS) {
+ ocs_log_test(hw->os, "DUMP_TYPE4 failed\n");
+ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+ ocs_free(hw->os, cb_arg, sizeof(ocs_hw_temp_cb_arg_t));
+ }
+
+ return rc;
+}
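+
+/*
+ * Illustrative sketch (not part of the driver): a completion routine
+ * whose parameters match the order in which ocs_hw_cb_temp() invokes the
+ * ocs_hw_temp_cb_t callback. The name temp_done is hypothetical.
+ *
+ *	static void
+ *	temp_done(int32_t status, uint32_t curr_temp,
+ *		  uint32_t crit_temp_thrshld, uint32_t warn_temp_thrshld,
+ *		  uint32_t norm_temp_thrshld, uint32_t fan_off_thrshld,
+ *		  uint32_t fan_on_thrshld, void *arg)
+ *	{
+ *		if (status == 0 && curr_temp >= warn_temp_thrshld) {
+ *			// e.g. log a warning
+ *		}
+ *	}
+ *
+ *	ocs_hw_get_temperature(hw, temp_done, NULL);
+ */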
+
+/**
+ * @brief Called when the DUMP command completes.
+ *
+ * @par Description
+ * Get the temperature data out of the response, free the mailbox that was malloc'd
+ * by ocs_hw_get_temperature(), then call the callback and pass the status and data.
+ *
+ * @param hw Hardware context.
+ * @param status Status field from the mbox completion.
+ * @param mqe Mailbox response structure.
+ * @param arg Pointer to the callback argument (ocs_hw_temp_cb_arg_t).
+ * The callback function prototype is defined by ocs_hw_temp_cb_t.
+ *
+ * @return Returns 0.
+ */
+static int32_t
+ocs_hw_cb_temp(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
+{
+	sli4_cmd_dump4_t* mbox_rsp = (sli4_cmd_dump4_t*) mqe;
+	ocs_hw_temp_cb_arg_t *cb_arg = arg;
+	uint32_t curr_temp = mbox_rsp->resp_data[0]; /* word 5 */
+	uint32_t crit_temp_thrshld = mbox_rsp->resp_data[1]; /* word 6 */
+ uint32_t warn_temp_thrshld = mbox_rsp->resp_data[2]; /* word 7 */
+ uint32_t norm_temp_thrshld = mbox_rsp->resp_data[3]; /* word 8 */
+ uint32_t fan_off_thrshld = mbox_rsp->resp_data[4]; /* word 9 */
+ uint32_t fan_on_thrshld = mbox_rsp->resp_data[5]; /* word 10 */
+
+ if (cb_arg) {
+ if (cb_arg->cb) {
+ if ((status == 0) && mbox_rsp->hdr.status) {
+ status = mbox_rsp->hdr.status;
+ }
+ cb_arg->cb(status,
+ curr_temp,
+ crit_temp_thrshld,
+ warn_temp_thrshld,
+ norm_temp_thrshld,
+ fan_off_thrshld,
+ fan_on_thrshld,
+ cb_arg->arg);
+ }
+
+ ocs_free(hw->os, cb_arg, sizeof(ocs_hw_temp_cb_arg_t));
+ }
+ ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
+
+ return 0;
+}
+
+/**
+ * @brief Function to retrieve the link statistics.
+ *
+ * @param hw Hardware context.
+ * @param req_ext_counters If TRUE, then the extended counters will be requested.
+ * @param clear_overflow_flags If TRUE, then overflow flags will be cleared.
+ * @param clear_all_counters If TRUE, the counters will be cleared.
+ * @param cb Function to call upon completion of retrieving the data (may be NULL).
+ * @param arg Argument to pass to the completion callback.
+ *
+ * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY.
+ */
+ocs_hw_rtn_e
+ocs_hw_get_link_stats(ocs_hw_t *hw,
+ uint8_t req_ext_counters,
+ uint8_t clear_overflow_flags,
+ uint8_t clear_all_counters,
+ ocs_hw_link_stat_cb_t cb,
+ void *arg)
+{
+ ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
+ ocs_hw_link_stat_cb_arg_t *cb_arg;
+ uint8_t *mbxdata;
+
+ mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
+ if (mbxdata == NULL) {
+ ocs_log_err(hw->os, "failed to malloc mbox");
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_link_stat_cb_arg_t), OCS_M_NOWAIT);
+ if (cb_arg == NULL) {
+ ocs_log_err(hw->os, "failed to malloc cb_arg");
+ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ cb_arg->cb = cb;
+ cb_arg->arg = arg;
+
+ if (sli_cmd_read_link_stats(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
+ req_ext_counters,
+ clear_overflow_flags,
+ clear_all_counters)) {
+ rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_link_stat, cb_arg);
+ }
+
+ if (rc != OCS_HW_RTN_SUCCESS) {
+ ocs_log_test(hw->os, "READ_LINK_STATS failed\n");
+ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+ ocs_free(hw->os, cb_arg, sizeof(ocs_hw_link_stat_cb_arg_t));
+ }
+
+ return rc;
+}
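+
+/*
+ * Illustrative sketch (not part of the driver): consuming the counter
+ * array the way ocs_hw_cb_link_stat() delivers it. The name
+ * link_stats_done is hypothetical.
+ *
+ *	static void
+ *	link_stats_done(int32_t status, uint32_t num_counters,
+ *			ocs_hw_link_stat_counts_t *counters, void *arg)
+ *	{
+ *		if (status == 0 &&
+ *		    !counters[OCS_HW_LINK_STAT_CRC_COUNT].overflow) {
+ *			// counters[OCS_HW_LINK_STAT_CRC_COUNT].counter
+ *			// has not wrapped and may be reported as-is
+ *		}
+ *	}
+ *
+ *	// request the extended counters, clearing nothing
+ *	ocs_hw_get_link_stats(hw, TRUE, FALSE, FALSE, link_stats_done, NULL);
+ */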
+
+/**
+ * @brief Called when the READ_LINK_STAT command completes.
+ *
+ * @par Description
+ * Get the counters out of the response, free the mailbox that was malloc'd
+ * by ocs_hw_get_link_stats(), then call the callback and pass the status and data.
+ *
+ * @param hw Hardware context.
+ * @param status Status field from the mbox completion.
+ * @param mqe Mailbox response structure.
+ * @param arg Pointer to the callback argument (ocs_hw_link_stat_cb_arg_t).
+ * The callback function prototype is defined by ocs_hw_link_stat_cb_t.
+ *
+ * @return Returns 0.
+ */
+static int32_t
+ocs_hw_cb_link_stat(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
+{
+	sli4_cmd_read_link_stats_t* mbox_rsp = (sli4_cmd_read_link_stats_t*) mqe;
+ ocs_hw_link_stat_cb_arg_t *cb_arg = arg;
+ ocs_hw_link_stat_counts_t counts[OCS_HW_LINK_STAT_MAX];
+ uint32_t num_counters = (mbox_rsp->gec ? 20 : 13);
+
+ ocs_memset(counts, 0, sizeof(ocs_hw_link_stat_counts_t) *
+ OCS_HW_LINK_STAT_MAX);
+
+ counts[OCS_HW_LINK_STAT_LINK_FAILURE_COUNT].overflow = mbox_rsp->w02of;
+ counts[OCS_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].overflow = mbox_rsp->w03of;
+ counts[OCS_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT].overflow = mbox_rsp->w04of;
+ counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].overflow = mbox_rsp->w05of;
+ counts[OCS_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].overflow = mbox_rsp->w06of;
+ counts[OCS_HW_LINK_STAT_CRC_COUNT].overflow = mbox_rsp->w07of;
+ counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT].overflow = mbox_rsp->w08of;
+ counts[OCS_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT].overflow = mbox_rsp->w09of;
+ counts[OCS_HW_LINK_STAT_ARB_TIMEOUT_COUNT].overflow = mbox_rsp->w10of;
+ counts[OCS_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT].overflow = mbox_rsp->w11of;
+ counts[OCS_HW_LINK_STAT_CURR_RCV_B2B_CREDIT].overflow = mbox_rsp->w12of;
+ counts[OCS_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT].overflow = mbox_rsp->w13of;
+ counts[OCS_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT].overflow = mbox_rsp->w14of;
+ counts[OCS_HW_LINK_STAT_RCV_EOFA_COUNT].overflow = mbox_rsp->w15of;
+ counts[OCS_HW_LINK_STAT_RCV_EOFDTI_COUNT].overflow = mbox_rsp->w16of;
+ counts[OCS_HW_LINK_STAT_RCV_EOFNI_COUNT].overflow = mbox_rsp->w17of;
+ counts[OCS_HW_LINK_STAT_RCV_SOFF_COUNT].overflow = mbox_rsp->w18of;
+ counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT].overflow = mbox_rsp->w19of;
+ counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT].overflow = mbox_rsp->w20of;
+ counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT].overflow = mbox_rsp->w21of;
+
+ counts[OCS_HW_LINK_STAT_LINK_FAILURE_COUNT].counter = mbox_rsp->link_failure_error_count;
+ counts[OCS_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].counter = mbox_rsp->loss_of_sync_error_count;
+ counts[OCS_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT].counter = mbox_rsp->loss_of_signal_error_count;
+ counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].counter = mbox_rsp->primitive_sequence_error_count;
+ counts[OCS_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].counter = mbox_rsp->invalid_transmission_word_error_count;
+ counts[OCS_HW_LINK_STAT_CRC_COUNT].counter = mbox_rsp->crc_error_count;
+ counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT].counter = mbox_rsp->primitive_sequence_event_timeout_count;
+ counts[OCS_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT].counter = mbox_rsp->elastic_buffer_overrun_error_count;
+ counts[OCS_HW_LINK_STAT_ARB_TIMEOUT_COUNT].counter = mbox_rsp->arbitration_fc_al_timout_count;
+ counts[OCS_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT].counter = mbox_rsp->advertised_receive_bufftor_to_buffer_credit;
+ counts[OCS_HW_LINK_STAT_CURR_RCV_B2B_CREDIT].counter = mbox_rsp->current_receive_buffer_to_buffer_credit;
+ counts[OCS_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT].counter = mbox_rsp->advertised_transmit_buffer_to_buffer_credit;
+ counts[OCS_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT].counter = mbox_rsp->current_transmit_buffer_to_buffer_credit;
+ counts[OCS_HW_LINK_STAT_RCV_EOFA_COUNT].counter = mbox_rsp->received_eofa_count;
+ counts[OCS_HW_LINK_STAT_RCV_EOFDTI_COUNT].counter = mbox_rsp->received_eofdti_count;
+ counts[OCS_HW_LINK_STAT_RCV_EOFNI_COUNT].counter = mbox_rsp->received_eofni_count;
+ counts[OCS_HW_LINK_STAT_RCV_SOFF_COUNT].counter = mbox_rsp->received_soff_count;
+ counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT].counter = mbox_rsp->received_dropped_no_aer_count;
+ counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT].counter = mbox_rsp->received_dropped_no_available_rpi_resources_count;
+ counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT].counter = mbox_rsp->received_dropped_no_available_xri_resources_count;
+
+ if (cb_arg) {
+ if (cb_arg->cb) {
+ if ((status == 0) && mbox_rsp->hdr.status) {
+ status = mbox_rsp->hdr.status;
+ }
+ cb_arg->cb(status,
+ num_counters,
+ counts,
+ cb_arg->arg);
+ }
+
+ ocs_free(hw->os, cb_arg, sizeof(ocs_hw_link_stat_cb_arg_t));
+ }
+ ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
+
+ return 0;
+}
+
+/**
+ * @brief Function to retrieve the host statistics.
+ *
+ * @param hw Hardware context.
+ * @param cc Clear counters; if TRUE, all counters are cleared after being read.
+ * @param cb Function to call upon completion of receiving the data.
+ * @param arg Argument to pass to the completion callback.
+ *
+ * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY.
+ */
+ocs_hw_rtn_e
+ocs_hw_get_host_stats(ocs_hw_t *hw, uint8_t cc, ocs_hw_host_stat_cb_t cb, void *arg)
+{
+ ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
+ ocs_hw_host_stat_cb_arg_t *cb_arg;
+ uint8_t *mbxdata;
+
+ mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO);
+ if (mbxdata == NULL) {
+ ocs_log_err(hw->os, "failed to malloc mbox");
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_host_stat_cb_arg_t), 0);
+ if (cb_arg == NULL) {
+ ocs_log_err(hw->os, "failed to malloc cb_arg");
+ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ cb_arg->cb = cb;
+ cb_arg->arg = arg;
+
+ /* Send the HW command to get the host stats */
+ if (sli_cmd_read_status(&hw->sli, mbxdata, SLI4_BMBX_SIZE, cc)) {
+ rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_host_stat, cb_arg);
+ }
+
+ if (rc != OCS_HW_RTN_SUCCESS) {
+ ocs_log_test(hw->os, "READ_HOST_STATS failed\n");
+ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+ ocs_free(hw->os, cb_arg, sizeof(ocs_hw_host_stat_cb_arg_t));
+ }
+
+ return rc;
+}
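+
+/*
+ * Illustrative sketch (not part of the driver): host statistics arrive
+ * through the same callback pattern as the link statistics. The name
+ * host_stats_done is hypothetical.
+ *
+ *	static void
+ *	host_stats_done(int32_t status, uint32_t num_counters,
+ *			ocs_hw_host_stat_counts_t *counters, void *arg)
+ *	{
+ *		// e.g. report
+ *		// counters[OCS_HW_HOST_STAT_TX_FRAME_COUNT].counter
+ *	}
+ *
+ *	// cc = 0: read the counters without clearing them
+ *	ocs_hw_get_host_stats(hw, 0, host_stats_done, NULL);
+ */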
+
+/**
+ * @brief Called when the READ_STATUS command completes.
+ *
+ * @par Description
+ * Get the counters out of the response, free the mailbox that was malloc'd
+ * by ocs_hw_get_host_stats(), then call the callback and pass
+ * the status and data.
+ *
+ * @param hw Hardware context.
+ * @param status Status field from the mbox completion.
+ * @param mqe Mailbox response structure.
+ * @param arg Pointer to the callback argument (ocs_hw_host_stat_cb_arg_t).
+ * The callback function prototype is defined by
+ * ocs_hw_host_stat_cb_t.
+ *
+ * @return Returns 0.
+ */
+static int32_t
+ocs_hw_cb_host_stat(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
+{
+	sli4_cmd_read_status_t* mbox_rsp = (sli4_cmd_read_status_t*) mqe;
+ ocs_hw_host_stat_cb_arg_t *cb_arg = arg;
+ ocs_hw_host_stat_counts_t counts[OCS_HW_HOST_STAT_MAX];
+ uint32_t num_counters = OCS_HW_HOST_STAT_MAX;
+
+ ocs_memset(counts, 0, sizeof(ocs_hw_host_stat_counts_t) *
+ OCS_HW_HOST_STAT_MAX);
+
+ counts[OCS_HW_HOST_STAT_TX_KBYTE_COUNT].counter = mbox_rsp->transmit_kbyte_count;
+ counts[OCS_HW_HOST_STAT_RX_KBYTE_COUNT].counter = mbox_rsp->receive_kbyte_count;
+ counts[OCS_HW_HOST_STAT_TX_FRAME_COUNT].counter = mbox_rsp->transmit_frame_count;
+ counts[OCS_HW_HOST_STAT_RX_FRAME_COUNT].counter = mbox_rsp->receive_frame_count;
+ counts[OCS_HW_HOST_STAT_TX_SEQ_COUNT].counter = mbox_rsp->transmit_sequence_count;
+ counts[OCS_HW_HOST_STAT_RX_SEQ_COUNT].counter = mbox_rsp->receive_sequence_count;
+ counts[OCS_HW_HOST_STAT_TOTAL_EXCH_ORIG].counter = mbox_rsp->total_exchanges_originator;
+ counts[OCS_HW_HOST_STAT_TOTAL_EXCH_RESP].counter = mbox_rsp->total_exchanges_responder;
+ counts[OCS_HW_HOSY_STAT_RX_P_BSY_COUNT].counter = mbox_rsp->receive_p_bsy_count;
+ counts[OCS_HW_HOST_STAT_RX_F_BSY_COUNT].counter = mbox_rsp->receive_f_bsy_count;
+ counts[OCS_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_RQ_BUF_COUNT].counter = mbox_rsp->dropped_frames_due_to_no_rq_buffer_count;
+ counts[OCS_HW_HOST_STAT_EMPTY_RQ_TIMEOUT_COUNT].counter = mbox_rsp->empty_rq_timeout_count;
+ counts[OCS_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_XRI_COUNT].counter = mbox_rsp->dropped_frames_due_to_no_xri_count;
+ counts[OCS_HW_HOST_STAT_EMPTY_XRI_POOL_COUNT].counter = mbox_rsp->empty_xri_pool_count;
+
+ if (cb_arg) {
+ if (cb_arg->cb) {
+ if ((status == 0) && mbox_rsp->hdr.status) {
+ status = mbox_rsp->hdr.status;
+ }
+ cb_arg->cb(status,
+ num_counters,
+ counts,
+ cb_arg->arg);
+ }
+
+ ocs_free(hw->os, cb_arg, sizeof(ocs_hw_host_stat_cb_arg_t));
+ }
+ ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
+
+ return 0;
+}
+
+/**
+ * @brief HW link configuration enum to the CLP string value mapping.
+ *
+ * This structure provides a mapping from the ocs_hw_linkcfg_e
+ * enum (enum exposed for the OCS_HW_PORT_SET_LINK_CONFIG port
+ * control) to the CLP string that is used
+ * in the DMTF_CLP_CMD mailbox command.
+ */
+typedef struct ocs_hw_linkcfg_map_s {
+ ocs_hw_linkcfg_e linkcfg;
+ const char *clp_str;
+} ocs_hw_linkcfg_map_t;
+
+/**
+ * @brief Mapping from the HW linkcfg enum to the CLP command value
+ * string.
+ */
+static ocs_hw_linkcfg_map_t linkcfg_map[] = {
+ {OCS_HW_LINKCFG_4X10G, "ELX_4x10G"},
+ {OCS_HW_LINKCFG_1X40G, "ELX_1x40G"},
+ {OCS_HW_LINKCFG_2X16G, "ELX_2x16G"},
+ {OCS_HW_LINKCFG_4X8G, "ELX_4x8G"},
+ {OCS_HW_LINKCFG_4X1G, "ELX_4x1G"},
+ {OCS_HW_LINKCFG_2X10G, "ELX_2x10G"},
+ {OCS_HW_LINKCFG_2X10G_2X8G, "ELX_2x10G_2x8G"}};
+
+/**
+ * @brief HW link configuration enum to Skyhawk link config ID mapping.
+ *
+ * This structure provides a mapping from the ocs_hw_linkcfg_e
+ * enum (enum exposed for the OCS_HW_PORT_SET_LINK_CONFIG port
+ * control) to the link config ID numbers used by Skyhawk.
+ */
+typedef struct ocs_hw_skyhawk_linkcfg_map_s {
+ ocs_hw_linkcfg_e linkcfg;
+ uint32_t config_id;
+} ocs_hw_skyhawk_linkcfg_map_t;
+
+/**
+ * @brief Mapping from the HW linkcfg enum to the Skyhawk link config IDs
+ */
+static ocs_hw_skyhawk_linkcfg_map_t skyhawk_linkcfg_map[] = {
+ {OCS_HW_LINKCFG_4X10G, 0x0a},
+ {OCS_HW_LINKCFG_1X40G, 0x09},
+};
+
+/**
+ * @brief Helper function for getting the HW linkcfg enum from the CLP
+ * string value
+ *
+ * @param clp_str CLP string value from OEMELX_LinkConfig.
+ *
+ * @return Returns the HW linkcfg enum corresponding to clp_str.
+ */
+static ocs_hw_linkcfg_e
+ocs_hw_linkcfg_from_clp(const char *clp_str)
+{
+ uint32_t i;
+ for (i = 0; i < ARRAY_SIZE(linkcfg_map); i++) {
+ if (ocs_strncmp(linkcfg_map[i].clp_str, clp_str, ocs_strlen(clp_str)) == 0) {
+ return linkcfg_map[i].linkcfg;
+ }
+ }
+ return OCS_HW_LINKCFG_NA;
+}
+
+/**
+ * @brief Helper function for getting the CLP string value from the HW
+ * linkcfg enum.
+ *
+ * @param linkcfg HW linkcfg enum.
+ *
+ * @return Returns the OEMELX_LinkConfig CLP string value corresponding to
+ * given linkcfg.
+ */
+static const char *
+ocs_hw_clp_from_linkcfg(ocs_hw_linkcfg_e linkcfg)
+{
+ uint32_t i;
+ for (i = 0; i < ARRAY_SIZE(linkcfg_map); i++) {
+ if (linkcfg_map[i].linkcfg == linkcfg) {
+ return linkcfg_map[i].clp_str;
+ }
+ }
+ return NULL;
+}
+
+/**
+ * @brief Helper function for getting a Skyhawk link config ID from the HW
+ * linkcfg enum.
+ *
+ * @param linkcfg HW linkcfg enum.
+ *
+ * @return Returns the Skyhawk link config ID corresponding to
+ * given linkcfg.
+ */
+static uint32_t
+ocs_hw_config_id_from_linkcfg(ocs_hw_linkcfg_e linkcfg)
+{
+ uint32_t i;
+ for (i = 0; i < ARRAY_SIZE(skyhawk_linkcfg_map); i++) {
+ if (skyhawk_linkcfg_map[i].linkcfg == linkcfg) {
+ return skyhawk_linkcfg_map[i].config_id;
+ }
+ }
+ return 0;
+}
+
+/**
+ * @brief Helper function for getting the HW linkcfg enum from a
+ * Skyhawk config ID.
+ *
+ * @param config_id Skyhawk link config ID.
+ *
+ * @return Returns the HW linkcfg enum corresponding to config_id.
+ */
+static ocs_hw_linkcfg_e
+ocs_hw_linkcfg_from_config_id(const uint32_t config_id)
+{
+ uint32_t i;
+ for (i = 0; i < ARRAY_SIZE(skyhawk_linkcfg_map); i++) {
+ if (skyhawk_linkcfg_map[i].config_id == config_id) {
+ return skyhawk_linkcfg_map[i].linkcfg;
+ }
+ }
+ return OCS_HW_LINKCFG_NA;
+}
+
+/**
+ * @brief Link configuration callback argument.
+ */
+typedef struct ocs_hw_linkcfg_cb_arg_s {
+ ocs_hw_port_control_cb_t cb;
+ void *arg;
+ uint32_t opts;
+ int32_t status;
+ ocs_dma_t dma_cmd;
+ ocs_dma_t dma_resp;
+ uint32_t result_len;
+} ocs_hw_linkcfg_cb_arg_t;
+
+/**
+ * @brief Set link configuration.
+ *
+ * @param hw Hardware context.
+ * @param value Link configuration enum to which the link configuration is
+ * set.
+ * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
+ * @param cb Callback function to invoke following mbx command.
+ * @param arg Callback argument.
+ *
+ * @return Returns OCS_HW_RTN_SUCCESS on success.
+ */
+static ocs_hw_rtn_e
+ocs_hw_set_linkcfg(ocs_hw_t *hw, ocs_hw_linkcfg_e value, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
+{
+ if (!sli_link_is_configurable(&hw->sli)) {
+ ocs_log_debug(hw->os, "Function not supported\n");
+ return OCS_HW_RTN_ERROR;
+ }
+
+ if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) {
+ return ocs_hw_set_linkcfg_lancer(hw, value, opts, cb, arg);
+ } else if ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
+ (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli))) {
+ return ocs_hw_set_linkcfg_skyhawk(hw, value, opts, cb, arg);
+ } else {
+ ocs_log_test(hw->os, "Function not supported for this IF_TYPE\n");
+ return OCS_HW_RTN_ERROR;
+ }
+}
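+
+/*
+ * Illustrative sketch (not part of the driver): this helper is internal
+ * and would normally be reached through the OCS_HW_PORT_SET_LINK_CONFIG
+ * port control. The callback signature below is assumed from the call
+ * sites (status, value, arg); the name linkcfg_done is hypothetical.
+ *
+ *	static void
+ *	linkcfg_done(int32_t status, uintptr_t value, void *arg)
+ *	{
+ *		// for "set", value is unused; for "get" it carries the
+ *		// resulting ocs_hw_linkcfg_e
+ *	}
+ *
+ *	ocs_hw_set_linkcfg(hw, OCS_HW_LINKCFG_2X16G, OCS_CMD_POLL,
+ *			   linkcfg_done, NULL);
+ */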
+
+/**
+ * @brief Set link configuration for Lancer
+ *
+ * @param hw Hardware context.
+ * @param value Link configuration enum to which the link configuration is
+ * set.
+ * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
+ * @param cb Callback function to invoke following mbx command.
+ * @param arg Callback argument.
+ *
+ * @return Returns OCS_HW_RTN_SUCCESS on success.
+ */
+static ocs_hw_rtn_e
+ocs_hw_set_linkcfg_lancer(ocs_hw_t *hw, ocs_hw_linkcfg_e value, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
+{
+ char cmd[OCS_HW_DMTF_CLP_CMD_MAX];
+ ocs_hw_linkcfg_cb_arg_t *cb_arg;
+ const char *value_str = NULL;
+ ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
+
+	/* translate ocs_hw_linkcfg_e to CLP string */
+	value_str = ocs_hw_clp_from_linkcfg(value);
+	if (value_str == NULL) {
+		ocs_log_test(hw->os, "no CLP mapping for link config %d\n", value);
+		return OCS_HW_RTN_ERROR;
+	}
+
+	/* allocate memory for callback argument */
+	cb_arg = ocs_malloc(hw->os, sizeof(*cb_arg), OCS_M_NOWAIT);
+	if (cb_arg == NULL) {
+		ocs_log_err(hw->os, "failed to malloc cb_arg");
+		return OCS_HW_RTN_NO_MEMORY;
+	}
+
+ ocs_snprintf(cmd, OCS_HW_DMTF_CLP_CMD_MAX, "set / OEMELX_LinkConfig=%s", value_str);
+ /* allocate DMA for command */
+ if (ocs_dma_alloc(hw->os, &cb_arg->dma_cmd, ocs_strlen(cmd)+1, 4096)) {
+ ocs_log_err(hw->os, "malloc failed\n");
+ ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+ ocs_memset(cb_arg->dma_cmd.virt, 0, ocs_strlen(cmd)+1);
+ ocs_memcpy(cb_arg->dma_cmd.virt, cmd, ocs_strlen(cmd));
+
+ /* allocate DMA for response */
+ if (ocs_dma_alloc(hw->os, &cb_arg->dma_resp, OCS_HW_DMTF_CLP_RSP_MAX, 4096)) {
+ ocs_log_err(hw->os, "malloc failed\n");
+ ocs_dma_free(hw->os, &cb_arg->dma_cmd);
+ ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+ cb_arg->cb = cb;
+ cb_arg->arg = arg;
+ cb_arg->opts = opts;
+
+ rc = ocs_hw_exec_dmtf_clp_cmd(hw, &cb_arg->dma_cmd, &cb_arg->dma_resp,
+ opts, ocs_hw_linkcfg_dmtf_clp_cb, cb_arg);
+
+ if (opts == OCS_CMD_POLL || rc != OCS_HW_RTN_SUCCESS) {
+ /* if failed, or polling, free memory here; if success and not
+ * polling, will free in callback function
+ */
+ if (rc) {
+ ocs_log_test(hw->os, "CLP cmd=\"%s\" failed\n",
+ (char *)cb_arg->dma_cmd.virt);
+ }
+ ocs_dma_free(hw->os, &cb_arg->dma_cmd);
+ ocs_dma_free(hw->os, &cb_arg->dma_resp);
+ ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
+ }
+ return rc;
+}
+
+/**
+ * @brief Callback for ocs_hw_set_linkcfg_skyhawk
+ *
+ * @param hw Hardware context.
+ * @param status Status from the SET_RECONFIG_LINK_ID command.
+ * @param mqe Mailbox response structure.
+ * @param arg Pointer to a callback argument.
+ *
+ * @return none
+ */
+static void
+ocs_hw_set_active_link_config_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
+{
+ ocs_hw_linkcfg_cb_arg_t *cb_arg = (ocs_hw_linkcfg_cb_arg_t *)arg;
+
+ if (status) {
+ ocs_log_test(hw->os, "SET_RECONFIG_LINK_ID failed, status=%d\n", status);
+ }
+
+ /* invoke callback */
+ if (cb_arg->cb) {
+ cb_arg->cb(status, 0, cb_arg->arg);
+ }
+
+ /* if polling, will free memory in calling function */
+ if (cb_arg->opts != OCS_CMD_POLL) {
+ ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
+ }
+}
+
+/**
+ * @brief Set link configuration for a Skyhawk
+ *
+ * @param hw Hardware context.
+ * @param value Link configuration enum to which the link configuration is
+ * set.
+ * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
+ * @param cb Callback function to invoke following mbx command.
+ * @param arg Callback argument.
+ *
+ * @return Returns OCS_HW_RTN_SUCCESS on success.
+ */
+static ocs_hw_rtn_e
+ocs_hw_set_linkcfg_skyhawk(ocs_hw_t *hw, ocs_hw_linkcfg_e value, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
+{
+ uint8_t *mbxdata;
+ ocs_hw_linkcfg_cb_arg_t *cb_arg;
+ ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
+ uint32_t config_id;
+
+ config_id = ocs_hw_config_id_from_linkcfg(value);
+
+ if (config_id == 0) {
+ ocs_log_test(hw->os, "Link config %d not supported by Skyhawk\n", value);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ /* mbxdata holds the header of the command */
+ mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
+ if (mbxdata == NULL) {
+ ocs_log_err(hw->os, "failed to malloc mbox\n");
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ /* cb_arg holds the data that will be passed to the callback on completion */
+ cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_linkcfg_cb_arg_t), OCS_M_NOWAIT);
+ if (cb_arg == NULL) {
+ ocs_log_err(hw->os, "failed to malloc cb_arg\n");
+ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ cb_arg->cb = cb;
+ cb_arg->arg = arg;
+
+ if (sli_cmd_common_set_reconfig_link_id(&hw->sli, mbxdata, SLI4_BMBX_SIZE, NULL, 0, config_id)) {
+ rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_set_active_link_config_cb, cb_arg);
+ }
+
+ if (rc != OCS_HW_RTN_SUCCESS) {
+ ocs_log_err(hw->os, "SET_RECONFIG_LINK_ID failed\n");
+ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+ ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
+ } else if (opts == OCS_CMD_POLL) {
+ /* if we're polling we have to call the callback here. */
+ ocs_hw_set_active_link_config_cb(hw, 0, mbxdata, cb_arg);
+ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+ ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
+ } else {
+		/* We weren't polling, so the callback will be called on completion */
+ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+ }
+
+ return rc;
+}
+
+/**
+ * @brief Get link configuration.
+ *
+ * @param hw Hardware context.
+ * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
+ * @param cb Callback function to invoke following mbx command.
+ * @param arg Callback argument.
+ *
+ * @return Returns OCS_HW_RTN_SUCCESS on success.
+ */
+static ocs_hw_rtn_e
+ocs_hw_get_linkcfg(ocs_hw_t *hw, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
+{
+ if (!sli_link_is_configurable(&hw->sli)) {
+ ocs_log_debug(hw->os, "Function not supported\n");
+ return OCS_HW_RTN_ERROR;
+ }
+
+ if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) {
+ return ocs_hw_get_linkcfg_lancer(hw, opts, cb, arg);
+ } else if ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
+ (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli))) {
+ return ocs_hw_get_linkcfg_skyhawk(hw, opts, cb, arg);
+ } else {
+ ocs_log_test(hw->os, "Function not supported for this IF_TYPE\n");
+ return OCS_HW_RTN_ERROR;
+ }
+}
+
+/**
+ * @brief Get link configuration for a Lancer
+ *
+ * @param hw Hardware context.
+ * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
+ * @param cb Callback function to invoke following mbx command.
+ * @param arg Callback argument.
+ *
+ * @return Returns OCS_HW_RTN_SUCCESS on success.
+ */
+static ocs_hw_rtn_e
+ocs_hw_get_linkcfg_lancer(ocs_hw_t *hw, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
+{
+ char cmd[OCS_HW_DMTF_CLP_CMD_MAX];
+ ocs_hw_linkcfg_cb_arg_t *cb_arg;
+ ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
+
+ /* allocate memory for callback argument */
+ cb_arg = ocs_malloc(hw->os, sizeof(*cb_arg), OCS_M_NOWAIT);
+ if (cb_arg == NULL) {
+ ocs_log_err(hw->os, "failed to malloc cb_arg");
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ ocs_snprintf(cmd, OCS_HW_DMTF_CLP_CMD_MAX, "show / OEMELX_LinkConfig");
+
+ /* allocate DMA for command */
+ if (ocs_dma_alloc(hw->os, &cb_arg->dma_cmd, ocs_strlen(cmd)+1, 4096)) {
+ ocs_log_err(hw->os, "malloc failed\n");
+ ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ /* copy CLP command to DMA command */
+ ocs_memset(cb_arg->dma_cmd.virt, 0, ocs_strlen(cmd)+1);
+ ocs_memcpy(cb_arg->dma_cmd.virt, cmd, ocs_strlen(cmd));
+
+ /* allocate DMA for response */
+ if (ocs_dma_alloc(hw->os, &cb_arg->dma_resp, OCS_HW_DMTF_CLP_RSP_MAX, 4096)) {
+ ocs_log_err(hw->os, "malloc failed\n");
+ ocs_dma_free(hw->os, &cb_arg->dma_cmd);
+ ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+ cb_arg->cb = cb;
+ cb_arg->arg = arg;
+ cb_arg->opts = opts;
+
+ rc = ocs_hw_exec_dmtf_clp_cmd(hw, &cb_arg->dma_cmd, &cb_arg->dma_resp,
+ opts, ocs_hw_linkcfg_dmtf_clp_cb, cb_arg);
+
+ if (opts == OCS_CMD_POLL || rc != OCS_HW_RTN_SUCCESS) {
+ /* if failed or polling, free memory here; if not polling and success,
+ * will free in callback function
+ */
+ if (rc) {
+ ocs_log_test(hw->os, "CLP cmd=\"%s\" failed\n",
+ (char *)cb_arg->dma_cmd.virt);
+ }
+ ocs_dma_free(hw->os, &cb_arg->dma_cmd);
+ ocs_dma_free(hw->os, &cb_arg->dma_resp);
+ ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
+ }
+ return rc;
+}
+
+/**
+ * @brief Get the link configuration callback.
+ *
+ * @param hw Hardware context.
+ * @param status Status from the GET_RECONFIG_LINK_INFO command.
+ * @param mqe Mailbox response structure.
+ * @param arg Pointer to a callback argument.
+ *
+ * @return none
+ */
+static void
+ocs_hw_get_active_link_config_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
+{
+ ocs_hw_linkcfg_cb_arg_t *cb_arg = (ocs_hw_linkcfg_cb_arg_t *)arg;
+ sli4_res_common_get_reconfig_link_info_t *rsp = cb_arg->dma_cmd.virt;
+ ocs_hw_linkcfg_e value = OCS_HW_LINKCFG_NA;
+
+ if (status) {
+ ocs_log_test(hw->os, "GET_RECONFIG_LINK_INFO failed, status=%d\n", status);
+ } else {
+ /* Call was successful */
+ value = ocs_hw_linkcfg_from_config_id(rsp->active_link_config_id);
+ }
+
+ /* invoke callback */
+ if (cb_arg->cb) {
+ cb_arg->cb(status, value, cb_arg->arg);
+ }
+
+ /* if polling, will free memory in calling function */
+ if (cb_arg->opts != OCS_CMD_POLL) {
+ ocs_dma_free(hw->os, &cb_arg->dma_cmd);
+ ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
+ }
+}
+
+/**
+ * @brief Get link configuration for a Skyhawk.
+ *
+ * @param hw Hardware context.
+ * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
+ * @param cb Callback function to invoke following mbx command.
+ * @param arg Callback argument.
+ *
+ * @return Returns OCS_HW_RTN_SUCCESS on success.
+ */
+static ocs_hw_rtn_e
+ocs_hw_get_linkcfg_skyhawk(ocs_hw_t *hw, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
+{
+ uint8_t *mbxdata;
+ ocs_hw_linkcfg_cb_arg_t *cb_arg;
+ ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
+
+ /* mbxdata holds the header of the command */
+ mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
+ if (mbxdata == NULL) {
+ ocs_log_err(hw->os, "failed to malloc mbox\n");
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ /* cb_arg holds the data that will be passed to the callback on completion */
+ cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_linkcfg_cb_arg_t), OCS_M_NOWAIT);
+ if (cb_arg == NULL) {
+ ocs_log_err(hw->os, "failed to malloc cb_arg\n");
+ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ cb_arg->cb = cb;
+ cb_arg->arg = arg;
+ cb_arg->opts = opts;
+
+ /* dma_mem holds the non-embedded portion */
+ if (ocs_dma_alloc(hw->os, &cb_arg->dma_cmd, sizeof(sli4_res_common_get_reconfig_link_info_t), 4)) {
+ ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
+ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+ ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ if (sli_cmd_common_get_reconfig_link_info(&hw->sli, mbxdata, SLI4_BMBX_SIZE, &cb_arg->dma_cmd)) {
+ rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_get_active_link_config_cb, cb_arg);
+ }
+
+ if (rc != OCS_HW_RTN_SUCCESS) {
+ ocs_log_err(hw->os, "GET_RECONFIG_LINK_INFO failed\n");
+ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+ ocs_dma_free(hw->os, &cb_arg->dma_cmd);
+ ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
+ } else if (opts == OCS_CMD_POLL) {
+ /* if we're polling we have to call the callback here. */
+ ocs_hw_get_active_link_config_cb(hw, 0, mbxdata, cb_arg);
+ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+ ocs_dma_free(hw->os, &cb_arg->dma_cmd);
+ ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
+ } else {
+		/* We weren't polling, so the callback will be called on completion */
+ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+ }
+
+ return rc;
+}
+
+/**
+ * @brief Sets the DIF seed value.
+ *
+ * @param hw Hardware context.
+ *
+ * @return Returns OCS_HW_RTN_SUCCESS on success.
+ */
+static ocs_hw_rtn_e
+ocs_hw_set_dif_seed(ocs_hw_t *hw)
+{
+ ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
+ uint8_t buf[SLI4_BMBX_SIZE];
+ sli4_req_common_set_features_dif_seed_t seed_param;
+
+ ocs_memset(&seed_param, 0, sizeof(seed_param));
+ seed_param.seed = hw->config.dif_seed;
+
+ /* send set_features command */
+ if (sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
+ SLI4_SET_FEATURES_DIF_SEED,
+ 4,
+ (uint32_t*)&seed_param)) {
+ rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
+ if (rc) {
+ ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc);
+ } else {
+ ocs_log_debug(hw->os, "DIF seed set to 0x%x\n",
+ hw->config.dif_seed);
+ }
+ } else {
+ ocs_log_err(hw->os, "sli_cmd_common_set_features failed\n");
+ rc = OCS_HW_RTN_ERROR;
+ }
+ return rc;
+}
+
+/**
+ * @brief Sets the DIF mode value.
+ *
+ * @param hw Hardware context.
+ *
+ * @return Returns OCS_HW_RTN_SUCCESS on success.
+ */
+static ocs_hw_rtn_e
+ocs_hw_set_dif_mode(ocs_hw_t *hw)
+{
+ ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
+ uint8_t buf[SLI4_BMBX_SIZE];
+ sli4_req_common_set_features_t10_pi_mem_model_t mode_param;
+
+ ocs_memset(&mode_param, 0, sizeof(mode_param));
+ mode_param.tmm = (hw->config.dif_mode == OCS_HW_DIF_MODE_INLINE ? 0 : 1);
+
+ /* send set_features command */
+ if (sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
+ SLI4_SET_FEATURES_DIF_MEMORY_MODE,
+ sizeof(mode_param),
+ (uint32_t*)&mode_param)) {
+ rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
+ if (rc) {
+ ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc);
+ } else {
+ ocs_log_test(hw->os, "DIF mode set to %s\n",
+ (hw->config.dif_mode == OCS_HW_DIF_MODE_INLINE ? "inline" : "separate"));
+ }
+ } else {
+ ocs_log_err(hw->os, "sli_cmd_common_set_features failed\n");
+ rc = OCS_HW_RTN_ERROR;
+ }
+ return rc;
+}
+
+static void
+ocs_hw_watchdog_timer_cb(void *arg)
+{
+ ocs_hw_t *hw = (ocs_hw_t *)arg;
+
+ ocs_hw_config_watchdog_timer(hw);
+ return;
+}
+
+static void
+ocs_hw_cb_cfg_watchdog(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
+{
+ uint16_t timeout = hw->watchdog_timeout;
+
+ if (status != 0) {
+ ocs_log_err(hw->os, "config watchdog timer failed, rc = %d\n", status);
+ } else {
+		if (timeout != 0) {
+			/* keep the callback 500ms before the timeout to keep the heartbeat alive */
+			ocs_setup_timer(hw->os, &hw->watchdog_timer, ocs_hw_watchdog_timer_cb, hw, (timeout*1000 - 500));
+		} else {
+			ocs_del_timer(&hw->watchdog_timer);
+		}
+ }
+
+ ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
+ return;
+}
+
+/**
+ * @brief Set configuration parameters for the watchdog timer feature.
+ *
+ * @param hw Hardware context. The timeout, in seconds, is taken from
+ * hw->watchdog_timeout; a value of zero disables the timer.
+ *
+ * @return Returns OCS_HW_RTN_SUCCESS on success.
+ */
+static ocs_hw_rtn_e
+ocs_hw_config_watchdog_timer(ocs_hw_t *hw)
+{
+	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
+	uint8_t *buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
+
+	if (buf == NULL) {
+		ocs_log_err(hw->os, "failed to malloc mbox\n");
+		return OCS_HW_RTN_NO_MEMORY;
+	}
+
+	sli4_cmd_lowlevel_set_watchdog(&hw->sli, buf, SLI4_BMBX_SIZE, hw->watchdog_timeout);
+	rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_cfg_watchdog, NULL);
+ if (rc) {
+ ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
+ ocs_log_err(hw->os, "config watchdog timer failed, rc = %d\n", rc);
+ }
+ return rc;
+}
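+
+/*
+ * Illustrative sketch (not part of the driver): arming the watchdog.
+ * The timeout is carried in hw->watchdog_timeout; on success the
+ * completion handler re-arms a driver timer 500ms before expiry so the
+ * heartbeat mailbox command is resent, and a timeout of zero tears the
+ * timer down. The value 30 is hypothetical.
+ *
+ *	hw->watchdog_timeout = 30;	// seconds
+ *	ocs_hw_config_watchdog_timer(hw);
+ */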
+
+/**
+ * @brief Set configuration parameters for auto-generate xfer_rdy T10 PI feature.
+ *
+ * @param hw Hardware context.
+ * @param buf Pointer to a mailbox buffer area.
+ *
+ * @return Returns OCS_HW_RTN_SUCCESS on success.
+ */
+static ocs_hw_rtn_e
+ocs_hw_config_auto_xfer_rdy_t10pi(ocs_hw_t *hw, uint8_t *buf)
+{
+ ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
+ sli4_req_common_set_features_xfer_rdy_t10pi_t param;
+
+ ocs_memset(&param, 0, sizeof(param));
+ param.rtc = (hw->config.auto_xfer_rdy_ref_tag_is_lba ? 0 : 1);
+ param.atv = (hw->config.auto_xfer_rdy_app_tag_valid ? 1 : 0);
+ param.tmm = ((hw->config.dif_mode == OCS_HW_DIF_MODE_INLINE) ? 0 : 1);
+ param.app_tag = hw->config.auto_xfer_rdy_app_tag_value;
+ param.blk_size = hw->config.auto_xfer_rdy_blk_size_chip;
+
+ switch (hw->config.auto_xfer_rdy_p_type) {
+ case 1:
+ param.p_type = 0;
+ break;
+ case 3:
+ param.p_type = 2;
+ break;
+ default:
+ ocs_log_err(hw->os, "unsupported p_type %d\n",
+ hw->config.auto_xfer_rdy_p_type);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ /* build the set_features command */
+ sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
+ SLI4_SET_FEATURES_SET_CONFIG_AUTO_XFER_RDY_T10PI,
+ sizeof(param),
+ &param);
+
+ rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
+ if (rc) {
+ ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc);
+ } else {
+ ocs_log_test(hw->os, "Auto XFER RDY T10 PI configured rtc:%d atv:%d p_type:%d app_tag:%x blk_size:%d\n",
+ param.rtc, param.atv, param.p_type,
+ param.app_tag, param.blk_size);
+ }
+
+ return rc;
+}
+
+/**
+ * @brief Enable or disable the SLI port health check feature.
+ *
+ * @param hw Hardware context.
+ * @param query If 1, query the current state of the health check feature.
+ * @param enable If 1, enable the health check; if 0, disable it.
+ *
+ * @return Returns OCS_HW_RTN_SUCCESS on success.
+ */
+static ocs_hw_rtn_e
+ocs_hw_config_sli_port_health_check(ocs_hw_t *hw, uint8_t query, uint8_t enable)
+{
+ ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
+ uint8_t buf[SLI4_BMBX_SIZE];
+ sli4_req_common_set_features_health_check_t param;
+
+ ocs_memset(&param, 0, sizeof(param));
+ param.hck = enable;
+ param.qry = query;
+
+ /* build the set_features command */
+ sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
+ SLI4_SET_FEATURES_SLI_PORT_HEALTH_CHECK,
+ sizeof(param),
+ &param);
+
+ rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
+ if (rc) {
+ ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc);
+ } else {
+ ocs_log_test(hw->os, "SLI Port Health Check is enabled \n");
+ }
+
+ return rc;
+}
+
+/**
+ * @brief Set the FDT transfer hint feature.
+ *
+ * @param hw Hardware context.
+ * @param fdt_xfer_hint Size, in bytes, at which read requests are segmented.
+ *
+ * @return Returns OCS_HW_RTN_SUCCESS on success.
+ */
+static ocs_hw_rtn_e
+ocs_hw_config_set_fdt_xfer_hint(ocs_hw_t *hw, uint32_t fdt_xfer_hint)
+{
+ ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
+ uint8_t buf[SLI4_BMBX_SIZE];
+ sli4_req_common_set_features_set_fdt_xfer_hint_t param;
+
+ ocs_memset(&param, 0, sizeof(param));
+ param.fdt_xfer_hint = fdt_xfer_hint;
+ /* build the set_features command */
+ sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
+ SLI4_SET_FEATURES_SET_FTD_XFER_HINT,
+ sizeof(param),
+ &param);
+
+ rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
+ if (rc) {
+ ocs_log_warn(hw->os, "set FDT hint %d failed: %d\n", fdt_xfer_hint, rc);
+ } else {
+ ocs_log_debug(hw->os, "Set FTD transfer hint to %d\n", param.fdt_xfer_hint);
+ }
+
+ return rc;
+}
+
+/**
+ * @brief Callback for the DMTF CLP link configuration commands (Lancer).
+ *
+ * @param hw Hardware context.
+ * @param status Status from the DMTF CLP command.
+ * @param result_len Length, in bytes, of the DMTF CLP result.
+ * @param arg Pointer to a callback argument.
+ *
+ * @return None.
+ */
+static void
+ocs_hw_linkcfg_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint32_t result_len, void *arg)
+{
+ int32_t rval;
+ char retdata_str[64];
+ ocs_hw_linkcfg_cb_arg_t *cb_arg = (ocs_hw_linkcfg_cb_arg_t *)arg;
+ ocs_hw_linkcfg_e linkcfg = OCS_HW_LINKCFG_NA;
+
+ if (status) {
+ ocs_log_test(hw->os, "CLP cmd failed, status=%d\n", status);
+ } else {
+ /* parse CLP response to get return data */
+ rval = ocs_hw_clp_resp_get_value(hw, "retdata", retdata_str,
+ sizeof(retdata_str),
+ cb_arg->dma_resp.virt,
+ result_len);
+
+ if (rval <= 0) {
+ ocs_log_err(hw->os, "failed to get retdata %d\n", result_len);
+ } else {
+ /* translate string into hw enum */
+ linkcfg = ocs_hw_linkcfg_from_clp(retdata_str);
+ }
+ }
+
+ /* invoke callback */
+ if (cb_arg->cb) {
+ cb_arg->cb(status, linkcfg, cb_arg->arg);
+ }
+
+ /* if polling, will free memory in calling function */
+ if (cb_arg->opts != OCS_CMD_POLL) {
+ ocs_dma_free(hw->os, &cb_arg->dma_cmd);
+ ocs_dma_free(hw->os, &cb_arg->dma_resp);
+ ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
+ }
+}
+
+/**
+ * @brief Set the Lancer dump location
+ * @par Description
+ * This function tells a Lancer chip to use a specific DMA
+ * buffer as a dump location rather than the internal flash.
+ *
+ * @param hw Hardware context.
+ * @param num_buffers The number of DMA buffers to hold the dump (1..n).
+ * @param dump_buffers DMA buffers to hold the dump.
+ * @param fdb If non-zero, set the dump location for this PCI function;
+ * if zero, the command is only valid on physical function 0.
+ *
+ * @return Returns OCS_HW_RTN_SUCCESS on success.
+ */
+ocs_hw_rtn_e
+ocs_hw_set_dump_location(ocs_hw_t *hw, uint32_t num_buffers, ocs_dma_t *dump_buffers, uint8_t fdb)
+{
+ uint8_t bus, dev, func;
+ ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
+ uint8_t buf[SLI4_BMBX_SIZE];
+
+ /*
+ * Make sure the FW is new enough to support this command. If the FW
+ * is too old, the FW will UE.
+ */
+ if (hw->workaround.disable_dump_loc) {
+ ocs_log_test(hw->os, "FW version is too old for this feature\n");
+ return OCS_HW_RTN_ERROR;
+ }
+
+	/* Unless fdb is set, this command is only valid for PCI function 0 */
+ ocs_get_bus_dev_func(hw->os, &bus, &dev, &func);
+ if (fdb == 0 && func != 0) {
+ ocs_log_test(hw->os, "function only valid for pci function 0, %d passed\n",
+ func);
+ return OCS_HW_RTN_ERROR;
+ }
+
+	/*
+	 * If a single buffer is used, it may be passed as is to the chip. For multiple buffers,
+	 * we must allocate an SGL list and then pass the address of the list to the chip.
+	 */
+ if (num_buffers > 1) {
+ uint32_t sge_size = num_buffers * sizeof(sli4_sge_t);
+ sli4_sge_t *sge;
+ uint32_t i;
+
+ if (hw->dump_sges.size < sge_size) {
+ ocs_dma_free(hw->os, &hw->dump_sges);
+ if (ocs_dma_alloc(hw->os, &hw->dump_sges, sge_size, OCS_MIN_DMA_ALIGNMENT)) {
+ ocs_log_err(hw->os, "SGE DMA allocation failed\n");
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+ }
+ /* build the SGE list */
+ ocs_memset(hw->dump_sges.virt, 0, hw->dump_sges.size);
+ hw->dump_sges.len = sge_size;
+ sge = hw->dump_sges.virt;
+ for (i = 0; i < num_buffers; i++) {
+ sge[i].buffer_address_high = ocs_addr32_hi(dump_buffers[i].phys);
+ sge[i].buffer_address_low = ocs_addr32_lo(dump_buffers[i].phys);
+ sge[i].last = (i == num_buffers - 1 ? 1 : 0);
+ sge[i].buffer_length = dump_buffers[i].size;
+ }
+ rc = sli_cmd_common_set_dump_location(&hw->sli, (void *)buf,
+ SLI4_BMBX_SIZE, FALSE, TRUE,
+ &hw->dump_sges, fdb);
+ } else {
+ dump_buffers->len = dump_buffers->size;
+ rc = sli_cmd_common_set_dump_location(&hw->sli, (void *)buf,
+ SLI4_BMBX_SIZE, FALSE, FALSE,
+ dump_buffers, fdb);
+ }
+
+ if (rc) {
+ rc = ocs_hw_command(hw, buf, OCS_CMD_POLL,
+ NULL, NULL);
+ if (rc) {
+ ocs_log_err(hw->os, "ocs_hw_command returns %d\n",
+ rc);
+ }
+ } else {
+ ocs_log_err(hw->os,
+ "sli_cmd_common_set_dump_location failed\n");
+ rc = OCS_HW_RTN_ERROR;
+ }
+
+ return rc;
+}
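+
+/*
+ * Illustrative sketch (not part of the driver): directing the dump into
+ * two host buffers, which makes the function build the intermediate SGL
+ * in hw->dump_sges. The array dump_bufs is hypothetical and each entry
+ * is assumed to have been populated with ocs_dma_alloc().
+ *
+ *	ocs_dma_t dump_bufs[2];
+ *	// ... ocs_dma_alloc() both entries ...
+ *	ocs_hw_set_dump_location(hw, 2, dump_bufs, 0);
+ */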
+
+/**
+ * @brief Set the Ethernet license.
+ *
+ * @par Description
+ * This function sends the appropriate mailbox command (DMTF
+ * CLP) to set the Ethernet license to the given license value.
+ * Because it is used during ocs_hw_init(), the mailbox
+ * command is sent via polling (the BMBX route).
+ *
+ * @param hw Hardware context.
+ * @param license 32-bit license value.
+ *
+ * @return Returns OCS_HW_RTN_SUCCESS on success.
+ */
+static ocs_hw_rtn_e
+ocs_hw_set_eth_license(ocs_hw_t *hw, uint32_t license)
+{
+ ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
+ char cmd[OCS_HW_DMTF_CLP_CMD_MAX];
+ ocs_dma_t dma_cmd;
+ ocs_dma_t dma_resp;
+
+ /* only for lancer right now */
+ if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
+ ocs_log_test(hw->os, "Function only supported for I/F type 2\n");
+ return OCS_HW_RTN_ERROR;
+ }
+
+ ocs_snprintf(cmd, OCS_HW_DMTF_CLP_CMD_MAX, "set / OEMELX_Ethernet_License=%X", license);
+ /* allocate DMA for command */
+ if (ocs_dma_alloc(hw->os, &dma_cmd, ocs_strlen(cmd)+1, 4096)) {
+ ocs_log_err(hw->os, "malloc failed\n");
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+ ocs_memset(dma_cmd.virt, 0, ocs_strlen(cmd)+1);
+ ocs_memcpy(dma_cmd.virt, cmd, ocs_strlen(cmd));
+
+ /* allocate DMA for response */
+ if (ocs_dma_alloc(hw->os, &dma_resp, OCS_HW_DMTF_CLP_RSP_MAX, 4096)) {
+ ocs_log_err(hw->os, "malloc failed\n");
+ ocs_dma_free(hw->os, &dma_cmd);
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ /* send DMTF CLP command mbx and poll */
+ if (ocs_hw_exec_dmtf_clp_cmd(hw, &dma_cmd, &dma_resp, OCS_CMD_POLL, NULL, NULL)) {
+ ocs_log_err(hw->os, "CLP cmd=\"%s\" failed\n", (char *)dma_cmd.virt);
+ rc = OCS_HW_RTN_ERROR;
+ }
+
+ ocs_dma_free(hw->os, &dma_cmd);
+ ocs_dma_free(hw->os, &dma_resp);
+ return rc;
+}
+
+/**
+ * @brief Callback argument structure for the DMTF CLP commands.
+ */
+typedef struct ocs_hw_clp_cb_arg_s {
+ ocs_hw_dmtf_clp_cb_t cb;
+ ocs_dma_t *dma_resp;
+ int32_t status;
+ uint32_t opts;
+ void *arg;
+} ocs_hw_clp_cb_arg_t;
+
+/**
+ * @brief Execute the DMTF CLP command.
+ *
+ * @param hw Hardware context.
+ * @param dma_cmd DMA buffer containing the CLP command.
+ * @param dma_resp DMA buffer that will contain the response (if successful).
+ * @param opts Mailbox command options (such as OCS_CMD_NOWAIT and POLL).
+ * @param cb Callback function.
+ * @param arg Callback argument.
+ *
+ * @return Returns OCS_HW_RTN_SUCCESS on success, or a non-zero
+ * value on failure.
+ */
+static ocs_hw_rtn_e
+ocs_hw_exec_dmtf_clp_cmd(ocs_hw_t *hw, ocs_dma_t *dma_cmd, ocs_dma_t *dma_resp, uint32_t opts, ocs_hw_dmtf_clp_cb_t cb, void *arg)
+{
+ ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
+ ocs_hw_clp_cb_arg_t *cb_arg;
+ uint8_t *mbxdata;
+
+ /* allocate DMA for mailbox */
+ mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
+ if (mbxdata == NULL) {
+ ocs_log_err(hw->os, "failed to malloc mbox\n");
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ /* allocate memory for callback argument */
+ cb_arg = ocs_malloc(hw->os, sizeof(*cb_arg), OCS_M_NOWAIT);
+ if (cb_arg == NULL) {
+ ocs_log_err(hw->os, "failed to malloc cb_arg");
+ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ cb_arg->cb = cb;
+ cb_arg->arg = arg;
+ cb_arg->dma_resp = dma_resp;
+ cb_arg->opts = opts;
+
+ /* Send the HW command */
+ if (sli_cmd_dmtf_exec_clp_cmd(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
+ dma_cmd, dma_resp)) {
+ rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_dmtf_clp_cb, cb_arg);
+
+ if (opts == OCS_CMD_POLL && rc == OCS_HW_RTN_SUCCESS) {
+ /* if we're polling, copy response and invoke callback to
+ * parse result */
+ ocs_memcpy(mbxdata, hw->sli.bmbx.virt, SLI4_BMBX_SIZE);
+ ocs_hw_dmtf_clp_cb(hw, 0, mbxdata, cb_arg);
+
+ /* set rc to resulting or "parsed" status */
+ rc = cb_arg->status;
+ }
+
+ /* if failed, or polling, free memory here */
+ if (opts == OCS_CMD_POLL || rc != OCS_HW_RTN_SUCCESS) {
+ if (rc != OCS_HW_RTN_SUCCESS) {
+ ocs_log_test(hw->os, "ocs_hw_command failed\n");
+ }
+ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+ ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
+ }
+ } else {
+ ocs_log_test(hw->os, "sli_cmd_dmtf_exec_clp_cmd failed\n");
+ rc = OCS_HW_RTN_ERROR;
+ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+ ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
+ }
+
+ return rc;
+}
+
+/**
+ * @brief Called when the DMTF CLP command completes.
+ *
+ * @param hw Hardware context.
+ * @param status Status field from the mbox completion.
+ * @param mqe Mailbox response structure.
+ * @param arg Pointer to a callback argument.
+ *
+ * @return None.
+ *
+ */
+static void
+ocs_hw_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
+{
+ int32_t cb_status = 0;
+ sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
+ sli4_res_dmtf_exec_clp_cmd_t *clp_rsp = (sli4_res_dmtf_exec_clp_cmd_t *) mbox_rsp->payload.embed;
+ ocs_hw_clp_cb_arg_t *cb_arg = arg;
+ uint32_t result_len = 0;
+ int32_t stat_len;
+ char stat_str[8];
+
+ /* there are several status codes here, check them all and condense
+ * into a single callback status
+ */
+ if (status || mbox_rsp->hdr.status || clp_rsp->clp_status) {
+ ocs_log_debug(hw->os, "status=x%x/x%x/x%x addl=x%x clp=x%x detail=x%x\n",
+ status,
+ mbox_rsp->hdr.status,
+ clp_rsp->hdr.status,
+ clp_rsp->hdr.additional_status,
+ clp_rsp->clp_status,
+ clp_rsp->clp_detailed_status);
+ if (status) {
+ cb_status = status;
+ } else if (mbox_rsp->hdr.status) {
+ cb_status = mbox_rsp->hdr.status;
+ } else {
+ cb_status = clp_rsp->clp_status;
+ }
+ } else {
+ result_len = clp_rsp->resp_length;
+ }
+
+ if (cb_status) {
+ goto ocs_hw_cb_dmtf_clp_done;
+ }
+
+ if ((result_len == 0) || (cb_arg->dma_resp->size < result_len)) {
+ ocs_log_test(hw->os, "Invalid response length: resp_len=%zu result len=%d\n",
+ cb_arg->dma_resp->size, result_len);
+ cb_status = -1;
+ goto ocs_hw_cb_dmtf_clp_done;
+ }
+
+ /* parse CLP response to get status */
+ stat_len = ocs_hw_clp_resp_get_value(hw, "status", stat_str,
+ sizeof(stat_str),
+ cb_arg->dma_resp->virt,
+ result_len);
+
+ if (stat_len <= 0) {
+ ocs_log_test(hw->os, "failed to get status %d\n", stat_len);
+ cb_status = -1;
+ goto ocs_hw_cb_dmtf_clp_done;
+ }
+
+ if (ocs_strcmp(stat_str, "0") != 0) {
+ ocs_log_test(hw->os, "CLP status indicates failure=%s\n", stat_str);
+ cb_status = -1;
+ goto ocs_hw_cb_dmtf_clp_done;
+ }
+
+ocs_hw_cb_dmtf_clp_done:
+
+ /* save status in cb_arg for callers with NULL cb's + polling */
+ cb_arg->status = cb_status;
+ if (cb_arg->cb) {
+ cb_arg->cb(hw, cb_status, result_len, cb_arg->arg);
+ }
+ /* if polling, caller will free memory */
+ if (cb_arg->opts != OCS_CMD_POLL) {
+ ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
+ ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
+ }
+}
+
+/**
+ * @brief Parse the CLP result and get the value corresponding to the given
+ * keyword.
+ *
+ * @param hw Hardware context.
+ * @param keyword CLP keyword for which the value is returned.
+ * @param value Location to which the resulting value is copied.
+ * @param value_len Length of the value parameter.
+ * @param resp Pointer to the response buffer that is searched
+ * for the keyword and value.
+ * @param resp_len Length of response buffer passed in.
+ *
+ * @return Returns the number of bytes written to the value
+ * buffer on success, or a negative value on failure.
+ */
+static int32_t
+ocs_hw_clp_resp_get_value(ocs_hw_t *hw, const char *keyword, char *value, uint32_t value_len, const char *resp, uint32_t resp_len)
+{
+ char *start = NULL;
+ char *end = NULL;
+
+ /* look for specified keyword in string */
+ start = ocs_strstr(resp, keyword);
+ if (start == NULL) {
+ ocs_log_test(hw->os, "could not find keyword=%s in CLP response\n",
+ keyword);
+ return -1;
+ }
+
+ /* now look for '=' and go one past */
+ start = ocs_strchr(start, '=');
+ if (start == NULL) {
+ ocs_log_test(hw->os, "could not find \'=\' in CLP response for keyword=%s\n",
+ keyword);
+ return -1;
+ }
+ start++;
+
+ /* \r\n terminates value */
+ end = ocs_strstr(start, "\r\n");
+ if (end == NULL) {
+ ocs_log_test(hw->os, "could not find \\r\\n for keyword=%s in CLP response\n",
+ keyword);
+ return -1;
+ }
+
+ /* make sure given result array is big enough */
+ if ((end - start + 1) > value_len) {
+ ocs_log_test(hw->os, "value len=%d not large enough for actual=%ld\n",
+ value_len, (end-start));
+ return -1;
+ }
+
+ ocs_strncpy(value, start, (end - start));
+ value[end-start] = '\0';
+ return (end-start+1);
+}
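+
+/*
+ * Illustrative sketch (not part of the driver): a CLP response is a set
+ * of keyword=value pairs, each terminated by "\r\n". For a hypothetical
+ * response buffer resp containing
+ *
+ *	status=0\r\nretdata=ELX_2x16G\r\n
+ *
+ * the call below copies "ELX_2x16G" into buf, NUL-terminates it, and
+ * returns 10 (nine characters plus the terminator):
+ *
+ *	char buf[64];
+ *	int32_t n = ocs_hw_clp_resp_get_value(hw, "retdata", buf,
+ *					      sizeof(buf), resp, resp_len);
+ */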
+
+/**
+ * @brief Cause chip to enter an unrecoverable error state.
+ *
+ * @par Description
+ * Cause the chip to enter an unrecoverable error state. This is
+ * used when detecting unexpected FW behavior so that the FW can be
+ * halted from the driver as soon as the error is detected.
+ *
+ * @param hw Hardware context.
+ * @param dump Generate dump as part of reset.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ *
+ */
+ocs_hw_rtn_e
+ocs_hw_raise_ue(ocs_hw_t *hw, uint8_t dump)
+{
+ ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
+
+ if (sli_raise_ue(&hw->sli, dump) != 0) {
+ rc = OCS_HW_RTN_ERROR;
+ } else {
+ if (hw->state != OCS_HW_STATE_UNINITIALIZED) {
+ hw->state = OCS_HW_STATE_QUEUES_ALLOCATED;
+ }
+ }
+
+ return rc;
+}
+
+/**
+ * @brief Called when the OBJECT_GET command completes.
+ *
+ * @par Description
+ * Get the number of bytes actually written out of the response, free the mailbox
+ * that was malloc'd by ocs_hw_dump_get(), then call the callback
+ * and pass the status and bytes read.
+ *
+ * @param hw Hardware context.
+ * @param status Status field from the mbox completion.
+ * @param mqe Mailbox response structure.
+ * @param arg Pointer to the callback argument (ocs_hw_dump_get_cb_arg_t).
+ * The callback function prototype is
+ * <tt>void cb(int32_t status, uint32_t bytes_read, uint8_t eof, void *arg)</tt>.
+ *
+ * @return Returns 0.
+ */
+static int32_t
+ocs_hw_cb_dump_get(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
+{
+ sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
+ sli4_res_common_read_object_t* rd_obj_rsp = (sli4_res_common_read_object_t*) mbox_rsp->payload.embed;
+ ocs_hw_dump_get_cb_arg_t *cb_arg = arg;
+ uint32_t bytes_read;
+ uint8_t eof;
+
+ bytes_read = rd_obj_rsp->actual_read_length;
+ eof = rd_obj_rsp->eof;
+
+ if (cb_arg) {
+ if (cb_arg->cb) {
+ if ((status == 0) && mbox_rsp->hdr.status) {
+ status = mbox_rsp->hdr.status;
+ }
+ cb_arg->cb(status, bytes_read, eof, cb_arg->arg);
+ }
+
+ ocs_free(hw->os, cb_arg->mbox_cmd, SLI4_BMBX_SIZE);
+ ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_get_cb_arg_t));
+ }
+
+ return 0;
+}
+
+
+/**
+ * @brief Read a dump image to the host.
+ *
+ * @par Description
+ * Creates a SLI_CONFIG mailbox command, fills in the correct values to read a
+ * dump image chunk, then sends the command with the ocs_hw_command(). On completion,
+ * the callback function ocs_hw_cb_dump_get() gets called to free the mailbox
+ * and signal the caller that the read has completed.
+ *
+ * @param hw Hardware context.
+ * @param dma DMA structure to transfer the dump chunk into.
+ * @param size Size of the dump chunk.
+ * @param offset Offset, in bytes, from the beginning of the dump.
+ * @param cb Pointer to a callback function that is called when the command completes.
+ * The callback function prototype is
+ * <tt>void cb(int32_t status, uint32_t bytes_read, uint8_t eof, void *arg)</tt>.
+ * @param arg Pointer to be passed to the callback function.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+ocs_hw_rtn_e
+ocs_hw_dump_get(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, ocs_hw_dump_get_cb_t cb, void *arg)
+{
+ ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
+ uint8_t *mbxdata;
+ ocs_hw_dump_get_cb_arg_t *cb_arg;
+ uint32_t opts = (hw->state == OCS_HW_STATE_ACTIVE ? OCS_CMD_NOWAIT : OCS_CMD_POLL);
+
+ if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
+ ocs_log_test(hw->os, "Function only supported for I/F type 2\n");
+ return OCS_HW_RTN_ERROR;
+ }
+
+ if (1 != sli_dump_is_present(&hw->sli)) {
+ ocs_log_test(hw->os, "No dump is present\n");
+ return OCS_HW_RTN_ERROR;
+ }
+
+ if (1 == sli_reset_required(&hw->sli)) {
+ ocs_log_test(hw->os, "device reset required\n");
+ return OCS_HW_RTN_ERROR;
+ }
+
+ mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
+ if (mbxdata == NULL) {
+ ocs_log_err(hw->os, "failed to malloc mbox\n");
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_dump_get_cb_arg_t), OCS_M_NOWAIT);
+ if (cb_arg == NULL) {
+ ocs_log_err(hw->os, "failed to malloc cb_arg\n");
+ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ cb_arg->cb = cb;
+ cb_arg->arg = arg;
+ cb_arg->mbox_cmd = mbxdata;
+
+ if (sli_cmd_common_read_object(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
+ size, offset, "/dbg/dump.bin", dma)) {
+ rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_cb_dump_get, cb_arg);
+ if (rc == 0 && opts == OCS_CMD_POLL) {
+ ocs_memcpy(mbxdata, hw->sli.bmbx.virt, SLI4_BMBX_SIZE);
+ rc = ocs_hw_cb_dump_get(hw, 0, mbxdata, cb_arg);
+ }
+ }
+
+ if (rc != OCS_HW_RTN_SUCCESS) {
+ ocs_log_test(hw->os, "COMMON_READ_OBJECT failed\n");
+ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+ ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_get_cb_arg_t));
+ }
+
+ return rc;
+}
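+
+/*
+ * Usage sketch (illustrative; the handler name and loop are assumptions, not
+ * driver code): a caller typically reads the dump in chunks, advancing the
+ * offset by the bytes read until the callback reports EOF:
+ *
+ *	static void
+ *	dump_chunk_done(int32_t status, uint32_t bytes_read, uint8_t eof, void *arg)
+ *	{
+ *		// copy bytes_read bytes out of the DMA buffer; if !eof,
+ *		// issue ocs_hw_dump_get() again at offset += bytes_read
+ *	}
+ *
+ *	ocs_hw_dump_get(hw, &dma, dma.size, offset, dump_chunk_done, ctx);
+ */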
+
+/**
+ * @brief Called when the OBJECT_DELETE command completes.
+ *
+ * @par Description
+ * Free the mailbox that was malloc'd
+ * by ocs_hw_dump_clear(), then call the callback and pass the status.
+ *
+ * @param hw Hardware context.
+ * @param status Status field from the mbox completion.
+ * @param mqe Mailbox response structure.
+ * @param arg Pointer to a callback function that signals the caller that the command is done.
+ * The callback function prototype is <tt>void cb(int32_t status, void *arg)</tt>.
+ *
+ * @return Returns 0.
+ */
+static int32_t
+ocs_hw_cb_dump_clear(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
+{
+ ocs_hw_dump_clear_cb_arg_t *cb_arg = arg;
+ sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
+
+ if (cb_arg) {
+ if (cb_arg->cb) {
+ if ((status == 0) && mbox_rsp->hdr.status) {
+ status = mbox_rsp->hdr.status;
+ }
+ cb_arg->cb(status, cb_arg->arg);
+ }
+
+ ocs_free(hw->os, cb_arg->mbox_cmd, SLI4_BMBX_SIZE);
+ ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_clear_cb_arg_t));
+ }
+
+ return 0;
+}
+
+/**
+ * @brief Clear a dump image from the device.
+ *
+ * @par Description
+ * Creates a SLI_CONFIG mailbox command, fills it with the correct values to clear
+ * the dump, then sends the command with ocs_hw_command(). On completion,
+ * the callback function ocs_hw_cb_dump_clear() gets called to free the mailbox
+ * and to signal the caller that the write has completed.
+ *
+ * @param hw Hardware context.
+ * @param cb Pointer to a callback function that is called when the command completes.
+ * The callback function prototype is
+ * <tt>void cb(int32_t status, void *arg)</tt>.
+ * @param arg Pointer to be passed to the callback function.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+ocs_hw_rtn_e
+ocs_hw_dump_clear(ocs_hw_t *hw, ocs_hw_dump_clear_cb_t cb, void *arg)
+{
+ ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
+ uint8_t *mbxdata;
+ ocs_hw_dump_clear_cb_arg_t *cb_arg;
+ uint32_t opts = (hw->state == OCS_HW_STATE_ACTIVE ? OCS_CMD_NOWAIT : OCS_CMD_POLL);
+
+ if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
+ ocs_log_test(hw->os, "Function only supported for I/F type 2\n");
+ return OCS_HW_RTN_ERROR;
+ }
+
+ mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
+ if (mbxdata == NULL) {
+ ocs_log_err(hw->os, "failed to malloc mbox\n");
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_dump_clear_cb_arg_t), OCS_M_NOWAIT);
+ if (cb_arg == NULL) {
+ ocs_log_err(hw->os, "failed to malloc cb_arg\n");
+ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ cb_arg->cb = cb;
+ cb_arg->arg = arg;
+ cb_arg->mbox_cmd = mbxdata;
+
+ if (sli_cmd_common_delete_object(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
+ "/dbg/dump.bin")) {
+ rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_cb_dump_clear, cb_arg);
+ if (rc == 0 && opts == OCS_CMD_POLL) {
+ ocs_memcpy(mbxdata, hw->sli.bmbx.virt, SLI4_BMBX_SIZE);
+ rc = ocs_hw_cb_dump_clear(hw, 0, mbxdata, cb_arg);
+ }
+ }
+
+ if (rc != OCS_HW_RTN_SUCCESS) {
+ ocs_log_test(hw->os, "COMMON_DELETE_OBJECT failed\n");
+ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+ ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_clear_cb_arg_t));
+ }
+
+ return rc;
+}
+
+typedef struct ocs_hw_get_port_protocol_cb_arg_s {
+ ocs_get_port_protocol_cb_t cb;
+ void *arg;
+ uint32_t pci_func;
+ ocs_dma_t payload;
+} ocs_hw_get_port_protocol_cb_arg_t;
+
+/**
+ * @brief Called for the completion of get_port_profile for a
+ * user request.
+ *
+ * @param hw Hardware context.
+ * @param status The status from the MQE.
+ * @param mqe Pointer to mailbox command buffer.
+ * @param arg Pointer to a callback argument.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+static int32_t
+ocs_hw_get_port_protocol_cb(ocs_hw_t *hw, int32_t status,
+ uint8_t *mqe, void *arg)
+{
+ ocs_hw_get_port_protocol_cb_arg_t *cb_arg = arg;
+ ocs_dma_t *payload = &(cb_arg->payload);
+ sli4_res_common_get_profile_config_t* response = (sli4_res_common_get_profile_config_t*) payload->virt;
+ ocs_hw_port_protocol_e port_protocol;
+ int num_descriptors;
+ sli4_resource_descriptor_v1_t *desc_p;
+ sli4_pcie_resource_descriptor_v1_t *pcie_desc_p;
+ int i;
+
+ port_protocol = OCS_HW_PORT_PROTOCOL_OTHER;
+
+ num_descriptors = response->desc_count;
+ desc_p = (sli4_resource_descriptor_v1_t *)response->desc;
+ for (i=0; i<num_descriptors; i++) {
+ if (desc_p->descriptor_type == SLI4_RESOURCE_DESCRIPTOR_TYPE_PCIE) {
+ pcie_desc_p = (sli4_pcie_resource_descriptor_v1_t*) desc_p;
+ if (pcie_desc_p->pf_number == cb_arg->pci_func) {
+ switch(pcie_desc_p->pf_type) {
+ case 0x02:
+ port_protocol = OCS_HW_PORT_PROTOCOL_ISCSI;
+ break;
+ case 0x04:
+ port_protocol = OCS_HW_PORT_PROTOCOL_FCOE;
+ break;
+ case 0x10:
+ port_protocol = OCS_HW_PORT_PROTOCOL_FC;
+ break;
+ default:
+ port_protocol = OCS_HW_PORT_PROTOCOL_OTHER;
+ break;
+ }
+ }
+ }
+
+ desc_p = (sli4_resource_descriptor_v1_t *) ((uint8_t *)desc_p + desc_p->descriptor_length);
+ }
+
+ if (cb_arg->cb) {
+ cb_arg->cb(status, port_protocol, cb_arg->arg);
+
+ }
+
+ ocs_dma_free(hw->os, &cb_arg->payload);
+ ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_port_protocol_cb_arg_t));
+ ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
+
+ return 0;
+}
+
+/**
+ * @ingroup io
+ * @brief Get the current port protocol.
+ * @par Description
+ * Issues a SLI4 COMMON_GET_PROFILE_CONFIG mailbox. When the
+ * command completes the provided mgmt callback function is
+ * called.
+ *
+ * @param hw Hardware context.
+ * @param pci_func PCI function to query for current protocol.
+ * @param cb Callback function to be called when the command completes.
+ * @param ul_arg An argument that is passed to the callback function.
+ *
+ * @return
+ * - OCS_HW_RTN_SUCCESS on success.
+ * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
+ * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
+ * context.
+ * - OCS_HW_RTN_ERROR on any other error.
+ */
+ocs_hw_rtn_e
+ocs_hw_get_port_protocol(ocs_hw_t *hw, uint32_t pci_func,
+ ocs_get_port_protocol_cb_t cb, void* ul_arg)
+{
+ uint8_t *mbxdata;
+ ocs_hw_get_port_protocol_cb_arg_t *cb_arg;
+ ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
+
+ /* Only supported on Skyhawk */
+ if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) {
+ return OCS_HW_RTN_ERROR;
+ }
+
+ /* mbxdata holds the header of the command */
+ mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
+ if (mbxdata == NULL) {
+ ocs_log_err(hw->os, "failed to malloc mbox\n");
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+
+ /* cb_arg holds the data that will be passed to the callback on completion */
+ cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_port_protocol_cb_arg_t), OCS_M_NOWAIT);
+ if (cb_arg == NULL) {
+ ocs_log_err(hw->os, "failed to malloc cb_arg\n");
+ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ cb_arg->cb = cb;
+ cb_arg->arg = ul_arg;
+ cb_arg->pci_func = pci_func;
+
+ /* dma_mem holds the non-embedded portion */
+ if (ocs_dma_alloc(hw->os, &cb_arg->payload, 4096, 4)) {
+ ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
+ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+ ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_port_protocol_cb_arg_t));
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ if (sli_cmd_common_get_profile_config(&hw->sli, mbxdata, SLI4_BMBX_SIZE, &cb_arg->payload)) {
+ rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_port_protocol_cb, cb_arg);
+ }
+
+ if (rc != OCS_HW_RTN_SUCCESS) {
+ ocs_log_test(hw->os, "GET_PROFILE_CONFIG failed\n");
+		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+		ocs_dma_free(hw->os, &cb_arg->payload);
+		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_port_protocol_cb_arg_t));
+ }
+
+	return rc;
+}
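+
+/*
+ * Usage sketch (hypothetical; the handler is an assumption matching the
+ * invocation in ocs_hw_get_port_protocol_cb() above):
+ *
+ *	static void
+ *	port_protocol_done(int32_t status, ocs_hw_port_protocol_e proto, void *arg)
+ *	{
+ *		if (status == 0 && proto == OCS_HW_PORT_PROTOCOL_FC) {
+ *			// the PCI function is currently configured for FC
+ *		}
+ *	}
+ *
+ *	ocs_hw_get_port_protocol(hw, pci_func, port_protocol_done, ctx);
+ */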
+
+typedef struct ocs_hw_set_port_protocol_cb_arg_s {
+ ocs_set_port_protocol_cb_t cb;
+ void *arg;
+ ocs_dma_t payload;
+ uint32_t new_protocol;
+ uint32_t pci_func;
+} ocs_hw_set_port_protocol_cb_arg_t;
+
+/**
+ * @brief Called for the completion of set_port_profile for a
+ * user request.
+ *
+ * @par Description
+ * This is the second of two callbacks for the set_port_protocol
+ * function. The set operation is a read-modify-write. This
+ * callback is called when the write (SET_PROFILE_CONFIG)
+ * completes.
+ *
+ * @param hw Hardware context.
+ * @param status The status from the MQE.
+ * @param mqe Pointer to mailbox command buffer.
+ * @param arg Pointer to a callback argument.
+ *
+ * @return 0 on success, non-zero otherwise
+ */
+static int32_t
+ocs_hw_set_port_protocol_cb2(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
+{
+ ocs_hw_set_port_protocol_cb_arg_t *cb_arg = arg;
+
+	if (cb_arg->cb) {
+		cb_arg->cb(status, cb_arg->arg);
+	}
+
+ ocs_dma_free(hw->os, &(cb_arg->payload));
+ ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
+ ocs_free(hw->os, arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
+
+ return 0;
+}
+
+/**
+ * @brief Called for the completion of set_port_profile for a
+ * user request.
+ *
+ * @par Description
+ * This is the first of two callbacks for the set_port_protocol
+ * function. The set operation is a read-modify-write. This
+ * callback is called when the read (GET_PROFILE_CONFIG)
+ * completes. It updates the resource
+ * descriptors, then queues the write (SET_PROFILE_CONFIG).
+ *
+ * On entry there are three memory areas that were allocated by
+ * ocs_hw_set_port_protocol. If a failure is detected in this
+ * function, those need to be freed. If this function succeeds,
+ * it allocates three more areas.
+ *
+ * @param hw Hardware context.
+ * @param status The status from the MQE
+ * @param mqe Pointer to mailbox command buffer.
+ * @param arg Pointer to a callback argument.
+ *
+ * @return Returns 0 on success, or a non-zero value otherwise.
+ */
+static int32_t
+ocs_hw_set_port_protocol_cb1(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
+{
+ ocs_hw_set_port_protocol_cb_arg_t *cb_arg = arg;
+ ocs_dma_t *payload = &(cb_arg->payload);
+ sli4_res_common_get_profile_config_t* response = (sli4_res_common_get_profile_config_t*) payload->virt;
+ int num_descriptors;
+ sli4_resource_descriptor_v1_t *desc_p;
+ sli4_pcie_resource_descriptor_v1_t *pcie_desc_p;
+ int i;
+ ocs_hw_set_port_protocol_cb_arg_t *new_cb_arg;
+ ocs_hw_port_protocol_e new_protocol;
+ uint8_t *dst;
+ sli4_isap_resouce_descriptor_v1_t *isap_desc_p;
+ uint8_t *mbxdata;
+ int pci_descriptor_count;
+ ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
+ int num_fcoe_ports = 0;
+ int num_iscsi_ports = 0;
+
+ new_protocol = (ocs_hw_port_protocol_e)cb_arg->new_protocol;
+
+ num_descriptors = response->desc_count;
+
+ /* Count PCI descriptors */
+ pci_descriptor_count = 0;
+ desc_p = (sli4_resource_descriptor_v1_t *)response->desc;
+ for (i=0; i<num_descriptors; i++) {
+ if (desc_p->descriptor_type == SLI4_RESOURCE_DESCRIPTOR_TYPE_PCIE) {
+ ++pci_descriptor_count;
+ }
+ desc_p = (sli4_resource_descriptor_v1_t *) ((uint8_t *)desc_p + desc_p->descriptor_length);
+ }
+
+ /* mbxdata holds the header of the command */
+	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
+	if (mbxdata == NULL) {
+		ocs_log_err(hw->os, "failed to malloc mbox\n");
+		/* free the memory inherited from ocs_hw_set_port_protocol() */
+		ocs_dma_free(hw->os, &cb_arg->payload);
+		ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
+		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
+		return OCS_HW_RTN_NO_MEMORY;
+	}
+
+
+ /* cb_arg holds the data that will be passed to the callback on completion */
+	new_cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_port_protocol_cb_arg_t), OCS_M_NOWAIT);
+	if (new_cb_arg == NULL) {
+		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
+		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+		/* free the memory inherited from ocs_hw_set_port_protocol() */
+		ocs_dma_free(hw->os, &cb_arg->payload);
+		ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
+		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
+		return OCS_HW_RTN_NO_MEMORY;
+	}
+
+ new_cb_arg->cb = cb_arg->cb;
+ new_cb_arg->arg = cb_arg->arg;
+
+ /* Allocate memory for the descriptors we're going to send. This is
+ * one for each PCI descriptor plus one ISAP descriptor. */
+ if (ocs_dma_alloc(hw->os, &new_cb_arg->payload, sizeof(sli4_req_common_set_profile_config_t) +
+ (pci_descriptor_count * sizeof(sli4_pcie_resource_descriptor_v1_t)) +
+ sizeof(sli4_isap_resouce_descriptor_v1_t), 4)) {
+ ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
+ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+ ocs_free(hw->os, new_cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ sli_cmd_common_set_profile_config(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
+ &new_cb_arg->payload,
+ 0, pci_descriptor_count+1, 1);
+
+ /* Point dst to the first descriptor entry in the SET_PROFILE_CONFIG command */
+ dst = (uint8_t *)&(((sli4_req_common_set_profile_config_t *) new_cb_arg->payload.virt)->desc);
+
+ /* Loop over all descriptors. If the descriptor is a PCIe descriptor, copy it
+ * to the SET_PROFILE_CONFIG command to be written back. If it's the descriptor
+ * that we're trying to change also set its pf_type.
+ */
+ desc_p = (sli4_resource_descriptor_v1_t *)response->desc;
+ for (i=0; i<num_descriptors; i++) {
+ if (desc_p->descriptor_type == SLI4_RESOURCE_DESCRIPTOR_TYPE_PCIE) {
+ pcie_desc_p = (sli4_pcie_resource_descriptor_v1_t*) desc_p;
+ if (pcie_desc_p->pf_number == cb_arg->pci_func) {
+ /* This is the PCIe descriptor for this OCS instance.
+ * Update it with the new pf_type */
+ switch(new_protocol) {
+ case OCS_HW_PORT_PROTOCOL_FC:
+ pcie_desc_p->pf_type = SLI4_PROTOCOL_FC;
+ break;
+ case OCS_HW_PORT_PROTOCOL_FCOE:
+ pcie_desc_p->pf_type = SLI4_PROTOCOL_FCOE;
+ break;
+ case OCS_HW_PORT_PROTOCOL_ISCSI:
+ pcie_desc_p->pf_type = SLI4_PROTOCOL_ISCSI;
+ break;
+ default:
+ pcie_desc_p->pf_type = SLI4_PROTOCOL_DEFAULT;
+ break;
+ }
+
+ }
+
+ if (pcie_desc_p->pf_type == SLI4_PROTOCOL_FCOE) {
+ ++num_fcoe_ports;
+ }
+ if (pcie_desc_p->pf_type == SLI4_PROTOCOL_ISCSI) {
+ ++num_iscsi_ports;
+ }
+ ocs_memcpy(dst, pcie_desc_p, sizeof(sli4_pcie_resource_descriptor_v1_t));
+ dst += sizeof(sli4_pcie_resource_descriptor_v1_t);
+ }
+
+ desc_p = (sli4_resource_descriptor_v1_t *) ((uint8_t *)desc_p + desc_p->descriptor_length);
+ }
+
+ /* Create an ISAP resource descriptor */
+ isap_desc_p = (sli4_isap_resouce_descriptor_v1_t*)dst;
+ isap_desc_p->descriptor_type = SLI4_RESOURCE_DESCRIPTOR_TYPE_ISAP;
+ isap_desc_p->descriptor_length = sizeof(sli4_isap_resouce_descriptor_v1_t);
+ if (num_iscsi_ports > 0) {
+ isap_desc_p->iscsi_tgt = 1;
+ isap_desc_p->iscsi_ini = 1;
+ isap_desc_p->iscsi_dif = 1;
+ }
+ if (num_fcoe_ports > 0) {
+ isap_desc_p->fcoe_tgt = 1;
+ isap_desc_p->fcoe_ini = 1;
+ isap_desc_p->fcoe_dif = 1;
+ }
+
+	/* At this point we're done with the memory allocated by ocs_hw_set_port_protocol() */
+ ocs_dma_free(hw->os, &cb_arg->payload);
+ ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
+ ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
+
+
+ /* Send a SET_PROFILE_CONFIG mailbox command with the new descriptors */
+ rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_port_protocol_cb2, new_cb_arg);
+ if (rc) {
+ ocs_log_err(hw->os, "Error posting COMMON_SET_PROFILE_CONFIG\n");
+ /* Call the upper level callback to report a failure */
+		if (new_cb_arg->cb) {
+			new_cb_arg->cb(rc, new_cb_arg->arg);
+		}
+
+ /* Free the memory allocated by this function */
+ ocs_dma_free(hw->os, &new_cb_arg->payload);
+ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+ ocs_free(hw->os, new_cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
+ }
+
+
+ return rc;
+}
+
+/**
+ * @ingroup io
+ * @brief Set the port protocol.
+ * @par Description
+ * Setting the port protocol is a read-modify-write operation.
+ * This function submits a GET_PROFILE_CONFIG command to read
+ * the current settings. The callback function will modify the
+ * settings and issue the write.
+ *
+ * On successful completion this function will have allocated
+ * two regular memory areas and one dma area which will need to
+ * get freed later in the callbacks.
+ *
+ * @param hw Hardware context.
+ * @param new_protocol New protocol to use.
+ * @param pci_func PCI function to configure.
+ * @param cb Callback function to be called when the command completes.
+ * @param ul_arg An argument that is passed to the callback function.
+ *
+ * @return
+ * - OCS_HW_RTN_SUCCESS on success.
+ * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
+ * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
+ * context.
+ * - OCS_HW_RTN_ERROR on any other error.
+ */
+ocs_hw_rtn_e
+ocs_hw_set_port_protocol(ocs_hw_t *hw, ocs_hw_port_protocol_e new_protocol,
+ uint32_t pci_func, ocs_set_port_protocol_cb_t cb, void *ul_arg)
+{
+ uint8_t *mbxdata;
+ ocs_hw_set_port_protocol_cb_arg_t *cb_arg;
+ ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
+
+ /* Only supported on Skyhawk */
+ if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) {
+ return OCS_HW_RTN_ERROR;
+ }
+
+ /* mbxdata holds the header of the command */
+ mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
+ if (mbxdata == NULL) {
+ ocs_log_err(hw->os, "failed to malloc mbox\n");
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+
+ /* cb_arg holds the data that will be passed to the callback on completion */
+ cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_port_protocol_cb_arg_t), OCS_M_NOWAIT);
+ if (cb_arg == NULL) {
+ ocs_log_err(hw->os, "failed to malloc cb_arg\n");
+ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ cb_arg->cb = cb;
+ cb_arg->arg = ul_arg;
+ cb_arg->new_protocol = new_protocol;
+ cb_arg->pci_func = pci_func;
+
+ /* dma_mem holds the non-embedded portion */
+ if (ocs_dma_alloc(hw->os, &cb_arg->payload, 4096, 4)) {
+ ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
+ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ if (sli_cmd_common_get_profile_config(&hw->sli, mbxdata, SLI4_BMBX_SIZE, &cb_arg->payload)) {
+ rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_port_protocol_cb1, cb_arg);
+ }
+
+ if (rc != OCS_HW_RTN_SUCCESS) {
+ ocs_log_test(hw->os, "GET_PROFILE_CONFIG failed\n");
+		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+		ocs_dma_free(hw->os, &cb_arg->payload);
+		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
+ }
+
+ return rc;
+}
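+
+/*
+ * Sequencing sketch (informative summary of the code above):
+ *
+ *	ocs_hw_set_port_protocol()            issue GET_PROFILE_CONFIG (read)
+ *	  -> ocs_hw_set_port_protocol_cb1()   rewrite the PCIe/ISAP descriptors,
+ *	                                      issue SET_PROFILE_CONFIG (write)
+ *	    -> ocs_hw_set_port_protocol_cb2() invoke the user callback
+ *
+ * Each stage frees the previous stage's mailbox, cb_arg, and DMA memory, so
+ * the caller sees a single completion for the whole read-modify-write.
+ */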
+
+typedef struct ocs_hw_get_profile_list_cb_arg_s {
+ ocs_get_profile_list_cb_t cb;
+ void *arg;
+ ocs_dma_t payload;
+} ocs_hw_get_profile_list_cb_arg_t;
+
+/**
+ * @brief Called for the completion of get_profile_list for a
+ * user request.
+ * @par Description
+ * This function is called when the COMMON_GET_PROFILE_LIST
+ * mailbox completes. The response will be in
+ * ctx->non_embedded_mem.virt. This function parses the
+ * response and creates a ocs_hw_profile_list, then calls the
+ * mgmt_cb callback function and passes that list to it.
+ *
+ * @param hw Hardware context.
+ * @param status The status from the MQE
+ * @param mqe Pointer to mailbox command buffer.
+ * @param arg Pointer to a callback argument.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+static int32_t
+ocs_hw_get_profile_list_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
+{
+ ocs_hw_profile_list_t *list;
+ ocs_hw_get_profile_list_cb_arg_t *cb_arg = arg;
+ ocs_dma_t *payload = &(cb_arg->payload);
+ sli4_res_common_get_profile_list_t *response = (sli4_res_common_get_profile_list_t *)payload->virt;
+ int i;
+ int num_descriptors;
+
+	list = ocs_malloc(hw->os, sizeof(ocs_hw_profile_list_t), OCS_M_ZERO);
+	if (list == NULL) {
+		/* no list to hand to the caller; clean up and bail */
+		ocs_log_err(hw->os, "failed to malloc list\n");
+		ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
+		ocs_dma_free(hw->os, &cb_arg->payload);
+		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_profile_list_cb_arg_t));
+		return -1;
+	}
+
+	list->num_descriptors = response->profile_descriptor_count;
+
+ num_descriptors = list->num_descriptors;
+ if (num_descriptors > OCS_HW_MAX_PROFILES) {
+ num_descriptors = OCS_HW_MAX_PROFILES;
+ }
+
+ for (i=0; i<num_descriptors; i++) {
+ list->descriptors[i].profile_id = response->profile_descriptor[i].profile_id;
+ list->descriptors[i].profile_index = response->profile_descriptor[i].profile_index;
+ ocs_strcpy(list->descriptors[i].profile_description, (char *)response->profile_descriptor[i].profile_description);
+ }
+
+ if (cb_arg->cb) {
+ cb_arg->cb(status, list, cb_arg->arg);
+ } else {
+ ocs_free(hw->os, list, sizeof(*list));
+ }
+
+ ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
+ ocs_dma_free(hw->os, &cb_arg->payload);
+ ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_profile_list_cb_arg_t));
+
+ return 0;
+}
+
+/**
+ * @ingroup io
+ * @brief Get a list of available profiles.
+ * @par Description
+ * Issues a SLI-4 COMMON_GET_PROFILE_LIST mailbox. When the
+ * command completes the provided mgmt callback function is
+ * called.
+ *
+ * @param hw Hardware context.
+ * @param cb Callback function to be called when the
+ * command completes.
+ * @param ul_arg An argument that is passed to the callback
+ * function.
+ *
+ * @return
+ * - OCS_HW_RTN_SUCCESS on success.
+ * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
+ * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
+ * context.
+ * - OCS_HW_RTN_ERROR on any other error.
+ */
+ocs_hw_rtn_e
+ocs_hw_get_profile_list(ocs_hw_t *hw, ocs_get_profile_list_cb_t cb, void* ul_arg)
+{
+ uint8_t *mbxdata;
+ ocs_hw_get_profile_list_cb_arg_t *cb_arg;
+ ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
+
+ /* Only supported on Skyhawk */
+ if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) {
+ return OCS_HW_RTN_ERROR;
+ }
+
+ /* mbxdata holds the header of the command */
+ mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
+ if (mbxdata == NULL) {
+ ocs_log_err(hw->os, "failed to malloc mbox\n");
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+
+ /* cb_arg holds the data that will be passed to the callback on completion */
+ cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_profile_list_cb_arg_t), OCS_M_NOWAIT);
+ if (cb_arg == NULL) {
+ ocs_log_err(hw->os, "failed to malloc cb_arg\n");
+ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ cb_arg->cb = cb;
+ cb_arg->arg = ul_arg;
+
+ /* dma_mem holds the non-embedded portion */
+ if (ocs_dma_alloc(hw->os, &cb_arg->payload, sizeof(sli4_res_common_get_profile_list_t), 4)) {
+ ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
+ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+ ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_profile_list_cb_arg_t));
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ if (sli_cmd_common_get_profile_list(&hw->sli, mbxdata, SLI4_BMBX_SIZE, 0, &cb_arg->payload)) {
+ rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_profile_list_cb, cb_arg);
+ }
+
+ if (rc != OCS_HW_RTN_SUCCESS) {
+ ocs_log_test(hw->os, "GET_PROFILE_LIST failed\n");
+ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+ ocs_dma_free(hw->os, &cb_arg->payload);
+ ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_profile_list_cb_arg_t));
+ }
+
+ return rc;
+}
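+
+/*
+ * Usage sketch (hypothetical handler; the "ocs" pointer carried in arg is an
+ * assumption): when a callback is registered, it owns the returned list and
+ * must free it, since the completion path above only frees the list when no
+ * callback is present:
+ *
+ *	static void
+ *	profile_list_done(int32_t status, ocs_hw_profile_list_t *list, void *arg)
+ *	{
+ *		ocs_t *ocs = arg;
+ *		uint32_t i;
+ *
+ *		for (i = 0; status == 0 && i < list->num_descriptors; i++) {
+ *			// inspect list->descriptors[i].profile_id / profile_description
+ *		}
+ *		ocs_free(ocs, list, sizeof(*list));
+ *	}
+ */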
+
+typedef struct ocs_hw_get_active_profile_cb_arg_s {
+ ocs_get_active_profile_cb_t cb;
+ void *arg;
+} ocs_hw_get_active_profile_cb_arg_t;
+
+/**
+ * @brief Called for the completion of get_active_profile for a
+ * user request.
+ *
+ * @param hw Hardware context.
+ * @param status The status from the MQE
+ * @param mqe Pointer to mailbox command buffer.
+ * @param arg Pointer to a callback argument.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+static int32_t
+ocs_hw_get_active_profile_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
+{
+ ocs_hw_get_active_profile_cb_arg_t *cb_arg = arg;
+ sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
+ sli4_res_common_get_active_profile_t* response = (sli4_res_common_get_active_profile_t*) mbox_rsp->payload.embed;
+ uint32_t active_profile;
+
+ active_profile = response->active_profile_id;
+
+ if (cb_arg->cb) {
+ cb_arg->cb(status, active_profile, cb_arg->arg);
+ }
+
+ ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
+ ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_active_profile_cb_arg_t));
+
+ return 0;
+}
+
+/**
+ * @ingroup io
+ * @brief Get the currently active profile.
+ * @par Description
+ * Issues a SLI-4 COMMON_GET_ACTIVE_PROFILE mailbox. When the
+ * command completes the provided mgmt callback function is
+ * called.
+ *
+ * @param hw Hardware context.
+ * @param cb Callback function to be called when the
+ * command completes.
+ * @param ul_arg An argument that is passed to the callback
+ * function.
+ *
+ * @return
+ * - OCS_HW_RTN_SUCCESS on success.
+ * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
+ * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
+ * context.
+ * - OCS_HW_RTN_ERROR on any other error.
+ */
+int32_t
+ocs_hw_get_active_profile(ocs_hw_t *hw, ocs_get_active_profile_cb_t cb, void* ul_arg)
+{
+ uint8_t *mbxdata;
+ ocs_hw_get_active_profile_cb_arg_t *cb_arg;
+ ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
+
+ /* Only supported on Skyhawk */
+ if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) {
+ return OCS_HW_RTN_ERROR;
+ }
+
+ /* mbxdata holds the header of the command */
+ mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
+ if (mbxdata == NULL) {
+ ocs_log_err(hw->os, "failed to malloc mbox\n");
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ /* cb_arg holds the data that will be passed to the callback on completion */
+ cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_active_profile_cb_arg_t), OCS_M_NOWAIT);
+ if (cb_arg == NULL) {
+ ocs_log_err(hw->os, "failed to malloc cb_arg\n");
+ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ cb_arg->cb = cb;
+ cb_arg->arg = ul_arg;
+
+ if (sli_cmd_common_get_active_profile(&hw->sli, mbxdata, SLI4_BMBX_SIZE)) {
+ rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_active_profile_cb, cb_arg);
+ }
+
+ if (rc != OCS_HW_RTN_SUCCESS) {
+ ocs_log_test(hw->os, "GET_ACTIVE_PROFILE failed\n");
+ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+ ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_active_profile_cb_arg_t));
+ }
+
+ return rc;
+}
+
+typedef struct ocs_hw_get_nvparms_cb_arg_s {
+ ocs_get_nvparms_cb_t cb;
+ void *arg;
+} ocs_hw_get_nvparms_cb_arg_t;
+
+/**
+ * @brief Called for the completion of get_nvparms for a
+ * user request.
+ *
+ * @param hw Hardware context.
+ * @param status The status from the MQE.
+ * @param mqe Pointer to mailbox command buffer.
+ * @param arg Pointer to a callback argument.
+ *
+ * @return 0 on success, non-zero otherwise
+ */
+static int32_t
+ocs_hw_get_nvparms_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
+{
+ ocs_hw_get_nvparms_cb_arg_t *cb_arg = arg;
+ sli4_cmd_read_nvparms_t* mbox_rsp = (sli4_cmd_read_nvparms_t*) mqe;
+
+ if (cb_arg->cb) {
+ cb_arg->cb(status, mbox_rsp->wwpn, mbox_rsp->wwnn, mbox_rsp->hard_alpa,
+ mbox_rsp->preferred_d_id, cb_arg->arg);
+ }
+
+ ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
+ ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_nvparms_cb_arg_t));
+
+ return 0;
+}
+
+/**
+ * @ingroup io
+ * @brief Read non-volatile parms.
+ * @par Description
+ * Issues a SLI-4 READ_NVPARMS mailbox. When the
+ * command completes the provided mgmt callback function is
+ * called.
+ *
+ * @param hw Hardware context.
+ * @param cb Callback function to be called when the
+ * command completes.
+ * @param ul_arg An argument that is passed to the callback
+ * function.
+ *
+ * @return
+ * - OCS_HW_RTN_SUCCESS on success.
+ * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
+ * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
+ * context.
+ * - OCS_HW_RTN_ERROR on any other error.
+ */
+int32_t
+ocs_hw_get_nvparms(ocs_hw_t *hw, ocs_get_nvparms_cb_t cb, void* ul_arg)
+{
+ uint8_t *mbxdata;
+ ocs_hw_get_nvparms_cb_arg_t *cb_arg;
+ ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
+
+ /* mbxdata holds the header of the command */
+ mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
+ if (mbxdata == NULL) {
+ ocs_log_err(hw->os, "failed to malloc mbox\n");
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ /* cb_arg holds the data that will be passed to the callback on completion */
+ cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_nvparms_cb_arg_t), OCS_M_NOWAIT);
+ if (cb_arg == NULL) {
+ ocs_log_err(hw->os, "failed to malloc cb_arg\n");
+ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ cb_arg->cb = cb;
+ cb_arg->arg = ul_arg;
+
+ if (sli_cmd_read_nvparms(&hw->sli, mbxdata, SLI4_BMBX_SIZE)) {
+ rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_nvparms_cb, cb_arg);
+ }
+
+ if (rc != OCS_HW_RTN_SUCCESS) {
+ ocs_log_test(hw->os, "READ_NVPARMS failed\n");
+ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+ ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_nvparms_cb_arg_t));
+ }
+
+ return rc;
+}
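+
+/*
+ * Usage sketch (illustrative; the handler is an assumption matching the
+ * invocation in ocs_hw_get_nvparms_cb() above): wwpn/wwnn point into the
+ * mailbox response, which is freed as soon as the callback returns, so the
+ * handler must copy them out:
+ *
+ *	static void
+ *	nvparms_done(int32_t status, uint8_t *wwpn, uint8_t *wwnn,
+ *		     uint8_t hard_alpa, uint32_t preferred_d_id, void *arg)
+ *	{
+ *		// copy wwpn/wwnn into caller-owned storage before returning
+ *	}
+ */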
+
+typedef struct ocs_hw_set_nvparms_cb_arg_s {
+ ocs_set_nvparms_cb_t cb;
+ void *arg;
+} ocs_hw_set_nvparms_cb_arg_t;
+
+/**
+ * @brief Called for the completion of set_nvparms for a
+ * user request.
+ *
+ * @param hw Hardware context.
+ * @param status The status from the MQE.
+ * @param mqe Pointer to mailbox command buffer.
+ * @param arg Pointer to a callback argument.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+static int32_t
+ocs_hw_set_nvparms_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
+{
+ ocs_hw_set_nvparms_cb_arg_t *cb_arg = arg;
+
+ if (cb_arg->cb) {
+ cb_arg->cb(status, cb_arg->arg);
+ }
+
+ ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
+ ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_nvparms_cb_arg_t));
+
+ return 0;
+}
+
+/**
+ * @ingroup io
+ * @brief Write non-volatile parms.
+ * @par Description
+ * Issues a SLI-4 WRITE_NVPARMS mailbox. When the
+ * command completes the provided mgmt callback function is
+ * called.
+ *
+ * @param hw Hardware context.
+ * @param cb Callback function to be called when the
+ * command completes.
+ * @param wwpn Port's WWPN in big-endian order, or NULL to use default.
+ * @param wwnn Port's WWNN in big-endian order, or NULL to use default.
+ * @param hard_alpa A hard AL_PA address setting used during loop
+ * initialization. If no hard AL_PA is required, set to 0.
+ * @param preferred_d_id A preferred D_ID address setting
+ * that may be overridden with the CONFIG_LINK mailbox command.
+ * If there is no preference, set to 0.
+ * @param ul_arg An argument that is passed to the callback
+ * function.
+ *
+ * @return
+ * - OCS_HW_RTN_SUCCESS on success.
+ * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
+ * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
+ * context.
+ * - OCS_HW_RTN_ERROR on any other error.
+ */
+int32_t
+ocs_hw_set_nvparms(ocs_hw_t *hw, ocs_set_nvparms_cb_t cb, uint8_t *wwpn,
+ uint8_t *wwnn, uint8_t hard_alpa, uint32_t preferred_d_id, void* ul_arg)
+{
+ uint8_t *mbxdata;
+ ocs_hw_set_nvparms_cb_arg_t *cb_arg;
+ ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
+
+ /* mbxdata holds the header of the command */
+ mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
+ if (mbxdata == NULL) {
+ ocs_log_err(hw->os, "failed to malloc mbox\n");
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ /* cb_arg holds the data that will be passed to the callback on completion */
+ cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_nvparms_cb_arg_t), OCS_M_NOWAIT);
+ if (cb_arg == NULL) {
+ ocs_log_err(hw->os, "failed to malloc cb_arg\n");
+ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ cb_arg->cb = cb;
+ cb_arg->arg = ul_arg;
+
+ if (sli_cmd_write_nvparms(&hw->sli, mbxdata, SLI4_BMBX_SIZE, wwpn, wwnn, hard_alpa, preferred_d_id)) {
+ rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_nvparms_cb, cb_arg);
+ }
+
+ if (rc != OCS_HW_RTN_SUCCESS) {
+ ocs_log_test(hw->os, "SET_NVPARMS failed\n");
+ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+ ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_nvparms_cb_arg_t));
+ }
+
+ return rc;
+}
+
+/**
+ * @brief Called to obtain the count for the specified type.
+ *
+ * @param hw Hardware context.
+ * @param io_count_type IO count type (inuse, free, wait_free).
+ *
+ * @return Returns the number of IOs on the specified list type.
+ */
+uint32_t
+ocs_hw_io_get_count(ocs_hw_t *hw, ocs_hw_io_count_type_e io_count_type)
+{
+ ocs_hw_io_t *io = NULL;
+ uint32_t count = 0;
+
+ ocs_lock(&hw->io_lock);
+
+ switch (io_count_type) {
+ case OCS_HW_IO_INUSE_COUNT :
+ ocs_list_foreach(&hw->io_inuse, io) {
+ count++;
+ }
+ break;
+ case OCS_HW_IO_FREE_COUNT :
+ ocs_list_foreach(&hw->io_free, io) {
+ count++;
+ }
+ break;
+ case OCS_HW_IO_WAIT_FREE_COUNT :
+ ocs_list_foreach(&hw->io_wait_free, io) {
+ count++;
+ }
+ break;
+ case OCS_HW_IO_PORT_OWNED_COUNT:
+ ocs_list_foreach(&hw->io_port_owned, io) {
+ count++;
+ }
+ break;
+ case OCS_HW_IO_N_TOTAL_IO_COUNT :
+ count = hw->config.n_io;
+ break;
+ }
+
+ ocs_unlock(&hw->io_lock);
+
+ return count;
+}
+
+/**
+ * @brief Called to obtain the count of produced RQs.
+ *
+ * @param hw Hardware context.
+ *
+ * @return Returns the number of RQs produced.
+ */
+uint32_t
+ocs_hw_get_rqes_produced_count(ocs_hw_t *hw)
+{
+ uint32_t count = 0;
+ uint32_t i;
+ uint32_t j;
+
+ for (i = 0; i < hw->hw_rq_count; i++) {
+ hw_rq_t *rq = hw->hw_rq[i];
+ if (rq->rq_tracker != NULL) {
+ for (j = 0; j < rq->entry_count; j++) {
+ if (rq->rq_tracker[j] != NULL) {
+ count++;
+ }
+ }
+ }
+ }
+
+ return count;
+}
+
+typedef struct ocs_hw_set_active_profile_cb_arg_s {
+ ocs_set_active_profile_cb_t cb;
+ void *arg;
+} ocs_hw_set_active_profile_cb_arg_t;
+
+/**
+ * @brief Called for the completion of set_active_profile for a
+ * user request.
+ *
+ * @param hw Hardware context.
+ * @param status The status from the MQE
+ * @param mqe Pointer to mailbox command buffer.
+ * @param arg Pointer to a callback argument.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+static int32_t
+ocs_hw_set_active_profile_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
+{
+ ocs_hw_set_active_profile_cb_arg_t *cb_arg = arg;
+
+ if (cb_arg->cb) {
+ cb_arg->cb(status, cb_arg->arg);
+ }
+
+ ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
+	ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_active_profile_cb_arg_t));
+
+ return 0;
+}
+
+/**
+ * @ingroup io
+ * @brief Set the currently active profile.
+ * @par Description
+ * Issues a SLI4 COMMON_SET_ACTIVE_PROFILE mailbox. When the
+ * command completes the provided mgmt callback function is
+ * called.
+ *
+ * @param hw Hardware context.
+ * @param profile_id Profile ID to activate.
+ * @param cb Callback function to be called when the command completes.
+ * @param ul_arg An argument that is passed to the callback function.
+ *
+ * @return
+ * - OCS_HW_RTN_SUCCESS on success.
+ * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
+ * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
+ * context.
+ * - OCS_HW_RTN_ERROR on any other error.
+ */
+int32_t
+ocs_hw_set_active_profile(ocs_hw_t *hw, ocs_set_active_profile_cb_t cb, uint32_t profile_id, void* ul_arg)
+{
+ uint8_t *mbxdata;
+ ocs_hw_set_active_profile_cb_arg_t *cb_arg;
+ ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
+
+ /* Only supported on Skyhawk */
+ if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) {
+ return OCS_HW_RTN_ERROR;
+ }
+
+ /* mbxdata holds the header of the command */
+ mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
+ if (mbxdata == NULL) {
+ ocs_log_err(hw->os, "failed to malloc mbox\n");
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+
+ /* cb_arg holds the data that will be passed to the callback on completion */
+ cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_active_profile_cb_arg_t), OCS_M_NOWAIT);
+ if (cb_arg == NULL) {
+ ocs_log_err(hw->os, "failed to malloc cb_arg\n");
+ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ cb_arg->cb = cb;
+ cb_arg->arg = ul_arg;
+
+ if (sli_cmd_common_set_active_profile(&hw->sli, mbxdata, SLI4_BMBX_SIZE, 0, profile_id)) {
+ rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_active_profile_cb, cb_arg);
+ }
+
+ if (rc != OCS_HW_RTN_SUCCESS) {
+ ocs_log_test(hw->os, "SET_ACTIVE_PROFILE failed\n");
+ ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
+ ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_active_profile_cb_arg_t));
+ }
+
+ return rc;
+}
+
+/*
+ * Private functions
+ */
+
+/**
+ * @brief Update the queue hash with the ID and index.
+ *
+ * @param hash Pointer to hash table.
+ * @param id ID that was created.
+ * @param index The index into the hash object.
+ */
+static void
+ocs_hw_queue_hash_add(ocs_queue_hash_t *hash, uint16_t id, uint16_t index)
+{
+ uint32_t hash_index = id & (OCS_HW_Q_HASH_SIZE - 1);
+
+ /*
+	 * Since the hash is always bigger than the number of queues, we
+	 * never have to worry about an infinite loop.
+ */
+ while(hash[hash_index].in_use) {
+ hash_index = (hash_index + 1) & (OCS_HW_Q_HASH_SIZE - 1);
+ }
+
+ /* not used, claim the entry */
+ hash[hash_index].id = id;
+ hash[hash_index].in_use = 1;
+ hash[hash_index].index = index;
+}
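+
+/*
+ * Worked example (informative, assuming a hash size of 8 for brevity):
+ * queue IDs 3 and 11 both hash to slot 3 (id & 7), so insertion probes
+ * linearly:
+ *
+ *	add(id=3)  -> slot 3 free   -> stored at slot 3
+ *	add(id=11) -> slot 3 in use -> probe slot 4 -> stored at slot 4
+ *
+ * ocs_hw_queue_hash_find() below probes the same way and stops at the first
+ * unused slot, which is why the table must stay larger than the number of
+ * queues.
+ */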
+
+/**
+ * @brief Find index given queue ID.
+ *
+ * @param hash Pointer to hash table.
+ * @param id ID to find.
+ *
+ * @return Returns the index into the HW cq array or -1 if not found.
+ */
+int32_t
+ocs_hw_queue_hash_find(ocs_queue_hash_t *hash, uint16_t id)
+{
+ int32_t rc = -1;
+ int32_t index = id & (OCS_HW_Q_HASH_SIZE - 1);
+
+ /*
+	 * Since the hash is always bigger than the maximum number of queues, we
+	 * never have to worry about an infinite loop; the probe always
+	 * terminates at an unused entry.
+ */
+ do {
+ if (hash[index].in_use &&
+ hash[index].id == id) {
+ rc = hash[index].index;
+ } else {
+ index = (index + 1) & (OCS_HW_Q_HASH_SIZE - 1);
+ }
+ } while(rc == -1 && hash[index].in_use);
+
+ return rc;
+}
+
+static int32_t
+ocs_hw_domain_add(ocs_hw_t *hw, ocs_domain_t *domain)
+{
+ int32_t rc = OCS_HW_RTN_ERROR;
+ uint16_t fcfi = UINT16_MAX;
+
+ if ((hw == NULL) || (domain == NULL)) {
+ ocs_log_err(NULL, "bad parameter hw=%p domain=%p\n",
+ hw, domain);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ fcfi = domain->fcf_indicator;
+
+ if (fcfi < SLI4_MAX_FCFI) {
+ uint16_t fcf_index = UINT16_MAX;
+
+ ocs_log_debug(hw->os, "adding domain %p @ %#x\n",
+ domain, fcfi);
+ hw->domains[fcfi] = domain;
+
+ /* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */
+ if (hw->workaround.override_fcfi) {
+ if (hw->first_domain_idx < 0) {
+ hw->first_domain_idx = fcfi;
+ }
+ }
+
+ fcf_index = domain->fcf;
+
+ if (fcf_index < SLI4_MAX_FCF_INDEX) {
+ ocs_log_debug(hw->os, "adding map of FCF index %d to FCFI %d\n",
+ fcf_index, fcfi);
+ hw->fcf_index_fcfi[fcf_index] = fcfi;
+ rc = OCS_HW_RTN_SUCCESS;
+ } else {
+ ocs_log_test(hw->os, "FCF index %d out of range (max %d)\n",
+ fcf_index, SLI4_MAX_FCF_INDEX);
+ hw->domains[fcfi] = NULL;
+ }
+ } else {
+ ocs_log_test(hw->os, "FCFI %#x out of range (max %#x)\n",
+ fcfi, SLI4_MAX_FCFI);
+ }
+
+ return rc;
+}
+
+static int32_t
+ocs_hw_domain_del(ocs_hw_t *hw, ocs_domain_t *domain)
+{
+ int32_t rc = OCS_HW_RTN_ERROR;
+ uint16_t fcfi = UINT16_MAX;
+
+ if ((hw == NULL) || (domain == NULL)) {
+ ocs_log_err(NULL, "bad parameter hw=%p domain=%p\n",
+ hw, domain);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ fcfi = domain->fcf_indicator;
+
+ if (fcfi < SLI4_MAX_FCFI) {
+ uint16_t fcf_index = UINT16_MAX;
+
+ ocs_log_debug(hw->os, "deleting domain %p @ %#x\n",
+ domain, fcfi);
+
+ if (domain != hw->domains[fcfi]) {
+ ocs_log_test(hw->os, "provided domain %p does not match stored domain %p\n",
+ domain, hw->domains[fcfi]);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ hw->domains[fcfi] = NULL;
+
+ /* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */
+ if (hw->workaround.override_fcfi) {
+ if (hw->first_domain_idx == fcfi) {
+ hw->first_domain_idx = -1;
+ }
+ }
+
+ fcf_index = domain->fcf;
+
+ if (fcf_index < SLI4_MAX_FCF_INDEX) {
+ if (hw->fcf_index_fcfi[fcf_index] == fcfi) {
+ hw->fcf_index_fcfi[fcf_index] = 0;
+ rc = OCS_HW_RTN_SUCCESS;
+ } else {
+ ocs_log_test(hw->os, "indexed FCFI %#x doesn't match provided %#x @ %d\n",
+ hw->fcf_index_fcfi[fcf_index], fcfi, fcf_index);
+ }
+ } else {
+ ocs_log_test(hw->os, "FCF index %d out of range (max %d)\n",
+ fcf_index, SLI4_MAX_FCF_INDEX);
+ }
+ } else {
+ ocs_log_test(hw->os, "FCFI %#x out of range (max %#x)\n",
+ fcfi, SLI4_MAX_FCFI);
+ }
+
+ return rc;
+}
+
+ocs_domain_t *
+ocs_hw_domain_get(ocs_hw_t *hw, uint16_t fcfi)
+{
+
+ if (hw == NULL) {
+ ocs_log_err(NULL, "bad parameter hw=%p\n", hw);
+ return NULL;
+ }
+
+ if (fcfi < SLI4_MAX_FCFI) {
+ return hw->domains[fcfi];
+ } else {
+ ocs_log_test(hw->os, "FCFI %#x out of range (max %#x)\n",
+ fcfi, SLI4_MAX_FCFI);
+ return NULL;
+ }
+}
+
+static ocs_domain_t *
+ocs_hw_domain_get_indexed(ocs_hw_t *hw, uint16_t fcf_index)
+{
+
+ if (hw == NULL) {
+ ocs_log_err(NULL, "bad parameter hw=%p\n", hw);
+ return NULL;
+ }
+
+ if (fcf_index < SLI4_MAX_FCF_INDEX) {
+ return ocs_hw_domain_get(hw, hw->fcf_index_fcfi[fcf_index]);
+ } else {
+ ocs_log_test(hw->os, "FCF index %d out of range (max %d)\n",
+ fcf_index, SLI4_MAX_FCF_INDEX);
+ return NULL;
+ }
+}
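+
+/*
+ * Mapping sketch (informative summary of the bookkeeping above): two maps
+ * are maintained,
+ *
+ *	hw->domains[fcfi]           FCFI          -> domain pointer
+ *	hw->fcf_index_fcfi[fcf]     FCF table idx -> FCFI
+ *
+ * so a frame tagged with an FCF table index resolves to a domain via
+ * ocs_hw_domain_get_indexed() -> ocs_hw_domain_get().
+ */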
+
+/**
+ * @brief Quarantine an IO by taking a reference count and adding it to the
+ *        quarantine list. When the IO is popped from the list, the
+ *        count is released and the IO MAY be freed, depending on whether
+ *        any other references to it remain.
+ *
+ * @n @b Note: BZ 160124 - If this is a target write or an initiator read using
+ * DIF, then we must add the XRI to a quarantine list until we receive
+ * 4 more completions of this same type.
+ *
+ * @param hw Hardware context.
+ * @param wq Pointer to the WQ associated with the IO object to quarantine.
+ * @param io Pointer to the io object to quarantine.
+ */
+static void
+ocs_hw_io_quarantine(ocs_hw_t *hw, hw_wq_t *wq, ocs_hw_io_t *io)
+{
+ ocs_quarantine_info_t *q_info = &wq->quarantine_info;
+ uint32_t index;
+ ocs_hw_io_t *free_io = NULL;
+
+ /* return if the QX bit was clear */
+ if (!io->quarantine) {
+ return;
+ }
+
+ /* increment the IO refcount to prevent it from being freed before the quarantine is over */
+ if (ocs_ref_get_unless_zero(&io->ref) == 0) {
+ /* command no longer active */
+ ocs_log_debug(hw ? hw->os : NULL,
+ "io not active xri=0x%x tag=0x%x\n",
+ io->indicator, io->reqtag);
+ return;
+ }
+
+ sli_queue_lock(wq->queue);
+ index = q_info->quarantine_index;
+ free_io = q_info->quarantine_ios[index];
+ q_info->quarantine_ios[index] = io;
+ q_info->quarantine_index = (index + 1) % OCS_HW_QUARANTINE_QUEUE_DEPTH;
+ sli_queue_unlock(wq->queue);
+
+ if (free_io != NULL) {
+		ocs_ref_put(&free_io->ref); /* balances the ocs_ref_get_unless_zero() above */
+ }
+}
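+
+/*
+ * Behavior sketch (informative; a depth of 4 is assumed here to match the
+ * BZ 160124 note above): the quarantine list is a fixed-depth ring, so
+ * quarantining IOs A..E proceeds as
+ *
+ *	add A -> [A - - -]   nothing evicted
+ *	add B -> [A B - -]
+ *	...
+ *	add E -> [E B C D]   A evicted, its extra reference dropped
+ *
+ * i.e. an IO's extra reference is held until four later quarantined
+ * completions of the same type have been seen.
+ */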
+
+/**
+ * @brief Process entries on the given completion queue.
+ *
+ * @param hw Hardware context.
+ * @param cq Pointer to the HW completion queue object.
+ *
+ * @return None.
+ */
+void
+ocs_hw_cq_process(ocs_hw_t *hw, hw_cq_t *cq)
+{
+ uint8_t cqe[sizeof(sli4_mcqe_t)];
+ uint16_t rid = UINT16_MAX;
+ sli4_qentry_e ctype; /* completion type */
+ int32_t status;
+ uint32_t n_processed = 0;
+ time_t tstart;
+ time_t telapsed;
+
+ tstart = ocs_msectime();
+
+ while (!sli_queue_read(&hw->sli, cq->queue, cqe)) {
+ status = sli_cq_parse(&hw->sli, cq->queue, cqe, &ctype, &rid);
+ /*
+ * The sign of status is significant. If status is:
+ * == 0 : call completed correctly and the CQE indicated success
+ * > 0 : call completed correctly and the CQE indicated an error
+ * < 0 : call failed and no information is available about the CQE
+ */
+ if (status < 0) {
+ if (status == -2) {
+ /* Notification that an entry was consumed, but not completed */
+ continue;
+ }
+
+ break;
+ }
+
+ switch (ctype) {
+ case SLI_QENTRY_ASYNC:
+ CPUTRACE("async");
+ sli_cqe_async(&hw->sli, cqe);
+ break;
+ case SLI_QENTRY_MQ:
+ /*
+ * Process MQ entry. Note there is no way to determine
+ * the MQ_ID from the completion entry.
+ */
+ CPUTRACE("mq");
+ ocs_hw_mq_process(hw, status, hw->mq);
+ break;
+ case SLI_QENTRY_OPT_WRITE_CMD:
+ ocs_hw_rqpair_process_auto_xfr_rdy_cmd(hw, cq, cqe);
+ break;
+ case SLI_QENTRY_OPT_WRITE_DATA:
+ ocs_hw_rqpair_process_auto_xfr_rdy_data(hw, cq, cqe);
+ break;
+ case SLI_QENTRY_WQ:
+ CPUTRACE("wq");
+ ocs_hw_wq_process(hw, cq, cqe, status, rid);
+ break;
+ case SLI_QENTRY_WQ_RELEASE: {
+			uint32_t wq_id = rid;
+			int32_t index = ocs_hw_queue_hash_find(hw->wq_hash, wq_id);
+			hw_wq_t *wq;
+
+			if (index < 0) {
+				ocs_log_err(hw->os, "unknown WQ_RELEASE wq_id=%#x\n", wq_id);
+				break;
+			}
+			wq = hw->hw_wq[index];
+
+ /* Submit any HW IOs that are on the WQ pending list */
+ hw_wq_submit_pending(wq, wq->wqec_set_count);
+
+ break;
+ }
+
+ case SLI_QENTRY_RQ:
+ CPUTRACE("rq");
+ ocs_hw_rqpair_process_rq(hw, cq, cqe);
+ break;
+ case SLI_QENTRY_XABT: {
+ CPUTRACE("xabt");
+ ocs_hw_xabt_process(hw, cq, cqe, rid);
+			break;
+		}
+ default:
+ ocs_log_test(hw->os, "unhandled ctype=%#x rid=%#x\n", ctype, rid);
+ break;
+ }
+
+ n_processed++;
+ if (n_processed == cq->queue->proc_limit) {
+ break;
+ }
+
+ if (cq->queue->n_posted >= (cq->queue->posted_limit)) {
+ sli_queue_arm(&hw->sli, cq->queue, FALSE);
+ }
+ }
+
+ sli_queue_arm(&hw->sli, cq->queue, TRUE);
+
+ if (n_processed > cq->queue->max_num_processed) {
+ cq->queue->max_num_processed = n_processed;
+ }
+ telapsed = ocs_msectime() - tstart;
+ if (telapsed > cq->queue->max_process_time) {
+ cq->queue->max_process_time = telapsed;
+ }
+}
+
+/**
+ * @brief Process WQ completion queue entries.
+ *
+ * @param hw Hardware context.
+ * @param cq Pointer to the HW completion queue object.
+ * @param cqe Pointer to the WQ completion queue entry.
+ * @param status Completion status.
+ * @param rid Resource ID (IO tag).
+ *
+ * @return None.
+ */
+void
+ocs_hw_wq_process(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe, int32_t status, uint16_t rid)
+{
+ hw_wq_callback_t *wqcb;
+
+ ocs_queue_history_cqe(&hw->q_hist, SLI_QENTRY_WQ, (void *)cqe, ((sli4_fc_wcqe_t *)cqe)->status, cq->queue->id,
+ ((cq->queue->index - 1) & (cq->queue->length - 1)));
+
+ if(rid == OCS_HW_REQUE_XRI_REGTAG) {
+ if(status) {
+ ocs_log_err(hw->os, "reque xri failed, status = %d \n", status);
+ }
+ return;
+ }
+
+ wqcb = ocs_hw_reqtag_get_instance(hw, rid);
+ if (wqcb == NULL) {
+ ocs_log_err(hw->os, "invalid request tag: x%x\n", rid);
+ return;
+ }
+
+ if (wqcb->callback == NULL) {
+ ocs_log_err(hw->os, "wqcb callback is NULL\n");
+ return;
+ }
+
+ (*wqcb->callback)(wqcb->arg, cqe, status);
+}
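+
+/*
+ * Dispatch sketch (informative): a WQ completion carries only a request tag,
+ * so delivery is a two-step lookup,
+ *
+ *	rid (from CQE) -> ocs_hw_reqtag_get_instance() -> wqcb
+ *	wqcb->callback(wqcb->arg, cqe, status)
+ *
+ * with the handlers defined below (ocs_hw_wq_process_io() for IO WQEs,
+ * ocs_hw_wq_process_abort() for abort WQEs) registered against the tag at
+ * submit time.
+ */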
+
+/**
+ * @brief Process WQ completions for IO requests
+ *
+ * @param arg Generic callback argument
+ * @param cqe Pointer to completion queue entry
+ * @param status Completion status
+ *
+ * @par Description
+ * @n @b Note: Regarding io->reqtag: the reqtag is assigned once when the HW IOs are initialized
+ * in ocs_hw_setup_io(), and does not need to be returned to the hw->wq_reqtag_pool.
+ *
+ * @return None.
+ */
+static void
+ocs_hw_wq_process_io(void *arg, uint8_t *cqe, int32_t status)
+{
+ ocs_hw_io_t *io = arg;
+ ocs_hw_t *hw = io->hw;
+ sli4_fc_wcqe_t *wcqe = (void *)cqe;
+ uint32_t len = 0;
+ uint32_t ext = 0;
+ uint8_t out_of_order_axr_cmd = 0;
+ uint8_t out_of_order_axr_data = 0;
+ uint8_t lock_taken = 0;
+#if defined(OCS_DISC_SPIN_DELAY)
+ uint32_t delay = 0;
+ char prop_buf[32];
+#endif
+
+ /*
+	 * For the primary IO, this XRI will also be used for the
+ * response. So it is important to only set/clear this
+ * flag on the first data phase of the IO because
+ * subsequent phases will be done on the secondary XRI.
+ */
+ if (io->quarantine && io->quarantine_first_phase) {
+ io->quarantine = (wcqe->qx == 1);
+ ocs_hw_io_quarantine(hw, io->wq, io);
+ }
+ io->quarantine_first_phase = FALSE;
+
+	/* BZ 161832 - quarantine the secondary HW IO if needed */
+ if (io->sec_hio != NULL &&
+ io->sec_hio->quarantine) {
+ /*
+ * If the quarantine flag is set on the
+ * IO, then set it on the secondary IO
+ * based on the quarantine XRI (QX) bit
+ * sent by the FW.
+ */
+ io->sec_hio->quarantine = (wcqe->qx == 1);
+ /* use the primary io->wq because it is not set on the secondary IO. */
+ ocs_hw_io_quarantine(hw, io->wq, io->sec_hio);
+ }
+
+ ocs_hw_remove_io_timed_wqe(hw, io);
+
+ /* clear xbusy flag if WCQE[XB] is clear */
+ if (io->xbusy && wcqe->xb == 0) {
+ io->xbusy = FALSE;
+ }
+
+ /* get extended CQE status */
+ switch (io->type) {
+ case OCS_HW_BLS_ACC:
+ case OCS_HW_BLS_ACC_SID:
+ break;
+ case OCS_HW_ELS_REQ:
+ sli_fc_els_did(&hw->sli, cqe, &ext);
+ len = sli_fc_response_length(&hw->sli, cqe);
+ break;
+ case OCS_HW_ELS_RSP:
+ case OCS_HW_ELS_RSP_SID:
+ case OCS_HW_FC_CT_RSP:
+ break;
+ case OCS_HW_FC_CT:
+ len = sli_fc_response_length(&hw->sli, cqe);
+ break;
+ case OCS_HW_IO_TARGET_WRITE:
+ len = sli_fc_io_length(&hw->sli, cqe);
+#if defined(OCS_DISC_SPIN_DELAY)
+ if (ocs_get_property("disk_spin_delay", prop_buf, sizeof(prop_buf)) == 0) {
+ delay = ocs_strtoul(prop_buf, 0, 0);
+ ocs_udelay(delay);
+ }
+#endif
+ break;
+ case OCS_HW_IO_TARGET_READ:
+ len = sli_fc_io_length(&hw->sli, cqe);
+ /*
+ * if_type == 2 seems to return 0 "total length placed" on
+ * FCP_TSEND64_WQE completions. If this appears to happen,
+ * use the CTIO data transfer length instead.
+ */
+ if (hw->workaround.retain_tsend_io_length && !len && !status) {
+ len = io->length;
+ }
+
+ break;
+ case OCS_HW_IO_TARGET_RSP:
+ if(io->is_port_owned) {
+ ocs_lock(&io->axr_lock);
+ lock_taken = 1;
+ if(io->axr_buf->call_axr_cmd) {
+ out_of_order_axr_cmd = 1;
+ }
+ if(io->axr_buf->call_axr_data) {
+ out_of_order_axr_data = 1;
+ }
+ }
+ break;
+ case OCS_HW_IO_INITIATOR_READ:
+ len = sli_fc_io_length(&hw->sli, cqe);
+ break;
+ case OCS_HW_IO_INITIATOR_WRITE:
+ len = sli_fc_io_length(&hw->sli, cqe);
+ break;
+ case OCS_HW_IO_INITIATOR_NODATA:
+ break;
+ case OCS_HW_IO_DNRX_REQUEUE:
+ /* release the count for re-posting the buffer */
+ //ocs_hw_io_free(hw, io);
+ break;
+ default:
+ ocs_log_test(hw->os, "XXX unhandled io type %#x for XRI 0x%x\n",
+ io->type, io->indicator);
+ break;
+ }
+ if (status) {
+ ext = sli_fc_ext_status(&hw->sli, cqe);
+ /* Emulate IAAB=0 for initiator WQEs only; i.e. automatically
+ * abort exchange if an error occurred and exchange is still busy.
+ */
+ if (hw->config.i_only_aab &&
+ (ocs_hw_iotype_is_originator(io->type)) &&
+ (ocs_hw_wcqe_abort_needed(status, ext, wcqe->xb))) {
+ ocs_hw_rtn_e rc;
+
+ ocs_log_debug(hw->os, "aborting xri=%#x tag=%#x\n",
+ io->indicator, io->reqtag);
+ /*
+			 * Because the initiator will not issue another IO phase, it would be OK to issue the
+			 * callback on the abort completion; but for consistency with the target, wait for the
+			 * XRI_ABORTED CQE to issue the IO callback.
+ */
+ rc = ocs_hw_io_abort(hw, io, TRUE, NULL, NULL);
+
+ if (rc == OCS_HW_RTN_SUCCESS) {
+ /* latch status to return after abort is complete */
+ io->status_saved = 1;
+ io->saved_status = status;
+ io->saved_ext = ext;
+ io->saved_len = len;
+ goto exit_ocs_hw_wq_process_io;
+ } else if (rc == OCS_HW_RTN_IO_ABORT_IN_PROGRESS) {
+ /*
+ * Already being aborted by someone else (ABTS
+ * perhaps). Just fall through and return original
+ * error.
+ */
+ ocs_log_debug(hw->os, "abort in progress xri=%#x tag=%#x\n",
+ io->indicator, io->reqtag);
+
+ } else {
+ /* Failed to abort for some other reason, log error */
+ ocs_log_test(hw->os, "Failed to abort xri=%#x tag=%#x rc=%d\n",
+ io->indicator, io->reqtag, rc);
+ }
+ }
+
+ /*
+ * If we're not an originator IO, and XB is set, then issue abort for the IO from within the HW
+ */
+ if ( (! ocs_hw_iotype_is_originator(io->type)) && wcqe->xb) {
+ ocs_hw_rtn_e rc;
+
+ ocs_log_debug(hw->os, "aborting xri=%#x tag=%#x\n", io->indicator, io->reqtag);
+
+ /*
+ * Because targets may send a response when the IO completes using the same XRI, we must
+ * wait for the XRI_ABORTED CQE to issue the IO callback
+ */
+ rc = ocs_hw_io_abort(hw, io, FALSE, NULL, NULL);
+ if (rc == OCS_HW_RTN_SUCCESS) {
+ /* latch status to return after abort is complete */
+ io->status_saved = 1;
+ io->saved_status = status;
+ io->saved_ext = ext;
+ io->saved_len = len;
+ goto exit_ocs_hw_wq_process_io;
+ } else if (rc == OCS_HW_RTN_IO_ABORT_IN_PROGRESS) {
+ /*
+ * Already being aborted by someone else (ABTS
+ * perhaps). Just fall through and return original
+ * error.
+ */
+ ocs_log_debug(hw->os, "abort in progress xri=%#x tag=%#x\n",
+ io->indicator, io->reqtag);
+
+ } else {
+ /* Failed to abort for some other reason, log error */
+ ocs_log_test(hw->os, "Failed to abort xri=%#x tag=%#x rc=%d\n",
+ io->indicator, io->reqtag, rc);
+ }
+ }
+ }
+ /* BZ 161832 - free secondary HW IO */
+ if (io->sec_hio != NULL) {
+ ocs_hw_io_free(hw, io->sec_hio);
+ io->sec_hio = NULL;
+ }
+
+ if (io->done != NULL) {
+ ocs_hw_done_t done = io->done;
+ void *arg = io->arg;
+
+ io->done = NULL;
+
+ if (io->status_saved) {
+ /* use latched status if exists */
+ status = io->saved_status;
+ len = io->saved_len;
+ ext = io->saved_ext;
+ io->status_saved = 0;
+ }
+
+ /* Restore default SGL */
+ ocs_hw_io_restore_sgl(hw, io);
+ done(io, io->rnode, len, status, ext, arg);
+ }
+
+ if(out_of_order_axr_cmd) {
+ /* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
+ if (hw->config.bounce) {
+ fc_header_t *hdr = io->axr_buf->cmd_seq->header->dma.virt;
+ uint32_t s_id = fc_be24toh(hdr->s_id);
+ uint32_t d_id = fc_be24toh(hdr->d_id);
+ uint32_t ox_id = ocs_be16toh(hdr->ox_id);
+ if (hw->callback.bounce != NULL) {
+ (*hw->callback.bounce)(ocs_hw_unsol_process_bounce, io->axr_buf->cmd_seq, s_id, d_id, ox_id);
+ }
+		} else {
+ hw->callback.unsolicited(hw->args.unsolicited, io->axr_buf->cmd_seq);
+ }
+
+ if(out_of_order_axr_data) {
+ /* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
+ if (hw->config.bounce) {
+ fc_header_t *hdr = io->axr_buf->seq.header->dma.virt;
+ uint32_t s_id = fc_be24toh(hdr->s_id);
+ uint32_t d_id = fc_be24toh(hdr->d_id);
+ uint32_t ox_id = ocs_be16toh(hdr->ox_id);
+ if (hw->callback.bounce != NULL) {
+ (*hw->callback.bounce)(ocs_hw_unsol_process_bounce, &io->axr_buf->seq, s_id, d_id, ox_id);
+ }
+			} else {
+ hw->callback.unsolicited(hw->args.unsolicited, &io->axr_buf->seq);
+ }
+ }
+ }
+
+exit_ocs_hw_wq_process_io:
+ if(lock_taken) {
+ ocs_unlock(&io->axr_lock);
+ }
+}
+
+/**
+ * @brief Process WQ completions for abort requests.
+ *
+ * @param arg Generic callback argument.
+ * @param cqe Pointer to completion queue entry.
+ * @param status Completion status.
+ *
+ * @return None.
+ */
+static void
+ocs_hw_wq_process_abort(void *arg, uint8_t *cqe, int32_t status)
+{
+ ocs_hw_io_t *io = arg;
+ ocs_hw_t *hw = io->hw;
+ uint32_t ext = 0;
+ uint32_t len = 0;
+ hw_wq_callback_t *wqcb;
+
+ /*
+ * For IOs that were aborted internally, we may need to issue the callback here depending
+	 * on whether an XRI_ABORTED CQE is expected or not. If the status is Local Reject/No XRI, then
+ * issue the callback now.
+ */
+ ext = sli_fc_ext_status(&hw->sli, cqe);
+ if (status == SLI4_FC_WCQE_STATUS_LOCAL_REJECT &&
+ ext == SLI4_FC_LOCAL_REJECT_NO_XRI &&
+ io->done != NULL) {
+ ocs_hw_done_t done = io->done;
+ void *arg = io->arg;
+
+ io->done = NULL;
+
+ /*
+ * Use latched status as this is always saved for an internal abort
+ *
+	 * Note: We won't have both a done and abort_done function, so don't worry about
+ * clobbering the len, status and ext fields.
+ */
+ status = io->saved_status;
+ len = io->saved_len;
+ ext = io->saved_ext;
+ io->status_saved = 0;
+ done(io, io->rnode, len, status, ext, arg);
+ }
+
+ if (io->abort_done != NULL) {
+ ocs_hw_done_t done = io->abort_done;
+ void *arg = io->abort_arg;
+
+ io->abort_done = NULL;
+
+ done(io, io->rnode, len, status, ext, arg);
+ }
+ ocs_lock(&hw->io_abort_lock);
+ /* clear abort bit to indicate abort is complete */
+ io->abort_in_progress = 0;
+ ocs_unlock(&hw->io_abort_lock);
+
+ /* Free the WQ callback */
+ ocs_hw_assert(io->abort_reqtag != UINT32_MAX);
+ wqcb = ocs_hw_reqtag_get_instance(hw, io->abort_reqtag);
+ ocs_hw_reqtag_free(hw, wqcb);
+
+ /*
+ * Call ocs_hw_io_free() because this releases the WQ reservation as
+ * well as doing the refcount put. Don't duplicate the code here.
+ */
+ (void)ocs_hw_io_free(hw, io);
+}
+
+/**
+ * @brief Process XABT completions
+ *
+ * @param hw Hardware context.
+ * @param cq Pointer to the HW completion queue object.
+ * @param cqe Pointer to the WQ completion queue entry.
+ * @param rid Resource ID (IO tag).
+ *
+ * @return None.
+ */
+void
+ocs_hw_xabt_process(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe, uint16_t rid)
+{
+	/* look up the IO associated with this XRI */
+ ocs_hw_io_t *io = NULL;
+
+ io = ocs_hw_io_lookup(hw, rid);
+
+ ocs_queue_history_cqe(&hw->q_hist, SLI_QENTRY_XABT, (void *)cqe, 0, cq->queue->id,
+ ((cq->queue->index - 1) & (cq->queue->length - 1)));
+ if (io == NULL) {
+ /* IO lookup failure should never happen */
+ ocs_log_err(hw->os, "Error: xabt io lookup failed rid=%#x\n", rid);
+ return;
+ }
+
+ if (!io->xbusy) {
+ ocs_log_debug(hw->os, "xabt io not busy rid=%#x\n", rid);
+ } else {
+ /* mark IO as no longer busy */
+ io->xbusy = FALSE;
+ }
+
+ if (io->is_port_owned) {
+ ocs_lock(&hw->io_lock);
+		/* Take a reference so the callback below cannot free the IO before the requeue */
+ ocs_ref_get(&io->ref);
+ ocs_unlock(&hw->io_lock);
+ }
+
+
+ /* For IOs that were aborted internally, we need to issue any pending callback here. */
+ if (io->done != NULL) {
+ ocs_hw_done_t done = io->done;
+ void *arg = io->arg;
+
+ /* Use latched status as this is always saved for an internal abort */
+ int32_t status = io->saved_status;
+ uint32_t len = io->saved_len;
+ uint32_t ext = io->saved_ext;
+
+ io->done = NULL;
+ io->status_saved = 0;
+
+ done(io, io->rnode, len, status, ext, arg);
+ }
+
+ /* Check to see if this is a port owned XRI */
+ if (io->is_port_owned) {
+ ocs_lock(&hw->io_lock);
+ ocs_hw_reque_xri(hw, io);
+ ocs_unlock(&hw->io_lock);
+		/* Not handling the requeue XRI completion; free the IO */
+ ocs_hw_io_free(hw, io);
+ return;
+ }
+
+ ocs_lock(&hw->io_lock);
+ if ((io->state == OCS_HW_IO_STATE_INUSE) || (io->state == OCS_HW_IO_STATE_WAIT_FREE)) {
+ /* if on wait_free list, caller has already freed IO;
+ * remove from wait_free list and add to free list.
+ * if on in-use list, already marked as no longer busy;
+ * just leave there and wait for caller to free.
+ */
+ if (io->state == OCS_HW_IO_STATE_WAIT_FREE) {
+ io->state = OCS_HW_IO_STATE_FREE;
+ ocs_list_remove(&hw->io_wait_free, io);
+ ocs_hw_io_free_move_correct_list(hw, io);
+ }
+ }
+ ocs_unlock(&hw->io_lock);
+}
+
+/**
+ * @brief Adjust the number of WQs and CQs within the HW.
+ *
+ * @par Description
+ * Calculates the number of WQs and associated CQs needed in the HW based on
+ * the number of IOs. Calculates the starting CQ index for each WQ, RQ and
+ * MQ.
+ *
+ * @param hw Hardware context allocated by the caller.
+ */
+static void
+ocs_hw_adjust_wqs(ocs_hw_t *hw)
+{
+ uint32_t max_wq_num = sli_get_max_queue(&hw->sli, SLI_QTYPE_WQ);
+ uint32_t max_wq_entries = hw->num_qentries[SLI_QTYPE_WQ];
+ uint32_t max_cq_entries = hw->num_qentries[SLI_QTYPE_CQ];
+
+ /*
+	 * Possibly adjust the size of the WQs so that the CQ is twice as
+ * big as the WQ to allow for 2 completions per IO. This allows us to
+ * handle multi-phase as well as aborts.
+ */
+ if (max_cq_entries < max_wq_entries * 2) {
+ max_wq_entries = hw->num_qentries[SLI_QTYPE_WQ] = max_cq_entries / 2;
+ }
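+
+	/*
+	 * Illustrative arithmetic (hypothetical sizes): with max_cq_entries
+	 * and max_wq_entries both 4096, the WQ depth is halved to 2048 so
+	 * that each outstanding WQE can post two CQEs without overrunning
+	 * the CQ.
+	 */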
+
+ /*
+	 * Calculate the number of WQs to use based on the number of IOs.
+ *
+ * Note: We need to reserve room for aborts which must be sent down
+ * the same WQ as the IO. So we allocate enough WQ space to
+ * handle 2 times the number of IOs. Half of the space will be
+	 *	 used for normal IOs and the other half is reserved for aborts.
+ */
+ hw->config.n_wq = ((hw->config.n_io * 2) + (max_wq_entries - 1)) / max_wq_entries;
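+
+	/*
+	 * Worked example (hypothetical values): with n_io = 1024 and
+	 * max_wq_entries = 256, the demand is 1024 * 2 = 2048 WQEs, so
+	 * n_wq = (2048 + 255) / 256 = 8.
+	 */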
+
+ /*
+	 * For performance reasons, it is best to use a minimum of 4 WQs
+ * for BE3 and Skyhawk.
+ */
+ if (hw->config.n_wq < 4 &&
+ SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) {
+ hw->config.n_wq = 4;
+ }
+
+ /*
+ * For dual-chute support, we need to have at least one WQ per chute.
+ */
+ if (hw->config.n_wq < 2 &&
+ ocs_hw_get_num_chutes(hw) > 1) {
+ hw->config.n_wq = 2;
+ }
+
+ /* make sure we haven't exceeded the max supported in the HW */
+ if (hw->config.n_wq > OCS_HW_MAX_NUM_WQ) {
+ hw->config.n_wq = OCS_HW_MAX_NUM_WQ;
+ }
+
+ /* make sure we haven't exceeded the chip maximum */
+ if (hw->config.n_wq > max_wq_num) {
+ hw->config.n_wq = max_wq_num;
+ }
+
+	/*
+	 * Per the queue topology string, divide the WQ count evenly among the
+	 * chutes (integer division; any remainder is dropped).
+	 */
+ hw->config.n_wq /= ocs_hw_get_num_chutes(hw);
+}
+
+static int32_t
+ocs_hw_command_process(ocs_hw_t *hw, int32_t status, uint8_t *mqe, size_t size)
+{
+ ocs_command_ctx_t *ctx = NULL;
+
+ ocs_lock(&hw->cmd_lock);
+ if (NULL == (ctx = ocs_list_remove_head(&hw->cmd_head))) {
+ ocs_log_err(hw->os, "XXX no command context?!?\n");
+ ocs_unlock(&hw->cmd_lock);
+ return -1;
+ }
+
+ hw->cmd_head_count--;
+
+ /* Post any pending requests */
+ ocs_hw_cmd_submit_pending(hw);
+
+ ocs_unlock(&hw->cmd_lock);
+
+ if (ctx->cb) {
+ if (ctx->buf) {
+ ocs_memcpy(ctx->buf, mqe, size);
+ }
+ ctx->cb(hw, status, ctx->buf, ctx->arg);
+ }
+
+ ocs_memset(ctx, 0, sizeof(ocs_command_ctx_t));
+ ocs_free(hw->os, ctx, sizeof(ocs_command_ctx_t));
+
+ return 0;
+}
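+
+/*
+ * Sketch of the caller's side of this contract (names here are purely
+ * illustrative): a callback registered through ocs_hw_command() is invoked
+ * from this routine once the mailbox completion has been dequeued.
+ *
+ *	static int32_t my_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
+ *	{
+ *		// inspect status / mqe and free any caller-owned buffers
+ *		return 0;
+ *	}
+ *
+ *	ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, my_cb, my_arg);
+ */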
+
+/**
+ * @brief Process entries on the given mailbox queue.
+ *
+ * @param hw Hardware context.
+ * @param status CQE status.
+ * @param mq Pointer to the mailbox queue object.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+static int32_t
+ocs_hw_mq_process(ocs_hw_t *hw, int32_t status, sli4_queue_t *mq)
+{
+ uint8_t mqe[SLI4_BMBX_SIZE];
+
+ if (!sli_queue_read(&hw->sli, mq, mqe)) {
+ ocs_hw_command_process(hw, status, mqe, mq->size);
+ }
+
+ return 0;
+}
+
+/**
+ * @brief Read a FCF table entry.
+ *
+ * @param hw Hardware context.
+ * @param index Table index to read. Use SLI4_FCOE_FCF_TABLE_FIRST for the first
+ * read and the next_index field from the FCOE_READ_FCF_TABLE command
+ * for subsequent reads.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+static ocs_hw_rtn_e
+ocs_hw_read_fcf(ocs_hw_t *hw, uint32_t index)
+{
+ uint8_t *buf = NULL;
+ int32_t rc = OCS_HW_RTN_ERROR;
+
+ buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
+ if (!buf) {
+ ocs_log_err(hw->os, "no buffer for command\n");
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ if (sli_cmd_fcoe_read_fcf_table(&hw->sli, buf, SLI4_BMBX_SIZE, &hw->fcf_dmem,
+ index)) {
+ rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_read_fcf, &hw->fcf_dmem);
+ }
+
+ if (rc != OCS_HW_RTN_SUCCESS) {
+ ocs_log_test(hw->os, "FCOE_READ_FCF_TABLE failed\n");
+ ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
+ }
+
+ return rc;
+}
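+
+/*
+ * Typical iteration pattern (informal sketch): the first read is issued with
+ * SLI4_FCOE_FCF_TABLE_FIRST, and ocs_hw_cb_read_fcf() below re-issues the
+ * read with next_index until it reaches SLI4_FCOE_FCF_TABLE_LAST:
+ *
+ *	ocs_hw_read_fcf(hw, SLI4_FCOE_FCF_TABLE_FIRST);
+ */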
+
+/**
+ * @brief Callback function for the FCOE_READ_FCF_TABLE command.
+ *
+ * @par Description
+ * Note that the caller has allocated:
+ * - DMA memory to hold the table contents
+ * - DMA memory structure
+ * - Command/results buffer
+ * .
+ * Only the command/results buffer is freed here; the DMA memory
+ * (hw->fcf_dmem) is retained and reused for subsequent table reads.
+ *
+ * @param hw Hardware context.
+ * @param status Hardware status.
+ * @param mqe Pointer to the mailbox command/results buffer.
+ * @param arg Pointer to the DMA memory structure.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+static int32_t
+ocs_hw_cb_read_fcf(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
+{
+ ocs_dma_t *dma = arg;
+ sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe;
+
+ if (status || hdr->status) {
+ ocs_log_test(hw->os, "bad status cqe=%#x mqe=%#x\n",
+ status, hdr->status);
+ } else if (dma->virt) {
+ sli4_res_fcoe_read_fcf_table_t *read_fcf = dma->virt;
+
+ /* if FC or FCOE and FCF entry valid, process it */
+ if (read_fcf->fcf_entry.fc ||
+ (read_fcf->fcf_entry.val && !read_fcf->fcf_entry.sol)) {
+ if (hw->callback.domain != NULL) {
+ ocs_domain_record_t drec = {0};
+
+ if (read_fcf->fcf_entry.fc) {
+ /*
+ * This is a pseudo FCF entry. Create a domain
+ * record based on the read topology information
+ */
+ drec.speed = hw->link.speed;
+ drec.fc_id = hw->link.fc_id;
+ drec.is_fc = TRUE;
+ if (SLI_LINK_TOPO_LOOP == hw->link.topology) {
+ drec.is_loop = TRUE;
+ ocs_memcpy(drec.map.loop, hw->link.loop_map,
+ sizeof(drec.map.loop));
+ } else if (SLI_LINK_TOPO_NPORT == hw->link.topology) {
+ drec.is_nport = TRUE;
+ }
+ } else {
+ drec.index = read_fcf->fcf_entry.fcf_index;
+ drec.priority = read_fcf->fcf_entry.fip_priority;
+
+ /* copy address, wwn and vlan_bitmap */
+ ocs_memcpy(drec.address, read_fcf->fcf_entry.fcf_mac_address,
+ sizeof(drec.address));
+ ocs_memcpy(drec.wwn, read_fcf->fcf_entry.fabric_name_id,
+ sizeof(drec.wwn));
+ ocs_memcpy(drec.map.vlan, read_fcf->fcf_entry.vlan_bitmap,
+ sizeof(drec.map.vlan));
+
+ drec.is_ethernet = TRUE;
+ drec.is_nport = TRUE;
+ }
+
+ hw->callback.domain(hw->args.domain,
+ OCS_HW_DOMAIN_FOUND,
+ &drec);
+ }
+ } else {
+		/* if FCoE and the FCF entry is not valid, ignore it */
+ ocs_log_test(hw->os, "ignore invalid FCF entry\n");
+ }
+
+ if (SLI4_FCOE_FCF_TABLE_LAST != read_fcf->next_index) {
+ ocs_hw_read_fcf(hw, read_fcf->next_index);
+ }
+ }
+
+ ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
+	/* Note: the DMA buffer (hw->fcf_dmem) is deliberately not freed here;
+	 * it is retained for subsequent FCF table reads (see ocs_hw_read_fcf()). */
+
+ return 0;
+}
+
+/**
+ * @brief Callback function for the SLI link events.
+ *
+ * @par Description
+ * This function allocates memory which must be freed in its callback.
+ *
+ * @param ctx Hardware context pointer (that is, ocs_hw_t *).
+ * @param e Event structure pointer (that is, sli4_link_event_t *).
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+static int32_t
+ocs_hw_cb_link(void *ctx, void *e)
+{
+ ocs_hw_t *hw = ctx;
+ sli4_link_event_t *event = e;
+ ocs_domain_t *d = NULL;
+ uint32_t i = 0;
+ int32_t rc = OCS_HW_RTN_ERROR;
+ ocs_t *ocs = hw->os;
+
+ ocs_hw_link_event_init(hw);
+
+ switch (event->status) {
+ case SLI_LINK_STATUS_UP:
+
+ hw->link = *event;
+
+ if (SLI_LINK_TOPO_NPORT == event->topology) {
+ device_printf(ocs->dev, "Link Up, NPORT, speed is %d\n", event->speed);
+ ocs_hw_read_fcf(hw, SLI4_FCOE_FCF_TABLE_FIRST);
+ } else if (SLI_LINK_TOPO_LOOP == event->topology) {
+ uint8_t *buf = NULL;
+ device_printf(ocs->dev, "Link Up, LOOP, speed is %d\n", event->speed);
+
+ buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
+ if (!buf) {
+ ocs_log_err(hw->os, "no buffer for command\n");
+ break;
+ }
+
+ if (sli_cmd_read_topology(&hw->sli, buf, SLI4_BMBX_SIZE, &hw->loop_map)) {
+ rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, __ocs_read_topology_cb, NULL);
+ }
+
+ if (rc != OCS_HW_RTN_SUCCESS) {
+ ocs_log_test(hw->os, "READ_TOPOLOGY failed\n");
+ ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
+ }
+ } else {
+ device_printf(ocs->dev, "Link Up, unsupported topology (%#x), speed is %d\n",
+ event->topology, event->speed);
+ }
+ break;
+ case SLI_LINK_STATUS_DOWN:
+ device_printf(ocs->dev, "Link Down\n");
+
+ hw->link.status = event->status;
+
+		for (i = 0; i < SLI4_MAX_FCFI; i++) {
+			d = hw->domains[i];
+			if (d != NULL &&
+			    hw->callback.domain != NULL) {
+				hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, d);
+			}
+		}
+ break;
+ default:
+ ocs_log_test(hw->os, "unhandled link status %#x\n", event->status);
+ break;
+ }
+
+ return 0;
+}
+
+static int32_t
+ocs_hw_cb_fip(void *ctx, void *e)
+{
+ ocs_hw_t *hw = ctx;
+ ocs_domain_t *domain = NULL;
+ sli4_fip_event_t *event = e;
+
+ /* Find the associated domain object */
+ if (event->type == SLI4_FCOE_FIP_FCF_CLEAR_VLINK) {
+ ocs_domain_t *d = NULL;
+ uint32_t i = 0;
+
+		/* Clear VLINK is different from the other FIP events as it passes back
+		 * a VPI instead of an FCF index. Check all attached SLI ports for a
+		 * matching VPI. */
+		for (i = 0; i < SLI4_MAX_FCFI; i++) {
+			d = hw->domains[i];
+ if (d != NULL) {
+ ocs_sport_t *sport = NULL;
+
+ ocs_list_foreach(&d->sport_list, sport) {
+ if (sport->indicator == event->index) {
+ domain = d;
+ break;
+ }
+ }
+
+ if (domain != NULL) {
+ break;
+ }
+ }
+ }
+ } else {
+ domain = ocs_hw_domain_get_indexed(hw, event->index);
+ }
+
+ switch (event->type) {
+ case SLI4_FCOE_FIP_FCF_DISCOVERED:
+ ocs_hw_read_fcf(hw, event->index);
+ break;
+ case SLI4_FCOE_FIP_FCF_DEAD:
+ if (domain != NULL &&
+ hw->callback.domain != NULL) {
+ hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, domain);
+ }
+ break;
+ case SLI4_FCOE_FIP_FCF_CLEAR_VLINK:
+ if (domain != NULL &&
+ hw->callback.domain != NULL) {
+ /*
+			 * We will want to issue a rediscover FCF when this domain is freed in order
+			 * to invalidate the FCF table.
+ */
+ domain->req_rediscover_fcf = TRUE;
+ hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, domain);
+ }
+ break;
+ case SLI4_FCOE_FIP_FCF_MODIFIED:
+ if (domain != NULL &&
+ hw->callback.domain != NULL) {
+ hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, domain);
+ }
+
+ ocs_hw_read_fcf(hw, event->index);
+ break;
+ default:
+ ocs_log_test(hw->os, "unsupported event %#x\n", event->type);
+ }
+
+ return 0;
+}
+
+static int32_t
+ocs_hw_cb_node_attach(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
+{
+ ocs_remote_node_t *rnode = arg;
+ sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe;
+ ocs_hw_remote_node_event_e evt = 0;
+
+ if (status || hdr->status) {
+ ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status,
+ hdr->status);
+ ocs_atomic_sub_return(&hw->rpi_ref[rnode->index].rpi_count, 1);
+ rnode->attached = FALSE;
+ ocs_atomic_set(&hw->rpi_ref[rnode->index].rpi_attached, 0);
+ evt = OCS_HW_NODE_ATTACH_FAIL;
+ } else {
+ rnode->attached = TRUE;
+ ocs_atomic_set(&hw->rpi_ref[rnode->index].rpi_attached, 1);
+ evt = OCS_HW_NODE_ATTACH_OK;
+ }
+
+ if (hw->callback.rnode != NULL) {
+ hw->callback.rnode(hw->args.rnode, evt, rnode);
+ }
+ ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
+
+ return 0;
+}
+
+static int32_t
+ocs_hw_cb_node_free(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
+{
+ ocs_remote_node_t *rnode = arg;
+ sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe;
+ ocs_hw_remote_node_event_e evt = OCS_HW_NODE_FREE_FAIL;
+ int32_t rc = 0;
+
+ if (status || hdr->status) {
+ ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status,
+ hdr->status);
+
+ /*
+ * In certain cases, a non-zero MQE status is OK (all must be true):
+ * - node is attached
+ * - if High Login Mode is enabled, node is part of a node group
+ * - status is 0x1400
+ */
+ if (!rnode->attached || ((sli_get_hlm(&hw->sli) == TRUE) && !rnode->node_group) ||
+ (hdr->status != SLI4_MBOX_STATUS_RPI_NOT_REG)) {
+ rc = -1;
+ }
+ }
+
+ if (rc == 0) {
+ rnode->node_group = FALSE;
+ rnode->attached = FALSE;
+
+ if (ocs_atomic_read(&hw->rpi_ref[rnode->index].rpi_count) == 0) {
+ ocs_atomic_set(&hw->rpi_ref[rnode->index].rpi_attached, 0);
+ }
+
+ evt = OCS_HW_NODE_FREE_OK;
+ }
+
+ if (hw->callback.rnode != NULL) {
+ hw->callback.rnode(hw->args.rnode, evt, rnode);
+ }
+
+ ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
+
+ return rc;
+}
+
+static int32_t
+ocs_hw_cb_node_free_all(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
+{
+ sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe;
+ ocs_hw_remote_node_event_e evt = OCS_HW_NODE_FREE_FAIL;
+ int32_t rc = 0;
+ uint32_t i;
+
+ if (status || hdr->status) {
+ ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status,
+ hdr->status);
+ } else {
+ evt = OCS_HW_NODE_FREE_ALL_OK;
+ }
+
+ if (evt == OCS_HW_NODE_FREE_ALL_OK) {
+ for (i = 0; i < sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI); i++) {
+ ocs_atomic_set(&hw->rpi_ref[i].rpi_count, 0);
+ }
+
+ if (sli_resource_reset(&hw->sli, SLI_RSRC_FCOE_RPI)) {
+ ocs_log_test(hw->os, "FCOE_RPI free all failure\n");
+ rc = -1;
+ }
+ }
+
+ if (hw->callback.rnode != NULL) {
+ hw->callback.rnode(hw->args.rnode, evt, NULL);
+ }
+
+ ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
+
+ return rc;
+}
+
+/**
+ * @brief Initialize the pool of HW IO objects.
+ *
+ * @param hw Hardware context.
+ *
+ * @return Returns OCS_HW_RTN_SUCCESS on success, or an error code on failure.
+ */
+static ocs_hw_rtn_e
+ocs_hw_setup_io(ocs_hw_t *hw)
+{
+ uint32_t i = 0;
+ ocs_hw_io_t *io = NULL;
+ uintptr_t xfer_virt = 0;
+ uintptr_t xfer_phys = 0;
+ uint32_t index;
+ uint8_t new_alloc = TRUE;
+
+ if (NULL == hw->io) {
+ hw->io = ocs_malloc(hw->os, hw->config.n_io * sizeof(ocs_hw_io_t *), OCS_M_ZERO | OCS_M_NOWAIT);
+
+ if (NULL == hw->io) {
+			ocs_log_err(hw->os, "IO pointer memory allocation failed, %d IOs at size %zu\n",
+ hw->config.n_io,
+ sizeof(ocs_hw_io_t *));
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+ for (i = 0; i < hw->config.n_io; i++) {
+ hw->io[i] = ocs_malloc(hw->os, sizeof(ocs_hw_io_t),
+ OCS_M_ZERO | OCS_M_NOWAIT);
+ if (hw->io[i] == NULL) {
+ ocs_log_err(hw->os, "IO(%d) memory allocation failed\n", i);
+ goto error;
+ }
+ }
+
+		/* Create WQE buffers for the IOs */
+ hw->wqe_buffs = ocs_malloc(hw->os, hw->config.n_io * hw->sli.config.wqe_size,
+ OCS_M_ZERO | OCS_M_NOWAIT);
+		if (NULL == hw->wqe_buffs) {
+			ocs_log_err(hw->os, "IO WQE buffer allocation failed, %d IOs at size %zu\n",
+				    hw->config.n_io, hw->sli.config.wqe_size);
+			goto error;
+		}
+
+ } else {
+ /* re-use existing IOs, including SGLs */
+ new_alloc = FALSE;
+ }
+
+ if (new_alloc) {
+ if (ocs_dma_alloc(hw->os, &hw->xfer_rdy,
+ sizeof(fcp_xfer_rdy_iu_t) * hw->config.n_io,
+ 4/*XXX what does this need to be? */)) {
+ ocs_log_err(hw->os, "XFER_RDY buffer allocation failed\n");
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+ }
+ xfer_virt = (uintptr_t)hw->xfer_rdy.virt;
+ xfer_phys = hw->xfer_rdy.phys;
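+
+	/*
+	 * Each IO is handed a fixed-size slice of the shared XFER_RDY DMA
+	 * buffer; IO i uses the region starting at i * sizeof(fcp_xfer_rdy_iu_t).
+	 */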
+
+ for (i = 0; i < hw->config.n_io; i++) {
+ hw_wq_callback_t *wqcb;
+
+ io = hw->io[i];
+
+ /* initialize IO fields */
+ io->hw = hw;
+
+ /* Assign a WQE buff */
+ io->wqe.wqebuf = &hw->wqe_buffs[i * hw->sli.config.wqe_size];
+
+ /* Allocate the request tag for this IO */
+ wqcb = ocs_hw_reqtag_alloc(hw, ocs_hw_wq_process_io, io);
+ if (wqcb == NULL) {
+ ocs_log_err(hw->os, "can't allocate request tag\n");
+ return OCS_HW_RTN_NO_RESOURCES;
+ }
+ io->reqtag = wqcb->instance_index;
+
+ /* Now for the fields that are initialized on each free */
+ ocs_hw_init_free_io(io);
+
+ /* The XB flag isn't cleared on IO free, so initialize it to zero here */
+ io->xbusy = 0;
+
+ if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_XRI, &io->indicator, &index)) {
+ ocs_log_err(hw->os, "sli_resource_alloc failed @ %d\n", i);
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ if (new_alloc && ocs_dma_alloc(hw->os, &io->def_sgl, hw->config.n_sgl * sizeof(sli4_sge_t), 64)) {
+ ocs_log_err(hw->os, "ocs_dma_alloc failed @ %d\n", i);
+ ocs_memset(&io->def_sgl, 0, sizeof(ocs_dma_t));
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+ io->def_sgl_count = hw->config.n_sgl;
+ io->sgl = &io->def_sgl;
+ io->sgl_count = io->def_sgl_count;
+
+ if (hw->xfer_rdy.size) {
+ io->xfer_rdy.virt = (void *)xfer_virt;
+ io->xfer_rdy.phys = xfer_phys;
+ io->xfer_rdy.size = sizeof(fcp_xfer_rdy_iu_t);
+
+ xfer_virt += sizeof(fcp_xfer_rdy_iu_t);
+ xfer_phys += sizeof(fcp_xfer_rdy_iu_t);
+ }
+ }
+
+ return OCS_HW_RTN_SUCCESS;
+error:
+ for (i = 0; i < hw->config.n_io && hw->io[i]; i++) {
+ ocs_free(hw->os, hw->io[i], sizeof(ocs_hw_io_t));
+ hw->io[i] = NULL;
+ }
+
+ return OCS_HW_RTN_NO_MEMORY;
+}
+
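+/**
+ * @brief Post the HW IOs to the free list, pre-registering SGLs when supported.
+ *
+ * @param hw Hardware context.
+ *
+ * @return Returns OCS_HW_RTN_SUCCESS on success, or an error code on failure.
+ */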
+static ocs_hw_rtn_e
+ocs_hw_init_io(ocs_hw_t *hw)
+{
+ uint32_t i = 0, io_index = 0;
+ uint32_t prereg = 0;
+ ocs_hw_io_t *io = NULL;
+ uint8_t cmd[SLI4_BMBX_SIZE];
+ ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
+ uint32_t nremaining;
+ uint32_t n = 0;
+ uint32_t sgls_per_request = 256;
+ ocs_dma_t **sgls = NULL;
+ ocs_dma_t reqbuf = { 0 };
+
+ prereg = sli_get_sgl_preregister(&hw->sli);
+
+ if (prereg) {
+ sgls = ocs_malloc(hw->os, sizeof(*sgls) * sgls_per_request, OCS_M_NOWAIT);
+ if (sgls == NULL) {
+ ocs_log_err(hw->os, "ocs_malloc sgls failed\n");
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ rc = ocs_dma_alloc(hw->os, &reqbuf, 32 + sgls_per_request*16, OCS_MIN_DMA_ALIGNMENT);
+ if (rc) {
+ ocs_log_err(hw->os, "ocs_dma_alloc reqbuf failed\n");
+ ocs_free(hw->os, sgls, sizeof(*sgls) * sgls_per_request);
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+ }
+
+ io = hw->io[io_index];
+ for (nremaining = hw->config.n_io; nremaining; nremaining -= n) {
+ if (prereg) {
+			/* Copy the addresses of the SGLs into the local sgls[] array; break
+			 * out if the XRI values are not contiguous.
+			 */
+ for (n = 0; n < MIN(sgls_per_request, nremaining); n++) {
+ /* Check that we have contiguous xri values */
+ if (n > 0) {
+ if (hw->io[io_index + n]->indicator != (hw->io[io_index + n-1]->indicator+1)) {
+ break;
+ }
+ }
+ sgls[n] = hw->io[io_index + n]->sgl;
+ }
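+
+			/*
+			 * Example (hypothetical XRI values): indicators 10, 11, 12, 14
+			 * would post as a batch of 3 (10-12); 14 starts the next batch
+			 * on the following pass of the outer loop.
+			 */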
+
+ if (sli_cmd_fcoe_post_sgl_pages(&hw->sli, cmd, sizeof(cmd),
+ io->indicator, n, sgls, NULL, &reqbuf)) {
+ if (ocs_hw_command(hw, cmd, OCS_CMD_POLL, NULL, NULL)) {
+ rc = OCS_HW_RTN_ERROR;
+ ocs_log_err(hw->os, "SGL post failed\n");
+ break;
+ }
+ }
+ } else {
+ n = nremaining;
+ }
+
+ /* Add to tail if successful */
+		for (i = 0; i < n; i++) {
+			io->is_port_owned = 0;
+			io->state = OCS_HW_IO_STATE_FREE;
+			ocs_list_add_tail(&hw->io_free, io);
+			io_index++;
+			if (io_index < hw->config.n_io) {
+				io = hw->io[io_index];
+			}
+ }
+ }
+
+ if (prereg) {
+ ocs_dma_free(hw->os, &reqbuf);
+ ocs_free(hw->os, sgls, sizeof(*sgls) * sgls_per_request);
+ }
+
+ return rc;
+}
+
+static int32_t
+ocs_hw_flush(ocs_hw_t *hw)
+{
+ uint32_t i = 0;
+
+ /* Process any remaining completions */
+ for (i = 0; i < hw->eq_count; i++) {
+ ocs_hw_process(hw, i, ~0);
+ }
+
+ return 0;
+}
+
+static int32_t
+ocs_hw_command_cancel(ocs_hw_t *hw)
+{
+
+ ocs_lock(&hw->cmd_lock);
+
+ /*
+ * Manually clean up remaining commands. Note: since this calls
+ * ocs_hw_command_process(), we'll also process the cmd_pending
+ * list, so no need to manually clean that out.
+ */
+ while (!ocs_list_empty(&hw->cmd_head)) {
+ uint8_t mqe[SLI4_BMBX_SIZE] = { 0 };
+ ocs_command_ctx_t *ctx = ocs_list_get_head(&hw->cmd_head);
+
+ ocs_log_test(hw->os, "hung command %08x\n",
+ NULL == ctx ? UINT32_MAX :
+ (NULL == ctx->buf ? UINT32_MAX : *((uint32_t *)ctx->buf)));
+ ocs_unlock(&hw->cmd_lock);
+ ocs_hw_command_process(hw, -1/*Bad status*/, mqe, SLI4_BMBX_SIZE);
+ ocs_lock(&hw->cmd_lock);
+ }
+
+ ocs_unlock(&hw->cmd_lock);
+
+ return 0;
+}
+
+/**
+ * @brief Find IO given indicator (xri).
+ *
+ * @param hw Hardware context.
+ * @param xri Indicator (XRI) to look for.
+ *
+ * @return Returns io if found, NULL otherwise.
+ */
+ocs_hw_io_t *
+ocs_hw_io_lookup(ocs_hw_t *hw, uint32_t xri)
+{
+ uint32_t ioindex;
+ ioindex = xri - hw->sli.config.extent[SLI_RSRC_FCOE_XRI].base[0];
+ return hw->io[ioindex];
+}
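+
+/*
+ * For example (illustrative values only): with an FCOE_XRI extent base of
+ * 0x100, an xri of 0x105 maps to hw->io[5].
+ */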
+
+/**
+ * @brief Issue any pending callbacks for an IO and remove off the timer and pending lists.
+ *
+ * @param hw Hardware context.
+ * @param io Pointer to the IO to cleanup.
+ */
+static void
+ocs_hw_io_cancel_cleanup(ocs_hw_t *hw, ocs_hw_io_t *io)
+{
+ ocs_hw_done_t done = io->done;
+ ocs_hw_done_t abort_done = io->abort_done;
+
+	/* first check the timed WQE list and remove the IO if present */
+ if (ocs_list_on_list(&io->wqe_link)) {
+ ocs_list_remove(&hw->io_timed_wqe, io);
+ }
+
+ /* Remove from WQ pending list */
+ if ((io->wq != NULL) && ocs_list_on_list(&io->wq->pending_list)) {
+ ocs_list_remove(&io->wq->pending_list, io);
+ }
+
+	if (done != NULL) {
+ void *arg = io->arg;
+
+ io->done = NULL;
+ ocs_unlock(&hw->io_lock);
+ done(io, io->rnode, 0, SLI4_FC_WCQE_STATUS_SHUTDOWN, 0, arg);
+ ocs_lock(&hw->io_lock);
+ }
+
+ if (io->abort_done != NULL) {
+ void *abort_arg = io->abort_arg;
+
+ io->abort_done = NULL;
+ ocs_unlock(&hw->io_lock);
+ abort_done(io, io->rnode, 0, SLI4_FC_WCQE_STATUS_SHUTDOWN, 0, abort_arg);
+ ocs_lock(&hw->io_lock);
+ }
+}
+
+static int32_t
+ocs_hw_io_cancel(ocs_hw_t *hw)
+{
+ ocs_hw_io_t *io = NULL;
+ ocs_hw_io_t *tmp_io = NULL;
+	uint32_t iters = 100; /* 100 iterations x 10 ms = one second limit */
+
+ /*
+ * Manually clean up outstanding IO.
+	 * Only walk through the list once: the backend will clean up any IOs when done/abort_done is called.
+ */
+ ocs_lock(&hw->io_lock);
+ ocs_list_foreach_safe(&hw->io_inuse, io, tmp_io) {
+ ocs_hw_done_t done = io->done;
+ ocs_hw_done_t abort_done = io->abort_done;
+
+ ocs_hw_io_cancel_cleanup(hw, io);
+
+ /*
+		 * Since this is called in a reset/shutdown
+		 * case, if there is no callback, then just
+		 * free the IO.
+ *
+ * Note: A port owned XRI cannot be on
+ * the in use list. We cannot call
+ * ocs_hw_io_free() because we already
+ * hold the io_lock.
+ */
+ if (done == NULL &&
+ abort_done == NULL) {
+ ocs_hw_io_free_common(hw, io);
+ ocs_list_remove(&hw->io_inuse, io);
+ ocs_hw_io_free_move_correct_list(hw, io);
+ }
+ }
+
+ /*
+ * For port owned XRIs, they are not on the in use list, so
+ * walk though XRIs and issue any callbacks.
+ */
+ ocs_list_foreach_safe(&hw->io_port_owned, io, tmp_io) {
+ /* check list and remove if there */
+ if (ocs_list_on_list(&io->dnrx_link)) {
+ ocs_list_remove(&hw->io_port_dnrx, io);
+ ocs_ref_put(&io->ref); /* ocs_ref_get(): same function */
+ }
+ ocs_hw_io_cancel_cleanup(hw, io);
+ ocs_list_remove(&hw->io_port_owned, io);
+ ocs_hw_io_free_common(hw, io);
+ }
+ ocs_unlock(&hw->io_lock);
+
+ /* Give time for the callbacks to complete */
+ do {
+ ocs_udelay(10000);
+ iters--;
+ } while (!ocs_list_empty(&hw->io_inuse) && iters);
+
+ /* Leave a breadcrumb that cleanup is not yet complete. */
+ if (!ocs_list_empty(&hw->io_inuse)) {
+ ocs_log_test(hw->os, "io_inuse list is not empty\n");
+ }
+
+ return 0;
+}
+
+static int32_t
+ocs_hw_io_ini_sge(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_dma_t *cmnd, uint32_t cmnd_size,
+ ocs_dma_t *rsp)
+{
+ sli4_sge_t *data = NULL;
+
+ if (!hw || !io) {
+ ocs_log_err(NULL, "bad parm hw=%p io=%p\n", hw, io);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ data = io->def_sgl.virt;
+
+ /* setup command pointer */
+ data->buffer_address_high = ocs_addr32_hi(cmnd->phys);
+ data->buffer_address_low = ocs_addr32_lo(cmnd->phys);
+ data->buffer_length = cmnd_size;
+ data++;
+
+ /* setup response pointer */
+ data->buffer_address_high = ocs_addr32_hi(rsp->phys);
+ data->buffer_address_low = ocs_addr32_lo(rsp->phys);
+ data->buffer_length = rsp->size;
+
+ return 0;
+}
+
+static int32_t
+__ocs_read_topology_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
+{
+ sli4_cmd_read_topology_t *read_topo = (sli4_cmd_read_topology_t *)mqe;
+
+ if (status || read_topo->hdr.status) {
+ ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n",
+ status, read_topo->hdr.status);
+ ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
+ return -1;
+ }
+
+ switch (read_topo->attention_type) {
+ case SLI4_READ_TOPOLOGY_LINK_UP:
+ hw->link.status = SLI_LINK_STATUS_UP;
+ break;
+ case SLI4_READ_TOPOLOGY_LINK_DOWN:
+ hw->link.status = SLI_LINK_STATUS_DOWN;
+ break;
+ case SLI4_READ_TOPOLOGY_LINK_NO_ALPA:
+ hw->link.status = SLI_LINK_STATUS_NO_ALPA;
+ break;
+ default:
+ hw->link.status = SLI_LINK_STATUS_MAX;
+ break;
+ }
+
+ switch (read_topo->topology) {
+ case SLI4_READ_TOPOLOGY_NPORT:
+ hw->link.topology = SLI_LINK_TOPO_NPORT;
+ break;
+ case SLI4_READ_TOPOLOGY_FC_AL:
+ hw->link.topology = SLI_LINK_TOPO_LOOP;
+ if (SLI_LINK_STATUS_UP == hw->link.status) {
+ hw->link.loop_map = hw->loop_map.virt;
+ }
+ hw->link.fc_id = read_topo->acquired_al_pa;
+ break;
+ default:
+ hw->link.topology = SLI_LINK_TOPO_MAX;
+ break;
+ }
+
+ hw->link.medium = SLI_LINK_MEDIUM_FC;
+
+ switch (read_topo->link_current.link_speed) {
+ case SLI4_READ_TOPOLOGY_SPEED_1G:
+ hw->link.speed = 1 * 1000;
+ break;
+ case SLI4_READ_TOPOLOGY_SPEED_2G:
+ hw->link.speed = 2 * 1000;
+ break;
+ case SLI4_READ_TOPOLOGY_SPEED_4G:
+ hw->link.speed = 4 * 1000;
+ break;
+ case SLI4_READ_TOPOLOGY_SPEED_8G:
+ hw->link.speed = 8 * 1000;
+ break;
+ case SLI4_READ_TOPOLOGY_SPEED_16G:
+ hw->link.speed = 16 * 1000;
+ hw->link.loop_map = NULL;
+ break;
+ case SLI4_READ_TOPOLOGY_SPEED_32G:
+ hw->link.speed = 32 * 1000;
+ hw->link.loop_map = NULL;
+ break;
+ }
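+
+	/* The loop map is cleared for 16G and 32G above because arbitrated
+	 * loop is not supported at those link speeds. */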
+
+ ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
+
+ ocs_hw_read_fcf(hw, SLI4_FCOE_FCF_TABLE_FIRST);
+
+ return 0;
+}
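+
+/*
+ * Informal outline of the SLI port state machine implemented below
+ * (derived from the handlers themselves):
+ *
+ *	alloc_read_sparm64 -> alloc_init_vpi -> allocated -> attach_reg_vpi
+ *	  -> attached -> free_unreg_vpi -> freed
+ *
+ * alloc_init moves straight to allocated on OCS_EVT_HW_PORT_ALLOC_OK; the
+ * *_report_fail states are entered on OCS_EVT_ERROR, and free_nop defers
+ * the free to mailbox completion context on BE3/Skyhawk.
+ */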
+
+static int32_t
+__ocs_hw_port_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
+{
+ ocs_sli_port_t *sport = ctx->app;
+ ocs_hw_t *hw = sport->hw;
+
+ smtrace("port");
+
+ switch (evt) {
+ case OCS_EVT_EXIT:
+ /* ignore */
+ break;
+
+ case OCS_EVT_HW_PORT_REQ_FREE:
+ case OCS_EVT_HW_PORT_REQ_ATTACH:
+ if (data != NULL) {
+ ocs_free(hw->os, data, SLI4_BMBX_SIZE);
+ }
+ /* fall through */
+ default:
+ ocs_log_test(hw->os, "%s %-20s not handled\n", funcname, ocs_sm_event_name(evt));
+ break;
+ }
+
+ return 0;
+}
+
+static void *
+__ocs_hw_port_free_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
+{
+ ocs_sli_port_t *sport = ctx->app;
+ ocs_hw_t *hw = sport->hw;
+
+ smtrace("port");
+
+ switch (evt) {
+ case OCS_EVT_ENTER:
+ if (data != NULL) {
+ ocs_free(hw->os, data, SLI4_BMBX_SIZE);
+ }
+ if (hw->callback.port != NULL) {
+ hw->callback.port(hw->args.port,
+ OCS_HW_PORT_FREE_FAIL, sport);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return NULL;
+}
+
+static void *
+__ocs_hw_port_freed(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
+{
+ ocs_sli_port_t *sport = ctx->app;
+ ocs_hw_t *hw = sport->hw;
+
+ smtrace("port");
+
+ switch (evt) {
+ case OCS_EVT_ENTER:
+ /* free SLI resource */
+ if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator)) {
+ ocs_log_err(hw->os, "FCOE_VPI free failure addr=%#x\n", sport->fc_id);
+ }
+
+ /* free mailbox buffer */
+ if (data != NULL) {
+ ocs_free(hw->os, data, SLI4_BMBX_SIZE);
+ }
+ if (hw->callback.port != NULL) {
+ hw->callback.port(hw->args.port,
+ OCS_HW_PORT_FREE_OK, sport);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return NULL;
+}
+
+static void *
+__ocs_hw_port_attach_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
+{
+ ocs_sli_port_t *sport = ctx->app;
+ ocs_hw_t *hw = sport->hw;
+
+ smtrace("port");
+
+ switch (evt) {
+ case OCS_EVT_ENTER:
+ /* free SLI resource */
+ sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator);
+
+ /* free mailbox buffer */
+ if (data != NULL) {
+ ocs_free(hw->os, data, SLI4_BMBX_SIZE);
+ }
+
+ if (hw->callback.port != NULL) {
+ hw->callback.port(hw->args.port,
+ OCS_HW_PORT_ATTACH_FAIL, sport);
+ }
+ if (sport->sm_free_req_pending) {
+ ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
+ }
+ break;
+ default:
+ __ocs_hw_port_common(__func__, ctx, evt, data);
+ break;
+ }
+
+ return NULL;
+}
+
+static void *
+__ocs_hw_port_free_unreg_vpi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
+{
+ ocs_sli_port_t *sport = ctx->app;
+ ocs_hw_t *hw = sport->hw;
+ uint8_t *cmd = NULL;
+
+ smtrace("port");
+
+ switch (evt) {
+ case OCS_EVT_ENTER:
+ /* allocate memory and send unreg_vpi */
+ cmd = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
+ if (!cmd) {
+ ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
+ break;
+ }
+
+ if (0 == sli_cmd_unreg_vpi(&hw->sli, cmd, SLI4_BMBX_SIZE, sport->indicator,
+ SLI4_UNREG_TYPE_PORT)) {
+ ocs_log_err(hw->os, "UNREG_VPI format failure\n");
+ ocs_free(hw->os, cmd, SLI4_BMBX_SIZE);
+ ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
+ break;
+ }
+
+ if (ocs_hw_command(hw, cmd, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) {
+ ocs_log_err(hw->os, "UNREG_VPI command failure\n");
+ ocs_free(hw->os, cmd, SLI4_BMBX_SIZE);
+ ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
+ break;
+ }
+ break;
+ case OCS_EVT_RESPONSE:
+ ocs_sm_transition(ctx, __ocs_hw_port_freed, data);
+ break;
+ case OCS_EVT_ERROR:
+ ocs_sm_transition(ctx, __ocs_hw_port_free_report_fail, data);
+ break;
+ default:
+ __ocs_hw_port_common(__func__, ctx, evt, data);
+ break;
+ }
+
+ return NULL;
+}
+
+static void *
+__ocs_hw_port_free_nop(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
+{
+ ocs_sli_port_t *sport = ctx->app;
+ ocs_hw_t *hw = sport->hw;
+
+ smtrace("port");
+
+ switch (evt) {
+ case OCS_EVT_ENTER:
+ /* Forward to execute in mailbox completion processing context */
+ if (ocs_hw_async_call(hw, __ocs_hw_port_realloc_cb, sport)) {
+ ocs_log_err(hw->os, "ocs_hw_async_call failed\n");
+ }
+ break;
+ case OCS_EVT_RESPONSE:
+ ocs_sm_transition(ctx, __ocs_hw_port_freed, data);
+ break;
+ case OCS_EVT_ERROR:
+ ocs_sm_transition(ctx, __ocs_hw_port_free_report_fail, data);
+ break;
+ default:
+ break;
+ }
+
+ return NULL;
+}
+
+static void *
+__ocs_hw_port_attached(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
+{
+ ocs_sli_port_t *sport = ctx->app;
+ ocs_hw_t *hw = sport->hw;
+
+ smtrace("port");
+
+ switch (evt) {
+ case OCS_EVT_ENTER:
+ if (data != NULL) {
+ ocs_free(hw->os, data, SLI4_BMBX_SIZE);
+ }
+ if (hw->callback.port != NULL) {
+ hw->callback.port(hw->args.port,
+ OCS_HW_PORT_ATTACH_OK, sport);
+ }
+ if (sport->sm_free_req_pending) {
+ ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
+ }
+ break;
+ case OCS_EVT_HW_PORT_REQ_FREE:
+ /* virtual/physical port request free */
+ ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
+ break;
+ default:
+ __ocs_hw_port_common(__func__, ctx, evt, data);
+ break;
+ }
+
+ return NULL;
+}
+
+static void *
+__ocs_hw_port_attach_reg_vpi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
+{
+ ocs_sli_port_t *sport = ctx->app;
+ ocs_hw_t *hw = sport->hw;
+
+ smtrace("port");
+
+ switch (evt) {
+ case OCS_EVT_ENTER:
+ if (0 == sli_cmd_reg_vpi(&hw->sli, data, SLI4_BMBX_SIZE, sport, FALSE)) {
+ ocs_log_err(hw->os, "REG_VPI format failure\n");
+ ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
+ break;
+ }
+
+ if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) {
+ ocs_log_err(hw->os, "REG_VPI command failure\n");
+ ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
+ break;
+ }
+ break;
+ case OCS_EVT_RESPONSE:
+ ocs_sm_transition(ctx, __ocs_hw_port_attached, data);
+ break;
+ case OCS_EVT_ERROR:
+ ocs_sm_transition(ctx, __ocs_hw_port_attach_report_fail, data);
+ break;
+ case OCS_EVT_HW_PORT_REQ_FREE:
+ /* Wait for attach response and then free */
+ sport->sm_free_req_pending = 1;
+ break;
+ default:
+ __ocs_hw_port_common(__func__, ctx, evt, data);
+ break;
+ }
+
+ return NULL;
+}
+
+static void *
+__ocs_hw_port_done(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
+{
+ ocs_sli_port_t *sport = ctx->app;
+ ocs_hw_t *hw = sport->hw;
+
+ smtrace("port");
+
+ switch (evt) {
+ case OCS_EVT_ENTER:
+ /* free SLI resource */
+ sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator);
+
+ /* free mailbox buffer */
+ if (data != NULL) {
+ ocs_free(hw->os, data, SLI4_BMBX_SIZE);
+ }
+ break;
+ default:
+ __ocs_hw_port_common(__func__, ctx, evt, data);
+ break;
+ }
+
+ return NULL;
+}
+
+static void *
+__ocs_hw_port_allocated(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
+{
+ ocs_sli_port_t *sport = ctx->app;
+ ocs_hw_t *hw = sport->hw;
+
+ smtrace("port");
+
+ switch (evt) {
+ case OCS_EVT_ENTER:
+ if (data != NULL) {
+ ocs_free(hw->os, data, SLI4_BMBX_SIZE);
+ }
+ if (hw->callback.port != NULL) {
+ hw->callback.port(hw->args.port,
+ OCS_HW_PORT_ALLOC_OK, sport);
+ }
+ /* If there is a pending free request, then handle it now */
+ if (sport->sm_free_req_pending) {
+ ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
+ }
+ break;
+ case OCS_EVT_HW_PORT_REQ_ATTACH:
+ /* virtual port requests attach */
+ ocs_sm_transition(ctx, __ocs_hw_port_attach_reg_vpi, data);
+ break;
+ case OCS_EVT_HW_PORT_ATTACH_OK:
+ /* physical port attached (as part of attaching domain) */
+ ocs_sm_transition(ctx, __ocs_hw_port_attached, data);
+ break;
+ case OCS_EVT_HW_PORT_REQ_FREE:
+ /* virtual port request free */
+ if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) {
+ ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
+ } else {
+ /*
+ * Note: BE3/Skyhawk will respond with a status of 0x20
+ * unless the reg_vpi has been issued, so we can
+ * skip the unreg_vpi for these adapters.
+ *
+ * Send a nop to make sure that free doesn't occur in
+			 * the same context.
+ */
+ ocs_sm_transition(ctx, __ocs_hw_port_free_nop, NULL);
+ }
+ break;
+ default:
+ __ocs_hw_port_common(__func__, ctx, evt, data);
+ break;
+ }
+
+ return NULL;
+}
+
+static void *
+__ocs_hw_port_alloc_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
+{
+ ocs_sli_port_t *sport = ctx->app;
+ ocs_hw_t *hw = sport->hw;
+
+ smtrace("port");
+
+ switch (evt) {
+ case OCS_EVT_ENTER:
+ /* free SLI resource */
+ sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator);
+
+ /* free mailbox buffer */
+ if (data != NULL) {
+ ocs_free(hw->os, data, SLI4_BMBX_SIZE);
+ }
+
+ if (hw->callback.port != NULL) {
+ hw->callback.port(hw->args.port,
+ OCS_HW_PORT_ALLOC_FAIL, sport);
+ }
+
+ /* If there is a pending free request, then handle it now */
+ if (sport->sm_free_req_pending) {
+ ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
+ }
+ break;
+ default:
+ __ocs_hw_port_common(__func__, ctx, evt, data);
+ break;
+ }
+
+ return NULL;
+}
+
+static void *
+__ocs_hw_port_alloc_read_sparm64(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
+{
+ ocs_sli_port_t *sport = ctx->app;
+ ocs_hw_t *hw = sport->hw;
+ uint8_t *payload = NULL;
+
+ smtrace("port");
+
+ switch (evt) {
+ case OCS_EVT_ENTER:
+ /* allocate memory for the service parameters */
+ if (ocs_dma_alloc(hw->os, &sport->dma, 112, 4)) {
+ ocs_log_err(hw->os, "Failed to allocate DMA memory\n");
+ ocs_sm_transition(ctx, __ocs_hw_port_done, data);
+ break;
+ }
+
+ if (0 == sli_cmd_read_sparm64(&hw->sli, data, SLI4_BMBX_SIZE,
+ &sport->dma, sport->indicator)) {
+			ocs_log_err(hw->os, "READ_SPARM64 format failure\n");
+ ocs_dma_free(hw->os, &sport->dma);
+ ocs_sm_transition(ctx, __ocs_hw_port_done, data);
+ break;
+ }
+
+ if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) {
+ ocs_log_err(hw->os, "READ_SPARM64 command failure\n");
+ ocs_dma_free(hw->os, &sport->dma);
+ ocs_sm_transition(ctx, __ocs_hw_port_done, data);
+ break;
+ }
+ break;
+ case OCS_EVT_RESPONSE:
+ payload = sport->dma.virt;
+
+ ocs_display_sparams(sport->display_name, "sport sparm64", 0, NULL, payload);
+
+ ocs_memcpy(&sport->sli_wwpn, payload + SLI4_READ_SPARM64_WWPN_OFFSET,
+ sizeof(sport->sli_wwpn));
+ ocs_memcpy(&sport->sli_wwnn, payload + SLI4_READ_SPARM64_WWNN_OFFSET,
+ sizeof(sport->sli_wwnn));
+
+ ocs_dma_free(hw->os, &sport->dma);
+ ocs_sm_transition(ctx, __ocs_hw_port_alloc_init_vpi, data);
+ break;
+ case OCS_EVT_ERROR:
+ ocs_dma_free(hw->os, &sport->dma);
+ ocs_sm_transition(ctx, __ocs_hw_port_alloc_report_fail, data);
+ break;
+ case OCS_EVT_HW_PORT_REQ_FREE:
+ /* Wait for attach response and then free */
+ sport->sm_free_req_pending = 1;
+ break;
+ case OCS_EVT_EXIT:
+ break;
+ default:
+ __ocs_hw_port_common(__func__, ctx, evt, data);
+ break;
+ }
+
+ return NULL;
+}
+
+static void *
+__ocs_hw_port_alloc_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
+{
+ ocs_sli_port_t *sport = ctx->app;
+
+ smtrace("port");
+
+ switch (evt) {
+ case OCS_EVT_ENTER:
+ /* no-op */
+ break;
+ case OCS_EVT_HW_PORT_ALLOC_OK:
+ ocs_sm_transition(ctx, __ocs_hw_port_allocated, NULL);
+ break;
+ case OCS_EVT_HW_PORT_ALLOC_FAIL:
+ ocs_sm_transition(ctx, __ocs_hw_port_alloc_report_fail, NULL);
+ break;
+ case OCS_EVT_HW_PORT_REQ_FREE:
+ /* Wait for attach response and then free */
+ sport->sm_free_req_pending = 1;
+ break;
+ default:
+ __ocs_hw_port_common(__func__, ctx, evt, data);
+ break;
+ }
+
+ return NULL;
+}
+
+static void *
+__ocs_hw_port_alloc_init_vpi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
+{
+ ocs_sli_port_t *sport = ctx->app;
+ ocs_hw_t *hw = sport->hw;
+
+ smtrace("port");
+
+ switch (evt) {
+ case OCS_EVT_ENTER:
+ /* If there is a pending free request, then handle it now */
+ if (sport->sm_free_req_pending) {
+ ocs_sm_transition(ctx, __ocs_hw_port_freed, NULL);
+ return NULL;
+ }
+
+ /* TODO XXX transitioning to done only works if this is called
+ * directly from ocs_hw_port_alloc BUT not if called from
+ * read_sparm64. In the later case, we actually want to go
+	 * read_sparm64. In the latter case, we actually want to go
+ */
+ if (0 == sli_cmd_init_vpi(&hw->sli, data, SLI4_BMBX_SIZE,
+ sport->indicator, sport->domain->indicator)) {
+			ocs_log_err(hw->os, "INIT_VPI format failure\n");
+ ocs_sm_transition(ctx, __ocs_hw_port_done, data);
+ break;
+ }
+
+ if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) {
+ ocs_log_err(hw->os, "INIT_VPI command failure\n");
+ ocs_sm_transition(ctx, __ocs_hw_port_done, data);
+ break;
+ }
+ break;
+ case OCS_EVT_RESPONSE:
+ ocs_sm_transition(ctx, __ocs_hw_port_allocated, data);
+ break;
+ case OCS_EVT_ERROR:
+ ocs_sm_transition(ctx, __ocs_hw_port_alloc_report_fail, data);
+ break;
+ case OCS_EVT_HW_PORT_REQ_FREE:
+ /* Wait for attach response and then free */
+ sport->sm_free_req_pending = 1;
+ break;
+ case OCS_EVT_EXIT:
+ break;
+ default:
+ __ocs_hw_port_common(__func__, ctx, evt, data);
+ break;
+ }
+
+ return NULL;
+}
+
+static int32_t
+__ocs_hw_port_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
+{
+ ocs_sli_port_t *sport = arg;
+ sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe;
+ ocs_sm_event_t evt;
+
+ if (status || hdr->status) {
+ ocs_log_debug(hw->os, "bad status vpi=%#x st=%x hdr=%x\n",
+ sport->indicator, status, hdr->status);
+ evt = OCS_EVT_ERROR;
+ } else {
+ evt = OCS_EVT_RESPONSE;
+ }
+
+ ocs_sm_post_event(&sport->ctx, evt, mqe);
+
+ return 0;
+}
+
+static int32_t
+__ocs_hw_port_realloc_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
+{
+ ocs_sli_port_t *sport = arg;
+ sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe;
+ ocs_sm_event_t evt;
+ uint8_t *mqecpy;
+
+ if (status || hdr->status) {
+ ocs_log_debug(hw->os, "bad status vpi=%#x st=%x hdr=%x\n",
+ sport->indicator, status, hdr->status);
+ evt = OCS_EVT_ERROR;
+ } else {
+ evt = OCS_EVT_RESPONSE;
+ }
+
+ /*
+	 * In this case we have to allocate a copy of the mailbox command buffer, as
+	 * it is reused in the state machine post-event call and eventually freed there.
+ */
+ mqecpy = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
+ if (mqecpy == NULL) {
+ ocs_log_err(hw->os, "malloc mqecpy failed\n");
+ return -1;
+ }
+ ocs_memcpy(mqecpy, mqe, SLI4_BMBX_SIZE);
+
+ ocs_sm_post_event(&sport->ctx, evt, mqecpy);
+
+ return 0;
+}
+
+/***************************************************************************
+ * Domain state machine
+ */
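+
+/*
+ * Informal outline of the domain state machine implemented below
+ * (derived from the handlers themselves):
+ *
+ *	init -> alloc_reg_fcfi (FCoE) or alloc_init_vfi (FC)
+ *	  -> alloc_read_sparm64 -> allocated -> attach_reg_vfi -> attached
+ *	  -> free_unreg_vfi [-> free_unreg_fcfi] [-> free_redisc_fcf] -> freed
+ *
+ * with the *_report_fail states entered on OCS_EVT_ERROR.
+ */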
+
+static int32_t
+__ocs_hw_domain_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
+{
+ ocs_domain_t *domain = ctx->app;
+ ocs_hw_t *hw = domain->hw;
+
+ smtrace("domain");
+
+ switch (evt) {
+ case OCS_EVT_EXIT:
+ /* ignore */
+ break;
+
+ default:
+ ocs_log_test(hw->os, "%s %-20s not handled\n", funcname, ocs_sm_event_name(evt));
+ break;
+ }
+
+ return 0;
+}
+
+static void *
+__ocs_hw_domain_alloc_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
+{
+ ocs_domain_t *domain = ctx->app;
+ ocs_hw_t *hw = domain->hw;
+
+ smtrace("domain");
+
+ switch (evt) {
+ case OCS_EVT_ENTER:
+ /* free command buffer */
+ if (data != NULL) {
+ ocs_free(hw->os, data, SLI4_BMBX_SIZE);
+ }
+ /* free SLI resources */
+ sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI, domain->indicator);
+ /* TODO how to free FCFI (or do we at all)? */
+
+ if (hw->callback.domain != NULL) {
+ hw->callback.domain(hw->args.domain,
+ OCS_HW_DOMAIN_ALLOC_FAIL,
+ domain);
+ }
+ break;
+ default:
+ __ocs_hw_domain_common(__func__, ctx, evt, data);
+ break;
+ }
+
+ return NULL;
+}
+
+static void *
+__ocs_hw_domain_attached(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
+{
+ ocs_domain_t *domain = ctx->app;
+ ocs_hw_t *hw = domain->hw;
+
+ smtrace("domain");
+
+ switch (evt) {
+ case OCS_EVT_ENTER:
+ /* free mailbox buffer and send alloc ok to physical sport */
+ ocs_free(hw->os, data, SLI4_BMBX_SIZE);
+ ocs_sm_post_event(&domain->sport->ctx, OCS_EVT_HW_PORT_ATTACH_OK, NULL);
+
+ /* now inform registered callbacks */
+ if (hw->callback.domain != NULL) {
+ hw->callback.domain(hw->args.domain,
+ OCS_HW_DOMAIN_ATTACH_OK,
+ domain);
+ }
+ break;
+ case OCS_EVT_HW_DOMAIN_REQ_FREE:
+ ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_vfi, NULL);
+ break;
+ default:
+ __ocs_hw_domain_common(__func__, ctx, evt, data);
+ break;
+ }
+
+ return NULL;
+}
+
+static void *
+__ocs_hw_domain_attach_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
+{
+ ocs_domain_t *domain = ctx->app;
+ ocs_hw_t *hw = domain->hw;
+
+ smtrace("domain");
+
+ switch (evt) {
+ case OCS_EVT_ENTER:
+ if (data != NULL) {
+ ocs_free(hw->os, data, SLI4_BMBX_SIZE);
+ }
+ /* free SLI resources */
+ sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI, domain->indicator);
+ /* TODO how to free FCFI (or do we at all)? */
+
+ if (hw->callback.domain != NULL) {
+ hw->callback.domain(hw->args.domain,
+ OCS_HW_DOMAIN_ATTACH_FAIL,
+ domain);
+ }
+ break;
+ case OCS_EVT_EXIT:
+ break;
+ default:
+ __ocs_hw_domain_common(__func__, ctx, evt, data);
+ break;
+ }
+
+ return NULL;
+}
+
+static void *
+__ocs_hw_domain_attach_reg_vfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
+{
+ ocs_domain_t *domain = ctx->app;
+ ocs_hw_t *hw = domain->hw;
+
+ smtrace("domain");
+
+ switch (evt) {
+ case OCS_EVT_ENTER:
+
+		ocs_display_sparams("", "reg vfi", 0, NULL, domain->dma.virt);
+
+ if (0 == sli_cmd_reg_vfi(&hw->sli, data, SLI4_BMBX_SIZE, domain)) {
+ ocs_log_err(hw->os, "REG_VFI format failure\n");
+ ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
+ break;
+ }
+
+ if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
+ ocs_log_err(hw->os, "REG_VFI command failure\n");
+ ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
+ break;
+ }
+ break;
+ case OCS_EVT_RESPONSE:
+ ocs_sm_transition(ctx, __ocs_hw_domain_attached, data);
+ break;
+ case OCS_EVT_ERROR:
+ ocs_sm_transition(ctx, __ocs_hw_domain_attach_report_fail, data);
+ break;
+ default:
+ __ocs_hw_domain_common(__func__, ctx, evt, data);
+ break;
+ }
+
+ return NULL;
+}
+
+static void *
+__ocs_hw_domain_allocated(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
+{
+ ocs_domain_t *domain = ctx->app;
+ ocs_hw_t *hw = domain->hw;
+
+ smtrace("domain");
+
+ switch (evt) {
+ case OCS_EVT_ENTER:
+ /* free mailbox buffer and send alloc ok to physical sport */
+ ocs_free(hw->os, data, SLI4_BMBX_SIZE);
+ ocs_sm_post_event(&domain->sport->ctx, OCS_EVT_HW_PORT_ALLOC_OK, NULL);
+
+ ocs_hw_domain_add(hw, domain);
+
+ /* now inform registered callbacks */
+ if (hw->callback.domain != NULL) {
+ hw->callback.domain(hw->args.domain,
+ OCS_HW_DOMAIN_ALLOC_OK,
+ domain);
+ }
+ break;
+ case OCS_EVT_HW_DOMAIN_REQ_ATTACH:
+ ocs_sm_transition(ctx, __ocs_hw_domain_attach_reg_vfi, data);
+ break;
+ case OCS_EVT_HW_DOMAIN_REQ_FREE:
+ /* unreg_fcfi/vfi */
+ if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) {
+ ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_fcfi, NULL);
+ } else {
+ ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_vfi, NULL);
+ }
+ break;
+ default:
+ __ocs_hw_domain_common(__func__, ctx, evt, data);
+ break;
+ }
+
+ return NULL;
+}
+
+static void *
+__ocs_hw_domain_alloc_read_sparm64(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
+{
+ ocs_domain_t *domain = ctx->app;
+ ocs_hw_t *hw = domain->hw;
+
+ smtrace("domain");
+
+ switch (evt) {
+ case OCS_EVT_ENTER:
+ if (0 == sli_cmd_read_sparm64(&hw->sli, data, SLI4_BMBX_SIZE,
+ &domain->dma, SLI4_READ_SPARM64_VPI_DEFAULT)) {
+ ocs_log_err(hw->os, "READ_SPARM64 format failure\n");
+ ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
+ break;
+ }
+
+ if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
+ ocs_log_err(hw->os, "READ_SPARM64 command failure\n");
+ ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
+ break;
+ }
+ break;
+ case OCS_EVT_EXIT:
+ break;
+ case OCS_EVT_RESPONSE:
+ ocs_display_sparams(domain->display_name, "domain sparm64", 0, NULL, domain->dma.virt);
+
+ ocs_sm_transition(ctx, __ocs_hw_domain_allocated, data);
+ break;
+ case OCS_EVT_ERROR:
+ ocs_sm_transition(ctx, __ocs_hw_domain_alloc_report_fail, data);
+ break;
+ default:
+ __ocs_hw_domain_common(__func__, ctx, evt, data);
+ break;
+ }
+
+ return NULL;
+}
+
+static void *
+__ocs_hw_domain_alloc_init_vfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
+{
+ ocs_domain_t *domain = ctx->app;
+ ocs_sli_port_t *sport = domain->sport;
+ ocs_hw_t *hw = domain->hw;
+
+ smtrace("domain");
+
+ switch (evt) {
+ case OCS_EVT_ENTER:
+ if (0 == sli_cmd_init_vfi(&hw->sli, data, SLI4_BMBX_SIZE, domain->indicator,
+ domain->fcf_indicator, sport->indicator)) {
+ ocs_log_err(hw->os, "INIT_VFI format failure\n");
+ ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
+ break;
+ }
+ if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
+ ocs_log_err(hw->os, "INIT_VFI command failure\n");
+ ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
+ break;
+ }
+ break;
+ case OCS_EVT_EXIT:
+ break;
+ case OCS_EVT_RESPONSE:
+ ocs_sm_transition(ctx, __ocs_hw_domain_alloc_read_sparm64, data);
+ break;
+ case OCS_EVT_ERROR:
+ ocs_sm_transition(ctx, __ocs_hw_domain_alloc_report_fail, data);
+ break;
+ default:
+ __ocs_hw_domain_common(__func__, ctx, evt, data);
+ break;
+ }
+
+ return NULL;
+}
+
+static void *
+__ocs_hw_domain_alloc_reg_fcfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
+{
+ ocs_domain_t *domain = ctx->app;
+ ocs_hw_t *hw = domain->hw;
+
+ smtrace("domain");
+
+ switch (evt) {
+ case OCS_EVT_ENTER: {
+ sli4_cmd_rq_cfg_t rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG];
+ uint32_t i;
+
+ /* Set the filter match/mask values from hw's filter_def values */
+ for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
+ rq_cfg[i].rq_id = 0xffff;
+ rq_cfg[i].r_ctl_mask = (uint8_t) hw->config.filter_def[i];
+ rq_cfg[i].r_ctl_match = (uint8_t) (hw->config.filter_def[i] >> 8);
+ rq_cfg[i].type_mask = (uint8_t) (hw->config.filter_def[i] >> 16);
+ rq_cfg[i].type_match = (uint8_t) (hw->config.filter_def[i] >> 24);
+ }
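+
+		/*
+		 * Each 32-bit filter_def packs four 8-bit fields, low byte first:
+		 * r_ctl_mask, r_ctl_match, type_mask, type_match. E.g. a
+		 * (hypothetical) value of 0x01ff0200 unpacks to r_ctl_mask=0x00,
+		 * r_ctl_match=0x02, type_mask=0xff, type_match=0x01.
+		 */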
+
+ /* Set the rq_id for each, in order of RQ definition */
+ for (i = 0; i < hw->hw_rq_count; i++) {
+ if (i >= ARRAY_SIZE(rq_cfg)) {
+ ocs_log_warn(hw->os, "more RQs than REG_FCFI filter entries\n");
+ break;
+ }
+ rq_cfg[i].rq_id = hw->hw_rq[i]->hdr->id;
+ }
+
+ if (!data) {
+ ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
+ break;
+ }
+
+ if (hw->hw_mrq_count) {
+ if (OCS_HW_RTN_SUCCESS != ocs_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_FCFI_MODE,
+ domain->vlan_id, domain->fcf)) {
+ ocs_log_err(hw->os, "REG_FCFI_MRQ format failure\n");
+ ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
+ break;
+ }
+
+ } else {
+ if (0 == sli_cmd_reg_fcfi(&hw->sli, data, SLI4_BMBX_SIZE, domain->fcf,
+ rq_cfg, domain->vlan_id)) {
+ ocs_log_err(hw->os, "REG_FCFI format failure\n");
+ ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
+ break;
+ }
+ }
+
+ if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
+ ocs_log_err(hw->os, "REG_FCFI command failure\n");
+ ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
+ break;
+ }
+ break;
+ }
+ case OCS_EVT_EXIT:
+ break;
+ case OCS_EVT_RESPONSE:
+ if (!data) {
+ ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
+ break;
+ }
+
+ domain->fcf_indicator = ((sli4_cmd_reg_fcfi_t *)data)->fcfi;
+
+ /*
+ * IF_TYPE 0 devices do not support explicit VFI and VPI initialization
+ * and instead rely on implicit initialization during VFI registration.
+ * Short circuit normal processing here for those devices.
+ */
+ if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) {
+ ocs_sm_transition(ctx, __ocs_hw_domain_alloc_read_sparm64, data);
+ } else {
+ ocs_sm_transition(ctx, __ocs_hw_domain_alloc_init_vfi, data);
+ }
+ break;
+ case OCS_EVT_ERROR:
+ ocs_sm_transition(ctx, __ocs_hw_domain_alloc_report_fail, data);
+ break;
+ default:
+ __ocs_hw_domain_common(__func__, ctx, evt, data);
+ break;
+ }
+
+ return NULL;
+}
+
+static void *
+__ocs_hw_domain_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
+{
+ ocs_domain_t *domain = ctx->app;
+ ocs_hw_t *hw = domain->hw;
+
+ smtrace("domain");
+
+ switch (evt) {
+ case OCS_EVT_ENTER:
+ if (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC) {
+ /*
+			 * For FC, the HW already registered an FCFI.
+			 * Copy the FCF information into the domain and jump to INIT_VFI.
+ */
+ domain->fcf_indicator = hw->fcf_indicator;
+ ocs_sm_transition(&domain->sm, __ocs_hw_domain_alloc_init_vfi, data);
+ } else {
+ ocs_sm_transition(&domain->sm, __ocs_hw_domain_alloc_reg_fcfi, data);
+ }
+ break;
+ default:
+ __ocs_hw_domain_common(__func__, ctx, evt, data);
+ break;
+ }
+
+ return NULL;
+}
+
+static void *
+__ocs_hw_domain_free_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
+{
+ ocs_domain_t *domain = ctx->app;
+
+ smtrace("domain");
+
+ switch (evt) {
+ case OCS_EVT_ENTER:
+ if (domain != NULL) {
+ ocs_hw_t *hw = domain->hw;
+
+ ocs_hw_domain_del(hw, domain);
+
+ if (hw->callback.domain != NULL) {
+ hw->callback.domain(hw->args.domain,
+ OCS_HW_DOMAIN_FREE_FAIL,
+ domain);
+ }
+ }
+
+ /* free command buffer */
+ if (data != NULL) {
+ ocs_free(domain != NULL ? domain->hw->os : NULL, data, SLI4_BMBX_SIZE);
+ }
+ break;
+ case OCS_EVT_EXIT:
+ break;
+ default:
+ __ocs_hw_domain_common(__func__, ctx, evt, data);
+ break;
+ }
+
+ return NULL;
+}
+
+static void *
+__ocs_hw_domain_freed(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
+{
+ ocs_domain_t *domain = ctx->app;
+
+ smtrace("domain");
+
+ switch (evt) {
+ case OCS_EVT_ENTER:
+ /* Free DMA and mailbox buffer */
+ if (domain != NULL) {
+ ocs_hw_t *hw = domain->hw;
+
+ /* free VFI resource */
+ sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI,
+ domain->indicator);
+
+ ocs_hw_domain_del(hw, domain);
+
+ /* inform registered callbacks */
+ if (hw->callback.domain != NULL) {
+ hw->callback.domain(hw->args.domain,
+ OCS_HW_DOMAIN_FREE_OK,
+ domain);
+ }
+ }
+ if (data != NULL) {
+ ocs_free(NULL, data, SLI4_BMBX_SIZE);
+ }
+ break;
+ case OCS_EVT_EXIT:
+ break;
+ default:
+ __ocs_hw_domain_common(__func__, ctx, evt, data);
+ break;
+ }
+
+ return NULL;
+}
+
+
+static void *
+__ocs_hw_domain_free_redisc_fcf(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
+{
+ ocs_domain_t *domain = ctx->app;
+ ocs_hw_t *hw = domain->hw;
+
+ smtrace("domain");
+
+ switch (evt) {
+ case OCS_EVT_ENTER:
+ /* if we're in the middle of a teardown, skip sending rediscover */
+ if (hw->state == OCS_HW_STATE_TEARDOWN_IN_PROGRESS) {
+ ocs_sm_transition(ctx, __ocs_hw_domain_freed, data);
+ break;
+ }
+ if (0 == sli_cmd_fcoe_rediscover_fcf(&hw->sli, data, SLI4_BMBX_SIZE, domain->fcf)) {
+ ocs_log_err(hw->os, "REDISCOVER_FCF format failure\n");
+ ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
+ break;
+ }
+
+ if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
+ ocs_log_err(hw->os, "REDISCOVER_FCF command failure\n");
+ ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
+ }
+ break;
+ case OCS_EVT_RESPONSE:
+ case OCS_EVT_ERROR:
+ /* REDISCOVER_FCF can fail if none exist */
+ ocs_sm_transition(ctx, __ocs_hw_domain_freed, data);
+ break;
+ case OCS_EVT_EXIT:
+ break;
+ default:
+ __ocs_hw_domain_common(__func__, ctx, evt, data);
+ break;
+ }
+
+ return NULL;
+}
+
+static void *
+__ocs_hw_domain_free_unreg_fcfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
+{
+ ocs_domain_t *domain = ctx->app;
+ ocs_hw_t *hw = domain->hw;
+
+ smtrace("domain");
+
+ switch (evt) {
+ case OCS_EVT_ENTER:
+ if (data == NULL) {
+ data = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
+ if (!data) {
+ ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
+ break;
+ }
+ }
+
+ if (0 == sli_cmd_unreg_fcfi(&hw->sli, data, SLI4_BMBX_SIZE, domain->fcf_indicator)) {
+ ocs_log_err(hw->os, "UNREG_FCFI format failure\n");
+ ocs_free(hw->os, data, SLI4_BMBX_SIZE);
+ ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
+ break;
+ }
+
+ if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
+ ocs_log_err(hw->os, "UNREG_FCFI command failure\n");
+ ocs_free(hw->os, data, SLI4_BMBX_SIZE);
+ ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
+ break;
+ }
+ break;
+ case OCS_EVT_RESPONSE:
+ if (domain->req_rediscover_fcf) {
+ domain->req_rediscover_fcf = FALSE;
+ ocs_sm_transition(ctx, __ocs_hw_domain_free_redisc_fcf, data);
+ } else {
+ ocs_sm_transition(ctx, __ocs_hw_domain_freed, data);
+ }
+ break;
+ case OCS_EVT_ERROR:
+ ocs_sm_transition(ctx, __ocs_hw_domain_free_report_fail, data);
+ break;
+ case OCS_EVT_EXIT:
+ break;
+ default:
+ __ocs_hw_domain_common(__func__, ctx, evt, data);
+ break;
+ }
+
+ return NULL;
+}
+
+static void *
+__ocs_hw_domain_free_unreg_vfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
+{
+ ocs_domain_t *domain = ctx->app;
+ ocs_hw_t *hw = domain->hw;
+ uint8_t is_fc = FALSE;
+
+ smtrace("domain");
+
+ is_fc = (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC);
+
+ switch (evt) {
+ case OCS_EVT_ENTER:
+ if (data == NULL) {
+ data = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
+ if (!data) {
+ ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
+ break;
+ }
+ }
+
+ if (0 == sli_cmd_unreg_vfi(&hw->sli, data, SLI4_BMBX_SIZE, domain,
+ SLI4_UNREG_TYPE_DOMAIN)) {
+ ocs_log_err(hw->os, "UNREG_VFI format failure\n");
+ ocs_free(hw->os, data, SLI4_BMBX_SIZE);
+ ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
+ break;
+ }
+
+ if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
+ ocs_log_err(hw->os, "UNREG_VFI command failure\n");
+ ocs_free(hw->os, data, SLI4_BMBX_SIZE);
+ ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
+ break;
+ }
+ break;
+ case OCS_EVT_ERROR:
+ if (is_fc) {
+ ocs_sm_transition(ctx, __ocs_hw_domain_free_report_fail, data);
+ } else {
+ ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_fcfi, data);
+ }
+ break;
+ case OCS_EVT_RESPONSE:
+ if (is_fc) {
+ ocs_sm_transition(ctx, __ocs_hw_domain_freed, data);
+ } else {
+ ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_fcfi, data);
+ }
+ break;
+ default:
+ __ocs_hw_domain_common(__func__, ctx, evt, data);
+ break;
+ }
+
+ return NULL;
+}
+
+/* callback for domain alloc/attach/free */
+static int32_t
+__ocs_hw_domain_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
+{
+ ocs_domain_t *domain = arg;
+ sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe;
+ ocs_sm_event_t evt;
+
+ if (status || hdr->status) {
+ ocs_log_debug(hw->os, "bad status vfi=%#x st=%x hdr=%x\n",
+ domain->indicator, status, hdr->status);
+ evt = OCS_EVT_ERROR;
+ } else {
+ evt = OCS_EVT_RESPONSE;
+ }
+
+ ocs_sm_post_event(&domain->sm, evt, mqe);
+
+ return 0;
+}
+
+static int32_t
+target_wqe_timer_nop_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
+{
+ ocs_hw_io_t *io = NULL;
+ ocs_hw_io_t *io_next = NULL;
+ uint64_t ticks_current = ocs_get_os_ticks();
+ uint32_t sec_elapsed;
+
+ sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe;
+
+ if (status || hdr->status) {
+ ocs_log_debug(hw->os, "bad status st=%x hdr=%x\n",
+ status, hdr->status);
+ /* go ahead and proceed with wqe timer checks... */
+ }
+
+ /* loop through active WQE list and check for timeouts */
+ ocs_lock(&hw->io_lock);
+ ocs_list_foreach_safe(&hw->io_timed_wqe, io, io_next) {
+ sec_elapsed = ((ticks_current - io->submit_ticks) / ocs_get_os_tick_freq());
+
+ /*
+ * If elapsed time > timeout, abort it. No need to check type since
+ * it wouldn't be on this list unless it was a target WQE
+ */
+ if (sec_elapsed > io->tgt_wqe_timeout) {
+ ocs_log_test(hw->os, "IO timeout xri=0x%x tag=0x%x type=%d\n",
+ io->indicator, io->reqtag, io->type);
+
+			/* remove from active_wqe list so we won't try to abort it again */
+ ocs_list_remove(&hw->io_timed_wqe, io);
+
+ /* save status of "timed out" for when abort completes */
+ io->status_saved = 1;
+ io->saved_status = SLI4_FC_WCQE_STATUS_TARGET_WQE_TIMEOUT;
+ io->saved_ext = 0;
+ io->saved_len = 0;
+
+ /* now abort outstanding IO */
+ ocs_hw_io_abort(hw, io, FALSE, NULL, NULL);
+ }
+ /*
+ * need to go through entire list since each IO could have a
+ * different timeout value
+ */
+ }
+ ocs_unlock(&hw->io_lock);
+
+ /* if we're not in the middle of shutting down, schedule next timer */
+ if (!hw->active_wqe_timer_shutdown) {
+ ocs_setup_timer(hw->os, &hw->wqe_timer, target_wqe_timer_cb, hw, OCS_HW_WQ_TIMER_PERIOD_MS);
+ }
+ hw->in_active_wqe_timer = FALSE;
+ return 0;
+}
+
+static void
+target_wqe_timer_cb(void *arg)
+{
+ ocs_hw_t *hw = (ocs_hw_t *)arg;
+
+ /* delete existing timer; will kick off new timer after checking wqe timeouts */
+ hw->in_active_wqe_timer = TRUE;
+ ocs_del_timer(&hw->wqe_timer);
+
+ /* Forward timer callback to execute in the mailbox completion processing context */
+ if (ocs_hw_async_call(hw, target_wqe_timer_nop_cb, hw)) {
+ ocs_log_test(hw->os, "ocs_hw_async_call failed\n");
+ }
+}
+
+static void
+shutdown_target_wqe_timer(ocs_hw_t *hw)
+{
+ uint32_t iters = 100;
+
+ if (hw->config.emulate_tgt_wqe_timeout) {
+ /* request active wqe timer shutdown, then wait for it to complete */
+ hw->active_wqe_timer_shutdown = TRUE;
+
+		/* delete the WQE timer */
+ ocs_del_timer(&hw->wqe_timer);
+
+ /* now wait for timer handler to complete (if necessary) */
+ while (hw->in_active_wqe_timer && iters) {
+ /*
+ * if we happen to have just sent NOP mailbox command, make sure
+ * completions are being processed
+ */
+ ocs_hw_flush(hw);
+ iters--;
+ }
+
+ if (iters == 0) {
+ ocs_log_test(hw->os, "Failed to shutdown active wqe timer\n");
+ }
+ }
+}
+
+/**
+ * @brief Determine if HW IO is owned by the port.
+ *
+ * @par Description
+ * Determines if the given HW IO has been posted to the chip.
+ *
+ * @param hw Hardware context allocated by the caller.
+ * @param io HW IO.
+ *
+ * @return Returns TRUE if given HW IO is port-owned.
+ */
+uint8_t
+ocs_hw_is_io_port_owned(ocs_hw_t *hw, ocs_hw_io_t *io)
+{
+ /* Check to see if this is a port owned XRI */
+ return io->is_port_owned;
+}
+
+/**
+ * @brief Return TRUE if exchange is port-owned.
+ *
+ * @par Description
+ * Test to see if the xri is a port-owned xri.
+ *
+ * @param hw Hardware context.
+ * @param xri Exchange indicator.
+ *
+ * @return Returns TRUE if XRI is a port owned XRI.
+ */
+
+uint8_t
+ocs_hw_is_xri_port_owned(ocs_hw_t *hw, uint32_t xri)
+{
+ ocs_hw_io_t *io = ocs_hw_io_lookup(hw, xri);
+ return (io == NULL ? FALSE : io->is_port_owned);
+}
+
+/**
+ * @brief Return XRIs from the port-owned list to the host.
+ *
+ * @par Description
+ * Used when the POST_XRI command fails as well as when the RELEASE_XRI completes.
+ *
+ * @param hw Hardware context.
+ * @param xri_base The starting XRI number.
+ * @param xri_count The number of XRIs to free from the base.
+ */
+static void
+ocs_hw_reclaim_xri(ocs_hw_t *hw, uint16_t xri_base, uint16_t xri_count)
+{
+ ocs_hw_io_t *io;
+ uint32_t i;
+
+ for (i = 0; i < xri_count; i++) {
+ io = ocs_hw_io_lookup(hw, xri_base + i);
+
+ /*
+ * if this is an auto xfer rdy XRI, then we need to release any
+ * buffer attached to the XRI before moving the XRI back to the free pool.
+ */
+ if (hw->auto_xfer_rdy_enabled) {
+ ocs_hw_rqpair_auto_xfer_rdy_move_to_host(hw, io);
+ }
+
+ ocs_lock(&hw->io_lock);
+ ocs_list_remove(&hw->io_port_owned, io);
+ io->is_port_owned = 0;
+ ocs_list_add_tail(&hw->io_free, io);
+ ocs_unlock(&hw->io_lock);
+ }
+}
+
+/**
+ * @brief Called when the POST_XRI command completes.
+ *
+ * @par Description
+ * Free the mailbox command buffer and reclaim the XRIs on failure.
+ *
+ * @param hw Hardware context.
+ * @param status Status field from the mbox completion.
+ * @param mqe Mailbox response structure.
+ * @param arg Caller-provided argument (unused; NULL is passed).
+ *
+ * @return Returns 0.
+ */
+static int32_t
+ocs_hw_cb_post_xri(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
+{
+ sli4_cmd_post_xri_t *post_xri = (sli4_cmd_post_xri_t*)mqe;
+
+ /* Reclaim the XRIs as host owned if the command fails */
+ if (status != 0) {
+		ocs_log_debug(hw->os, "Status 0x%x for XRI base 0x%x, cnt=0x%x\n",
+ status, post_xri->xri_base, post_xri->xri_count);
+ ocs_hw_reclaim_xri(hw, post_xri->xri_base, post_xri->xri_count);
+ }
+
+ ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
+ return 0;
+}
+
+/**
+ * @brief Issues a mailbox command to move XRIs from the host-controlled pool to the port.
+ *
+ * @param hw Hardware context.
+ * @param xri_start The starting XRI to post.
+ * @param num_to_post The number of XRIs to post.
+ *
+ * @return Returns OCS_HW_RTN_NO_MEMORY, OCS_HW_RTN_ERROR, or OCS_HW_RTN_SUCCESS.
+ */
+
+static ocs_hw_rtn_e
+ocs_hw_post_xri(ocs_hw_t *hw, uint32_t xri_start, uint32_t num_to_post)
+{
+ uint8_t *post_xri;
+ ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
+
+	/* a non-local buffer is required for the mailbox queue, so always allocate */
+ post_xri = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
+ if (post_xri == NULL) {
+ ocs_log_err(hw->os, "no buffer for command\n");
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ /* Register the XRIs */
+ if (sli_cmd_post_xri(&hw->sli, post_xri, SLI4_BMBX_SIZE,
+ xri_start, num_to_post)) {
+ rc = ocs_hw_command(hw, post_xri, OCS_CMD_NOWAIT, ocs_hw_cb_post_xri, NULL);
+ if (rc != OCS_HW_RTN_SUCCESS) {
+ ocs_free(hw->os, post_xri, SLI4_BMBX_SIZE);
+ ocs_log_err(hw->os, "post_xri failed\n");
+ }
+ }
+ return rc;
+}
+
+/**
+ * @brief Move XRIs from the host-controlled pool to the port.
+ *
+ * @par Description
+ * Removes IOs from the free list and moves them to the port.
+ *
+ * @param hw Hardware context.
+ * @param num_xri The number of XRIs being requested to move to the chip.
+ *
+ * @return Returns the number of XRIs that were moved.
+ */
+
+uint32_t
+ocs_hw_xri_move_to_port_owned(ocs_hw_t *hw, uint32_t num_xri)
+{
+ ocs_hw_io_t *io;
+ uint32_t i;
+ uint32_t num_posted = 0;
+
+ /*
+ * Note: We cannot use ocs_hw_io_alloc() because that would place the
+ * IO on the io_inuse list. We need to move from the io_free to
+ * the io_port_owned list.
+ */
+ ocs_lock(&hw->io_lock);
+
+ for (i = 0; i < num_xri; i++) {
+
+ if (NULL != (io = ocs_list_remove_head(&hw->io_free))) {
+ ocs_hw_rtn_e rc;
+
+ /*
+ * if this is an auto xfer rdy XRI, then we need to attach a
+ * buffer to the XRI before submitting it to the chip. If a
+ * buffer is unavailable, then we cannot post it, so return it
+ * to the free pool.
+ */
+ if (hw->auto_xfer_rdy_enabled) {
+ /* Note: uses the IO lock to get the auto xfer rdy buffer */
+ ocs_unlock(&hw->io_lock);
+ rc = ocs_hw_rqpair_auto_xfer_rdy_move_to_port(hw, io);
+ ocs_lock(&hw->io_lock);
+ if (rc != OCS_HW_RTN_SUCCESS) {
+ ocs_list_add_head(&hw->io_free, io);
+ break;
+ }
+ }
+ ocs_lock_init(hw->os, &io->axr_lock, "HW_axr_lock[%d]", io->indicator);
+ io->is_port_owned = 1;
+ ocs_list_add_tail(&hw->io_port_owned, io);
+
+ /* Post XRI */
+			if (ocs_hw_post_xri(hw, io->indicator, 1) != OCS_HW_RTN_SUCCESS) {
+ ocs_hw_reclaim_xri(hw, io->indicator, i);
+ break;
+ }
+ num_posted++;
+ } else {
+ /* no more free XRIs */
+ break;
+ }
+ }
+ ocs_unlock(&hw->io_lock);
+
+ return num_posted;
+}
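+
+/*
+ * Usage sketch (illustrative only, not part of this commit): request that
+ * 16 XRIs be posted and check how many the free pool could actually supply:
+ *
+ *     uint32_t posted = ocs_hw_xri_move_to_port_owned(hw, 16);
+ *     if (posted < 16) {
+ *             ocs_log_debug(hw->os, "only %d of 16 XRIs posted\n", posted);
+ *     }
+ */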
+
+/**
+ * @brief Called when the RELEASE_XRI command completes.
+ *
+ * @par Description
+ * Move the IOs back to the free pool on success.
+ *
+ * @param hw Hardware context.
+ * @param status Status field from the mbox completion.
+ * @param mqe Mailbox response structure.
+ * @param arg Caller-provided argument (unused; NULL is passed).
+ *
+ * @return Returns 0.
+ */
+static int32_t
+ocs_hw_cb_release_xri(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
+{
+ sli4_cmd_release_xri_t *release_xri = (sli4_cmd_release_xri_t*)mqe;
+ uint8_t i;
+
+ /* Reclaim the XRIs as host owned if the command fails */
+ if (status != 0) {
+ ocs_log_err(hw->os, "Status 0x%x\n", status);
+ } else {
+ for (i = 0; i < release_xri->released_xri_count; i++) {
+ uint16_t xri = ((i & 1) == 0 ? release_xri->xri_tbl[i/2].xri_tag0 :
+ release_xri->xri_tbl[i/2].xri_tag1);
+ ocs_hw_reclaim_xri(hw, xri, 1);
+ }
+ }
+
+ ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
+ return 0;
+}
+
+/**
+ * @brief Move XRIs from the port-controlled pool to the host.
+ *
+ * Requests XRIs from the FW to return to the host-owned pool.
+ *
+ * @param hw Hardware context.
+ * @param num_xri The number of XRIs requested to be moved from the chip.
+ *
+ * @return Returns 0 for success, or a negative error code value for failure.
+ */
+
+ocs_hw_rtn_e
+ocs_hw_xri_move_to_host_owned(ocs_hw_t *hw, uint8_t num_xri)
+{
+ uint8_t *release_xri;
+ ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
+
+ /* non-local buffer required for mailbox queue */
+ release_xri = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
+ if (release_xri == NULL) {
+ ocs_log_err(hw->os, "no buffer for command\n");
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ /* release the XRIs */
+ if (sli_cmd_release_xri(&hw->sli, release_xri, SLI4_BMBX_SIZE, num_xri)) {
+ rc = ocs_hw_command(hw, release_xri, OCS_CMD_NOWAIT, ocs_hw_cb_release_xri, NULL);
+ if (rc != OCS_HW_RTN_SUCCESS) {
+ ocs_log_err(hw->os, "release_xri failed\n");
+ }
+ }
+	/* If an error occurred, free the mailbox buffer */
+ if (release_xri != NULL && rc != OCS_HW_RTN_SUCCESS) {
+ ocs_free(hw->os, release_xri, SLI4_BMBX_SIZE);
+ }
+ return rc;
+}
+
+
+/**
+ * @brief Allocate an ocs_hw_rq_buffer_t array.
+ *
+ * @par Description
+ * An ocs_hw_rq_buffer_t array is allocated, along with the required DMA memory.
+ *
+ * @param hw Pointer to HW object.
+ * @param rqindex RQ index for this buffer.
+ * @param count Count of buffers in array.
+ * @param size Size of buffer.
+ *
+ * @return Returns the pointer to the allocated ocs_hw_rq_buffer_t array.
+ */
+static ocs_hw_rq_buffer_t *
+ocs_hw_rx_buffer_alloc(ocs_hw_t *hw, uint32_t rqindex, uint32_t count, uint32_t size)
+{
+ ocs_t *ocs = hw->os;
+ ocs_hw_rq_buffer_t *rq_buf = NULL;
+ ocs_hw_rq_buffer_t *prq;
+ uint32_t i;
+
+ if (count != 0) {
+ rq_buf = ocs_malloc(hw->os, sizeof(*rq_buf) * count, OCS_M_NOWAIT | OCS_M_ZERO);
+ if (rq_buf == NULL) {
+ ocs_log_err(hw->os, "Failure to allocate unsolicited DMA trackers\n");
+ return NULL;
+ }
+
+ for (i = 0, prq = rq_buf; i < count; i ++, prq++) {
+ prq->rqindex = rqindex;
+			if (ocs_dma_alloc(ocs, &prq->dma, size, OCS_MIN_DMA_ALIGNMENT)) {
+				uint32_t j;
+
+				ocs_log_err(hw->os, "DMA allocation failed\n");
+				/* free the DMA buffers already allocated */
+				for (j = 0; j < i; j++) {
+					ocs_dma_free(ocs, &rq_buf[j].dma);
+				}
+				ocs_free(hw->os, rq_buf, sizeof(*rq_buf) * count);
+				rq_buf = NULL;
+				break;
+			}
+ }
+ }
+ return rq_buf;
+}
+
+/**
+ * @brief Free an ocs_hw_rq_buffer_t array.
+ *
+ * @par Description
+ * The ocs_hw_rq_buffer_t array is freed, along with allocated DMA memory.
+ *
+ * @param hw Pointer to HW object.
+ * @param rq_buf Pointer to ocs_hw_rq_buffer_t array.
+ * @param count Count of buffers in array.
+ *
+ * @return None.
+ */
+static void
+ocs_hw_rx_buffer_free(ocs_hw_t *hw, ocs_hw_rq_buffer_t *rq_buf, uint32_t count)
+{
+ ocs_t *ocs = hw->os;
+ uint32_t i;
+ ocs_hw_rq_buffer_t *prq;
+
+ if (rq_buf != NULL) {
+ for (i = 0, prq = rq_buf; i < count; i++, prq++) {
+ ocs_dma_free(ocs, &prq->dma);
+ }
+ ocs_free(hw->os, rq_buf, sizeof(*rq_buf) * count);
+ }
+}
+
+/**
+ * @brief Allocate the RQ data buffers.
+ *
+ * @param hw Pointer to HW object.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+ocs_hw_rtn_e
+ocs_hw_rx_allocate(ocs_hw_t *hw)
+{
+ ocs_t *ocs = hw->os;
+ uint32_t i;
+ int32_t rc = OCS_HW_RTN_SUCCESS;
+ uint32_t rqindex = 0;
+ hw_rq_t *rq;
+ uint32_t hdr_size = OCS_HW_RQ_SIZE_HDR;
+ uint32_t payload_size = hw->config.rq_default_buffer_size;
+
+ rqindex = 0;
+
+ for (i = 0; i < hw->hw_rq_count; i++) {
+ rq = hw->hw_rq[i];
+
+ /* Allocate header buffers */
+ rq->hdr_buf = ocs_hw_rx_buffer_alloc(hw, rqindex, rq->entry_count, hdr_size);
+ if (rq->hdr_buf == NULL) {
+ ocs_log_err(ocs, "ocs_hw_rx_buffer_alloc hdr_buf failed\n");
+ rc = OCS_HW_RTN_ERROR;
+ break;
+ }
+
+ ocs_log_debug(hw->os, "rq[%2d] rq_id %02d header %4d by %4d bytes\n", i, rq->hdr->id,
+ rq->entry_count, hdr_size);
+
+ rqindex++;
+
+ /* Allocate payload buffers */
+ rq->payload_buf = ocs_hw_rx_buffer_alloc(hw, rqindex, rq->entry_count, payload_size);
+ if (rq->payload_buf == NULL) {
+			ocs_log_err(ocs, "ocs_hw_rx_buffer_alloc payload_buf failed\n");
+ rc = OCS_HW_RTN_ERROR;
+ break;
+ }
+ ocs_log_debug(hw->os, "rq[%2d] rq_id %02d default %4d by %4d bytes\n", i, rq->data->id,
+ rq->entry_count, payload_size);
+ rqindex++;
+ }
+
+ return rc ? OCS_HW_RTN_ERROR : OCS_HW_RTN_SUCCESS;
+}
+
+/**
+ * @brief Post the RQ data buffers to the chip.
+ *
+ * @param hw Pointer to HW object.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+ocs_hw_rtn_e
+ocs_hw_rx_post(ocs_hw_t *hw)
+{
+ uint32_t i;
+ uint32_t idx;
+ uint32_t rq_idx;
+ int32_t rc = 0;
+
+ /*
+ * In RQ pair mode, we MUST post the header and payload buffer at the
+ * same time.
+ */
+ for (rq_idx = 0, idx = 0; rq_idx < hw->hw_rq_count; rq_idx++) {
+ hw_rq_t *rq = hw->hw_rq[rq_idx];
+
+ for (i = 0; i < rq->entry_count-1; i++) {
+ ocs_hw_sequence_t *seq = ocs_array_get(hw->seq_pool, idx++);
+ ocs_hw_assert(seq != NULL);
+
+ seq->header = &rq->hdr_buf[i];
+
+ seq->payload = &rq->payload_buf[i];
+
+ rc = ocs_hw_sequence_free(hw, seq);
+ if (rc) {
+ break;
+ }
+ }
+ if (rc) {
+ break;
+ }
+ }
+
+ return rc;
+}
+
+/**
+ * @brief Free the RQ data buffers.
+ *
+ * @param hw Pointer to HW object.
+ *
+ */
+void
+ocs_hw_rx_free(ocs_hw_t *hw)
+{
+ hw_rq_t *rq;
+ uint32_t i;
+
+ /* Free hw_rq buffers */
+ for (i = 0; i < hw->hw_rq_count; i++) {
+ rq = hw->hw_rq[i];
+ if (rq != NULL) {
+ ocs_hw_rx_buffer_free(hw, rq->hdr_buf, rq->entry_count);
+ rq->hdr_buf = NULL;
+ ocs_hw_rx_buffer_free(hw, rq->payload_buf, rq->entry_count);
+ rq->payload_buf = NULL;
+ }
+ }
+}
+
+/**
+ * @brief HW async call context structure.
+ */
+typedef struct {
+ ocs_hw_async_cb_t callback;
+ void *arg;
+ uint8_t cmd[SLI4_BMBX_SIZE];
+} ocs_hw_async_call_ctx_t;
+
+/**
+ * @brief HW async callback handler
+ *
+ * @par Description
+ * This function is called when the NOP mailbox command completes. The callback stored
+ * in the requesting context is invoked.
+ *
+ * @param hw Pointer to HW object.
+ * @param status Completion status.
+ * @param mqe Pointer to mailbox completion queue entry.
+ * @param arg Caller-provided argument.
+ *
+ * @return None.
+ */
+static void
+ocs_hw_async_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
+{
+ ocs_hw_async_call_ctx_t *ctx = arg;
+
+ if (ctx != NULL) {
+ if (ctx->callback != NULL) {
+ (*ctx->callback)(hw, status, mqe, ctx->arg);
+ }
+ ocs_free(hw->os, ctx, sizeof(*ctx));
+ }
+}
+
+/**
+ * @brief Make an async callback using NOP mailbox command
+ *
+ * @par Description
+ * Post a NOP mailbox command; the callback with argument is invoked upon completion
+ * while in the event processing context.
+ *
+ * @param hw Pointer to HW object.
+ * @param callback Pointer to callback function.
+ * @param arg Caller-provided callback argument.
+ *
+ * @return Returns 0 on success, or a negative error code value on failure.
+ */
+int32_t
+ocs_hw_async_call(ocs_hw_t *hw, ocs_hw_async_cb_t callback, void *arg)
+{
+ int32_t rc = 0;
+ ocs_hw_async_call_ctx_t *ctx;
+
+	/*
+	 * Allocate a callback context (which includes the mailbox command
+	 * buffer); it must be persistent because the mailbox command submission
+	 * may be queued and executed later.
+	 */
+ ctx = ocs_malloc(hw->os, sizeof(*ctx), OCS_M_ZERO | OCS_M_NOWAIT);
+ if (ctx == NULL) {
+ ocs_log_err(hw->os, "failed to malloc async call context\n");
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+ ctx->callback = callback;
+ ctx->arg = arg;
+
+ /* Build and send a NOP mailbox command */
+	if (sli_cmd_common_nop(&hw->sli, ctx->cmd, sizeof(ctx->cmd), 0) == 0) {
+		ocs_log_err(hw->os, "COMMON_NOP format failure\n");
+		ocs_free(hw->os, ctx, sizeof(*ctx));
+		return -1;	/* don't submit; ctx was just freed */
+	}
+
+ if (ocs_hw_command(hw, ctx->cmd, OCS_CMD_NOWAIT, ocs_hw_async_cb, ctx)) {
+ ocs_log_err(hw->os, "COMMON_NOP command failure\n");
+ ocs_free(hw->os, ctx, sizeof(*ctx));
+ rc = -1;
+ }
+ return rc;
+}
+
+/**
+ * @brief Initialize the reqtag pool.
+ *
+ * @par Description
+ * The WQ request tag pool is initialized.
+ *
+ * @param hw Pointer to HW object.
+ *
+ * @return Returns 0 on success, or a negative error code value on failure.
+ */
+ocs_hw_rtn_e
+ocs_hw_reqtag_init(ocs_hw_t *hw)
+{
+ if (hw->wq_reqtag_pool == NULL) {
+ hw->wq_reqtag_pool = ocs_pool_alloc(hw->os, sizeof(hw_wq_callback_t), 65536, TRUE);
+ if (hw->wq_reqtag_pool == NULL) {
+ ocs_log_err(hw->os, "ocs_pool_alloc hw_wq_callback_t failed\n");
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+ }
+ ocs_hw_reqtag_reset(hw);
+ return OCS_HW_RTN_SUCCESS;
+}
+
+/**
+ * @brief Allocate a WQ request tag.
+ *
+ * Allocate and populate a WQ request tag from the WQ request tag pool.
+ *
+ * @param hw Pointer to HW object.
+ * @param callback Callback function.
+ * @param arg Pointer to callback argument.
+ *
+ * @return Returns pointer to allocated WQ request tag, or NULL if object cannot be allocated.
+ */
+hw_wq_callback_t *
+ocs_hw_reqtag_alloc(ocs_hw_t *hw, void (*callback)(void *arg, uint8_t *cqe, int32_t status), void *arg)
+{
+ hw_wq_callback_t *wqcb;
+
+ ocs_hw_assert(callback != NULL);
+
+ wqcb = ocs_pool_get(hw->wq_reqtag_pool);
+ if (wqcb != NULL) {
+ ocs_hw_assert(wqcb->callback == NULL);
+ wqcb->callback = callback;
+ wqcb->arg = arg;
+ }
+ return wqcb;
+}
+
+/**
+ * @brief Free a WQ request tag.
+ *
+ * Free the passed in WQ request tag.
+ *
+ * @param hw Pointer to HW object.
+ * @param wqcb Pointer to WQ request tag object to free.
+ *
+ * @return None.
+ */
+void
+ocs_hw_reqtag_free(ocs_hw_t *hw, hw_wq_callback_t *wqcb)
+{
+ ocs_hw_assert(wqcb->callback != NULL);
+ wqcb->callback = NULL;
+ wqcb->arg = NULL;
+ ocs_pool_put(hw->wq_reqtag_pool, wqcb);
+}
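+
+/*
+ * A minimal sketch of the request tag lifecycle described above
+ * (illustrative only, not part of this commit; "my_wqe_done" and
+ * "my_submit" are hypothetical):
+ */
+#if 0
+static void
+my_wqe_done(void *arg, uint8_t *cqe, int32_t status)
+{
+	/* completion processing for the WQE identified by this tag */
+}
+
+static int32_t
+my_submit(ocs_hw_t *hw, void *arg)
+{
+	hw_wq_callback_t *wqcb = ocs_hw_reqtag_alloc(hw, my_wqe_done, arg);
+
+	if (wqcb == NULL) {
+		return -1;
+	}
+	/*
+	 * Place wqcb->instance_index in the WQE request tag field and submit;
+	 * the completion path looks the tag back up with
+	 * ocs_hw_reqtag_get_instance(), invokes the stored callback, and
+	 * returns the tag with ocs_hw_reqtag_free().
+	 */
+	return 0;
+}
+#endif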
+
+/**
+ * @brief Return WQ request tag by index.
+ *
+ * @par Description
+ * Return pointer to WQ request tag object given an index.
+ *
+ * @param hw Pointer to HW object.
+ * @param instance_index Index of WQ request tag to return.
+ *
+ * @return Pointer to WQ request tag, or NULL.
+ */
+hw_wq_callback_t *
+ocs_hw_reqtag_get_instance(ocs_hw_t *hw, uint32_t instance_index)
+{
+ hw_wq_callback_t *wqcb;
+
+ wqcb = ocs_pool_get_instance(hw->wq_reqtag_pool, instance_index);
+ if (wqcb == NULL) {
+ ocs_log_err(hw->os, "wqcb for instance %d is null\n", instance_index);
+ }
+ return wqcb;
+}
+
+/**
+ * @brief Reset the WQ request tag pool.
+ *
+ * @par Description
+ * Reset the WQ request tag pool, returning all to the free list.
+ *
+ * @param hw pointer to HW object.
+ *
+ * @return None.
+ */
+void
+ocs_hw_reqtag_reset(ocs_hw_t *hw)
+{
+ hw_wq_callback_t *wqcb;
+ uint32_t i;
+
+ /* Remove all from freelist */
+ while(ocs_pool_get(hw->wq_reqtag_pool) != NULL) {
+ ;
+ }
+
+ /* Put them all back */
+ for (i = 0; ((wqcb = ocs_pool_get_instance(hw->wq_reqtag_pool, i)) != NULL); i++) {
+ wqcb->instance_index = i;
+ wqcb->callback = NULL;
+ wqcb->arg = NULL;
+ ocs_pool_put(hw->wq_reqtag_pool, wqcb);
+ }
+}
+
+/**
+ * @brief Handle HW assertion
+ *
+ * HW assert, display diagnostic message, and abort.
+ *
+ * @param cond string describing failing assertion condition
+ * @param filename file name
+ * @param linenum line number
+ *
+ * @return none
+ */
+void
+_ocs_hw_assert(const char *cond, const char *filename, int linenum)
+{
+ ocs_printf("%s(%d): HW assertion (%s) failed\n", filename, linenum, cond);
+ ocs_abort();
+ /* no return */
+}
+
+/**
+ * @brief Handle HW verify
+ *
+ * HW verify, display diagnostic message, dump stack and return.
+ *
+ * @param cond string describing failing verify condition
+ * @param filename file name
+ * @param linenum line number
+ *
+ * @return none
+ */
+void
+_ocs_hw_verify(const char *cond, const char *filename, int linenum)
+{
+ ocs_printf("%s(%d): HW verify (%s) failed\n", filename, linenum, cond);
+ ocs_print_stack();
+}
+
+/**
+ * @brief Requeue an XRI
+ *
+ * @par Description
+ * Requeue an XRI, reposting its auto xfer rdy buffer.
+ *
+ * @param hw Pointer to HW object.
+ * @param io Pointer to HW IO
+ *
+ * @return Returns 0 if successful, or -1 on failure.
+ */
+int32_t
+ocs_hw_reque_xri(ocs_hw_t *hw, ocs_hw_io_t *io)
+{
+ int32_t rc = 0;
+
+ rc = ocs_hw_rqpair_auto_xfer_rdy_buffer_post(hw, io, 1);
+ if (rc) {
+ ocs_list_add_tail(&hw->io_port_dnrx, io);
+ rc = -1;
+ goto exit_ocs_hw_reque_xri;
+ }
+
+ io->auto_xfer_rdy_dnrx = 0;
+ io->type = OCS_HW_IO_DNRX_REQUEUE;
+ if (sli_requeue_xri_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->indicator, OCS_HW_REQUE_XRI_REGTAG, SLI4_CQ_DEFAULT)) {
+ /* Clear buffer from XRI */
+ ocs_pool_put(hw->auto_xfer_rdy_buf_pool, io->axr_buf);
+ io->axr_buf = NULL;
+
+ ocs_log_err(hw->os, "requeue_xri WQE error\n");
+ ocs_list_add_tail(&hw->io_port_dnrx, io);
+
+ rc = -1;
+ goto exit_ocs_hw_reque_xri;
+ }
+
+ if (io->wq == NULL) {
+ io->wq = ocs_hw_queue_next_wq(hw, io);
+ ocs_hw_assert(io->wq != NULL);
+ }
+
+ /*
+ * Add IO to active io wqe list before submitting, in case the
+ * wcqe processing preempts this thread.
+ */
+ OCS_STAT(hw->tcmd_wq_submit[io->wq->instance]++);
+ OCS_STAT(io->wq->use_count++);
+
+	rc = hw_wq_write(io->wq, &io->wqe);
+	if (rc < 0) {
+		ocs_log_err(hw->os, "sli_queue_write reque xri failed: %d\n", rc);
+		rc = -1;
+	} else {
+		rc = 0;
+	}
+
+exit_ocs_hw_reque_xri:
+	return rc;
+}
+
+uint32_t
+ocs_hw_get_def_wwn(ocs_t *ocs, uint32_t chan, uint64_t *wwpn, uint64_t *wwnn)
+{
+ sli4_t *sli4 = &ocs->hw.sli;
+ ocs_dma_t dma;
+ uint8_t *payload = NULL;
+
+ int indicator = sli4->config.extent[SLI_RSRC_FCOE_VPI].base[0] + chan;
+
+ /* allocate memory for the service parameters */
+ if (ocs_dma_alloc(ocs, &dma, 112, 4)) {
+ ocs_log_err(ocs, "Failed to allocate DMA memory\n");
+ return 1;
+ }
+
+ if (0 == sli_cmd_read_sparm64(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
+ &dma, indicator)) {
+ ocs_log_err(ocs, "READ_SPARM64 allocation failure\n");
+ ocs_dma_free(ocs, &dma);
+ return 1;
+ }
+
+ if (sli_bmbx_command(sli4)) {
+ ocs_log_err(ocs, "READ_SPARM64 command failure\n");
+ ocs_dma_free(ocs, &dma);
+ return 1;
+ }
+
+ payload = dma.virt;
+ ocs_memcpy(wwpn, payload + SLI4_READ_SPARM64_WWPN_OFFSET, sizeof(*wwpn));
+ ocs_memcpy(wwnn, payload + SLI4_READ_SPARM64_WWNN_OFFSET, sizeof(*wwnn));
+ ocs_dma_free(ocs, &dma);
+ return 0;
+}
+
+/**
+ * @page fc_hw_api_overview HW APIs
+ * - @ref devInitShutdown
+ * - @ref domain
+ * - @ref port
+ * - @ref node
+ * - @ref io
+ * - @ref interrupt
+ *
+ * <div class="overview">
+ * The Hardware Abstraction Layer (HW) insulates the higher-level code from the SLI-4
+ * message details, but the higher level code must still manage domains, ports,
+ * IT nexuses, and IOs. The HW API is designed to help the higher level manage
+ * these objects.<br><br>
+ *
+ * The HW uses function callbacks to notify the higher-level code of events
+ * that are received from the chip. There are currently three types of
+ * functions that may be registered:
+ *
+ * <ul><li>domain – This function is called whenever a domain event is generated
+ * within the HW. Examples include discovery of a new FCF, disruption of a
+ * connection to a domain, and allocation callbacks.</li>
+ * <li>unsolicited – This function is called whenever new data is received in
+ * the SLI-4 receive queue.</li>
+ * <li>rnode – This function is called for remote node events, such as attach status
+ * and allocation callbacks.</li></ul>
+ *
+ * Upper layer functions may be registered by using the ocs_hw_callback() function.
+ *
+ * <img src="elx_fc_hw.jpg" alt="FC/FCoE HW" title="FC/FCoE HW" align="right"/>
+ * <h2>FC/FCoE HW API</h2>
+ * The FC/FCoE HW component builds upon the SLI-4 component to establish a flexible
+ * interface for creating the necessary common objects and sending I/Os. It may be used
+ * “as is” in customer implementations or it can serve as an example of typical interactions
+ * between a driver and the SLI-4 hardware. The broad categories of functionality include:
+ *
+ * <ul><li>Setting-up and tearing-down of the HW.</li>
+ * <li>Allocating and using the common objects (SLI Port, domain, remote node).</li>
+ * <li>Sending and receiving I/Os.</li></ul>
+ *
+ * <h3>HW Setup</h3>
+ * To set up the HW:
+ *
+ * <ol>
+ * <li>Set up the HW object using ocs_hw_setup().<br>
+ * This step performs a basic configuration of the SLI-4 component and the HW to
+ * enable querying the hardware for its capabilities. At this stage, the HW is not
+ * capable of general operations (such as, receiving events or sending I/Os).</li><br><br>
+ * <li>Configure the HW according to the driver requirements.<br>
+ * The HW provides functions to discover hardware capabilities (ocs_hw_get()), as
+ * well as configures the amount of resources required (ocs_hw_set()). The driver
+ * must also register callback functions (ocs_hw_callback()) to receive notification of
+ * various asynchronous events.<br><br>
+ * @b Note: Once configured, the driver must initialize the HW (ocs_hw_init()). This
+ * step creates the underlying queues, commits resources to the hardware, and
+ * prepares the hardware for operation. At this point the hardware is
+ * operational, but the port is not yet online and cannot send or receive
+ * data.</li><br><br>
+ * <li>Finally, the driver can bring the port online (ocs_hw_port_control()).<br>
+ * When the link comes up, the HW determines if a domain is present and notifies the
+ * driver using the domain callback function. This is the starting point of the driver's
+ * interaction with the common objects.<br><br>
+ * @b Note: For FCoE, there may be more than one domain available and, therefore,
+ * more than one callback.</li>
+ * </ol>
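+ *
+ * A compressed sketch of those steps (illustrative only; the exact argument
+ * lists are assumptions, and error handling is omitted):
+ * @code
+ * ocs_hw_setup(&hw, os, SLI4_PORT_TYPE_FC);                  // step 1
+ * ocs_hw_get(&hw, OCS_HW_MAX_IO, &max_io);                   // step 2: query
+ * ocs_hw_set(&hw, OCS_HW_N_IO, n_io);                        //   ...configure
+ * ocs_hw_callback(&hw, OCS_HW_CB_DOMAIN, my_domain_cb, ctx);
+ * ocs_hw_init(&hw);
+ * ocs_hw_port_control(&hw, OCS_HW_PORT_INIT, 0, NULL, NULL); // step 3
+ * @endcode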
+ *
+ * <h3>Allocating and Using Common Objects</h3>
+ * Common objects provide a mechanism through which the various OneCore Storage
+ * driver components share and track information. These data structures are primarily
+ * used to track SLI component information but can be extended by other components, if
+ * needed. The main objects are:
+ *
+ * <ul><li>DMA – the ocs_dma_t object describes a memory region suitable for direct
+ * memory access (DMA) transactions.</li>
+ * <li>SCSI domain – the ocs_domain_t object represents the SCSI domain, including
+ * any infrastructure devices such as FC switches and FC forwarders. The domain
+ * object contains both an FCFI and a VFI.</li>
+ * <li>SLI Port (sport) – the ocs_sli_port_t object represents the connection between
+ * the driver and the SCSI domain. The SLI Port object contains a VPI.</li>
+ * <li>Remote node – the ocs_remote_node_t represents a connection between the SLI
+ * Port and another device in the SCSI domain. The node object contains an RPI.</li></ul>
+ *
+ * Before the driver can send I/Os, it must allocate the SCSI domain, SLI Port, and remote
+ * node common objects and establish the connections between them. The goal is to
+ * connect the driver to the SCSI domain to exchange I/Os with other devices. These
+ * common object connections are shown in the following figure, FC Driver Common Objects:
+ * <img src="elx_fc_common_objects.jpg"
+ * alt="FC Driver Common Objects" title="FC Driver Common Objects" align="center"/>
+ *
+ * The first step is to create a connection to the domain by allocating an SLI Port object.
+ * The SLI Port object represents a particular FC ID and must be initialized with one. With
+ * the SLI Port object, the driver can discover the available SCSI domain(s). On identifying
+ * a domain, the driver allocates a domain object and attaches to it using the previous SLI
+ * port object.<br><br>
+ *
+ * @b Note: In some cases, the driver may need to negotiate service parameters (that is,
+ * FLOGI) with the domain before attaching.<br><br>
+ *
+ * Once attached to the domain, the driver can discover and attach to other devices
+ * (remote nodes). The exact discovery method depends on the driver, but it typically
+ * includes using a position map, querying the fabric name server, or an out-of-band
+ * method. In most cases, it is necessary to log in with devices before performing I/Os.
+ * Prior to sending login-related ELS commands (ocs_hw_srrs_send()), the driver must
+ * allocate a remote node object (ocs_hw_node_alloc()). If the login negotiation is
+ * successful, the driver must attach the nodes (ocs_hw_node_attach()) to the SLI Port
+ * before exchanging FCP I/O.<br><br>
+ *
+ * @b Note: The HW manages both the well known fabric address and the name server as
+ * nodes in the domain. Therefore, the driver must allocate node objects prior to
+ * communicating with either of these entities.
+ *
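+ * An abbreviated sketch of the login flow above (illustrative only; the
+ * argument lists are assumptions, and the ELS payload setup, completion
+ * handling, and error paths are omitted):
+ * @code
+ * ocs_remote_node_t rnode;
+ *
+ * ocs_hw_node_alloc(hw, &rnode, remote_fc_id, sport);    // before any ELS
+ * ocs_hw_srrs_send(hw, OCS_HW_ELS_REQ, els_io, &plogi_dma, plogi_len,
+ *                  &rsp_dma, &rnode, &iparam, my_els_done, my_arg);
+ * // ...once the PLOGI negotiation succeeds:
+ * ocs_hw_node_attach(hw, &rnode, &sparams_dma);          // then FCP I/O
+ * @endcode
+ *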
+ * <h3>Sending and Receiving I/Os</h3>
+ * The HW provides separate interfaces for sending BLS/ ELS/ FC-CT and FCP, but the
+ * commands are conceptually similar. Since the commands complete asynchronously,
+ * the caller must provide a HW I/O object that maintains the I/O state, as well as
+ * provide a callback function. The driver may use the same callback function for all I/O
+ * operations, but each operation must use a unique HW I/O object. In the SLI-4
+ * architecture, there is a direct association between the HW I/O object and the SGL used
+ * to describe the data. Therefore, a driver typically performs the following operations:
+ *
+ * <ul><li>Allocates a HW I/O object (ocs_hw_io_alloc()).</li>
+ * <li>Formats the SGL, specifying both the HW I/O object and the SGL.
+ * (ocs_hw_io_init_sges() and ocs_hw_io_add_sge()).</li>
+ * <li>Sends the HW I/O (ocs_hw_io_send()).</li></ul>
+ *
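+ * The sketch below (illustrative only; the argument lists are assumptions,
+ * and "rnode", "my_io_done", and the DMA buffer are assumed to exist) shows
+ * that sequence for a single target read:
+ * @code
+ * ocs_hw_io_t *io = ocs_hw_io_alloc(hw);                 // 1. allocate
+ * if (io != NULL) {
+ *         ocs_hw_io_init_sges(hw, io, 1, OCS_HW_IO_TARGET_READ);
+ *         ocs_hw_io_add_sge(hw, io, data_dma.phys, data_len);   // 2. SGL
+ *         ocs_hw_io_send(hw, OCS_HW_IO_TARGET_READ, io, data_len,
+ *                        &iparam, rnode, my_io_done, my_arg);   // 3. send
+ * }
+ * @endcode
+ *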
+ * <h3>HW Tear Down</h3>
+ * To tear down the HW:
+ *
+ * <ol><li>Take the port offline (ocs_hw_port_control()) to prevent receiving further
+ * data and events.</li>
+ * <li>Destroy the HW object (ocs_hw_teardown()).</li>
+ * <li>Free any memory used by the HW, such as buffers for unsolicited data.</li></ol>
+ * <br>
+ * </div><!-- overview -->
+ *
+ */
+
+
+
+
+/**
+ * This contains all hw runtime workaround code. Based on the asic type,
+ * asic revision, and range of fw revisions, a particular workaround may be enabled.
+ *
+ * A workaround may consist of overriding a particular HW/SLI4 value that was initialized
+ * during ocs_hw_setup() (for example the MAX_QUEUE overrides for mis-reported queue
+ * sizes). Or if required, elements of the ocs_hw_workaround_t structure may be set to
+ * control specific runtime behavior.
+ *
+ * It is intended that the controls in ocs_hw_workaround_t be defined functionally. So we
+ * would have the driver look like: "if (hw->workaround.enable_xxx) then ...", rather than
+ * what we might previously have seen as "if this is a BE3, then do xxx".
+ *
+ */
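+
+/*
+ * A minimal sketch of that functional style (illustrative only, not part of
+ * this commit; the helper and its "wcqe_len" parameter are hypothetical):
+ */
+#if 0
+static uint32_t
+effective_tsend_length(ocs_hw_t *hw, ocs_hw_io_t *io, uint32_t wcqe_len)
+{
+	if (hw->workaround.retain_tsend_io_length && (wcqe_len == 0)) {
+		/* WCQE total length is unreliable; this assumes the HW IO
+		 * tracks its own length */
+		return io->length;
+	}
+	return wcqe_len;
+}
+#endif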
+
+
+#define HW_FWREV_ZERO (0ull)
+#define HW_FWREV_MAX (~0ull)
+
+#define SLI4_ASIC_TYPE_ANY 0
+#define SLI4_ASIC_REV_ANY 0
+
+/**
+ * @brief Internal definition of workarounds
+ */
+
+typedef enum {
+ HW_WORKAROUND_TEST = 1,
+ HW_WORKAROUND_MAX_QUEUE, /**< Limits all queues */
+ HW_WORKAROUND_MAX_RQ, /**< Limits only the RQ */
+ HW_WORKAROUND_RETAIN_TSEND_IO_LENGTH,
+ HW_WORKAROUND_WQE_COUNT_METHOD,
+ HW_WORKAROUND_RQE_COUNT_METHOD,
+ HW_WORKAROUND_USE_UNREGISTERD_RPI,
+ HW_WORKAROUND_DISABLE_AR_TGT_DIF, /**< Disable of auto-response target DIF */
+ HW_WORKAROUND_DISABLE_SET_DUMP_LOC,
+ HW_WORKAROUND_USE_DIF_QUARANTINE,
+ HW_WORKAROUND_USE_DIF_SEC_XRI, /**< Use secondary xri for multiple data phases */
+ HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB, /**< FCFI reported in SRB not correct, use "first" registered domain */
+ HW_WORKAROUND_FW_VERSION_TOO_LOW, /**< The FW version is not the min version supported by this driver */
+ HW_WORKAROUND_SGLC_MISREPORTED, /**< Chip supports SGL Chaining but SGLC is not set in SLI4_PARAMS */
+	HW_WORKAROUND_IGNORE_SEND_FRAME_CAPABLE,	/**< Don't use the SEND_FRAME capability if the FW version is too old */
+} hw_workaround_e;
+
+/**
+ * @brief Internal workaround structure instance
+ */
+
+typedef struct {
+ sli4_asic_type_e asic_type;
+ sli4_asic_rev_e asic_rev;
+ uint64_t fwrev_low;
+ uint64_t fwrev_high;
+
+ hw_workaround_e workaround;
+ uint32_t value;
+} hw_workaround_t;
+
+static hw_workaround_t hw_workarounds[] = {
+ {SLI4_ASIC_TYPE_ANY, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
+ HW_WORKAROUND_TEST, 999},
+
+ /* Bug: 127585: if_type == 2 returns 0 for total length placed on
+ * FCP_TSEND64_WQE completions. Note, original driver code enables this
+ * workaround for all asic types
+ */
+ {SLI4_ASIC_TYPE_ANY, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
+ HW_WORKAROUND_RETAIN_TSEND_IO_LENGTH, 0},
+
+ /* Bug: unknown, Lancer A0 has mis-reported max queue depth */
+ {SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_A0, HW_FWREV_ZERO, HW_FWREV_MAX,
+ HW_WORKAROUND_MAX_QUEUE, 2048},
+
+ /* Bug: 143399, BE3 has mis-reported max RQ queue depth */
+ {SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(4,6,293,0),
+ HW_WORKAROUND_MAX_RQ, 2048},
+
+ /* Bug: 143399, skyhawk has mis-reported max RQ queue depth */
+ {SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(10,0,594,0),
+ HW_WORKAROUND_MAX_RQ, 2048},
+
+ /* Bug: 103487, BE3 before f/w 4.2.314.0 has mis-reported WQE count method */
+ {SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(4,2,314,0),
+ HW_WORKAROUND_WQE_COUNT_METHOD, 1},
+
+ /* Bug: 103487, BE3 before f/w 4.2.314.0 has mis-reported RQE count method */
+ {SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(4,2,314,0),
+ HW_WORKAROUND_RQE_COUNT_METHOD, 1},
+
+ /* Bug: 142968, BE3 UE with RPI == 0xffff */
+ {SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
+ HW_WORKAROUND_USE_UNREGISTERD_RPI, 0},
+
+ /* Bug: unknown, Skyhawk won't support auto-response on target T10-PI */
+ {SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
+ HW_WORKAROUND_DISABLE_AR_TGT_DIF, 0},
+
+ {SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(1,1,65,0),
+ HW_WORKAROUND_DISABLE_SET_DUMP_LOC, 0},
+
+ /* Bug: 160124, Skyhawk quarantine DIF XRIs */
+ {SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
+ HW_WORKAROUND_USE_DIF_QUARANTINE, 0},
+
+ /* Bug: 161832, Skyhawk use secondary XRI for multiple data phase TRECV */
+ {SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
+ HW_WORKAROUND_USE_DIF_SEC_XRI, 0},
+
+	/* Bug: xxxxxx, FCFI reported in SRB not correct */
+ {SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
+ HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB, 0},
+#if 0
+ /* Bug: 165642, FW version check for driver */
+ {SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_1(OCS_MIN_FW_VER_LANCER),
+ HW_WORKAROUND_FW_VERSION_TOO_LOW, 0},
+#endif
+ {SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_1(OCS_MIN_FW_VER_SKYHAWK),
+ HW_WORKAROUND_FW_VERSION_TOO_LOW, 0},
+
+ /* Bug 177061, Lancer FW does not set the SGLC bit */
+ {SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
+ HW_WORKAROUND_SGLC_MISREPORTED, 0},
+
+ /* BZ 181208/183914, enable this workaround for ALL revisions */
+ {SLI4_ASIC_TYPE_ANY, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
+ HW_WORKAROUND_IGNORE_SEND_FRAME_CAPABLE, 0},
+};
+
+/**
+ * @brief Function prototypes
+ */
+
+static int32_t ocs_hw_workaround_match(ocs_hw_t *hw, hw_workaround_t *w);
+
+/**
+ * @brief Parse the firmware version (name)
+ *
+ * Parse a string of the form a.b.c.d, returning a uint64_t packed as defined
+ * by the HW_FWREV() macro
+ *
+ * @param fwrev_string pointer to the firmware string
+ *
+ * @return packed firmware revision value
+ */
+
+static uint64_t
+parse_fw_version(const char *fwrev_string)
+{
+ int v[4] = {0};
+ const char *p;
+ int i;
+
+ for (p = fwrev_string, i = 0; *p && (i < 4); i ++) {
+ v[i] = ocs_strtoul(p, 0, 0);
+ while(*p && *p != '.') {
+ p ++;
+ }
+ if (*p) {
+ p ++;
+ }
+ }
+
+ /* Special case for bootleg releases with f/w rev 0.0.9999.0, set to max value */
+ if (v[2] == 9999) {
+ return HW_FWREV_MAX;
+ } else {
+ return HW_FWREV(v[0], v[1], v[2], v[3]);
+ }
+}
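+
+/*
+ * For illustration (arbitrary version numbers):
+ *   parse_fw_version("11.2.156.27") == HW_FWREV(11, 2, 156, 27)
+ *   parse_fw_version("0.0.9999.0")  == HW_FWREV_MAX  (bootleg special case)
+ */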
+
+/**
+ * @brief Test for a workaround match
+ *
+ * Looks at the asic type, asic revision, and fw revision, and returns TRUE if they match.
+ *
+ * @param hw Pointer to the HW structure
+ * @param w Pointer to a workaround structure entry
+ *
+ * @return Return TRUE for a match
+ */
+
+static int32_t
+ocs_hw_workaround_match(ocs_hw_t *hw, hw_workaround_t *w)
+{
+ return (((w->asic_type == SLI4_ASIC_TYPE_ANY) || (w->asic_type == hw->sli.asic_type)) &&
+ ((w->asic_rev == SLI4_ASIC_REV_ANY) || (w->asic_rev == hw->sli.asic_rev)) &&
+ (w->fwrev_low <= hw->workaround.fwrev) &&
+ ((w->fwrev_high == HW_FWREV_MAX) || (hw->workaround.fwrev < w->fwrev_high)));
+}
+
+/**
+ * @brief Setup HW runtime workarounds
+ *
+ * The function is called at the end of ocs_hw_setup() to setup any runtime workarounds
+ * based on the HW/SLI setup.
+ *
+ * @param hw Pointer to HW structure
+ *
+ * @return none
+ */
+
+void
+ocs_hw_workaround_setup(struct ocs_hw_s *hw)
+{
+ hw_workaround_t *w;
+ sli4_t *sli4 = &hw->sli;
+ uint32_t i;
+
+ /* Initialize the workaround settings */
+ ocs_memset(&hw->workaround, 0, sizeof(hw->workaround));
+
+	/* If hw_war_version is non-null, then it's a value that was set by a module parameter
+ * (sorry for the break in abstraction, but workarounds are ... well, workarounds)
+ */
+
+ if (hw->hw_war_version) {
+ hw->workaround.fwrev = parse_fw_version(hw->hw_war_version);
+ } else {
+ hw->workaround.fwrev = parse_fw_version((char*) sli4->config.fw_name[0]);
+ }
+
+ /* Walk the workaround list, if a match is found, then handle it */
+ for (i = 0, w = hw_workarounds; i < ARRAY_SIZE(hw_workarounds); i++, w++) {
+ if (ocs_hw_workaround_match(hw, w)) {
+ switch(w->workaround) {
+
+ case HW_WORKAROUND_TEST: {
+ ocs_log_debug(hw->os, "Override: test: %d\n", w->value);
+ break;
+ }
+
+ case HW_WORKAROUND_RETAIN_TSEND_IO_LENGTH: {
+ ocs_log_debug(hw->os, "HW Workaround: retain TSEND IO length\n");
+ hw->workaround.retain_tsend_io_length = 1;
+ break;
+ }
+ case HW_WORKAROUND_MAX_QUEUE: {
+ sli4_qtype_e q;
+
+ ocs_log_debug(hw->os, "HW Workaround: override max_qentries: %d\n", w->value);
+ for (q = SLI_QTYPE_EQ; q < SLI_QTYPE_MAX; q++) {
+ if (hw->num_qentries[q] > w->value) {
+ hw->num_qentries[q] = w->value;
+ }
+ }
+ break;
+ }
+ case HW_WORKAROUND_MAX_RQ: {
+ ocs_log_debug(hw->os, "HW Workaround: override RQ max_qentries: %d\n", w->value);
+ if (hw->num_qentries[SLI_QTYPE_RQ] > w->value) {
+ hw->num_qentries[SLI_QTYPE_RQ] = w->value;
+ }
+ break;
+ }
+ case HW_WORKAROUND_WQE_COUNT_METHOD: {
+ ocs_log_debug(hw->os, "HW Workaround: set WQE count method=%d\n", w->value);
+ sli4->config.count_method[SLI_QTYPE_WQ] = w->value;
+ sli_calc_max_qentries(sli4);
+ break;
+ }
+ case HW_WORKAROUND_RQE_COUNT_METHOD: {
+ ocs_log_debug(hw->os, "HW Workaround: set RQE count method=%d\n", w->value);
+ sli4->config.count_method[SLI_QTYPE_RQ] = w->value;
+ sli_calc_max_qentries(sli4);
+ break;
+ }
+ case HW_WORKAROUND_USE_UNREGISTERD_RPI:
+ ocs_log_debug(hw->os, "HW Workaround: use unreg'd RPI if rnode->indicator == 0xFFFF\n");
+ hw->workaround.use_unregistered_rpi = TRUE;
+ /*
+ * Allocate an RPI that is never registered, to be used in the case where
+ * a node has been unregistered, and its indicator (RPI) value is set to 0xFFFF
+ */
+ if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_RPI, &hw->workaround.unregistered_rid,
+ &hw->workaround.unregistered_index)) {
+ ocs_log_err(hw->os, "sli_resource_alloc unregistered RPI failed\n");
+ hw->workaround.use_unregistered_rpi = FALSE;
+ }
+ break;
+ case HW_WORKAROUND_DISABLE_AR_TGT_DIF:
+ ocs_log_debug(hw->os, "HW Workaround: disable AR on T10-PI TSEND\n");
+ hw->workaround.disable_ar_tgt_dif = TRUE;
+ break;
+ case HW_WORKAROUND_DISABLE_SET_DUMP_LOC:
+ ocs_log_debug(hw->os, "HW Workaround: disable set_dump_loc\n");
+ hw->workaround.disable_dump_loc = TRUE;
+ break;
+ case HW_WORKAROUND_USE_DIF_QUARANTINE:
+ ocs_log_debug(hw->os, "HW Workaround: use DIF quarantine\n");
+ hw->workaround.use_dif_quarantine = TRUE;
+ break;
+ case HW_WORKAROUND_USE_DIF_SEC_XRI:
+ ocs_log_debug(hw->os, "HW Workaround: use DIF secondary xri\n");
+ hw->workaround.use_dif_sec_xri = TRUE;
+ break;
+ case HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB:
+ ocs_log_debug(hw->os, "HW Workaround: override FCFI in SRB\n");
+ hw->workaround.override_fcfi = TRUE;
+ break;
+
+ case HW_WORKAROUND_FW_VERSION_TOO_LOW:
+ ocs_log_debug(hw->os, "HW Workaround: fw version is below the minimum for this driver\n");
+ hw->workaround.fw_version_too_low = TRUE;
+ break;
+ case HW_WORKAROUND_SGLC_MISREPORTED:
+ ocs_log_debug(hw->os, "HW Workaround: SGLC misreported - chaining is enabled\n");
+ hw->workaround.sglc_misreported = TRUE;
+ break;
+ case HW_WORKAROUND_IGNORE_SEND_FRAME_CAPABLE:
+ ocs_log_debug(hw->os, "HW Workaround: not SEND_FRAME capable - disabled\n");
+ hw->workaround.ignore_send_frame = TRUE;
+ break;
+ } /* switch(w->workaround) */
+ }
+ }
+}
diff --git a/sys/dev/ocs_fc/ocs_hw.h b/sys/dev/ocs_fc/ocs_hw.h
new file mode 100644
index 000000000000..86a7d98c24d3
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs_hw.h
@@ -0,0 +1,1547 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/**
+ * @file
+ * Defines the Hardware Abstraction Layer (HW) interface functions.
+ */
+
+#ifndef _OCS_HW_H
+#define _OCS_HW_H
+
+#include "sli4.h"
+#include "ocs_hw.h"
+#include "ocs_stats.h"
+#include "ocs_utils.h"
+
+typedef struct ocs_hw_io_s ocs_hw_io_t;
+
+
+#if defined(OCS_INCLUDE_DEBUG)
+#else
+#define ocs_queue_history_wq(...)
+#define ocs_queue_history_cqe(...)
+#define ocs_queue_history_init(...)
+#define ocs_queue_history_free(...)
+#endif
+
+/**
+ * @brief HW queue forward declarations
+ */
+typedef struct hw_eq_s hw_eq_t;
+typedef struct hw_cq_s hw_cq_t;
+typedef struct hw_mq_s hw_mq_t;
+typedef struct hw_wq_s hw_wq_t;
+typedef struct hw_rq_s hw_rq_t;
+typedef struct hw_rq_grp_s hw_rq_grp_t;
+
+/* HW asserts/verify
+ *
+ */
+
+extern void _ocs_hw_assert(const char *cond, const char *filename, int linenum);
+extern void _ocs_hw_verify(const char *cond, const char *filename, int linenum);
+
+#if defined(HW_NDEBUG)
+#define ocs_hw_assert(cond)
+#define ocs_hw_verify(cond, ...)
+#else
+#define ocs_hw_assert(cond) \
+ do { \
+ if ((!(cond))) { \
+ _ocs_hw_assert(#cond, __FILE__, __LINE__); \
+ } \
+ } while (0)
+
+#define ocs_hw_verify(cond, ...) \
+ do { \
+ if ((!(cond))) { \
+ _ocs_hw_verify(#cond, __FILE__, __LINE__); \
+ return __VA_ARGS__; \
+ } \
+ } while (0)
+#endif
+#define ocs_hw_verify_arg(cond) ocs_hw_verify(cond, OCS_HW_RTN_INVALID_ARG)
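+
+/*
+ * Usage sketch: ocs_hw_verify() returns from the *calling* function with the
+ * supplied value when the condition fails, for example:
+ *
+ *     ocs_hw_verify(hw != NULL, OCS_HW_RTN_ERROR);
+ *     ocs_hw_verify_arg(io != NULL);	// returns OCS_HW_RTN_INVALID_ARG
+ */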
+
+/*
+ * HW completion loop control parameters.
+ *
+ * The HW completion loop must terminate periodically to keep the OS happy. The
+ * loop terminates when a predefined time has elapsed, but to keep the overhead of
+ * computing time down, the time is only checked after a number of loop iterations
+ * have completed.
+ *
+ * OCS_HW_TIMECHECK_ITERATIONS number of loop iterations between time checks
+ *
+ */
+
+#define OCS_HW_TIMECHECK_ITERATIONS 100
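+
+/*
+ * Sketch of the pattern (illustrative only; the helper names and the
+ * "max_isr_time_msec" bound are hypothetical):
+ *
+ *     for (i = 0; pending_completions(); i++) {
+ *             process_one_completion();
+ *             if (((i % OCS_HW_TIMECHECK_ITERATIONS) == 0) &&
+ *                 (elapsed_msec() >= max_isr_time_msec)) {
+ *                     break;		// yield back to the OS
+ *             }
+ *     }
+ */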
+#define OCS_HW_MAX_NUM_MQ 1
+#define OCS_HW_MAX_NUM_RQ 32
+#define OCS_HW_MAX_NUM_EQ 16
+#define OCS_HW_MAX_NUM_WQ 32
+
+#define OCE_HW_MAX_NUM_MRQ_PAIRS 16
+
+#define OCS_HW_MAX_WQ_CLASS 4
+#define OCS_HW_MAX_WQ_CPU 128
+
+/*
+ * A CQ will be assigned to each WQ (the CQ must have 2X the entries of the WQ
+ * for abort processing), plus a separate one for each RQ pair and one for the MQ.
+ */
+#define OCS_HW_MAX_NUM_CQ ((OCS_HW_MAX_NUM_WQ*2) + 1 + (OCE_HW_MAX_NUM_MRQ_PAIRS * 2))
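+/* With the limits above: (32 * 2) + 1 + (16 * 2) = 97 CQs maximum */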
+
+/*
+ * Q hash - size is the maximum of all the queue sizes, rounded up to the next
+ * power of 2
+ */
+#define OCS_HW_Q_HASH_SIZE B32_NEXT_POWER_OF_2(OCS_MAX(OCS_HW_MAX_NUM_MQ, OCS_MAX(OCS_HW_MAX_NUM_RQ, \
+ OCS_MAX(OCS_HW_MAX_NUM_EQ, OCS_MAX(OCS_HW_MAX_NUM_WQ, \
+ OCS_HW_MAX_NUM_CQ)))))
+
+#define OCS_HW_RQ_HEADER_SIZE 128
+#define OCS_HW_RQ_HEADER_INDEX 0
+
+/**
+ * @brief Options for ocs_hw_command().
+ */
+enum {
+ OCS_CMD_POLL, /**< command executes synchronously and busy-waits for completion */
+ OCS_CMD_NOWAIT, /**< command executes asynchronously. Uses callback */
+};
+
+typedef enum {
+ OCS_HW_RTN_SUCCESS = 0,
+ OCS_HW_RTN_SUCCESS_SYNC = 1,
+ OCS_HW_RTN_ERROR = -1,
+ OCS_HW_RTN_NO_RESOURCES = -2,
+ OCS_HW_RTN_NO_MEMORY = -3,
+ OCS_HW_RTN_IO_NOT_ACTIVE = -4,
+ OCS_HW_RTN_IO_ABORT_IN_PROGRESS = -5,
+ OCS_HW_RTN_IO_PORT_OWNED_ALREADY_ABORTED = -6,
+ OCS_HW_RTN_INVALID_ARG = -7,
+} ocs_hw_rtn_e;
+#define OCS_HW_RTN_IS_ERROR(e) ((e) < 0)
+
+typedef enum {
+ OCS_HW_RESET_FUNCTION,
+ OCS_HW_RESET_FIRMWARE,
+ OCS_HW_RESET_MAX
+} ocs_hw_reset_e;
+
+typedef enum {
+ OCS_HW_N_IO,
+ OCS_HW_N_SGL,
+ OCS_HW_MAX_IO,
+ OCS_HW_MAX_SGE,
+ OCS_HW_MAX_SGL,
+ OCS_HW_MAX_NODES,
+ OCS_HW_MAX_RQ_ENTRIES,
+ OCS_HW_TOPOLOGY, /**< auto, nport, loop */
+ OCS_HW_WWN_NODE,
+ OCS_HW_WWN_PORT,
+ OCS_HW_FW_REV,
+ OCS_HW_FW_REV2,
+ OCS_HW_IPL,
+ OCS_HW_VPD,
+ OCS_HW_VPD_LEN,
+ OCS_HW_MODE, /**< initiator, target, both */
+ OCS_HW_LINK_SPEED,
+ OCS_HW_IF_TYPE,
+ OCS_HW_SLI_REV,
+ OCS_HW_SLI_FAMILY,
+ OCS_HW_RQ_PROCESS_LIMIT,
+ OCS_HW_RQ_DEFAULT_BUFFER_SIZE,
+ OCS_HW_AUTO_XFER_RDY_CAPABLE,
+ OCS_HW_AUTO_XFER_RDY_XRI_CNT,
+ OCS_HW_AUTO_XFER_RDY_SIZE,
+ OCS_HW_AUTO_XFER_RDY_BLK_SIZE,
+ OCS_HW_AUTO_XFER_RDY_T10_ENABLE,
+ OCS_HW_AUTO_XFER_RDY_P_TYPE,
+ OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA,
+ OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID,
+ OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE,
+ OCS_HW_DIF_CAPABLE,
+ OCS_HW_DIF_SEED,
+ OCS_HW_DIF_MODE,
+ OCS_HW_DIF_MULTI_SEPARATE,
+ OCS_HW_DUMP_MAX_SIZE,
+ OCS_HW_DUMP_READY,
+ OCS_HW_DUMP_PRESENT,
+ OCS_HW_RESET_REQUIRED,
+ OCS_HW_FW_ERROR,
+ OCS_HW_FW_READY,
+ OCS_HW_HIGH_LOGIN_MODE,
+ OCS_HW_PREREGISTER_SGL,
+ OCS_HW_HW_REV1,
+ OCS_HW_HW_REV2,
+ OCS_HW_HW_REV3,
+ OCS_HW_LINKCFG,
+ OCS_HW_ETH_LICENSE,
+ OCS_HW_LINK_MODULE_TYPE,
+ OCS_HW_NUM_CHUTES,
+ OCS_HW_WAR_VERSION,
+ OCS_HW_DISABLE_AR_TGT_DIF,
+ OCS_HW_EMULATE_I_ONLY_AAB, /**< emulate IAAB=0 for initiator-commands only */
+ OCS_HW_EMULATE_TARGET_WQE_TIMEOUT, /**< enable driver timeouts for target WQEs */
+ OCS_HW_LINK_CONFIG_SPEED,
+ OCS_HW_CONFIG_TOPOLOGY,
+ OCS_HW_BOUNCE,
+ OCS_HW_PORTNUM,
+ OCS_HW_BIOS_VERSION_STRING,
+ OCS_HW_RQ_SELECT_POLICY,
+ OCS_HW_SGL_CHAINING_CAPABLE,
+ OCS_HW_SGL_CHAINING_ALLOWED,
+ OCS_HW_SGL_CHAINING_HOST_ALLOCATED,
+ OCS_HW_SEND_FRAME_CAPABLE,
+ OCS_HW_RQ_SELECTION_POLICY,
+ OCS_HW_RR_QUANTA,
+ OCS_HW_FILTER_DEF,
+ OCS_HW_MAX_VPORTS,
+ OCS_ESOC,
+ OCS_HW_FW_TIMED_OUT,
+} ocs_hw_property_e;
+
+enum {
+ OCS_HW_TOPOLOGY_AUTO,
+ OCS_HW_TOPOLOGY_NPORT,
+ OCS_HW_TOPOLOGY_LOOP,
+ OCS_HW_TOPOLOGY_NONE,
+ OCS_HW_TOPOLOGY_MAX
+};
+
+enum {
+ OCS_HW_MODE_INITIATOR,
+ OCS_HW_MODE_TARGET,
+ OCS_HW_MODE_BOTH,
+ OCS_HW_MODE_MAX
+};
+
+/**
+ * @brief Port protocols
+ */
+
+typedef enum {
+ OCS_HW_PORT_PROTOCOL_ISCSI,
+ OCS_HW_PORT_PROTOCOL_FCOE,
+ OCS_HW_PORT_PROTOCOL_FC,
+ OCS_HW_PORT_PROTOCOL_OTHER,
+} ocs_hw_port_protocol_e;
+
+#define OCS_HW_MAX_PROFILES 40
+/**
+ * @brief A Profile Descriptor
+ */
+typedef struct {
+ uint32_t profile_index;
+ uint32_t profile_id;
+ char profile_description[512];
+} ocs_hw_profile_descriptor_t;
+
+/**
+ * @brief A Profile List
+ */
+typedef struct {
+ uint32_t num_descriptors;
+ ocs_hw_profile_descriptor_t descriptors[OCS_HW_MAX_PROFILES];
+} ocs_hw_profile_list_t;
+
+
+/**
+ * @brief Defines DIF operation modes
+ */
+enum {
+ OCS_HW_DIF_MODE_INLINE,
+ OCS_HW_DIF_MODE_SEPARATE,
+};
+
+/**
+ * @brief Defines the type of RQ buffer
+ */
+typedef enum {
+ OCS_HW_RQ_BUFFER_TYPE_HDR,
+ OCS_HW_RQ_BUFFER_TYPE_PAYLOAD,
+ OCS_HW_RQ_BUFFER_TYPE_MAX,
+} ocs_hw_rq_buffer_type_e;
+
+/**
+ * @brief Defines a wrapper for the RQ payload buffers so that we can place it
+ * back on the proper queue.
+ */
+typedef struct {
+ uint16_t rqindex;
+ ocs_dma_t dma;
+} ocs_hw_rq_buffer_t;
+
+/**
+ * @brief T10 DIF operations.
+ */
+typedef enum {
+ OCS_HW_DIF_OPER_DISABLED,
+ OCS_HW_SGE_DIF_OP_IN_NODIF_OUT_CRC,
+ OCS_HW_SGE_DIF_OP_IN_CRC_OUT_NODIF,
+ OCS_HW_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM,
+ OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF,
+ OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CRC,
+ OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM,
+ OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CHKSUM,
+ OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_CRC,
+ OCS_HW_SGE_DIF_OP_IN_RAW_OUT_RAW,
+} ocs_hw_dif_oper_e;
+
+#define OCS_HW_DIF_OPER_PASS_THRU OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CRC
+#define OCS_HW_DIF_OPER_STRIP OCS_HW_SGE_DIF_OP_IN_CRC_OUT_NODIF
+#define OCS_HW_DIF_OPER_INSERT OCS_HW_SGE_DIF_OP_IN_NODIF_OUT_CRC
+
+/**
+ * @brief T10 DIF block sizes.
+ */
+typedef enum {
+ OCS_HW_DIF_BK_SIZE_512,
+ OCS_HW_DIF_BK_SIZE_1024,
+ OCS_HW_DIF_BK_SIZE_2048,
+ OCS_HW_DIF_BK_SIZE_4096,
+ OCS_HW_DIF_BK_SIZE_520,
+ OCS_HW_DIF_BK_SIZE_4104,
+ OCS_HW_DIF_BK_SIZE_NA = 0
+} ocs_hw_dif_blk_size_e;
+
+/**
+ * @brief Link configurations.
+ */
+typedef enum {
+ OCS_HW_LINKCFG_4X10G = 0,
+ OCS_HW_LINKCFG_1X40G,
+ OCS_HW_LINKCFG_2X16G,
+ OCS_HW_LINKCFG_4X8G,
+ OCS_HW_LINKCFG_4X1G,
+ OCS_HW_LINKCFG_2X10G,
+ OCS_HW_LINKCFG_2X10G_2X8G,
+
+ /* must be last */
+ OCS_HW_LINKCFG_NA,
+} ocs_hw_linkcfg_e;
+
+/**
+ * @brief link module types
+ *
+ * (note: these just happen to match SLI4 values)
+ */
+
+enum {
+ OCS_HW_LINK_MODULE_TYPE_1GB = 0x0004,
+ OCS_HW_LINK_MODULE_TYPE_2GB = 0x0008,
+ OCS_HW_LINK_MODULE_TYPE_4GB = 0x0040,
+ OCS_HW_LINK_MODULE_TYPE_8GB = 0x0080,
+ OCS_HW_LINK_MODULE_TYPE_10GB = 0x0100,
+ OCS_HW_LINK_MODULE_TYPE_16GB = 0x0200,
+ OCS_HW_LINK_MODULE_TYPE_32GB = 0x0400,
+};
+
+/**
+ * @brief T10 DIF information passed to the transport.
+ */
+typedef struct ocs_hw_dif_info_s {
+ ocs_hw_dif_oper_e dif_oper;
+ ocs_hw_dif_blk_size_e blk_size;
+ uint32_t ref_tag_cmp;
+ uint32_t ref_tag_repl;
+ uint32_t app_tag_cmp:16,
+ app_tag_repl:16;
+ uint32_t check_ref_tag:1,
+ check_app_tag:1,
+ check_guard:1,
+ auto_incr_ref_tag:1,
+ repl_app_tag:1,
+ repl_ref_tag:1,
+ dif:2,
+ dif_separate:1,
+
+ /* If the APP TAG is 0xFFFF, disable checking the REF TAG and CRC fields */
+ disable_app_ffff:1,
+
+ /* if the APP TAG is 0xFFFF and REF TAG is 0xFFFF_FFFF, disable checking the received CRC field. */
+ disable_app_ref_ffff:1,
+
+ :21;
+ uint16_t dif_seed;
+} ocs_hw_dif_info_t;
+typedef enum {
+ OCS_HW_ELS_REQ, /**< ELS request */
+ OCS_HW_ELS_RSP, /**< ELS response */
+ OCS_HW_ELS_RSP_SID, /**< ELS response, override the S_ID */
+ OCS_HW_FC_CT, /**< FC Common Transport */
+ OCS_HW_FC_CT_RSP, /**< FC Common Transport Response */
+ OCS_HW_BLS_ACC, /**< BLS accept (BA_ACC) */
+ OCS_HW_BLS_ACC_SID, /**< BLS accept (BA_ACC), override the S_ID */
+ OCS_HW_BLS_RJT, /**< BLS reject (BA_RJT) */
+ OCS_HW_BCAST, /**< Class 3 broadcast sequence */
+ OCS_HW_IO_TARGET_READ,
+ OCS_HW_IO_TARGET_WRITE,
+ OCS_HW_IO_TARGET_RSP,
+ OCS_HW_IO_INITIATOR_READ,
+ OCS_HW_IO_INITIATOR_WRITE,
+ OCS_HW_IO_INITIATOR_NODATA,
+ OCS_HW_IO_DNRX_REQUEUE,
+ OCS_HW_IO_MAX,
+} ocs_hw_io_type_e;
+
+typedef enum {
+ OCS_HW_IO_STATE_FREE,
+ OCS_HW_IO_STATE_INUSE,
+ OCS_HW_IO_STATE_WAIT_FREE,
+ OCS_HW_IO_STATE_WAIT_SEC_HIO,
+} ocs_hw_io_state_e;
+
+/* Descriptive strings for the HW IO request types (note: these must always
+ * match up with the ocs_hw_io_type_e declaration) */
+#define OCS_HW_IO_TYPE_STRINGS \
+ "ELS request", \
+ "ELS response", \
+ "ELS response(set SID)", \
+	"FC CT request", \
+	"FC CT response", \
+	"BLS accept", \
+	"BLS accept(set SID)", \
+	"BLS reject", \
+	"class 3 broadcast", \
+ "target read", \
+ "target write", \
+ "target response", \
+ "initiator read", \
+ "initiator write", \
+ "initiator nodata",
+
+/**
+ * @brief HW command context.
+ *
+ * Stores the state for the asynchronous commands sent to the hardware.
+ */
+typedef struct ocs_command_ctx_s {
+ ocs_list_t link;
+ /**< Callback function */
+ int32_t (*cb)(struct ocs_hw_s *, int32_t, uint8_t *, void *);
+ void *arg; /**< Argument for callback */
+ uint8_t *buf; /**< buffer holding command / results */
+ void *ctx; /**< upper layer context */
+} ocs_command_ctx_t;
+
+typedef struct ocs_hw_sgl_s {
+ uintptr_t addr;
+ size_t len;
+} ocs_hw_sgl_t;
+
+/**
+ * @brief HW callback type
+ *
+ * Typedef for HW "done" callback.
+ */
+typedef int32_t (*ocs_hw_done_t)(struct ocs_hw_io_s *, ocs_remote_node_t *, uint32_t len, int32_t status, uint32_t ext, void *ul_arg);
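+
+/*
+ * Illustrative sketch, not part of the driver: a minimal "done" callback
+ * matching ocs_hw_done_t. The function name and its body are hypothetical.
+ */
+#if 0
+static int32_t
+example_io_done(struct ocs_hw_io_s *hio, ocs_remote_node_t *rnode,
+		uint32_t len, int32_t status, uint32_t ext, void *ul_arg)
+{
+	/* A real handler would inspect status/ext and release ul_arg state. */
+	return 0;
+}
+#endif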
+
+
+typedef union ocs_hw_io_param_u {
+ struct {
+ uint16_t ox_id;
+ uint16_t rx_id;
+ uint8_t payload[12]; /**< big enough for ABTS BA_ACC */
+ } bls;
+ struct {
+ uint32_t s_id;
+ uint16_t ox_id;
+ uint16_t rx_id;
+ uint8_t payload[12]; /**< big enough for ABTS BA_ACC */
+ } bls_sid;
+ struct {
+ uint8_t r_ctl;
+ uint8_t type;
+ uint8_t df_ctl;
+ uint8_t timeout;
+ } bcast;
+ struct {
+ uint16_t ox_id;
+ uint8_t timeout;
+ } els;
+ struct {
+ uint32_t s_id;
+ uint16_t ox_id;
+ uint8_t timeout;
+ } els_sid;
+ struct {
+ uint8_t r_ctl;
+ uint8_t type;
+ uint8_t df_ctl;
+ uint8_t timeout;
+ } fc_ct;
+ struct {
+ uint8_t r_ctl;
+ uint8_t type;
+ uint8_t df_ctl;
+ uint8_t timeout;
+ uint16_t ox_id;
+ } fc_ct_rsp;
+ struct {
+ uint32_t offset;
+ uint16_t ox_id;
+ uint16_t flags;
+ uint8_t cs_ctl;
+ ocs_hw_dif_oper_e dif_oper;
+ ocs_hw_dif_blk_size_e blk_size;
+ uint8_t timeout;
+ uint32_t app_id;
+ } fcp_tgt;
+ struct {
+ ocs_dma_t *cmnd;
+ ocs_dma_t *rsp;
+ ocs_hw_dif_oper_e dif_oper;
+ ocs_hw_dif_blk_size_e blk_size;
+ uint32_t cmnd_size;
+ uint16_t flags;
+ uint8_t timeout;
+ uint32_t first_burst;
+ } fcp_ini;
+} ocs_hw_io_param_t;
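+
+/*
+ * Illustrative sketch, not part of the driver: filling the fcp_tgt member
+ * of the union above for a target read. The local variable names and the
+ * timeout value are hypothetical; ocs_hw_io_send() is declared later in
+ * this header.
+ */
+#if 0
+	ocs_hw_io_param_t iparam = { 0 };
+
+	iparam.fcp_tgt.ox_id = ox_id;		/* originator exchange ID */
+	iparam.fcp_tgt.offset = 0;		/* relative offset of this burst */
+	iparam.fcp_tgt.timeout = 30;		/* seconds */
+	rc = ocs_hw_io_send(hw, OCS_HW_IO_TARGET_READ, hio, xfer_len,
+			    &iparam, rnode, example_io_done, ul_arg);
+#endif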
+
+/**
+ * @brief WQ steering mode
+ */
+typedef enum {
+ OCS_HW_WQ_STEERING_CLASS,
+ OCS_HW_WQ_STEERING_REQUEST,
+ OCS_HW_WQ_STEERING_CPU,
+} ocs_hw_wq_steering_e;
+
+/**
+ * @brief HW wqe object
+ */
+typedef struct {
+ uint32_t abort_wqe_submit_needed:1, /**< set if abort wqe needs to be submitted */
+			send_abts:1,	/**< set to 1 to have the hardware automatically send ABTS */
+ auto_xfer_rdy_dnrx:1, /**< TRUE if DNRX was set on this IO */
+ :29;
+ uint32_t id;
+ uint32_t abort_reqtag;
+ ocs_list_link_t link;
+ uint8_t *wqebuf; /**< work queue entry buffer */
+} ocs_hw_wqe_t;
+
+/**
+ * @brief HW IO object.
+ *
+ * Stores the per-IO information necessary for both the lower (SLI) and upper
+ * layers (ocs).
+ */
+struct ocs_hw_io_s {
+ /* Owned by HW */
+ ocs_list_link_t link; /**< used for busy, wait_free, free lists */
+ ocs_list_link_t wqe_link; /**< used for timed_wqe list */
+ ocs_list_link_t dnrx_link; /**< used for io posted dnrx list */
+ ocs_hw_io_state_e state; /**< state of IO: free, busy, wait_free */
+ ocs_hw_wqe_t wqe; /**< Work queue object, with link for pending */
+ ocs_lock_t axr_lock; /**< Lock to synchronize TRSP and AXT Data/Cmd Cqes */
+ ocs_hw_t *hw; /**< pointer back to hardware context */
+ ocs_remote_node_t *rnode;
+ struct ocs_hw_auto_xfer_rdy_buffer_s *axr_buf;
+ ocs_dma_t xfer_rdy;
+ uint16_t type;
+ uint32_t port_owned_abort_count; /**< IO abort count */
+ hw_wq_t *wq; /**< WQ assigned to the exchange */
+ uint32_t xbusy; /**< Exchange is active in FW */
+ ocs_hw_done_t done; /**< Function called on IO completion */
+ void *arg; /**< argument passed to "IO done" callback */
+ ocs_hw_done_t abort_done; /**< Function called on abort completion */
+ void *abort_arg; /**< argument passed to "abort done" callback */
+ ocs_ref_t ref; /**< refcount object */
+ size_t length; /**< needed for bug O127585: length of IO */
+ uint8_t tgt_wqe_timeout; /**< timeout value for target WQEs */
+ uint64_t submit_ticks; /**< timestamp when current WQE was submitted */
+
+ uint32_t status_saved:1, /**< if TRUE, latched status should be returned */
+ abort_in_progress:1, /**< if TRUE, abort is in progress */
+ quarantine:1, /**< set if IO to be quarantined */
+ quarantine_first_phase:1, /**< set if first phase of IO */
+			is_port_owned:1, /**< set if POST_XRI was used to send the XRI to the chip */
+ auto_xfer_rdy_dnrx:1, /**< TRUE if DNRX was set on this IO */
+ :26;
+ uint32_t saved_status; /**< latched status */
+ uint32_t saved_len; /**< latched length */
+ uint32_t saved_ext; /**< latched extended status */
+
+ hw_eq_t *eq; /**< EQ that this HIO came up on */
+ ocs_hw_wq_steering_e wq_steering; /**< WQ steering mode request */
+ uint8_t wq_class; /**< WQ class if steering mode is Class */
+
+ /* Owned by SLI layer */
+ uint16_t reqtag; /**< request tag for this HW IO */
+ uint32_t abort_reqtag; /**< request tag for an abort of this HW IO (note: this is a 32 bit value
+ to allow us to use UINT32_MAX as an uninitialized value) */
+ uint32_t indicator; /**< XRI */
+ ocs_dma_t def_sgl; /**< default scatter gather list */
+ uint32_t def_sgl_count; /**< count of SGEs in default SGL */
+ ocs_dma_t *sgl; /**< pointer to current active SGL */
+ uint32_t sgl_count; /**< count of SGEs in io->sgl */
+ uint32_t first_data_sge; /**< index of first data SGE */
+ ocs_dma_t *ovfl_sgl; /**< overflow SGL */
+	uint32_t ovfl_sgl_count;	/**< count of SGEs in overflow SGL */
+ sli4_lsp_sge_t *ovfl_lsp; /**< pointer to overflow segment length */
+ ocs_hw_io_t *ovfl_io; /**< Used for SGL chaining on skyhawk */
+ uint32_t n_sge; /**< number of active SGEs */
+ uint32_t sge_offset;
+
+ /* BZ 161832 Workaround: */
+ struct ocs_hw_io_s *sec_hio; /**< Secondary HW IO context */
+ ocs_hw_io_param_t sec_iparam; /**< Secondary HW IO context saved iparam */
+ uint32_t sec_len; /**< Secondary HW IO context saved len */
+
+ /* Owned by upper layer */
+ void *ul_io; /**< where upper layer can store reference to its IO */
+};
+
+typedef enum {
+ OCS_HW_PORT_INIT,
+ OCS_HW_PORT_SHUTDOWN,
+ OCS_HW_PORT_SET_LINK_CONFIG,
+} ocs_hw_port_e;
+
+/**
+ * @brief Fabric/Domain events
+ */
+typedef enum {
+ OCS_HW_DOMAIN_ALLOC_OK, /**< domain successfully allocated */
+ OCS_HW_DOMAIN_ALLOC_FAIL, /**< domain allocation failed */
+ OCS_HW_DOMAIN_ATTACH_OK, /**< successfully attached to domain */
+ OCS_HW_DOMAIN_ATTACH_FAIL, /**< domain attach failed */
+ OCS_HW_DOMAIN_FREE_OK, /**< successfully freed domain */
+ OCS_HW_DOMAIN_FREE_FAIL, /**< domain free failed */
+ OCS_HW_DOMAIN_LOST, /**< previously discovered domain no longer available */
+ OCS_HW_DOMAIN_FOUND, /**< new domain discovered */
+ OCS_HW_DOMAIN_CHANGED, /**< previously discovered domain properties have changed */
+} ocs_hw_domain_event_e;
+
+typedef enum {
+ OCS_HW_PORT_ALLOC_OK, /**< port successfully allocated */
+ OCS_HW_PORT_ALLOC_FAIL, /**< port allocation failed */
+ OCS_HW_PORT_ATTACH_OK, /**< successfully attached to port */
+ OCS_HW_PORT_ATTACH_FAIL, /**< port attach failed */
+ OCS_HW_PORT_FREE_OK, /**< successfully freed port */
+ OCS_HW_PORT_FREE_FAIL, /**< port free failed */
+} ocs_hw_port_event_e;
+
+typedef enum {
+ OCS_HW_NODE_ATTACH_OK,
+ OCS_HW_NODE_ATTACH_FAIL,
+ OCS_HW_NODE_FREE_OK,
+ OCS_HW_NODE_FREE_FAIL,
+ OCS_HW_NODE_FREE_ALL_OK,
+ OCS_HW_NODE_FREE_ALL_FAIL,
+} ocs_hw_remote_node_event_e;
+
+typedef enum {
+ OCS_HW_CB_DOMAIN,
+ OCS_HW_CB_PORT,
+ OCS_HW_CB_REMOTE_NODE,
+ OCS_HW_CB_UNSOLICITED,
+ OCS_HW_CB_BOUNCE,
+ OCS_HW_CB_MAX, /**< must be last */
+} ocs_hw_callback_e;
+
+/**
+ * @brief HW unsolicited callback status
+ */
+typedef enum {
+ OCS_HW_UNSOL_SUCCESS,
+ OCS_HW_UNSOL_ERROR,
+ OCS_HW_UNSOL_ABTS_RCVD,
+ OCS_HW_UNSOL_MAX, /**< must be last */
+} ocs_hw_unsol_status_e;
+
+/**
+ * @brief Node group rpi reference
+ */
+typedef struct {
+ ocs_atomic_t rpi_count;
+ ocs_atomic_t rpi_attached;
+} ocs_hw_rpi_ref_t;
+
+/**
+ * @brief HW link stat types
+ */
+typedef enum {
+ OCS_HW_LINK_STAT_LINK_FAILURE_COUNT,
+ OCS_HW_LINK_STAT_LOSS_OF_SYNC_COUNT,
+ OCS_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT,
+ OCS_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT,
+ OCS_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT,
+ OCS_HW_LINK_STAT_CRC_COUNT,
+ OCS_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT,
+ OCS_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT,
+ OCS_HW_LINK_STAT_ARB_TIMEOUT_COUNT,
+ OCS_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT,
+ OCS_HW_LINK_STAT_CURR_RCV_B2B_CREDIT,
+ OCS_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT,
+ OCS_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT,
+ OCS_HW_LINK_STAT_RCV_EOFA_COUNT,
+ OCS_HW_LINK_STAT_RCV_EOFDTI_COUNT,
+ OCS_HW_LINK_STAT_RCV_EOFNI_COUNT,
+ OCS_HW_LINK_STAT_RCV_SOFF_COUNT,
+ OCS_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT,
+ OCS_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT,
+ OCS_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT,
+ OCS_HW_LINK_STAT_MAX, /**< must be last */
+} ocs_hw_link_stat_e;
+
+typedef enum {
+ OCS_HW_HOST_STAT_TX_KBYTE_COUNT,
+ OCS_HW_HOST_STAT_RX_KBYTE_COUNT,
+ OCS_HW_HOST_STAT_TX_FRAME_COUNT,
+ OCS_HW_HOST_STAT_RX_FRAME_COUNT,
+ OCS_HW_HOST_STAT_TX_SEQ_COUNT,
+ OCS_HW_HOST_STAT_RX_SEQ_COUNT,
+ OCS_HW_HOST_STAT_TOTAL_EXCH_ORIG,
+ OCS_HW_HOST_STAT_TOTAL_EXCH_RESP,
+ OCS_HW_HOSY_STAT_RX_P_BSY_COUNT,
+ OCS_HW_HOST_STAT_RX_F_BSY_COUNT,
+ OCS_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_RQ_BUF_COUNT,
+ OCS_HW_HOST_STAT_EMPTY_RQ_TIMEOUT_COUNT,
+ OCS_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_XRI_COUNT,
+ OCS_HW_HOST_STAT_EMPTY_XRI_POOL_COUNT,
+ OCS_HW_HOST_STAT_MAX /* MUST BE LAST */
+} ocs_hw_host_stat_e;
+
+typedef enum {
+ OCS_HW_STATE_UNINITIALIZED, /* power-on, no allocations, no initializations */
+ OCS_HW_STATE_QUEUES_ALLOCATED, /* chip is reset, allocations are complete (queues not registered) */
+	OCS_HW_STATE_ACTIVE,		/* chip is up and running */
+ OCS_HW_STATE_RESET_IN_PROGRESS, /* chip is being reset */
+ OCS_HW_STATE_TEARDOWN_IN_PROGRESS, /* teardown has been started */
+} ocs_hw_state_e;
+
+/**
+ * @brief Defines a general FC sequence object, consisting of a header, payload buffers,
+ * and a HW IO in the case of a port-owned XRI
+ */
+typedef struct {
+ ocs_hw_t *hw; /**< HW that owns this sequence */
+ /* sequence information */
+ uint8_t fcfi; /**< FCFI associated with sequence */
+ uint8_t auto_xrdy; /**< If auto XFER_RDY was generated */
+ uint8_t out_of_xris; /**< If IO would have been assisted if XRIs were available */
+ ocs_hw_rq_buffer_t *header;
+ ocs_hw_rq_buffer_t *payload; /**< received frame payload buffer */
+
+ /* other "state" information from the SRB (sequence coalescing) */
+ ocs_hw_unsol_status_e status;
+ uint32_t xri; /**< XRI associated with sequence; sequence coalescing only */
+ ocs_hw_io_t *hio; /**< HW IO */
+
+ ocs_list_link_t link;
+ void *hw_priv; /**< HW private context */
+} ocs_hw_sequence_t;
+
+/**
+ * @brief Structure to track optimized write buffers posted to chip owned XRIs.
+ *
+ * Note: The rqindex will be set to one of the following "fake" indexes. This will be used
+ * when the buffer is returned via ocs_seq_free() to make the buffer available
+ * for re-use on another XRI.
+ *
+ * The dma->alloc pointer on the dummy header will be used to get back to this structure when the buffer is freed.
+ *
+ * More of these objects may be allocated on the fly if more XRIs are pushed to the chip.
+ */
+#define OCS_HW_RQ_INDEX_DUMMY_HDR 0xFF00
+#define OCS_HW_RQ_INDEX_DUMMY_DATA 0xFF01
+typedef struct ocs_hw_auto_xfer_rdy_buffer_s {
+ fc_header_t hdr; /**< used to build a dummy data header for unsolicited processing */
+ ocs_hw_rq_buffer_t header; /**< Points to the dummy data header */
+ ocs_hw_rq_buffer_t payload; /**< received frame payload buffer */
+ ocs_hw_sequence_t seq; /**< sequence for passing the buffers */
+ uint8_t data_cqe;
+ uint8_t cmd_cqe;
+
+ /* fields saved from the command header that are needed when the data arrives */
+ uint8_t fcfi;
+
+	/* To handle out-of-order completions, save the AXR cmd and data CQEs */
+ uint8_t call_axr_cmd;
+ uint8_t call_axr_data;
+ ocs_hw_sequence_t *cmd_seq;
+} ocs_hw_auto_xfer_rdy_buffer_t;
+
+/**
+ * @brief Node group rpi reference
+ */
+typedef struct {
+ uint8_t overflow;
+ uint32_t counter;
+} ocs_hw_link_stat_counts_t;
+
+/**
+ * @brief HW object describing fc host stats
+ */
+typedef struct {
+ uint32_t counter;
+} ocs_hw_host_stat_counts_t;
+
+#define TID_HASH_BITS 8
+#define TID_HASH_LEN (1U << TID_HASH_BITS)
+
+typedef struct ocs_hw_iopt_s {
+ char name[32];
+ uint32_t instance_index;
+ ocs_thread_t iopt_thread;
+ ocs_cbuf_t *iopt_free_queue; /* multiple reader, multiple writer */
+ ocs_cbuf_t *iopt_work_queue;
+ ocs_array_t *iopt_cmd_array;
+} ocs_hw_iopt_t;
+
+typedef enum {
+ HW_CQ_HANDLER_LOCAL,
+ HW_CQ_HANDLER_THREAD,
+} hw_cq_handler_e;
+
+#include "ocs_hw_queues.h"
+
+/**
+ * @brief Structure used for the hash lookup of queue IDs
+ */
+typedef struct {
+ uint32_t id:16,
+ in_use:1,
+ index:15;
+} ocs_queue_hash_t;
+
+/**
+ * @brief Define the fields required to implement the skyhawk DIF quarantine.
+ */
+#define OCS_HW_QUARANTINE_QUEUE_DEPTH 4
+
+typedef struct {
+ uint32_t quarantine_index;
+ ocs_hw_io_t *quarantine_ios[OCS_HW_QUARANTINE_QUEUE_DEPTH];
+} ocs_quarantine_info_t;
+
+/**
+ * @brief Define the WQ callback object
+ */
+typedef struct {
+ uint16_t instance_index; /**< use for request tag */
+ void (*callback)(void *arg, uint8_t *cqe, int32_t status);
+ void *arg;
+} hw_wq_callback_t;
+
+typedef struct {
+ uint64_t fwrev;
+
+ /* Control Declarations here ...*/
+
+ uint8_t retain_tsend_io_length;
+
+ /* Use unregistered RPI */
+ uint8_t use_unregistered_rpi;
+ uint32_t unregistered_rid;
+ uint32_t unregistered_index;
+
+ uint8_t disable_ar_tgt_dif; /* Disable auto response if target DIF */
+ uint8_t disable_dump_loc;
+ uint8_t use_dif_quarantine;
+ uint8_t use_dif_sec_xri;
+
+ uint8_t override_fcfi;
+
+ uint8_t fw_version_too_low;
+
+ uint8_t sglc_misreported;
+
+ uint8_t ignore_send_frame;
+
+} ocs_hw_workaround_t;
+
+
+
+/**
+ * @brief HW object
+ */
+struct ocs_hw_s {
+ ocs_os_handle_t os;
+ sli4_t sli;
+ uint16_t ulp_start;
+ uint16_t ulp_max;
+ uint32_t dump_size;
+ ocs_hw_state_e state;
+ uint8_t hw_setup_called;
+ uint8_t sliport_healthcheck;
+ uint16_t watchdog_timeout;
+ ocs_lock_t watchdog_lock;
+
+ /** HW configuration, subject to ocs_hw_set() */
+ struct {
+ uint32_t n_eq; /**< number of event queues */
+ uint32_t n_cq; /**< number of completion queues */
+ uint32_t n_mq; /**< number of mailbox queues */
+ uint32_t n_rq; /**< number of receive queues */
+ uint32_t n_wq; /**< number of work queues */
+ uint32_t n_io; /**< total number of IO objects */
+ uint32_t n_sgl;/**< length of SGL */
+		uint32_t speed;		/**< requested link speed in Mbps */
+		uint32_t topology;	/**< requested link topology */
+		uint32_t rq_default_buffer_size;	/**< size of the buffers for first burst */
+		uint32_t auto_xfer_rdy_xri_cnt;	/**< initial XRIs to post to chip at initialization */
+		uint32_t auto_xfer_rdy_size;	/**< max size IO to use with this feature */
+		uint8_t auto_xfer_rdy_blk_size_chip;	/**< block size to use with this feature */
+		uint8_t esoc;
+		uint16_t dif_seed;	/**< the seed for the DIF CRC calculation */
+		uint16_t auto_xfer_rdy_app_tag_value;
+		uint8_t dif_mode;	/**< DIF mode to use */
+		uint8_t i_only_aab;	/**< enable initiator-only auto-abort */
+		uint8_t emulate_tgt_wqe_timeout;	/**< enable driver target wqe timeouts */
+		uint32_t bounce:1;
+		const char *queue_topology;	/**< queue topology string */
+		uint8_t auto_xfer_rdy_t10_enable;	/**< enable t10 PI for auto xfer ready */
+		uint8_t auto_xfer_rdy_p_type;	/**< p_type for auto xfer ready */
+		uint8_t auto_xfer_rdy_ref_tag_is_lba;
+		uint8_t auto_xfer_rdy_app_tag_valid;
+		uint8_t rq_selection_policy;	/**< MRQ RQ selection policy */
+		uint8_t rr_quanta;	/**< RQ quanta if rq_selection_policy == 2 */
+ uint32_t filter_def[SLI4_CMD_REG_FCFI_NUM_RQ_CFG];
+ } config;
+
+ /* calculated queue sizes for each type */
+ uint32_t num_qentries[SLI_QTYPE_MAX];
+
+ /* Storage for SLI queue objects */
+ sli4_queue_t wq[OCS_HW_MAX_NUM_WQ];
+ sli4_queue_t rq[OCS_HW_MAX_NUM_RQ];
+ uint16_t hw_rq_lookup[OCS_HW_MAX_NUM_RQ];
+ sli4_queue_t mq[OCS_HW_MAX_NUM_MQ];
+ sli4_queue_t cq[OCS_HW_MAX_NUM_CQ];
+ sli4_queue_t eq[OCS_HW_MAX_NUM_EQ];
+
+ /* HW queue */
+ uint32_t eq_count;
+ uint32_t cq_count;
+ uint32_t mq_count;
+ uint32_t wq_count;
+ uint32_t rq_count; /**< count of SLI RQs */
+ ocs_list_t eq_list;
+
+ ocs_queue_hash_t cq_hash[OCS_HW_Q_HASH_SIZE];
+ ocs_queue_hash_t rq_hash[OCS_HW_Q_HASH_SIZE];
+ ocs_queue_hash_t wq_hash[OCS_HW_Q_HASH_SIZE];
+
+ /* Storage for HW queue objects */
+ hw_wq_t *hw_wq[OCS_HW_MAX_NUM_WQ];
+ hw_rq_t *hw_rq[OCS_HW_MAX_NUM_RQ];
+ hw_mq_t *hw_mq[OCS_HW_MAX_NUM_MQ];
+ hw_cq_t *hw_cq[OCS_HW_MAX_NUM_CQ];
+ hw_eq_t *hw_eq[OCS_HW_MAX_NUM_EQ];
+ uint32_t hw_rq_count; /**< count of hw_rq[] entries */
+ uint32_t hw_mrq_count; /**< count of multirq RQs */
+
+ ocs_varray_t *wq_class_array[OCS_HW_MAX_WQ_CLASS]; /**< pool per class WQs */
+ ocs_varray_t *wq_cpu_array[OCS_HW_MAX_WQ_CPU]; /**< pool per CPU WQs */
+
+ /* Sequence objects used in incoming frame processing */
+ ocs_array_t *seq_pool;
+
+ /* Auto XFER RDY Buffers - protect with io_lock */
+ uint32_t auto_xfer_rdy_enabled:1, /**< TRUE if auto xfer rdy is enabled */
+ :31;
+ ocs_pool_t *auto_xfer_rdy_buf_pool; /**< pool of ocs_hw_auto_xfer_rdy_buffer_t objects */
+
+ /** Maintain an ordered, linked list of outstanding HW commands. */
+ ocs_lock_t cmd_lock;
+ ocs_list_t cmd_head;
+ ocs_list_t cmd_pending;
+ uint32_t cmd_head_count;
+
+
+ sli4_link_event_t link;
+ ocs_hw_linkcfg_e linkcfg; /**< link configuration setting */
+ uint32_t eth_license; /**< Ethernet license; to enable FCoE on Lancer */
+
+ struct {
+ /**
+ * Function + argument used to notify upper layer of domain events.
+ *
+ * The final argument to the callback is a generic data pointer:
+ * - ocs_domain_record_t on OCS_HW_DOMAIN_FOUND
+ * - ocs_domain_t on OCS_HW_DOMAIN_ALLOC_FAIL, OCS_HW_DOMAIN_ALLOC_OK,
+ * OCS_HW_DOMAIN_FREE_FAIL, OCS_HW_DOMAIN_FREE_OK,
+ * OCS_HW_DOMAIN_ATTACH_FAIL, OCS_HW_DOMAIN_ATTACH_OK, and
+ * OCS_HW_DOMAIN_LOST.
+ */
+ int32_t (*domain)(void *, ocs_hw_domain_event_e, void *);
+ /**
+ * Function + argument used to notify upper layers of port events.
+ *
+		 * The final argument to the callback is a pointer to the affected
+		 * SLI port for all events.
+ */
+ int32_t (*port)(void *, ocs_hw_port_event_e, void *);
+ /** Function + argument used to announce arrival of unsolicited frames */
+ int32_t (*unsolicited)(void *, ocs_hw_sequence_t *);
+ int32_t (*rnode)(void *, ocs_hw_remote_node_event_e, void *);
+ int32_t (*bounce)(void (*)(void *arg), void *arg, uint32_t s_id, uint32_t d_id, uint32_t ox_id);
+ } callback;
+ struct {
+ void *domain;
+ void *port;
+ void *unsolicited;
+ void *rnode;
+ void *bounce;
+ } args;
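+
+/*
+ * Illustrative sketch, not part of the driver: registering an upper-layer
+ * domain handler through ocs_hw_callback() (declared later in this header).
+ * The handler name and its body are hypothetical.
+ */
+#if 0
+static int32_t
+example_domain_cb(void *arg, ocs_hw_domain_event_e event, void *data)
+{
+	/* data is an ocs_domain_record_t for OCS_HW_DOMAIN_FOUND, otherwise an ocs_domain_t */
+	return 0;
+}
+
+/* ... and at init time: ocs_hw_callback(hw, OCS_HW_CB_DOMAIN, example_domain_cb, ul_arg); */
+#endif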
+
+ /* OCS domain objects index by FCFI */
+ int32_t first_domain_idx; /* Workaround for srb->fcfi == 0 */
+ ocs_domain_t *domains[SLI4_MAX_FCFI];
+
+ /* Table of FCFI values index by FCF_index */
+ uint16_t fcf_index_fcfi[SLI4_MAX_FCF_INDEX];
+
+ uint16_t fcf_indicator;
+
+ ocs_hw_io_t **io; /**< pointer array of IO objects */
+ uint8_t *wqe_buffs; /**< array of WQE buffs mapped to IO objects */
+
+ ocs_lock_t io_lock; /**< IO lock to synchronize list access */
+ ocs_lock_t io_abort_lock; /**< IO lock to synchronize IO aborting */
+ ocs_list_t io_inuse; /**< List of IO objects in use */
+ ocs_list_t io_timed_wqe; /**< List of IO objects with a timed target WQE */
+ ocs_list_t io_wait_free; /**< List of IO objects waiting to be freed */
+ ocs_list_t io_free; /**< List of IO objects available for allocation */
+ ocs_list_t io_port_owned; /**< List of IO objects posted for chip use */
+ ocs_list_t io_port_dnrx; /**< List of IO objects needing auto xfer rdy buffers */
+
+ ocs_dma_t loop_map;
+
+ ocs_dma_t xfer_rdy;
+
+ ocs_dma_t dump_sges;
+
+ ocs_dma_t rnode_mem;
+
+	ocs_dma_t domain_dmem;	/**< domain DMA memory for service params */
+	ocs_dma_t fcf_dmem;	/**< DMA memory for FCF */
+
+ ocs_hw_rpi_ref_t *rpi_ref;
+
+ char *hw_war_version;
+ ocs_hw_workaround_t workaround;
+
+ ocs_atomic_t io_alloc_failed_count;
+
+#if defined(OCS_DEBUG_QUEUE_HISTORY)
+ ocs_hw_q_hist_t q_hist;
+#endif
+
+ ocs_list_t sec_hio_wait_list; /**< BZ 161832 Workaround: Secondary HW IO context wait list */
+ uint32_t sec_hio_wait_count; /**< BZ 161832 Workaround: Count of IOs that were put on the
+ * Secondary HW IO wait list
+ */
+
+#define HW_MAX_TCMD_THREADS 16
+ ocs_hw_qtop_t *qtop; /**< pointer to queue topology */
+
+	uint32_t tcmd_wq_submit[OCS_HW_MAX_NUM_WQ];	/**< stat: wq submit count */
+ uint32_t tcmd_wq_complete[OCS_HW_MAX_NUM_WQ]; /**< stat: wq complete count */
+
+ ocs_timer_t wqe_timer; /**< Timer to periodically check for WQE timeouts */
+ ocs_timer_t watchdog_timer; /**< Timer for heartbeat */
+ bool expiration_logged;
+ uint32_t in_active_wqe_timer:1, /**< TRUE if currently in active wqe timer handler */
+		active_wqe_timer_shutdown:1,	/**< TRUE if wqe timer is to be shut down */
+ :30;
+
+ ocs_list_t iopc_list; /**< list of IO processing contexts */
+ ocs_lock_t iopc_list_lock; /**< lock for iopc_list */
+
+ ocs_pool_t *wq_reqtag_pool; /**< pool of hw_wq_callback_t objects */
+
+ ocs_atomic_t send_frame_seq_id; /**< send frame sequence ID */
+};
+
+
+typedef enum {
+ OCS_HW_IO_INUSE_COUNT,
+ OCS_HW_IO_FREE_COUNT,
+ OCS_HW_IO_WAIT_FREE_COUNT,
+ OCS_HW_IO_PORT_OWNED_COUNT,
+ OCS_HW_IO_N_TOTAL_IO_COUNT,
+} ocs_hw_io_count_type_e;
+
+typedef void (*tcmd_cq_handler)(ocs_hw_t *hw, uint32_t cq_idx, void *cq_handler_arg);
+
+/*
+ * HW queue data structures
+ */
+
+struct hw_eq_s {
+ ocs_list_link_t link; /**< must be first */
+ sli4_qtype_e type; /**< must be second */
+ uint32_t instance;
+ uint32_t entry_count;
+ uint32_t entry_size;
+ ocs_hw_t *hw;
+ sli4_queue_t *queue;
+ ocs_list_t cq_list;
+#if OCS_STAT_ENABLE
+ uint32_t use_count;
+#endif
+	ocs_varray_t *wq_array;	/**< array of WQs */
+};
+
+struct hw_cq_s {
+	ocs_list_link_t link;	/**< must be first */
+	sli4_qtype_e type;	/**< must be second */
+	uint32_t instance;	/**< CQ instance (cq_idx) */
+	uint32_t entry_count;	/**< number of entries */
+	uint32_t entry_size;	/**< entry size */
+	hw_eq_t *eq;		/**< parent EQ */
+ sli4_queue_t *queue; /**< pointer to SLI4 queue */
+ ocs_list_t q_list; /**< list of children queues */
+
+#if OCS_STAT_ENABLE
+ uint32_t use_count;
+#endif
+};
+
+typedef struct {
+	ocs_list_link_t link;	/**< must be first */
+	sli4_qtype_e type;	/**< must be second */
+} hw_q_t;
+
+struct hw_mq_s {
+	ocs_list_link_t link;	/**< must be first */
+	sli4_qtype_e type;	/**< must be second */
+ uint32_t instance;
+
+ uint32_t entry_count;
+ uint32_t entry_size;
+ hw_cq_t *cq;
+ sli4_queue_t *queue;
+
+#if OCS_STAT_ENABLE
+ uint32_t use_count;
+#endif
+};
+
+struct hw_wq_s {
+	ocs_list_link_t link;	/**< must be first */
+	sli4_qtype_e type;	/**< must be second */
+ uint32_t instance;
+ ocs_hw_t *hw;
+
+ uint32_t entry_count;
+ uint32_t entry_size;
+ hw_cq_t *cq;
+ sli4_queue_t *queue;
+ uint32_t class;
+ uint8_t ulp;
+
+ /* WQ consumed */
+	uint32_t wqec_set_count;	/**< how often IOs are submitted with wqec set */
+	uint32_t wqec_count;		/**< current wqec counter */
+	uint32_t free_count;		/**< free count */
+	uint32_t total_submit_count;	/**< total submit count */
+	ocs_list_t pending_list;	/**< list of IOs pending for this WQ */
+
+ /*
+ * ---Skyhawk only ---
+	 * BZ 160124 - Driver must quarantine XRIs for target writes and
+	 * initiator reads when using DIF separates. Throw them on a
+	 * queue until another 4 similar requests are completed to ensure they
+	 * are flushed from the internal chip cache before being re-used.
+	 * There must be a separate queue per CQ because the actual chip completion
+	 * order cannot be determined. Since each WQ has a separate CQ, use the WQ
+	 * associated with the IO.
+ *
+ * Note: Protected by queue->lock
+ */
+ ocs_quarantine_info_t quarantine_info;
+
+ /*
+ * HW IO allocated for use with Send Frame
+ */
+ ocs_hw_io_t *send_frame_io;
+
+ /* Stats */
+#if OCS_STAT_ENABLE
+	uint32_t use_count;		/**< use count */
+	uint32_t wq_pending_count;	/**< count of HW IOs that were queued on the WQ pending list */
+#endif
+};
+
+struct hw_rq_s {
+	ocs_list_link_t link;	/**< must be first */
+	sli4_qtype_e type;	/**< must be second */
+ uint32_t instance;
+
+ uint32_t entry_count;
+ uint32_t hdr_entry_size;
+ uint32_t first_burst_entry_size;
+ uint32_t data_entry_size;
+ uint8_t ulp;
+ bool is_mrq;
+ uint32_t base_mrq_id;
+
+ hw_cq_t *cq;
+
+ uint8_t filter_mask; /* Filter mask value */
+ sli4_queue_t *hdr;
+ sli4_queue_t *first_burst;
+ sli4_queue_t *data;
+
+ ocs_hw_rq_buffer_t *hdr_buf;
+ ocs_hw_rq_buffer_t *fb_buf;
+ ocs_hw_rq_buffer_t *payload_buf;
+
+ ocs_hw_sequence_t **rq_tracker; /* RQ tracker for this RQ */
+#if OCS_STAT_ENABLE
+ uint32_t use_count;
+ uint32_t hdr_use_count;
+ uint32_t fb_use_count;
+ uint32_t payload_use_count;
+#endif
+};
+
+typedef struct ocs_hw_global_s {
+ const char *queue_topology_string; /**< queue topology string */
+} ocs_hw_global_t;
+extern ocs_hw_global_t hw_global;
+
+extern hw_eq_t *hw_new_eq(ocs_hw_t *hw, uint32_t entry_count);
+extern hw_cq_t *hw_new_cq(hw_eq_t *eq, uint32_t entry_count);
+extern uint32_t hw_new_cq_set(hw_eq_t *eqs[], hw_cq_t *cqs[], uint32_t num_cqs, uint32_t entry_count);
+extern hw_mq_t *hw_new_mq(hw_cq_t *cq, uint32_t entry_count);
+extern hw_wq_t *hw_new_wq(hw_cq_t *cq, uint32_t entry_count, uint32_t class, uint32_t ulp);
+extern hw_rq_t *hw_new_rq(hw_cq_t *cq, uint32_t entry_count, uint32_t ulp);
+extern uint32_t hw_new_rq_set(hw_cq_t *cqs[], hw_rq_t *rqs[], uint32_t num_rq_pairs, uint32_t entry_count, uint32_t ulp);
+extern void hw_del_eq(hw_eq_t *eq);
+extern void hw_del_cq(hw_cq_t *cq);
+extern void hw_del_mq(hw_mq_t *mq);
+extern void hw_del_wq(hw_wq_t *wq);
+extern void hw_del_rq(hw_rq_t *rq);
+extern void hw_queue_dump(ocs_hw_t *hw);
+extern void hw_queue_teardown(ocs_hw_t *hw);
+extern int32_t hw_route_rqe(ocs_hw_t *hw, ocs_hw_sequence_t *seq);
+extern int32_t ocs_hw_queue_hash_find(ocs_queue_hash_t *, uint16_t);
+extern ocs_hw_rtn_e ocs_hw_setup(ocs_hw_t *, ocs_os_handle_t, sli4_port_type_e);
+extern ocs_hw_rtn_e ocs_hw_init(ocs_hw_t *);
+extern ocs_hw_rtn_e ocs_hw_teardown(ocs_hw_t *);
+extern ocs_hw_rtn_e ocs_hw_reset(ocs_hw_t *, ocs_hw_reset_e);
+extern int32_t ocs_hw_get_num_eq(ocs_hw_t *);
+extern ocs_hw_rtn_e ocs_hw_get(ocs_hw_t *, ocs_hw_property_e, uint32_t *);
+extern void *ocs_hw_get_ptr(ocs_hw_t *, ocs_hw_property_e);
+extern ocs_hw_rtn_e ocs_hw_set(ocs_hw_t *, ocs_hw_property_e, uint32_t);
+extern ocs_hw_rtn_e ocs_hw_set_ptr(ocs_hw_t *, ocs_hw_property_e, void*);
+extern int32_t ocs_hw_event_check(ocs_hw_t *, uint32_t);
+extern int32_t ocs_hw_process(ocs_hw_t *, uint32_t, uint32_t);
+extern ocs_hw_rtn_e ocs_hw_command(ocs_hw_t *, uint8_t *, uint32_t, void *, void *);
+extern ocs_hw_rtn_e ocs_hw_callback(ocs_hw_t *, ocs_hw_callback_e, void *, void *);
+extern ocs_hw_rtn_e ocs_hw_port_alloc(ocs_hw_t *, ocs_sli_port_t *, ocs_domain_t *, uint8_t *);
+extern ocs_hw_rtn_e ocs_hw_port_attach(ocs_hw_t *, ocs_sli_port_t *, uint32_t);
+typedef void (*ocs_hw_port_control_cb_t)(int32_t status, uintptr_t value, void *arg);
+extern ocs_hw_rtn_e ocs_hw_port_control(ocs_hw_t *, ocs_hw_port_e, uintptr_t, ocs_hw_port_control_cb_t, void *);
+extern ocs_hw_rtn_e ocs_hw_port_free(ocs_hw_t *, ocs_sli_port_t *);
+extern ocs_hw_rtn_e ocs_hw_domain_alloc(ocs_hw_t *, ocs_domain_t *, uint32_t, uint32_t);
+extern ocs_hw_rtn_e ocs_hw_domain_attach(ocs_hw_t *, ocs_domain_t *, uint32_t);
+extern ocs_hw_rtn_e ocs_hw_domain_free(ocs_hw_t *, ocs_domain_t *);
+extern ocs_hw_rtn_e ocs_hw_domain_force_free(ocs_hw_t *, ocs_domain_t *);
+extern ocs_domain_t * ocs_hw_domain_get(ocs_hw_t *, uint16_t);
+extern ocs_hw_rtn_e ocs_hw_node_alloc(ocs_hw_t *, ocs_remote_node_t *, uint32_t, ocs_sli_port_t *);
+extern ocs_hw_rtn_e ocs_hw_node_free_all(ocs_hw_t *);
+extern ocs_hw_rtn_e ocs_hw_node_attach(ocs_hw_t *, ocs_remote_node_t *, ocs_dma_t *);
+extern ocs_hw_rtn_e ocs_hw_node_detach(ocs_hw_t *, ocs_remote_node_t *);
+extern ocs_hw_rtn_e ocs_hw_node_free_resources(ocs_hw_t *, ocs_remote_node_t *);
+extern ocs_hw_rtn_e ocs_hw_node_group_alloc(ocs_hw_t *, ocs_remote_node_group_t *);
+extern ocs_hw_rtn_e ocs_hw_node_group_attach(ocs_hw_t *, ocs_remote_node_group_t *, ocs_remote_node_t *);
+extern ocs_hw_rtn_e ocs_hw_node_group_free(ocs_hw_t *, ocs_remote_node_group_t *);
+extern ocs_hw_io_t *ocs_hw_io_alloc(ocs_hw_t *);
+extern ocs_hw_io_t *ocs_hw_io_activate_port_owned(ocs_hw_t *, ocs_hw_io_t *);
+extern int32_t ocs_hw_io_free(ocs_hw_t *, ocs_hw_io_t *);
+extern uint8_t ocs_hw_io_inuse(ocs_hw_t *hw, ocs_hw_io_t *io);
+typedef int32_t (*ocs_hw_srrs_cb_t)(ocs_hw_io_t *io, ocs_remote_node_t *rnode, uint32_t length, int32_t status, uint32_t ext_status, void *arg);
+extern ocs_hw_rtn_e ocs_hw_srrs_send(ocs_hw_t *, ocs_hw_io_type_e, ocs_hw_io_t *, ocs_dma_t *, uint32_t, ocs_dma_t *, ocs_remote_node_t *, ocs_hw_io_param_t *, ocs_hw_srrs_cb_t, void *);
+extern ocs_hw_rtn_e ocs_hw_io_send(ocs_hw_t *, ocs_hw_io_type_e, ocs_hw_io_t *, uint32_t, ocs_hw_io_param_t *, ocs_remote_node_t *, void *, void *);
+extern ocs_hw_rtn_e _ocs_hw_io_send(ocs_hw_t *hw, ocs_hw_io_type_e type, ocs_hw_io_t *io,
+ uint32_t len, ocs_hw_io_param_t *iparam, ocs_remote_node_t *rnode,
+ void *cb, void *arg);
+extern ocs_hw_rtn_e ocs_hw_io_register_sgl(ocs_hw_t *, ocs_hw_io_t *, ocs_dma_t *, uint32_t);
+extern ocs_hw_rtn_e ocs_hw_io_init_sges(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_hw_io_type_e type);
+extern ocs_hw_rtn_e ocs_hw_io_add_seed_sge(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_hw_dif_info_t *dif_info);
+extern ocs_hw_rtn_e ocs_hw_io_add_sge(ocs_hw_t *, ocs_hw_io_t *, uintptr_t, uint32_t);
+extern ocs_hw_rtn_e ocs_hw_io_add_dif_sge(ocs_hw_t *hw, ocs_hw_io_t *io, uintptr_t addr);
+extern ocs_hw_rtn_e ocs_hw_io_abort(ocs_hw_t *, ocs_hw_io_t *, uint32_t, void *, void *);
+extern int32_t ocs_hw_io_get_xid(ocs_hw_t *, ocs_hw_io_t *);
+extern uint32_t ocs_hw_io_get_count(ocs_hw_t *, ocs_hw_io_count_type_e);
+extern uint32_t ocs_hw_get_rqes_produced_count(ocs_hw_t *hw);
+
+typedef void (*ocs_hw_fw_cb_t)(int32_t status, uint32_t bytes_written, uint32_t change_status, void *arg);
+extern ocs_hw_rtn_e ocs_hw_firmware_write(ocs_hw_t *, ocs_dma_t *, uint32_t, uint32_t, int, ocs_hw_fw_cb_t, void*);
+
+/* Function for retrieving SFP data */
+typedef void (*ocs_hw_sfp_cb_t)(void *, int32_t, uint32_t, uint32_t *, void *);
+extern ocs_hw_rtn_e ocs_hw_get_sfp(ocs_hw_t *, uint16_t, ocs_hw_sfp_cb_t, void *);
+
+/* Function for retrieving temperature data */
+typedef void (*ocs_hw_temp_cb_t)(int32_t status,
+ uint32_t curr_temp,
+ uint32_t crit_temp_thrshld,
+ uint32_t warn_temp_thrshld,
+ uint32_t norm_temp_thrshld,
+ uint32_t fan_off_thrshld,
+ uint32_t fan_on_thrshld,
+ void *arg);
+extern ocs_hw_rtn_e ocs_hw_get_temperature(ocs_hw_t *, ocs_hw_temp_cb_t, void*);
+
+/* Function for retrieving link statistics */
+typedef void (*ocs_hw_link_stat_cb_t)(int32_t status,
+ uint32_t num_counters,
+ ocs_hw_link_stat_counts_t *counters,
+ void *arg);
+extern ocs_hw_rtn_e ocs_hw_get_link_stats(ocs_hw_t *,
+ uint8_t req_ext_counters,
+ uint8_t clear_overflow_flags,
+ uint8_t clear_all_counters,
+ ocs_hw_link_stat_cb_t, void*);
+/* Function for retrieving host statistics */
+typedef void (*ocs_hw_host_stat_cb_t)(int32_t status,
+ uint32_t num_counters,
+ ocs_hw_host_stat_counts_t *counters,
+ void *arg);
+extern ocs_hw_rtn_e ocs_hw_get_host_stats(ocs_hw_t *hw, uint8_t cc, ocs_hw_host_stat_cb_t, void *arg);
+
+extern ocs_hw_rtn_e ocs_hw_raise_ue(ocs_hw_t *, uint8_t);
+typedef void (*ocs_hw_dump_get_cb_t)(int32_t status, uint32_t bytes_read, uint8_t eof, void *arg);
+extern ocs_hw_rtn_e ocs_hw_dump_get(ocs_hw_t *, ocs_dma_t *, uint32_t, uint32_t, ocs_hw_dump_get_cb_t, void *);
+extern ocs_hw_rtn_e ocs_hw_set_dump_location(ocs_hw_t *, uint32_t, ocs_dma_t *, uint8_t);
+
+typedef void (*ocs_get_port_protocol_cb_t)(int32_t status, ocs_hw_port_protocol_e port_protocol, void *arg);
+extern ocs_hw_rtn_e ocs_hw_get_port_protocol(ocs_hw_t *hw, uint32_t pci_func, ocs_get_port_protocol_cb_t mgmt_cb, void* ul_arg);
+typedef void (*ocs_set_port_protocol_cb_t)(int32_t status, void *arg);
+extern ocs_hw_rtn_e ocs_hw_set_port_protocol(ocs_hw_t *hw, ocs_hw_port_protocol_e profile,
+ uint32_t pci_func, ocs_set_port_protocol_cb_t mgmt_cb,
+ void* ul_arg);
+
+typedef void (*ocs_get_profile_list_cb_t)(int32_t status, ocs_hw_profile_list_t*, void *arg);
+extern ocs_hw_rtn_e ocs_hw_get_profile_list(ocs_hw_t *hw, ocs_get_profile_list_cb_t mgmt_cb, void *arg);
+typedef void (*ocs_get_active_profile_cb_t)(int32_t status, uint32_t active_profile, void *arg);
+extern ocs_hw_rtn_e ocs_hw_get_active_profile(ocs_hw_t *hw, ocs_get_active_profile_cb_t mgmt_cb, void *arg);
+typedef void (*ocs_set_active_profile_cb_t)(int32_t status, void *arg);
+extern ocs_hw_rtn_e ocs_hw_set_active_profile(ocs_hw_t *hw, ocs_set_active_profile_cb_t mgmt_cb,
+ uint32_t profile_id, void *arg);
+typedef void (*ocs_get_nvparms_cb_t)(int32_t status, uint8_t *wwpn, uint8_t *wwnn, uint8_t hard_alpa,
+ uint32_t preferred_d_id, void *arg);
+extern ocs_hw_rtn_e ocs_hw_get_nvparms(ocs_hw_t *hw, ocs_get_nvparms_cb_t mgmt_cb, void *arg);
+typedef void (*ocs_set_nvparms_cb_t)(int32_t status, void *arg);
+extern ocs_hw_rtn_e ocs_hw_set_nvparms(ocs_hw_t *hw, ocs_set_nvparms_cb_t mgmt_cb, uint8_t *wwpn,
+ uint8_t *wwnn, uint8_t hard_alpa, uint32_t preferred_d_id, void *arg);
+extern int32_t ocs_hw_eq_process(ocs_hw_t *hw, hw_eq_t *eq, uint32_t max_isr_time_msec);
+extern void ocs_hw_cq_process(ocs_hw_t *hw, hw_cq_t *cq);
+extern void ocs_hw_wq_process(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe, int32_t status, uint16_t rid);
+extern void ocs_hw_xabt_process(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe, uint16_t rid);
+extern int32_t hw_wq_write(hw_wq_t *wq, ocs_hw_wqe_t *wqe);
+
+typedef void (*ocs_hw_dump_clear_cb_t)(int32_t status, void *arg);
+extern ocs_hw_rtn_e ocs_hw_dump_clear(ocs_hw_t *, ocs_hw_dump_clear_cb_t, void *);
+
+extern uint8_t ocs_hw_is_io_port_owned(ocs_hw_t *hw, ocs_hw_io_t *io);
+
+
+extern uint8_t ocs_hw_is_xri_port_owned(ocs_hw_t *hw, uint32_t xri);
+extern ocs_hw_io_t * ocs_hw_io_lookup(ocs_hw_t *hw, uint32_t indicator);
+extern uint32_t ocs_hw_xri_move_to_port_owned(ocs_hw_t *hw, uint32_t num_xri);
+extern ocs_hw_rtn_e ocs_hw_xri_move_to_host_owned(ocs_hw_t *hw, uint8_t num_xri);
+extern int32_t ocs_hw_reque_xri(ocs_hw_t *hw, ocs_hw_io_t *io);
+
+
+typedef struct {
+ /* structure elements used by HW */
+	ocs_hw_t *hw;			/**< pointer to HW */
+	hw_wq_callback_t *wqcb;		/**< WQ callback object, request tag */
+	ocs_hw_wqe_t wqe;		/**< WQE buffer object (may be queued on WQ pending list) */
+	void (*callback)(int32_t status, void *arg);	/**< final callback function */
+	void *arg;			/**< final callback argument */
+
+	/* General purpose elements */
+	ocs_hw_sequence_t *seq;
+	ocs_dma_t payload;		/**< a payload DMA buffer */
+} ocs_hw_send_frame_context_t;
+
+
+#define OCS_HW_OBJECT_G5 0xfeaa0001
+#define OCS_HW_OBJECT_G6 0xfeaa0003
+#define OCS_FILE_TYPE_GROUP 0xf7
+#define OCS_FILE_ID_GROUP 0xa2
+struct ocs_hw_grp_hdr {
+ uint32_t size;
+ uint32_t magic_number;
+ uint32_t word2;
+ uint8_t rev_name[128];
+ uint8_t date[12];
+ uint8_t revision[32];
+};
+
+
+ocs_hw_rtn_e
+ocs_hw_send_frame(ocs_hw_t *hw, fc_header_le_t *hdr, uint8_t sof, uint8_t eof, ocs_dma_t *payload,
+ ocs_hw_send_frame_context_t *ctx,
+ void (*callback)(void *arg, uint8_t *cqe, int32_t status), void *arg);
+
+/* RQ completion handlers for RQ pair mode */
+extern int32_t ocs_hw_rqpair_process_rq(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe);
+extern ocs_hw_rtn_e ocs_hw_rqpair_sequence_free(ocs_hw_t *hw, ocs_hw_sequence_t *seq);
+extern int32_t ocs_hw_rqpair_process_auto_xfr_rdy_cmd(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe);
+extern int32_t ocs_hw_rqpair_process_auto_xfr_rdy_data(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe);
+extern ocs_hw_rtn_e ocs_hw_rqpair_init(ocs_hw_t *hw);
+extern ocs_hw_rtn_e ocs_hw_rqpair_auto_xfer_rdy_buffer_alloc(ocs_hw_t *hw, uint32_t num_buffers);
+extern uint8_t ocs_hw_rqpair_auto_xfer_rdy_buffer_post(ocs_hw_t *hw, ocs_hw_io_t *io, int reuse_buf);
+extern ocs_hw_rtn_e ocs_hw_rqpair_auto_xfer_rdy_move_to_port(ocs_hw_t *hw, ocs_hw_io_t *io);
+extern void ocs_hw_rqpair_auto_xfer_rdy_move_to_host(ocs_hw_t *hw, ocs_hw_io_t *io);
+extern void ocs_hw_rqpair_teardown(ocs_hw_t *hw);
+
+extern ocs_hw_rtn_e ocs_hw_rx_allocate(ocs_hw_t *hw);
+extern ocs_hw_rtn_e ocs_hw_rx_post(ocs_hw_t *hw);
+extern void ocs_hw_rx_free(ocs_hw_t *hw);
+
+extern void ocs_hw_unsol_process_bounce(void *arg);
+
+typedef int32_t (*ocs_hw_async_cb_t)(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg);
+extern int32_t ocs_hw_async_call(ocs_hw_t *hw, ocs_hw_async_cb_t callback, void *arg);
+
+static inline void
+ocs_hw_sequence_copy(ocs_hw_sequence_t *dst, ocs_hw_sequence_t *src)
+{
+ /* Copy the src to dst, then zero out the linked list link */
+ *dst = *src;
+ ocs_memset(&dst->link, 0, sizeof(dst->link));
+}
+
+static inline ocs_hw_rtn_e
+ocs_hw_sequence_free(ocs_hw_t *hw, ocs_hw_sequence_t *seq)
+{
+ /* Only RQ pair mode is supported */
+ return ocs_hw_rqpair_sequence_free(hw, seq);
+}
+
+/* HW WQ request tag API */
+extern ocs_hw_rtn_e ocs_hw_reqtag_init(ocs_hw_t *hw);
+extern hw_wq_callback_t *ocs_hw_reqtag_alloc(ocs_hw_t *hw,
+ void (*callback)(void *arg, uint8_t *cqe, int32_t status), void *arg);
+extern void ocs_hw_reqtag_free(ocs_hw_t *hw, hw_wq_callback_t *wqcb);
+extern hw_wq_callback_t *ocs_hw_reqtag_get_instance(ocs_hw_t *hw, uint32_t instance_index);
+extern void ocs_hw_reqtag_reset(ocs_hw_t *hw);
+
+
+extern uint32_t ocs_hw_dif_blocksize(ocs_hw_dif_info_t *dif_info);
+extern int32_t ocs_hw_dif_mem_blocksize(ocs_hw_dif_info_t *dif_info, int wiretomem);
+extern int32_t ocs_hw_dif_wire_blocksize(ocs_hw_dif_info_t *dif_info, int wiretomem);
+extern uint32_t ocs_hw_get_def_wwn(ocs_t *ocs, uint32_t chan, uint64_t *wwpn, uint64_t *wwnn);
+
+/* Uncomment to enable CPUTRACE */
+//#define ENABLE_CPUTRACE
+#ifdef ENABLE_CPUTRACE
+#define CPUTRACE(t) ocs_printf("trace: %-20s %2s %-16s cpu %2d\n", __func__, t, \
+ ({ocs_thread_t *self = ocs_thread_self(); self != NULL ? self->name : "unknown";}), ocs_thread_getcpu());
+#else
+#define CPUTRACE(...)
+#endif
+
+
+/* Two levels of macro needed due to expansion */
+#define HW_FWREV(a,b,c,d) (((uint64_t)(a) << 48) | ((uint64_t)(b) << 32) | ((uint64_t)(c) << 16) | ((uint64_t)(d)))
+#define HW_FWREV_1(x) HW_FWREV(x)
+
+#define OCS_FW_VER_STR2(a,b,c,d) #a "." #b "." #c "." #d
+#define OCS_FW_VER_STR(x) OCS_FW_VER_STR2(x)
+
+#define OCS_MIN_FW_VER_LANCER 10,4,255,0
+#define OCS_MIN_FW_VER_SKYHAWK 10,4,255,0
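+
+/*
+ * Example expansion: HW_FWREV_1(OCS_MIN_FW_VER_LANCER) becomes
+ * HW_FWREV(10,4,255,0), i.e. (10ULL << 48) | (4ULL << 32) | (255ULL << 16) | 0,
+ * and OCS_FW_VER_STR(OCS_MIN_FW_VER_LANCER) becomes "10.4.255.0".
+ */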
+
+extern void ocs_hw_workaround_setup(struct ocs_hw_s *hw);
+
+
+/**
+ * @brief Defines the number of RQ buffers for each RQ
+ */
+
+#ifndef OCS_HW_RQ_NUM_HDR
+#define OCS_HW_RQ_NUM_HDR 1024
+#endif
+
+#ifndef OCS_HW_RQ_NUM_PAYLOAD
+#define OCS_HW_RQ_NUM_PAYLOAD 1024
+#endif
+
+/**
+ * @brief Defines the size of the RQ buffers used for each RQ
+ */
+#ifndef OCS_HW_RQ_SIZE_HDR
+#define OCS_HW_RQ_SIZE_HDR 128
+#endif
+
+#ifndef OCS_HW_RQ_SIZE_PAYLOAD
+#define OCS_HW_RQ_SIZE_PAYLOAD 1024
+#endif
+
+/**
+ * @brief Define the maximum number of multi-receive queues
+ */
+#ifndef OCS_HW_MAX_MRQS
+#define OCS_HW_MAX_MRQS 8
+#endif
+
+/**
+ * @brief Define how many WQEs are submitted between settings of the WQEC bit,
+ * which causes a consumed/released completion to be posted.
+ */
+#ifndef OCS_HW_WQEC_SET_COUNT
+#define OCS_HW_WQEC_SET_COUNT 32
+#endif
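+
+/*
+ * With the default of 32, every 32nd WQE submitted to a WQ is marked with
+ * the WQEC bit; the resulting consumed/released completion gives the driver
+ * a point at which to replenish the WQ free count (see wqec_set_count and
+ * wqec_count in hw_wq_t above).
+ */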
+
+/**
+ * @brief Send frame timeout in seconds
+ */
+#ifndef OCS_HW_SEND_FRAME_TIMEOUT
+#define OCS_HW_SEND_FRAME_TIMEOUT 10
+#endif
+
+/**
+ * @brief FDT Transfer Hint value; reads larger than this value are
+ * segmented to implement fairness. A value of zero disables the
+ * feature.
+ */
+#ifndef OCS_HW_FDT_XFER_HINT
+#define OCS_HW_FDT_XFER_HINT 8192
+#endif
+
+#endif /* !_OCS_HW_H */
diff --git a/sys/dev/ocs_fc/ocs_hw_queues.c b/sys/dev/ocs_fc/ocs_hw_queues.c
new file mode 100644
index 000000000000..2527d72865a7
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs_hw_queues.c
@@ -0,0 +1,2602 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/**
+ * @file
+ *
+ */
+
+#include "ocs_os.h"
+#include "ocs_hw.h"
+#include "ocs_hw_queues.h"
+
+#define HW_QTOP_DEBUG 0
+
+/**
+ * @brief Initialize queues
+ *
+ * Given the parsed queue topology spec, the SLI queues are created and
+ * initialized
+ *
+ * @param hw pointer to HW object
+ * @param qtop pointer to queue topology
+ *
+ * @return returns 0 for success, an error code value for failure.
+ */
+ocs_hw_rtn_e
+ocs_hw_init_queues(ocs_hw_t *hw, ocs_hw_qtop_t *qtop)
+{
+ uint32_t i, j;
+ uint32_t default_lengths[QTOP_LAST], len;
+ uint32_t rqset_len = 0, rqset_ulp = 0, rqset_count = 0;
+ uint8_t rqset_filter_mask = 0;
+ hw_eq_t *eqs[hw->config.n_rq];
+ hw_cq_t *cqs[hw->config.n_rq];
+ hw_rq_t *rqs[hw->config.n_rq];
+ ocs_hw_qtop_entry_t *qt, *next_qt;
+ ocs_hw_mrq_t mrq;
+ bool use_mrq = FALSE;
+
+ hw_eq_t *eq = NULL;
+ hw_cq_t *cq = NULL;
+ hw_wq_t *wq = NULL;
+ hw_rq_t *rq = NULL;
+ hw_mq_t *mq = NULL;
+
+ mrq.num_pairs = 0;
+ default_lengths[QTOP_EQ] = 1024;
+ default_lengths[QTOP_CQ] = hw->num_qentries[SLI_QTYPE_CQ];
+ default_lengths[QTOP_WQ] = hw->num_qentries[SLI_QTYPE_WQ];
+ default_lengths[QTOP_RQ] = hw->num_qentries[SLI_QTYPE_RQ];
+ default_lengths[QTOP_MQ] = OCS_HW_MQ_DEPTH;
+
+ ocs_hw_verify(hw != NULL, OCS_HW_RTN_INVALID_ARG);
+
+ hw->eq_count = 0;
+ hw->cq_count = 0;
+ hw->mq_count = 0;
+ hw->wq_count = 0;
+ hw->rq_count = 0;
+ hw->hw_rq_count = 0;
+ ocs_list_init(&hw->eq_list, hw_eq_t, link);
+
+	/* If MRQ is requested, check whether it is supported by SLI. */
+	if ((hw->config.n_rq > 1) && !hw->sli.config.features.flag.mrqp) {
+ ocs_log_err(hw->os, "MRQ topology not supported by SLI4.\n");
+ return OCS_HW_RTN_ERROR;
+ }
+
+ if (hw->config.n_rq > 1)
+ use_mrq = TRUE;
+
+ /* Allocate class WQ pools */
+ for (i = 0; i < ARRAY_SIZE(hw->wq_class_array); i++) {
+ hw->wq_class_array[i] = ocs_varray_alloc(hw->os, OCS_HW_MAX_NUM_WQ);
+ if (hw->wq_class_array[i] == NULL) {
+ ocs_log_err(hw->os, "ocs_varray_alloc for wq_class failed\n");
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+ }
+
+ /* Allocate per CPU WQ pools */
+ for (i = 0; i < ARRAY_SIZE(hw->wq_cpu_array); i++) {
+ hw->wq_cpu_array[i] = ocs_varray_alloc(hw->os, OCS_HW_MAX_NUM_WQ);
+ if (hw->wq_cpu_array[i] == NULL) {
+			ocs_log_err(hw->os, "ocs_varray_alloc for wq_cpu failed\n");
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+ }
+
+
+ ocs_hw_assert(qtop != NULL);
+
+ for (i = 0, qt = qtop->entries; i < qtop->inuse_count; i++, qt++) {
+ if (i == qtop->inuse_count - 1)
+ next_qt = NULL;
+ else
+ next_qt = qt + 1;
+
+ switch(qt->entry) {
+ case QTOP_EQ:
+ len = (qt->len) ? qt->len : default_lengths[QTOP_EQ];
+
+ if (qt->set_default) {
+ default_lengths[QTOP_EQ] = len;
+ break;
+ }
+
+ eq = hw_new_eq(hw, len);
+ if (eq == NULL) {
+ hw_queue_teardown(hw);
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+ break;
+
+ case QTOP_CQ:
+ len = (qt->len) ? qt->len : default_lengths[QTOP_CQ];
+
+ if (qt->set_default) {
+ default_lengths[QTOP_CQ] = len;
+ break;
+ }
+
+ /* If this CQ is for MRQ, then delay the creation */
+ if (!use_mrq || next_qt->entry != QTOP_RQ) {
+ cq = hw_new_cq(eq, len);
+ if (cq == NULL) {
+ hw_queue_teardown(hw);
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+ }
+ break;
+
+ case QTOP_WQ: {
+
+ len = (qt->len) ? qt->len : default_lengths[QTOP_WQ];
+ if (qt->set_default) {
+ default_lengths[QTOP_WQ] = len;
+ break;
+ }
+
+ if ((hw->ulp_start + qt->ulp) > hw->ulp_max) {
+ ocs_log_err(hw->os, "invalid ULP %d for WQ\n", qt->ulp);
+ hw_queue_teardown(hw);
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ wq = hw_new_wq(cq, len, qt->class, hw->ulp_start + qt->ulp);
+ if (wq == NULL) {
+ hw_queue_teardown(hw);
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ /* Place this WQ on the EQ WQ array */
+ if (ocs_varray_add(eq->wq_array, wq)) {
+ ocs_log_err(hw->os, "QTOP_WQ: EQ ocs_varray_add failed\n");
+ hw_queue_teardown(hw);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ /* Place this WQ on the HW class array */
+ if (qt->class < ARRAY_SIZE(hw->wq_class_array)) {
+ if (ocs_varray_add(hw->wq_class_array[qt->class], wq)) {
+ ocs_log_err(hw->os, "HW wq_class_array ocs_varray_add failed\n");
+ hw_queue_teardown(hw);
+ return OCS_HW_RTN_ERROR;
+ }
+ } else {
+ ocs_log_err(hw->os, "Invalid class value: %d\n", qt->class);
+ hw_queue_teardown(hw);
+ return OCS_HW_RTN_ERROR;
+ }
+
+			/*
+			 * Place this WQ on the per-CPU list, assuming that EQs are mapped to a CPU
+			 * given by the EQ instance modulo the number of CPUs
+			 */
+ if (ocs_varray_add(hw->wq_cpu_array[eq->instance % ocs_get_num_cpus()], wq)) {
+ ocs_log_err(hw->os, "HW wq_cpu_array ocs_varray_add failed\n");
+ hw_queue_teardown(hw);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ break;
+ }
+ case QTOP_RQ: {
+ len = (qt->len) ? qt->len : default_lengths[QTOP_RQ];
+ if (qt->set_default) {
+ default_lengths[QTOP_RQ] = len;
+ break;
+ }
+
+ if ((hw->ulp_start + qt->ulp) > hw->ulp_max) {
+ ocs_log_err(hw->os, "invalid ULP %d for RQ\n", qt->ulp);
+ hw_queue_teardown(hw);
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ if (use_mrq) {
+ mrq.rq_cfg[mrq.num_pairs].len = len;
+ mrq.rq_cfg[mrq.num_pairs].ulp = hw->ulp_start + qt->ulp;
+ mrq.rq_cfg[mrq.num_pairs].filter_mask = qt->filter_mask;
+ mrq.rq_cfg[mrq.num_pairs].eq = eq;
+				mrq.num_pairs++;
+ } else {
+ rq = hw_new_rq(cq, len, hw->ulp_start + qt->ulp);
+ if (rq == NULL) {
+ hw_queue_teardown(hw);
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+ rq->filter_mask = qt->filter_mask;
+ }
+ break;
+ }
+
+ case QTOP_MQ:
+ len = (qt->len) ? qt->len : default_lengths[QTOP_MQ];
+ if (qt->set_default) {
+ default_lengths[QTOP_MQ] = len;
+ break;
+ }
+
+ mq = hw_new_mq(cq, len);
+ if (mq == NULL) {
+ hw_queue_teardown(hw);
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+ break;
+
+ default:
+ ocs_hw_assert(0);
+ break;
+ }
+ }
+
+ if (mrq.num_pairs) {
+ /* First create normal RQs. */
+ for (i = 0; i < mrq.num_pairs; i++) {
+ for (j = 0; j < mrq.num_pairs; j++) {
+ if ((i != j) && (mrq.rq_cfg[i].filter_mask == mrq.rq_cfg[j].filter_mask)) {
+					/* This RQ should be created using a set */
+					if (rqset_filter_mask && (rqset_filter_mask != mrq.rq_cfg[i].filter_mask)) {
+						ocs_log_crit(hw->os, "Can't create more than one RQ Set\n");
+						hw_queue_teardown(hw);
+						return OCS_HW_RTN_ERROR;
+					} else if (!rqset_filter_mask) {
+ rqset_filter_mask = mrq.rq_cfg[i].filter_mask;
+ rqset_len = mrq.rq_cfg[i].len;
+ rqset_ulp = mrq.rq_cfg[i].ulp;
+ }
+ eqs[rqset_count] = mrq.rq_cfg[i].eq;
+ rqset_count++;
+ break;
+ }
+ }
+ if (j == mrq.num_pairs) {
+ /* Normal RQ */
+ cq = hw_new_cq(mrq.rq_cfg[i].eq, default_lengths[QTOP_CQ]);
+ if (cq == NULL) {
+ hw_queue_teardown(hw);
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ rq = hw_new_rq(cq, mrq.rq_cfg[i].len, mrq.rq_cfg[i].ulp);
+ if (rq == NULL) {
+ hw_queue_teardown(hw);
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+ rq->filter_mask = mrq.rq_cfg[i].filter_mask;
+ }
+ }
+
+ /* Now create RQ Set */
+ if (rqset_count) {
+ if (rqset_count > OCE_HW_MAX_NUM_MRQ_PAIRS) {
+ ocs_log_crit(hw->os,
+ "Max Supported MRQ pairs = %d\n",
+ OCE_HW_MAX_NUM_MRQ_PAIRS);
+ hw_queue_teardown(hw);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ /* Create CQ set */
+ if (hw_new_cq_set(eqs, cqs, rqset_count, default_lengths[QTOP_CQ])) {
+ hw_queue_teardown(hw);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ /* Create RQ set */
+ if (hw_new_rq_set(cqs, rqs, rqset_count, rqset_len, rqset_ulp)) {
+ hw_queue_teardown(hw);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ for (i = 0; i < rqset_count ; i++) {
+ rqs[i]->filter_mask = rqset_filter_mask;
+ rqs[i]->is_mrq = TRUE;
+ rqs[i]->base_mrq_id = rqs[0]->hdr->id;
+ }
+
+ hw->hw_mrq_count = rqset_count;
+ }
+ }
+
+ return OCS_HW_RTN_SUCCESS;
+
+}
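+
+/*
+ * Illustrative sketch, not part of the driver: the allocation chain used
+ * above for a simple (non-MRQ) topology. The lengths, class, and ULP
+ * values are arbitrary examples.
+ */
+#if 0
+	hw_eq_t *eq = hw_new_eq(hw, 1024);
+	hw_cq_t *cq = hw_new_cq(eq, 1024);
+	hw_wq_t *wq = hw_new_wq(cq, 1024, 0, hw->ulp_start);
+	hw_rq_t *rq = hw_new_rq(hw_new_cq(eq, 1024), 1024, hw->ulp_start);
+#endif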
+
+/**
+ * @brief Allocate a new EQ object
+ *
+ * A new EQ object is instantiated
+ *
+ * @param hw pointer to HW object
+ * @param entry_count number of entries in the EQ
+ *
+ * @return pointer to allocated EQ object
+ */
+hw_eq_t*
+hw_new_eq(ocs_hw_t *hw, uint32_t entry_count)
+{
+ hw_eq_t *eq = ocs_malloc(hw->os, sizeof(*eq), OCS_M_ZERO | OCS_M_NOWAIT);
+
+ if (eq != NULL) {
+ eq->type = SLI_QTYPE_EQ;
+ eq->hw = hw;
+ eq->entry_count = entry_count;
+ eq->instance = hw->eq_count++;
+ eq->queue = &hw->eq[eq->instance];
+ ocs_list_init(&eq->cq_list, hw_cq_t, link);
+
+ eq->wq_array = ocs_varray_alloc(hw->os, OCS_HW_MAX_NUM_WQ);
+ if (eq->wq_array == NULL) {
+ ocs_free(hw->os, eq, sizeof(*eq));
+ eq = NULL;
+ } else {
+ if (sli_queue_alloc(&hw->sli, SLI_QTYPE_EQ, eq->queue, entry_count, NULL, 0)) {
+ ocs_log_err(hw->os, "EQ[%d] allocation failure\n", eq->instance);
+ ocs_free(hw->os, eq, sizeof(*eq));
+ eq = NULL;
+ } else {
+ sli_eq_modify_delay(&hw->sli, eq->queue, 1, 0, 8);
+ hw->hw_eq[eq->instance] = eq;
+ ocs_list_add_tail(&hw->eq_list, eq);
+ ocs_log_debug(hw->os, "create eq[%2d] id %3d len %4d\n", eq->instance, eq->queue->id,
+ eq->entry_count);
+ }
+ }
+ }
+ return eq;
+}
+
+/**
+ * @brief Allocate a new CQ object
+ *
+ * A new CQ object is instantiated
+ *
+ * @param eq pointer to parent EQ object
+ * @param entry_count number of entries in the CQ
+ *
+ * @return pointer to allocated CQ object
+ */
+hw_cq_t*
+hw_new_cq(hw_eq_t *eq, uint32_t entry_count)
+{
+ ocs_hw_t *hw = eq->hw;
+ hw_cq_t *cq = ocs_malloc(hw->os, sizeof(*cq), OCS_M_ZERO | OCS_M_NOWAIT);
+
+ if (cq != NULL) {
+ cq->eq = eq;
+ cq->type = SLI_QTYPE_CQ;
+ cq->instance = eq->hw->cq_count++;
+ cq->entry_count = entry_count;
+ cq->queue = &hw->cq[cq->instance];
+
+ ocs_list_init(&cq->q_list, hw_q_t, link);
+
+ if (sli_queue_alloc(&hw->sli, SLI_QTYPE_CQ, cq->queue, cq->entry_count, eq->queue, 0)) {
+			ocs_log_err(hw->os, "CQ[%d] allocation failure len=%d\n",
+				cq->instance,
+				cq->entry_count);
+ ocs_free(hw->os, cq, sizeof(*cq));
+ cq = NULL;
+ } else {
+ hw->hw_cq[cq->instance] = cq;
+ ocs_list_add_tail(&eq->cq_list, cq);
+ ocs_log_debug(hw->os, "create cq[%2d] id %3d len %4d\n", cq->instance, cq->queue->id,
+ cq->entry_count);
+ }
+ }
+ return cq;
+}
+
+/**
+ * @brief Allocate a new CQ Set of objects.
+ *
+ * @param eqs pointer to a set of EQ objects.
+ * @param cqs pointer to a set of CQ objects to be returned.
+ * @param num_cqs number of CQ queues in the set.
+ * @param entry_count number of entries in the CQ.
+ *
+ * @return 0 on success and -1 on failure.
+ */
+uint32_t
+hw_new_cq_set(hw_eq_t *eqs[], hw_cq_t *cqs[], uint32_t num_cqs, uint32_t entry_count)
+{
+ uint32_t i;
+ ocs_hw_t *hw = eqs[0]->hw;
+ sli4_t *sli4 = &hw->sli;
+ hw_cq_t *cq = NULL;
+ sli4_queue_t *qs[SLI_MAX_CQ_SET_COUNT], *assocs[SLI_MAX_CQ_SET_COUNT];
+
+ /* Initialise CQS pointers to NULL */
+ for (i = 0; i < num_cqs; i++) {
+ cqs[i] = NULL;
+ }
+
+ for (i = 0; i < num_cqs; i++) {
+ cq = ocs_malloc(hw->os, sizeof(*cq), OCS_M_ZERO | OCS_M_NOWAIT);
+ if (cq == NULL)
+ goto error;
+
+ cqs[i] = cq;
+ cq->eq = eqs[i];
+ cq->type = SLI_QTYPE_CQ;
+ cq->instance = hw->cq_count++;
+ cq->entry_count = entry_count;
+ cq->queue = &hw->cq[cq->instance];
+ qs[i] = cq->queue;
+ assocs[i] = eqs[i]->queue;
+ ocs_list_init(&cq->q_list, hw_q_t, link);
+ }
+
+ if (sli_cq_alloc_set(sli4, qs, num_cqs, entry_count, assocs)) {
+		ocs_log_err(NULL, "Failed to create CQ Set\n");
+ goto error;
+ }
+
+ for (i = 0; i < num_cqs; i++) {
+ hw->hw_cq[cqs[i]->instance] = cqs[i];
+ ocs_list_add_tail(&cqs[i]->eq->cq_list, cqs[i]);
+ }
+
+ return 0;
+
+error:
+ for (i = 0; i < num_cqs; i++) {
+ if (cqs[i]) {
+ ocs_free(hw->os, cqs[i], sizeof(*cqs[i]));
+ cqs[i] = NULL;
+ }
+ }
+ return -1;
+}
+
+
+/**
+ * @brief Allocate a new MQ object
+ *
+ * A new MQ object is instantiated
+ *
+ * @param cq pointer to parent CQ object
+ * @param entry_count number of entries in the MQ
+ *
+ * @return pointer to allocated MQ object
+ */
+hw_mq_t*
+hw_new_mq(hw_cq_t *cq, uint32_t entry_count)
+{
+ ocs_hw_t *hw = cq->eq->hw;
+ hw_mq_t *mq = ocs_malloc(hw->os, sizeof(*mq), OCS_M_ZERO | OCS_M_NOWAIT);
+
+ if (mq != NULL) {
+ mq->cq = cq;
+ mq->type = SLI_QTYPE_MQ;
+ mq->instance = cq->eq->hw->mq_count++;
+ mq->entry_count = entry_count;
+ mq->entry_size = OCS_HW_MQ_DEPTH;
+ mq->queue = &hw->mq[mq->instance];
+
+ if (sli_queue_alloc(&hw->sli, SLI_QTYPE_MQ,
+ mq->queue,
+ mq->entry_size,
+ cq->queue, 0)) {
+ ocs_log_err(hw->os, "MQ allocation failure\n");
+ ocs_free(hw->os, mq, sizeof(*mq));
+ mq = NULL;
+ } else {
+ hw->hw_mq[mq->instance] = mq;
+ ocs_list_add_tail(&cq->q_list, mq);
+ ocs_log_debug(hw->os, "create mq[%2d] id %3d len %4d\n", mq->instance, mq->queue->id,
+ mq->entry_count);
+ }
+ }
+ return mq;
+}
+
+/**
+ * @brief Allocate a new WQ object
+ *
+ * A new WQ object is instantiated
+ *
+ * @param cq pointer to parent CQ object
+ * @param entry_count number of entries in the WQ
+ * @param class WQ class
+ * @param ulp index of chute
+ *
+ * @return pointer to allocated WQ object
+ */
+hw_wq_t*
+hw_new_wq(hw_cq_t *cq, uint32_t entry_count, uint32_t class, uint32_t ulp)
+{
+ ocs_hw_t *hw = cq->eq->hw;
+ hw_wq_t *wq = ocs_malloc(hw->os, sizeof(*wq), OCS_M_ZERO | OCS_M_NOWAIT);
+
+ if (wq != NULL) {
+ wq->hw = cq->eq->hw;
+ wq->cq = cq;
+ wq->type = SLI_QTYPE_WQ;
+ wq->instance = cq->eq->hw->wq_count++;
+ wq->entry_count = entry_count;
+ wq->queue = &hw->wq[wq->instance];
+ wq->ulp = ulp;
+ wq->wqec_set_count = OCS_HW_WQEC_SET_COUNT;
+ wq->wqec_count = wq->wqec_set_count;
+ wq->free_count = wq->entry_count - 1;
+ wq->class = class;
+ ocs_list_init(&wq->pending_list, ocs_hw_wqe_t, link);
+
+ if (sli_queue_alloc(&hw->sli, SLI_QTYPE_WQ, wq->queue, wq->entry_count, cq->queue, ulp)) {
+ ocs_log_err(hw->os, "WQ allocation failure\n");
+ ocs_free(hw->os, wq, sizeof(*wq));
+ wq = NULL;
+ } else {
+ hw->hw_wq[wq->instance] = wq;
+ ocs_list_add_tail(&cq->q_list, wq);
+ ocs_log_debug(hw->os, "create wq[%2d] id %3d len %4d cls %d ulp %d\n", wq->instance, wq->queue->id,
+ wq->entry_count, wq->class, wq->ulp);
+ }
+ }
+ return wq;
+}
+
+/**
+ * @brief Allocate a hw_rq_t object
+ *
+ * Allocate an RQ object, which encapsulates 2 SLI queues (for rq pair)
+ *
+ * @param cq pointer to parent CQ object
+ * @param entry_count number of entries in the RQs
+ * @param ulp ULP index for this RQ
+ *
+ * @return pointer to newly allocated hw_rq_t
+ */
+hw_rq_t*
+hw_new_rq(hw_cq_t *cq, uint32_t entry_count, uint32_t ulp)
+{
+ ocs_hw_t *hw = cq->eq->hw;
+ hw_rq_t *rq = ocs_malloc(hw->os, sizeof(*rq), OCS_M_ZERO | OCS_M_NOWAIT);
+ uint32_t max_hw_rq;
+
+ ocs_hw_get(hw, OCS_HW_MAX_RQ_ENTRIES, &max_hw_rq);
+
+
+ if (rq != NULL) {
+ rq->instance = hw->hw_rq_count++;
+ rq->cq = cq;
+ rq->type = SLI_QTYPE_RQ;
+ rq->ulp = ulp;
+
+ rq->entry_count = OCS_MIN(entry_count, OCS_MIN(max_hw_rq, OCS_HW_RQ_NUM_HDR));
+
+ /* Create the header RQ */
+ ocs_hw_assert(hw->rq_count < ARRAY_SIZE(hw->rq));
+ rq->hdr = &hw->rq[hw->rq_count];
+ rq->hdr_entry_size = OCS_HW_RQ_HEADER_SIZE;
+
+ if (sli_fc_rq_alloc(&hw->sli, rq->hdr,
+ rq->entry_count,
+ rq->hdr_entry_size,
+ cq->queue,
+ ulp, TRUE)) {
+ ocs_log_err(hw->os, "RQ allocation failure - header\n");
+ ocs_free(hw->os, rq, sizeof(*rq));
+ return NULL;
+ }
+ hw->hw_rq_lookup[hw->rq_count] = rq->instance; /* Update hw_rq_lookup[] */
+ hw->rq_count++;
+ ocs_log_debug(hw->os, "create rq[%2d] id %3d len %4d hdr size %4d ulp %d\n",
+ rq->instance, rq->hdr->id, rq->entry_count, rq->hdr_entry_size, rq->ulp);
+
+ /* Create the default data RQ */
+ ocs_hw_assert(hw->rq_count < ARRAY_SIZE(hw->rq));
+ rq->data = &hw->rq[hw->rq_count];
+ rq->data_entry_size = hw->config.rq_default_buffer_size;
+
+ if (sli_fc_rq_alloc(&hw->sli, rq->data,
+ rq->entry_count,
+ rq->data_entry_size,
+ cq->queue,
+ ulp, FALSE)) {
+ ocs_log_err(hw->os, "RQ allocation failure - first burst\n");
+ ocs_free(hw->os, rq, sizeof(*rq));
+ return NULL;
+ }
+ hw->hw_rq_lookup[hw->rq_count] = rq->instance; /* Update hw_rq_lookup[] */
+ hw->rq_count++;
+ ocs_log_debug(hw->os, "create rq[%2d] id %3d len %4d data size %4d ulp %d\n", rq->instance,
+ rq->data->id, rq->entry_count, rq->data_entry_size, rq->ulp);
+
+ hw->hw_rq[rq->instance] = rq;
+ ocs_list_add_tail(&cq->q_list, rq);
+
+ rq->rq_tracker = ocs_malloc(hw->os, sizeof(ocs_hw_sequence_t*) *
+ rq->entry_count, OCS_M_ZERO | OCS_M_NOWAIT);
+ if (rq->rq_tracker == NULL) {
+ ocs_log_err(hw->os, "RQ tracker buf allocation failure\n");
+ return NULL;
+ }
+ }
+ return rq;
+}
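+
+/*
+ * Usage sketch (illustrative only; the CQ and entry count are assumed to
+ * come from the caller's topology setup):
+ *
+ *	hw_rq_t *rq = hw_new_rq(cq, 1024, 0);
+ *	if (rq == NULL)
+ *		return OCS_HW_RTN_NO_MEMORY;
+ *
+ * On success the object owns two SLI queues, rq->hdr for frame headers and
+ * rq->data for payloads, which are posted together in RQ pair mode.
+ */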
+
+/**
+ * @brief Allocate a hw_rq_t object SET
+ *
+ * Allocate an RQ object SET, where each element in the set
+ * encapsulates two SLI queues (an RQ pair)
+ *
+ * @param cqs array of CQ pointers to be associated with the RQs.
+ * @param rqs array of RQ pointers to be returned on success.
+ * @param num_rq_pairs number of RQ pairs in the set.
+ * @param entry_count number of entries in the RQs
+ * @param ulp ULP index for this RQ
+ *
+ * @return Returns 0 on success, or -1 on failure.
+ */
+uint32_t
+hw_new_rq_set(hw_cq_t *cqs[], hw_rq_t *rqs[], uint32_t num_rq_pairs, uint32_t entry_count, uint32_t ulp)
+{
+ ocs_hw_t *hw = cqs[0]->eq->hw;
+ hw_rq_t *rq = NULL;
+ sli4_queue_t *qs[SLI_MAX_RQ_SET_COUNT * 2] = { NULL };
+ uint32_t max_hw_rq, i, q_count;
+
+ ocs_hw_get(hw, OCS_HW_MAX_RQ_ENTRIES, &max_hw_rq);
+
+	/* Initialize RQ pointers */
+ for (i = 0; i < num_rq_pairs; i++) {
+ rqs[i] = NULL;
+ }
+
+ for (i = 0, q_count = 0; i < num_rq_pairs; i++, q_count += 2) {
+ rq = ocs_malloc(hw->os, sizeof(*rq), OCS_M_ZERO | OCS_M_NOWAIT);
+ if (rq == NULL)
+ goto error;
+
+ rqs[i] = rq;
+ rq->instance = hw->hw_rq_count++;
+ rq->cq = cqs[i];
+ rq->type = SLI_QTYPE_RQ;
+ rq->ulp = ulp;
+ rq->entry_count = OCS_MIN(entry_count, OCS_MIN(max_hw_rq, OCS_HW_RQ_NUM_HDR));
+
+ /* Header RQ */
+ rq->hdr = &hw->rq[hw->rq_count];
+ rq->hdr_entry_size = OCS_HW_RQ_HEADER_SIZE;
+ hw->hw_rq_lookup[hw->rq_count] = rq->instance;
+ hw->rq_count++;
+ qs[q_count] = rq->hdr;
+
+ /* Data RQ */
+ rq->data = &hw->rq[hw->rq_count];
+ rq->data_entry_size = hw->config.rq_default_buffer_size;
+ hw->hw_rq_lookup[hw->rq_count] = rq->instance;
+ hw->rq_count++;
+ qs[q_count + 1] = rq->data;
+
+ rq->rq_tracker = NULL;
+ }
+
+ if (sli_fc_rq_set_alloc(&hw->sli, num_rq_pairs, qs,
+ cqs[0]->queue->id,
+ rqs[0]->entry_count,
+ rqs[0]->hdr_entry_size,
+ rqs[0]->data_entry_size,
+ ulp)) {
+ ocs_log_err(hw->os, "RQ Set allocation failure for base CQ=%d\n", cqs[0]->queue->id);
+ goto error;
+ }
+
+ for (i = 0; i < num_rq_pairs; i++) {
+ hw->hw_rq[rqs[i]->instance] = rqs[i];
+ ocs_list_add_tail(&cqs[i]->q_list, rqs[i]);
+ rqs[i]->rq_tracker = ocs_malloc(hw->os, sizeof(ocs_hw_sequence_t*) *
+ rqs[i]->entry_count, OCS_M_ZERO | OCS_M_NOWAIT);
+ if (rqs[i]->rq_tracker == NULL) {
+ ocs_log_err(hw->os, "RQ tracker buf allocation failure\n");
+ goto error;
+ }
+ }
+
+ return 0;
+
+error:
+ for (i = 0; i < num_rq_pairs; i++) {
+ if (rqs[i] != NULL) {
+ if (rqs[i]->rq_tracker != NULL) {
+				ocs_free(hw->os, rqs[i]->rq_tracker,
+					 sizeof(ocs_hw_sequence_t*) * rqs[i]->entry_count);
+ }
+ ocs_free(hw->os, rqs[i], sizeof(*rqs[i]));
+ }
+ }
+
+ return -1;
+}
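+
+/*
+ * Usage sketch (illustrative; the CQ array and the pair count of 4 are
+ * assumed to be prepared by the caller):
+ *
+ *	hw_cq_t *cqs[4];
+ *	hw_rq_t *rqs[4];
+ *
+ *	if (hw_new_rq_set(cqs, rqs, 4, 1024, 0))
+ *		... unwind queue setup and fail ...
+ *
+ * All of the header/data pairs in the set are created by a single call to
+ * sli_fc_rq_set_alloc(), rather than one allocation per RQ pair.
+ */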
+
+/**
+ * @brief Free an EQ object
+ *
+ * The EQ object and any child queue objects are freed
+ *
+ * @param eq pointer to EQ object
+ *
+ * @return none
+ */
+void
+hw_del_eq(hw_eq_t *eq)
+{
+ if (eq != NULL) {
+ hw_cq_t *cq;
+ hw_cq_t *cq_next;
+
+ ocs_list_foreach_safe(&eq->cq_list, cq, cq_next) {
+ hw_del_cq(cq);
+ }
+ ocs_varray_free(eq->wq_array);
+ ocs_list_remove(&eq->hw->eq_list, eq);
+ eq->hw->hw_eq[eq->instance] = NULL;
+ ocs_free(eq->hw->os, eq, sizeof(*eq));
+ }
+}
+
+/**
+ * @brief Free a CQ object
+ *
+ * The CQ object and any child queue objects are freed
+ *
+ * @param cq pointer to CQ object
+ *
+ * @return none
+ */
+void
+hw_del_cq(hw_cq_t *cq)
+{
+ if (cq != NULL) {
+ hw_q_t *q;
+ hw_q_t *q_next;
+
+ ocs_list_foreach_safe(&cq->q_list, q, q_next) {
+ switch(q->type) {
+ case SLI_QTYPE_MQ:
+ hw_del_mq((hw_mq_t*) q);
+ break;
+ case SLI_QTYPE_WQ:
+ hw_del_wq((hw_wq_t*) q);
+ break;
+ case SLI_QTYPE_RQ:
+ hw_del_rq((hw_rq_t*) q);
+ break;
+ default:
+ break;
+ }
+ }
+ ocs_list_remove(&cq->eq->cq_list, cq);
+ cq->eq->hw->hw_cq[cq->instance] = NULL;
+ ocs_free(cq->eq->hw->os, cq, sizeof(*cq));
+ }
+}
+
+/**
+ * @brief Free a MQ object
+ *
+ * The MQ object is freed
+ *
+ * @param mq pointer to MQ object
+ *
+ * @return none
+ */
+void
+hw_del_mq(hw_mq_t *mq)
+{
+ if (mq != NULL) {
+ ocs_list_remove(&mq->cq->q_list, mq);
+ mq->cq->eq->hw->hw_mq[mq->instance] = NULL;
+ ocs_free(mq->cq->eq->hw->os, mq, sizeof(*mq));
+ }
+}
+
+/**
+ * @brief Free a WQ object
+ *
+ * The WQ object is freed
+ *
+ * @param wq pointer to WQ object
+ *
+ * @return none
+ */
+void
+hw_del_wq(hw_wq_t *wq)
+{
+ if (wq != NULL) {
+ ocs_list_remove(&wq->cq->q_list, wq);
+ wq->cq->eq->hw->hw_wq[wq->instance] = NULL;
+ ocs_free(wq->cq->eq->hw->os, wq, sizeof(*wq));
+ }
+}
+
+/**
+ * @brief Free an RQ object
+ *
+ * The RQ object is freed
+ *
+ * @param rq pointer to RQ object
+ *
+ * @return none
+ */
+void
+hw_del_rq(hw_rq_t *rq)
+{
+	if (rq != NULL) {
+		ocs_hw_t *hw = rq->cq->eq->hw;
+
+ /* Free RQ tracker */
+ if (rq->rq_tracker != NULL) {
+ ocs_free(hw->os, rq->rq_tracker, sizeof(ocs_hw_sequence_t*) * rq->entry_count);
+ rq->rq_tracker = NULL;
+ }
+ ocs_list_remove(&rq->cq->q_list, rq);
+ hw->hw_rq[rq->instance] = NULL;
+ ocs_free(hw->os, rq, sizeof(*rq));
+ }
+}
+
+/**
+ * @brief Display HW queue objects
+ *
+ * The HW queue objects are displayed using ocs_log
+ *
+ * @param hw pointer to HW object
+ *
+ * @return none
+ */
+void
+hw_queue_dump(ocs_hw_t *hw)
+{
+ hw_eq_t *eq;
+ hw_cq_t *cq;
+ hw_q_t *q;
+ hw_mq_t *mq;
+ hw_wq_t *wq;
+ hw_rq_t *rq;
+
+ ocs_list_foreach(&hw->eq_list, eq) {
+ ocs_printf("eq[%d] id %2d\n", eq->instance, eq->queue->id);
+ ocs_list_foreach(&eq->cq_list, cq) {
+ ocs_printf(" cq[%d] id %2d current\n", cq->instance, cq->queue->id);
+ ocs_list_foreach(&cq->q_list, q) {
+ switch(q->type) {
+ case SLI_QTYPE_MQ:
+ mq = (hw_mq_t *) q;
+ ocs_printf(" mq[%d] id %2d\n", mq->instance, mq->queue->id);
+ break;
+ case SLI_QTYPE_WQ:
+ wq = (hw_wq_t *) q;
+ ocs_printf(" wq[%d] id %2d\n", wq->instance, wq->queue->id);
+ break;
+ case SLI_QTYPE_RQ:
+ rq = (hw_rq_t *) q;
+ ocs_printf(" rq[%d] hdr id %2d\n", rq->instance, rq->hdr->id);
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ }
+}
+
+/**
+ * @brief Teardown HW queue objects
+ *
+ * The HW queue objects are freed
+ *
+ * @param hw pointer to HW object
+ *
+ * @return none
+ */
+void
+hw_queue_teardown(ocs_hw_t *hw)
+{
+ uint32_t i;
+ hw_eq_t *eq;
+ hw_eq_t *eq_next;
+
+ if (ocs_list_valid(&hw->eq_list)) {
+ ocs_list_foreach_safe(&hw->eq_list, eq, eq_next) {
+ hw_del_eq(eq);
+ }
+ }
+ for (i = 0; i < ARRAY_SIZE(hw->wq_cpu_array); i++) {
+ ocs_varray_free(hw->wq_cpu_array[i]);
+ hw->wq_cpu_array[i] = NULL;
+ }
+ for (i = 0; i < ARRAY_SIZE(hw->wq_class_array); i++) {
+ ocs_varray_free(hw->wq_class_array[i]);
+ hw->wq_class_array[i] = NULL;
+ }
+}
+
+/**
+ * @brief Allocate a WQ to an IO object
+ *
+ * The next work queue index is used to assign a WQ to an IO.
+ *
+ * If wq_steering is OCS_HW_WQ_STEERING_CLASS, a WQ from io->wq_class is
+ * selected.
+ *
+ * If wq_steering is OCS_HW_WQ_STEERING_REQUEST, then a WQ from the EQ that
+ * the IO request came in on is selected.
+ *
+ * If wq_steering is OCS_HW_WQ_STEERING_CPU, then a WQ associated with the
+ * CPU the request is made on is selected.
+ *
+ * @param hw pointer to HW object
+ * @param io pointer to IO object
+ *
+ * @return Return pointer to next WQ
+ */
+hw_wq_t *
+ocs_hw_queue_next_wq(ocs_hw_t *hw, ocs_hw_io_t *io)
+{
+ hw_eq_t *eq;
+ hw_wq_t *wq = NULL;
+
+ switch(io->wq_steering) {
+ case OCS_HW_WQ_STEERING_CLASS:
+ if (likely(io->wq_class < ARRAY_SIZE(hw->wq_class_array))) {
+ wq = ocs_varray_iter_next(hw->wq_class_array[io->wq_class]);
+ }
+ break;
+ case OCS_HW_WQ_STEERING_REQUEST:
+ eq = io->eq;
+ if (likely(eq != NULL)) {
+ wq = ocs_varray_iter_next(eq->wq_array);
+ }
+ break;
+ case OCS_HW_WQ_STEERING_CPU: {
+ uint32_t cpuidx = ocs_thread_getcpu();
+
+ if (likely(cpuidx < ARRAY_SIZE(hw->wq_cpu_array))) {
+ wq = ocs_varray_iter_next(hw->wq_cpu_array[cpuidx]);
+ }
+ break;
+ }
+ }
+
+ if (unlikely(wq == NULL)) {
+ wq = hw->hw_wq[0];
+ }
+
+ return wq;
+}
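+
+/*
+ * Illustrative call (the IO fields are assumed to have been filled in by
+ * the SCSI layer):
+ *
+ *	io->wq_steering = OCS_HW_WQ_STEERING_CLASS;
+ *	io->wq_class = 1;
+ *	wq = ocs_hw_queue_next_wq(hw, io);
+ *
+ * The return value is always usable: if no steering match is found, the
+ * function falls back to hw->hw_wq[0].
+ */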
+
+/**
+ * @brief Return count of EQs for a queue topology object
+ *
+ * The EQ count in the HW's queue topology (hw->qtop) object is returned.
+ *
+ * @param hw pointer to HW object
+ *
+ * @return count of EQs
+ */
+uint32_t
+ocs_hw_qtop_eq_count(ocs_hw_t *hw)
+{
+ return hw->qtop->entry_counts[QTOP_EQ];
+}
+
+#define TOKEN_LEN 32
+
+/**
+ * @brief return string given a QTOP entry
+ *
+ * @param entry QTOP entry
+ *
+ * @return returns string or "unknown"
+ */
+#if HW_QTOP_DEBUG
+static char *
+qtopentry2s(ocs_hw_qtop_entry_e entry) {
+ switch(entry) {
+ #define P(x) case x: return #x;
+ P(QTOP_EQ)
+ P(QTOP_CQ)
+ P(QTOP_WQ)
+ P(QTOP_RQ)
+ P(QTOP_MQ)
+ P(QTOP_THREAD_START)
+ P(QTOP_THREAD_END)
+ P(QTOP_LAST)
+ #undef P
+ }
+ return "unknown";
+}
+#endif
+
+/**
+ * @brief Declare token types
+ */
+typedef enum {
+ TOK_LPAREN = 1,
+ TOK_RPAREN,
+ TOK_COLON,
+ TOK_EQUALS,
+ TOK_QUEUE,
+ TOK_ATTR_NAME,
+ TOK_NUMBER,
+ TOK_NUMBER_VALUE,
+ TOK_NUMBER_LIST,
+} tok_type_e;
+
+/**
+ * @brief Declare token sub-types
+ */
+typedef enum {
+ TOK_SUB_EQ = 100,
+ TOK_SUB_CQ,
+ TOK_SUB_RQ,
+ TOK_SUB_MQ,
+ TOK_SUB_WQ,
+ TOK_SUB_LEN,
+ TOK_SUB_CLASS,
+ TOK_SUB_ULP,
+ TOK_SUB_FILTER,
+} tok_subtype_e;
+
+/**
+ * @brief convert queue subtype to QTOP entry
+ *
+ * @param q queue subtype
+ *
+ * @return QTOP entry or 0
+ */
+static ocs_hw_qtop_entry_e
+subtype2qtop(tok_subtype_e q)
+{
+ switch(q) {
+ case TOK_SUB_EQ: return QTOP_EQ;
+ case TOK_SUB_CQ: return QTOP_CQ;
+ case TOK_SUB_RQ: return QTOP_RQ;
+ case TOK_SUB_MQ: return QTOP_MQ;
+ case TOK_SUB_WQ: return QTOP_WQ;
+ default:
+ break;
+ }
+ return 0;
+}
+
+/**
+ * @brief Declare token object
+ */
+typedef struct {
+ tok_type_e type;
+ tok_subtype_e subtype;
+ char string[TOKEN_LEN];
+} tok_t;
+
+/**
+ * @brief Declare token array object
+ */
+typedef struct {
+ tok_t *tokens; /* Pointer to array of tokens */
+ uint32_t alloc_count; /* Number of tokens in the array */
+ uint32_t inuse_count; /* Number of tokens posted to array */
+ uint32_t iter_idx; /* Iterator index */
+} tokarray_t;
+
+/**
+ * @brief Declare token match structure
+ */
+typedef struct {
+ char *s;
+ tok_type_e type;
+ tok_subtype_e subtype;
+} tokmatch_t;
+
+/**
+ * @brief test if character is ID start character
+ *
+ * @param c character to test
+ *
+ * @return TRUE if character is an ID start character
+ */
+static int32_t
+idstart(int c)
+{
+ return isalpha(c) || (c == '_') || (c == '$');
+}
+
+/**
+ * @brief test if character is an ID character
+ *
+ * @param c character to test
+ *
+ * @return TRUE if character is an ID character
+ */
+static int32_t
+idchar(int c)
+{
+ return idstart(c) || ocs_isdigit(c);
+}
+
+/**
+ * @brief Declare single character matches
+ */
+static tokmatch_t cmatches[] = {
+ {"(", TOK_LPAREN},
+ {")", TOK_RPAREN},
+ {":", TOK_COLON},
+ {"=", TOK_EQUALS},
+};
+
+/**
+ * @brief Declare identifier match strings
+ */
+static tokmatch_t smatches[] = {
+ {"eq", TOK_QUEUE, TOK_SUB_EQ},
+ {"cq", TOK_QUEUE, TOK_SUB_CQ},
+ {"rq", TOK_QUEUE, TOK_SUB_RQ},
+ {"mq", TOK_QUEUE, TOK_SUB_MQ},
+ {"wq", TOK_QUEUE, TOK_SUB_WQ},
+ {"len", TOK_ATTR_NAME, TOK_SUB_LEN},
+ {"class", TOK_ATTR_NAME, TOK_SUB_CLASS},
+ {"ulp", TOK_ATTR_NAME, TOK_SUB_ULP},
+ {"filter", TOK_ATTR_NAME, TOK_SUB_FILTER},
+};
+
+/**
+ * @brief Scan string and return next token
+ *
+ * The string is scanned and the next token is returned
+ *
+ * @param s input string to scan
+ * @param tok pointer to place scanned token
+ *
+ * @return pointer to input string following scanned token, or NULL
+ */
+static const char *
+tokenize(const char *s, tok_t *tok)
+{
+ uint32_t i;
+
+ memset(tok, 0, sizeof(*tok));
+
+ /* Skip over whitespace */
+ while (*s && ocs_isspace(*s)) {
+ s++;
+ }
+
+ /* Return if nothing left in this string */
+ if (*s == 0) {
+ return NULL;
+ }
+
+ /* Look for single character matches */
+ for (i = 0; i < ARRAY_SIZE(cmatches); i++) {
+ if (cmatches[i].s[0] == *s) {
+ tok->type = cmatches[i].type;
+ tok->subtype = cmatches[i].subtype;
+ tok->string[0] = *s++;
+ return s;
+ }
+ }
+
+ /* Scan for a hex number or decimal */
+ if ((s[0] == '0') && ((s[1] == 'x') || (s[1] == 'X'))) {
+ char *p = tok->string;
+
+ tok->type = TOK_NUMBER;
+
+ *p++ = *s++;
+ *p++ = *s++;
+		while ((*s == ',') || ocs_isxdigit(*s)) {
+			if ((p - tok->string) < (int32_t)sizeof(tok->string) - 1) {
+ *p++ = *s;
+ }
+ if (*s == ',') {
+ tok->type = TOK_NUMBER_LIST;
+ }
+ s++;
+ }
+ *p = 0;
+ return s;
+ } else if (ocs_isdigit(*s)) {
+ char *p = tok->string;
+
+ tok->type = TOK_NUMBER;
+ while ((*s == ',') || ocs_isdigit(*s)) {
+			if ((p - tok->string) < (int32_t)sizeof(tok->string) - 1) {
+ *p++ = *s;
+ }
+ if (*s == ',') {
+ tok->type = TOK_NUMBER_LIST;
+ }
+ s++;
+ }
+ *p = 0;
+ return s;
+ }
+
+ /* Scan for an ID */
+ if (idstart(*s)) {
+ char *p = tok->string;
+
+ for (*p++ = *s++; idchar(*s); s++) {
+			if ((p - tok->string) < TOKEN_LEN - 1) {
+ *p++ = *s;
+ }
+ }
+
+ /* See if this is a $ number value */
+ if (tok->string[0] == '$') {
+ tok->type = TOK_NUMBER_VALUE;
+ } else {
+ /* Look for a string match */
+ for (i = 0; i < ARRAY_SIZE(smatches); i++) {
+ if (strcmp(smatches[i].s, tok->string) == 0) {
+ tok->type = smatches[i].type;
+ tok->subtype = smatches[i].subtype;
+ return s;
+ }
+ }
+ }
+ }
+ return s;
+}
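+
+/*
+ * Example (illustrative): scanning the input "rq:len=1024" produces, in
+ * order, TOK_QUEUE/TOK_SUB_RQ ("rq"), TOK_COLON, TOK_ATTR_NAME/TOK_SUB_LEN
+ * ("len"), TOK_EQUALS and TOK_NUMBER ("1024").  A comma-separated string
+ * such as "0,2,3" comes back as a single TOK_NUMBER_LIST token.
+ */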
+
+/**
+ * @brief convert token type to string
+ *
+ * @param type token type
+ *
+ * @return string, or "unknown"
+ */
+static const char *
+token_type2s(tok_type_e type)
+{
+ switch(type) {
+ #define P(x) case x: return #x;
+ P(TOK_LPAREN)
+ P(TOK_RPAREN)
+ P(TOK_COLON)
+ P(TOK_EQUALS)
+ P(TOK_QUEUE)
+ P(TOK_ATTR_NAME)
+ P(TOK_NUMBER)
+ P(TOK_NUMBER_VALUE)
+ P(TOK_NUMBER_LIST)
+ #undef P
+ }
+ return "unknown";
+}
+
+/**
+ * @brief convert token sub-type to string
+ *
+ * @param subtype token sub-type
+ *
+ * @return string, or "unknown"
+ */
+static const char *
+token_subtype2s(tok_subtype_e subtype)
+{
+ switch(subtype) {
+ #define P(x) case x: return #x;
+ P(TOK_SUB_EQ)
+ P(TOK_SUB_CQ)
+ P(TOK_SUB_RQ)
+ P(TOK_SUB_MQ)
+ P(TOK_SUB_WQ)
+ P(TOK_SUB_LEN)
+ P(TOK_SUB_CLASS)
+ P(TOK_SUB_ULP)
+ P(TOK_SUB_FILTER)
+ #undef P
+ }
+ return "";
+}
+
+/**
+ * @brief Generate syntax error message
+ *
+ * When a syntax error is found, the input tokens are dumped up to and including
+ * the token that failed, as indicated by the current iterator index.
+ *
+ * @param hw pointer to HW object
+ * @param tokarray pointer to token array object
+ *
+ * @return none
+ */
+static void
+tok_syntax(ocs_hw_t *hw, tokarray_t *tokarray)
+{
+ uint32_t i;
+ tok_t *tok;
+
+ ocs_log_test(hw->os, "Syntax error:\n");
+
+ for (i = 0, tok = tokarray->tokens; (i <= tokarray->inuse_count); i++, tok++) {
+ ocs_log_test(hw->os, "%s [%2d] %-16s %-16s %s\n", (i == tokarray->iter_idx) ? ">>>" : " ", i,
+ token_type2s(tok->type), token_subtype2s(tok->subtype), tok->string);
+ }
+}
+
+/**
+ * @brief parse a number
+ *
+ * Parses tokens of type TOK_NUMBER and TOK_NUMBER_VALUE, returning a numeric value
+ *
+ * @param hw pointer to HW object
+ * @param qtop pointer to QTOP object
+ * @param tok pointer to token to parse
+ *
+ * @return numeric value
+ */
+static uint32_t
+tok_getnumber(ocs_hw_t *hw, ocs_hw_qtop_t *qtop, tok_t *tok)
+{
+ uint32_t rval = 0;
+ uint32_t num_cpus = ocs_get_num_cpus();
+
+ switch(tok->type) {
+ case TOK_NUMBER_VALUE:
+ if (ocs_strcmp(tok->string, "$ncpu") == 0) {
+ rval = num_cpus;
+ } else if (ocs_strcmp(tok->string, "$ncpu1") == 0) {
+ rval = num_cpus - 1;
+ } else if (ocs_strcmp(tok->string, "$nwq") == 0) {
+ if (hw != NULL) {
+ rval = hw->config.n_wq;
+ }
+ } else if (ocs_strcmp(tok->string, "$maxmrq") == 0) {
+ rval = MIN(num_cpus, OCS_HW_MAX_MRQS);
+ } else if (ocs_strcmp(tok->string, "$nulp") == 0) {
+ rval = hw->ulp_max - hw->ulp_start + 1;
+ } else if ((qtop->rptcount_idx > 0) && ocs_strcmp(tok->string, "$rpt0") == 0) {
+ rval = qtop->rptcount[qtop->rptcount_idx-1];
+ } else if ((qtop->rptcount_idx > 1) && ocs_strcmp(tok->string, "$rpt1") == 0) {
+ rval = qtop->rptcount[qtop->rptcount_idx-2];
+ } else if ((qtop->rptcount_idx > 2) && ocs_strcmp(tok->string, "$rpt2") == 0) {
+ rval = qtop->rptcount[qtop->rptcount_idx-3];
+ } else if ((qtop->rptcount_idx > 3) && ocs_strcmp(tok->string, "$rpt3") == 0) {
+ rval = qtop->rptcount[qtop->rptcount_idx-4];
+ } else {
+ rval = ocs_strtoul(tok->string, 0, 0);
+ }
+ break;
+ case TOK_NUMBER:
+ rval = ocs_strtoul(tok->string, 0, 0);
+ break;
+ default:
+ break;
+ }
+ return rval;
+}
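+
+/*
+ * Example substitutions (illustrative, assuming a hypothetical 8-CPU
+ * system): "$ncpu" -> 8, "$ncpu1" -> 7, "$nwq" -> hw->config.n_wq,
+ * "$maxmrq" -> MIN(8, OCS_HW_MAX_MRQS), and "$rpt0" -> the iteration count
+ * of the innermost enclosing repeat group.  Plain strings such as "1024"
+ * or "0x400" are converted with ocs_strtoul().
+ */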
+
+/**
+ * @brief parse an array of tokens
+ *
+ * The tokens are semantically parsed, to generate QTOP entries.
+ *
+ * @param hw pointer to HW object
+ * @param tokarray pointer to array of tokens
+ * @param qtop output QTOP object
+ *
+ * @return returns 0 for success, a negative error code value for failure.
+ */
+static int32_t
+parse_topology(ocs_hw_t *hw, tokarray_t *tokarray, ocs_hw_qtop_t *qtop)
+{
+ ocs_hw_qtop_entry_t *qt = qtop->entries + qtop->inuse_count;
+ tok_t *tok;
+
+ for (; (tokarray->iter_idx < tokarray->inuse_count) &&
+ ((tok = &tokarray->tokens[tokarray->iter_idx]) != NULL); ) {
+ if (qtop->inuse_count >= qtop->alloc_count) {
+ return -1;
+ }
+
+ qt = qtop->entries + qtop->inuse_count;
+
+ switch (tok[0].type)
+ {
+ case TOK_QUEUE:
+ qt->entry = subtype2qtop(tok[0].subtype);
+ qt->set_default = FALSE;
+ qt->len = 0;
+ qt->class = 0;
+ qtop->inuse_count++;
+
+ tokarray->iter_idx++; /* Advance current token index */
+
+ /* Parse for queue attributes, possibly multiple instances */
+ while ((tokarray->iter_idx + 4) <= tokarray->inuse_count) {
+ tok = &tokarray->tokens[tokarray->iter_idx];
+ if( (tok[0].type == TOK_COLON) &&
+ (tok[1].type == TOK_ATTR_NAME) &&
+ (tok[2].type == TOK_EQUALS) &&
+ ((tok[3].type == TOK_NUMBER) ||
+ (tok[3].type == TOK_NUMBER_VALUE) ||
+ (tok[3].type == TOK_NUMBER_LIST))) {
+
+ switch (tok[1].subtype) {
+ case TOK_SUB_LEN:
+ qt->len = tok_getnumber(hw, qtop, &tok[3]);
+ break;
+
+ case TOK_SUB_CLASS:
+ qt->class = tok_getnumber(hw, qtop, &tok[3]);
+ break;
+
+ case TOK_SUB_ULP:
+ qt->ulp = tok_getnumber(hw, qtop, &tok[3]);
+ break;
+
+ case TOK_SUB_FILTER:
+ if (tok[3].type == TOK_NUMBER_LIST) {
+ uint32_t mask = 0;
+ char *p = tok[3].string;
+
+ while ((p != NULL) && *p) {
+ uint32_t v;
+
+ v = ocs_strtoul(p, 0, 0);
+ if (v < 32) {
+ mask |= (1U << v);
+ }
+
+ p = ocs_strchr(p, ',');
+ if (p != NULL) {
+ p++;
+ }
+ }
+ qt->filter_mask = mask;
+ } else {
+ qt->filter_mask = (1U << tok_getnumber(hw, qtop, &tok[3]));
+ }
+ break;
+ default:
+ break;
+ }
+ /* Advance current token index */
+ tokarray->iter_idx += 4;
+ } else {
+ break;
+ }
+ }
+ qtop->entry_counts[qt->entry]++;
+ break;
+
+ case TOK_ATTR_NAME:
+ if ( ((tokarray->iter_idx + 5) <= tokarray->inuse_count) &&
+ (tok[1].type == TOK_COLON) &&
+ (tok[2].type == TOK_QUEUE) &&
+ (tok[3].type == TOK_EQUALS) &&
+ ((tok[4].type == TOK_NUMBER) || (tok[4].type == TOK_NUMBER_VALUE))) {
+ qt->entry = subtype2qtop(tok[2].subtype);
+ qt->set_default = TRUE;
+ switch(tok[0].subtype) {
+ case TOK_SUB_LEN:
+ qt->len = tok_getnumber(hw, qtop, &tok[4]);
+ break;
+ case TOK_SUB_CLASS:
+ qt->class = tok_getnumber(hw, qtop, &tok[4]);
+ break;
+ case TOK_SUB_ULP:
+ qt->ulp = tok_getnumber(hw, qtop, &tok[4]);
+ break;
+ default:
+ break;
+ }
+ qtop->inuse_count++;
+ tokarray->iter_idx += 5;
+ } else {
+ tok_syntax(hw, tokarray);
+ return -1;
+ }
+ break;
+
+ case TOK_NUMBER:
+ case TOK_NUMBER_VALUE: {
+ uint32_t rpt_count = 1;
+ uint32_t i;
+
+ rpt_count = tok_getnumber(hw, qtop, tok);
+
+ if (tok[1].type == TOK_LPAREN) {
+ uint32_t iter_idx_save;
+
+ tokarray->iter_idx += 2;
+
+ /* save token array iteration index */
+ iter_idx_save = tokarray->iter_idx;
+
+ for (i = 0; i < rpt_count; i++) {
+ uint32_t rptcount_idx = qtop->rptcount_idx;
+
+ if (qtop->rptcount_idx < ARRAY_SIZE(qtop->rptcount)) {
+ qtop->rptcount[qtop->rptcount_idx++] = i;
+ }
+
+ /* restore token array iteration index */
+ tokarray->iter_idx = iter_idx_save;
+
+ /* parse, append to qtop */
+ parse_topology(hw, tokarray, qtop);
+
+ qtop->rptcount_idx = rptcount_idx;
+ }
+			} else {
+				tok_syntax(hw, tokarray);
+				return -1;
+			}
+ break;
+ }
+
+ case TOK_RPAREN:
+ tokarray->iter_idx++;
+ return 0;
+
+ default:
+ tok_syntax(hw, tokarray);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+/**
+ * @brief Parse queue topology string
+ *
+ * The queue topology object is allocated, and filled with the results of parsing the
+ * passed in queue topology string
+ *
+ * @param hw pointer to HW object
+ * @param qtop_string input queue topology string
+ *
+ * @return pointer to allocated QTOP object, or NULL if there was an error
+ */
+ocs_hw_qtop_t *
+ocs_hw_qtop_parse(ocs_hw_t *hw, const char *qtop_string)
+{
+ ocs_hw_qtop_t *qtop;
+ tokarray_t tokarray;
+ const char *s;
+#if HW_QTOP_DEBUG
+ uint32_t i;
+ ocs_hw_qtop_entry_t *qt;
+#endif
+
+ ocs_log_debug(hw->os, "queue topology: %s\n", qtop_string);
+
+ /* Allocate a token array */
+ tokarray.tokens = ocs_malloc(hw->os, MAX_TOKENS * sizeof(*tokarray.tokens), OCS_M_ZERO | OCS_M_NOWAIT);
+ if (tokarray.tokens == NULL) {
+ return NULL;
+ }
+ tokarray.alloc_count = MAX_TOKENS;
+ tokarray.inuse_count = 0;
+ tokarray.iter_idx = 0;
+
+ /* Parse the tokens */
+ for (s = qtop_string; (tokarray.inuse_count < tokarray.alloc_count) &&
+ ((s = tokenize(s, &tokarray.tokens[tokarray.inuse_count]))) != NULL; ) {
+		tokarray.inuse_count++;
+ }
+
+ /* Allocate a queue topology structure */
+ qtop = ocs_malloc(hw->os, sizeof(*qtop), OCS_M_ZERO | OCS_M_NOWAIT);
+ if (qtop == NULL) {
+ ocs_free(hw->os, tokarray.tokens, MAX_TOKENS * sizeof(*tokarray.tokens));
+ ocs_log_err(hw->os, "malloc qtop failed\n");
+ return NULL;
+ }
+ qtop->os = hw->os;
+
+ /* Allocate queue topology entries */
+ qtop->entries = ocs_malloc(hw->os, OCS_HW_MAX_QTOP_ENTRIES*sizeof(*qtop->entries), OCS_M_ZERO | OCS_M_NOWAIT);
+ if (qtop->entries == NULL) {
+ ocs_log_err(hw->os, "malloc qtop entries failed\n");
+ ocs_free(hw->os, qtop, sizeof(*qtop));
+ ocs_free(hw->os, tokarray.tokens, MAX_TOKENS * sizeof(*tokarray.tokens));
+ return NULL;
+ }
+ qtop->alloc_count = OCS_HW_MAX_QTOP_ENTRIES;
+ qtop->inuse_count = 0;
+
+ /* Parse the tokens */
+ parse_topology(hw, &tokarray, qtop);
+#if HW_QTOP_DEBUG
+ for (i = 0, qt = qtop->entries; i < qtop->inuse_count; i++, qt++) {
+ ocs_log_debug(hw->os, "entry %s set_df %d len %4d class %d ulp %d\n", qtopentry2s(qt->entry), qt->set_default, qt->len,
+ qt->class, qt->ulp);
+ }
+#endif
+
+ /* Free the tokens array */
+ ocs_free(hw->os, tokarray.tokens, MAX_TOKENS * sizeof(*tokarray.tokens));
+
+ return qtop;
+}
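+
+/*
+ * Example (hypothetical topology string, not a driver default): parsing
+ *
+ *	"eq cq rq:len=1024 cq mq 4(eq cq wq)"
+ *
+ * yields EQ, CQ, RQ (len 1024), CQ and MQ entries, followed by four repeats
+ * of the EQ/CQ/WQ triple; within the parentheses "$rpt0" would expand to
+ * the repeat iteration (0..3).
+ */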
+
+/**
+ * @brief free queue topology object
+ *
+ * @param qtop pointer to QTOP object
+ *
+ * @return none
+ */
+void
+ocs_hw_qtop_free(ocs_hw_qtop_t *qtop)
+{
+ if (qtop != NULL) {
+ if (qtop->entries != NULL) {
+ ocs_free(qtop->os, qtop->entries, qtop->alloc_count*sizeof(*qtop->entries));
+ }
+ ocs_free(qtop->os, qtop, sizeof(*qtop));
+ }
+}
+
+/* Uncomment this to turn on RQ debug */
+// #define ENABLE_DEBUG_RQBUF
+
+static int32_t ocs_hw_rqpair_find(ocs_hw_t *hw, uint16_t rq_id);
+static ocs_hw_sequence_t * ocs_hw_rqpair_get(ocs_hw_t *hw, uint16_t rqindex, uint16_t bufindex);
+static int32_t ocs_hw_rqpair_put(ocs_hw_t *hw, ocs_hw_sequence_t *seq);
+static ocs_hw_rtn_e ocs_hw_rqpair_auto_xfer_rdy_buffer_sequence_reset(ocs_hw_t *hw, ocs_hw_sequence_t *seq);
+
+/**
+ * @brief Process receive queue completions for RQ Pair mode.
+ *
+ * @par Description
+ * RQ completions are processed. In RQ pair mode, a single header and single payload
+ * buffer are received, and passed to the function that has registered for unsolicited
+ * callbacks.
+ *
+ * @param hw Hardware context.
+ * @param cq Pointer to HW completion queue.
+ * @param cqe Completion queue entry.
+ *
+ * @return Returns 0 for success, or a negative error code value for failure.
+ */
+
+int32_t
+ocs_hw_rqpair_process_rq(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe)
+{
+ uint16_t rq_id;
+ uint32_t index;
+ int32_t rqindex;
+ int32_t rq_status;
+ uint32_t h_len;
+ uint32_t p_len;
+ ocs_hw_sequence_t *seq;
+
+ rq_status = sli_fc_rqe_rqid_and_index(&hw->sli, cqe, &rq_id, &index);
+ if (0 != rq_status) {
+ switch (rq_status) {
+ case SLI4_FC_ASYNC_RQ_BUF_LEN_EXCEEDED:
+ case SLI4_FC_ASYNC_RQ_DMA_FAILURE:
+ /* just get RQ buffer then return to chip */
+ rqindex = ocs_hw_rqpair_find(hw, rq_id);
+ if (rqindex < 0) {
+ ocs_log_test(hw->os, "status=%#x: rq_id lookup failed for id=%#x\n",
+ rq_status, rq_id);
+ break;
+ }
+
+ /* get RQ buffer */
+ seq = ocs_hw_rqpair_get(hw, rqindex, index);
+
+ /* return to chip */
+ if (ocs_hw_rqpair_sequence_free(hw, seq)) {
+ ocs_log_test(hw->os, "status=%#x, failed to return buffers to RQ\n",
+ rq_status);
+ break;
+ }
+ break;
+ case SLI4_FC_ASYNC_RQ_INSUFF_BUF_NEEDED:
+ case SLI4_FC_ASYNC_RQ_INSUFF_BUF_FRM_DISC:
+			/* since RQ buffers were not consumed, cannot return them to chip */
+			ocs_log_debug(hw->os, "Warning: RCQE status=%#x\n", rq_status);
+			/* fall through */
+ default:
+ break;
+ }
+ return -1;
+ }
+
+ rqindex = ocs_hw_rqpair_find(hw, rq_id);
+ if (rqindex < 0) {
+ ocs_log_test(hw->os, "Error: rq_id lookup failed for id=%#x\n", rq_id);
+ return -1;
+ }
+
+ OCS_STAT({ hw_rq_t *rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]]; rq->use_count++; rq->hdr_use_count++;
+ rq->payload_use_count++;})
+
+ seq = ocs_hw_rqpair_get(hw, rqindex, index);
+ ocs_hw_assert(seq != NULL);
+
+ seq->hw = hw;
+ seq->auto_xrdy = 0;
+ seq->out_of_xris = 0;
+ seq->xri = 0;
+ seq->hio = NULL;
+
+ sli_fc_rqe_length(&hw->sli, cqe, &h_len, &p_len);
+ seq->header->dma.len = h_len;
+ seq->payload->dma.len = p_len;
+ seq->fcfi = sli_fc_rqe_fcfi(&hw->sli, cqe);
+ seq->hw_priv = cq->eq;
+
+ /* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
+ if (hw->config.bounce) {
+ fc_header_t *hdr = seq->header->dma.virt;
+ uint32_t s_id = fc_be24toh(hdr->s_id);
+ uint32_t d_id = fc_be24toh(hdr->d_id);
+ uint32_t ox_id = ocs_be16toh(hdr->ox_id);
+ if (hw->callback.bounce != NULL) {
+ (*hw->callback.bounce)(ocs_hw_unsol_process_bounce, seq, s_id, d_id, ox_id);
+ }
+ } else {
+ hw->callback.unsolicited(hw->args.unsolicited, seq);
+ }
+
+ return 0;
+}
+
+/**
+ * @brief Process receive queue completions for RQ Pair mode - Auto xfer rdy
+ *
+ * @par Description
+ * RQ completions are processed. In RQ pair mode, a single header and single payload
+ * buffer are received, and passed to the function that has registered for unsolicited
+ * callbacks.
+ *
+ * @param hw Hardware context.
+ * @param cq Pointer to HW completion queue.
+ * @param cqe Completion queue entry.
+ *
+ * @return Returns 0 for success, or a negative error code value for failure.
+ */
+
+int32_t
+ocs_hw_rqpair_process_auto_xfr_rdy_cmd(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe)
+{
+ /* Seems silly to call a SLI function to decode - use the structure directly for performance */
+ sli4_fc_optimized_write_cmd_cqe_t *opt_wr = (sli4_fc_optimized_write_cmd_cqe_t*)cqe;
+ uint16_t rq_id;
+ uint32_t index;
+ int32_t rqindex;
+ int32_t rq_status;
+ uint32_t h_len;
+ uint32_t p_len;
+ ocs_hw_sequence_t *seq;
+ uint8_t axr_lock_taken = 0;
+#if defined(OCS_DISC_SPIN_DELAY)
+ uint32_t delay = 0;
+ char prop_buf[32];
+#endif
+
+ rq_status = sli_fc_rqe_rqid_and_index(&hw->sli, cqe, &rq_id, &index);
+ if (0 != rq_status) {
+ switch (rq_status) {
+ case SLI4_FC_ASYNC_RQ_BUF_LEN_EXCEEDED:
+ case SLI4_FC_ASYNC_RQ_DMA_FAILURE:
+ /* just get RQ buffer then return to chip */
+ rqindex = ocs_hw_rqpair_find(hw, rq_id);
+ if (rqindex < 0) {
+ ocs_log_err(hw->os, "status=%#x: rq_id lookup failed for id=%#x\n",
+ rq_status, rq_id);
+ break;
+ }
+
+ /* get RQ buffer */
+ seq = ocs_hw_rqpair_get(hw, rqindex, index);
+
+ /* return to chip */
+ if (ocs_hw_rqpair_sequence_free(hw, seq)) {
+ ocs_log_err(hw->os, "status=%#x, failed to return buffers to RQ\n",
+ rq_status);
+ break;
+ }
+ break;
+ case SLI4_FC_ASYNC_RQ_INSUFF_BUF_NEEDED:
+ case SLI4_FC_ASYNC_RQ_INSUFF_BUF_FRM_DISC:
+ /* since RQ buffers were not consumed, cannot return them to chip */
+			ocs_log_debug(hw->os, "Warning: RCQE status=%#x\n", rq_status);
+ /* fall through */
+ default:
+ break;
+ }
+ return -1;
+ }
+
+ rqindex = ocs_hw_rqpair_find(hw, rq_id);
+ if (rqindex < 0) {
+ ocs_log_err(hw->os, "Error: rq_id lookup failed for id=%#x\n", rq_id);
+ return -1;
+ }
+
+ OCS_STAT({ hw_rq_t *rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]]; rq->use_count++; rq->hdr_use_count++;
+ rq->payload_use_count++;})
+
+ seq = ocs_hw_rqpair_get(hw, rqindex, index);
+ ocs_hw_assert(seq != NULL);
+
+ seq->hw = hw;
+ seq->auto_xrdy = opt_wr->agxr;
+ seq->out_of_xris = opt_wr->oox;
+ seq->xri = opt_wr->xri;
+ seq->hio = NULL;
+
+ sli_fc_rqe_length(&hw->sli, cqe, &h_len, &p_len);
+ seq->header->dma.len = h_len;
+ seq->payload->dma.len = p_len;
+ seq->fcfi = sli_fc_rqe_fcfi(&hw->sli, cqe);
+ seq->hw_priv = cq->eq;
+
+ if (seq->auto_xrdy) {
+ fc_header_t *fc_hdr = seq->header->dma.virt;
+
+ seq->hio = ocs_hw_io_lookup(hw, seq->xri);
+ ocs_lock(&seq->hio->axr_lock);
+ axr_lock_taken = 1;
+
+ /* save the FCFI, src_id, dest_id and ox_id because we need it for the sequence object when the data comes. */
+ seq->hio->axr_buf->fcfi = seq->fcfi;
+ seq->hio->axr_buf->hdr.ox_id = fc_hdr->ox_id;
+ seq->hio->axr_buf->hdr.s_id = fc_hdr->s_id;
+ seq->hio->axr_buf->hdr.d_id = fc_hdr->d_id;
+ seq->hio->axr_buf->cmd_cqe = 1;
+
+ /*
+ * Since auto xfer rdy is used for this IO, then clear the sequence
+ * initiative bit in the header so that the upper layers wait for the
+ * data. This should flow exactly like the first burst case.
+ */
+ fc_hdr->f_ctl &= fc_htobe24(~FC_FCTL_SEQUENCE_INITIATIVE);
+
+ /* If AXR CMD CQE came before previous TRSP CQE of same XRI */
+ if (seq->hio->type == OCS_HW_IO_TARGET_RSP) {
+ seq->hio->axr_buf->call_axr_cmd = 1;
+ seq->hio->axr_buf->cmd_seq = seq;
+ goto exit_ocs_hw_rqpair_process_auto_xfr_rdy_cmd;
+ }
+ }
+
+ /* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
+ if (hw->config.bounce) {
+ fc_header_t *hdr = seq->header->dma.virt;
+ uint32_t s_id = fc_be24toh(hdr->s_id);
+ uint32_t d_id = fc_be24toh(hdr->d_id);
+ uint32_t ox_id = ocs_be16toh(hdr->ox_id);
+ if (hw->callback.bounce != NULL) {
+ (*hw->callback.bounce)(ocs_hw_unsol_process_bounce, seq, s_id, d_id, ox_id);
+ }
+ } else {
+ hw->callback.unsolicited(hw->args.unsolicited, seq);
+ }
+
+ if (seq->auto_xrdy) {
+ /* If data cqe came before cmd cqe in out of order in case of AXR */
+ if(seq->hio->axr_buf->data_cqe == 1) {
+
+#if defined(OCS_DISC_SPIN_DELAY)
+ if (ocs_get_property("disk_spin_delay", prop_buf, sizeof(prop_buf)) == 0) {
+ delay = ocs_strtoul(prop_buf, 0, 0);
+ ocs_udelay(delay);
+ }
+#endif
+ /* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
+ if (hw->config.bounce) {
+ fc_header_t *hdr = seq->header->dma.virt;
+ uint32_t s_id = fc_be24toh(hdr->s_id);
+ uint32_t d_id = fc_be24toh(hdr->d_id);
+ uint32_t ox_id = ocs_be16toh(hdr->ox_id);
+ if (hw->callback.bounce != NULL) {
+ (*hw->callback.bounce)(ocs_hw_unsol_process_bounce, &seq->hio->axr_buf->seq, s_id, d_id, ox_id);
+ }
+ } else {
+ hw->callback.unsolicited(hw->args.unsolicited, &seq->hio->axr_buf->seq);
+ }
+ }
+ }
+
+exit_ocs_hw_rqpair_process_auto_xfr_rdy_cmd:
+ if(axr_lock_taken) {
+ ocs_unlock(&seq->hio->axr_lock);
+ }
+ return 0;
+}
+
+/**
+ * @brief Process CQ completions for Auto xfer rdy data phases.
+ *
+ * @par Description
+ * The data is DMA'd into the data buffer posted to the SGL prior to the XRI
+ * being assigned to an IO. When the completion is received, All of the data
+ * is in the single buffer.
+ *
+ * @param hw Hardware context.
+ * @param cq Pointer to HW completion queue.
+ * @param cqe Completion queue entry.
+ *
+ * @return Returns 0 for success, or a negative error code value for failure.
+ */
+
+int32_t
+ocs_hw_rqpair_process_auto_xfr_rdy_data(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe)
+{
+ /* Seems silly to call a SLI function to decode - use the structure directly for performance */
+ sli4_fc_optimized_write_data_cqe_t *opt_wr = (sli4_fc_optimized_write_data_cqe_t*)cqe;
+ ocs_hw_sequence_t *seq;
+ ocs_hw_io_t *io;
+ ocs_hw_auto_xfer_rdy_buffer_t *buf;
+#if defined(OCS_DISC_SPIN_DELAY)
+ uint32_t delay = 0;
+ char prop_buf[32];
+#endif
+ /* Look up the IO */
+ io = ocs_hw_io_lookup(hw, opt_wr->xri);
+ ocs_lock(&io->axr_lock);
+ buf = io->axr_buf;
+ buf->data_cqe = 1;
+ seq = &buf->seq;
+ seq->hw = hw;
+ seq->auto_xrdy = 1;
+ seq->out_of_xris = 0;
+ seq->xri = opt_wr->xri;
+ seq->hio = io;
+ seq->header = &buf->header;
+ seq->payload = &buf->payload;
+
+ seq->header->dma.len = sizeof(fc_header_t);
+ seq->payload->dma.len = opt_wr->total_data_placed;
+ seq->fcfi = buf->fcfi;
+ seq->hw_priv = cq->eq;
+
+ if (opt_wr->status == SLI4_FC_WCQE_STATUS_SUCCESS) {
+ seq->status = OCS_HW_UNSOL_SUCCESS;
+ } else if (opt_wr->status == SLI4_FC_WCQE_STATUS_REMOTE_STOP) {
+ seq->status = OCS_HW_UNSOL_ABTS_RCVD;
+ } else {
+ seq->status = OCS_HW_UNSOL_ERROR;
+ }
+
+ /* If AXR CMD CQE came before previous TRSP CQE of same XRI */
+ if(io->type == OCS_HW_IO_TARGET_RSP) {
+ io->axr_buf->call_axr_data = 1;
+ goto exit_ocs_hw_rqpair_process_auto_xfr_rdy_data;
+ }
+
+ if(!buf->cmd_cqe) {
+ /* if data cqe came before cmd cqe, return here, cmd cqe will handle */
+ goto exit_ocs_hw_rqpair_process_auto_xfr_rdy_data;
+ }
+#if defined(OCS_DISC_SPIN_DELAY)
+ if (ocs_get_property("disk_spin_delay", prop_buf, sizeof(prop_buf)) == 0) {
+ delay = ocs_strtoul(prop_buf, 0, 0);
+ ocs_udelay(delay);
+ }
+#endif
+
+ /* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
+ if (hw->config.bounce) {
+ fc_header_t *hdr = seq->header->dma.virt;
+ uint32_t s_id = fc_be24toh(hdr->s_id);
+ uint32_t d_id = fc_be24toh(hdr->d_id);
+ uint32_t ox_id = ocs_be16toh(hdr->ox_id);
+ if (hw->callback.bounce != NULL) {
+ (*hw->callback.bounce)(ocs_hw_unsol_process_bounce, seq, s_id, d_id, ox_id);
+ }
+ } else {
+ hw->callback.unsolicited(hw->args.unsolicited, seq);
+ }
+
+exit_ocs_hw_rqpair_process_auto_xfr_rdy_data:
+ ocs_unlock(&io->axr_lock);
+ return 0;
+}
+
+/**
+ * @brief Return pointer to RQ buffer entry.
+ *
+ * @par Description
+ * Returns a pointer to the RQ buffer entry given by @c rqindex and @c bufindex.
+ *
+ * @param hw Hardware context.
+ * @param rqindex Index of the RQ that is being processed.
+ * @param bufindex Index into the RQ that is being processed.
+ *
+ * @return Pointer to the sequence structure, or NULL otherwise.
+ */
+static ocs_hw_sequence_t *
+ocs_hw_rqpair_get(ocs_hw_t *hw, uint16_t rqindex, uint16_t bufindex)
+{
+ sli4_queue_t *rq_hdr = &hw->rq[rqindex];
+ sli4_queue_t *rq_payload = &hw->rq[rqindex+1];
+ ocs_hw_sequence_t *seq = NULL;
+ hw_rq_t *rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]];
+
+#if defined(ENABLE_DEBUG_RQBUF)
+ uint64_t rqbuf_debug_value = 0xdead0000 | ((rq->id & 0xf) << 12) | (bufindex & 0xfff);
+#endif
+
+ if (bufindex >= rq_hdr->length) {
+ ocs_log_err(hw->os, "RQ index %d bufindex %d exceed ring length %d for id %d\n",
+ rqindex, bufindex, rq_hdr->length, rq_hdr->id);
+ return NULL;
+ }
+
+ sli_queue_lock(rq_hdr);
+ sli_queue_lock(rq_payload);
+
+#if defined(ENABLE_DEBUG_RQBUF)
+ /* Put a debug value into the rq, to track which entries are still valid */
+ _sli_queue_poke(&hw->sli, rq_hdr, bufindex, (uint8_t *)&rqbuf_debug_value);
+ _sli_queue_poke(&hw->sli, rq_payload, bufindex, (uint8_t *)&rqbuf_debug_value);
+#endif
+
+ seq = rq->rq_tracker[bufindex];
+ rq->rq_tracker[bufindex] = NULL;
+
+	if (seq == NULL) {
+ ocs_log_err(hw->os, "RQ buffer NULL, rqindex %d, bufindex %d, current q index = %d\n",
+ rqindex, bufindex, rq_hdr->index);
+ }
+
+ sli_queue_unlock(rq_payload);
+ sli_queue_unlock(rq_hdr);
+ return seq;
+}
+
+/**
+ * @brief Posts an RQ buffer to a queue and update the verification structures
+ *
+ * @param hw hardware context
+ * @param seq Pointer to sequence object.
+ *
+ * @return Returns 0 on success, or a non-zero value otherwise.
+ */
+static int32_t
+ocs_hw_rqpair_put(ocs_hw_t *hw, ocs_hw_sequence_t *seq)
+{
+ sli4_queue_t *rq_hdr = &hw->rq[seq->header->rqindex];
+ sli4_queue_t *rq_payload = &hw->rq[seq->payload->rqindex];
+ uint32_t hw_rq_index = hw->hw_rq_lookup[seq->header->rqindex];
+ hw_rq_t *rq = hw->hw_rq[hw_rq_index];
+ uint32_t phys_hdr[2];
+ uint32_t phys_payload[2];
+ int32_t qindex_hdr;
+ int32_t qindex_payload;
+
+ /* Update the RQ verification lookup tables */
+ phys_hdr[0] = ocs_addr32_hi(seq->header->dma.phys);
+ phys_hdr[1] = ocs_addr32_lo(seq->header->dma.phys);
+ phys_payload[0] = ocs_addr32_hi(seq->payload->dma.phys);
+ phys_payload[1] = ocs_addr32_lo(seq->payload->dma.phys);
+
+ sli_queue_lock(rq_hdr);
+ sli_queue_lock(rq_payload);
+
+ /*
+ * Note: The header must be posted last for buffer pair mode because
+ * posting on the header queue posts the payload queue as well.
+ * We do not ring the payload queue independently in RQ pair mode.
+ */
+ qindex_payload = _sli_queue_write(&hw->sli, rq_payload, (void *)phys_payload);
+ qindex_hdr = _sli_queue_write(&hw->sli, rq_hdr, (void *)phys_hdr);
+ if (qindex_hdr < 0 ||
+ qindex_payload < 0) {
+ ocs_log_err(hw->os, "RQ_ID=%#x write failed\n", rq_hdr->id);
+ sli_queue_unlock(rq_payload);
+ sli_queue_unlock(rq_hdr);
+ return OCS_HW_RTN_ERROR;
+ }
+
+ /* ensure the indexes are the same */
+ ocs_hw_assert(qindex_hdr == qindex_payload);
+
+ /* Update the lookup table */
+ if (rq->rq_tracker[qindex_hdr] == NULL) {
+ rq->rq_tracker[qindex_hdr] = seq;
+ } else {
+ ocs_log_test(hw->os, "expected rq_tracker[%d][%d] buffer to be NULL\n",
+ hw_rq_index, qindex_hdr);
+ }
+
+ sli_queue_unlock(rq_payload);
+ sli_queue_unlock(rq_hdr);
+ return OCS_HW_RTN_SUCCESS;
+}
+
+/**
+ * @brief Return RQ buffers (while in RQ pair mode).
+ *
+ * @par Description
+ * The header and payload buffers are returned to the Receive Queue.
+ *
+ * @param hw Hardware context.
+ * @param seq Header/payload sequence buffers.
+ *
+ * @return Returns OCS_HW_RTN_SUCCESS on success, or an error code value on failure.
+ */
+
+ocs_hw_rtn_e
+ocs_hw_rqpair_sequence_free(ocs_hw_t *hw, ocs_hw_sequence_t *seq)
+{
+ ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
+
+ /* Check for auto xfer rdy dummy buffers and call the proper release function. */
+ if (seq->header->rqindex == OCS_HW_RQ_INDEX_DUMMY_HDR) {
+ return ocs_hw_rqpair_auto_xfer_rdy_buffer_sequence_reset(hw, seq);
+ }
+
+ /*
+ * Post the data buffer first. Because in RQ pair mode, ringing the
+ * doorbell of the header ring will post the data buffer as well.
+ */
+ if (ocs_hw_rqpair_put(hw, seq)) {
+ ocs_log_err(hw->os, "error writing buffers\n");
+ return OCS_HW_RTN_ERROR;
+ }
+
+ return rc;
+}
+
+/**
+ * @brief Find the RQ index of RQ_ID.
+ *
+ * @param hw Hardware context.
+ * @param rq_id RQ ID to find.
+ *
+ * @return Returns the RQ index, or -1 if not found
+ */
+static inline int32_t
+ocs_hw_rqpair_find(ocs_hw_t *hw, uint16_t rq_id)
+{
+ return ocs_hw_queue_hash_find(hw->rq_hash, rq_id);
+}
+
+/**
+ * @ingroup devInitShutdown
+ * @brief Allocate auto xfer rdy buffers.
+ *
+ * @par Description
+ * Allocates the auto xfer rdy buffers and places them on the free list.
+ *
+ * @param hw Hardware context allocated by the caller.
+ * @param num_buffers Number of buffers to allocate.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+ocs_hw_rtn_e
+ocs_hw_rqpair_auto_xfer_rdy_buffer_alloc(ocs_hw_t *hw, uint32_t num_buffers)
+{
+ ocs_hw_auto_xfer_rdy_buffer_t *buf;
+ uint32_t i;
+
+ hw->auto_xfer_rdy_buf_pool = ocs_pool_alloc(hw->os, sizeof(ocs_hw_auto_xfer_rdy_buffer_t), num_buffers, FALSE);
+ if (hw->auto_xfer_rdy_buf_pool == NULL) {
+ ocs_log_err(hw->os, "Failure to allocate auto xfer ready buffer pool\n");
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ for (i = 0; i < num_buffers; i++) {
+ /* allocate the wrapper object */
+ buf = ocs_pool_get_instance(hw->auto_xfer_rdy_buf_pool, i);
+ ocs_hw_assert(buf != NULL);
+
+ /* allocate the auto xfer ready buffer */
+ if (ocs_dma_alloc(hw->os, &buf->payload.dma, hw->config.auto_xfer_rdy_size, OCS_MIN_DMA_ALIGNMENT)) {
+ ocs_log_err(hw->os, "DMA allocation failed\n");
+ ocs_free(hw->os, buf, sizeof(*buf));
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+
+ /* build a fake data header in big endian */
+ buf->hdr.info = FC_RCTL_INFO_SOL_DATA;
+ buf->hdr.r_ctl = FC_RCTL_FC4_DATA;
+ buf->hdr.type = FC_TYPE_FCP;
+ buf->hdr.f_ctl = fc_htobe24(FC_FCTL_EXCHANGE_RESPONDER |
+ FC_FCTL_FIRST_SEQUENCE |
+ FC_FCTL_LAST_SEQUENCE |
+ FC_FCTL_END_SEQUENCE |
+ FC_FCTL_SEQUENCE_INITIATIVE);
+
+ /* build the fake header DMA object */
+ buf->header.rqindex = OCS_HW_RQ_INDEX_DUMMY_HDR;
+ buf->header.dma.virt = &buf->hdr;
+ buf->header.dma.alloc = buf;
+ buf->header.dma.size = sizeof(buf->hdr);
+ buf->header.dma.len = sizeof(buf->hdr);
+
+ buf->payload.rqindex = OCS_HW_RQ_INDEX_DUMMY_DATA;
+ }
+ return OCS_HW_RTN_SUCCESS;
+}
+
+/**
+ * @ingroup devInitShutdown
+ * @brief Post Auto xfer rdy buffers to the XRIs posted with DNRX.
+ *
+ * @par Description
+ * When new buffers are freed, check existing XRIs waiting for buffers.
+ *
+ * @param hw Hardware context allocated by the caller.
+ */
+static void
+ocs_hw_rqpair_auto_xfer_rdy_dnrx_check(ocs_hw_t *hw)
+{
+ ocs_hw_io_t *io;
+ int32_t rc;
+
+ ocs_lock(&hw->io_lock);
+
+ while (!ocs_list_empty(&hw->io_port_dnrx)) {
+ io = ocs_list_remove_head(&hw->io_port_dnrx);
+ rc = ocs_hw_reque_xri(hw, io);
+ if(rc) {
+ break;
+ }
+ }
+
+ ocs_unlock(&hw->io_lock);
+}
+
+/**
+ * @brief Called when the POST_SGL_PAGE command completes.
+ *
+ * @par Description
+ * Free the mailbox command buffer.
+ *
+ * @param hw Hardware context.
+ * @param status Status field from the mbox completion.
+ * @param mqe Mailbox response structure.
+ * @param arg Pointer to a callback function that signals the caller that the command is done.
+ *
+ * @return Returns 0.
+ */
+static int32_t
+ocs_hw_rqpair_auto_xfer_rdy_move_to_port_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
+{
+ if (status != 0) {
+ ocs_log_debug(hw->os, "Status 0x%x\n", status);
+ }
+
+ ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
+ return 0;
+}
+
+/**
+ * @brief Prepares an XRI to move to the chip.
+ *
+ * @par Description
+ * Puts the data SGL into the SGL list for the IO object and possibly registers
+ * an SGL list for the XRI. Since both the POST_XRI and POST_SGL_PAGES commands are
+ * mailbox commands, we don't need to wait for completion before proceeding.
+ *
+ * @param hw Hardware context allocated by the caller.
+ * @param io Pointer to the IO object.
+ *
+ * @return Returns OCS_HW_RTN_SUCCESS for success, or an error code value for failure.
+ */
+ocs_hw_rtn_e
+ocs_hw_rqpair_auto_xfer_rdy_move_to_port(ocs_hw_t *hw, ocs_hw_io_t *io)
+{
+ /* We only need to preregister the SGL if it has not yet been done. */
+ if (!sli_get_sgl_preregister(&hw->sli)) {
+ uint8_t *post_sgl;
+ ocs_dma_t *psgls = &io->def_sgl;
+ ocs_dma_t **sgls = &psgls;
+
+ /* non-local buffer required for mailbox queue */
+ post_sgl = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
+ if (post_sgl == NULL) {
+ ocs_log_err(hw->os, "no buffer for command\n");
+ return OCS_HW_RTN_NO_MEMORY;
+ }
+ if (sli_cmd_fcoe_post_sgl_pages(&hw->sli, post_sgl, SLI4_BMBX_SIZE,
+ io->indicator, 1, sgls, NULL, NULL)) {
+ if (ocs_hw_command(hw, post_sgl, OCS_CMD_NOWAIT,
+ ocs_hw_rqpair_auto_xfer_rdy_move_to_port_cb, NULL)) {
+ ocs_free(hw->os, post_sgl, SLI4_BMBX_SIZE);
+ ocs_log_err(hw->os, "SGL post failed\n");
+ return OCS_HW_RTN_ERROR;
+ }
+ }
+ }
+
+ ocs_lock(&hw->io_lock);
+ if (ocs_hw_rqpair_auto_xfer_rdy_buffer_post(hw, io, 0) != 0) { /* DNRX set - no buffer */
+ ocs_unlock(&hw->io_lock);
+ return OCS_HW_RTN_ERROR;
+ }
+ ocs_unlock(&hw->io_lock);
+ return OCS_HW_RTN_SUCCESS;
+}
+
+/**
+ * @brief Prepares an XRI to move back to the host.
+ *
+ * @par Description
+ * Releases any attached buffer back to the pool.
+ *
+ * @param hw Hardware context allocated by the caller.
+ * @param io Pointer to the IO object.
+ */
+void
+ocs_hw_rqpair_auto_xfer_rdy_move_to_host(ocs_hw_t *hw, ocs_hw_io_t *io)
+{
+ if (io->axr_buf != NULL) {
+ ocs_lock(&hw->io_lock);
+ /* check list and remove if there */
+ if (ocs_list_on_list(&io->dnrx_link)) {
+ ocs_list_remove(&hw->io_port_dnrx, io);
+ io->auto_xfer_rdy_dnrx = 0;
+
+ /* release the count for waiting for a buffer */
+ ocs_hw_io_free(hw, io);
+ }
+
+ ocs_pool_put(hw->auto_xfer_rdy_buf_pool, io->axr_buf);
+ io->axr_buf = NULL;
+ ocs_unlock(&hw->io_lock);
+
+ ocs_hw_rqpair_auto_xfer_rdy_dnrx_check(hw);
+ }
+ return;
+}
+
+/**
+ * @brief Posts an auto xfer rdy buffer to an IO.
+ *
+ * @par Description
+ * Puts the data SGL into the SGL list for the IO object.
+ *
+ * @b Note: io_lock must be held.
+ *
+ * @param hw Hardware context allocated by the caller.
+ * @param io Pointer to the IO object.
+ *
+ * @return Returns the value of DNRX bit in the TRSP and ABORT WQEs.
+ */
+uint8_t
+ocs_hw_rqpair_auto_xfer_rdy_buffer_post(ocs_hw_t *hw, ocs_hw_io_t *io, int reuse_buf)
+{
+ ocs_hw_auto_xfer_rdy_buffer_t *buf;
+ sli4_sge_t *data;
+
+ if(!reuse_buf) {
+ buf = ocs_pool_get(hw->auto_xfer_rdy_buf_pool);
+ io->axr_buf = buf;
+ }
+
+ data = io->def_sgl.virt;
+ data[0].sge_type = SLI4_SGE_TYPE_SKIP;
+ data[0].last = 0;
+
+ /*
+ * Note: if we are doing DIF assists, then the SGE[1] must contain the
+ * DI_SEED SGE. The host is responsible for programming:
+ * SGE Type (Word 2, bits 30:27)
+ * Replacement App Tag (Word 2 bits 15:0)
+ * App Tag (Word 3 bits 15:0)
+ * New Ref Tag (Word 3 bit 23)
+ * Metadata Enable (Word 3 bit 20)
+ * Auto-Increment RefTag (Word 3 bit 19)
+ * Block Size (Word 3 bits 18:16)
+ * The following fields are managed by the SLI Port:
+ * Ref Tag Compare (Word 0)
+	 *            Replacement Ref Tag (Word 1) - if not the LBA
+ * NA (Word 2 bit 25)
+ * Opcode RX (Word 3 bits 27:24)
+ * Checksum Enable (Word 3 bit 22)
+ * RefTag Enable (Word 3 bit 21)
+ *
+	 * The first two SGLs are cleared by ocs_hw_io_init_sges(), so assume everything is cleared.
+ */
+ if (hw->config.auto_xfer_rdy_p_type) {
+ sli4_diseed_sge_t *diseed = (sli4_diseed_sge_t*)&data[1];
+
+ diseed->sge_type = SLI4_SGE_TYPE_DISEED;
+ diseed->repl_app_tag = hw->config.auto_xfer_rdy_app_tag_value;
+ diseed->app_tag_cmp = hw->config.auto_xfer_rdy_app_tag_value;
+ diseed->check_app_tag = hw->config.auto_xfer_rdy_app_tag_valid;
+ diseed->auto_incr_ref_tag = TRUE; /* Always the LBA */
+ diseed->dif_blk_size = hw->config.auto_xfer_rdy_blk_size_chip;
+ } else {
+ data[1].sge_type = SLI4_SGE_TYPE_SKIP;
+ data[1].last = 0;
+ }
+
+ data[2].sge_type = SLI4_SGE_TYPE_DATA;
+ data[2].buffer_address_high = ocs_addr32_hi(io->axr_buf->payload.dma.phys);
+ data[2].buffer_address_low = ocs_addr32_lo(io->axr_buf->payload.dma.phys);
+ data[2].buffer_length = io->axr_buf->payload.dma.size;
+ data[2].last = TRUE;
+ data[3].sge_type = SLI4_SGE_TYPE_SKIP;
+
+ return 0;
+}
+
+/**
+ * @brief Return auto xfer ready buffers (while in RQ pair mode).
+ *
+ * @par Description
+ * The header and payload buffers are returned to the auto xfer rdy pool.
+ *
+ * @param hw Hardware context.
+ * @param seq Header/payload sequence buffers.
+ *
+ * @return Returns OCS_HW_RTN_SUCCESS on success, or an error code value on failure.
+ */
+
+static ocs_hw_rtn_e
+ocs_hw_rqpair_auto_xfer_rdy_buffer_sequence_reset(ocs_hw_t *hw, ocs_hw_sequence_t *seq)
+{
+ ocs_hw_auto_xfer_rdy_buffer_t *buf = seq->header->dma.alloc;
+
+ buf->data_cqe = 0;
+ buf->cmd_cqe = 0;
+ buf->fcfi = 0;
+ buf->call_axr_cmd = 0;
+ buf->call_axr_data = 0;
+
+ /* build a fake data header in big endian */
+ buf->hdr.info = FC_RCTL_INFO_SOL_DATA;
+ buf->hdr.r_ctl = FC_RCTL_FC4_DATA;
+ buf->hdr.type = FC_TYPE_FCP;
+ buf->hdr.f_ctl = fc_htobe24(FC_FCTL_EXCHANGE_RESPONDER |
+ FC_FCTL_FIRST_SEQUENCE |
+ FC_FCTL_LAST_SEQUENCE |
+ FC_FCTL_END_SEQUENCE |
+ FC_FCTL_SEQUENCE_INITIATIVE);
+
+ /* build the fake header DMA object */
+ buf->header.rqindex = OCS_HW_RQ_INDEX_DUMMY_HDR;
+ buf->header.dma.virt = &buf->hdr;
+ buf->header.dma.alloc = buf;
+ buf->header.dma.size = sizeof(buf->hdr);
+ buf->header.dma.len = sizeof(buf->hdr);
+ buf->payload.rqindex = OCS_HW_RQ_INDEX_DUMMY_DATA;
+
+ ocs_hw_rqpair_auto_xfer_rdy_dnrx_check(hw);
+
+ return OCS_HW_RTN_SUCCESS;
+}
+
+/**
+ * @ingroup devInitShutdown
+ * @brief Free auto xfer rdy buffers.
+ *
+ * @par Description
+ * Frees the auto xfer rdy buffers.
+ *
+ * @param hw Hardware context allocated by the caller.
+ *
+ * @return none
+ */
+static void
+ocs_hw_rqpair_auto_xfer_rdy_buffer_free(ocs_hw_t *hw)
+{
+ ocs_hw_auto_xfer_rdy_buffer_t *buf;
+ uint32_t i;
+
+ if (hw->auto_xfer_rdy_buf_pool != NULL) {
+ ocs_lock(&hw->io_lock);
+ for (i = 0; i < ocs_pool_get_count(hw->auto_xfer_rdy_buf_pool); i++) {
+ buf = ocs_pool_get_instance(hw->auto_xfer_rdy_buf_pool, i);
+ if (buf != NULL) {
+ ocs_dma_free(hw->os, &buf->payload.dma);
+ }
+ }
+ ocs_unlock(&hw->io_lock);
+
+ ocs_pool_free(hw->auto_xfer_rdy_buf_pool);
+ hw->auto_xfer_rdy_buf_pool = NULL;
+ }
+}
+
+/**
+ * @ingroup devInitShutdown
+ * @brief Configure the rq_pair function from ocs_hw_init().
+ *
+ * @par Description
+ * Allocates the auto xfer rdy buffers and posts the initial XRIs for this feature.
+ *
+ * @param hw Hardware context allocated by the caller.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+ocs_hw_rtn_e
+ocs_hw_rqpair_init(ocs_hw_t *hw)
+{
+ ocs_hw_rtn_e rc;
+ uint32_t xris_posted;
+
+ ocs_log_debug(hw->os, "RQ Pair mode\n");
+
+ /*
+ * If we get this far, the auto XFR_RDY feature was enabled successfully, otherwise ocs_hw_init() would
+ * return with an error. So allocate the buffers based on the initial XRI pool required to support this
+ * feature.
+ */
+ if (sli_get_auto_xfer_rdy_capable(&hw->sli) &&
+ hw->config.auto_xfer_rdy_size > 0) {
+ if (hw->auto_xfer_rdy_buf_pool == NULL) {
+ /*
+ * Allocate one more buffer than XRIs so that when all the XRIs are in use, we still have
+ * one to post back for the case where the response phase is started in the context of
+ * the data completion.
+ */
+ rc = ocs_hw_rqpair_auto_xfer_rdy_buffer_alloc(hw, hw->config.auto_xfer_rdy_xri_cnt + 1);
+ if (rc != OCS_HW_RTN_SUCCESS) {
+ return rc;
+ }
+ } else {
+ ocs_pool_reset(hw->auto_xfer_rdy_buf_pool);
+ }
+
+ /* Post the auto XFR_RDY XRIs */
+ xris_posted = ocs_hw_xri_move_to_port_owned(hw, hw->config.auto_xfer_rdy_xri_cnt);
+ if (xris_posted != hw->config.auto_xfer_rdy_xri_cnt) {
+ ocs_log_err(hw->os, "post_xri failed, only posted %d XRIs\n", xris_posted);
+ return OCS_HW_RTN_ERROR;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * @ingroup devInitShutdown
+ * @brief Tear down the rq_pair function from ocs_hw_teardown().
+ *
+ * @par Description
+ * Frees the auto xfer rdy buffers.
+ *
+ * @param hw Hardware context allocated by the caller.
+ */
+void
+ocs_hw_rqpair_teardown(ocs_hw_t *hw)
+{
+ /* We need to free any auto xfer ready buffers */
+ ocs_hw_rqpair_auto_xfer_rdy_buffer_free(hw);
+}
diff --git a/sys/dev/ocs_fc/ocs_hw_queues.h b/sys/dev/ocs_fc/ocs_hw_queues.h
new file mode 100644
index 000000000000..5005afe33592
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs_hw_queues.h
@@ -0,0 +1,97 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/**
+ * @file
+ *
+ */
+
+#ifndef __OCS_HW_QUEUES_H__
+#define __OCS_HW_QUEUES_H__
+
+#define OCS_HW_MQ_DEPTH 128
+
+typedef enum {
+ QTOP_EQ = 0,
+ QTOP_CQ,
+ QTOP_WQ,
+ QTOP_RQ,
+ QTOP_MQ,
+ QTOP_LAST,
+} ocs_hw_qtop_entry_e;
+
+typedef struct {
+ ocs_hw_qtop_entry_e entry;
+ uint8_t set_default;
+ uint32_t len;
+ uint8_t class;
+ uint8_t ulp;
+ uint8_t filter_mask;
+} ocs_hw_qtop_entry_t;
+
+typedef struct {
+ struct rq_config {
+ hw_eq_t *eq;
+ uint32_t len;
+ uint8_t class;
+ uint8_t ulp;
+ uint8_t filter_mask;
+ } rq_cfg[16];
+ uint32_t num_pairs;
+} ocs_hw_mrq_t;
+
+
+#define MAX_TOKENS 256
+#define OCS_HW_MAX_QTOP_ENTRIES 200
+
+typedef struct {
+ ocs_os_handle_t os;
+ ocs_hw_qtop_entry_t *entries;
+ uint32_t alloc_count;
+ uint32_t inuse_count;
+ uint32_t entry_counts[QTOP_LAST];
+ uint32_t rptcount[10];
+ uint32_t rptcount_idx;
+} ocs_hw_qtop_t;
+
+extern ocs_hw_qtop_t *ocs_hw_qtop_parse(ocs_hw_t *hw, const char *qtop_string);
+extern void ocs_hw_qtop_free(ocs_hw_qtop_t *qtop);
+extern const char *ocs_hw_qtop_entry_name(ocs_hw_qtop_entry_e entry);
+extern uint32_t ocs_hw_qtop_eq_count(ocs_hw_t *hw);
+
+extern ocs_hw_rtn_e ocs_hw_init_queues(ocs_hw_t *hw, ocs_hw_qtop_t *qtop);
+extern void hw_thread_eq_handler(ocs_hw_t *hw, hw_eq_t *eq, uint32_t max_isr_time_msec);
+extern void hw_thread_cq_handler(ocs_hw_t *hw, hw_cq_t *cq);
+extern hw_wq_t *ocs_hw_queue_next_wq(ocs_hw_t *hw, ocs_hw_io_t *io);
+
+#endif /* __OCS_HW_QUEUES_H__ */
diff --git a/sys/dev/ocs_fc/ocs_io.c b/sys/dev/ocs_fc/ocs_io.c
new file mode 100644
index 000000000000..2951500efc36
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs_io.c
@@ -0,0 +1,491 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/**
+ * @file
+ * Provide IO object allocation.
+ */
+
+/*!
+ * @defgroup io_alloc IO allocation
+ */
+
+#include "ocs.h"
+#include "ocs_scsi.h"
+#include "ocs_els.h"
+#include "ocs_utils.h"
+
+void ocs_mgmt_io_list(ocs_textbuf_t *textbuf, void *io);
+void ocs_mgmt_io_get_all(ocs_textbuf_t *textbuf, void *io);
+int ocs_mgmt_io_get(ocs_textbuf_t *textbuf, char *parent, char *name, void *io);
+
+static ocs_mgmt_functions_t io_mgmt_functions = {
+ .get_list_handler = ocs_mgmt_io_list,
+ .get_handler = ocs_mgmt_io_get,
+ .get_all_handler = ocs_mgmt_io_get_all,
+};
+
+/**
+ * @brief IO pool.
+ *
+ * Structure encapsulating a pool of IO objects.
+ *
+ */
+
+struct ocs_io_pool_s {
+ ocs_t *ocs; /* Pointer to device object */
+ ocs_lock_t lock; /* IO pool lock */
+ uint32_t io_num_ios; /* Total IOs allocated */
+ ocs_pool_t *pool;
+};
+
+/**
+ * @brief Create a pool of IO objects.
+ *
+ * @par Description
+ * This function allocates memory in larger, fixed-size chunks called
+ * "slabs". It calculates the number of IO objects that
+ * fit within each "slab" and determines the number of "slabs" required to
+ * allocate the number of IOs requested. Each of the slabs is allocated and
+ * then it grabs each IO object within the slab and adds it to the free list.
+ * Individual command, response and SGL DMA buffers are allocated for each IO.
+ *
+ * "Slabs"
+ * +----------------+
+ * | |
+ * +----------------+ |
+ * | IO | |
+ * +----------------+ |
+ * | ... | |
+ * +----------------+__+
+ * | IO |
+ * +----------------+
+ *
+ * @param ocs Driver instance's software context.
+ * @param num_io Number of IO contexts to allocate.
+ * @param num_sgl Number of SGL entries to allocate for each IO.
+ *
+ * @return Returns a pointer to a new ocs_io_pool_t on success,
+ * or NULL on failure.
+ */
+
+ocs_io_pool_t *
+ocs_io_pool_create(ocs_t *ocs, uint32_t num_io, uint32_t num_sgl)
+{
+ uint32_t i = 0;
+ int32_t rc = -1;
+ ocs_io_pool_t *io_pool;
+
+ /* Allocate the IO pool */
+ io_pool = ocs_malloc(ocs, sizeof(*io_pool), OCS_M_ZERO | OCS_M_NOWAIT);
+ if (io_pool == NULL) {
+ ocs_log_err(ocs, "allocate of IO pool failed\n");
+ return NULL;;
+ }
+
+ io_pool->ocs = ocs;
+ io_pool->io_num_ios = num_io;
+
+ /* initialize IO pool lock */
+ ocs_lock_init(ocs, &io_pool->lock, "io_pool lock[%d]", ocs->instance_index);
+
+ io_pool->pool = ocs_pool_alloc(ocs, sizeof(ocs_io_t), io_pool->io_num_ios, FALSE);
+ if (io_pool->pool == NULL) {
+ ocs_log_err(ocs, "allocation of IO pool failed\n");
+ ocs_lock_free(&io_pool->lock);
+ ocs_free(ocs, io_pool, sizeof(*io_pool));
+ return NULL;
+ }
+
+ for (i = 0; i < io_pool->io_num_ios; i++) {
+ ocs_io_t *io = ocs_pool_get_instance(io_pool->pool, i);
+
+ io->tag = i;
+ io->instance_index = i;
+ io->ocs = ocs;
+
+ /* allocate a command/response dma buffer */
+ if (ocs->enable_ini) {
+ rc = ocs_dma_alloc(ocs, &io->cmdbuf, SCSI_CMD_BUF_LENGTH, OCS_MIN_DMA_ALIGNMENT);
+ if (rc) {
+ ocs_log_err(ocs, "ocs_dma_alloc cmdbuf failed\n");
+ ocs_io_pool_free(io_pool);
+ return NULL;
+ }
+ }
+
+ /* Allocate a response buffer */
+ rc = ocs_dma_alloc(ocs, &io->rspbuf, SCSI_RSP_BUF_LENGTH, OCS_MIN_DMA_ALIGNMENT);
+ if (rc) {
+ ocs_log_err(ocs, "ocs_dma_alloc cmdbuf failed\n");
+ ocs_io_pool_free(io_pool);
+ return NULL;
+ }
+
+ /* Allocate SGL */
+ io->sgl = ocs_malloc(ocs, sizeof(*io->sgl) * num_sgl, OCS_M_NOWAIT | OCS_M_ZERO);
+ if (io->sgl == NULL) {
+ ocs_log_err(ocs, "malloc sgl's failed\n");
+ ocs_io_pool_free(io_pool);
+ return NULL;
+ }
+ io->sgl_allocated = num_sgl;
+ io->sgl_count = 0;
+
+ /* Make IO backend call to initialize IO */
+ ocs_scsi_tgt_io_init(io);
+ ocs_scsi_ini_io_init(io);
+
+ rc = ocs_dma_alloc(ocs, &io->els_req, OCS_ELS_REQ_LEN, OCS_MIN_DMA_ALIGNMENT);
+ if (rc) {
+ ocs_log_err(ocs, "ocs_dma_alloc els_req failed\n");
+ ocs_io_pool_free(io_pool);
+ return NULL;
+ }
+
+ rc = ocs_dma_alloc(ocs, &io->els_rsp, OCS_ELS_GID_PT_RSP_LEN, OCS_MIN_DMA_ALIGNMENT);
+ if (rc) {
+ ocs_log_err(ocs, "ocs_dma_alloc els_rsp failed\n");
+ ocs_io_pool_free(io_pool);
+ return NULL;
+ }
+ }
+
+ return io_pool;
+}
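+
+/*
+ * Example (an illustrative sketch, not part of the driver; the IO and SGL
+ * counts shown are assumptions): a transport would typically create the
+ * pool once at attach time and destroy it at detach time:
+ *
+ *     ocs_io_pool_t *pool = ocs_io_pool_create(ocs, 1024, 128);
+ *     if (pool == NULL)
+ *             return ENOMEM;
+ *     ...
+ *     ocs_io_pool_free(pool);
+ */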
+
+/**
+ * @brief Free IO objects pool
+ *
+ * @par Description
+ * The pool of IO objects are freed.
+ *
+ * @param io_pool Pointer to IO pool object.
+ *
+ * @return Returns 0 on success, or a negative error code value on failure.
+ */
+int32_t
+ocs_io_pool_free(ocs_io_pool_t *io_pool)
+{
+ ocs_t *ocs;
+ uint32_t i;
+ ocs_io_t *io;
+
+ if (io_pool != NULL) {
+ ocs = io_pool->ocs;
+ for (i = 0; i < io_pool->io_num_ios; i++) {
+ io = ocs_pool_get_instance(io_pool->pool, i);
+ if (!io)
+ continue;
+ ocs_scsi_tgt_io_exit(io);
+ ocs_scsi_ini_io_exit(io);
+ if (io->sgl) {
+ ocs_free(ocs, io->sgl, sizeof(*io->sgl) * io->sgl_allocated);
+ }
+ ocs_dma_free(ocs, &io->cmdbuf);
+ ocs_dma_free(ocs, &io->rspbuf);
+ ocs_dma_free(ocs, &io->els_req);
+ ocs_dma_free(ocs, &io->els_rsp);
+ }
+
+ if (io_pool->pool != NULL) {
+ ocs_pool_free(io_pool->pool);
+ }
+ ocs_lock_free(&io_pool->lock);
+ ocs_free(ocs, io_pool, sizeof(*io_pool));
+ ocs->xport->io_pool = NULL;
+ }
+
+ return 0;
+}
+
+uint32_t ocs_io_pool_allocated(ocs_io_pool_t *io_pool)
+{
+ return io_pool->io_num_ios;
+}
+
+/**
+ * @ingroup io_alloc
+ * @brief Allocate an object used to track an IO.
+ *
+ * @param io_pool Pointer to the IO pool.
+ *
+ * @return Returns the pointer to a new object, or NULL if none available.
+ */
+ocs_io_t *
+ocs_io_pool_io_alloc(ocs_io_pool_t *io_pool)
+{
+ ocs_io_t *io = NULL;
+ ocs_t *ocs;
+
+ ocs_assert(io_pool, NULL);
+
+ ocs = io_pool->ocs;
+
+ ocs_lock(&io_pool->lock);
+ if ((io = ocs_pool_get(io_pool->pool)) != NULL) {
+ ocs_unlock(&io_pool->lock);
+
+ io->io_type = OCS_IO_TYPE_MAX;
+ io->hio_type = OCS_HW_IO_MAX;
+ io->hio = NULL;
+ io->transferred = 0;
+ io->ocs = ocs;
+ io->timeout = 0;
+ io->sgl_count = 0;
+ io->tgt_task_tag = 0;
+ io->init_task_tag = 0;
+ io->hw_tag = 0;
+ io->display_name = "pending";
+ io->seq_init = 0;
+ io->els_req_free = 0;
+ io->mgmt_functions = &io_mgmt_functions;
+ io->io_free = 0;
+ ocs_atomic_add_return(&ocs->xport->io_active_count, 1);
+ ocs_atomic_add_return(&ocs->xport->io_total_alloc, 1);
+ } else {
+ ocs_unlock(&io_pool->lock);
+ }
+ return io;
+}
+
+/**
+ * @ingroup io_alloc
+ * @brief Free an object used to track an IO.
+ *
+ * @param io_pool Pointer to IO pool object.
+ * @param io Pointer to the IO object.
+ */
+void
+ocs_io_pool_io_free(ocs_io_pool_t *io_pool, ocs_io_t *io)
+{
+ ocs_t *ocs;
+ ocs_hw_io_t *hio = NULL;
+
+ ocs_assert(io_pool);
+
+ ocs = io_pool->ocs;
+
+ ocs_lock(&io_pool->lock);
+ hio = io->hio;
+ io->hio = NULL;
+ /* Mark the IO free before returning it to the pool, so a concurrent
+  * ocs_io_pool_io_alloc() cannot have its io_free flag overwritten
+  * after it re-allocates this IO. */
+ io->io_free = 1;
+ ocs_pool_put(io_pool->pool, io);
+ ocs_unlock(&io_pool->lock);
+
+ if (hio) {
+ ocs_hw_io_free(&ocs->hw, hio);
+ }
+ ocs_atomic_sub_return(&ocs->xport->io_active_count, 1);
+ ocs_atomic_add_return(&ocs->xport->io_total_free, 1);
+}
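+
+/*
+ * Example (illustrative only): every successful ocs_io_pool_io_alloc() must
+ * be balanced by an ocs_io_pool_io_free() once the IO completes, e.g.:
+ *
+ *     ocs_io_t *io = ocs_io_pool_io_alloc(ocs->xport->io_pool);
+ *     if (io == NULL)
+ *             return NULL;    (pool exhausted)
+ *     ... submit the IO and wait for it to complete ...
+ *     ocs_io_pool_io_free(ocs->xport->io_pool, io);
+ */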
+
+/**
+ * @ingroup io_alloc
+ * @brief Find an IO given its node and OX_ID.
+ *
+ * @param ocs Driver instance's software context.
+ * @param node Pointer to node.
+ * @param ox_id OX_ID to find.
+ * @param rx_id RX_ID to find (0xffff for unassigned).
+ *
+ * @return Returns a pointer to the matching IO, or NULL if not found.
+ */
+ocs_io_t *
+ocs_io_find_tgt_io(ocs_t *ocs, ocs_node_t *node, uint16_t ox_id, uint16_t rx_id)
+{
+ ocs_io_t *io = NULL;
+
+ ocs_lock(&node->active_ios_lock);
+ ocs_list_foreach(&node->active_ios, io)
+ if ((io->cmd_tgt && (io->init_task_tag == ox_id)) &&
+ ((rx_id == 0xffff) || (io->tgt_task_tag == rx_id))) {
+ break;
+ }
+ ocs_unlock(&node->active_ios_lock);
+ return io;
+}
+
+/**
+ * @ingroup io_alloc
+ * @brief Return IO context given the instance index.
+ *
+ * @par Description
+ * Returns a pointer to the IO context given by the instance index.
+ *
+ * @param ocs Pointer to driver structure.
+ * @param index IO instance index to return.
+ *
+ * @return Returns a pointer to the IO context, or NULL if not found.
+ */
+ocs_io_t *
+ocs_io_get_instance(ocs_t *ocs, uint32_t index)
+{
+ ocs_xport_t *xport = ocs->xport;
+ ocs_io_pool_t *io_pool = xport->io_pool;
+ return ocs_pool_get_instance(io_pool->pool, index);
+}
+
+/**
+ * @brief Generate IO context ddump data.
+ *
+ * The ddump data for an IO context is generated.
+ *
+ * @param textbuf Pointer to text buffer.
+ * @param io Pointer to IO context.
+ *
+ * @return None.
+ */
+
+void
+ocs_ddump_io(ocs_textbuf_t *textbuf, ocs_io_t *io)
+{
+ ocs_ddump_section(textbuf, "io", io->instance_index);
+ ocs_ddump_value(textbuf, "display_name", "%s", io->display_name);
+ ocs_ddump_value(textbuf, "node_name", "%s", io->node->display_name);
+
+ ocs_ddump_value(textbuf, "ref_count", "%d", ocs_ref_read_count(&io->ref));
+ ocs_ddump_value(textbuf, "io_type", "%d", io->io_type);
+ ocs_ddump_value(textbuf, "hio_type", "%d", io->hio_type);
+ ocs_ddump_value(textbuf, "cmd_tgt", "%d", io->cmd_tgt);
+ ocs_ddump_value(textbuf, "cmd_ini", "%d", io->cmd_ini);
+ ocs_ddump_value(textbuf, "send_abts", "%d", io->send_abts);
+ ocs_ddump_value(textbuf, "init_task_tag", "0x%x", io->init_task_tag);
+ ocs_ddump_value(textbuf, "tgt_task_tag", "0x%x", io->tgt_task_tag);
+ ocs_ddump_value(textbuf, "hw_tag", "0x%x", io->hw_tag);
+ ocs_ddump_value(textbuf, "tag", "0x%x", io->tag);
+ ocs_ddump_value(textbuf, "timeout", "%d", io->timeout);
+ ocs_ddump_value(textbuf, "tmf_cmd", "%d", io->tmf_cmd);
+ ocs_ddump_value(textbuf, "abort_rx_id", "0x%x", io->abort_rx_id);
+
+ ocs_ddump_value(textbuf, "busy", "%d", ocs_io_busy(io));
+ ocs_ddump_value(textbuf, "transferred", "%zu", io->transferred);
+ ocs_ddump_value(textbuf, "auto_resp", "%d", io->auto_resp);
+ ocs_ddump_value(textbuf, "exp_xfer_len", "%d", io->exp_xfer_len);
+ ocs_ddump_value(textbuf, "xfer_req", "%d", io->xfer_req);
+ ocs_ddump_value(textbuf, "seq_init", "%d", io->seq_init);
+
+ ocs_ddump_value(textbuf, "alloc_link", "%d", ocs_list_on_list(&io->io_alloc_link));
+ ocs_ddump_value(textbuf, "pending_link", "%d", ocs_list_on_list(&io->io_pending_link));
+ ocs_ddump_value(textbuf, "backend_link", "%d", ocs_list_on_list(&io->link));
+
+ if (io->hio) {
+ ocs_ddump_value(textbuf, "hw_tag", "%#x", io->hio->reqtag);
+ ocs_ddump_value(textbuf, "hw_xri", "%#x", io->hio->indicator);
+ ocs_ddump_value(textbuf, "hw_type", "%#x", io->hio->type);
+ } else {
+ ocs_ddump_value(textbuf, "hw_tag", "%s", "pending");
+ ocs_ddump_value(textbuf, "hw_xri", "%s", "pending");
+ ocs_ddump_value(textbuf, "hw_type", "%s", "pending");
+ }
+
+ ocs_scsi_ini_ddump(textbuf, OCS_SCSI_DDUMP_IO, io);
+ ocs_scsi_tgt_ddump(textbuf, OCS_SCSI_DDUMP_IO, io);
+
+ ocs_ddump_endsection(textbuf, "io", io->instance_index);
+}
+
+
+void
+ocs_mgmt_io_list(ocs_textbuf_t *textbuf, void *object)
+{
+ /* Read-only values */
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "display_name");
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "init_task_tag");
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "tag");
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "transferred");
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "auto_resp");
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "exp_xfer_len");
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "xfer_req");
+}
+
+int
+ocs_mgmt_io_get(ocs_textbuf_t *textbuf, char *parent, char *name, void *object)
+{
+ char qualifier[80];
+ int retval = -1;
+ ocs_io_t *io = (ocs_io_t *) object;
+
+ snprintf(qualifier, sizeof(qualifier), "%s/io[%d]", parent, io->instance_index);
+
+ /* If it doesn't start with my qualifier I don't know what to do with it */
+ if (ocs_strncmp(name, qualifier, strlen(qualifier)) == 0) {
+ char *unqualified_name = name + strlen(qualifier) + 1;
+
+ /* See if it's a value I can supply */
+ if (ocs_strcmp(unqualified_name, "display_name") == 0) {
+ ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "display_name", io->display_name);
+ retval = 0;
+ } else if (ocs_strcmp(unqualified_name, "init_task_tag") == 0) {
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "init_task_tag", "0x%x", io->init_task_tag);
+ retval = 0;
+ } else if (ocs_strcmp(unqualified_name, "tgt_task_tag") == 0) {
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "tgt_task_tag", "0x%x", io->tgt_task_tag);
+ retval = 0;
+ } else if (ocs_strcmp(unqualified_name, "hw_tag") == 0) {
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "hw_tag", "0x%x", io->hw_tag);
+ retval = 0;
+ } else if (ocs_strcmp(unqualified_name, "tag") == 0) {
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "tag", "0x%x", io->tag);
+ retval = 0;
+ } else if (ocs_strcmp(unqualified_name, "transferred") == 0) {
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "transferred", "%zu", io->transferred);
+ retval = 0;
+ } else if (ocs_strcmp(unqualified_name, "auto_resp") == 0) {
+ ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "auto_resp", io->auto_resp);
+ retval = 0;
+ } else if (ocs_strcmp(unqualified_name, "exp_xfer_len") == 0) {
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "exp_xfer_len", "%d", io->exp_xfer_len);
+ retval = 0;
+ } else if (ocs_strcmp(unqualified_name, "xfer_req") == 0) {
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "xfer_req", "%d", io->xfer_req);
+ retval = 0;
+ }
+ }
+
+ return retval;
+}
+
+void
+ocs_mgmt_io_get_all(ocs_textbuf_t *textbuf, void *object)
+{
+ ocs_io_t *io = (ocs_io_t *) object;
+
+ ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "display_name", io->display_name);
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "init_task_tag", "0x%x", io->init_task_tag);
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "tgt_task_tag", "0x%x", io->tgt_task_tag);
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "hw_tag", "0x%x", io->hw_tag);
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "tag", "0x%x", io->tag);
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "transferred", "%zu", io->transferred);
+ ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "auto_resp", io->auto_resp);
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "exp_xfer_len", "%d", io->exp_xfer_len);
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "xfer_req", "%d", io->xfer_req);
+}
+
diff --git a/sys/dev/ocs_fc/ocs_io.h b/sys/dev/ocs_fc/ocs_io.h
new file mode 100644
index 000000000000..b5a9544ace09
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs_io.h
@@ -0,0 +1,196 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/**
+ * @file
+ * OCS linux driver IO declarations
+ */
+
+#if !defined(__OCS_IO_H__)
+#define __OCS_IO_H__
+
+#define io_error_log(io, fmt, ...) \
+ do { \
+ if (OCS_LOG_ENABLE_IO_ERRORS(io->ocs)) \
+ ocs_log_warn(io->ocs, fmt, ##__VA_ARGS__); \
+ } while (0)
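+
+/*
+ * Example use of io_error_log() (illustrative; the message text and the
+ * "scsi_status" variable are assumptions): logs a warning only when IO
+ * error logging is enabled for this device:
+ *
+ *     io_error_log(io, "unexpected SCSI status 0x%x\n", scsi_status);
+ */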
+
+/**
+ * @brief FCP IO context
+ *
+ * This structure is used for transport and backend IO requests and responses.
+ */
+
+#define SCSI_CMD_BUF_LENGTH 48
+#define SCSI_RSP_BUF_LENGTH sizeof(fcp_rsp_iu_t)
+
+/**
+ * @brief OCS IO types
+ */
+typedef enum {
+ OCS_IO_TYPE_IO = 0,
+ OCS_IO_TYPE_ELS,
+ OCS_IO_TYPE_CT,
+ OCS_IO_TYPE_CT_RESP,
+ OCS_IO_TYPE_BLS_RESP,
+ OCS_IO_TYPE_ABORT,
+
+ OCS_IO_TYPE_MAX, /**< must be last */
+} ocs_io_type_e;
+
+struct ocs_io_s {
+
+ ocs_t *ocs; /**< pointer back to ocs */
+ uint32_t instance_index; /**< unique instance index value */
+ const char *display_name; /**< display name */
+ ocs_node_t *node; /**< pointer to node */
+ ocs_list_link_t io_alloc_link; /**< (io_pool->io_free_list) free list link */
+ uint32_t init_task_tag; /**< initiator task tag (OX_ID) for back-end and SCSI logging */
+ uint32_t tgt_task_tag; /**< target task tag (RX_ID) - for back-end and SCSI logging */
+ uint32_t hw_tag; /**< HW layer unique IO id - for back-end and SCSI logging */
+ uint32_t tag; /**< unique IO identifier */
+ ocs_scsi_sgl_t *sgl; /**< SGL */
+ uint32_t sgl_allocated; /**< Number of allocated SGEs */
+ uint32_t sgl_count; /**< Number of SGEs in this SGL */
+ ocs_scsi_ini_io_t ini_io; /**< backend initiator private IO data */
+ ocs_scsi_tgt_io_t tgt_io; /**< backend target private IO data */
+ uint32_t exp_xfer_len; /**< expected data transfer length, based on FC or iSCSI header */
+ ocs_mgmt_functions_t *mgmt_functions;
+
+ /* Declarations private to HW/SLI */
+ void *hw_priv; /**< HW private context */
+
+ /* Declarations private to FC Transport */
+ ocs_io_type_e io_type; /**< indicates what this ocs_io_t structure is used for */
+ ocs_ref_t ref; /**< refcount object */
+ void *dslab_item; /**< pointer back to dslab allocation object */
+ ocs_hw_io_t *hio; /**< HW IO context */
+ size_t transferred; /**< Number of bytes transferred so far */
+ uint32_t auto_resp:1, /**< set if auto_trsp was set */
+ low_latency:1, /**< set if low latency request */
+ wq_steering:4, /**< selected WQ steering request */
+ wq_class:4; /**< selected WQ class if steering is class */
+ uint32_t xfer_req; /**< transfer size for current request */
+ ocs_scsi_rsp_io_cb_t scsi_ini_cb; /**< initiator callback function */
+ void *scsi_ini_cb_arg; /**< initiator callback function argument */
+ ocs_scsi_io_cb_t scsi_tgt_cb; /**< target callback function */
+ void *scsi_tgt_cb_arg; /**< target callback function argument */
+ ocs_scsi_io_cb_t abort_cb; /**< abort callback function */
+ void *abort_cb_arg; /**< abort callback function argument */
+ ocs_scsi_io_cb_t bls_cb; /**< BLS callback function */
+ void *bls_cb_arg; /**< BLS callback function argument */
+ ocs_scsi_tmf_cmd_e tmf_cmd; /**< TMF command being processed */
+ uint16_t abort_rx_id; /**< rx_id from the ABTS that initiated the command abort */
+
+ uint32_t cmd_tgt:1, /**< True if this is a Target command */
+ send_abts:1, /**< when aborting, indicates ABTS is to be sent */
+ cmd_ini:1, /**< True if this is an Initiator command */
+ seq_init:1; /**< True if local node has sequence initiative */
+ ocs_hw_io_param_t iparam; /**< iparams for hw io send call */
+ ocs_hw_dif_info_t hw_dif; /**< HW formatted DIF parameters */
+ ocs_scsi_dif_info_t scsi_dif_info; /**< DIF info saved for DIF error recovery */
+ ocs_hw_io_type_e hio_type; /**< HW IO type */
+ uint32_t wire_len; /**< wire length */
+ void *hw_cb; /**< saved HW callback */
+ ocs_list_link_t io_pending_link;/**< link list link pending */
+
+ ocs_dma_t ovfl_sgl; /**< Overflow SGL */
+
+ /* for ELS requests/responses */
+ uint32_t els_pend:1, /**< True if ELS is pending */
+ els_active:1; /**< True if ELS is active */
+ ocs_dma_t els_req; /**< ELS request payload buffer */
+ ocs_dma_t els_rsp; /**< ELS response payload buffer */
+ ocs_sm_ctx_t els_sm; /**< ELS IO state machine context */
+ uint32_t els_evtdepth; /**< current event posting nesting depth */
+ uint32_t els_req_free:1; /**< this els is to be free'd */
+ uint32_t els_retries_remaining; /**< Retries remaining */
+ void (*els_callback)(ocs_node_t *node, ocs_node_cb_t *cbdata, void *cbarg);
+ void *els_callback_arg;
+ uint32_t els_timeout_sec; /**< timeout */
+
+ ocs_timer_t delay_timer; /**< delay timer */
+
+ /* for abort handling */
+ ocs_io_t *io_to_abort; /**< pointer to IO to abort */
+
+ ocs_list_link_t link; /**< linked list link */
+ ocs_dma_t cmdbuf; /**< SCSI Command buffer, used for CDB (initiator) */
+ ocs_dma_t rspbuf; /**< SCSI Response buffer (i+t) */
+ uint32_t timeout; /**< Timeout value in seconds for this IO */
+ uint8_t cs_ctl; /**< CS_CTL priority for this IO */
+ uint8_t io_free; /**< True if IO object is on the free list */
+ uint32_t app_id;
+};
+
+/**
+ * @brief common IO callback argument
+ *
+ * Callback argument used as common I/O callback argument
+ */
+
+typedef struct {
+ int32_t status; /**< completion status */
+ int32_t ext_status; /**< extended completion status */
+ void *app; /**< application argument */
+} ocs_io_cb_arg_t;
+
+/**
+ * @brief Test if IO object is busy
+ *
+ * Returns True if the IO object is busy. Busy is defined as the IO object
+ * not being on the free list.
+ *
+ * @param io Pointer to IO object
+ *
+ * @return returns True if IO is busy
+ */
+
+static inline int32_t
+ocs_io_busy(ocs_io_t *io)
+{
+ return !(io->io_free);
+}
+
+typedef struct ocs_io_pool_s ocs_io_pool_t;
+
+extern ocs_io_pool_t *ocs_io_pool_create(ocs_t *ocs, uint32_t num_io, uint32_t num_sgl);
+extern int32_t ocs_io_pool_free(ocs_io_pool_t *io_pool);
+extern uint32_t ocs_io_pool_allocated(ocs_io_pool_t *io_pool);
+
+extern ocs_io_t *ocs_io_pool_io_alloc(ocs_io_pool_t *io_pool);
+extern void ocs_io_pool_io_free(ocs_io_pool_t *io_pool, ocs_io_t *io);
+extern ocs_io_t *ocs_io_find_tgt_io(ocs_t *ocs, ocs_node_t *node, uint16_t ox_id, uint16_t rx_id);
+extern void ocs_ddump_io(ocs_textbuf_t *textbuf, ocs_io_t *io);
+
+#endif /* __OCS_IO_H__ */
diff --git a/sys/dev/ocs_fc/ocs_ioctl.c b/sys/dev/ocs_fc/ocs_ioctl.c
new file mode 100644
index 000000000000..2a9157cfbc4e
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs_ioctl.c
@@ -0,0 +1,1253 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include "ocs.h"
+#include "ocs_utils.h"
+
+#include <sys/conf.h>
+#include <sys/sysctl.h>
+#include <sys/ioccom.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/linker.h>
+#include <sys/firmware.h>
+
+static d_open_t ocs_open;
+static d_close_t ocs_close;
+static d_ioctl_t ocs_ioctl;
+
+static struct cdevsw ocs_cdevsw = {
+ .d_version = D_VERSION,
+ .d_open = ocs_open,
+ .d_close = ocs_close,
+ .d_ioctl = ocs_ioctl,
+ .d_name = "ocs_fc"
+};
+
+int
+ocs_firmware_write(ocs_t *ocs, const uint8_t *buf, size_t buf_len, uint8_t *change_status);
+
+static int
+ocs_open(struct cdev *cdev, int flags, int fmt, struct thread *td)
+{
+#if 0
+ struct ocs_softc *ocs = cdev->si_drv1;
+
+ device_printf(ocs->dev, "%s\n", __func__);
+#endif
+ return 0;
+}
+
+static int
+ocs_close(struct cdev *cdev, int flag, int fmt, struct thread *td)
+{
+#if 0
+ struct ocs_softc *ocs = cdev->si_drv1;
+
+ device_printf(ocs->dev, "%s\n", __func__);
+#endif
+ return 0;
+}
+
+static int32_t
+__ocs_ioctl_mbox_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
+{
+ struct ocs_softc *ocs = arg;
+
+ /*
+  * Acquire and release dbg_lock to ensure the ioctl thread has reached
+  * msleep() (which drops the lock) before wakeup() is called.
+  */
+ mtx_lock(&ocs->dbg_lock);
+ mtx_unlock(&ocs->dbg_lock);
+
+ wakeup(arg);
+
+ return 0;
+}
+
+static int
+ocs_process_sli_config(ocs_t *ocs, ocs_ioctl_elxu_mbox_t *mcmd, ocs_dma_t *dma)
+{
+ sli4_cmd_sli_config_t *sli_config = (sli4_cmd_sli_config_t *)mcmd->payload;
+
+ if (sli_config->emb) {
+ sli4_req_hdr_t *req = (sli4_req_hdr_t *)sli_config->payload.embed;
+
+ switch (req->opcode) {
+ case SLI4_OPC_COMMON_READ_OBJECT:
+ if (mcmd->out_bytes) {
+ sli4_req_common_read_object_t *rdobj =
+ (sli4_req_common_read_object_t *)sli_config->payload.embed;
+
+ if (ocs_dma_alloc(ocs, dma, mcmd->out_bytes, 4096)) {
+ device_printf(ocs->dev, "%s: COMMON_READ_OBJECT - %lld allocation failed\n",
+ __func__, (unsigned long long)mcmd->out_bytes);
+ return ENXIO;
+ }
+
+ memset(dma->virt, 0, mcmd->out_bytes);
+
+ rdobj->host_buffer_descriptor[0].bde_type = SLI4_BDE_TYPE_BDE_64;
+ rdobj->host_buffer_descriptor[0].buffer_length = mcmd->out_bytes;
+ rdobj->host_buffer_descriptor[0].u.data.buffer_address_low = ocs_addr32_lo(dma->phys);
+ rdobj->host_buffer_descriptor[0].u.data.buffer_address_high = ocs_addr32_hi(dma->phys);
+
+ }
+ break;
+ case SLI4_OPC_COMMON_WRITE_OBJECT:
+ {
+ sli4_req_common_write_object_t *wrobj =
+ (sli4_req_common_write_object_t *)sli_config->payload.embed;
+
+ if (ocs_dma_alloc(ocs, dma, wrobj->desired_write_length, 4096)) {
+ device_printf(ocs->dev, "%s: COMMON_WRITE_OBJECT - %d allocation failed\n",
+ __func__, wrobj->desired_write_length);
+ return ENXIO;
+ }
+ /* setup the descriptor */
+ wrobj->host_buffer_descriptor[0].bde_type = SLI4_BDE_TYPE_BDE_64;
+ wrobj->host_buffer_descriptor[0].buffer_length = wrobj->desired_write_length;
+ wrobj->host_buffer_descriptor[0].u.data.buffer_address_low = ocs_addr32_lo(dma->phys);
+ wrobj->host_buffer_descriptor[0].u.data.buffer_address_high = ocs_addr32_hi(dma->phys);
+
+ /* copy the data into the DMA buffer */
+ if (copyin((void *)mcmd->in_addr, dma->virt, mcmd->in_bytes)) {
+ device_printf(ocs->dev, "%s: COMMON_WRITE_OBJECT - copyin failed\n",
+ __func__);
+ ocs_dma_free(ocs, dma);
+ return ENXIO;
+ }
+ }
+ break;
+ case SLI4_OPC_COMMON_DELETE_OBJECT:
+ break;
+ case SLI4_OPC_COMMON_READ_OBJECT_LIST:
+ if (mcmd->out_bytes) {
+ sli4_req_common_read_object_list_t *rdobj =
+ (sli4_req_common_read_object_list_t *)sli_config->payload.embed;
+
+ if (ocs_dma_alloc(ocs, dma, mcmd->out_bytes, 4096)) {
+ device_printf(ocs->dev, "%s: COMMON_READ_OBJECT_LIST - %lld allocation failed\n",
+ __func__,(unsigned long long) mcmd->out_bytes);
+ return ENXIO;
+ }
+
+ memset(dma->virt, 0, mcmd->out_bytes);
+
+ rdobj->host_buffer_descriptor[0].bde_type = SLI4_BDE_TYPE_BDE_64;
+ rdobj->host_buffer_descriptor[0].buffer_length = mcmd->out_bytes;
+ rdobj->host_buffer_descriptor[0].u.data.buffer_address_low = ocs_addr32_lo(dma->phys);
+ rdobj->host_buffer_descriptor[0].u.data.buffer_address_high = ocs_addr32_hi(dma->phys);
+
+ }
+ break;
+ case SLI4_OPC_COMMON_READ_TRANSCEIVER_DATA:
+ break;
+ default:
+ device_printf(ocs->dev, "%s: in=%p (%lld) out=%p (%lld)\n", __func__,
+ (void *)mcmd->in_addr, (unsigned long long)mcmd->in_bytes,
+ (void *)mcmd->out_addr, (unsigned long long)mcmd->out_bytes);
+ device_printf(ocs->dev, "%s: unknown (opc=%#x)\n", __func__,
+ req->opcode);
+ hexdump(mcmd, mcmd->size, NULL, 0);
+ break;
+ }
+ } else {
+ uint32_t max_bytes = max(mcmd->in_bytes, mcmd->out_bytes);
+ if (ocs_dma_alloc(ocs, dma, max_bytes, 4096)) {
+ device_printf(ocs->dev, "%s: non-embedded - %u allocation failed\n",
+ __func__, max_bytes);
+ return ENXIO;
+ }
+
+ if (copyin((void *)mcmd->in_addr, dma->virt, mcmd->in_bytes)) {
+ device_printf(ocs->dev, "%s: non-embedded - copyin failed\n", __func__);
+ ocs_dma_free(ocs, dma);
+ return ENXIO;
+ }
+
+ sli_config->payload.mem.address_low = ocs_addr32_lo(dma->phys);
+ sli_config->payload.mem.address_high = ocs_addr32_hi(dma->phys);
+ sli_config->payload.mem.length = max_bytes;
+ }
+
+ return 0;
+}
+
+static int
+ocs_process_mbx_ioctl(ocs_t *ocs, ocs_ioctl_elxu_mbox_t *mcmd)
+{
+ ocs_dma_t dma = { 0 };
+
+ if ((ELXU_BSD_MAGIC != mcmd->magic) ||
+ (sizeof(ocs_ioctl_elxu_mbox_t) != mcmd->size)) {
+ device_printf(ocs->dev, "%s: malformed command m=%08x s=%08x\n",
+ __func__, mcmd->magic, mcmd->size);
+ return EINVAL;
+ }
+
+ switch(((sli4_mbox_command_header_t *)mcmd->payload)->command) {
+ case SLI4_MBOX_COMMAND_SLI_CONFIG:
+ if (ENXIO == ocs_process_sli_config(ocs, mcmd, &dma))
+ return ENXIO;
+ break;
+
+ case SLI4_MBOX_COMMAND_READ_REV:
+ case SLI4_MBOX_COMMAND_READ_STATUS:
+ case SLI4_MBOX_COMMAND_READ_LNK_STAT:
+ break;
+
+ default:
+ device_printf(ocs->dev, "command %d\n",((sli4_mbox_command_header_t *)mcmd->payload)->command);
+ device_printf(ocs->dev, "%s, command not support\n", __func__);
+ goto no_support;
+ }
+
+ /*
+ * The dbg_lock usage here ensures that the command completion code
+ * (__ocs_ioctl_mbox_cb), which calls wakeup(), does not run until
+ * after the ioctl first calls msleep():
+ *
+ * 1. ioctl grabs dbg_lock
+ * 2. ioctl issues command
+ * if the command completes before msleep(), the
+ * command completion code (__ocs_ioctl_mbox_cb) will spin
+ * on dbg_lock before calling wakeup()
+ * 3. ioctl calls msleep which releases dbg_lock before sleeping
+ * and reacquires it before waking
+ * 4. command completion handler acquires the dbg_lock, immediately
+ * releases it, and calls wakeup
+ * 5. msleep returns, re-acquiring the lock
+ * 6. ioctl code releases the lock
+ */
+ mtx_lock(&ocs->dbg_lock);
+ ocs_hw_command(&ocs->hw, mcmd->payload, OCS_CMD_NOWAIT,
+ __ocs_ioctl_mbox_cb, ocs);
+ msleep(ocs, &ocs->dbg_lock, 0, "ocsmbx", 0);
+ mtx_unlock(&ocs->dbg_lock);
+
+ if ((SLI4_MBOX_COMMAND_SLI_CONFIG ==
+     ((sli4_mbox_command_header_t *)mcmd->payload)->command) &&
+     mcmd->out_bytes && dma.virt) {
+ copyout(dma.virt, (void *)mcmd->out_addr, mcmd->out_bytes);
+ }
+
+no_support:
+ ocs_dma_free(ocs, &dma);
+
+ return 0;
+}
+
+/**
+ * @brief perform requested Elx CoreDump helper function
+ *
+ * The Elx CoreDump facility used for BE3 diagnostics uses the OCS_IOCTL_CMD_ECD_HELPER
+ * ioctl to execute requested "helper" functions.
+ *
+ * @param ocs pointer to ocs structure
+ * @param req pointer to helper function request
+ *
+ * @return returns 0 for success, a negative error code value for failure.
+ */
+
+static int
+ocs_process_ecd_helper (ocs_t *ocs, ocs_ioctl_ecd_helper_t *req)
+{
+ int32_t rc = 0;
+ uint8_t v8;
+ uint16_t v16;
+ uint32_t v32;
+
+ /* Check the BAR read/write commands for valid bar */
+ switch(req->cmd) {
+ case OCS_ECD_HELPER_BAR_READ8:
+ case OCS_ECD_HELPER_BAR_READ16:
+ case OCS_ECD_HELPER_BAR_READ32:
+ case OCS_ECD_HELPER_BAR_WRITE8:
+ case OCS_ECD_HELPER_BAR_WRITE16:
+ case OCS_ECD_HELPER_BAR_WRITE32:
+ if (req->bar >= PCI_MAX_BAR) {
+ device_printf(ocs->dev, "Error: bar %d out of range\n", req->bar);
+ return -EFAULT;
+ }
+ if (ocs->reg[req->bar].res == NULL) {
+ device_printf(ocs->dev, "Error: bar %d not defined\n", req->bar);
+ return -EFAULT;
+ }
+ break;
+ default:
+ break;
+ }
+
+ switch(req->cmd) {
+ case OCS_ECD_HELPER_CFG_READ8:
+ v8 = ocs_config_read8(ocs, req->offset);
+ req->data = v8;
+ break;
+ case OCS_ECD_HELPER_CFG_READ16:
+ v16 = ocs_config_read16(ocs, req->offset);
+ req->data = v16;
+ break;
+ case OCS_ECD_HELPER_CFG_READ32:
+ v32 = ocs_config_read32(ocs, req->offset);
+ req->data = v32;
+ break;
+ case OCS_ECD_HELPER_CFG_WRITE8:
+ ocs_config_write8(ocs, req->offset, req->data);
+ break;
+ case OCS_ECD_HELPER_CFG_WRITE16:
+ ocs_config_write16(ocs, req->offset, req->data);
+ break;
+ case OCS_ECD_HELPER_CFG_WRITE32:
+ ocs_config_write32(ocs, req->offset, req->data);
+ break;
+ case OCS_ECD_HELPER_BAR_READ8:
+ req->data = ocs_reg_read8(ocs, req->bar, req->offset);
+ break;
+ case OCS_ECD_HELPER_BAR_READ16:
+ req->data = ocs_reg_read16(ocs, req->bar, req->offset);
+ break;
+ case OCS_ECD_HELPER_BAR_READ32:
+ req->data = ocs_reg_read32(ocs, req->bar, req->offset);
+ break;
+ case OCS_ECD_HELPER_BAR_WRITE8:
+ ocs_reg_write8(ocs, req->bar, req->offset, req->data);
+ break;
+ case OCS_ECD_HELPER_BAR_WRITE16:
+ ocs_reg_write16(ocs, req->bar, req->offset, req->data);
+ break;
+ case OCS_ECD_HELPER_BAR_WRITE32:
+ ocs_reg_write32(ocs, req->bar, req->offset, req->data);
+ break;
+ default:
+ device_printf(ocs->dev, "Invalid helper command=%d\n", req->cmd);
+ break;
+ }
+
+ return rc;
+}
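+
+/*
+ * Illustrative user-space request for the helper above (a sketch; the open
+ * file descriptor "fd" on the ocs device node is assumed): read a 32-bit
+ * value at offset 0 of BAR 0:
+ *
+ *     ocs_ioctl_ecd_helper_t req = { 0 };
+ *     req.cmd = OCS_ECD_HELPER_BAR_READ32;
+ *     req.bar = 0;
+ *     req.offset = 0;
+ *     if (ioctl(fd, OCS_IOCTL_CMD_ECD_HELPER, &req) == 0)
+ *             printf("data = 0x%x\n", req.data);
+ */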
+
+static int
+ocs_ioctl(struct cdev *cdev, u_long cmd, caddr_t addr, int flag, struct thread *td)
+{
+ int status = 0;
+ struct ocs_softc *ocs = cdev->si_drv1;
+ device_t dev = ocs->dev;
+
+ switch (cmd) {
+ case OCS_IOCTL_CMD_ELXU_MBOX: {
+ /* "copyin" done by kernel; thus, just dereference addr */
+ ocs_ioctl_elxu_mbox_t *mcmd = (void *)addr;
+ status = ocs_process_mbx_ioctl(ocs, mcmd);
+ break;
+ }
+ case OCS_IOCTL_CMD_ECD_HELPER: {
+ /* "copyin" done by kernel; thus, just dereference addr */
+ ocs_ioctl_ecd_helper_t *req = (void *)addr;
+ status = ocs_process_ecd_helper(ocs, req);
+ break;
+ }
+
+ case OCS_IOCTL_CMD_VPORT: {
+ int32_t rc = 0;
+ ocs_ioctl_vport_t *req = (ocs_ioctl_vport_t*) addr;
+ ocs_domain_t *domain;
+
+ domain = ocs_domain_get_instance(ocs, req->domain_index);
+ if (domain == NULL) {
+ device_printf(ocs->dev, "domain [%d] nod found\n",
+ req->domain_index);
+ return -EFAULT;
+ }
+
+ if (req->req_create) {
+ rc = ocs_sport_vport_new(domain, req->wwpn, req->wwnn,
+ UINT32_MAX, req->enable_ini,
+ req->enable_tgt, NULL, NULL, TRUE);
+ } else {
+ rc = ocs_sport_vport_del(ocs, domain, req->wwpn, req->wwnn);
+ }
+
+ return rc;
+ }
+
+ case OCS_IOCTL_CMD_GET_DDUMP: {
+ ocs_ioctl_ddump_t *req = (ocs_ioctl_ddump_t*) addr;
+ ocs_textbuf_t textbuf;
+ int x;
+
+ switch (req->args.action) {
+ case OCS_IOCTL_DDUMP_GET:
+ case OCS_IOCTL_DDUMP_GET_SAVED: {
+ uint32_t remaining;
+ uint32_t written;
+ uint32_t idx;
+ int32_t n;
+ ocs_textbuf_t *ptbuf = NULL;
+ uint32_t flags = 0;
+
+ if (req->args.action == OCS_IOCTL_DDUMP_GET_SAVED) {
+ if (ocs_textbuf_initialized(&ocs->ddump_saved)) {
+ ptbuf = &ocs->ddump_saved;
+ }
+ } else {
+ if (ocs_textbuf_alloc(ocs, &textbuf, req->user_buffer_len)) {
+ ocs_log_err(ocs, "Error: ocs_textbuf_alloc failed\n");
+ return -EFAULT;
+ }
+
+ /* translate IOCTL ddump flags to ddump flags */
+ if (req->args.flags & OCS_IOCTL_DDUMP_FLAGS_WQES) {
+ flags |= OCS_DDUMP_FLAGS_WQES;
+ }
+ if (req->args.flags & OCS_IOCTL_DDUMP_FLAGS_CQES) {
+ flags |= OCS_DDUMP_FLAGS_CQES;
+ }
+ if (req->args.flags & OCS_IOCTL_DDUMP_FLAGS_MQES) {
+ flags |= OCS_DDUMP_FLAGS_MQES;
+ }
+ if (req->args.flags & OCS_IOCTL_DDUMP_FLAGS_RQES) {
+ flags |= OCS_DDUMP_FLAGS_RQES;
+ }
+ if (req->args.flags & OCS_IOCTL_DDUMP_FLAGS_EQES) {
+ flags |= OCS_DDUMP_FLAGS_EQES;
+ }
+
+ /* Try 3 times to get the dump */
+ for(x=0; x<3; x++) {
+ if (ocs_ddump(ocs, &textbuf, flags, req->args.q_entries) != 0) {
+ ocs_textbuf_reset(&textbuf);
+ } else {
+ /* Success */
+ x = 0;
+ break;
+ }
+ }
+ if (x != 0) {
+ /* Retries failed; release the text buffer */
+ ocs_log_test(ocs, "ocs_ddump failed\n");
+ ocs_textbuf_free(ocs, &textbuf);
+ } else {
+ ptbuf = &textbuf;
+ }
+
+ }
+ written = 0;
+ if (ptbuf != NULL) {
+ /* Process each textbuf segment */
+ remaining = req->user_buffer_len;
+ for (idx = 0; remaining; idx++) {
+ n = ocs_textbuf_ext_get_written(ptbuf, idx);
+ if (n < 0) {
+ break;
+ }
+ if ((uint32_t)n >= remaining) {
+ n = (int32_t)remaining;
+ }
+ if (ocs_copy_to_user(req->user_buffer + written,
+ ocs_textbuf_ext_get_buffer(ptbuf, idx), n)) {
+ ocs_log_test(ocs, "Error: (%d) ocs_copy_to_user failed\n", __LINE__);
+ }
+ written += n;
+ remaining -= (uint32_t)n;
+ }
+ }
+ req->bytes_written = written;
+ if (ptbuf == &textbuf) {
+ ocs_textbuf_free(ocs, &textbuf);
+ }
+
+ break;
+ }
+ case OCS_IOCTL_DDUMP_CLR_SAVED:
+ ocs_clear_saved_ddump(ocs);
+ break;
+ default:
+ ocs_log_err(ocs, "Error: ocs_textbuf_alloc failed\n");
+ break;
+ }
+ break;
+ }
+ case OCS_IOCTL_CMD_DRIVER_INFO: {
+ ocs_ioctl_driver_info_t *req = (ocs_ioctl_driver_info_t*)addr;
+
+ ocs_memset(req, 0, sizeof(*req));
+
+ req->pci_vendor = ocs->pci_vendor;
+ req->pci_device = ocs->pci_device;
+ ocs_strncpy(req->businfo, ocs->businfo, sizeof(req->businfo));
+
+ req->sli_intf = ocs_config_read32(ocs, SLI4_INTF_REG);
+ ocs_strncpy(req->desc, device_get_desc(dev), sizeof(req->desc));
+ ocs_strncpy(req->fw_rev, ocs->fwrev, sizeof(req->fw_rev));
+ if (ocs->domain && ocs->domain->sport) {
+ *((uint64_t*)req->hw_addr.fc.wwnn) = ocs_htobe64(ocs->domain->sport->wwnn);
+ *((uint64_t*)req->hw_addr.fc.wwpn) = ocs_htobe64(ocs->domain->sport->wwpn);
+ }
+ ocs_strncpy(req->serialnum, ocs->serialnum, sizeof(req->serialnum));
+ break;
+ }
+
+ case OCS_IOCTL_CMD_MGMT_LIST: {
+ ocs_ioctl_mgmt_buffer_t* req = (ocs_ioctl_mgmt_buffer_t *)addr;
+ ocs_textbuf_t textbuf;
+
+ /* Build a text buffer */
+ if (ocs_textbuf_alloc(ocs, &textbuf, req->user_buffer_len)) {
+ ocs_log_err(ocs, "Error: ocs_textbuf_alloc failed\n");
+ return -EFAULT;
+ }
+
+ ocs_mgmt_get_list(ocs, &textbuf);
+
+ if (ocs_textbuf_get_written(&textbuf)) {
+ if (ocs_copy_to_user(req->user_buffer,
+ ocs_textbuf_get_buffer(&textbuf),
+ ocs_textbuf_get_written(&textbuf))) {
+ ocs_log_test(ocs, "Error: (%d) ocs_copy_to_user failed\n", __LINE__);
+ }
+ }
+ req->bytes_written = ocs_textbuf_get_written(&textbuf);
+
+ ocs_textbuf_free(ocs, &textbuf);
+
+ break;
+
+ }
+
+ case OCS_IOCTL_CMD_MGMT_GET_ALL: {
+ ocs_ioctl_mgmt_buffer_t* req = (ocs_ioctl_mgmt_buffer_t *)addr;
+ ocs_textbuf_t textbuf;
+ int32_t n;
+ uint32_t idx;
+ uint32_t copied = 0;
+
+ /* Build a text buffer */
+ if (ocs_textbuf_alloc(ocs, &textbuf, req->user_buffer_len)) {
+ ocs_log_err(ocs, "Error: ocs_textbuf_alloc failed\n");
+ return -EFAULT;
+ }
+
+ ocs_mgmt_get_all(ocs, &textbuf);
+
+ for (idx = 0; (n = ocs_textbuf_ext_get_written(&textbuf, idx)) > 0; idx++) {
+ if(ocs_copy_to_user(req->user_buffer + copied,
+ ocs_textbuf_ext_get_buffer(&textbuf, idx),
+ ocs_textbuf_ext_get_written(&textbuf, idx))) {
+
+ ocs_log_err(ocs, "Error: ocs_textbuf_alloc failed\n");
+ }
+ copied += n;
+ }
+ req->bytes_written = copied;
+
+ ocs_textbuf_free(ocs, &textbuf);
+
+ break;
+ }
+
+ case OCS_IOCTL_CMD_MGMT_GET: {
+ ocs_ioctl_cmd_get_t* req = (ocs_ioctl_cmd_get_t*)addr;
+ ocs_textbuf_t textbuf;
+ char name[OCS_MGMT_MAX_NAME];
+
+ /* Copy the name value in from user space */
+ if (ocs_copy_from_user(name, req->name, OCS_MGMT_MAX_NAME)) {
+ ocs_log_test(ocs, "ocs_copy_from_user failed\n");
+ ocs_ioctl_free(ocs, req, sizeof(ocs_ioctl_cmd_get_t));
+ return -EFAULT;
+ }
+
+ /* Build a text buffer */
+ if (ocs_textbuf_alloc(ocs, &textbuf, req->value_length)) {
+ ocs_log_err(ocs, "Error: ocs_textbuf_alloc failed\n");
+ return -EFAULT;
+ }
+
+ ocs_mgmt_get(ocs, name, &textbuf);
+
+ if (ocs_textbuf_get_written(&textbuf)) {
+ if (ocs_copy_to_user(req->value,
+ ocs_textbuf_get_buffer(&textbuf),
+ ocs_textbuf_get_written(&textbuf))) {
+ ocs_log_test(ocs, "Error: (%d) ocs_copy_to_user failed\n", __LINE__);
+
+ }
+ }
+ req->value_length = ocs_textbuf_get_written(&textbuf);
+
+ ocs_textbuf_free(ocs, &textbuf);
+
+ break;
+ }
+
+ case OCS_IOCTL_CMD_MGMT_SET: {
+ char name[OCS_MGMT_MAX_NAME];
+ char value[OCS_MGMT_MAX_VALUE];
+ ocs_ioctl_cmd_set_t* req = (ocs_ioctl_cmd_set_t*)addr;
+
+ /* Copy the name in from user space */
+ if (ocs_copy_from_user(name, req->name, OCS_MGMT_MAX_NAME)) {
+ ocs_log_test(ocs, "Error: copy from user failed\n");
+ ocs_ioctl_free(ocs, req, sizeof(*req));
+ return -EFAULT;
+ }
+
+ /* Copy the value in from user space */
+ if (ocs_copy_from_user(value, req->value, OCS_MGMT_MAX_VALUE)) {
+ ocs_log_test(ocs, "Error: copy from user failed\n");
+ ocs_ioctl_free(ocs, req, sizeof(*req));
+ return -EFAULT;
+ }
+
+ /* use the copied-in kernel buffers, not the user-space pointers */
+ req->result = ocs_mgmt_set(ocs, name, value);
+
+ break;
+ }
+
+ case OCS_IOCTL_CMD_MGMT_EXEC: {
+ ocs_ioctl_action_t* req = (ocs_ioctl_action_t*) addr;
+ char action_name[OCS_MGMT_MAX_NAME];
+
+ if (ocs_copy_from_user(action_name, req->name, sizeof(action_name))) {
+ ocs_log_test(ocs, "Error: copy req.name from user failed\n");
+ ocs_ioctl_free(ocs, req, sizeof(*req));
+ return -EFAULT;
+ }
+
+ req->result = ocs_mgmt_exec(ocs, action_name, req->arg_in, req->arg_in_length,
+ req->arg_out, req->arg_out_length);
+
+ break;
+ }
+
+ default:
+ ocs_log_test(ocs, "Error: unknown cmd %#lx\n", cmd);
+ status = -ENOTTY;
+ break;
+ }
+ return status;
+}
+
+static void
+ocs_fw_write_cb(int32_t status, uint32_t actual_write_length,
+ uint32_t change_status, void *arg)
+{
+ ocs_mgmt_fw_write_result_t *result = arg;
+
+ result->status = status;
+ result->actual_xfer = actual_write_length;
+ result->change_status = change_status;
+
+ ocs_sem_v(&(result->semaphore));
+}
+
+int
+ocs_firmware_write(ocs_t *ocs, const uint8_t *buf, size_t buf_len,
+ uint8_t *change_status)
+{
+ int rc = 0;
+ uint32_t bytes_left;
+ uint32_t xfer_size;
+ uint32_t offset;
+ ocs_dma_t dma;
+ int last = 0;
+ ocs_mgmt_fw_write_result_t result;
+
+ ocs_sem_init(&(result.semaphore), 0, "fw_write");
+
+ bytes_left = buf_len;
+ offset = 0;
+
+ if (ocs_dma_alloc(ocs, &dma, FW_WRITE_BUFSIZE, 4096)) {
+ ocs_log_err(ocs, "ocs_firmware_write: malloc failed\n");
+ return -ENOMEM;
+ }
+
+ while (bytes_left > 0) {
+
+ if (bytes_left > FW_WRITE_BUFSIZE) {
+ xfer_size = FW_WRITE_BUFSIZE;
+ } else {
+ xfer_size = bytes_left;
+ }
+
+ ocs_memcpy(dma.virt, buf + offset, xfer_size);
+
+ if (bytes_left == xfer_size) {
+ last = 1;
+ }
+
+ ocs_hw_firmware_write(&ocs->hw, &dma, xfer_size, offset,
+ last, ocs_fw_write_cb, &result);
+
+ if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) {
+ rc = -ENXIO;
+ break;
+ }
+
+ if (result.actual_xfer == 0 || result.status != 0) {
+ rc = -EFAULT;
+ break;
+ }
+
+ if (last) {
+ *change_status = result.change_status;
+ }
+
+ bytes_left -= result.actual_xfer;
+ offset += result.actual_xfer;
+ }
+
+ ocs_dma_free(ocs, &dma);
+ return rc;
+}
+
+static int
+ocs_sys_fwupgrade(SYSCTL_HANDLER_ARGS)
+{
+ char file_name[256] = {0};
+ uint8_t fw_change_status;
+ int rc;
+ ocs_t *ocs = (ocs_t *)arg1;
+ const struct firmware *fw;
+ const struct ocs_hw_grp_hdr *fw_image;
+
+ rc = sysctl_handle_string(oidp, file_name, sizeof(file_name), req);
+ if (rc || !req->newptr)
+ return rc;
+
+ fw = firmware_get(file_name);
+ if (fw == NULL) {
+ device_printf(ocs->dev, "Unable to get Firmware. "
+ "Make sure %s is copied to /boot/modules\n", file_name);
+ return ENOENT;
+ }
+
+ fw_image = (const struct ocs_hw_grp_hdr *)fw->data;
+
+ /* Check whether the firmware provided is compatible with this
+  * particular adapter */
+ if ((ocs_be32toh(fw_image->magic_number) != OCS_HW_OBJECT_G5) &&
+ (ocs_be32toh(fw_image->magic_number) != OCS_HW_OBJECT_G6)) {
+ device_printf(ocs->dev,
+     "Invalid FW image found. Magic: 0x%x Size: %zu\n",
+     ocs_be32toh(fw_image->magic_number), fw->datasize);
+ rc = EINVAL;
+ goto exit;
+ }
+
+ if (!strncmp(ocs->fw_version, fw_image->revision,
+ strnlen(fw_image->revision, 16))) {
+ device_printf(ocs->dev, "No update req. "
+ "Firmware is already up to date. \n");
+ rc = 0;
+ goto exit;
+ }
+
+ device_printf(ocs->dev, "Upgrading Firmware from %s to %s \n",
+ ocs->fw_version, fw_image->revision);
+
+ rc = ocs_firmware_write(ocs, fw->data, fw->datasize, &fw_change_status);
+ if (rc) {
+ ocs_log_err(ocs, "Firmware update failed with status = %d\n", rc);
+ } else {
+ ocs_log_info(ocs, "Firmware updated successfully\n");
+ switch (fw_change_status) {
+ case 0x00:
+ device_printf(ocs->dev,
+ "No reset needed, new firmware is active.\n");
+ break;
+ case 0x01:
+ device_printf(ocs->dev,
+ "A physical device reset (host reboot) is "
+ "needed to activate the new firmware\n");
+ break;
+ case 0x02:
+ case 0x03:
+ device_printf(ocs->dev,
+ "firmware is resetting to activate the new "
+ "firmware, Host reboot is needed \n");
+ break;
+ default:
+ ocs_log_warn(ocs,
+ "Unexected value change_status: %d\n",
+ fw_change_status);
+ break;
+ }
+
+ }
+
+exit:
+ /* Release firmware */
+ firmware_put(fw, FIRMWARE_UNLOAD);
+
+ return rc;
+
+}
+
+static int
+ocs_sysctl_wwnn(SYSCTL_HANDLER_ARGS)
+{
+ uint32_t rc = 1;
+ ocs_t *ocs = oidp->oid_arg1;
+ char old[64];
+ char new[64];
+ uint64_t *wwnn = NULL;
+ ocs_xport_t *xport = ocs->xport;
+
+ if (xport->req_wwnn) {
+ wwnn = &xport->req_wwnn;
+ memset(old, 0, sizeof(old));
+ snprintf(old, sizeof(old), "0x%llx" , (unsigned long long) *wwnn);
+
+ } else {
+ wwnn = ocs_hw_get_ptr(&ocs->hw, OCS_HW_WWN_NODE);
+
+ memset(old, 0, sizeof(old));
+ snprintf(old, sizeof(old), "0x%llx" , (unsigned long long) ocs_htobe64(*wwnn));
+ }
+
+ /* Read WWNN */
+ if (!req->newptr) {
+ return (sysctl_handle_string(oidp, old, sizeof(old), req));
+ }
+
+ /* Configure WWNN */
+ rc = sysctl_handle_string(oidp, new, sizeof(new), req);
+ if (rc)
+ return (rc);
+
+ if (strncmp(old, new, strlen(old)) == 0) {
+ return 0;
+ }
+
+ return (set_req_wwnn(ocs, NULL, new));
+}
+
+static int
+ocs_sysctl_wwpn(SYSCTL_HANDLER_ARGS)
+{
+ uint32_t rc = 1;
+ ocs_t *ocs = oidp->oid_arg1;
+ char old[64];
+ char new[64];
+ uint64_t *wwpn = NULL;
+ ocs_xport_t *xport = ocs->xport;
+
+ if (xport->req_wwpn) {
+ wwpn = &xport->req_wwpn;
+ memset(old, 0, sizeof(old));
+ snprintf(old, sizeof(old), "0x%llx",(unsigned long long) *wwpn);
+ } else {
+ wwpn = ocs_hw_get_ptr(&ocs->hw, OCS_HW_WWN_PORT);
+ memset(old, 0, sizeof(old));
+ snprintf(old, sizeof(old), "0x%llx",(unsigned long long) ocs_htobe64(*wwpn));
+ }
+
+ /* Read WWPN */
+ if (!req->newptr) {
+ return (sysctl_handle_string(oidp, old, sizeof(old), req));
+ }
+
+ /* Configure WWPN */
+ rc = sysctl_handle_string(oidp, new, sizeof(new), req);
+ if (rc)
+ return (rc);
+
+ if (strncmp(old, new, strlen(old)) == 0) {
+ return 0;
+ }
+
+ return (set_req_wwpn(ocs, NULL, new));
+}
+
+static int
+ocs_sysctl_current_topology(SYSCTL_HANDLER_ARGS)
+{
+ ocs_t *ocs = oidp->oid_arg1;
+ uint32_t value;
+
+ ocs_hw_get(&ocs->hw, OCS_HW_TOPOLOGY, &value);
+
+ return (sysctl_handle_int(oidp, &value, 0, req));
+}
+
+static int
+ocs_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
+{
+ ocs_t *ocs = oidp->oid_arg1;
+ uint32_t value;
+
+ ocs_hw_get(&ocs->hw, OCS_HW_LINK_SPEED, &value);
+
+ return (sysctl_handle_int(oidp, &value, 0, req));
+}
+
+static int
+ocs_sysctl_config_topology(SYSCTL_HANDLER_ARGS)
+{
+ uint32_t rc = 1;
+ ocs_t *ocs = oidp->oid_arg1;
+ uint32_t old_value;
+ uint32_t new_value;
+ char buf[64];
+
+ ocs_hw_get(&ocs->hw, OCS_HW_CONFIG_TOPOLOGY, &old_value);
+
+ /* Read configured topology */
+ if (!req->newptr) {
+ return (sysctl_handle_int(oidp, &old_value, 0, req));
+ }
+
+ /* Configure topology */
+ rc = sysctl_handle_int(oidp, &new_value, 0, req);
+ if (rc)
+ return (rc);
+
+ if (new_value == old_value) {
+ return 0;
+ }
+
+ snprintf(buf, sizeof(buf), "%d",new_value);
+ rc = set_configured_topology(ocs, NULL, buf);
+ return rc;
+}
+
+static int
+ocs_sysctl_config_speed(SYSCTL_HANDLER_ARGS)
+{
+ uint32_t rc = 1;
+ ocs_t *ocs = oidp->oid_arg1;
+ uint32_t old_value;
+ uint32_t new_value;
+ char buf[64];
+
+ ocs_hw_get(&ocs->hw, OCS_HW_LINK_CONFIG_SPEED, &old_value);
+
+ /* Read configured speed */
+ if (!req->newptr) {
+ return (sysctl_handle_int(oidp, &old_value, 0, req));
+ }
+
+ /* Configure speed */
+ rc = sysctl_handle_int(oidp, &new_value, 0, req);
+ if (rc)
+ return (rc);
+
+ if (new_value == old_value) {
+ return 0;
+ }
+
+ snprintf(buf, sizeof(buf), "%d",new_value);
+ rc = set_configured_speed(ocs, NULL,buf);
+ return rc;
+}
+
+static int
+ocs_sysctl_fcid(SYSCTL_HANDLER_ARGS)
+{
+ ocs_t *ocs = oidp->oid_arg1;
+ char buf[64];
+
+ memset(buf, 0, sizeof(buf));
+ if (ocs->domain && ocs->domain->attached) {
+ snprintf(buf, sizeof(buf), "0x%06x",
+ ocs->domain->sport->fc_id);
+ }
+
+ return (sysctl_handle_string(oidp, buf, sizeof(buf), req));
+}
+
+static int
+ocs_sysctl_port_state(SYSCTL_HANDLER_ARGS)
+{
+ char new[256] = {0};
+ uint32_t rc = 1;
+ ocs_xport_stats_t old;
+ ocs_t *ocs = (ocs_t *)arg1;
+
+ ocs_xport_status(ocs->xport, OCS_XPORT_CONFIG_PORT_STATUS, &old);
+
+ /* Read port state */
+ if (!req->newptr) {
+ snprintf(new, sizeof(new), "%s",
+ (old.value == OCS_XPORT_PORT_OFFLINE) ?
+ "offline" : "online");
+ return (sysctl_handle_string(oidp, new, sizeof(new), req));
+ }
+
+ /* Configure port state */
+ rc = sysctl_handle_string(oidp, new, sizeof(new), req);
+ if (rc)
+ return (rc);
+
+ if (ocs_strcasecmp(new, "offline") == 0) {
+ if (old.value == OCS_XPORT_PORT_OFFLINE) {
+ return (0);
+ }
+ ocs_log_debug(ocs, "Setting port to %s\n", new);
+ rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_OFFLINE);
+ if (rc != 0) {
+ ocs_log_err(ocs, "Setting port to offline failed\n");
+ }
+ } else if (ocs_strcasecmp(new, "online") == 0) {
+ if (old.value == OCS_XPORT_PORT_ONLINE) {
+ return (0);
+ }
+ ocs_log_debug(ocs, "Setting port to %s\n", new);
+ rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_ONLINE);
+ if (rc != 0) {
+ ocs_log_err(ocs, "Setting port to online failed\n");
+ }
+ } else {
+ ocs_log_err(ocs, "Unsupported link state %s\n", new);
+ rc = 1;
+ }
+
+ return (rc);
+}
+
+static int
+ocs_sysctl_vport_wwpn(SYSCTL_HANDLER_ARGS)
+{
+ ocs_fcport *fcp = oidp->oid_arg1;
+ char str_wwpn[64];
+
+ memset(str_wwpn, 0, sizeof(str_wwpn));
+ snprintf(str_wwpn, sizeof(str_wwpn), "0x%llx", (unsigned long long)fcp->vport->wwpn);
+
+ return (sysctl_handle_string(oidp, str_wwpn, sizeof(str_wwpn), req));
+}
+
+static int
+ocs_sysctl_vport_wwnn(SYSCTL_HANDLER_ARGS)
+{
+ ocs_fcport *fcp = oidp->oid_arg1;
+ char str_wwnn[64];
+
+ memset(str_wwnn, 0, sizeof(str_wwnn));
+ snprintf(str_wwnn, sizeof(str_wwnn), "0x%llx", (unsigned long long)fcp->vport->wwnn);
+
+ return (sysctl_handle_string(oidp, str_wwnn, sizeof(str_wwnn), req));
+}
+
+/**
+ * @brief Initialize sysctl
+ *
+ * Initialize sysctl so elxsdkutil can query device information.
+ *
+ * @param ocs pointer to ocs
+ * @return void
+ */
+static void
+ocs_sysctl_init(ocs_t *ocs)
+{
+ struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(ocs->dev);
+ struct sysctl_oid *tree = device_get_sysctl_tree(ocs->dev);
+ struct sysctl_oid *vtree;
+ const char *str = NULL;
+ char name[16];
+ uint32_t rev, if_type, family, i;
+ ocs_fcport *fcp = NULL;
+
+ SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "devid", CTLFLAG_RD, NULL,
+ pci_get_devid(ocs->dev), "Device ID");
+
+ memset(ocs->modeldesc, 0, sizeof(ocs->modeldesc));
+ if (0 == pci_get_vpd_ident(ocs->dev, &str)) {
+ snprintf(ocs->modeldesc, sizeof(ocs->modeldesc), "%s", str);
+ }
+ SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "modeldesc", CTLFLAG_RD,
+ ocs->modeldesc,
+ 0, "Model Description");
+
+ memset(ocs->serialnum, 0, sizeof(ocs->serialnum));
+ if (0 == pci_get_vpd_readonly(ocs->dev, "SN", &str)) {
+ snprintf(ocs->serialnum, sizeof(ocs->serialnum), "%s", str);
+ }
+ SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "sn", CTLFLAG_RD,
+ ocs->serialnum,
+ 0, "Serial Number");
+
+ ocs_hw_get(&ocs->hw, OCS_HW_SLI_REV, &rev);
+ ocs_hw_get(&ocs->hw, OCS_HW_IF_TYPE, &if_type);
+ ocs_hw_get(&ocs->hw, OCS_HW_SLI_FAMILY, &family);
+
+ memset(ocs->fwrev, 0, sizeof(ocs->fwrev));
+ snprintf(ocs->fwrev, sizeof(ocs->fwrev), "%s, sli-%d:%d:%x",
+ (char *)ocs_hw_get_ptr(&ocs->hw, OCS_HW_FW_REV),
+ rev, if_type, family);
+ SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "fwrev", CTLFLAG_RD,
+ ocs->fwrev,
+ 0, "Firmware Revision");
+
+ memset(ocs->sli_intf, 0, sizeof(ocs->sli_intf));
+ snprintf(ocs->sli_intf, sizeof(ocs->sli_intf), "%08x",
+ ocs_config_read32(ocs, SLI4_INTF_REG));
+ SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "sli_intf", CTLFLAG_RD,
+ ocs->sli_intf,
+ 0, "SLI Interface");
+
+ SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "fw_upgrade",
+ CTLTYPE_STRING | CTLFLAG_RW, (void *)ocs, 0,
+ ocs_sys_fwupgrade, "A", "Firmware grp file");
+
+ SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "wwnn", CTLTYPE_STRING | CTLFLAG_RW,
+ ocs, 0, ocs_sysctl_wwnn, "A",
+ "World Wide Node Name, wwnn should be in the format 0x<XXXXXXXXXXXXXXXX>");
+
+ SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "wwpn", CTLTYPE_STRING | CTLFLAG_RW,
+ ocs, 0, ocs_sysctl_wwpn, "A",
+ "World Wide Port Name, wwpn should be in the format 0x<XXXXXXXXXXXXXXXX>");
+
+ SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "current_topology", CTLTYPE_UINT | CTLFLAG_RD,
+ ocs, 0, ocs_sysctl_current_topology, "IU",
+ "Current Topology, 1-NPort; 2-Loop; 3-None");
+
+ SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "current_speed", CTLTYPE_UINT | CTLFLAG_RD,
+ ocs, 0, ocs_sysctl_current_speed, "IU",
+ "Current Speed");
+
+ SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "configured_topology", CTLTYPE_UINT | CTLFLAG_RW,
+ ocs, 0, ocs_sysctl_config_topology, "IU",
+ "Configured Topology, 0-Auto; 1-NPort; 2-Loop");
+
+ SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "configured_speed", CTLTYPE_UINT | CTLFLAG_RW,
+ ocs, 0, ocs_sysctl_config_speed, "IU",
+ "Configured Speed, 0-Auto, 2000, 4000, 8000, 16000, 32000");
+
+ SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "businfo", CTLFLAG_RD,
+ ocs->businfo,
+ 0, "Bus Info");
+
+ SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "fcid", CTLTYPE_STRING | CTLFLAG_RD,
+ ocs, 0, ocs_sysctl_fcid, "A",
+ "Port FC ID");
+
+ SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "port_state", CTLTYPE_STRING | CTLFLAG_RW,
+ ocs, 0, ocs_sysctl_port_state, "A",
+ "configured port state");
+
+ for (i = 0; i < ocs->num_vports; i++) {
+ fcp = FCPORT(ocs, i+1);
+
+ memset(name, 0, sizeof(name));
+ snprintf(name, sizeof(name), "vport%d", i);
+ vtree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(tree),
+ OID_AUTO, name, CTLFLAG_RW, 0, "Virtual port");
+
+ SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(vtree), OID_AUTO,
+ "wwnn", CTLTYPE_STRING | CTLFLAG_RW,
+ fcp, 0, ocs_sysctl_vport_wwnn, "A",
+ "World Wide Node Name");
+
+ SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(vtree), OID_AUTO,
+ "wwpn", CTLTYPE_STRING | CTLFLAG_RW,
+ fcp, 0, ocs_sysctl_vport_wwpn, "A",
+ "World Wide Port Name");
+
+ }
+
+}
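+
+/*
+ * Example (illustrative, unit 0 assumed): the nodes registered above live
+ * under the device's sysctl tree, so they can be queried or set from user
+ * space with sysctl(8):
+ *
+ *     sysctl dev.ocs.0.fwrev
+ *     sysctl dev.ocs.0.port_state=offline
+ */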
+
+/**
+ * @brief Initialize the debug module
+ *
+ * Parse device hints (similar to Linux module parameters) here. To use,
+ * run the command
+ * kenv hint.ocs.U.P=V
+ * from the command line, replacing U with the unit number (0, 1, ...),
+ * P with the parameter name (debug_mask), and V with the value, for
+ * example: kenv hint.ocs.0.debug_mask=0xffffffff
+ */
+void
+ocs_debug_attach(void *os)
+{
+ struct ocs_softc *ocs = os;
+ int error = 0;
+ char *resname = NULL;
+ int32_t unit = INT32_MAX;
+ uint32_t ocs_debug_mask = 0;
+
+ resname = "debug_mask";
+ if (0 == (error = resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
+ resname, &ocs_debug_mask))) {
+ device_printf(ocs->dev, "setting %s to %010x\n", resname, ocs_debug_mask);
+ ocs_debug_enable(ocs_debug_mask);
+ }
+
+ unit = device_get_unit(ocs->dev);
+ ocs->cdev = make_dev(&ocs_cdevsw, unit, UID_ROOT, GID_OPERATOR, 0640,
+ "ocs%d", unit);
+ if (ocs->cdev) {
+ ocs->cdev->si_drv1 = ocs;
+ }
+
+ /* initialize sysctl interface */
+ ocs_sysctl_init(ocs);
+ mtx_init(&ocs->dbg_lock, "ocs_dbg_lock", NULL, MTX_DEF);
+}
+
+/**
+ * @brief Free the debug module
+ */
+void
+ocs_debug_detach(void *os)
+{
+ struct ocs_softc *ocs = os;
+
+ mtx_destroy(&ocs->dbg_lock);
+
+ if (ocs->cdev) {
+ ocs->cdev->si_drv1 = NULL;
+ destroy_dev(ocs->cdev);
+ }
+}
+
diff --git a/sys/dev/ocs_fc/ocs_ioctl.h b/sys/dev/ocs_fc/ocs_ioctl.h
new file mode 100644
index 000000000000..6ae5d36aeb3a
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs_ioctl.h
@@ -0,0 +1,368 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/**
+ * @file
+ * Common declarations for the driver's IOCTL interface
+ */
+
+#if !defined(__OCS_IOCTL_H__)
+#define __OCS_IOCTL_H__
+
+/**
+ * @brief OCS test ioctl
+ *
+ * Simple structure for testing the IOCTL interface
+ */
+
+typedef struct {
+ char string[32]; /**< fixed string buffer */
+} ocs_ioctl_test_t;
+
+/**
+ * @brief DRIVER_INFO ioctl structure
+ *
+ * Structure returned when the OCS_IOCTL_CMD_DRIVER_INFO ioctl is issued by a user space program.
+ */
+
+typedef struct {
+ uint16_t pci_vendor; /**< PCI vendor ID value (binary) */
+ uint16_t pci_device; /**< PCI device ID value (binary) */
+ char businfo[16]; /**< Bus information (text) */
+ uint32_t sli_intf; /**< SLI_INTF register value (binary) */
+ char desc[64]; /**< description (text) */
+ char fw_rev[32]; /**< firmware revision (text) */
+ union {
+ struct {
+ uint8_t wwnn[8]; /**< WWNN (binary) */
+ uint8_t wwpn[8]; /**< WWPN (binary) */
+ } fc;
+ struct {
+ uint8_t mac_addr[6]; /**< MAC address (binary) */
+ uint8_t reserved[10];
+ } iscsi;
+ } hw_addr;
+ char serialnum[32]; /**< board serial number (text) */
+} ocs_ioctl_driver_info_t;
+
+#define ELXU_BSD_MAGIC 0x30584c45
+
+/**
+ * @brief IOCTL_CMD_IOCTL_ELXU_MBOX ioctl structure
+ *
+ * Structure used to submit elxsdkutil mailbox requests for firmware download and
+ * dump file retrieval.
+ */
+
+typedef struct {
+ uint32_t magic; /**< magic number */
+ uint32_t size; /**< size of MBX command */
+ uint8_t payload[256]; /**< MBX command in/out payload buffer */
+ uint64_t in_addr; /**< user space address of input buffer */
+ uint64_t in_bytes; /**< length of user space input buffer in bytes */
+ uint64_t out_addr; /**< user space address of output buffer */
+ uint64_t out_bytes; /**< length of user space output buffer in bytes */
+} ocs_ioctl_elxu_mbox_t;
+
+enum {
+ ocs_ioctl_scsi_cmd_loop, /**< Start command loop */
+ ocs_ioctl_scsi_cmd_loop_wait, /**< Start command loop and wait for completion */
+ ocs_ioctl_scsi_cmd_stop, /**< Stop command loop */
+ ocs_ioctl_scsi_cmd, /**< Start one command */
+ ocs_ioctl_scsi_cmd_wait, /**< Wait for a command to complete */
+ ocs_ioctl_scsi_cmd_abort, /**< Start an abort */
+ ocs_ioctl_scsi_cmd_tmf, /**< Start a tmf */
+ ocs_ioctl_els_send, /**< Start an ELS */
+ ocs_ioctl_tgt_logout, /**< logout of a target */
+ ocs_ioctl_scsi_cmd_wait_any, /**< Wait for any command to complete */
+};
+
+enum {
+ ocs_ioctl_scsi_cmd_rd = (1U << 0), /**< direction is read */
+ ocs_ioctl_scsi_cmd_wr = (1U << 1), /**< direction is write */
+};
+
+/**
+ * @brief OCS_IOCTL_CMD_SCSI_CMD ioctl command structure
+ */
+
+typedef enum {
+ DIF_OP_DISABLE = 0,
+ DIF_OP_IN_NODIF_OUT_CRC,
+ DIF_OP_IN_CRC_OUT_NODIF,
+ DIF_OP_IN_NODIF_OUT_CHKSUM,
+ DIF_OP_IN_CHKSUM_OUT_NODIF,
+ DIF_OP_IN_CRC_OUT_CRC,
+ DIF_OP_IN_CHKSUM_OUT_CHKSUM,
+ DIF_OP_IN_CRC_OUT_CHKSUM,
+ DIF_OP_IN_CHKSUM_OUT_CRC,
+ DIF_OP_IN_RAW_OUT_RAW,
+} dif_op_t;
+
+#define DIF_OP_PASS_THRU DIF_OP_IN_CRC_OUT_CRC
+#define DIF_OP_STRIP DIF_OP_IN_CRC_OUT_NODIF
+#define DIF_OP_INSERT DIF_OP_IN_NODIF_OUT_CRC
+
+typedef struct {
+ dif_op_t dif_op;
+ uint32_t
+ check_ref_tag:1, /* check reference tag on initiator writes */
+ check_app_tag:1, /* check application tag on initiator writes */
+ check_guard:1, /* check CRC on initiator writes */
+ dif_separate:1; /* use DIF separate transfers */
+ uint32_t ref_tag; /* DIF reference tag */
+ uint16_t app_tag; /* DIF application tag */
+ uint32_t blocksize; /* DIF blocksize */
+} dif_info_t;
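+
+/*
+ * A minimal sketch of filling in dif_info_t for a DIF-insert write with
+ * guard checking; the tag and blocksize values are only illustrative:
+ *
+ *	dif_info_t dif;
+ *
+ *	memset(&dif, 0, sizeof(dif));
+ *	dif.dif_op = DIF_OP_INSERT;		(DIF_OP_IN_NODIF_OUT_CRC)
+ *	dif.check_guard = 1;			(verify CRC on writes)
+ *	dif.blocksize = 512;
+ */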
+
+typedef struct {
+ int command; /**< SCSI command request command */
+ uint32_t target_idx; /**< Target device index */
+ uint32_t dir; /**< rd or wr */
+ uint32_t lun; /**< lun value */
+ int32_t tmf; /**< TMF */
+ uint8_t cdb[32]; /**< SCSI CDB */
+ uint32_t cdb_len; /**< SCSI CDB length in bytes */
+ uint32_t els_cmd; /**< ELS command */
+ uint32_t flags; /**< command flags */
+ uint32_t queue_depth; /**< queue depth for command looping */
+ uint32_t payload_length; /**< payload length for command */
+ uint32_t dif_payload_length; /**< DIF payload length for command if separate */
+ uint32_t io_count; /**< command count for looping */
+ uint32_t io_timeout; /**< IO timeout in seconds (0 = no timeout) */
+
+ uint32_t directio; /**< If set, DMA to and from user buffers */
+
+ uint32_t first_burst:1; /**< If true send IO writes with first burst */
+ uint32_t first_burst_size; /**< If first burst is enabled, then this size */
+
+ int32_t wait_timeout_usec; /**< Wait timeout (usec) for wait, wait_any */
+
+ /* T10-PI */
+ dif_info_t dif; /* DIF info */
+
+ /* user space buffers */
+ void *payload; /**< pointer to user space payload buffer */
+ void *dif_payload; /**< pointer to DIF block data if separate */
+ uint8_t scsi_status; /**< SCSI status */
+ uint16_t scsi_status_qualifier; /**< SCSI status qualifier */
+ void *sense_data; /**< pointer to sense data buffer */
+ uint32_t sense_data_length; /**< length of sense data buffer (call=buffer length, return=data written) */
+ int32_t residual; /**< residual */
+ uint32_t tag_to_abort; /**< tag to abort for an abort task request */
+
+ /* return value */
+ int32_t status; /**< returned status */
+ uint32_t data_transferred; /**< updated with data transferred */
+ uint32_t tag; /**< returned unique I/O context tag */
+
+ /* for scsi loop */
+ uint32_t submit_count; /**< count of submitted IOs */
+ uint32_t complete_count; /**< count of completed IOs */
+} ocs_ioctl_scsi_cmd_t;
+
+/**
+ * @brief coredump helper function command values
+ */
+
+typedef enum {
+ OCS_ECD_HELPER_CFG_READ8,
+ OCS_ECD_HELPER_CFG_READ16,
+ OCS_ECD_HELPER_CFG_READ32,
+ OCS_ECD_HELPER_CFG_WRITE8,
+ OCS_ECD_HELPER_CFG_WRITE16,
+ OCS_ECD_HELPER_CFG_WRITE32,
+ OCS_ECD_HELPER_BAR_READ8,
+ OCS_ECD_HELPER_BAR_READ16,
+ OCS_ECD_HELPER_BAR_READ32,
+ OCS_ECD_HELPER_BAR_WRITE8,
+ OCS_ECD_HELPER_BAR_WRITE16,
+ OCS_ECD_HELPER_BAR_WRITE32,
+} ocs_ecd_helper_cmd_t;
+
+/**
+ * @brief OCS_IOCTL_CMD_ECD_HELPER ioctl structure
+ */
+
+typedef struct {
+ ocs_ecd_helper_cmd_t cmd; /**< coredump helper function command */
+ uint32_t bar; /**< BAR value to use */
+ uint32_t offset; /**< offset value to use */
+ uint32_t data; /**< 32 bit data value to write or return read data in */
+ int status; /**< status of helper function request */
+} ocs_ioctl_ecd_helper_t;
+
+/**
+ * @brief OCS_IOCTL_CMD_VPORT ioctl structure
+ */
+
+typedef struct {
+ uint32_t domain_index; /**< domain instance index */
+ uint32_t req_create:1, /**< 1 = create vport, 0 = remove vport */
+ enable_ini:1, /**< 1 = enable vport as an initiator */
+ enable_tgt:1; /**< 1 = enable vport as a target */
+ uint64_t wwpn; /**< wwpn to create or delete */
+ uint64_t wwnn; /**< wwnn to create or delete */
+ int status; /**< status of the request */
+} ocs_ioctl_vport_t;
+
+/**
+ * @brief connection info ioctl structure
+ *
+ * Structure is returned when the OCS_IOCTL_CMD_CONNECTION_INFO is issued by a user space program.
+ */
+typedef struct {
+ uint32_t connection_handle;
+ uint16_t connection_id;
+ uint8_t source_ip_type;
+ uint8_t source_ip[16];
+ uint16_t source_port;
+ uint8_t dest_ip_type;
+ uint8_t dest_ip[16];
+ uint16_t dest_port;
+} ocs_ioctl_connection_info_t;
+
+typedef struct {
+ uint32_t max_connections;
+ uint32_t num_connections;
+ ocs_ioctl_connection_info_t *connections;
+} ocs_ioctl_connections_t;
+
+/**
+ * @brief driver-dump actions
+ */
+
+typedef enum {
+ OCS_IOCTL_DDUMP_GET,
+ OCS_IOCTL_DDUMP_GET_SAVED,
+ OCS_IOCTL_DDUMP_CLR_SAVED,
+} ocs_ddump_action_t;
+
+#define OCS_IOCTL_DDUMP_FLAGS_WQES (1U << 0)
+#define OCS_IOCTL_DDUMP_FLAGS_CQES (1U << 1)
+#define OCS_IOCTL_DDUMP_FLAGS_MQES (1U << 2)
+#define OCS_IOCTL_DDUMP_FLAGS_RQES (1U << 3)
+#define OCS_IOCTL_DDUMP_FLAGS_EQES (1U << 4)
+
+typedef struct {
+ ocs_ddump_action_t action;
+ uint32_t flags;
+ uint32_t q_entries;
+} ocs_ioctl_ddump_arg_t;
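+
+/*
+ * Sketch: request a live driver dump that includes work queue and
+ * completion queue entries (the q_entries value is only illustrative):
+ *
+ *	ocs_ioctl_ddump_arg_t args = {
+ *		.action = OCS_IOCTL_DDUMP_GET,
+ *		.flags = OCS_IOCTL_DDUMP_FLAGS_WQES | OCS_IOCTL_DDUMP_FLAGS_CQES,
+ *		.q_entries = 32,
+ *	};
+ */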
+
+/**
+ * @brief OCS_CTL_CMD_GET_DDUMP ioctl structure
+ */
+
+typedef struct {
+ ocs_ioctl_ddump_arg_t args; /**< arguments for ddump */
+ uint8_t *user_buffer; /**< pointer to user space buffer */
+ uint32_t user_buffer_len; /**< length in bytes of user space buffer */
+ uint32_t bytes_written; /**< number of bytes written */
+} ocs_ioctl_ddump_t;
+
+/**
+ * @brief OCS_CTL_CMD_GET_STATUS, OCS_CTL_CMD_GET_CONFIG
+ */
+
+typedef struct {
+ uint8_t *user_buffer; /**< pointer to user space buffer */
+ uint32_t user_buffer_len; /**< length in bytes of user space buffer */
+ uint32_t bytes_written; /**< number of bytes written */
+} ocs_ioctl_mgmt_buffer_t;
+
+typedef struct {
+ uint8_t *name; /**< Input: name of property to retrieve */
+ uint8_t *value; /**< Output: user space buffer in which to place the response */
+ uint32_t value_length; /**< Input: size of the user space buffer */
+} ocs_ioctl_cmd_get_t;
+
+typedef struct {
+ uint8_t *name; /**< Input: name of property to set */
+ uint8_t *value; /**< Input: user space buffer which contains the new value */
+ int32_t result; /**< Output: result */
+} ocs_ioctl_cmd_set_t;
+
+typedef struct {
+ uint8_t *name; /**< Input: name of action to execute */
+ void *arg_in; /**< Input: pointer to argument in user space */
+ uint32_t arg_in_length; /**< Input: size of arg_in in bytes */
+ void *arg_out; /**< Output: pointer to argument from kernel to user */
+ uint32_t arg_out_length; /**< Input: size of arg_out in bytes */
+ int32_t result; /**< Output: result */
+} ocs_ioctl_action_t;
+
+#define FC_HEADER_LEN 24
+typedef struct {
+ uint8_t fc_header[FC_HEADER_LEN]; /**< FC header to send */
+ uint8_t *payload; /**< payload */
+ uint32_t payload_len; /**< payload length (bytes) */
+ uint8_t sof; /**< SOF value */
+ uint8_t eof; /**< EOF value */
+} ocs_ioctl_send_frame_t;
+
+/**
+ * @brief linkcfg strings
+ */
+#define OCS_CONFIG_LINKCFG_4X10G "ETH_4x10G"
+#define OCS_CONFIG_LINKCFG_1X40G "ETH_1x40G"
+#define OCS_CONFIG_LINKCFG_2X16G "FC_2x16G"
+#define OCS_CONFIG_LINKCFG_4X8G "FC_4x8G"
+#define OCS_CONFIG_LINKCFG_4X1G "FC_4x1G"
+#define OCS_CONFIG_LINKCFG_2X10G "ETH_2x10G"
+#define OCS_CONFIG_LINKCFG_2X10G_2X8G "ETH_2x10G_FC_2x8G"
+#define OCS_CONFIG_LINKCFG_UNKNOWN "UNKNOWN"
+
+#define OCS_IOCTL_CMD_BASE 'o'
+#define OCS_IOCTL_CMD_TEST _IOWR(OCS_IOCTL_CMD_BASE, 1, ocs_ioctl_test_t)
+#define OCS_IOCTL_CMD_ELXU_MBOX _IOWR(OCS_IOCTL_CMD_BASE, 2, ocs_ioctl_elxu_mbox_t)
+#define OCS_IOCTL_CMD_SCSI_CMD _IOWR(OCS_IOCTL_CMD_BASE, 3, ocs_ioctl_scsi_cmd_t)
+#define OCS_IOCTL_CMD_DRIVER_INFO _IOWR(OCS_IOCTL_CMD_BASE, 4, ocs_ioctl_driver_info_t)
+#define OCS_IOCTL_CMD_ECD_HELPER _IOWR(OCS_IOCTL_CMD_BASE, 5, ocs_ioctl_ecd_helper_t)
+#define OCS_IOCTL_CMD_CONNECTION_INFO _IOWR(OCS_IOCTL_CMD_BASE, 6, ocs_ioctl_connection_info_t)
+#define OCS_IOCTL_CMD_VPORT _IOWR(OCS_IOCTL_CMD_BASE, 7, ocs_ioctl_vport_t)
+#define OCS_IOCTL_CMD_GET_DDUMP _IOWR(OCS_IOCTL_CMD_BASE, 8, ocs_ioctl_ddump_t)
+#define OCS_IOCTL_CMD_MGMT_GET _IOWR(OCS_IOCTL_CMD_BASE, 9, ocs_ioctl_cmd_get_t)
+#define OCS_IOCTL_CMD_MGMT_GET_ALL _IOWR(OCS_IOCTL_CMD_BASE, 10, ocs_ioctl_mgmt_buffer_t)
+#define OCS_IOCTL_CMD_MGMT_SET _IOWR(OCS_IOCTL_CMD_BASE, 11, ocs_ioctl_cmd_set_t)
+#define OCS_IOCTL_CMD_MGMT_LIST _IOWR(OCS_IOCTL_CMD_BASE, 12, ocs_ioctl_mgmt_buffer_t)
+#define OCS_IOCTL_CMD_MGMT_EXEC _IOWR(OCS_IOCTL_CMD_BASE, 13, ocs_ioctl_action_t)
+#define OCS_IOCTL_CMD_LINK_ONLINE _IOWR(OCS_IOCTL_CMD_BASE, 16, int)
+#define OCS_IOCTL_CMD_GEN_DUMP _IOWR(OCS_IOCTL_CMD_BASE, 17, int)
+#define OCS_IOCTL_CMD_UNLOAD _IO(OCS_IOCTL_CMD_BASE, 18)
+#define OCS_IOCTL_CMD_SEND_FRAME _IOWR(OCS_IOCTL_CMD_BASE, 19, ocs_ioctl_send_frame_t)
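+
+/*
+ * A minimal user space sketch (headers and error handling elided); the
+ * driver creates the /dev/ocsN nodes that these commands are issued
+ * against:
+ *
+ *	int fd = open("/dev/ocs0", O_RDWR);
+ *	ocs_ioctl_driver_info_t info = { 0 };
+ *
+ *	if (fd >= 0 && ioctl(fd, OCS_IOCTL_CMD_DRIVER_INFO, &info) == 0)
+ *		printf("fw_rev: %s serial: %s\n", info.fw_rev, info.serialnum);
+ */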
+
+extern void ocs_info_get_xport_address(ocs_t *ocs, ocs_ioctl_driver_info_t *info);
+extern int32_t ocs_device_ioctl_xport(ocs_t *ocs, unsigned int cmd, unsigned long arg);
+#endif
diff --git a/sys/dev/ocs_fc/ocs_list.h b/sys/dev/ocs_fc/ocs_list.h
new file mode 100644
index 000000000000..f94660fd2d74
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs_list.h
@@ -0,0 +1,449 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/**
+ * @file
+ *
+ * OCS linked list API
+ *
+ */
+
+#if !defined(__OCS_LIST_H__)
+#define __OCS_LIST_H__
+
+#define OCS_LIST_DEBUG
+
+#if defined(OCS_LIST_DEBUG)
+
+#define ocs_list_magic_decl uint32_t magic;
+#define OCS_LIST_LIST_MAGIC 0xcafe0000
+#define OCS_LIST_LINK_MAGIC 0xcafe0001
+#define ocs_list_set_list_magic list->magic = OCS_LIST_LIST_MAGIC
+#define ocs_list_set_link_magic list->magic = OCS_LIST_LINK_MAGIC
+
+#define ocs_list_assert(cond, ...) \
+ if (!(cond)) { \
+ return __VA_ARGS__; \
+ }
+#else
+#define ocs_list_magic_decl
+#define ocs_list_assert(cond, ...)
+#define ocs_list_set_list_magic
+#define ocs_list_set_link_magic
+#endif
+
+/**
+ * @brief list/link structure
+ *
+ * Used for both the list object and the link object(s). The offset
+ * is specified when the list is initialized; this implies that a list
+ * will always point to objects of the same type. The offset is not used
+ * when ocs_list_t is used as a link (ocs_list_link_t).
+ *
+ */
+
+typedef struct ocs_list_s ocs_list_t;
+struct ocs_list_s {
+ ocs_list_magic_decl /**< used if debugging is enabled */
+ ocs_list_t *next; /**< pointer to head of list (or next if link) */
+ ocs_list_t *prev; /**< pointer to tail of list (or previous if link) */
+ uint32_t offset; /**< offset in bytes to the link element of the objects in list */
+};
+typedef ocs_list_t ocs_list_link_t;
+
+/* item2link - return pointer to link given pointer to an item */
+#define item2link(list, item) ((ocs_list_t*) (((uint8_t*)(item)) + (list)->offset))
+
+/* link2item - return pointer to item given pointer to a link */
+#define link2item(list, link) ((void*) (((uint8_t*)(link)) - (list)->offset))
+
+/**
+ * @brief Initialize a list
+ *
+ * Initializes a list object. The ocs_list_init() helper define calls
+ * _ocs_list_init() with offsetof(type, link).
+ *
+ * @param list Pointer to list
+ * @param offset Offset in bytes in item to the link element
+ *
+ * @return none
+ */
+static inline void
+_ocs_list_init(ocs_list_t *list, uint32_t offset)
+{
+ ocs_list_assert(list);
+ ocs_list_set_list_magic;
+
+ list->next = list;
+ list->prev = list;
+ list->offset = offset;
+}
+#define ocs_list_init(head, type, link) _ocs_list_init(head, offsetof(type, link))
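+
+/*
+ * A minimal usage sketch (item_t is a hypothetical caller type): embed an
+ * ocs_list_link_t in the item and initialize the list with the offset of
+ * that link member:
+ *
+ *	typedef struct {
+ *		int id;
+ *		ocs_list_link_t link;
+ *	} item_t;
+ *
+ *	ocs_list_t list;
+ *
+ *	ocs_list_init(&list, item_t, link);
+ */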
+
+/**
+ * @ingroup os
+ * @brief Test if a list is empty
+ *
+ * @param list Pointer to list head
+ *
+ * @return 1 if empty, 0 otherwise
+ */
+static inline int32_t
+ocs_list_empty(ocs_list_t *list)
+{
+ ocs_list_assert(list, 1);
+ ocs_list_assert(list->magic == OCS_LIST_LIST_MAGIC, 1);
+ return list->next == list;
+}
+
+/**
+ * @ingroup os
+ * @brief Test if a list is valid (ready for use)
+ *
+ * @param list Pointer to list head
+ *
+ * @return true if list is usable, false otherwise
+ */
+static inline int
+ocs_list_valid(ocs_list_t *list)
+{
+ return (list->magic == OCS_LIST_LIST_MAGIC);
+}
+
+/**
+ * @brief Insert link between two other links
+ *
+ * Inserts a link in between two other links
+ *
+ * @param a Pointer to first link
+ * @param b Pointer to next link
+ * @param c Pointer to link to insert between a and b
+ *
+ * @return none
+ */
+static inline void
+_ocs_list_insert_link(ocs_list_t *a, ocs_list_t *b, ocs_list_t *c)
+{
+ ocs_list_assert(a);
+ ocs_list_assert((a->magic == OCS_LIST_LIST_MAGIC) || (a->magic == OCS_LIST_LINK_MAGIC));
+ ocs_list_assert(a->next);
+ ocs_list_assert(a->prev);
+ ocs_list_assert(b);
+ ocs_list_assert((b->magic == OCS_LIST_LIST_MAGIC) || (b->magic == OCS_LIST_LINK_MAGIC));
+ ocs_list_assert(b->next);
+ ocs_list_assert(b->prev);
+ ocs_list_assert(c);
+ ocs_list_assert((c->magic == OCS_LIST_LIST_MAGIC) || (c->magic == OCS_LIST_LINK_MAGIC));
+ ocs_list_assert(!c->next);
+ ocs_list_assert(!c->prev);
+
+ ocs_list_assert(a->offset == b->offset);
+ ocs_list_assert(b->offset == c->offset);
+
+ c->next = a->next;
+ c->prev = b->prev;
+ a->next = c;
+ b->prev = c;
+}
+
+#if defined(OCS_LIST_DEBUG)
+/**
+ * @brief Initialize a list link for debug purposes
+ *
+ * For debugging, a linked list link element has a magic number that is
+ * initialized, and the offset value is initialized and used for subsequent
+ * assertions.
+ *
+ * @param list Pointer to list head
+ * @param link Pointer to link to be initialized
+ *
+ * @return none
+ */
+static inline void
+ocs_list_init_link(ocs_list_t *list, ocs_list_t *link)
+{
+ ocs_list_assert(list);
+ ocs_list_assert(list->magic == OCS_LIST_LIST_MAGIC);
+ ocs_list_assert(link);
+
+ if (link->magic == 0) {
+ link->magic = OCS_LIST_LINK_MAGIC;
+ link->offset = list->offset;
+ link->next = NULL;
+ link->prev = NULL;
+ }
+}
+#else
+#define ocs_list_init_link(...)
+#endif
+
+/**
+ * @ingroup os
+ * @brief Add an item to the head of the list
+ *
+ * @param list Pointer to list head
+ * @param item Item to add
+ */
+static inline void
+ocs_list_add_head(ocs_list_t *list, void *item)
+{
+ ocs_list_t *link;
+
+ ocs_list_assert(list);
+ ocs_list_assert(list->magic == OCS_LIST_LIST_MAGIC);
+ ocs_list_assert(item);
+
+ link = item2link(list, item);
+ ocs_list_init_link(list, link);
+
+ ocs_list_assert(link->magic == OCS_LIST_LINK_MAGIC);
+ ocs_list_assert(link->offset == list->offset);
+ ocs_list_assert(link->next == NULL);
+ ocs_list_assert(link->prev == NULL);
+
+ _ocs_list_insert_link(list, list->next, link);
+}
+
+
+/**
+ * @ingroup os
+ * @brief Add an item to the tail of the list
+ *
+ * @param list Head of the list
+ * @param item Item to add
+ */
+static inline void
+ocs_list_add_tail(ocs_list_t *list, void *item)
+{
+ ocs_list_t *link;
+
+ ocs_list_assert(list);
+ ocs_list_assert(list->magic == OCS_LIST_LIST_MAGIC);
+ ocs_list_assert(item);
+
+ link = item2link(list, item);
+ ocs_list_init_link(list, link);
+
+ ocs_list_assert(link->magic == OCS_LIST_LINK_MAGIC);
+ ocs_list_assert(link->offset == list->offset);
+ ocs_list_assert(link->next == NULL);
+ ocs_list_assert(link->prev == NULL);
+
+ _ocs_list_insert_link(list->prev, list, link);
+}
+
+
+/**
+ * @ingroup os
+ * @brief Return the first item in the list
+ *
+ * @param list Head of the list
+ *
+ * @return pointer to the first item, NULL otherwise
+ */
+static inline void *
+ocs_list_get_head(ocs_list_t *list)
+{
+ ocs_list_assert(list, NULL);
+ ocs_list_assert(list->magic == OCS_LIST_LIST_MAGIC, NULL);
+ return ocs_list_empty(list) ? NULL : link2item(list, list->next);
+}
+
+/**
+ * @ingroup os
+ * @brief Return the last item in the list
+ *
+ * @param list head of the list
+ *
+ * @return pointer to the last item, NULL otherwise
+ */
+static inline void *
+ocs_list_get_tail(ocs_list_t *list)
+{
+ ocs_list_assert(list, NULL);
+ ocs_list_assert(list->magic == OCS_LIST_LIST_MAGIC, NULL);
+ return ocs_list_empty(list) ? NULL : link2item(list, list->prev);
+}
+
+/**
+ * @ingroup os
+ * @brief Return the last item in the list
+ *
+ * @param list Pointer to list head
+ *
+ * @return pointer to the last item, NULL otherwise
+ */
+static inline void *ocs_list_tail(ocs_list_t *list)
+{
+ ocs_list_assert(list, NULL);
+ ocs_list_assert(list->magic == OCS_LIST_LIST_MAGIC, NULL);
+ return ocs_list_empty(list) ? NULL : link2item(list, list->prev);
+}
+
+/**
+ * @ingroup os
+ * @brief Get the next item on the list
+ *
+ * @param list head of the list
+ * @param item current item
+ *
+ * @return pointer to the next item, NULL otherwise
+ */
+static inline void *ocs_list_next(ocs_list_t *list, void *item)
+{
+ ocs_list_t *link;
+
+ if (item == NULL) {
+ return NULL;
+ }
+
+ ocs_list_assert(list, NULL);
+ ocs_list_assert(list->magic == OCS_LIST_LIST_MAGIC, NULL);
+ ocs_list_assert(item, NULL);
+
+ link = item2link(list, item);
+
+ ocs_list_assert(link->magic == OCS_LIST_LINK_MAGIC, NULL);
+ ocs_list_assert(link->offset == list->offset, NULL);
+ ocs_list_assert(link->next, NULL);
+ ocs_list_assert(link->prev, NULL);
+
+ if ((link->next) == list) {
+ return NULL;
+ }
+
+ return link2item(list, link->next);
+}
+
+/**
+ * @ingroup os
+ * @brief Remove and return an item from the head of the list
+ *
+ * @param list head of the list
+ *
+ * @return pointer to returned item, or NULL if list is empty
+ */
+#define ocs_list_remove_head(list) ocs_list_remove(list, ocs_list_get_head(list))
+
+/**
+ * @ingroup os
+ * @brief Remove an item from the list
+ *
+ * @param list Head of the list
+ * @param item Item to remove
+ *
+ * @return pointer to item, or NULL if item is not found.
+ */
+static inline void *ocs_list_remove(ocs_list_t *list, void *item)
+{
+ ocs_list_t *link;
+ ocs_list_t *prev;
+ ocs_list_t *next;
+
+ if (item == NULL) {
+ return NULL;
+ }
+ ocs_list_assert(list, NULL);
+ ocs_list_assert(list->magic == OCS_LIST_LIST_MAGIC, NULL);
+
+ link = item2link(list, item);
+
+ ocs_list_assert(link->magic == OCS_LIST_LINK_MAGIC, NULL);
+ ocs_list_assert(link->offset == list->offset, NULL);
+ ocs_list_assert(link->next, NULL);
+ ocs_list_assert(link->prev, NULL);
+
+ prev = link->prev;
+ next = link->next;
+
+ prev->next = next;
+ next->prev = prev;
+
+ link->next = link->prev = NULL;
+
+ return item;
+}
+
+/**
+ * @brief Iterate a linked list
+ *
+ * Iterate a linked list.
+ *
+ * @param list Pointer to list
+ * @param item Pointer to iterated item
+ *
+ * note, item is NULL after full list is traversed.
+ *
+ * @return none
+ */
+
+#define ocs_list_foreach(list, item) \
+ for (item = ocs_list_get_head((list)); item; item = ocs_list_next((list), item) )
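+
+/*
+ * Sketch: walk every item on a list (item_t as in the earlier sketch);
+ * items must not be removed inside this loop:
+ *
+ *	item_t *item;
+ *
+ *	ocs_list_foreach(&list, item) {
+ *		item->id++;
+ *	}
+ */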
+
+/**
+ * @brief Iterate a linked list safely
+ *
+ * Iterate a linked list safely, meaning that the iterated item
+ * may be safely removed from the list.
+ *
+ * @param list Pointer to list
+ * @param item Pointer to iterated item
+ * @param nxt Pointer to saved iterated item
+ *
+ * note, item is NULL after full list is traversed.
+ *
+ * @return none
+ */
+
+#define ocs_list_foreach_safe(list, item, nxt) \
+ for (item = ocs_list_get_head(list), nxt = item ? ocs_list_next(list, item) : NULL; item; \
+ item = nxt, nxt = ocs_list_next(list, item))
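+
+/*
+ * Sketch: drain a list; this is only safe with the _safe variant because
+ * the next pointer is saved before the current item is removed:
+ *
+ *	item_t *item, *nxt;
+ *
+ *	ocs_list_foreach_safe(&list, item, nxt) {
+ *		ocs_list_remove(&list, item);
+ *	}
+ */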
+
+/**
+ * @brief Test if object is on a list
+ *
+ * Returns True if object is on a list
+ *
+ * @param link Pointer to list link
+ *
+ * @return returns True if object is on a list
+ */
+static inline int32_t
+ocs_list_on_list(ocs_list_link_t *link)
+{
+ return (link->next != NULL);
+}
+
+#endif /* __OCS_LIST_H__ */
diff --git a/sys/dev/ocs_fc/ocs_mgmt.c b/sys/dev/ocs_fc/ocs_mgmt.c
new file mode 100644
index 000000000000..170dff6e931c
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs_mgmt.c
@@ -0,0 +1,2923 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/**
+ * @file
+ * The ocs_mgmt top level functions for Fibre Channel.
+ */
+
+/**
+ * @defgroup mgmt Management Functions
+ */
+
+#include "ocs.h"
+#include "ocs_mgmt.h"
+#include "ocs_vpd.h"
+
+#define SFP_PAGE_SIZE 128
+
+/* Executables */
+
+static int ocs_mgmt_firmware_write(ocs_t *ocs, char *, void *buf, uint32_t buf_len, void*, uint32_t);
+static int ocs_mgmt_firmware_reset(ocs_t *ocs, char *, void *buf, uint32_t buf_len, void*, uint32_t);
+static int ocs_mgmt_function_reset(ocs_t *ocs, char *, void *buf, uint32_t buf_len, void*, uint32_t);
+
+static void ocs_mgmt_fw_write_cb(int32_t status, uint32_t actual_write_length, uint32_t change_status, void *arg);
+static int ocs_mgmt_force_assert(ocs_t *ocs, char *, void *buf, uint32_t buf_len, void*, uint32_t);
+
+#if defined(OCS_INCLUDE_RAMD)
+static int32_t
+ocs_mgmt_read_phys(ocs_t *ocs, char *, void *, uint32_t, void *, uint32_t);
+#endif
+
+/* Getters */
+
+static void get_nodes_count(ocs_t *, char *, ocs_textbuf_t*);
+static void get_desc(ocs_t *, char *, ocs_textbuf_t*);
+static void get_fw_rev(ocs_t *, char *, ocs_textbuf_t*);
+static void get_fw_rev2(ocs_t *, char *, ocs_textbuf_t*);
+static void get_ipl(ocs_t *, char *, ocs_textbuf_t*);
+static void get_wwnn(ocs_t *, char *, ocs_textbuf_t*);
+static void get_wwpn(ocs_t *, char *, ocs_textbuf_t*);
+static void get_fcid(ocs_t *, char *, ocs_textbuf_t *);
+static void get_sn(ocs_t *, char *, ocs_textbuf_t*);
+static void get_pn(ocs_t *, char *, ocs_textbuf_t*);
+static void get_sli4_intf_reg(ocs_t *, char *, ocs_textbuf_t*);
+static void get_phy_port_num(ocs_t *, char *, ocs_textbuf_t*);
+static void get_asic_id(ocs_t *, char *, ocs_textbuf_t*);
+static void get_pci_vendor(ocs_t *, char *, ocs_textbuf_t*);
+static void get_pci_device(ocs_t *, char *, ocs_textbuf_t*);
+static void get_pci_subsystem_vendor(ocs_t *, char *, ocs_textbuf_t*);
+static void get_pci_subsystem_device(ocs_t *, char *, ocs_textbuf_t*);
+static void get_businfo(ocs_t *, char *, ocs_textbuf_t*);
+static void get_sfp_a0(ocs_t *, char *, ocs_textbuf_t*);
+static void get_sfp_a2(ocs_t *, char *, ocs_textbuf_t*);
+static void get_hw_rev1(ocs_t *, char *, ocs_textbuf_t*);
+static void get_hw_rev2(ocs_t *, char *, ocs_textbuf_t*);
+static void get_hw_rev3(ocs_t *, char *, ocs_textbuf_t*);
+static void get_debug_mq_dump(ocs_t*, char*, ocs_textbuf_t*);
+static void get_debug_cq_dump(ocs_t*, char*, ocs_textbuf_t*);
+static void get_debug_wq_dump(ocs_t*, char*, ocs_textbuf_t*);
+static void get_debug_eq_dump(ocs_t*, char*, ocs_textbuf_t*);
+static void get_logmask(ocs_t*, char*, ocs_textbuf_t*);
+static void get_current_speed(ocs_t*, char*, ocs_textbuf_t*);
+static void get_current_topology(ocs_t*, char*, ocs_textbuf_t*);
+static void get_current_link_state(ocs_t*, char*, ocs_textbuf_t*);
+static void get_configured_speed(ocs_t*, char*, ocs_textbuf_t*);
+static void get_configured_topology(ocs_t*, char*, ocs_textbuf_t*);
+static void get_configured_link_state(ocs_t*, char*, ocs_textbuf_t*);
+static void get_linkcfg(ocs_t*, char*, ocs_textbuf_t*);
+static void get_req_wwnn(ocs_t*, char*, ocs_textbuf_t*);
+static void get_req_wwpn(ocs_t*, char*, ocs_textbuf_t*);
+static void get_nodedb_mask(ocs_t*, char*, ocs_textbuf_t*);
+static void get_profile_list(ocs_t*, char*, ocs_textbuf_t*);
+static void get_active_profile(ocs_t*, char*, ocs_textbuf_t*);
+static void get_port_protocol(ocs_t*, char*, ocs_textbuf_t*);
+static void get_driver_version(ocs_t*, char*, ocs_textbuf_t*);
+static void get_chip_type(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf);
+static void get_tgt_rscn_delay(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf);
+static void get_tgt_rscn_period(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf);
+static void get_inject_drop_cmd(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf);
+static void get_inject_free_drop_cmd(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf);
+static void get_inject_drop_data(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf);
+static void get_inject_drop_resp(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf);
+static void get_cmd_err_inject(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf);
+static void get_cmd_delay_value(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf);
+static void get_nv_wwpn(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf);
+static void get_nv_wwnn(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf);
+static void get_loglevel(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf);
+static void get_node_abort_cnt(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf);
+
+/* Setters */
+static int set_debug_mq_dump(ocs_t*, char*, char*);
+static int set_debug_cq_dump(ocs_t*, char*, char*);
+static int set_debug_wq_dump(ocs_t*, char*, char*);
+static int set_debug_eq_dump(ocs_t*, char*, char*);
+static int set_logmask(ocs_t*, char*, char*);
+static int set_configured_link_state(ocs_t*, char*, char*);
+static int set_configured_speed(ocs_t*, char*, char*);
+static int set_configured_topology(ocs_t*, char*, char*);
+static int set_linkcfg(ocs_t*, char*, char*);
+static int set_req_wwnn(ocs_t*, char*, char*);
+static int set_req_wwpn(ocs_t*, char*, char*);
+static int set_nodedb_mask(ocs_t*, char*, char*);
+static int set_port_protocol(ocs_t*, char*, char*);
+static int set_active_profile(ocs_t*, char*, char*);
+static int set_tgt_rscn_delay(ocs_t*, char*, char*);
+static int set_tgt_rscn_period(ocs_t*, char*, char*);
+static int set_inject_drop_cmd(ocs_t*, char*, char*);
+static int set_inject_free_drop_cmd(ocs_t*, char*, char*);
+static int set_inject_drop_data(ocs_t*, char*, char*);
+static int set_inject_drop_resp(ocs_t*, char*, char*);
+static int set_cmd_err_inject(ocs_t*, char*, char*);
+static int set_cmd_delay_value(ocs_t*, char*, char*);
+static int set_nv_wwn(ocs_t*, char*, char*);
+static int set_loglevel(ocs_t*, char*, char*);
+
+static void ocs_mgmt_linkcfg_cb(int32_t status, uintptr_t value, void *arg);
+#if defined(OCS_INCLUDE_RAMD)
+static void* find_address_in_target(ocs_ramdisc_t **ramdisc_array, uint32_t ramdisc_count, uintptr_t target_addr);
+#endif
+
+ocs_mgmt_table_entry_t mgmt_table[] = {
+ {"nodes_count", get_nodes_count, NULL, NULL},
+ {"desc", get_desc, NULL, NULL},
+ {"fw_rev", get_fw_rev, NULL, NULL},
+ {"fw_rev2", get_fw_rev2, NULL, NULL},
+ {"ipl", get_ipl, NULL, NULL},
+ {"hw_rev1", get_hw_rev1, NULL, NULL},
+ {"hw_rev2", get_hw_rev2, NULL, NULL},
+ {"hw_rev3", get_hw_rev3, NULL, NULL},
+ {"wwnn", get_wwnn, NULL, NULL},
+ {"wwpn", get_wwpn, NULL, NULL},
+ {"fc_id", get_fcid, NULL, NULL},
+ {"sn", get_sn, NULL, NULL},
+ {"pn", get_pn, NULL, NULL},
+ {"sli4_intf_reg", get_sli4_intf_reg, NULL, NULL},
+ {"phy_port_num", get_phy_port_num, NULL, NULL},
+ {"asic_id_reg", get_asic_id, NULL, NULL},
+ {"pci_vendor", get_pci_vendor, NULL, NULL},
+ {"pci_device", get_pci_device, NULL, NULL},
+ {"pci_subsystem_vendor", get_pci_subsystem_vendor, NULL, NULL},
+ {"pci_subsystem_device", get_pci_subsystem_device, NULL, NULL},
+ {"businfo", get_businfo, NULL, NULL},
+ {"sfp_a0", get_sfp_a0, NULL, NULL},
+ {"sfp_a2", get_sfp_a2, NULL, NULL},
+ {"profile_list", get_profile_list, NULL, NULL},
+ {"driver_version", get_driver_version, NULL, NULL},
+ {"current_speed", get_current_speed, NULL, NULL},
+ {"current_topology", get_current_topology, NULL, NULL},
+ {"current_link_state", get_current_link_state, NULL, NULL},
+ {"chip_type", get_chip_type, NULL, NULL},
+ {"configured_speed", get_configured_speed, set_configured_speed, NULL},
+ {"configured_topology", get_configured_topology, set_configured_topology, NULL},
+ {"configured_link_state", get_configured_link_state, set_configured_link_state, NULL},
+ {"debug_mq_dump", get_debug_mq_dump, set_debug_mq_dump, NULL},
+ {"debug_cq_dump", get_debug_cq_dump, set_debug_cq_dump, NULL},
+ {"debug_wq_dump", get_debug_wq_dump, set_debug_wq_dump, NULL},
+ {"debug_eq_dump", get_debug_eq_dump, set_debug_eq_dump, NULL},
+ {"logmask", get_logmask, set_logmask, NULL},
+ {"loglevel", get_loglevel, set_loglevel, NULL},
+ {"linkcfg", get_linkcfg, set_linkcfg, NULL},
+ {"requested_wwnn", get_req_wwnn, set_req_wwnn, NULL},
+ {"requested_wwpn", get_req_wwpn, set_req_wwpn, NULL},
+ {"nodedb_mask", get_nodedb_mask, set_nodedb_mask, NULL},
+ {"port_protocol", get_port_protocol, set_port_protocol, NULL},
+ {"active_profile", get_active_profile, set_active_profile, NULL},
+ {"firmware_write", NULL, NULL, ocs_mgmt_firmware_write},
+ {"firmware_reset", NULL, NULL, ocs_mgmt_firmware_reset},
+ {"function_reset", NULL, NULL, ocs_mgmt_function_reset},
+#if defined(OCS_INCLUDE_RAMD)
+ {"read_phys", NULL, NULL, ocs_mgmt_read_phys},
+#endif
+ {"force_assert", NULL, NULL, ocs_mgmt_force_assert},
+
+ {"tgt_rscn_delay", get_tgt_rscn_delay, set_tgt_rscn_delay, NULL},
+ {"tgt_rscn_period", get_tgt_rscn_period, set_tgt_rscn_period, NULL},
+ {"inject_drop_cmd", get_inject_drop_cmd, set_inject_drop_cmd, NULL},
+ {"inject_free_drop_cmd", get_inject_free_drop_cmd, set_inject_free_drop_cmd, NULL},
+ {"inject_drop_data", get_inject_drop_data, set_inject_drop_data, NULL},
+ {"inject_drop_resp", get_inject_drop_resp, set_inject_drop_resp, NULL},
+ {"cmd_err_inject", get_cmd_err_inject, set_cmd_err_inject, NULL},
+ {"cmd_delay_value", get_cmd_delay_value, set_cmd_delay_value, NULL},
+ {"nv_wwpn", get_nv_wwpn, NULL, NULL},
+ {"nv_wwnn", get_nv_wwnn, NULL, NULL},
+ {"nv_wwn", NULL, set_nv_wwn, NULL},
+ {"node_abort_cnt", get_node_abort_cnt, NULL, NULL},
+};
+
+/**
+ * @ingroup mgmt
+ * @brief Get a list of options supported by the driver.
+ *
+ * @par Description
+ * This is the top level "get list" handler for the driver. It
+ * performs the following:
+ * - Adds entries to the textbuf for any actions supported by this level in the driver.
+ * - Calls a back-end function to add any actions supported by the back-end.
+ * - Calls a function on each child (domain) to recursively add supported actions.
+ *
+ * @param ocs Pointer to the ocs structure.
+ * @param textbuf Pointer to an ocs_textbuf, which is used to accumulate the results.
+ *
+ * @return None.
+ */
+
+void
+ocs_mgmt_get_list(ocs_t *ocs, ocs_textbuf_t *textbuf)
+{
+ ocs_domain_t *domain;
+ uint32_t i;
+ int access;
+
+ ocs_mgmt_start_unnumbered_section(textbuf, "ocs");
+
+ for (i=0;i<ARRAY_SIZE(mgmt_table);i++) {
+ access = 0;
+ if (mgmt_table[i].get_handler) {
+ access |= MGMT_MODE_RD;
+ }
+ if (mgmt_table[i].set_handler) {
+ access |= MGMT_MODE_WR;
+ }
+ if (mgmt_table[i].action_handler) {
+ access |= MGMT_MODE_EX;
+ }
+ ocs_mgmt_emit_property_name(textbuf, access, mgmt_table[i].name);
+ }
+
+ if ((ocs->mgmt_functions) && (ocs->mgmt_functions->get_list_handler)) {
+ ocs->mgmt_functions->get_list_handler(textbuf, ocs);
+ }
+
+ if ((ocs->tgt_mgmt_functions) && (ocs->tgt_mgmt_functions->get_list_handler)) {
+ ocs->tgt_mgmt_functions->get_list_handler(textbuf, &(ocs->tgt_ocs));
+ }
+
+ /* Have each of my children add their actions */
+ if (ocs_device_lock_try(ocs) == TRUE) {
+
+ /* If we get here then we are holding the device lock */
+ ocs_list_foreach(&ocs->domain_list, domain) {
+ if ((domain->mgmt_functions) && (domain->mgmt_functions->get_list_handler)) {
+ domain->mgmt_functions->get_list_handler(textbuf, domain);
+ }
+ }
+ ocs_device_unlock(ocs);
+ }
+
+ ocs_mgmt_end_unnumbered_section(textbuf, "ocs");
+
+}
+
+/**
+ * @ingroup mgmt
+ * @brief Return the value of a management item.
+ *
+ * @par Description
+ * This is the top level "get" handler for the driver. It
+ * performs the following:
+ * - Checks that the qualifier portion of the name begins with my qualifier (ocs).
+ * - If the remaining part of the name matches a parameter that is known at this level,
+ * writes the value into textbuf.
+ * - If the name is not known, sends the request to the back-ends to fulfill (if possible).
+ * - If the request has not been fulfilled by the back-end,
+ * passes the request to each of the children (domains) to
+ * have them (recursively) try to respond.
+ *
+ * In passing the request to other entities, the request is considered to be answered
+ * when a response has been written into textbuf, indicated by textbuf->buffer_written
+ * being non-zero.
+ *
+ * @param ocs Pointer to the ocs structure.
+ * @param name Name of the status item to be retrieved.
+ * @param textbuf Pointer to an ocs_textbuf, which is used to return the results.
+ *
+ * @return Returns 0 if the value was found and returned, or -1 if an error occurred.
+ */
+
+int
+ocs_mgmt_get(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+ ocs_domain_t *domain;
+ char qualifier[6];
+ int retval = -1;
+ uint32_t i;
+
+ ocs_mgmt_start_unnumbered_section(textbuf, "ocs");
+
+ snprintf(qualifier, sizeof(qualifier), "/ocs");
+
+ /* See if the name starts with my qualifier. If not then this request isn't for me */
+ if (ocs_strncmp(name, qualifier, strlen(qualifier)) == 0) {
+ char *unqualified_name = name + strlen(qualifier) + 1;
+
+ for (i=0;i<ARRAY_SIZE(mgmt_table);i++) {
+ if (ocs_strcmp(unqualified_name, mgmt_table[i].name) == 0) {
+ if (mgmt_table[i].get_handler) {
+ mgmt_table[i].get_handler(ocs, name, textbuf);
+ ocs_mgmt_end_unnumbered_section(textbuf, "ocs");
+ return 0;
+ }
+ }
+ }
+
+ if ((ocs->mgmt_functions) && (ocs->mgmt_functions->get_handler)) {
+ retval = ocs->mgmt_functions->get_handler(textbuf, qualifier, (char*)name, ocs);
+ }
+
+ if (retval != 0) {
+ if ((ocs->tgt_mgmt_functions) && (ocs->tgt_mgmt_functions->get_handler)) {
+ retval = ocs->tgt_mgmt_functions->get_handler(textbuf, qualifier,
+ (char*)name, &(ocs->tgt_ocs));
+ }
+ }
+
+ if (retval != 0) {
+ /* The driver didn't handle it, pass it to each domain */
+
+ ocs_device_lock(ocs);
+ ocs_list_foreach(&ocs->domain_list, domain) {
+ if ((domain->mgmt_functions) && (domain->mgmt_functions->get_handler)) {
+ retval = domain->mgmt_functions->get_handler(textbuf, qualifier, (char*)name, domain);
+ }
+
+ if (retval == 0) {
+ break;
+ }
+
+ }
+ ocs_device_unlock(ocs);
+ }
+
+ }
+
+ ocs_mgmt_end_unnumbered_section(textbuf, "ocs");
+
+ return retval;
+}
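+
+/*
+ * For example, ocs_mgmt_get(ocs, "/ocs/fw_rev", textbuf) strips the "/ocs"
+ * qualifier, matches the "fw_rev" entry in mgmt_table, and invokes
+ * get_fw_rev() to emit the firmware revision into the textbuf; a name that
+ * matches no table entry falls through to the back ends and then to each
+ * domain.
+ */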
+
+/**
+ * @ingroup mgmt
+ * @brief Set the value of a mgmt item.
+ *
+ * @par Description
+ * This is the top level "set" handler for the driver. It
+ * performs the following:
+ * - Checks that the qualifier portion of the name begins with my qualifier (ocs).
+ * - If the remaining part of the name matches a parameter that is known at this level,
+ * calls the correct function to change the configuration.
+ * - If the name is not known, sends the request to the back-ends to fulfill (if possible).
+ * - If the request has not been fulfilled by the back-end, passes the request to each of the
+ * children (domains) to have them (recursively) try to respond.
+ *
+ * In passing the request to other entities, the request is considered to be handled
+ * if the function returns 0.
+ *
+ * @param ocs Pointer to the ocs structure.
+ * @param name Name of the property to be changed.
+ * @param value Requested new value of the property.
+ *
+ * @return Returns 0 if the configuration value was updated, or -1 otherwise.
+ */
+
+int
+ocs_mgmt_set(ocs_t *ocs, char *name, char *value)
+{
+ ocs_domain_t *domain;
+ int result = -1;
+ char qualifier[80];
+ uint32_t i;
+
+ snprintf(qualifier, sizeof(qualifier), "/ocs");
+
+ /* If it doesn't start with my qualifier I don't know what to do with it */
+ if (ocs_strncmp(name, qualifier, strlen(qualifier)) == 0) {
+ char *unqualified_name = name + strlen(qualifier) +1;
+
+ /* See if it's a value I can set */
+ for (i=0;i<ARRAY_SIZE(mgmt_table);i++) {
+ if (ocs_strcmp(unqualified_name, mgmt_table[i].name) == 0) {
+ if (mgmt_table[i].set_handler) {
+ return mgmt_table[i].set_handler(ocs, name, value);
+ }
+ }
+ }
+
+ if ((ocs->mgmt_functions) && (ocs->mgmt_functions->set_handler)) {
+ result = ocs->mgmt_functions->set_handler(qualifier, name, (char *)value, ocs);
+ }
+
+ if (result != 0) {
+ if ((ocs->tgt_mgmt_functions) && (ocs->tgt_mgmt_functions->set_handler)) {
+ result = ocs->tgt_mgmt_functions->set_handler(qualifier, name,
+ (char *)value, &(ocs->tgt_ocs));
+ }
+ }
+
+ /* If I didn't know how to set this config value pass the request to each of my children */
+ if (result != 0) {
+ ocs_device_lock(ocs);
+ ocs_list_foreach(&ocs->domain_list, domain) {
+ if ((domain->mgmt_functions) && (domain->mgmt_functions->set_handler)) {
+ result = domain->mgmt_functions->set_handler(qualifier, name, (char*)value, domain);
+ }
+ if (result == 0) {
+ break;
+ }
+ }
+ ocs_device_unlock(ocs);
+ }
+
+ }
+
+ return result;
+}
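+
+/*
+ * For example (the value string is only illustrative):
+ *
+ *	ocs_mgmt_set(ocs, "/ocs/logmask", "0x10");
+ *
+ * strips the qualifier and dispatches to set_logmask() with the string
+ * "0x10".
+ */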
+
+/**
+ * @ingroup mgmt
+ * @brief Perform a management action.
+ *
+ * @par Description
+ * This is the top level "exec" handler for the driver. It
+ * performs the following:
+ * - Checks that the qualifier portion of the name begins with my qualifier (ocs).
+ * - If the remaining part of the name matches an action that is known at this level,
+ * calls the correct function to perform the action.
+ * - If the name is not known, sends the request to the back-ends to fulfill (if possible).
+ * - If the request has not been fulfilled by the back-end, passes the request to each of the
+ * children (domains) to have them (recursively) try to respond.
+ *
+ * In passing the request to other entities, the request is considered to be handled
+ * if the function returns 0.
+ *
+ * @param ocs Pointer to the ocs structure.
+ * @param action Name of the action to be performed.
+ * @param arg_in Pointer to an argument being passed to the action.
+ * @param arg_in_length Length of the argument pointed to by @c arg_in.
+ * @param arg_out Pointer to an argument being passed to the action.
+ * @param arg_out_length Length of the argument pointed to by @c arg_out.
+ *
+ * @return Returns 0 if the action was completed, or -1 otherwise.
+ */
+
+int
+ocs_mgmt_exec(ocs_t *ocs, char *action, void *arg_in,
+ uint32_t arg_in_length, void *arg_out, uint32_t arg_out_length)
+{
+ ocs_domain_t *domain;
+ int result = -1;
+ char qualifier[80];
+ uint32_t i;
+
+ snprintf(qualifier, sizeof(qualifier), "/ocs");
+
+ /* If it doesn't start with my qualifier I don't know what to do with it */
+ if (ocs_strncmp(action, qualifier, strlen(qualifier)) == 0) {
+ char *unqualified_name = action + strlen(qualifier) +1;
+
+ /* See if it's an action I can perform */
+ for (i=0;i<ARRAY_SIZE(mgmt_table); i++) {
+ if (ocs_strcmp(unqualified_name, mgmt_table[i].name) == 0) {
+ if (mgmt_table[i].action_handler) {
+ return mgmt_table[i].action_handler(ocs, action, arg_in, arg_in_length,
+ arg_out, arg_out_length);
+ }
+
+ }
+ }
+
+ if ((ocs->mgmt_functions) && (ocs->mgmt_functions->exec_handler)) {
+ result = ocs->mgmt_functions->exec_handler(qualifier, action, arg_in, arg_in_length,
+ arg_out, arg_out_length, ocs);
+ }
+
+ if (result != 0) {
+ if ((ocs->tgt_mgmt_functions) && (ocs->tgt_mgmt_functions->exec_handler)) {
+ result = ocs->tgt_mgmt_functions->exec_handler(qualifier, action,
+ arg_in, arg_in_length, arg_out, arg_out_length,
+ &(ocs->tgt_ocs));
+ }
+ }
+
+ /* If I didn't know how to do this action pass the request to each of my children */
+ if (result != 0) {
+ ocs_device_lock(ocs);
+ ocs_list_foreach(&ocs->domain_list, domain) {
+ if ((domain->mgmt_functions) && (domain->mgmt_functions->exec_handler)) {
+ result = domain->mgmt_functions->exec_handler(qualifier, action, arg_in, arg_in_length, arg_out,
+ arg_out_length, domain);
+ }
+ if (result == 0) {
+ break;
+ }
+ }
+ ocs_device_unlock(ocs);
+ }
+
+ }
+
+ return result;
+}
+
+void
+ocs_mgmt_get_all(ocs_t *ocs, ocs_textbuf_t *textbuf)
+{
+ ocs_domain_t *domain;
+ uint32_t i;
+
+ ocs_mgmt_start_unnumbered_section(textbuf, "ocs");
+
+ for (i=0;i<ARRAY_SIZE(mgmt_table);i++) {
+ if (mgmt_table[i].get_handler) {
+ mgmt_table[i].get_handler(ocs, mgmt_table[i].name, textbuf);
+ } else if (mgmt_table[i].action_handler) {
+ /* No get_handler, but there's an action_handler; just report the name */
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_EX, mgmt_table[i].name);
+ }
+ }
+
+ if ((ocs->mgmt_functions) && (ocs->mgmt_functions->get_all_handler)) {
+ ocs->mgmt_functions->get_all_handler(textbuf, ocs);
+ }
+
+ if ((ocs->tgt_mgmt_functions) && (ocs->tgt_mgmt_functions->get_all_handler)) {
+ ocs->tgt_mgmt_functions->get_all_handler(textbuf, &(ocs->tgt_ocs));
+ }
+
+ ocs_device_lock(ocs);
+ ocs_list_foreach(&ocs->domain_list, domain) {
+ if ((domain->mgmt_functions) && (domain->mgmt_functions->get_all_handler)) {
+ domain->mgmt_functions->get_all_handler(textbuf, domain);
+ }
+ }
+ ocs_device_unlock(ocs);
+
+ ocs_mgmt_end_unnumbered_section(textbuf, "ocs");
+}
+
+#if defined(OCS_INCLUDE_RAMD)
+static int32_t
+ocs_mgmt_read_phys(ocs_t *ocs, char *name, void *arg_in, uint32_t arg_in_length, void *arg_out, uint32_t arg_out_length)
+{
+ uint32_t length;
+ char addr_str[80];
+ uintptr_t target_addr;
+ void* vaddr = NULL;
+ ocs_ramdisc_t **ramdisc_array;
+ uint32_t ramdisc_count;
+
+ if ((arg_in == NULL) ||
+ (arg_in_length == 0) ||
+ (arg_out == NULL) ||
+ (arg_out_length == 0)) {
+ return -1;
+ }
+
+ if (arg_in_length > sizeof(addr_str) - 1) {
+ arg_in_length = sizeof(addr_str) - 1;
+ }
+
+ if (ocs_copy_from_user(addr_str, arg_in, arg_in_length)) {
+ ocs_log_test(ocs, "Failed to copy addr from user\n");
+ return -EFAULT;
+ }
+ addr_str[arg_in_length] = '\0'; /* guarantee NUL termination for ocs_strtoul */
+
+ target_addr = (uintptr_t)ocs_strtoul(addr_str, NULL, 0);
+ /* addr_str must be the physical address of a buffer that was reported
+ * in an SGL. Search ramdiscs looking for a segment that contains that
+ * physical address
+ */
+
+ if (ocs->tgt_ocs.use_global_ramd) {
+ /* Only one target */
+ ramdisc_count = ocs->tgt_ocs.rdisc_count;
+ ramdisc_array = ocs->tgt_ocs.rdisc;
+ vaddr = find_address_in_target(ramdisc_array, ramdisc_count, target_addr);
+ } else {
+ /* Multiple targets. Each target is on a sport */
+ uint32_t domain_idx;
+
+ for (domain_idx=0; domain_idx<ocs->domain_instance_count; domain_idx++) {
+ ocs_domain_t *domain;
+ uint32_t sport_idx;
+
+ domain = ocs_domain_get_instance(ocs, domain_idx);
+ for (sport_idx=0; sport_idx < domain->sport_instance_count; sport_idx++) {
+ ocs_sport_t *sport;
+
+ sport = ocs_sport_get_instance(domain, sport_idx);
+ ramdisc_count = sport->tgt_sport.rdisc_count;
+ ramdisc_array = sport->tgt_sport.rdisc;
+ vaddr = find_address_in_target(ramdisc_array, ramdisc_count, target_addr);
+
+ if (vaddr != NULL) {
+ break;
+ }
+ }
+ }
+ }
+
+ length = arg_out_length;
+
+ if (vaddr != NULL) {
+
+ if (ocs_copy_to_user(arg_out, vaddr, length)) {
+ ocs_log_test(ocs, "Failed to copy buffer to user\n");
+ return -EFAULT;
+ }
+
+ return 0;
+ } else {
+
+ return -EFAULT;
+ }
+
+}
+
+/*
+ * This function searches a target for a given physical address.
+ * The target is made up of a number of LUNs, each represented by
+ * a ocs_ramdisc_t.
+ */
+static void* find_address_in_target(ocs_ramdisc_t **ramdisc_array, uint32_t ramdisc_count, uintptr_t target_addr)
+{
+ void *vaddr = NULL;
+ uint32_t ramdisc_idx;
+
+ /* Check each ramdisc */
+ for (ramdisc_idx=0; ramdisc_idx<ramdisc_count; ramdisc_idx++) {
+ uint32_t segment_idx;
+ ocs_ramdisc_t *rdisc;
+ rdisc = ramdisc_array[ramdisc_idx];
+ /* Check each segment in the ramdisc */
+ for (segment_idx=0; segment_idx<rdisc->segment_count; segment_idx++) {
+ ramdisc_segment_t *segment = rdisc->segments[segment_idx];
+ uintptr_t segment_start;
+ uintptr_t segment_end;
+ uint32_t offset;
+
+ segment_start = segment->data_segment.phys;
+ segment_end = segment->data_segment.phys + segment->data_segment.size - 1;
+ if ((target_addr >= segment_start) && (target_addr <= segment_end)) {
+ /* Found the target address */
+ offset = target_addr - segment_start;
+ vaddr = (uint8_t*)segment->data_segment.virt + offset; /* offset is in bytes */
+ }
+
+ if (rdisc->dif_separate) {
+ segment_start = segment->dif_segment.phys;
+ segment_end = segment->dif_segment.phys + segment->dif_segment.size - 1;
+ if ((target_addr >= segment_start) && (target_addr <= segment_end)) {
+ /* Found the target address */
+ offset = target_addr - segment_start;
+ vaddr = (uint8_t*)segment->dif_segment.virt + offset; /* offset is in bytes */
+ }
+ }
+
+ if (vaddr != NULL) {
+ break;
+ }
+
+ }
+
+ if (vaddr != NULL) {
+ break;
+ }
+
+ }
+
+ return vaddr;
+}
+#endif
+
+static int32_t
+ocs_mgmt_firmware_reset(ocs_t *ocs, char *name, void *buf, uint32_t buf_len, void *arg_out, uint32_t arg_out_length)
+{
+ int rc = 0;
+ int index = 0;
+ uint8_t bus, dev, func;
+ ocs_t *other_ocs;
+
+ ocs_get_bus_dev_func(ocs, &bus, &dev, &func);
+
+ ocs_log_debug(ocs, "Resetting port\n");
+ if (ocs_hw_reset(&ocs->hw, OCS_HW_RESET_FIRMWARE)) {
+ ocs_log_test(ocs, "failed to reset port\n");
+ rc = -1;
+ } else {
+ ocs_log_debug(ocs, "successfully reset port\n");
+
+ /* now reset all functions on the same device */
+
+ while ((other_ocs = ocs_get_instance(index++)) != NULL) {
+ uint8_t other_bus, other_dev, other_func;
+
+ ocs_get_bus_dev_func(other_ocs, &other_bus, &other_dev, &other_func);
+
+ if ((bus == other_bus) && (dev == other_dev)) {
+ if (other_ocs->hw.state !=
+ OCS_HW_STATE_UNINITIALIZED) {
+ other_ocs->hw.state =
+ OCS_HW_STATE_QUEUES_ALLOCATED;
+ }
+
+ ocs_device_detach(other_ocs);
+ if (ocs_device_attach(other_ocs)) {
+ ocs_log_err(other_ocs,
+ "device %d attach failed\n", index);
+ rc = -1;
+ }
+ }
+ }
+ }
+ return rc;
+}
+
+static int32_t
+ocs_mgmt_function_reset(ocs_t *ocs, char *name, void *buf, uint32_t buf_len, void *arg_out, uint32_t arg_out_length)
+{
+ int32_t rc;
+
+ ocs_device_detach(ocs);
+ rc = ocs_device_attach(ocs);
+
+ return rc;
+}
+
+static int32_t
+ocs_mgmt_firmware_write(ocs_t *ocs, char *name, void *buf, uint32_t buf_len, void *arg_out, uint32_t arg_out_length)
+{
+ int rc = 0;
+ uint32_t bytes_left;
+ uint32_t xfer_size;
+ uint32_t offset;
+ uint8_t *userp;
+ ocs_dma_t dma;
+ int last = 0;
+ ocs_mgmt_fw_write_result_t result;
+ uint32_t change_status = 0;
+ char status_str[80];
+
+ ocs_sem_init(&(result.semaphore), 0, "fw_write");
+
+ bytes_left = buf_len;
+ offset = 0;
+ userp = (uint8_t *)buf;
+
+ if (ocs_dma_alloc(ocs, &dma, FW_WRITE_BUFSIZE, 4096)) {
+ ocs_log_err(ocs, "ocs_mgmt_firmware_write: malloc failed\n");
+ return -ENOMEM;
+ }
+
+ while (bytes_left > 0) {
+ if (bytes_left > FW_WRITE_BUFSIZE) {
+ xfer_size = FW_WRITE_BUFSIZE;
+ } else {
+ xfer_size = bytes_left;
+ }
+
+ /* Copy xfer_size bytes from user space to kernel buffer */
+ if (ocs_copy_from_user(dma.virt, userp, xfer_size)) {
+ rc = -EFAULT;
+ break;
+ }
+
+ /* See if this is the last block */
+ if (bytes_left == xfer_size) {
+ last = 1;
+ }
+
+ /* Send the HW command */
+ ocs_hw_firmware_write(&ocs->hw, &dma, xfer_size, offset, last, ocs_mgmt_fw_write_cb, &result);
+
+ /* Wait for semaphore to be signaled when the command completes
+ * TODO: Should there be a timeout on this? If so, how long? */
+ if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) {
+ ocs_log_err(ocs, "ocs_sem_p failed\n");
+ rc = -ENXIO;
+ break;
+ }
+
+ if (result.actual_xfer == 0) {
+ ocs_log_test(ocs, "actual_write_length is %d\n", result.actual_xfer);
+ rc = -EFAULT;
+ break;
+ }
+
+ /* Check status */
+ if (result.status != 0) {
+ ocs_log_test(ocs, "write returned status %d\n", result.status);
+ rc = -EFAULT;
+ break;
+ }
+
+ if (last) {
+ change_status = result.change_status;
+ }
+
+ bytes_left -= result.actual_xfer;
+ offset += result.actual_xfer;
+ userp += result.actual_xfer;
+
+ }
+
+ /* Create string with status and copy to userland */
+ if ((arg_out_length > 0) && (arg_out != NULL)) {
+ if (arg_out_length > sizeof(status_str)) {
+ arg_out_length = sizeof(status_str);
+ }
+ ocs_snprintf(status_str, arg_out_length, "%d", change_status);
+ if (ocs_copy_to_user(arg_out, status_str, arg_out_length)) {
+ ocs_log_test(ocs, "copy to user failed for change_status\n");
+ }
+ }
+
+ ocs_dma_free(ocs, &dma);
+
+ return rc;
+}
+
+static void
+ocs_mgmt_fw_write_cb(int32_t status, uint32_t actual_write_length, uint32_t change_status, void *arg)
+{
+ ocs_mgmt_fw_write_result_t *result = arg;
+
+ result->status = status;
+ result->actual_xfer = actual_write_length;
+ result->change_status = change_status;
+
+ ocs_sem_v(&(result->semaphore));
+}
+
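+/*
+ * The "running" counter in ocs_mgmt_sfp_result_t coordinates
+ * ocs_mgmt_get_sfp() with its completion callback: each side increments
+ * the counter under cb_lock, and whichever side sees it reach 2 knows the
+ * other has already run; on a timeout the requester returns and the late
+ * callback frees the result structure.
+ */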
+typedef struct ocs_mgmt_sfp_result {
+ ocs_sem_t semaphore;
+ ocs_lock_t cb_lock;
+ int32_t running;
+ int32_t status;
+ uint32_t bytes_read;
+ uint32_t page_data[32];
+} ocs_mgmt_sfp_result_t;
+
+static void
+ocs_mgmt_sfp_cb(void *os, int32_t status, uint32_t bytes_read, uint32_t *data, void *arg)
+{
+ ocs_mgmt_sfp_result_t *result = arg;
+ ocs_t *ocs = os;
+
+ ocs_lock(&(result->cb_lock));
+ result->running++;
+ if (result->running == 2) {
+ /* get_sfp() has timed out */
+ ocs_unlock(&(result->cb_lock));
+ ocs_free(ocs, result, sizeof(ocs_mgmt_sfp_result_t));
+ return;
+ }
+
+ result->status = status;
+ result->bytes_read = bytes_read;
+ ocs_memcpy(&result->page_data, data, SFP_PAGE_SIZE);
+
+ ocs_sem_v(&(result->semaphore));
+ ocs_unlock(&(result->cb_lock));
+}
+
+static int32_t
+ocs_mgmt_get_sfp(ocs_t *ocs, uint16_t page, void *buf, uint32_t buf_len)
+{
+ int rc = 0;
+ ocs_mgmt_sfp_result_t *result = ocs_malloc(ocs, sizeof(ocs_mgmt_sfp_result_t), OCS_M_ZERO | OCS_M_NOWAIT);
+
+ if (result == NULL) {
+ return -ENOMEM;
+ }
+
+ ocs_sem_init(&(result->semaphore), 0, "get_sfp");
+ ocs_lock_init(ocs, &(result->cb_lock), "get_sfp");
+
+ /* Send the HW command */
+ ocs_hw_get_sfp(&ocs->hw, page, ocs_mgmt_sfp_cb, result);
+
+ /* Wait for semaphore to be signaled when the command completes */
+ if (ocs_sem_p(&(result->semaphore), 5 * 1000 * 1000) != 0) {
+ /* Timed out, callback will free memory */
+ ocs_lock(&(result->cb_lock));
+ result->running++;
+ if (result->running == 1) {
+ ocs_log_err(ocs, "ocs_sem_p failed\n");
+ ocs_unlock(&(result->cb_lock));
+ return (-ENXIO);
+ }
+ /* sfp_cb() has already executed, proceed as normal */
+ ocs_unlock(&(result->cb_lock));
+ }
+
+ /* Check status */
+ if (result->status != 0) {
+ ocs_log_test(ocs, "read_transceiver_data returned status %d\n",
+ result->status);
+ rc = -EFAULT;
+ }
+
+ if (rc == 0) {
+ rc = (result->bytes_read > buf_len ? buf_len : result->bytes_read);
+ /* Copy the results back to the supplied buffer */
+ ocs_memcpy(buf, result->page_data, rc);
+ }
+
+ ocs_free(ocs, result, sizeof(ocs_mgmt_sfp_result_t));
+ return rc;
+}
+
+static int32_t
+ocs_mgmt_force_assert(ocs_t *ocs, char *name, void *buf, uint32_t buf_len, void *arg_out, uint32_t arg_out_length)
+{
+ ocs_assert(FALSE, 0);
+ return 0;
+}
+
+static void
+get_nodes_count(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+ ocs_xport_t *xport = ocs->xport;
+
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "nodes_count", "%d", xport->nodes_count);
+}
+
+static void
+get_driver_version(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+ ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "driver_version", ocs->driver_version);
+}
+
+static void
+get_desc(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+ ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "desc", ocs->desc);
+}
+
+static void
+get_fw_rev(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+ ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "fw_rev", ocs_hw_get_ptr(&ocs->hw, OCS_HW_FW_REV));
+}
+
+static void
+get_fw_rev2(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+ ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "fw_rev2", ocs_hw_get_ptr(&ocs->hw, OCS_HW_FW_REV2));
+}
+
+static void
+get_ipl(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+ ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "ipl", ocs_hw_get_ptr(&ocs->hw, OCS_HW_IPL));
+}
+
+static void
+get_hw_rev1(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+ uint32_t value;
+
+ ocs_hw_get(&ocs->hw, OCS_HW_HW_REV1, &value);
+
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "hw_rev1", "%u", value);
+}
+
+static void
+get_hw_rev2(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+ uint32_t value;
+
+ ocs_hw_get(&ocs->hw, OCS_HW_HW_REV2, &value);
+
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "hw_rev2", "%u", value);
+}
+
+static void
+get_hw_rev3(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+ uint32_t value;
+ ocs_hw_get(&ocs->hw, OCS_HW_HW_REV3, &value);
+
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "hw_rev3", "%u", value);
+}
+
+static void
+get_wwnn(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+ uint64_t *wwnn;
+
+ wwnn = ocs_hw_get_ptr(&ocs->hw, OCS_HW_WWN_NODE);
+
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "wwnn", "0x%llx", (unsigned long long)ocs_htobe64(*wwnn));
+}
+
+static void
+get_wwpn(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+ uint64_t *wwpn;
+
+ wwpn = ocs_hw_get_ptr(&ocs->hw, OCS_HW_WWN_PORT);
+
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "wwpn", "0x%llx", (unsigned long long)ocs_htobe64(*wwpn));
+}
+
+static void
+get_fcid(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+
+ if (ocs->domain && ocs->domain->attached) {
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "fc_id", "0x%06x",
+ ocs->domain->sport->fc_id);
+ } else {
+		ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "fc_id", "UNKNOWN");
+ }
+
+}
+
+static void
+get_sn(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+ uint8_t *pserial;
+ uint32_t len;
+ char sn_buf[256];
+
+ pserial = ocs_scsi_get_property_ptr(ocs, OCS_SCSI_SERIALNUMBER);
+ if (pserial) {
+		len = *pserial++;
+ strncpy(sn_buf, (char*)pserial, len);
+ sn_buf[len] = '\0';
+ ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "sn", sn_buf);
+ }
+}
+
+static void
+get_pn(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+ uint8_t *pserial;
+ uint32_t len;
+ char sn_buf[256];
+
+ pserial = ocs_scsi_get_property_ptr(ocs, OCS_SCSI_PARTNUMBER);
+ if (pserial) {
+		len = *pserial++;
+ strncpy(sn_buf, (char*)pserial, len);
+ sn_buf[len] = '\0';
+ ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "pn", sn_buf);
+ } else {
+ ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "pn", ocs->model);
+ }
+}
+
+static void
+get_sli4_intf_reg(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "sli4_intf_reg", "0x%04x",
+ ocs_config_read32(ocs, SLI4_INTF_REG));
+}
+
+static void
+get_phy_port_num(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+ char *phy_port = NULL;
+
+ phy_port = ocs_scsi_get_property_ptr(ocs, OCS_SCSI_PORTNUM);
+ if (phy_port) {
+ ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "phy_port_num", phy_port);
+ } else {
+ ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "phy_port_num", "unknown");
+ }
+}
+
+static void
+get_asic_id(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "asic_id_reg", "0x%04x",
+ ocs_config_read32(ocs, SLI4_ASIC_ID_REG));
+}
+
+static void
+get_chip_type(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+ uint32_t family;
+ uint32_t asic_id;
+ uint32_t asic_gen_num;
+ uint32_t asic_rev_num;
+ uint32_t rev_id;
+ char result_buf[80];
+ char tmp_buf[80];
+
+ family = (ocs_config_read32(ocs, SLI4_INTF_REG) & 0x00000f00) >> 8;
+ asic_id = ocs_config_read32(ocs, SLI4_ASIC_ID_REG);
+ asic_rev_num = asic_id & 0xff;
+ asic_gen_num = (asic_id & 0xff00) >> 8;
+
+ rev_id = ocs_config_read32(ocs, SLI4_PCI_CLASS_REVISION) & 0xff;
+
+ switch(family) {
+ case 0x00:
+ /* BE2 */
+ ocs_strncpy(result_buf, "BE2 A", sizeof(result_buf));
+		ocs_snprintf(tmp_buf, sizeof(tmp_buf), "%d", rev_id);
+ strcat(result_buf, tmp_buf);
+ break;
+ case 0x01:
+ /* BE3 */
+ ocs_strncpy(result_buf, "BE3", sizeof(result_buf));
+ if (rev_id >= 0x10) {
+ strcat(result_buf, "-R");
+ }
+		ocs_snprintf(tmp_buf, sizeof(tmp_buf), " %c", ((rev_id & 0xf0) >> 4) + 'A');
+		strcat(result_buf, tmp_buf);
+		ocs_snprintf(tmp_buf, sizeof(tmp_buf), "%d", rev_id & 0x0f);
+ strcat(result_buf, tmp_buf);
+ break;
+ case 0x02:
+ /* Skyhawk A0 */
+ ocs_strncpy(result_buf, "Skyhawk A0", sizeof(result_buf));
+ break;
+ case 0x0a:
+ /* Lancer A0 */
+ ocs_strncpy(result_buf, "Lancer A", sizeof(result_buf));
+		ocs_snprintf(tmp_buf, sizeof(tmp_buf), "%d", rev_id & 0x0f);
+ strcat(result_buf, tmp_buf);
+ break;
+ case 0x0b:
+ /* Lancer B0 or D0 */
+ ocs_strncpy(result_buf, "Lancer", sizeof(result_buf));
+		ocs_snprintf(tmp_buf, sizeof(tmp_buf), " %c", ((rev_id & 0xf0) >> 4) + 'A');
+		strcat(result_buf, tmp_buf);
+		ocs_snprintf(tmp_buf, sizeof(tmp_buf), "%d", rev_id & 0x0f);
+ strcat(result_buf, tmp_buf);
+ break;
+ case 0x0c:
+ ocs_strncpy(result_buf, "Lancer G6", sizeof(result_buf));
+ break;
+ case 0x0f:
+ /* Refer to ASIC_ID */
+ switch(asic_gen_num) {
+ case 0x00:
+ ocs_strncpy(result_buf, "BE2", sizeof(result_buf));
+ break;
+ case 0x03:
+ ocs_strncpy(result_buf, "BE3-R", sizeof(result_buf));
+ break;
+ case 0x04:
+ ocs_strncpy(result_buf, "Skyhawk-R", sizeof(result_buf));
+ break;
+ case 0x05:
+ ocs_strncpy(result_buf, "Corsair", sizeof(result_buf));
+ break;
+ case 0x0b:
+ ocs_strncpy(result_buf, "Lancer", sizeof(result_buf));
+ break;
+ case 0x0c:
+ ocs_strncpy(result_buf, "LancerG6", sizeof(result_buf));
+ break;
+ default:
+ ocs_strncpy(result_buf, "Unknown", sizeof(result_buf));
+ }
+ if (ocs_strcmp(result_buf, "Unknown") != 0) {
+			ocs_snprintf(tmp_buf, sizeof(tmp_buf), " %c", ((asic_rev_num & 0xf0) >> 4) + 'A');
+			strcat(result_buf, tmp_buf);
+			ocs_snprintf(tmp_buf, sizeof(tmp_buf), "%d", asic_rev_num & 0x0f);
+ strcat(result_buf, tmp_buf);
+ }
+ break;
+ default:
+ ocs_strncpy(result_buf, "Unknown", sizeof(result_buf));
+ }
+
+ ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "chip_type", result_buf);
+
+}
+
+static void
+get_pci_vendor(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "pci_vendor", "0x%04x", ocs->pci_vendor);
+}
+
+static void
+get_pci_device(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "pci_device", "0x%04x", ocs->pci_device);
+}
+
+static void
+get_pci_subsystem_vendor(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "pci_subsystem_vendor", "0x%04x", ocs->pci_subsystem_vendor);
+}
+
+static void
+get_pci_subsystem_device(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "pci_subsystem_device", "0x%04x", ocs->pci_subsystem_device);
+}
+
+static void
+get_tgt_rscn_delay(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "tgt_rscn_delay", "%lu", (unsigned long)ocs->tgt_rscn_delay_msec / 1000);
+}
+
+static void
+get_tgt_rscn_period(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "tgt_rscn_period", "%lu", (unsigned long)ocs->tgt_rscn_period_msec / 1000);
+}
+
+static void
+get_inject_drop_cmd(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "inject_drop_cmd", "%d",
+ (ocs->err_injection == INJECT_DROP_CMD ? 1:0));
+}
+
+static void
+get_inject_free_drop_cmd(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "inject_free_drop_cmd", "%d",
+ (ocs->err_injection == INJECT_FREE_DROPPED ? 1:0));
+}
+
+static void
+get_inject_drop_data(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "inject_drop_data", "%d",
+ (ocs->err_injection == INJECT_DROP_DATA ? 1:0));
+}
+
+static void
+get_inject_drop_resp(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "inject_drop_resp", "%d",
+ (ocs->err_injection == INJECT_DROP_RESP ? 1:0));
+}
+
+static void
+get_cmd_err_inject(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "cmd_err_inject", "0x%02x", ocs->cmd_err_inject);
+}
+
+static void
+get_cmd_delay_value(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "cmd_delay_value", "%lu", (unsigned long)ocs->delay_value_msec);
+}
+
+static void
+get_businfo(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+ ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "businfo", ocs->businfo);
+}
+
+static void
+get_sfp_a0(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+ uint8_t *page_data;
+ char *buf;
+ int i;
+ int32_t bytes_read;
+
+ page_data = ocs_malloc(ocs, SFP_PAGE_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
+ if (page_data == NULL) {
+ return;
+ }
+
+ buf = ocs_malloc(ocs, (SFP_PAGE_SIZE * 3) + 1, OCS_M_ZERO | OCS_M_NOWAIT);
+ if (buf == NULL) {
+ ocs_free(ocs, page_data, SFP_PAGE_SIZE);
+ return;
+ }
+
+ bytes_read = ocs_mgmt_get_sfp(ocs, 0xa0, page_data, SFP_PAGE_SIZE);
+
+ if (bytes_read <= 0) {
+ ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "sfp_a0", "(unknown)");
+ } else {
+ char *d = buf;
+ uint8_t *s = page_data;
+ int buffer_remaining = (SFP_PAGE_SIZE * 3) + 1;
+ int bytes_added;
+
+ for (i = 0; i < bytes_read; i++) {
+ bytes_added = ocs_snprintf(d, buffer_remaining, "%02x ", *s);
+ ++s;
+ d += bytes_added;
+ buffer_remaining -= bytes_added;
+ }
+ *d = '\0';
+ ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "sfp_a0", buf);
+ }
+
+ ocs_free(ocs, page_data, SFP_PAGE_SIZE);
+ ocs_free(ocs, buf, (3 * SFP_PAGE_SIZE) + 1);
+}
+
+static void
+get_sfp_a2(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+ uint8_t *page_data;
+ char *buf;
+ int i;
+ int32_t bytes_read;
+
+ page_data = ocs_malloc(ocs, SFP_PAGE_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
+ if (page_data == NULL) {
+ return;
+ }
+
+ buf = ocs_malloc(ocs, (SFP_PAGE_SIZE * 3) + 1, OCS_M_ZERO | OCS_M_NOWAIT);
+ if (buf == NULL) {
+ ocs_free(ocs, page_data, SFP_PAGE_SIZE);
+ return;
+ }
+
+ bytes_read = ocs_mgmt_get_sfp(ocs, 0xa2, page_data, SFP_PAGE_SIZE);
+
+ if (bytes_read <= 0) {
+ ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "sfp_a2", "(unknown)");
+ } else {
+ char *d = buf;
+ uint8_t *s = page_data;
+ int buffer_remaining = (SFP_PAGE_SIZE * 3) + 1;
+ int bytes_added;
+
+ for (i=0; i < bytes_read; i++) {
+ bytes_added = ocs_snprintf(d, buffer_remaining, "%02x ", *s);
+ ++s;
+ d += bytes_added;
+ buffer_remaining -= bytes_added;
+ }
+ *d = '\0';
+ ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "sfp_a2", buf);
+ }
+
+ ocs_free(ocs, page_data, SFP_PAGE_SIZE);
+ ocs_free(ocs, buf, (3 * SFP_PAGE_SIZE) + 1);
+}
+
+static void
+get_debug_mq_dump(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+
+ ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RW, "debug_mq_dump",
+ ocs_debug_is_enabled(OCS_DEBUG_ENABLE_MQ_DUMP));
+}
+
+static void
+get_debug_cq_dump(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+
+ ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RW, "debug_cq_dump",
+ ocs_debug_is_enabled(OCS_DEBUG_ENABLE_CQ_DUMP));
+}
+
+static void
+get_debug_wq_dump(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+ ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RW, "debug_wq_dump",
+ ocs_debug_is_enabled(OCS_DEBUG_ENABLE_WQ_DUMP));
+}
+
+static void
+get_debug_eq_dump(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+ ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RW, "debug_eq_dump",
+ ocs_debug_is_enabled(OCS_DEBUG_ENABLE_EQ_DUMP));
+}
+
+static void
+get_logmask(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "logmask", "0x%02x", ocs->logmask);
+
+}
+
+static void
+get_loglevel(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "loglevel", "%d", loglevel);
+
+}
+
+static void
+get_current_speed(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+ uint32_t value;
+
+ ocs_hw_get(&(ocs->hw), OCS_HW_LINK_SPEED, &value);
+
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "current_speed", "%d", value);
+}
+
+static void
+get_configured_speed(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+ uint32_t value;
+
+ ocs_hw_get(&(ocs->hw), OCS_HW_LINK_CONFIG_SPEED, &value);
+ if (value == 0) {
+ ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "configured_speed", "auto");
+ } else {
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "configured_speed", "%d", value);
+ }
+
+}
+
+static void
+get_current_topology(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+ uint32_t value;
+
+ ocs_hw_get(&(ocs->hw), OCS_HW_TOPOLOGY, &value);
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "current_topology", "%d", value);
+
+}
+
+static void
+get_configured_topology(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+ uint32_t value;
+
+ ocs_hw_get(&(ocs->hw), OCS_HW_CONFIG_TOPOLOGY, &value);
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "configured_topology", "%d", value);
+
+}
+
+static void
+get_current_link_state(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+ ocs_xport_stats_t value;
+
+ if (ocs_xport_status(ocs->xport, OCS_XPORT_PORT_STATUS, &value) == 0) {
+ if (value.value == OCS_XPORT_PORT_ONLINE) {
+ ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "current_link_state", "online");
+ } else {
+ ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "current_link_state", "offline");
+ }
+ }
+}
+
+static void
+get_configured_link_state(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+ ocs_xport_stats_t value;
+
+ if (ocs_xport_status(ocs->xport, OCS_XPORT_CONFIG_PORT_STATUS, &value) == 0) {
+ if (value.value == OCS_XPORT_PORT_ONLINE) {
+ ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "configured_link_state", "online");
+ } else {
+ ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "configured_link_state", "offline");
+ }
+ }
+}
+
+/**
+ * @brief HW link config enum to mgmt string value mapping.
+ *
+ * This structure provides a mapping from the ocs_hw_linkcfg_e
+ * enum (enum exposed for the OCS_HW_PORT_SET_LINK_CONFIG port
+ * control) to the mgmt string that is passed in by the mgmt application
+ * (elxsdkutil).
+ */
+typedef struct ocs_mgmt_linkcfg_map_s {
+ ocs_hw_linkcfg_e linkcfg;
+ const char *mgmt_str;
+} ocs_mgmt_linkcfg_map_t;
+
+static ocs_mgmt_linkcfg_map_t mgmt_linkcfg_map[] = {
+ {OCS_HW_LINKCFG_4X10G, OCS_CONFIG_LINKCFG_4X10G},
+ {OCS_HW_LINKCFG_1X40G, OCS_CONFIG_LINKCFG_1X40G},
+ {OCS_HW_LINKCFG_2X16G, OCS_CONFIG_LINKCFG_2X16G},
+ {OCS_HW_LINKCFG_4X8G, OCS_CONFIG_LINKCFG_4X8G},
+ {OCS_HW_LINKCFG_4X1G, OCS_CONFIG_LINKCFG_4X1G},
+ {OCS_HW_LINKCFG_2X10G, OCS_CONFIG_LINKCFG_2X10G},
+ {OCS_HW_LINKCFG_2X10G_2X8G, OCS_CONFIG_LINKCFG_2X10G_2X8G}};
+
+/**
+ * @brief Get the HW linkcfg enum from the mgmt config string.
+ *
+ * @param mgmt_str mgmt string value.
+ *
+ * @return Returns the HW linkcfg enum corresponding to mgmt_str.
+ */
+static ocs_hw_linkcfg_e
+ocs_hw_linkcfg_from_mgmt(const char *mgmt_str)
+{
+ uint32_t i;
+ for (i = 0; i < ARRAY_SIZE(mgmt_linkcfg_map); i++) {
+ if (ocs_strncmp(mgmt_linkcfg_map[i].mgmt_str,
+ mgmt_str, ocs_strlen(mgmt_str)) == 0) {
+ return mgmt_linkcfg_map[i].linkcfg;
+ }
+ }
+ return OCS_HW_LINKCFG_NA;
+}
+
+/**
+ * @brief Get the mgmt string value from the HW linkcfg enum.
+ *
+ * @param linkcfg HW linkcfg enum.
+ *
+ * @return Returns the mgmt string value corresponding to the given HW linkcfg.
+ */
+static const char *
+ocs_mgmt_from_hw_linkcfg(ocs_hw_linkcfg_e linkcfg)
+{
+ uint32_t i;
+ for (i = 0; i < ARRAY_SIZE(mgmt_linkcfg_map); i++) {
+ if (mgmt_linkcfg_map[i].linkcfg == linkcfg) {
+ return mgmt_linkcfg_map[i].mgmt_str;
+ }
+ }
+ return OCS_CONFIG_LINKCFG_UNKNOWN;
+}
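+
+/*
+ * Illustrative round-trip (not driver code): the two helpers above are
+ * inverses over the entries of mgmt_linkcfg_map, for example:
+ *
+ *	ocs_hw_linkcfg_from_mgmt(OCS_CONFIG_LINKCFG_2X16G) == OCS_HW_LINKCFG_2X16G
+ *	ocs_mgmt_from_hw_linkcfg(OCS_HW_LINKCFG_2X16G) == OCS_CONFIG_LINKCFG_2X16G
+ *
+ * Unrecognized inputs map to OCS_HW_LINKCFG_NA and
+ * OCS_CONFIG_LINKCFG_UNKNOWN, respectively.
+ */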
+
+/**
+ * @brief Link configuration callback argument
+ */
+typedef struct ocs_mgmt_linkcfg_arg_s {
+ ocs_sem_t semaphore;
+ int32_t status;
+ ocs_hw_linkcfg_e linkcfg;
+} ocs_mgmt_linkcfg_arg_t;
+
+/**
+ * @brief Get linkcfg config value
+ *
+ * @param ocs Pointer to the ocs structure.
+ * @param name Not used.
+ * @param textbuf The textbuf to which the result is written.
+ *
+ * @return None.
+ */
+static void
+get_linkcfg(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+ const char *linkcfg_str = NULL;
+ uint32_t value;
+ ocs_hw_linkcfg_e linkcfg;
+ ocs_hw_get(&ocs->hw, OCS_HW_LINKCFG, &value);
+ linkcfg = (ocs_hw_linkcfg_e)value;
+ linkcfg_str = ocs_mgmt_from_hw_linkcfg(linkcfg);
+ ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "linkcfg", linkcfg_str);
+}
+
+/**
+ * @brief Get requested WWNN config value
+ *
+ * @param ocs Pointer to the ocs structure.
+ * @param name Not used.
+ * @param textbuf The textbuf to which the result is written.
+ *
+ * @return None.
+ */
+static void
+get_req_wwnn(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+ ocs_xport_t *xport = ocs->xport;
+
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "requested_wwnn", "0x%llx", (unsigned long long)xport->req_wwnn);
+}
+
+/**
+ * @brief Get requested WWPN config value
+ *
+ * @param ocs Pointer to the ocs structure.
+ * @param name Not used.
+ * @param textbuf The textbuf to which the result is written.
+ *
+ * @return None.
+ */
+static void
+get_req_wwpn(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+ ocs_xport_t *xport = ocs->xport;
+
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "requested_wwpn", "0x%llx", (unsigned long long)xport->req_wwpn);
+}
+
+/**
+ * @brief Get requested nodedb_mask config value
+ *
+ * @param ocs Pointer to the ocs structure.
+ * @param name Not used.
+ * @param textbuf The textbuf to which the result is written.
+ *
+ * @return None.
+ */
+static void
+get_nodedb_mask(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "nodedb_mask", "0x%08x", ocs->nodedb_mask);
+}
+
+/**
+ * @brief Set requested WWNN value.
+ *
+ * @param ocs Pointer to the ocs structure.
+ * @param name Not used.
+ * @param value Value to which the requested WWNN is set.
+ *
+ * @return Returns 0 on success.
+ */
+
+int
+set_req_wwnn(ocs_t *ocs, char *name, char *value)
+{
+ int rc;
+ uint64_t wwnn;
+
+ if (ocs_strcasecmp(value, "default") == 0) {
+ wwnn = 0;
+	} else if (parse_wwn(value, &wwnn) != 0) {
+ ocs_log_test(ocs, "Invalid WWNN: %s\n", value);
+ return 1;
+ }
+
+ rc = ocs_xport_control(ocs->xport, OCS_XPORT_WWNN_SET, wwnn);
+
+	if (rc) {
+ ocs_log_test(ocs, "OCS_XPORT_WWNN_SET failed: %d\n", rc);
+ return rc;
+ }
+
+ rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_OFFLINE);
+ if (rc) {
+ ocs_log_test(ocs, "port offline failed : %d\n", rc);
+ }
+
+ rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_ONLINE);
+ if (rc) {
+ ocs_log_test(ocs, "port online failed : %d\n", rc);
+ }
+
+ return rc;
+}
+
+/**
+ * @brief Set requested WWPN value.
+ *
+ * @param ocs Pointer to the ocs structure.
+ * @param name Not used.
+ * @param value Value to which the requested WWPN is set.
+ *
+ * @return Returns 0 on success.
+ */
+
+int
+set_req_wwpn(ocs_t *ocs, char *name, char *value)
+{
+ int rc;
+ uint64_t wwpn;
+
+ if (ocs_strcasecmp(value, "default") == 0) {
+ wwpn = 0;
+	} else if (parse_wwn(value, &wwpn) != 0) {
+ ocs_log_test(ocs, "Invalid WWPN: %s\n", value);
+ return 1;
+ }
+
+ rc = ocs_xport_control(ocs->xport, OCS_XPORT_WWPN_SET, wwpn);
+
+	if (rc) {
+ ocs_log_test(ocs, "OCS_XPORT_WWPN_SET failed: %d\n", rc);
+ return rc;
+ }
+
+ rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_OFFLINE);
+ if (rc) {
+ ocs_log_test(ocs, "port offline failed : %d\n", rc);
+ }
+
+ rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_ONLINE);
+ if (rc) {
+ ocs_log_test(ocs, "port online failed : %d\n", rc);
+ }
+
+ return rc;
+}
+
+/**
+ * @brief Set node debug mask value
+ *
+ * @param ocs Pointer to the ocs structure.
+ * @param name Not used.
+ * @param value Value to which the nodedb_mask is set.
+ *
+ * @return Returns 0 on success.
+ */
+static int
+set_nodedb_mask(ocs_t *ocs, char *name, char *value)
+{
+ ocs->nodedb_mask = ocs_strtoul(value, 0, 0);
+ return 0;
+}
+
+/**
+ * @brief Set linkcfg config value.
+ *
+ * @param ocs Pointer to the ocs structure.
+ * @param name Not used.
+ * @param value Value to which the linkcfg is set.
+ *
+ * @return Returns 0 on success.
+ */
+static int
+set_linkcfg(ocs_t *ocs, char *name, char *value)
+{
+ ocs_hw_linkcfg_e linkcfg;
+ ocs_mgmt_linkcfg_arg_t cb_arg;
+ ocs_hw_rtn_e status;
+
+ ocs_sem_init(&cb_arg.semaphore, 0, "mgmt_linkcfg");
+
+ /* translate mgmt linkcfg string to HW linkcfg enum */
+ linkcfg = ocs_hw_linkcfg_from_mgmt(value);
+
+ /* set HW linkcfg */
+ status = ocs_hw_port_control(&ocs->hw, OCS_HW_PORT_SET_LINK_CONFIG,
+ (uintptr_t)linkcfg, ocs_mgmt_linkcfg_cb, &cb_arg);
+ if (status) {
+ ocs_log_test(ocs, "ocs_hw_set_linkcfg failed\n");
+ return -1;
+ }
+
+ if (ocs_sem_p(&cb_arg.semaphore, OCS_SEM_FOREVER)) {
+ ocs_log_err(ocs, "ocs_sem_p failed\n");
+ return -1;
+ }
+
+ if (cb_arg.status) {
+ ocs_log_test(ocs, "failed to set linkcfg from HW status=%d\n",
+ cb_arg.status);
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * @brief Linkcfg callback
+ *
+ * @param status Result of the linkcfg get/set operation.
+ * @param value Resulting linkcfg value.
+ * @param arg Callback argument.
+ *
+ * @return None.
+ */
+static void
+ocs_mgmt_linkcfg_cb(int32_t status, uintptr_t value, void *arg)
+{
+ ocs_mgmt_linkcfg_arg_t *cb_arg = (ocs_mgmt_linkcfg_arg_t *)arg;
+ cb_arg->status = status;
+ cb_arg->linkcfg = (ocs_hw_linkcfg_e)value;
+ ocs_sem_v(&cb_arg->semaphore);
+}
+
+static int
+set_debug_mq_dump(ocs_t *ocs, char *name, char *value)
+{
+ int result;
+
+ if (ocs_strcasecmp(value, "false") == 0) {
+ ocs_debug_disable(OCS_DEBUG_ENABLE_MQ_DUMP);
+ result = 0;
+ } else if (ocs_strcasecmp(value, "true") == 0) {
+ ocs_debug_enable(OCS_DEBUG_ENABLE_MQ_DUMP);
+ result = 0;
+ } else {
+ result = -1;
+ }
+
+ return result;
+}
+
+static int
+set_debug_cq_dump(ocs_t *ocs, char *name, char *value)
+{
+ int result;
+
+ if (ocs_strcasecmp(value, "false") == 0) {
+ ocs_debug_disable(OCS_DEBUG_ENABLE_CQ_DUMP);
+ result = 0;
+ } else if (ocs_strcasecmp(value, "true") == 0) {
+ ocs_debug_enable(OCS_DEBUG_ENABLE_CQ_DUMP);
+ result = 0;
+ } else {
+ result = -1;
+ }
+
+ return result;
+}
+
+static int
+set_debug_wq_dump(ocs_t *ocs, char *name, char *value)
+{
+ int result;
+
+ if (ocs_strcasecmp(value, "false") == 0) {
+ ocs_debug_disable(OCS_DEBUG_ENABLE_WQ_DUMP);
+ result = 0;
+ } else if (ocs_strcasecmp(value, "true") == 0) {
+ ocs_debug_enable(OCS_DEBUG_ENABLE_WQ_DUMP);
+ result = 0;
+ } else {
+ result = -1;
+ }
+
+ return result;
+}
+
+static int
+set_debug_eq_dump(ocs_t *ocs, char *name, char *value)
+{
+ int result;
+
+ if (ocs_strcasecmp(value, "false") == 0) {
+ ocs_debug_disable(OCS_DEBUG_ENABLE_EQ_DUMP);
+ result = 0;
+ } else if (ocs_strcasecmp(value, "true") == 0) {
+ ocs_debug_enable(OCS_DEBUG_ENABLE_EQ_DUMP);
+ result = 0;
+ } else {
+ result = -1;
+ }
+
+ return result;
+}
+
+static int
+set_logmask(ocs_t *ocs, char *name, char *value)
+{
+
+ ocs->logmask = ocs_strtoul(value, NULL, 0);
+
+ return 0;
+}
+
+static int
+set_loglevel(ocs_t *ocs, char *name, char *value)
+{
+
+ loglevel = ocs_strtoul(value, NULL, 0);
+
+ return 0;
+}
+
+int
+set_configured_speed(ocs_t *ocs, char *name, char *value)
+{
+ int result = 0;
+ ocs_hw_rtn_e hw_rc;
+ int xport_rc;
+ uint32_t spd;
+
+ spd = ocs_strtoul(value, NULL, 0);
+
+ if ((spd != 0) && (spd != 2000) && (spd != 4000) &&
+ (spd != 8000) && (spd != 16000) && (spd != 32000)) {
+ ocs_log_test(ocs, "unsupported speed %d\n", spd);
+ return 1;
+ }
+
+ ocs_log_debug(ocs, "Taking port offline\n");
+ xport_rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_OFFLINE);
+ if (xport_rc != 0) {
+ ocs_log_test(ocs, "Port offline failed\n");
+ result = 1;
+ } else {
+ ocs_log_debug(ocs, "Setting port to speed %d\n", spd);
+ hw_rc = ocs_hw_set(&ocs->hw, OCS_HW_LINK_SPEED, spd);
+ if (hw_rc != OCS_HW_RTN_SUCCESS) {
+ ocs_log_test(ocs, "Speed set failed\n");
+ result = 1;
+ }
+
+ /* If we failed to set the speed we still want to try to bring
+ * the port back online */
+
+ ocs_log_debug(ocs, "Bringing port online\n");
+ xport_rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_ONLINE);
+ if (xport_rc != 0) {
+ result = 1;
+ }
+ }
+
+ return result;
+}
+
+int
+set_configured_topology(ocs_t *ocs, char *name, char *value)
+{
+ int result = 0;
+ ocs_hw_rtn_e hw_rc;
+ int xport_rc;
+ uint32_t topo;
+
+ topo = ocs_strtoul(value, NULL, 0);
+ if (topo >= OCS_HW_TOPOLOGY_NONE) {
+ return 1;
+ }
+
+ ocs_log_debug(ocs, "Taking port offline\n");
+ xport_rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_OFFLINE);
+ if (xport_rc != 0) {
+ ocs_log_test(ocs, "Port offline failed\n");
+ result = 1;
+ } else {
+ ocs_log_debug(ocs, "Setting port to topology %d\n", topo);
+ hw_rc = ocs_hw_set(&ocs->hw, OCS_HW_TOPOLOGY, topo);
+ if (hw_rc != OCS_HW_RTN_SUCCESS) {
+ ocs_log_test(ocs, "Topology set failed\n");
+ result = 1;
+ }
+
+ /* If we failed to set the topology we still want to try to bring
+ * the port back online */
+
+ ocs_log_debug(ocs, "Bringing port online\n");
+ xport_rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_ONLINE);
+ if (xport_rc != 0) {
+ result = 1;
+ }
+ }
+
+ return result;
+}
+
+static int
+set_configured_link_state(ocs_t *ocs, char *name, char *value)
+{
+ int result = 0;
+ int xport_rc;
+
+ if (ocs_strcasecmp(value, "offline") == 0) {
+ ocs_log_debug(ocs, "Setting port to %s\n", value);
+ xport_rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_OFFLINE);
+ if (xport_rc != 0) {
+ ocs_log_test(ocs, "Setting port to offline failed\n");
+ result = -1;
+ }
+ } else if (ocs_strcasecmp(value, "online") == 0) {
+ ocs_log_debug(ocs, "Setting port to %s\n", value);
+ xport_rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_ONLINE);
+ if (xport_rc != 0) {
+ ocs_log_test(ocs, "Setting port to online failed\n");
+ result = -1;
+ }
+ } else {
+ ocs_log_test(ocs, "Unsupported link state \"%s\"\n", value);
+ result = -1;
+ }
+
+ return result;
+}
+
+typedef struct ocs_mgmt_get_port_protocol_result {
+ ocs_sem_t semaphore;
+ int32_t status;
+ ocs_hw_port_protocol_e port_protocol;
+} ocs_mgmt_get_port_protocol_result_t;
+
+
+static void
+ocs_mgmt_get_port_protocol_cb(int32_t status,
+ ocs_hw_port_protocol_e port_protocol,
+ void *arg)
+{
+ ocs_mgmt_get_port_protocol_result_t *result = arg;
+
+ result->status = status;
+ result->port_protocol = port_protocol;
+
+ ocs_sem_v(&(result->semaphore));
+}
+
+static void
+get_port_protocol(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+ ocs_mgmt_get_port_protocol_result_t result;
+ uint8_t bus;
+ uint8_t dev;
+ uint8_t func;
+
+ ocs_sem_init(&(result.semaphore), 0, "get_port_protocol");
+
+ ocs_get_bus_dev_func(ocs, &bus, &dev, &func);
+
+	if (ocs_hw_get_port_protocol(&ocs->hw, func, ocs_mgmt_get_port_protocol_cb, &result) == OCS_HW_RTN_SUCCESS) {
+		if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) {
+			/* Undefined failure */
+			ocs_log_err(ocs, "ocs_sem_p failed\n");
+			return;
+		}
+ if (result.status == 0) {
+ switch (result.port_protocol) {
+ case OCS_HW_PORT_PROTOCOL_ISCSI:
+ ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "port_protocol", "iSCSI");
+ break;
+ case OCS_HW_PORT_PROTOCOL_FCOE:
+ ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "port_protocol", "FCoE");
+ break;
+ case OCS_HW_PORT_PROTOCOL_FC:
+ ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "port_protocol", "FC");
+ break;
+ case OCS_HW_PORT_PROTOCOL_OTHER:
+ ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "port_protocol", "Other");
+ break;
+ }
+ } else {
+			ocs_log_test(ocs, "getting port protocol status 0x%x\n", result.status);
+ ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "port_protocol", "Unknown");
+ }
+ }
+}
+
+typedef struct ocs_mgmt_set_port_protocol_result {
+ ocs_sem_t semaphore;
+ int32_t status;
+} ocs_mgmt_set_port_protocol_result_t;
+
+static void
+ocs_mgmt_set_port_protocol_cb(int32_t status,
+ void *arg)
+{
+	ocs_mgmt_set_port_protocol_result_t *result = arg;
+
+ result->status = status;
+
+ ocs_sem_v(&(result->semaphore));
+}
+
+/**
+ * @brief Set port protocol
+ * @par Description
+ * This is a management action handler to set the current
+ * port protocol. Input value should be one of iSCSI,
+ * FC, or FCoE.
+ *
+ * @param ocs Pointer to the ocs structure.
+ * @param name Name of the action being performed.
+ * @param value The value to be assigned
+ *
+ * @return Returns 0 on success, non-zero on failure.
+ */
+static int32_t
+set_port_protocol(ocs_t *ocs, char *name, char *value)
+{
+ ocs_mgmt_set_port_protocol_result_t result;
+ int32_t rc = 0;
+ ocs_hw_port_protocol_e new_protocol;
+ uint8_t bus;
+ uint8_t dev;
+ uint8_t func;
+
+ ocs_get_bus_dev_func(ocs, &bus, &dev, &func);
+
+ ocs_sem_init(&(result.semaphore), 0, "set_port_protocol");
+
+ if (ocs_strcasecmp(value, "iscsi") == 0) {
+ new_protocol = OCS_HW_PORT_PROTOCOL_ISCSI;
+ } else if (ocs_strcasecmp(value, "fc") == 0) {
+ new_protocol = OCS_HW_PORT_PROTOCOL_FC;
+ } else if (ocs_strcasecmp(value, "fcoe") == 0) {
+ new_protocol = OCS_HW_PORT_PROTOCOL_FCOE;
+ } else {
+ return -1;
+ }
+
+ rc = ocs_hw_set_port_protocol(&ocs->hw, new_protocol, func,
+ ocs_mgmt_set_port_protocol_cb, &result);
+ if (rc == OCS_HW_RTN_SUCCESS) {
+		if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) {
+			/* Undefined failure */
+			ocs_log_err(ocs, "ocs_sem_p failed\n");
+			return -ENXIO;
+		}
+ if (result.status == 0) {
+ /* Success. */
+ rc = 0;
+ } else {
+ rc = -1;
+			ocs_log_test(ocs, "setting port protocol status 0x%x\n",
+ result.status);
+ }
+ }
+
+ return rc;
+}
+
+typedef struct ocs_mgmt_get_profile_list_result_s {
+ ocs_sem_t semaphore;
+ int32_t status;
+ ocs_hw_profile_list_t *list;
+} ocs_mgmt_get_profile_list_result_t;
+
+static void
+ocs_mgmt_get_profile_list_cb(int32_t status, ocs_hw_profile_list_t *list, void *ul_arg)
+{
+ ocs_mgmt_get_profile_list_result_t *result = ul_arg;
+
+ result->status = status;
+ result->list = list;
+
+ ocs_sem_v(&(result->semaphore));
+}
+
+/**
+ * @brief Get list of profiles
+ * @par Description
+ * This is a management action handler to get the list of
+ * profiles supported by the SLI port. Although the spec says
+ * that all SLI platforms support this, only Skyhawk actually
+ * has a useful implementation.
+ *
+ * @param ocs Pointer to the ocs structure.
+ * @param name Name of the action being performed.
+ * @param textbuf Pointer to an ocs_textbuf, which is used to return the results.
+ *
+ * @return none
+ */
+static void
+get_profile_list(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+ ocs_mgmt_get_profile_list_result_t result;
+
+ ocs_sem_init(&(result.semaphore), 0, "get_profile_list");
+
+	if (ocs_hw_get_profile_list(&ocs->hw, ocs_mgmt_get_profile_list_cb, &result) == OCS_HW_RTN_SUCCESS) {
+		if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) {
+			/* Undefined failure */
+			ocs_log_err(ocs, "ocs_sem_p failed\n");
+			return;
+		}
+ if (result.status == 0) {
+ /* Success. */
+#define MAX_LINE_SIZE 520
+#define BUFFER_SIZE (MAX_LINE_SIZE * 40)
+ char *result_buf;
+ char result_line[MAX_LINE_SIZE];
+ uint32_t bytes_left;
+ uint32_t i;
+
+			result_buf = ocs_malloc(ocs, BUFFER_SIZE, OCS_M_ZERO);
+			if (result_buf == NULL) {
+				ocs_free(ocs, result.list, sizeof(ocs_hw_profile_list_t));
+				return;
+			}
+			bytes_left = BUFFER_SIZE;
+
+			for (i = 0; i < result.list->num_descriptors; i++) {
+				ocs_snprintf(result_line, sizeof(result_line), "0x%02x:%s\n",
+					result.list->descriptors[i].profile_id,
+					result.list->descriptors[i].profile_description);
+				if (strlen(result_line) < bytes_left) {
+					strcat(result_buf, result_line);
+					bytes_left -= strlen(result_line);
+				}
+			}
+
+ ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "profile_list", result_buf);
+
+ ocs_free(ocs, result_buf, BUFFER_SIZE);
+ ocs_free(ocs, result.list, sizeof(ocs_hw_profile_list_t));
+ } else {
+ ocs_log_test(ocs, "getting profile list status 0x%x\n", result.status);
+ }
+ }
+}
+
+typedef struct ocs_mgmt_get_active_profile_result {
+ ocs_sem_t semaphore;
+ int32_t status;
+ uint32_t active_profile_id;
+} ocs_mgmt_get_active_profile_result_t;
+
+static void
+ocs_mgmt_get_active_profile_cb(int32_t status, uint32_t active_profile, void *ul_arg)
+{
+ ocs_mgmt_get_active_profile_result_t *result = ul_arg;
+
+ result->status = status;
+ result->active_profile_id = active_profile;
+
+ ocs_sem_v(&(result->semaphore));
+}
+
+#define MAX_PROFILE_LENGTH 5
+
+/**
+ * @brief Get active profile
+ * @par Description
+ * This is a management action handler to get the currently
+ * active profile for an SLI port. Although the spec says that
+ * all SLI platforms support this, only Skyhawk actually has a
+ * useful implementation.
+ *
+ * @param ocs Pointer to the ocs structure.
+ * @param name Name of the action being performed.
+ * @param textbuf Pointer to an ocs_textbuf, which is used to return the results.
+ *
+ * @return none
+ */
+static void
+get_active_profile(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+ char result_string[MAX_PROFILE_LENGTH];
+ ocs_mgmt_get_active_profile_result_t result;
+
+ ocs_sem_init(&(result.semaphore), 0, "get_active_profile");
+
+	if (ocs_hw_get_active_profile(&ocs->hw, ocs_mgmt_get_active_profile_cb, &result) == OCS_HW_RTN_SUCCESS) {
+		if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) {
+			/* Undefined failure */
+			ocs_log_err(ocs, "ocs_sem_p failed\n");
+			return;
+		}
+ if (result.status == 0) {
+ /* Success. */
+			ocs_snprintf(result_string, sizeof(result_string), "0x%02x", result.active_profile_id);
+ ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "active_profile", result_string);
+ } else {
+ ocs_log_test(ocs, "getting active profile status 0x%x\n", result.status);
+ }
+ }
+}
+
+typedef struct ocs_mgmt_set_active_profile_result {
+ ocs_sem_t semaphore;
+ int32_t status;
+} ocs_mgmt_set_active_profile_result_t;
+
+
+static void
+ocs_mgmt_set_active_profile_cb(int32_t status, void *ul_arg)
+{
+	ocs_mgmt_set_active_profile_result_t *result = ul_arg;
+
+ result->status = status;
+
+ ocs_sem_v(&(result->semaphore));
+}
+
+/**
+ * @brief Set active profile
+ * @par Description
+ * This is a management action handler to set the currently
+ * active profile for an SLI port. Although the spec says that
+ * all SLI platforms support this, only Skyhawk actually has a
+ * useful implementation.
+ *
+ * @param ocs Pointer to the ocs structure.
+ * @param name Name of the action being performed.
+ * @param value Requested new value of the property.
+ *
+ * @return Returns 0 on success, non-zero on failure.
+ */
+static int32_t
+set_active_profile(ocs_t *ocs, char *name, char *value)
+{
+ ocs_mgmt_set_active_profile_result_t result;
+ int32_t rc = 0;
+ int32_t new_profile;
+
+ new_profile = ocs_strtoul(value, NULL, 0);
+
+ ocs_sem_init(&(result.semaphore), 0, "set_active_profile");
+
+ rc = ocs_hw_set_active_profile(&ocs->hw, ocs_mgmt_set_active_profile_cb, new_profile, &result);
+ if (rc == OCS_HW_RTN_SUCCESS) {
+		if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) {
+			/* Undefined failure */
+			ocs_log_err(ocs, "ocs_sem_p failed\n");
+			return -ENXIO;
+		}
+ if (result.status == 0) {
+ /* Success. */
+ rc = 0;
+ } else {
+ rc = -1;
+ ocs_log_test(ocs, "setting active profile status 0x%x\n", result.status);
+ }
+ }
+
+ return rc;
+}
+
+typedef struct ocs_mgmt_get_nvparms_result {
+ ocs_sem_t semaphore;
+ int32_t status;
+ uint8_t wwpn[8];
+ uint8_t wwnn[8];
+ uint8_t hard_alpa;
+ uint32_t preferred_d_id;
+} ocs_mgmt_get_nvparms_result_t;
+
+static void
+ocs_mgmt_get_nvparms_cb(int32_t status, uint8_t *wwpn, uint8_t *wwnn, uint8_t hard_alpa,
+ uint32_t preferred_d_id, void *ul_arg)
+{
+ ocs_mgmt_get_nvparms_result_t *result = ul_arg;
+
+ result->status = status;
+ ocs_memcpy(result->wwpn, wwpn, sizeof(result->wwpn));
+ ocs_memcpy(result->wwnn, wwnn, sizeof(result->wwnn));
+ result->hard_alpa = hard_alpa;
+ result->preferred_d_id = preferred_d_id;
+
+ ocs_sem_v(&(result->semaphore));
+}
+
+/**
+ * @brief Get non-volatile WWPN
+ * @par Description
+ * Reads the WWPN stored in the adapter's non-volatile parameters and
+ * emits it as a colon-separated hex string.
+ *
+ * @param ocs Pointer to the ocs structure.
+ * @param name Name of the action being performed.
+ * @param textbuf Pointer to an ocs_textbuf, which is used to return the results.
+ *
+ * @return none
+ */
+static void
+get_nv_wwpn(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+ char result_string[24];
+ static ocs_mgmt_get_nvparms_result_t result;
+
+ ocs_sem_init(&(result.semaphore), 0, "get_nv_wwpn");
+
+	if (ocs_hw_get_nvparms(&ocs->hw, ocs_mgmt_get_nvparms_cb, &result) == OCS_HW_RTN_SUCCESS) {
+ if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) {
+ /* Undefined failure */
+ ocs_log_err(ocs, "ocs_sem_p failed\n");
+ return;
+ }
+ if (result.status == 0) {
+ /* Success. Copy wwpn from result struct to result string */
+			ocs_snprintf(result_string, sizeof(result_string), "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
+ result.wwpn[0], result.wwpn[1], result.wwpn[2],
+ result.wwpn[3], result.wwpn[4], result.wwpn[5],
+ result.wwpn[6], result.wwpn[7]);
+ ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "nv_wwpn", result_string);
+ } else {
+ ocs_log_test(ocs, "getting wwpn status 0x%x\n", result.status);
+ }
+ }
+}
+
+/**
+ * @brief Get non-volatile WWNN
+ * @par Description
+ * Reads the WWNN stored in the adapter's non-volatile parameters and
+ * emits it as a colon-separated hex string.
+ *
+ * @param ocs Pointer to the ocs structure.
+ * @param name Name of the action being performed.
+ * @param textbuf Pointer to an ocs_textbuf, which is used to return the results.
+ *
+ * @return none
+ */
+static void
+get_nv_wwnn(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+ char result_string[24];
+ static ocs_mgmt_get_nvparms_result_t result;
+
+ ocs_sem_init(&(result.semaphore), 0, "get_nv_wwnn");
+
+	if (ocs_hw_get_nvparms(&ocs->hw, ocs_mgmt_get_nvparms_cb, &result) == OCS_HW_RTN_SUCCESS) {
+ if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) {
+ /* Undefined failure */
+ ocs_log_err(ocs, "ocs_sem_p failed\n");
+ return;
+ }
+ if (result.status == 0) {
+ /* Success. Copy wwnn from result struct to result string */
+ ocs_snprintf(result_string, sizeof(result_string), "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
+ result.wwnn[0], result.wwnn[1], result.wwnn[2],
+ result.wwnn[3], result.wwnn[4], result.wwnn[5],
+ result.wwnn[6], result.wwnn[7]);
+ ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "nv_wwnn", result_string);
+ } else {
+ ocs_log_test(ocs, "getting wwnn status 0x%x\n", result.status);
+ }
+ }
+}
+
+/**
+ * @brief Get accumulated node abort counts
+ * @par Description Get the sum of the abort counts across all nodes.
+ *
+ * @param ocs Pointer to the ocs structure.
+ * @param name Name of the action being performed.
+ * @param textbuf Pointer to an ocs_textbuf, which is used to return the results.
+ *
+ * @return None.
+ */
+static void
+get_node_abort_cnt(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
+{
+ uint32_t abort_counts = 0;
+ ocs_domain_t *domain;
+ ocs_sport_t *sport;
+ ocs_node_t *node;
+
+ if (ocs_device_lock_try(ocs) != TRUE) {
+ /* Didn't get the lock */
+ return;
+ }
+
+ /* Here the Device lock is held */
+ ocs_list_foreach(&ocs->domain_list, domain) {
+ if (ocs_domain_lock_try(domain) != TRUE) {
+ /* Didn't get the lock */
+ ocs_device_unlock(ocs);
+ return;
+ }
+
+ /* Here the Domain lock is held */
+ ocs_list_foreach(&domain->sport_list, sport) {
+ if (ocs_sport_lock_try(sport) != TRUE) {
+ /* Didn't get the lock */
+ ocs_domain_unlock(domain);
+ ocs_device_unlock(ocs);
+ return;
+ }
+
+ /* Here the sport lock is held */
+ ocs_list_foreach(&sport->node_list, node) {
+ abort_counts += node->abort_cnt;
+ }
+
+ ocs_sport_unlock(sport);
+ }
+
+ ocs_domain_unlock(domain);
+ }
+
+ ocs_device_unlock(ocs);
+
+	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "node_abort_cnt", "%d", abort_counts);
+}
+
+typedef struct ocs_mgmt_set_nvparms_result {
+ ocs_sem_t semaphore;
+ int32_t status;
+} ocs_mgmt_set_nvparms_result_t;
+
+
+static void
+ocs_mgmt_set_nvparms_cb(int32_t status, void *ul_arg)
+{
+	ocs_mgmt_set_nvparms_result_t *result = ul_arg;
+
+ result->status = status;
+
+ ocs_sem_v(&(result->semaphore));
+}
+
+/**
+ * @brief Set wwn
+ * @par Description Sets the Non-volatile worldwide names,
+ * if provided.
+ *
+ * @param ocs Pointer to the ocs structure.
+ * @param name Name of the action being performed.
+ * @param wwn_p Requested new WWN values.
+ *
+ * @return Returns 0 on success, non-zero on failure.
+ */
+static int32_t
+set_nv_wwn(ocs_t *ocs, char *name, char *wwn_p)
+{
+ ocs_mgmt_get_nvparms_result_t result;
+ uint8_t new_wwpn[8];
+ uint8_t new_wwnn[8];
+ char *wwpn_p = NULL;
+ char *wwnn_p = NULL;
+ int32_t rc = -1;
+ int wwpn;
+ int wwnn;
+ int i;
+
+ /* This is a read-modify-write operation, so first we have to read
+ * the current values
+ */
+ ocs_sem_init(&(result.semaphore), 0, "set_nv_wwn1");
+
+ rc = ocs_hw_get_nvparms(&ocs->hw, ocs_mgmt_get_nvparms_cb, &result);
+
+ if (rc == OCS_HW_RTN_SUCCESS) {
+ if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) {
+ /* Undefined failure */
+ ocs_log_err(ocs, "ocs_sem_p failed\n");
+ return -ENXIO;
+ }
+ if (result.status != 0) {
+ ocs_log_test(ocs, "getting nvparms status 0x%x\n", result.status);
+ return -1;
+ }
+ }
+
+ /* wwn_p contains wwpn_p@wwnn_p values */
+ if (wwn_p != NULL) {
+ wwpn_p = ocs_strsep(&wwn_p, "@");
+ wwnn_p = wwn_p;
+ }
+
+	wwpn = (wwpn_p != NULL) ? ocs_strcmp(wwpn_p, "NA") : 0;
+	wwnn = (wwnn_p != NULL) ? ocs_strcmp(wwnn_p, "NA") : 0;
+
+ /* Parse the new WWPN */
+ if ((wwpn_p != NULL) && (wwpn != 0)) {
+ if (ocs_sscanf(wwpn_p, "%2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx",
+ &(new_wwpn[0]), &(new_wwpn[1]), &(new_wwpn[2]),
+ &(new_wwpn[3]), &(new_wwpn[4]), &(new_wwpn[5]),
+ &(new_wwpn[6]), &(new_wwpn[7])) != 8) {
+ ocs_log_test(ocs, "can't parse WWPN %s\n", wwpn_p);
+ return -1;
+ }
+ }
+
+ /* Parse the new WWNN */
+ if ((wwnn_p != NULL) && (wwnn != 0 )) {
+ if (ocs_sscanf(wwnn_p, "%2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx",
+ &(new_wwnn[0]), &(new_wwnn[1]), &(new_wwnn[2]),
+ &(new_wwnn[3]), &(new_wwnn[4]), &(new_wwnn[5]),
+ &(new_wwnn[6]), &(new_wwnn[7])) != 8) {
+ ocs_log_test(ocs, "can't parse WWNN %s\n", wwnn_p);
+ return -1;
+ }
+ }
+
+ for (i = 0; i < 8; i++) {
+ /* Use active wwpn, if new one is not provided */
+ if (wwpn == 0) {
+ new_wwpn[i] = result.wwpn[i];
+ }
+
+ /* Use active wwnn, if new one is not provided */
+ if (wwnn == 0) {
+ new_wwnn[i] = result.wwnn[i];
+ }
+ }
+
+ /* Modify the nv_wwnn and nv_wwpn, then write it back */
+ ocs_sem_init(&(result.semaphore), 0, "set_nv_wwn2");
+
+ rc = ocs_hw_set_nvparms(&ocs->hw, ocs_mgmt_set_nvparms_cb, new_wwpn,
+ new_wwnn, result.hard_alpa, result.preferred_d_id,
+ &result);
+ if (rc == OCS_HW_RTN_SUCCESS) {
+ if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) {
+ /* Undefined failure */
+ ocs_log_err(ocs, "ocs_sem_p failed\n");
+ return -ENXIO;
+ }
+ if (result.status != 0) {
+ ocs_log_test(ocs, "setting wwn status 0x%x\n", result.status);
+ return -1;
+ }
+ }
+
+ return rc;
+}
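+
+/*
+ * Illustrative inputs for set_nv_wwn() (hypothetical WWN values):
+ *
+ *	"10:00:00:90:fa:94:2d:8a@20:00:00:90:fa:94:2d:8a"  - set both WWPN and WWNN
+ *	"10:00:00:90:fa:94:2d:8a@NA"                       - set WWPN, keep active WWNN
+ *	"NA@20:00:00:90:fa:94:2d:8a"                       - keep active WWPN, set WWNN
+ */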
+
+static int
+set_tgt_rscn_delay(ocs_t *ocs, char *name, char *value)
+{
+ ocs->tgt_rscn_delay_msec = ocs_strtoul(value, NULL, 0) * 1000;
+ ocs_log_debug(ocs, "mgmt set: %s %s\n", name, value);
+ return 0;
+}
+
+static int
+set_tgt_rscn_period(ocs_t *ocs, char *name, char *value)
+{
+ ocs->tgt_rscn_period_msec = ocs_strtoul(value, NULL, 0) * 1000;
+ ocs_log_debug(ocs, "mgmt set: %s %s\n", name, value);
+ return 0;
+}
+
+static int
+set_inject_drop_cmd(ocs_t *ocs, char *name, char *value)
+{
+ ocs->err_injection = (ocs_strtoul(value, NULL, 0) == 0 ? NO_ERR_INJECT : INJECT_DROP_CMD);
+ ocs_log_debug(ocs, "mgmt set: %s %s\n", name, value);
+ return 0;
+}
+
+static int
+set_inject_free_drop_cmd(ocs_t *ocs, char *name, char *value)
+{
+ ocs->err_injection = (ocs_strtoul(value, NULL, 0) == 0 ? NO_ERR_INJECT : INJECT_FREE_DROPPED);
+ ocs_log_debug(ocs, "mgmt set: %s %s\n", name, value);
+ return 0;
+}
+
+static int
+set_inject_drop_data(ocs_t *ocs, char *name, char *value)
+{
+ ocs->err_injection = (ocs_strtoul(value, NULL, 0) == 0 ? NO_ERR_INJECT : INJECT_DROP_DATA);
+ ocs_log_debug(ocs, "mgmt set: %s %s\n", name, value);
+ return 0;
+}
+
+static int
+set_inject_drop_resp(ocs_t *ocs, char *name, char *value)
+{
+ ocs->err_injection = (ocs_strtoul(value, NULL, 0) == 0 ? NO_ERR_INJECT : INJECT_DROP_RESP);
+ ocs_log_debug(ocs, "mgmt set: %s %s\n", name, value);
+ return 0;
+}
+
+static int
+set_cmd_err_inject(ocs_t *ocs, char *name, char *value)
+{
+ ocs->cmd_err_inject = ocs_strtoul(value, NULL, 0);
+ ocs_log_debug(ocs, "mgmt set: %s %s\n", name, value);
+ return 0;
+}
+
+static int
+set_cmd_delay_value(ocs_t *ocs, char *name, char *value)
+{
+ ocs->delay_value_msec = ocs_strtoul(value, NULL, 0);
+ ocs->err_injection = (ocs->delay_value_msec == 0 ? NO_ERR_INJECT : INJECT_DELAY_CMD);
+ ocs_log_debug(ocs, "mgmt set: %s %s\n", name, value);
+ return 0;
+}
+
+/**
+ * @brief parse a WWN from a string into a 64-bit value
+ *
+ * Given a pointer to a string, parse the string into a 64-bit
+ * WWN value. The string must be "0x" followed by 16 hexadecimal digits
+ * (for example, 0x10000090fa942d8a), matching the ocs_sscanf() format below.
+ *
+ * @param wwn_in pointer to the string to be parsed
+ * @param wwn_out pointer to uint64_t in which to put the parsed result
+ *
+ * @return 0 if successful, non-zero if the WWN is malformed and couldn't be parsed
+ */
+int
+parse_wwn(char *wwn_in, uint64_t *wwn_out)
+{
+ uint8_t byte0;
+ uint8_t byte1;
+ uint8_t byte2;
+ uint8_t byte3;
+ uint8_t byte4;
+ uint8_t byte5;
+ uint8_t byte6;
+ uint8_t byte7;
+ int rc;
+
+ rc = ocs_sscanf(wwn_in, "0x%2hhx%2hhx%2hhx%2hhx%2hhx%2hhx%2hhx%2hhx",
+ &byte0, &byte1, &byte2, &byte3,
+ &byte4, &byte5, &byte6, &byte7);
+
+ if (rc == 8) {
+ *wwn_out = ((uint64_t)byte0 << 56) |
+ ((uint64_t)byte1 << 48) |
+ ((uint64_t)byte2 << 40) |
+ ((uint64_t)byte3 << 32) |
+ ((uint64_t)byte4 << 24) |
+ ((uint64_t)byte5 << 16) |
+ ((uint64_t)byte6 << 8) |
+ ((uint64_t)byte7);
+ return 0;
+
+ } else {
+ return 1;
+ }
+}
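+
+/*
+ * Illustrative usage (hypothetical caller, not driver code):
+ *
+ *	uint64_t wwn;
+ *
+ *	if (parse_wwn("0x10000090fa942d8a", &wwn) != 0)
+ *		fail;		(malformed string)
+ *
+ * On success, wwn == 0x10000090fa942d8aULL.
+ */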
+
+static char *mode_string(int mode);
+
+
+/**
+ * @ingroup mgmt
+ * @brief Generate the beginning of a numbered section in a management XML document.
+ *
+ * @par Description
+ * This function begins a section. The XML information is appended to
+ * the textbuf. This form of the function is used for sections that might have
+ * multiple instances, such as a node or a SLI Port (sport). The index number
+ * is appended to the name.
+ *
+ * @param textbuf Pointer to the driver dump text buffer.
+ * @param name Name of the section.
+ * @param index Index number of this instance of the section.
+ *
+ * @return None.
+ */
+
+extern void ocs_mgmt_start_section(ocs_textbuf_t *textbuf, const char *name, int index)
+{
+ ocs_textbuf_printf(textbuf, "<%s instance=\"%d\">\n", name, index);
+}
+
+/**
+ * @ingroup mgmt
+ * @brief Generate the beginning of an unnumbered section in a management XML document.
+ *
+ * @par Description
+ * This function begins a section. The XML information is appended to
+ * the textbuf. This form of the function is used for sections that have
+ * a single instance only. Therefore, no index number is needed.
+ *
+ * @param textbuf Pointer to the driver dump text buffer.
+ * @param name Name of the section.
+ *
+ * @return None.
+ */
+
+extern void ocs_mgmt_start_unnumbered_section(ocs_textbuf_t *textbuf, const char *name)
+{
+ ocs_textbuf_printf(textbuf, "<%s>\n", name);
+}
+
+/**
+ * @ingroup mgmt
+ * @brief Generate the end of a section in a management XML document.
+ *
+ * @par Description
+ * This function ends a section. The XML information is appended to
+ * the textbuf.
+ *
+ * @param textbuf Pointer to the driver dump text buffer.
+ * @param name Name of the section.
+ *
+ * @return None.
+ */
+
+void ocs_mgmt_end_unnumbered_section(ocs_textbuf_t *textbuf, const char *name)
+{
+ ocs_textbuf_printf(textbuf, "</%s>\n", name);
+}
+
+/**
+ * @ingroup mgmt
+ * @brief Generate the indexed end of a section in a management XML document.
+ *
+ * @par Description
+ * This function ends a section. The XML information is appended to
+ * the textbuf.
+ *
+ * @param textbuf Pointer to the driver dump text buffer.
+ * @param name Name of the section.
+ * @param index Index number of this instance of the section.
+ *
+ * @return None.
+ */
+
+void ocs_mgmt_end_section(ocs_textbuf_t *textbuf, const char *name, int index)
+{
+	ocs_textbuf_printf(textbuf, "</%s>\n", name);
+}
+
+/**
+ * @ingroup mgmt
+ * @brief Generate a property, with no value, in a management XML document.
+ *
+ * @par Description
+ * This function generates a property name. The XML information is appended to
+ * the textbuf. This form of the function is used by the list functions
+ * when the property name only (and not the current value) is given.
+ *
+ * @param textbuf Pointer to the driver dump text buffer.
+ * @param mode Defines whether the property is read(r)/write(w)/executable(x).
+ * @param name Name of the property.
+ *
+ * @return None.
+ */
+
+void ocs_mgmt_emit_property_name(ocs_textbuf_t *textbuf, int mode, const char *name)
+{
+ ocs_textbuf_printf(textbuf, "<%s mode=\"%s\"/>\n", name, mode_string(mode));
+}
+
+/**
+ * @ingroup mgmt
+ * @brief Generate a property with a string value in a management XML document.
+ *
+ * @par Description
+ * This function generates a property name and a string value.
+ * The XML information is appended to the textbuf.
+ *
+ * @param textbuf Pointer to the driver dump text buffer.
+ * @param mode Defines whether the property is read(r)/write(w)/executable(x).
+ * @param name Name of the property.
+ * @param value Value of the property.
+ *
+ * @return None.
+ */
+
+void ocs_mgmt_emit_string(ocs_textbuf_t *textbuf, int mode, const char *name, const char *value)
+{
+ ocs_textbuf_printf(textbuf, "<%s mode=\"%s\">%s</%s>\n", name, mode_string(mode), value, name);
+}
+
+/**
+ * @ingroup mgmt
+ * @brief Generate a property with an integer value in a management XML document.
+ *
+ * @par Description
+ * This function generates a property name and an integer value.
+ * The XML information is appended to the textbuf.
+ *
+ * @param textbuf Pointer to driver dump text buffer.
+ * @param mode Defines whether the property is read(r)/write(w)/executable(x).
+ * @param name Name of the property.
+ * @param fmt A printf format for formatting the integer value.
+ *
+ * @return none
+ */
+
+void ocs_mgmt_emit_int(ocs_textbuf_t *textbuf, int mode, const char *name, const char *fmt, ...)
+{
+ va_list ap;
+ char valuebuf[64];
+
+ va_start(ap, fmt);
+ ocs_vsnprintf(valuebuf, sizeof(valuebuf), fmt, ap);
+ va_end(ap);
+
+ ocs_textbuf_printf(textbuf, "<%s mode=\"%s\">%s</%s>\n", name, mode_string(mode), valuebuf, name);
+}
+
+/**
+ * @ingroup mgmt
+ * @brief Generate a property with a boolean value in a management XML document.
+ *
+ * @par Description
+ * This function generates a property name and a boolean value.
+ * The XML information is appended to the textbuf.
+ *
+ * @param textbuf Pointer to the driver dump text buffer.
+ * @param mode Defines whether the property is read(r)/write(w)/executable(x).
+ * @param name Name of the property.
+ * @param value Boolean value to be added to the textbuf.
+ *
+ * @return None.
+ */
+
+void ocs_mgmt_emit_boolean(ocs_textbuf_t *textbuf, int mode, const char *name, int value)
+{
+ char *valuebuf = value ? "true" : "false";
+
+ ocs_textbuf_printf(textbuf, "<%s mode=\"%s\">%s</%s>\n", name, mode_string(mode), valuebuf, name);
+}
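+
+/*
+ * Example of the XML these helpers produce (illustrative values only):
+ *
+ *	ocs_mgmt_start_section(tb, "node", 2);
+ *	ocs_mgmt_emit_string(tb, MGMT_MODE_RD, "desc", "example");
+ *	ocs_mgmt_emit_int(tb, MGMT_MODE_RW, "logmask", "0x%02x", 3);
+ *	ocs_mgmt_end_section(tb, "node", 2);
+ *
+ * appends the following to the textbuf:
+ *
+ *	<node instance="2">
+ *	<desc mode="r">example</desc>
+ *	<logmask mode="rw">0x03</logmask>
+ *	</node>
+ */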
+
+static char *mode_string(int mode)
+{
+ static char mode_str[4];
+
+ mode_str[0] = '\0';
+ if (mode & MGMT_MODE_RD) {
+ strcat(mode_str, "r");
+ }
+ if (mode & MGMT_MODE_WR) {
+ strcat(mode_str, "w");
+ }
+ if (mode & MGMT_MODE_EX) {
+ strcat(mode_str, "x");
+ }
+
+	return mode_str;
+}
diff --git a/sys/dev/ocs_fc/ocs_mgmt.h b/sys/dev/ocs_fc/ocs_mgmt.h
new file mode 100644
index 000000000000..a9717674cefa
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs_mgmt.h
@@ -0,0 +1,121 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/**
+ * @file
+ * Declarations for the common functions used by ocs_mgmt.
+ */
+
+
+#if !defined(__OCS_MGMT_H__)
+#define __OCS_MGMT_H__
+
+#include "ocs_utils.h"
+
+#define OCS_MGMT_MAX_NAME 128
+#define OCS_MGMT_MAX_VALUE 128
+
+#define MGMT_MODE_RD 4
+#define MGMT_MODE_WR 2
+#define MGMT_MODE_EX 1
+#define MGMT_MODE_RW (MGMT_MODE_RD | MGMT_MODE_WR)
+
+#define FW_WRITE_BUFSIZE (64 * 1024)
+
+typedef struct ocs_mgmt_fw_write_result {
+ ocs_sem_t semaphore;
+ int32_t status;
+ uint32_t actual_xfer;
+ uint32_t change_status;
+} ocs_mgmt_fw_write_result_t;
+
+
+/*
+ * This structure is used in constructing a table of internal handler functions.
+ */
+typedef void (*ocs_mgmt_get_func)(ocs_t *, char *, ocs_textbuf_t*);
+typedef int (*ocs_mgmt_set_func)(ocs_t *, char *, char *);
+typedef int (*ocs_mgmt_action_func)(ocs_t *, char *, void *, uint32_t, void *, uint32_t);
+typedef struct ocs_mgmt_table_entry_s {
+ char *name;
+ ocs_mgmt_get_func get_handler;
+ ocs_mgmt_set_func set_handler;
+ ocs_mgmt_action_func action_handler;
+} ocs_mgmt_table_entry_t;
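+
+/*
+ * Illustrative entry for such a table (hypothetical example; the actual
+ * tables live in ocs_mgmt.c):
+ *
+ *	{ "logmask", get_logmask, set_logmask, NULL },
+ */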
+
+/*
+ * This structure is used when defining the top level management handlers for
+ * different types of objects (sport, node, domain, etc)
+ */
+typedef void (*ocs_mgmt_get_list_handler)(ocs_textbuf_t *textbuf, void* object);
+typedef void (*ocs_mgmt_get_all_handler)(ocs_textbuf_t *textbuf, void* object);
+typedef int (*ocs_mgmt_get_handler)(ocs_textbuf_t *, char *parent, char *name, void *object);
+typedef int (*ocs_mgmt_set_handler)(char *parent, char *name, char *value, void *object);
+typedef int (*ocs_mgmt_exec_handler)(char *parent, char *action, void *arg_in, uint32_t arg_in_length,
+ void *arg_out, uint32_t arg_out_length, void *object);
+
+typedef struct ocs_mgmt_functions_s {
+ ocs_mgmt_get_list_handler get_list_handler;
+ ocs_mgmt_get_handler get_handler;
+ ocs_mgmt_get_all_handler get_all_handler;
+ ocs_mgmt_set_handler set_handler;
+ ocs_mgmt_exec_handler exec_handler;
+} ocs_mgmt_functions_t;
+
+
+/* Helper functions */
+extern void ocs_mgmt_start_section(ocs_textbuf_t *textbuf, const char *name, int index);
+extern void ocs_mgmt_start_unnumbered_section(ocs_textbuf_t *textbuf, const char *name);
+extern void ocs_mgmt_end_section(ocs_textbuf_t *textbuf, const char *name, int index);
+extern void ocs_mgmt_end_unnumbered_section(ocs_textbuf_t *textbuf, const char *name);
+extern void ocs_mgmt_emit_property_name(ocs_textbuf_t *textbuf, int access, const char *name);
+extern void ocs_mgmt_emit_string(ocs_textbuf_t *textbuf, int access, const char *name, const char *value);
+__attribute__((format(printf,4,5)))
+extern void ocs_mgmt_emit_int(ocs_textbuf_t *textbuf, int access, const char *name, const char *fmt, ...);
+extern void ocs_mgmt_emit_boolean(ocs_textbuf_t *textbuf, int access, const char *name, const int value);
+extern int parse_wwn(char *wwn_in, uint64_t *wwn_out);
+
+/* Top level management functions - called by the ioctl */
+extern void ocs_mgmt_get_list(ocs_t *ocs, ocs_textbuf_t *textbuf);
+extern void ocs_mgmt_get_all(ocs_t *ocs, ocs_textbuf_t *textbuf);
+extern int ocs_mgmt_get(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf);
+extern int ocs_mgmt_set(ocs_t *ocs, char *name, char *value);
+extern int ocs_mgmt_exec(ocs_t *ocs, char *action, void *arg_in, uint32_t arg_in_length,
+ void *arg_out, uint32_t arg_out_length);
+
+extern int set_req_wwnn(ocs_t*, char*, char*);
+extern int set_req_wwpn(ocs_t*, char*, char*);
+extern int set_configured_speed(ocs_t*, char*, char*);
+extern int set_configured_topology(ocs_t*, char*, char*);
+
+#endif
diff --git a/sys/dev/ocs_fc/ocs_node.c b/sys/dev/ocs_fc/ocs_node.c
new file mode 100644
index 000000000000..0e4b08369562
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs_node.c
@@ -0,0 +1,2376 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/**
+ * @file
+ * OCS driver remote node handler. This file contains code that is shared
+ * between fabric (ocs_fabric.c) and device (ocs_device.c) nodes.
+ */
+
+/*!
+ * @defgroup node_common Node common support
+ * @defgroup node_alloc Node allocation
+ */
+
+#include "ocs.h"
+#include "ocs_els.h"
+#include "ocs_device.h"
+
+#define SCSI_IOFMT "[%04x][i:%0*x t:%0*x h:%04x]"
+#define SCSI_ITT_SIZE(ocs) ((ocs->ocs_xport == OCS_XPORT_FC) ? 4 : 8)
+
+#define SCSI_IOFMT_ARGS(io) io->instance_index, SCSI_ITT_SIZE(io->ocs), io->init_task_tag, SCSI_ITT_SIZE(io->ocs), io->tgt_task_tag, io->hw_tag
+
+#define scsi_io_printf(io, fmt, ...) ocs_log_debug(io->ocs, "[%s]" SCSI_IOFMT fmt, \
+ io->node->display_name, SCSI_IOFMT_ARGS(io), ##__VA_ARGS__)
+
+void ocs_mgmt_node_list(ocs_textbuf_t *textbuf, void *node);
+void ocs_mgmt_node_get_all(ocs_textbuf_t *textbuf, void *node);
+int ocs_mgmt_node_get(ocs_textbuf_t *textbuf, char *parent, char *name, void *node);
+int ocs_mgmt_node_set(char *parent, char *name, char *value, void *node);
+int ocs_mgmt_node_exec(char *parent, char *action, void *arg_in, uint32_t arg_in_length,
+ void *arg_out, uint32_t arg_out_length, void *node);
+static ocs_mgmt_functions_t node_mgmt_functions = {
+ .get_list_handler = ocs_mgmt_node_list,
+ .get_handler = ocs_mgmt_node_get,
+ .get_all_handler = ocs_mgmt_node_get_all,
+ .set_handler = ocs_mgmt_node_set,
+ .exec_handler = ocs_mgmt_node_exec,
+};
+
+
+/**
+ * @ingroup node_common
+ * @brief Abort all outstanding ELS IOs for a node
+ *
+ * All active ELS IOs on the given node are aborted.
+ *
+ * @param node node for which ELS IOs will be aborted
+ */
+
+void
+ocs_node_abort_all_els(ocs_node_t *node)
+{
+ ocs_io_t *els;
+ ocs_io_t *els_next;
+ ocs_node_cb_t cbdata = {0};
+
+ ocs_node_hold_frames(node);
+ ocs_lock(&node->active_ios_lock);
+ ocs_list_foreach_safe(&node->els_io_active_list, els, els_next) {
+ ocs_log_debug(node->ocs, "[%s] initiate ELS abort %s\n", node->display_name, els->display_name);
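+		/* drop the lock across the event post: abort completion handling
+		 * may re-acquire active_ios_lock (hence the relock below) */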
+ ocs_unlock(&node->active_ios_lock);
+ cbdata.els = els;
+ ocs_els_post_event(els, OCS_EVT_ABORT_ELS, &cbdata);
+ ocs_lock(&node->active_ios_lock);
+ }
+ ocs_unlock(&node->active_ios_lock);
+}
+
+/**
+ * @ingroup node_common
+ * @brief Handle remote node events from HW
+ *
+ * Handle remote node events from HW. Essentially the HW event is translated into
+ * a node state machine event that is posted to the affected node.
+ *
+ * @param arg pointer to ocs
+ * @param event HW event to process
+ * @param data application specific data (pointer to the affected node)
+ *
+ * @return returns 0 for success, a negative error code value for failure.
+ */
+int32_t
+ocs_remote_node_cb(void *arg, ocs_hw_remote_node_event_e event, void *data)
+{
+ ocs_t *ocs = arg;
+ ocs_sm_event_t sm_event = OCS_EVT_LAST;
+ ocs_remote_node_t *rnode = data;
+ ocs_node_t *node = rnode->node;
+
+ switch (event) {
+ case OCS_HW_NODE_ATTACH_OK:
+ sm_event = OCS_EVT_NODE_ATTACH_OK;
+ break;
+
+ case OCS_HW_NODE_ATTACH_FAIL:
+ sm_event = OCS_EVT_NODE_ATTACH_FAIL;
+ break;
+
+ case OCS_HW_NODE_FREE_OK:
+ sm_event = OCS_EVT_NODE_FREE_OK;
+ break;
+
+ case OCS_HW_NODE_FREE_FAIL:
+ sm_event = OCS_EVT_NODE_FREE_FAIL;
+ break;
+
+ default:
+ ocs_log_test(ocs, "unhandled event %#x\n", event);
+ return -1;
+ }
+
+ /* If we're using HLM, forward the NODE_ATTACH_OK/FAIL event to all nodes in the node group */
+ if ((node->node_group != NULL) &&
+ ((sm_event == OCS_EVT_NODE_ATTACH_OK) || (sm_event == OCS_EVT_NODE_ATTACH_FAIL))) {
+ ocs_node_t *n = NULL;
+ uint8_t attach_ok = sm_event == OCS_EVT_NODE_ATTACH_OK;
+
+ ocs_sport_lock(node->sport);
+ {
+ ocs_list_foreach(&node->sport->node_list, n) {
+ if (node == n) {
+ continue;
+ }
+ ocs_node_lock(n);
+ if ((!n->rnode.attached) && (node->node_group == n->node_group)) {
+ n->rnode.attached = attach_ok;
+ node_printf(n, "rpi[%d] deferred HLM node attach %s posted\n",
+ n->rnode.index, attach_ok ? "ok" : "fail");
+ ocs_node_post_event(n, sm_event, NULL);
+ }
+ ocs_node_unlock(n);
+ }
+ }
+
+ ocs_sport_unlock(node->sport);
+ }
+
+ ocs_node_post_event(node, sm_event, NULL);
+
+ return 0;
+}
+
+/**
+ * @ingroup node_alloc
+ * @brief Find an FC node structure given the FC port ID
+ *
+ * @param sport the SPORT to search
+ * @param port_id FC port ID
+ *
+ * @return pointer to the object or NULL if not found
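+ *
+ * Example (sketch drawn from this file's own use; FC_ADDR_NAMESERVER is the
+ * well-known name server address):
+ *
+ * @code
+ * ocs_node_t *ns = ocs_node_find(sport, FC_ADDR_NAMESERVER);
+ * if (ns != NULL)
+ *	ocs_node_post_event(ns, OCS_EVT_RSCN_RCVD, NULL);
+ * @endcode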
+ */
+ocs_node_t *
+ocs_node_find(ocs_sport_t *sport, uint32_t port_id)
+{
+ ocs_node_t *node;
+
+ ocs_assert(sport->lookup, NULL);
+ ocs_sport_lock(sport);
+ node = spv_get(sport->lookup, port_id);
+ ocs_sport_unlock(sport);
+ return node;
+}
+
+/**
+ * @ingroup node_alloc
+ * @brief Find an FC node structure given the WWPN
+ *
+ * @param sport the SPORT to search
+ * @param wwpn the WWPN to search for (host endian)
+ *
+ * @return pointer to the object or NULL if not found
+ */
+ocs_node_t *
+ocs_node_find_wwpn(ocs_sport_t *sport, uint64_t wwpn)
+{
+	ocs_node_t *node = NULL;
+
+ ocs_assert(sport, NULL);
+
+ ocs_sport_lock(sport);
+ ocs_list_foreach(&sport->node_list, node) {
+ if (ocs_node_get_wwpn(node) == wwpn) {
+ ocs_sport_unlock(sport);
+ return node;
+ }
+ }
+ ocs_sport_unlock(sport);
+ return NULL;
+}
+
+/**
+ * @ingroup node_alloc
+ * @brief allocate node object pool
+ *
+ * A pool of ocs_node_t objects is allocated.
+ *
+ * @param ocs pointer to driver instance context
+ * @param node_count count of nodes to allocate
+ *
+ * @return returns 0 for success, a negative error code value for failure.
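+ *
+ * A minimal lifecycle sketch (the node_count value 128 is hypothetical):
+ *
+ * @code
+ * if (ocs_node_create_pool(ocs, 128) == 0) {
+ *	...
+ *	ocs_node_free_pool(ocs);
+ * }
+ * @endcode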
+ */
+
+int32_t
+ocs_node_create_pool(ocs_t *ocs, uint32_t node_count)
+{
+ ocs_xport_t *xport = ocs->xport;
+ uint32_t i;
+ ocs_node_t *node;
+ uint32_t max_sge;
+ uint32_t num_sgl;
+ uint64_t max_xfer_size;
+ int32_t rc;
+
+ xport->nodes_count = node_count;
+
+ xport->nodes = ocs_malloc(ocs, node_count * sizeof(ocs_node_t *), OCS_M_ZERO | OCS_M_NOWAIT);
+ if (xport->nodes == NULL) {
+ ocs_log_err(ocs, "node ptrs allocation failed");
+ return -1;
+ }
+
+ if (0 == ocs_hw_get(&ocs->hw, OCS_HW_MAX_SGE, &max_sge) &&
+ 0 == ocs_hw_get(&ocs->hw, OCS_HW_N_SGL, &num_sgl)) {
+ max_xfer_size = max_sge * num_sgl;
+ } else {
+ max_xfer_size = 65536;
+ }
+
+ if (max_xfer_size > 65536)
+ max_xfer_size = 65536;
+
+ ocs_list_init(&xport->nodes_free_list, ocs_node_t, link);
+
+
+ for (i = 0; i < node_count; i ++) {
+ node = ocs_malloc(ocs, sizeof(ocs_node_t), OCS_M_ZERO | OCS_M_NOWAIT);
+ if (node == NULL) {
+ ocs_log_err(ocs, "node allocation failed");
+ goto error;
+ }
+
+ /* Assign any persistent field values */
+ node->instance_index = i;
+ node->max_wr_xfer_size = max_xfer_size;
+ node->rnode.indicator = UINT32_MAX;
+
+ rc = ocs_dma_alloc(ocs, &node->sparm_dma_buf, 256, 16);
+ if (rc) {
+ ocs_free(ocs, node, sizeof(ocs_node_t));
+ ocs_log_err(ocs, "ocs_dma_alloc failed: %d\n", rc);
+ goto error;
+ }
+
+ xport->nodes[i] = node;
+ ocs_list_add_tail(&xport->nodes_free_list, node);
+ }
+ return 0;
+
+error:
+ ocs_node_free_pool(ocs);
+ return -1;
+}
+
+/**
+ * @ingroup node_alloc
+ * @brief free node object pool
+ *
+ * The pool of previously allocated node objects is freed
+ *
+ * @param ocs pointer to driver instance context
+ *
+ * @return none
+ */
+
+void
+ocs_node_free_pool(ocs_t *ocs)
+{
+ ocs_xport_t *xport = ocs->xport;
+ ocs_node_t *node;
+ uint32_t i;
+
+ if (!xport->nodes)
+ return;
+
+ ocs_device_lock(ocs);
+
+ for (i = 0; i < xport->nodes_count; i ++) {
+ node = xport->nodes[i];
+ if (node) {
+ /* free sparam_dma_buf */
+ ocs_dma_free(ocs, &node->sparm_dma_buf);
+ ocs_free(ocs, node, sizeof(ocs_node_t));
+ }
+ xport->nodes[i] = NULL;
+ }
+
+ ocs_free(ocs, xport->nodes, (xport->nodes_count * sizeof(ocs_node_t *)));
+
+ ocs_device_unlock(ocs);
+}
+
+/**
+ * @ingroup node_alloc
+ * @brief return pointer to node object given instance index
+ *
+ * A pointer to the node object given by an instance index is returned.
+ *
+ * @param ocs pointer to driver instance context
+ * @param index instance index
+ *
+ * @return returns pointer to node object, or NULL
+ */
+
+ocs_node_t *
+ocs_node_get_instance(ocs_t *ocs, uint32_t index)
+{
+ ocs_xport_t *xport = ocs->xport;
+ ocs_node_t *node = NULL;
+
+ if (index >= (xport->nodes_count)) {
+ ocs_log_test(ocs, "invalid index: %d\n", index);
+ return NULL;
+ }
+ node = xport->nodes[index];
+ return node->attached ? node : NULL;
+}
+
+/**
+ * @ingroup node_alloc
+ * @brief Allocate an FC node structure and add it to the node list
+ *
+ * @param sport pointer to the SPORT from which this node is allocated
+ * @param port_id FC port ID of new node
+ * @param init Port is an initiator (sent a PLOGI)
+ * @param targ Port is potentially a target
+ *
+ * @return pointer to the object or NULL if none available
+ */
+
+ocs_node_t *
+ocs_node_alloc(ocs_sport_t *sport, uint32_t port_id, uint8_t init, uint8_t targ)
+{
+ int32_t rc;
+ ocs_node_t *node = NULL;
+ uint32_t instance_index;
+ uint32_t max_wr_xfer_size;
+ ocs_t *ocs = sport->ocs;
+ ocs_xport_t *xport = ocs->xport;
+ ocs_dma_t sparm_dma_buf;
+
+ ocs_assert(sport, NULL);
+
+ if (sport->shutting_down) {
+ ocs_log_debug(ocs, "node allocation when shutting down %06x", port_id);
+ return NULL;
+ }
+
+ ocs_device_lock(ocs);
+ node = ocs_list_remove_head(&xport->nodes_free_list);
+ ocs_device_unlock(ocs);
+ if (node == NULL) {
+ ocs_log_err(ocs, "node allocation failed %06x", port_id);
+ return NULL;
+ }
+
+ /* Save persistent values across memset zero */
+ instance_index = node->instance_index;
+ max_wr_xfer_size = node->max_wr_xfer_size;
+ sparm_dma_buf = node->sparm_dma_buf;
+
+ ocs_memset(node, 0, sizeof(*node));
+ node->instance_index = instance_index;
+ node->max_wr_xfer_size = max_wr_xfer_size;
+ node->sparm_dma_buf = sparm_dma_buf;
+ node->rnode.indicator = UINT32_MAX;
+
+ node->sport = sport;
+ ocs_sport_lock(sport);
+
+ node->ocs = ocs;
+ node->init = init;
+ node->targ = targ;
+
+ rc = ocs_hw_node_alloc(&ocs->hw, &node->rnode, port_id, sport);
+ if (rc) {
+ ocs_log_err(ocs, "ocs_hw_node_alloc failed: %d\n", rc);
+ ocs_sport_unlock(sport);
+
+ /* Return back to pool. */
+ ocs_device_lock(ocs);
+ ocs_list_add_tail(&xport->nodes_free_list, node);
+ ocs_device_unlock(ocs);
+
+ return NULL;
+ }
+ ocs_list_add_tail(&sport->node_list, node);
+
+ ocs_node_lock_init(node);
+ ocs_lock_init(ocs, &node->pend_frames_lock, "pend_frames_lock[%d]", node->instance_index);
+ ocs_list_init(&node->pend_frames, ocs_hw_sequence_t, link);
+ ocs_lock_init(ocs, &node->active_ios_lock, "active_ios[%d]", node->instance_index);
+ ocs_list_init(&node->active_ios, ocs_io_t, link);
+ ocs_list_init(&node->els_io_pend_list, ocs_io_t, link);
+ ocs_list_init(&node->els_io_active_list, ocs_io_t, link);
+ ocs_scsi_io_alloc_enable(node);
+
+ /* zero the service parameters */
+ ocs_memset(node->sparm_dma_buf.virt, 0, node->sparm_dma_buf.size);
+
+ node->rnode.node = node;
+ node->sm.app = node;
+ node->evtdepth = 0;
+
+ ocs_node_update_display_name(node);
+
+ spv_set(sport->lookup, port_id, node);
+ ocs_sport_unlock(sport);
+ node->mgmt_functions = &node_mgmt_functions;
+
+ return node;
+}
+
+/**
+ * @ingroup node_alloc
+ * @brief free a node structure
+ *
+ * The node structure given by 'node' is freed
+ *
+ * @param node the node to free
+ *
+ * @return returns 0 for success, a negative error code value for failure.
+ */
+
+int32_t
+ocs_node_free(ocs_node_t *node)
+{
+ ocs_sport_t *sport;
+ ocs_t *ocs;
+ ocs_xport_t *xport;
+ ocs_hw_rtn_e rc = 0;
+ ocs_node_t *ns = NULL;
+ int post_all_free = FALSE;
+
+ ocs_assert(node, -1);
+ ocs_assert(node->sport, -1);
+ ocs_assert(node->ocs, -1);
+ sport = node->sport;
+ ocs_assert(sport, -1);
+ ocs = node->ocs;
+ ocs_assert(ocs->xport, -1);
+ xport = ocs->xport;
+
+ node_printf(node, "Free'd\n");
+
+ if(node->refound) {
+ /*
+ * Save the name server node. We will send fake RSCN event at
+ * the end to handle ignored RSCN event during node deletion
+ */
+ ns = ocs_node_find(node->sport, FC_ADDR_NAMESERVER);
+ }
+
+ /* Remove from node list */
+ ocs_sport_lock(sport);
+ ocs_list_remove(&sport->node_list, node);
+
+ /* Free HW resources */
+ if (OCS_HW_RTN_IS_ERROR((rc = ocs_hw_node_free_resources(&ocs->hw, &node->rnode)))) {
+ ocs_log_test(ocs, "ocs_hw_node_free failed: %d\n", rc);
+ rc = -1;
+ }
+
+ /* if the gidpt_delay_timer is still running, then delete it */
+ if (ocs_timer_pending(&node->gidpt_delay_timer)) {
+ ocs_del_timer(&node->gidpt_delay_timer);
+ }
+
+ if (node->fcp2device) {
+ ocs_del_crn(node);
+ }
+
+ /* remove entry from sparse vector list */
+ if (sport->lookup == NULL) {
+ ocs_log_test(node->ocs, "assertion failed: sport lookup is NULL\n");
+ ocs_sport_unlock(sport);
+ return -1;
+ }
+
+ spv_set(sport->lookup, node->rnode.fc_id, NULL);
+
+ /*
+	 * If the node_list is empty, then post an ALL_CHILD_NODES_FREE event to the sport
+	 * after the lock is released. The sport may be freed as a result of the event.
+ */
+ if (ocs_list_empty(&sport->node_list)) {
+ post_all_free = TRUE;
+ }
+
+ ocs_sport_unlock(sport);
+
+ if (post_all_free) {
+ ocs_sm_post_event(&sport->sm, OCS_EVT_ALL_CHILD_NODES_FREE, NULL);
+ }
+
+ node->sport = NULL;
+ node->sm.current_state = NULL;
+
+ ocs_node_lock_free(node);
+ ocs_lock_free(&node->pend_frames_lock);
+ ocs_lock_free(&node->active_ios_lock);
+
+ /* return to free list */
+ ocs_device_lock(ocs);
+ ocs_list_add_tail(&xport->nodes_free_list, node);
+ ocs_device_unlock(ocs);
+
+ if(ns != NULL) {
+ /* sending fake RSCN event to name server node */
+ ocs_node_post_event(ns, OCS_EVT_RSCN_RCVD, NULL);
+ }
+
+ return rc;
+}
+
+/**
+ * @brief free memory resources of a node object
+ *
+ * The node object's child objects are freed after which the
+ * node object is freed.
+ *
+ * @param node pointer to a node object
+ *
+ * @return none
+ */
+
+void
+ocs_node_force_free(ocs_node_t *node)
+{
+ ocs_io_t *io;
+ ocs_io_t *next;
+ ocs_io_t *els;
+ ocs_io_t *els_next;
+
+ /* shutdown sm processing */
+ ocs_sm_disable(&node->sm);
+ ocs_strncpy(node->prev_state_name, node->current_state_name, sizeof(node->prev_state_name));
+ ocs_strncpy(node->current_state_name, "disabled", sizeof(node->current_state_name));
+
+ /* Let the backend cleanup if needed */
+ ocs_scsi_notify_node_force_free(node);
+
+ ocs_lock(&node->active_ios_lock);
+ ocs_list_foreach_safe(&node->active_ios, io, next) {
+ ocs_list_remove(&io->node->active_ios, io);
+ ocs_io_free(node->ocs, io);
+ }
+ ocs_unlock(&node->active_ios_lock);
+
+ /* free all pending ELS IOs */
+ ocs_lock(&node->active_ios_lock);
+ ocs_list_foreach_safe(&node->els_io_pend_list, els, els_next) {
+ /* can't call ocs_els_io_free() because lock is held; cleanup manually */
+ ocs_list_remove(&node->els_io_pend_list, els);
+
+ ocs_io_free(node->ocs, els);
+ }
+ ocs_unlock(&node->active_ios_lock);
+
+ /* free all active ELS IOs */
+ ocs_lock(&node->active_ios_lock);
+ ocs_list_foreach_safe(&node->els_io_active_list, els, els_next) {
+ /* can't call ocs_els_io_free() because lock is held; cleanup manually */
+ ocs_list_remove(&node->els_io_active_list, els);
+
+ ocs_io_free(node->ocs, els);
+ }
+ ocs_unlock(&node->active_ios_lock);
+
+ /* manually purge pending frames (if any) */
+ ocs_node_purge_pending(node);
+
+ ocs_node_free(node);
+}
+
+/**
+ * @ingroup node_common
+ * @brief Perform HW call to attach a remote node
+ *
+ * @param node pointer to node object
+ *
+ * @return 0 on success, non-zero otherwise
+ */
+int32_t
+ocs_node_attach(ocs_node_t *node)
+{
+ int32_t rc = 0;
+ ocs_sport_t *sport = node->sport;
+ ocs_domain_t *domain = sport->domain;
+ ocs_t *ocs = node->ocs;
+
+ if (!domain->attached) {
+ ocs_log_test(ocs, "Warning: ocs_node_attach with unattached domain\n");
+ return -1;
+ }
+ /* Update node->wwpn/wwnn */
+
+ ocs_node_build_eui_name(node->wwpn, sizeof(node->wwpn), ocs_node_get_wwpn(node));
+ ocs_node_build_eui_name(node->wwnn, sizeof(node->wwnn), ocs_node_get_wwnn(node));
+
+ if (ocs->enable_hlm) {
+ ocs_node_group_init(node);
+ }
+
+ ocs_dma_copy_in(&node->sparm_dma_buf, node->service_params+4, sizeof(node->service_params)-4);
+
+ /* take lock to protect node->rnode.attached */
+ ocs_node_lock(node);
+ rc = ocs_hw_node_attach(&ocs->hw, &node->rnode, &node->sparm_dma_buf);
+ if (OCS_HW_RTN_IS_ERROR(rc)) {
+ ocs_log_test(ocs, "ocs_hw_node_attach failed: %d\n", rc);
+ }
+ ocs_node_unlock(node);
+
+ return rc;
+}
+
+/**
+ * @ingroup node_common
+ * @brief Generate text for a node's fc_id
+ *
+ * The text for a node's fc_id is generated, either as a well-known name or as a
+ * 6-digit hex value.
+ *
+ * @param fc_id fc_id
+ * @param buffer text buffer
+ * @param buffer_length text buffer length in bytes
+ *
+ * @return none
+ */
+
+void
+ocs_node_fcid_display(uint32_t fc_id, char *buffer, uint32_t buffer_length)
+{
+ switch (fc_id) {
+ case FC_ADDR_FABRIC:
+ ocs_snprintf(buffer, buffer_length, "fabric");
+ break;
+ case FC_ADDR_CONTROLLER:
+ ocs_snprintf(buffer, buffer_length, "fabctl");
+ break;
+ case FC_ADDR_NAMESERVER:
+ ocs_snprintf(buffer, buffer_length, "nserve");
+ break;
+ default:
+ if (FC_ADDR_IS_DOMAIN_CTRL(fc_id)) {
+ ocs_snprintf(buffer, buffer_length, "dctl%02x",
+ FC_ADDR_GET_DOMAIN_CTRL(fc_id));
+ } else {
+ ocs_snprintf(buffer, buffer_length, "%06x", fc_id);
+ }
+ break;
+ }
+
+}
+
+/**
+ * @brief update the node's display name
+ *
+ * The node's display name is updated; this is sometimes needed because the
+ * sport portion of the name changes after the node is allocated.
+ *
+ * @param node pointer to the node object
+ *
+ * @return none
+ */
+
+void
+ocs_node_update_display_name(ocs_node_t *node)
+{
+ uint32_t port_id = node->rnode.fc_id;
+ ocs_sport_t *sport = node->sport;
+ char portid_display[16];
+
+ ocs_assert(sport);
+
+ ocs_node_fcid_display(port_id, portid_display, sizeof(portid_display));
+
+ ocs_snprintf(node->display_name, sizeof(node->display_name), "%s.%s", sport->display_name, portid_display);
+}
+
+/**
+ * @brief Clean up the XRI for a pending link services accept, aborting the
+ * XRI if required.
+ *
+ * <h3 class="desc">Description</h3>
+ * This function is called when the LS accept is not sent.
+ *
+ * @param node Node whose pending LS accept should be cleaned up
+ */
+
+void
+ocs_node_send_ls_io_cleanup(ocs_node_t *node)
+{
+ ocs_t *ocs = node->ocs;
+
+ if (node->send_ls_acc != OCS_NODE_SEND_LS_ACC_NONE) {
+ ocs_assert(node->ls_acc_io);
+ ocs_log_debug(ocs, "[%s] cleaning up LS_ACC oxid=0x%x\n",
+ node->display_name, node->ls_acc_oxid);
+
+ node->ls_acc_io->hio = NULL;
+ ocs_els_io_free(node->ls_acc_io);
+ node->send_ls_acc = OCS_NODE_SEND_LS_ACC_NONE;
+ node->ls_acc_io = NULL;
+ }
+}
+
+/**
+ * @ingroup node_common
+ * @brief state: shutdown a node
+ *
+ * A node is shut down.
+ *
+ * @param ctx remote node sm context
+ * @param evt event to process
+ * @param arg per event optional argument
+ *
+ * @return returns NULL
+ */
+
+void *
+__ocs_node_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ int32_t rc;
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_ENTER: {
+ ocs_node_hold_frames(node);
+ ocs_assert(ocs_node_active_ios_empty(node), NULL);
+ ocs_assert(ocs_els_io_list_empty(node, &node->els_io_active_list), NULL);
+
+ /* by default, we will be freeing node after we unwind */
+ node->req_free = 1;
+
+ switch (node->shutdown_reason) {
+ case OCS_NODE_SHUTDOWN_IMPLICIT_LOGO:
+ /* sm: if shutdown reason is implicit logout / ocs_node_attach
+ * Node shutdown b/c of PLOGI received when node already
+ * logged in. We have PLOGI service parameters, so submit
+ * node attach; we won't be freeing this node
+ */
+
+ /* currently, only case for implicit logo is PLOGI recvd. Thus,
+ * node's ELS IO pending list won't be empty (PLOGI will be on it)
+ */
+ ocs_assert(node->send_ls_acc == OCS_NODE_SEND_LS_ACC_PLOGI, NULL);
+ node_printf(node, "Shutdown reason: implicit logout, re-authenticate\n");
+
+ ocs_scsi_io_alloc_enable(node);
+
+ /* Re-attach node with the same HW node resources */
+ node->req_free = 0;
+ rc = ocs_node_attach(node);
+ ocs_node_transition(node, __ocs_d_wait_node_attach, NULL);
+ if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
+ ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
+ }
+ break;
+ case OCS_NODE_SHUTDOWN_EXPLICIT_LOGO: {
+ int8_t pend_frames_empty;
+
+ /* cleanup any pending LS_ACC ELSs */
+ ocs_node_send_ls_io_cleanup(node);
+ ocs_assert(ocs_els_io_list_empty(node, &node->els_io_pend_list), NULL);
+
+ ocs_lock(&node->pend_frames_lock);
+ pend_frames_empty = ocs_list_empty(&node->pend_frames);
+ ocs_unlock(&node->pend_frames_lock);
+
+ /* there are two scenarios where we want to keep this node alive:
+ * 1. there are pending frames that need to be processed or
+ * 2. we're an initiator and the remote node is a target and we
+ * need to re-authenticate
+ */
+ node_printf(node, "Shutdown: explicit logo pend=%d sport.ini=%d node.tgt=%d\n",
+ !pend_frames_empty, node->sport->enable_ini, node->targ);
+
+ if((!pend_frames_empty) || (node->sport->enable_ini && node->targ)) {
+ uint8_t send_plogi = FALSE;
+ if (node->sport->enable_ini && node->targ) {
+ /* we're an initiator and node shutting down is a target; we'll
+ * need to re-authenticate in initial state
+ */
+ send_plogi = TRUE;
+ }
+
+ /* transition to __ocs_d_init (will retain HW node resources) */
+ ocs_scsi_io_alloc_enable(node);
+ node->req_free = 0;
+
+ /* either pending frames exist, or we're re-authenticating with PLOGI
+ * (or both); in either case, return to initial state
+ */
+ ocs_node_init_device(node, send_plogi);
+
+ }
+ /* else: let node shutdown occur */
+ break;
+ }
+ case OCS_NODE_SHUTDOWN_DEFAULT:
+ default:
+ /* shutdown due to link down, node going away (xport event) or
+ * sport shutdown, purge pending and proceed to cleanup node
+ */
+
+ /* cleanup any pending LS_ACC ELSs */
+ ocs_node_send_ls_io_cleanup(node);
+ ocs_assert(ocs_els_io_list_empty(node, &node->els_io_pend_list), NULL);
+
+ node_printf(node, "Shutdown reason: default, purge pending\n");
+ ocs_node_purge_pending(node);
+ break;
+ }
+
+ break;
+ }
+ case OCS_EVT_EXIT:
+ ocs_node_accept_frames(node);
+ break;
+
+ default:
+ __ocs_node_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+/**
+ * @ingroup node_common
+ * @brief Check to see if ELSs have been quiesced
+ *
+ * Check if ELSs have been quiesced. If so, transition to the
+ * next state in the shutdown process.
+ *
+ * @param node Node for which ELSs are checked
+ *
+ * @return Returns 1 if ELSs have been quiesced, 0 otherwise.
+ */
+static int
+ocs_node_check_els_quiesced(ocs_node_t *node)
+{
+ ocs_assert(node, -1);
+
+	/* check to see if ELS requests and completions are quiesced */
+ if ((node->els_req_cnt == 0) && (node->els_cmpl_cnt == 0) &&
+ ocs_els_io_list_empty(node, &node->els_io_active_list)) {
+ if (!node->attached) {
+ /* hw node detach already completed, proceed */
+ node_printf(node, "HW node not attached\n");
+ ocs_node_transition(node, __ocs_node_wait_ios_shutdown, NULL);
+ } else {
+ /* hw node detach hasn't completed, transition and wait */
+ node_printf(node, "HW node still attached\n");
+ ocs_node_transition(node, __ocs_node_wait_node_free, NULL);
+ }
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * @ingroup node_common
+ * @brief Initiate node IO cleanup.
+ *
+ * Note: this function must be called with a non-attached node
+ * or a node for which the node detach (ocs_hw_node_detach())
+ * has already been initiated.
+ *
+ * @param node Node for which shutdown is initiated
+ *
+ * @return Returns None.
+ */
+
+void
+ocs_node_initiate_cleanup(ocs_node_t *node)
+{
+ ocs_io_t *els;
+ ocs_io_t *els_next;
+ ocs_t *ocs;
+ ocs_assert(node);
+ ocs = node->ocs;
+
+	/* first clean up ELSs that are pending (not yet active) */
+ ocs_lock(&node->active_ios_lock);
+ ocs_list_foreach_safe(&node->els_io_pend_list, els, els_next) {
+
+ /* skip the ELS IO for which a response will be sent after shutdown */
+ if ((node->send_ls_acc != OCS_NODE_SEND_LS_ACC_NONE) &&
+ (els == node->ls_acc_io)) {
+ continue;
+ }
+ /* can't call ocs_els_io_free() because lock is held; cleanup manually */
+ node_printf(node, "Freeing pending els %s\n", els->display_name);
+ ocs_list_remove(&node->els_io_pend_list, els);
+
+ ocs_io_free(node->ocs, els);
+ }
+ ocs_unlock(&node->active_ios_lock);
+
+ if (node->ls_acc_io && node->ls_acc_io->hio != NULL) {
+ /*
+ * if there's an IO that will result in an LS_ACC after
+ * shutdown and its HW IO is non-NULL, it better be an
+ * implicit logout in vanilla sequence coalescing. In this
+ * case, force the LS_ACC to go out on another XRI (hio)
+ * since the previous will have been aborted by the UNREG_RPI
+ */
+ ocs_assert(node->shutdown_reason == OCS_NODE_SHUTDOWN_IMPLICIT_LOGO);
+ ocs_assert(node->send_ls_acc == OCS_NODE_SEND_LS_ACC_PLOGI);
+ node_printf(node, "invalidating ls_acc_io due to implicit logo\n");
+
+ /* No need to abort because the unreg_rpi takes care of it, just free */
+ ocs_hw_io_free(&ocs->hw, node->ls_acc_io->hio);
+
+ /* NULL out hio to force the LS_ACC to grab a new XRI */
+ node->ls_acc_io->hio = NULL;
+ }
+
+ /*
+	 * if ELSs have already been quiesced, we will move to the next state;
+	 * if not, abort them
+ */
+ if (ocs_node_check_els_quiesced(node) == 0) {
+ /*
+		 * Abort all ELSs, since they won't be aborted by the
+		 * HW node free.
+ */
+ ocs_node_abort_all_els(node);
+ ocs_node_transition(node, __ocs_node_wait_els_shutdown, NULL);
+ }
+}
+
+/**
+ * @ingroup node_common
+ * @brief Node state machine: Wait for all ELSs to complete.
+ *
+ * <h3 class="desc">Description</h3>
+ * This state waits for all ELSs to complete after all outstanding
+ * ELS IOs have been aborted.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_node_wait_els_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ uint8_t check_quiesce = FALSE;
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+
+ case OCS_EVT_ENTER: {
+ ocs_node_hold_frames(node);
+ if (ocs_els_io_list_empty(node, &node->els_io_active_list)) {
+ node_printf(node, "All ELS IOs complete\n");
+ check_quiesce = TRUE;
+ }
+ break;
+ }
+ case OCS_EVT_EXIT:
+ ocs_node_accept_frames(node);
+ break;
+
+ case OCS_EVT_SRRS_ELS_REQ_OK:
+ case OCS_EVT_SRRS_ELS_REQ_FAIL:
+ case OCS_EVT_SRRS_ELS_REQ_RJT:
+ case OCS_EVT_ELS_REQ_ABORTED:
+ ocs_assert(node->els_req_cnt, NULL);
+ node->els_req_cnt--;
+ check_quiesce = TRUE;
+ break;
+
+ case OCS_EVT_SRRS_ELS_CMPL_OK:
+ case OCS_EVT_SRRS_ELS_CMPL_FAIL:
+ ocs_assert(node->els_cmpl_cnt, NULL);
+ node->els_cmpl_cnt--;
+ check_quiesce = TRUE;
+ break;
+
+ case OCS_EVT_ALL_CHILD_NODES_FREE:
+		/* all ELS IOs complete */
+ node_printf(node, "All ELS IOs complete\n");
+ ocs_assert(ocs_els_io_list_empty(node, &node->els_io_active_list), NULL);
+ check_quiesce = TRUE;
+ break;
+
+ case OCS_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
+ break;
+
+ case OCS_EVT_DOMAIN_ATTACH_OK:
+ /* don't care about domain_attach_ok */
+ break;
+
+ /* ignore shutdown events as we're already in shutdown path */
+ case OCS_EVT_SHUTDOWN:
+ /* have default shutdown event take precedence */
+ node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
+ /* fall through */
+ case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO:
+ case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO:
+ node_printf(node, "%s received\n", ocs_sm_event_name(evt));
+ break;
+
+ default:
+ __ocs_node_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+
+ if (check_quiesce) {
+ ocs_node_check_els_quiesced(node);
+ }
+ return NULL;
+}
+
+/**
+ * @ingroup node_common
+ * @brief Node state machine: Wait for a HW node free event to
+ * complete.
+ *
+ * <h3 class="desc">Description</h3>
+ * State waits for the node free event to be received from the HW.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_node_wait_node_free(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+
+ case OCS_EVT_ENTER:
+ ocs_node_hold_frames(node);
+ break;
+
+ case OCS_EVT_EXIT:
+ ocs_node_accept_frames(node);
+ break;
+
+ case OCS_EVT_NODE_FREE_OK:
+ /* node is officially no longer attached */
+ node->attached = FALSE;
+ ocs_node_transition(node, __ocs_node_wait_ios_shutdown, NULL);
+ break;
+
+ case OCS_EVT_ALL_CHILD_NODES_FREE:
+ case OCS_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
+		/* As IOs and ELS IOs complete, we expect to get these events */
+ break;
+
+ case OCS_EVT_DOMAIN_ATTACH_OK:
+ /* don't care about domain_attach_ok */
+ break;
+
+ /* ignore shutdown events as we're already in shutdown path */
+ case OCS_EVT_SHUTDOWN:
+ /* have default shutdown event take precedence */
+ node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
+ /* Fall through */
+ case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO:
+ case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO:
+ node_printf(node, "%s received\n", ocs_sm_event_name(evt));
+ break;
+ default:
+ __ocs_node_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+/**
+ * @ingroup node_common
+ * @brief state: initiate node shutdown
+ *
+ * State is entered when a node receives a shutdown event, and it's waiting
+ * for all the active IOs and ELS IOs associated with the node to complete.
+ *
+ * @param ctx remote node sm context
+ * @param evt event to process
+ * @param arg per event optional argument
+ *
+ * @return returns NULL
+ */
+
+void *
+__ocs_node_wait_ios_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ ocs_io_t *io;
+ ocs_io_t *next;
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_ENTER:
+ ocs_node_hold_frames(node);
+
+ /* first check to see if no ELS IOs are outstanding */
+ if (ocs_els_io_list_empty(node, &node->els_io_active_list)) {
+		/* If there are any active IOs, free them. */
+ if (!ocs_node_active_ios_empty(node)) {
+ ocs_lock(&node->active_ios_lock);
+ ocs_list_foreach_safe(&node->active_ios, io, next) {
+ ocs_list_remove(&io->node->active_ios, io);
+ ocs_io_free(node->ocs, io);
+ }
+ ocs_unlock(&node->active_ios_lock);
+ }
+ ocs_node_transition(node, __ocs_node_shutdown, NULL);
+ }
+ break;
+
+ case OCS_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
+ case OCS_EVT_ALL_CHILD_NODES_FREE: {
+ if (ocs_node_active_ios_empty(node) &&
+ ocs_els_io_list_empty(node, &node->els_io_active_list)) {
+ ocs_node_transition(node, __ocs_node_shutdown, NULL);
+ }
+ break;
+ }
+
+ case OCS_EVT_EXIT:
+ ocs_node_accept_frames(node);
+ break;
+
+ case OCS_EVT_SRRS_ELS_REQ_FAIL:
+		/* Can happen as ELS IOs complete */
+ ocs_assert(node->els_req_cnt, NULL);
+ node->els_req_cnt--;
+ break;
+
+ /* ignore shutdown events as we're already in shutdown path */
+ case OCS_EVT_SHUTDOWN:
+ /* have default shutdown event take precedence */
+ node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
+ /* fall through */
+ case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO:
+ case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO:
+ ocs_log_debug(ocs, "[%s] %-20s\n", node->display_name, ocs_sm_event_name(evt));
+ break;
+ case OCS_EVT_DOMAIN_ATTACH_OK:
+ /* don't care about domain_attach_ok */
+ break;
+ default:
+ __ocs_node_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+/**
+ * @ingroup node_common
+ * @brief state: common node event handler
+ *
+ * Handle common/shared node events
+ *
+ * @param funcname calling function's name
+ * @param ctx remote node sm context
+ * @param evt event to process
+ * @param arg per event optional argument
+ *
+ * @return returns NULL
+ */
+
+void *
+__ocs_node_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ ocs_node_t *node = NULL;
+ ocs_t *ocs = NULL;
+ ocs_node_cb_t *cbdata = arg;
+ ocs_assert(ctx, NULL);
+ ocs_assert(ctx->app, NULL);
+ node = ctx->app;
+ ocs_assert(node->ocs, NULL);
+ ocs = node->ocs;
+
+ switch(evt) {
+ case OCS_EVT_ENTER:
+ case OCS_EVT_REENTER:
+ case OCS_EVT_EXIT:
+ case OCS_EVT_SPORT_TOPOLOGY_NOTIFY:
+ case OCS_EVT_NODE_MISSING:
+ case OCS_EVT_FCP_CMD_RCVD:
+ break;
+
+ case OCS_EVT_NODE_REFOUND:
+ node->refound = 1;
+ break;
+
+ /* node->attached must be set appropriately for all node attach/detach events */
+ case OCS_EVT_NODE_ATTACH_OK:
+ node->attached = TRUE;
+ break;
+
+ case OCS_EVT_NODE_FREE_OK:
+ case OCS_EVT_NODE_ATTACH_FAIL:
+ node->attached = FALSE;
+ break;
+
+ /* handle any ELS completions that other states either didn't care about
+ * or forgot about
+ */
+ case OCS_EVT_SRRS_ELS_CMPL_OK:
+ case OCS_EVT_SRRS_ELS_CMPL_FAIL:
+ ocs_assert(node->els_cmpl_cnt, NULL);
+ node->els_cmpl_cnt--;
+ break;
+
+ /* handle any ELS request completions that other states either didn't care about
+ * or forgot about
+ */
+ case OCS_EVT_SRRS_ELS_REQ_OK:
+ case OCS_EVT_SRRS_ELS_REQ_FAIL:
+ case OCS_EVT_SRRS_ELS_REQ_RJT:
+ case OCS_EVT_ELS_REQ_ABORTED:
+ ocs_assert(node->els_req_cnt, NULL);
+ node->els_req_cnt--;
+ break;
+
+ case OCS_EVT_ELS_RCVD: {
+ fc_header_t *hdr = cbdata->header->dma.virt;
+
+ /* Unsupported ELS was received, send LS_RJT, command not supported */
+ ocs_log_debug(ocs, "[%s] (%s) ELS x%02x, LS_RJT not supported\n",
+ node->display_name, funcname, ((uint8_t*)cbdata->payload->dma.virt)[0]);
+ ocs_send_ls_rjt(cbdata->io, ocs_be16toh(hdr->ox_id),
+ FC_REASON_COMMAND_NOT_SUPPORTED, FC_EXPL_NO_ADDITIONAL, 0,
+ NULL, NULL);
+ break;
+ }
+
+ case OCS_EVT_PLOGI_RCVD:
+ case OCS_EVT_FLOGI_RCVD:
+ case OCS_EVT_LOGO_RCVD:
+ case OCS_EVT_PRLI_RCVD:
+ case OCS_EVT_PRLO_RCVD:
+ case OCS_EVT_PDISC_RCVD:
+ case OCS_EVT_FDISC_RCVD:
+ case OCS_EVT_ADISC_RCVD:
+ case OCS_EVT_RSCN_RCVD:
+ case OCS_EVT_SCR_RCVD: {
+ fc_header_t *hdr = cbdata->header->dma.virt;
+ /* sm: / send ELS_RJT */
+ ocs_log_debug(ocs, "[%s] (%s) %s sending ELS_RJT\n",
+ node->display_name, funcname, ocs_sm_event_name(evt));
+ /* if we didn't catch this in a state, send generic LS_RJT */
+ ocs_send_ls_rjt(cbdata->io, ocs_be16toh(hdr->ox_id),
+ FC_REASON_UNABLE_TO_PERFORM, FC_EXPL_NO_ADDITIONAL, 0,
+ NULL, NULL);
+
+ break;
+ }
+ case OCS_EVT_GID_PT_RCVD:
+ case OCS_EVT_RFT_ID_RCVD:
+ case OCS_EVT_RFF_ID_RCVD: {
+ fc_header_t *hdr = cbdata->header->dma.virt;
+ ocs_log_debug(ocs, "[%s] (%s) %s sending CT_REJECT\n",
+ node->display_name, funcname, ocs_sm_event_name(evt));
+ ocs_send_ct_rsp(cbdata->io, hdr->ox_id, cbdata->payload->dma.virt, FCCT_HDR_CMDRSP_REJECT, FCCT_COMMAND_NOT_SUPPORTED, 0);
+ break;
+ }
+
+ case OCS_EVT_ABTS_RCVD: {
+ fc_header_t *hdr = cbdata->header->dma.virt;
+ ocs_log_debug(ocs, "[%s] (%s) %s sending BA_ACC\n",
+ node->display_name, funcname, ocs_sm_event_name(evt));
+
+ /* sm: send BA_ACC */
+ ocs_bls_send_acc_hdr(cbdata->io, hdr);
+ break;
+ }
+
+ default:
+ ocs_log_test(node->ocs, "[%s] %-20s %-20s not handled\n", node->display_name, funcname,
+ ocs_sm_event_name(evt));
+ break;
+ }
+ return NULL;
+}
+
+
+/**
+ * @ingroup node_common
+ * @brief save node service parameters
+ *
+ * Service parameters are copied into the node structure
+ *
+ * @param node pointer to node structure
+ * @param payload pointer to service parameters to save
+ *
+ * @return none
+ */
+
+void
+ocs_node_save_sparms(ocs_node_t *node, void *payload)
+{
+ ocs_memcpy(node->service_params, payload, sizeof(node->service_params));
+}
+
+/**
+ * @ingroup node_common
+ * @brief Post event to node state machine context
+ *
+ * This is used by the node state machine code to post events to the nodes. After
+ * the event is posted, if this is the outermost event call and we're not holding
+ * inbound frames, then the pending frames are processed.
+ *
+ * @param node pointer to node
+ * @param evt event to post
+ * @param arg event posting argument
+ *
+ * @return none
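+ *
+ * Example (sketch; mirrors how this file resumes a paused node):
+ *
+ * @code
+ * ocs_node_post_event(node, OCS_EVT_RESUME, NULL);
+ * @endcode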
+ */
+
+void
+ocs_node_post_event(ocs_node_t *node, ocs_sm_event_t evt, void *arg)
+{
+ int free_node = FALSE;
+ ocs_assert(node);
+
+ ocs_node_lock(node);
+ node->evtdepth ++;
+
+ ocs_sm_post_event(&node->sm, evt, arg);
+
+ /* If our event call depth is one and we're not holding frames
+ * then we can dispatch any pending frames. We don't want to allow
+ * the ocs_process_node_pending() call to recurse.
+ */
+ if (!node->hold_frames && (node->evtdepth == 1)) {
+ ocs_process_node_pending(node);
+ }
+ node->evtdepth --;
+
+ /* Free the node object if so requested, and we're at an event
+ * call depth of zero
+ */
+ if ((node->evtdepth == 0) && node->req_free) {
+ free_node = TRUE;
+ }
+ ocs_node_unlock(node);
+
+ if (free_node) {
+ ocs_node_free(node);
+ }
+
+ return;
+}
+
+/**
+ * @ingroup node_common
+ * @brief transition state of a node
+ *
+ * The node's state is transitioned to the requested state. Entry/Exit
+ * events are posted as needed.
+ *
+ * @param node pointer to node
+ * @param state state to transition to
+ * @param data transition data
+ *
+ * @return none
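+ *
+ * Example (sketch): a transition posts OCS_EVT_EXIT to the old state and
+ * OCS_EVT_ENTER to the new one (or OCS_EVT_REENTER if the state is unchanged):
+ *
+ * @code
+ * ocs_node_transition(node, __ocs_node_shutdown, NULL);
+ * @endcode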
+ */
+
+void
+ocs_node_transition(ocs_node_t *node, ocs_sm_function_t state, void *data)
+{
+ ocs_sm_ctx_t *ctx = &node->sm;
+
+ ocs_node_lock(node);
+ if (ctx->current_state == state) {
+ ocs_node_post_event(node, OCS_EVT_REENTER, data);
+ } else {
+ ocs_node_post_event(node, OCS_EVT_EXIT, data);
+ ctx->current_state = state;
+ ocs_node_post_event(node, OCS_EVT_ENTER, data);
+ }
+ ocs_node_unlock(node);
+}
+
+/**
+ * @ingroup node_common
+ * @brief build EUI formatted WWN
+ *
+ * Build a WWN using the transport-agnostic iSCSI naming convention; for FC the
+ * eui. format is used, an ASCII string such as "eui.10000000C9A19501".
+ *
+ * @param buffer buffer to place formatted name into
+ * @param buffer_len length in bytes of the buffer
+ * @param eui_name cpu endian 64 bit WWN value
+ *
+ * @return none
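+ *
+ * Example (sketch; the WWN value is taken from the description above):
+ *
+ * @code
+ * char buf[32];
+ * ocs_node_build_eui_name(buf, sizeof(buf), 0x10000000C9A19501ull);
+ * @endcode
+ *
+ * buf then holds "eui.10000000c9a19501" (the %016llx conversion emits
+ * lowercase hex).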
+ */
+
+void
+ocs_node_build_eui_name(char *buffer, uint32_t buffer_len, uint64_t eui_name)
+{
+ ocs_memset(buffer, 0, buffer_len);
+
+ ocs_snprintf(buffer, buffer_len, "eui.%016llx", (unsigned long long)eui_name);
+}
+
+/**
+ * @ingroup node_common
+ * @brief return node's WWPN as a uint64_t
+ *
+ * The WWPN is computed from service parameters and returned as a uint64_t
+ *
+ * @param node pointer to node structure
+ *
+ * @return WWPN
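+ *
+ * Worked example: with a big-endian port_name_hi of 0x10000000 and a
+ * port_name_lo of 0xC9A19501, the two halves are byte-swapped to host order
+ * and concatenated, yielding 0x10000000C9A19501.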
+ *
+ */
+
+uint64_t
+ocs_node_get_wwpn(ocs_node_t *node)
+{
+ fc_plogi_payload_t *sp = (fc_plogi_payload_t*) node->service_params;
+
+ return (((uint64_t)ocs_be32toh(sp->port_name_hi) << 32ll) | (ocs_be32toh(sp->port_name_lo)));
+}
+
+/**
+ * @ingroup node_common
+ * @brief return node's WWNN as a uint64_t
+ *
+ * The WWNN is computed from service parameters and returned as a uint64_t
+ *
+ * @param node pointer to node structure
+ *
+ * @return WWNN
+ *
+ */
+
+uint64_t
+ocs_node_get_wwnn(ocs_node_t *node)
+{
+ fc_plogi_payload_t *sp = (fc_plogi_payload_t*) node->service_params;
+
+ return (((uint64_t)ocs_be32toh(sp->node_name_hi) << 32ll) | (ocs_be32toh(sp->node_name_lo)));
+}
+
+/**
+ * @brief Generate node ddump data
+ *
+ * Generates the node ddump data
+ *
+ * @param textbuf pointer to text buffer
+ * @param node pointer to node context
+ *
+ * @return Returns 0 on success, or a negative value on failure.
+ */
+
+int
+ocs_ddump_node(ocs_textbuf_t *textbuf, ocs_node_t *node)
+{
+ ocs_io_t *io;
+ ocs_io_t *els;
+ int retval = 0;
+
+ ocs_ddump_section(textbuf, "node", node->instance_index);
+ ocs_ddump_value(textbuf, "display_name", "%s", node->display_name);
+ ocs_ddump_value(textbuf, "current_state", "%s", node->current_state_name);
+ ocs_ddump_value(textbuf, "prev_state", "%s", node->prev_state_name);
+ ocs_ddump_value(textbuf, "current_evt", "%s", ocs_sm_event_name(node->current_evt));
+ ocs_ddump_value(textbuf, "prev_evt", "%s", ocs_sm_event_name(node->prev_evt));
+
+ ocs_ddump_value(textbuf, "indicator", "%#x", node->rnode.indicator);
+ ocs_ddump_value(textbuf, "fc_id", "%#06x", node->rnode.fc_id);
+ ocs_ddump_value(textbuf, "attached", "%d", node->rnode.attached);
+
+ ocs_ddump_value(textbuf, "hold_frames", "%d", node->hold_frames);
+ ocs_ddump_value(textbuf, "io_alloc_enabled", "%d", node->io_alloc_enabled);
+ ocs_ddump_value(textbuf, "shutdown_reason", "%d", node->shutdown_reason);
+ ocs_ddump_value(textbuf, "send_ls_acc", "%d", node->send_ls_acc);
+ ocs_ddump_value(textbuf, "ls_acc_did", "%d", node->ls_acc_did);
+ ocs_ddump_value(textbuf, "ls_acc_oxid", "%#04x", node->ls_acc_oxid);
+ ocs_ddump_value(textbuf, "req_free", "%d", node->req_free);
+ ocs_ddump_value(textbuf, "els_req_cnt", "%d", node->els_req_cnt);
+ ocs_ddump_value(textbuf, "els_cmpl_cnt", "%d", node->els_cmpl_cnt);
+
+ ocs_ddump_value(textbuf, "targ", "%d", node->targ);
+ ocs_ddump_value(textbuf, "init", "%d", node->init);
+ ocs_ddump_value(textbuf, "wwnn", "%s", node->wwnn);
+ ocs_ddump_value(textbuf, "wwpn", "%s", node->wwpn);
+ ocs_ddump_value(textbuf, "login_state", "%d", (node->sm.current_state == __ocs_d_device_ready) ? 1 : 0);
+ ocs_ddump_value(textbuf, "chained_io_count", "%d", node->chained_io_count);
+ ocs_ddump_value(textbuf, "abort_cnt", "%d", node->abort_cnt);
+
+ ocs_display_sparams(NULL, "node_sparams", 1, textbuf, node->service_params+4);
+
+ ocs_lock(&node->pend_frames_lock);
+ if (!ocs_list_empty(&node->pend_frames)) {
+ ocs_hw_sequence_t *frame;
+ ocs_ddump_section(textbuf, "pending_frames", 0);
+ ocs_list_foreach(&node->pend_frames, frame) {
+ fc_header_t *hdr;
+ char buf[128];
+
+ hdr = frame->header->dma.virt;
+ ocs_snprintf(buf, sizeof(buf), "%02x/%04x/%04x len %zu",
+ hdr->r_ctl, ocs_be16toh(hdr->ox_id), ocs_be16toh(hdr->rx_id),
+ frame->payload->dma.len);
+ ocs_ddump_value(textbuf, "frame", "%s", buf);
+ }
+ ocs_ddump_endsection(textbuf, "pending_frames", 0);
+ }
+ ocs_unlock(&node->pend_frames_lock);
+
+ ocs_scsi_ini_ddump(textbuf, OCS_SCSI_DDUMP_NODE, node);
+ ocs_scsi_tgt_ddump(textbuf, OCS_SCSI_DDUMP_NODE, node);
+
+ ocs_lock(&node->active_ios_lock);
+ ocs_ddump_section(textbuf, "active_ios", 0);
+ ocs_list_foreach(&node->active_ios, io) {
+ ocs_ddump_io(textbuf, io);
+ }
+ ocs_ddump_endsection(textbuf, "active_ios", 0);
+
+ ocs_ddump_section(textbuf, "els_io_pend_list", 0);
+ ocs_list_foreach(&node->els_io_pend_list, els) {
+ ocs_ddump_els(textbuf, els);
+ }
+ ocs_ddump_endsection(textbuf, "els_io_pend_list", 0);
+
+ ocs_ddump_section(textbuf, "els_io_active_list", 0);
+ ocs_list_foreach(&node->els_io_active_list, els) {
+ ocs_ddump_els(textbuf, els);
+ }
+ ocs_ddump_endsection(textbuf, "els_io_active_list", 0);
+ ocs_unlock(&node->active_ios_lock);
+
+ ocs_ddump_endsection(textbuf, "node", node->instance_index);
+
+ return retval;
+}
+
+/**
+ * @brief check ELS request completion
+ *
+ * Check ELS request completion event to make sure it's for the
+ * ELS request we expect. If not, invoke given common event
+ * handler and return an error.
+ *
+ * @param ctx state machine context
+ * @param evt ELS request event
+ * @param arg event argument
+ * @param cmd ELS command expected
+ * @param node_common_func common event handler to call if ELS
+ * doesn't match
+ * @param funcname function name that called this
+ *
+ * @return zero if ELS command matches, -1 otherwise
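+ *
+ * Example (sketch of the usual call pattern; the enclosing state handler is
+ * hypothetical):
+ *
+ * @code
+ * case OCS_EVT_SRRS_ELS_REQ_OK:
+ *	if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI,
+ *	    __ocs_node_common, __func__) != 0) {
+ *		return NULL;
+ *	}
+ *	break;
+ * @endcode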
+ */
+int32_t
+node_check_els_req(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg, uint8_t cmd, ocs_node_common_func_t node_common_func, const char *funcname)
+{
+ ocs_node_t *node = NULL;
+ ocs_t *ocs = NULL;
+ ocs_node_cb_t *cbdata = arg;
+ fc_els_gen_t *els_gen = NULL;
+ ocs_assert(ctx, -1);
+ node = ctx->app;
+ ocs_assert(node, -1);
+ ocs = node->ocs;
+ ocs_assert(ocs, -1);
+ cbdata = arg;
+ ocs_assert(cbdata, -1);
+ ocs_assert(cbdata->els, -1);
+ els_gen = (fc_els_gen_t *)cbdata->els->els_req.virt;
+ ocs_assert(els_gen, -1);
+
+ if ((cbdata->els->hio_type != OCS_HW_ELS_REQ) || (els_gen->command_code != cmd)) {
+ if (cbdata->els->hio_type != OCS_HW_ELS_REQ) {
+ ocs_log_debug(node->ocs, "[%s] %-20s expecting ELS cmd=x%x received type=%d\n",
+ node->display_name, funcname, cmd, cbdata->els->hio_type);
+ } else {
+ ocs_log_debug(node->ocs, "[%s] %-20s expecting ELS cmd=x%x received cmd=x%x\n",
+ node->display_name, funcname, cmd, els_gen->command_code);
+ }
+ /* send event to common handler */
+ node_common_func(funcname, ctx, evt, arg);
+ return -1;
+ }
+ return 0;
+}
+
+/**
+ * @brief check NS request completion
+ *
+ * Check ELS request completion event to make sure it's for the
+ * nameserver request we expect. If not, invoke given common
+ * event handler and return an error.
+ *
+ * @param ctx state machine context
+ * @param evt ELS request event
+ * @param arg event argument
+ * @param cmd nameserver command expected
+ * @param node_common_func common event handler to call if
+ * nameserver cmd doesn't match
+ * @param funcname function name that called this
+ *
+ * @return zero if NS command matches, -1 otherwise
+ */
+int32_t
+node_check_ns_req(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg, uint32_t cmd, ocs_node_common_func_t node_common_func, const char *funcname)
+{
+ ocs_node_t *node = NULL;
+ ocs_t *ocs = NULL;
+ ocs_node_cb_t *cbdata = arg;
+ fcct_iu_header_t *fcct = NULL;
+ ocs_assert(ctx, -1);
+ node = ctx->app;
+ ocs_assert(node, -1);
+ ocs = node->ocs;
+ ocs_assert(ocs, -1);
+ cbdata = arg;
+ ocs_assert(cbdata, -1);
+ ocs_assert(cbdata->els, -1);
+ fcct = (fcct_iu_header_t *)cbdata->els->els_req.virt;
+ ocs_assert(fcct, -1);
+
+ if ((cbdata->els->hio_type != OCS_HW_FC_CT) || fcct->cmd_rsp_code != ocs_htobe16(cmd)) {
+ if (cbdata->els->hio_type != OCS_HW_FC_CT) {
+ ocs_log_debug(node->ocs, "[%s] %-20s expecting NS cmd=x%x received type=%d\n",
+ node->display_name, funcname, cmd, cbdata->els->hio_type);
+ } else {
+ ocs_log_debug(node->ocs, "[%s] %-20s expecting NS cmd=x%x received cmd=x%x\n",
+ node->display_name, funcname, cmd, fcct->cmd_rsp_code);
+ }
+ /* send event to common handler */
+ node_common_func(funcname, ctx, evt, arg);
+ return -1;
+ }
+ return 0;
+}
+
+
+void
+ocs_mgmt_node_list(ocs_textbuf_t *textbuf, void *object)
+{
+ ocs_io_t *io;
+ ocs_node_t *node = (ocs_node_t *)object;
+
+ ocs_mgmt_start_section(textbuf, "node", node->instance_index);
+
+ /* Readonly values */
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "display_name");
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "indicator");
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "fc_id");
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "attached");
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "hold_frames");
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "shutting_down");
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "req_free");
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "ox_id");
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "ox_id_in_use");
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "abort_cnt");
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "targ");
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "init");
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "wwpn");
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "wwnn");
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "pend_frames");
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "chained_io_count");
+
+ /* Actions */
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_EX, "resume");
+
+ ocs_lock(&node->active_ios_lock);
+ ocs_list_foreach(&node->active_ios, io) {
+ if ((io->mgmt_functions) && (io->mgmt_functions->get_list_handler)) {
+ io->mgmt_functions->get_list_handler(textbuf, io);
+ }
+ }
+ ocs_unlock(&node->active_ios_lock);
+
+ ocs_mgmt_end_section(textbuf, "node", node->instance_index);
+}
+
+int
+ocs_mgmt_node_get(ocs_textbuf_t *textbuf, char *parent, char *name, void *object)
+{
+ ocs_io_t *io;
+ ocs_node_t *node = (ocs_node_t *)object;
+ char qualifier[80];
+ int retval = -1;
+
+ ocs_mgmt_start_section(textbuf, "node", node->instance_index);
+
+ ocs_snprintf(qualifier, sizeof(qualifier), "%s/node[%d]", parent, node->instance_index);
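+	/* Illustrative example (parent path hypothetical): with parent "/ocs",
+	 * a name such as "/ocs/node[3]/wwpn" matches this node's qualifier and
+	 * the wwpn property is emitted below. */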
+
+ /* If it doesn't start with my qualifier I don't know what to do with it */
+ if (ocs_strncmp(name, qualifier, strlen(qualifier)) == 0) {
+ char *unqualified_name = name + strlen(qualifier) +1;
+
+ /* See if it's a value I can supply */
+ if (ocs_strcmp(unqualified_name, "display_name") == 0) {
+ ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "display_name", node->display_name);
+ retval = 0;
+ } else if (ocs_strcmp(unqualified_name, "indicator") == 0) {
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "indicator", "0x%x", node->rnode.indicator);
+ retval = 0;
+ } else if (ocs_strcmp(unqualified_name, "fc_id") == 0) {
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "fc_id", "0x%06x", node->rnode.fc_id);
+ retval = 0;
+ } else if (ocs_strcmp(unqualified_name, "attached") == 0) {
+ ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "attached", node->rnode.attached);
+ retval = 0;
+ } else if (ocs_strcmp(unqualified_name, "hold_frames") == 0) {
+ ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "hold_frames", node->hold_frames);
+ retval = 0;
+ } else if (ocs_strcmp(unqualified_name, "io_alloc_enabled") == 0) {
+ ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "io_alloc_enabled", node->io_alloc_enabled);
+ retval = 0;
+ } else if (ocs_strcmp(unqualified_name, "req_free") == 0) {
+ ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "req_free", node->req_free);
+ retval = 0;
+ } else if (ocs_strcmp(unqualified_name, "ls_acc_oxid") == 0) {
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "ls_acc_oxid", "0x%#04x", node->ls_acc_oxid);
+ retval = 0;
+ } else if (ocs_strcmp(unqualified_name, "ls_acc_did") == 0) {
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "ls_acc_did", "0x%#04x", node->ls_acc_did);
+ retval = 0;
+ } else if (ocs_strcmp(unqualified_name, "abort_cnt") == 0) {
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "abort_cnt", "%d", node->abort_cnt);
+ retval = 0;
+ } else if (ocs_strcmp(unqualified_name, "targ") == 0) {
+ ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "targ", node->targ);
+ retval = 0;
+ } else if (ocs_strcmp(unqualified_name, "init") == 0) {
+ ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "init", node->init);
+ retval = 0;
+ } else if (ocs_strcmp(unqualified_name, "wwpn") == 0) {
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "wwpn", "%s", node->wwpn);
+ retval = 0;
+ } else if (ocs_strcmp(unqualified_name, "wwnn") == 0) {
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "wwnn", "%s", node->wwnn);
+ retval = 0;
+ } else if (ocs_strcmp(unqualified_name, "current_state") == 0) {
+ ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "current_state", node->current_state_name);
+ retval = 0;
+ } else if (ocs_strcmp(unqualified_name, "login_state") == 0) {
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "login_state", "%d", (node->sm.current_state == __ocs_d_device_ready) ? 1 : 0);
+ retval = 0;
+ } else if (ocs_strcmp(unqualified_name, "pend_frames") == 0) {
+ ocs_hw_sequence_t *frame;
+ ocs_lock(&node->pend_frames_lock);
+ ocs_list_foreach(&node->pend_frames, frame) {
+ fc_header_t *hdr;
+ char buf[128];
+
+ hdr = frame->header->dma.virt;
+ ocs_snprintf(buf, sizeof(buf), "%02x/%04x/%04x len %zu", hdr->r_ctl,
+ ocs_be16toh(hdr->ox_id), ocs_be16toh(hdr->rx_id),
+ frame->payload->dma.len);
+ ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "pend_frames", buf);
+ }
+ ocs_unlock(&node->pend_frames_lock);
+ retval = 0;
+ } else if (ocs_strcmp(unqualified_name, "chained_io_count") == 0) {
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "chained_io_count", "%d", node->chained_io_count);
+ retval = 0;
+ } else {
+			/* If I didn't know the value of this status, pass the request to each of my children */
+ ocs_lock(&node->active_ios_lock);
+ ocs_list_foreach(&node->active_ios, io) {
+ if ((io->mgmt_functions) && (io->mgmt_functions->get_handler)) {
+ retval = io->mgmt_functions->get_handler(textbuf, qualifier, name, io);
+ }
+
+ if (retval == 0) {
+ break;
+ }
+ }
+ ocs_unlock(&node->active_ios_lock);
+ }
+ }
+
+ ocs_mgmt_end_section(textbuf, "node", node->instance_index);
+
+ return retval;
+}
+
+void
+ocs_mgmt_node_get_all(ocs_textbuf_t *textbuf, void *object)
+{
+ ocs_io_t *io;
+ ocs_node_t *node = (ocs_node_t *)object;
+ ocs_hw_sequence_t *frame;
+
+ ocs_mgmt_start_section(textbuf, "node", node->instance_index);
+
+ ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "display_name", node->display_name);
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "indicator", "0x%x", node->rnode.indicator);
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "fc_id", "0x%06x", node->rnode.fc_id);
+ ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "attached", node->rnode.attached);
+ ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "hold_frames", node->hold_frames);
+ ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "io_alloc_enabled", node->io_alloc_enabled);
+ ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "req_free", node->req_free);
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "ls_acc_oxid", "0x%#04x", node->ls_acc_oxid);
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "ls_acc_did", "0x%#04x", node->ls_acc_did);
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "abort_cnt", "%d", node->abort_cnt);
+ ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "targ", node->targ);
+ ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "init", node->init);
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "wwpn", "%s", node->wwpn);
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "wwnn", "%s", node->wwnn);
+
+ ocs_lock(&node->pend_frames_lock);
+ ocs_list_foreach(&node->pend_frames, frame) {
+ fc_header_t *hdr;
+ char buf[128];
+
+ hdr = frame->header->dma.virt;
+ ocs_snprintf(buf, sizeof(buf), "%02x/%04x/%04x len %zu", hdr->r_ctl,
+ ocs_be16toh(hdr->ox_id), ocs_be16toh(hdr->rx_id),
+ frame->payload->dma.len);
+ ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "pend_frames", buf);
+ }
+ ocs_unlock(&node->pend_frames_lock);
+
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "chained_io_count", "%d", node->chained_io_count);
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_EX, "resume");
+ ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "current_state", node->current_state_name);
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "login_state", "%d", (node->sm.current_state == __ocs_d_device_ready) ? 1 : 0);
+
+ ocs_lock(&node->active_ios_lock);
+ ocs_list_foreach(&node->active_ios, io) {
+ if ((io->mgmt_functions) && (io->mgmt_functions->get_all_handler)) {
+ io->mgmt_functions->get_all_handler(textbuf,io);
+ }
+ }
+ ocs_unlock(&node->active_ios_lock);
+
+ ocs_mgmt_end_section(textbuf, "node", node->instance_index);
+}
+
+int
+ocs_mgmt_node_set(char *parent, char *name, char *value, void *object)
+{
+ ocs_io_t *io;
+ ocs_node_t *node = (ocs_node_t *)object;
+ char qualifier[80];
+ int retval = -1;
+
+ ocs_snprintf(qualifier, sizeof(qualifier), "%s/node[%d]", parent, node->instance_index);
+
+ /* If it doesn't start with my qualifier I don't know what to do with it */
+ if (ocs_strncmp(name, qualifier, strlen(qualifier)) == 0) {
+
+ ocs_lock(&node->active_ios_lock);
+ ocs_list_foreach(&node->active_ios, io) {
+ if ((io->mgmt_functions) && (io->mgmt_functions->set_handler)) {
+ retval = io->mgmt_functions->set_handler(qualifier, name, value, io);
+ }
+
+ if (retval == 0) {
+ break;
+ }
+
+ }
+ ocs_unlock(&node->active_ios_lock);
+
+ }
+
+ return retval;
+}
+
+int
+ocs_mgmt_node_exec(char *parent, char *action, void *arg_in, uint32_t arg_in_length,
+ void *arg_out, uint32_t arg_out_length, void *object)
+{
+ ocs_io_t *io;
+ ocs_node_t *node = (ocs_node_t *)object;
+ char qualifier[80];
+ int retval = -1;
+
+ ocs_snprintf(qualifier, sizeof(qualifier), "%s.node%d", parent, node->instance_index);
+
+ /* If it doesn't start with my qualifier I don't know what to do with it */
+ if (ocs_strncmp(action, qualifier, strlen(qualifier)) == 0) {
+ char *unqualified_name = action + strlen(qualifier) +1;
+
+		if (ocs_strcmp(unqualified_name, "resume") == 0) {
+			ocs_node_post_event(node, OCS_EVT_RESUME, NULL);
+			retval = 0;
+		} else {
+			/* If I didn't know how to do this action pass the request to each of my children */
+			ocs_lock(&node->active_ios_lock);
+			ocs_list_foreach(&node->active_ios, io) {
+				if ((io->mgmt_functions) && (io->mgmt_functions->exec_handler)) {
+					retval = io->mgmt_functions->exec_handler(qualifier, action, arg_in, arg_in_length,
+									arg_out, arg_out_length, io);
+				}
+
+				if (retval == 0) {
+					break;
+				}
+			}
+			ocs_unlock(&node->active_ios_lock);
+		}
+ }
+
+ return retval;
+}
+
+/**
+ * @brief Return TRUE if active ios list is empty
+ *
+ * Test if node->active_ios list is empty while holding the node->active_ios_lock.
+ *
+ * @param node pointer to node object
+ *
+ * @return TRUE if node active ios list is empty
+ */
+
+int
+ocs_node_active_ios_empty(ocs_node_t *node)
+{
+ int empty;
+
+ ocs_lock(&node->active_ios_lock);
+ empty = ocs_list_empty(&node->active_ios);
+ ocs_unlock(&node->active_ios_lock);
+ return empty;
+}
+
+/**
+ * @brief Pause a node
+ *
+ * The node is placed in the __ocs_node_paused state after saving the state
+ * to return to.
+ *
+ * @param node Pointer to node object
+ * @param state State to resume to
+ *
+ * @return none
+ */
+
+void
+ocs_node_pause(ocs_node_t *node, ocs_sm_function_t state)
+{
+ node->nodedb_state = state;
+ ocs_node_transition(node, __ocs_node_paused, NULL);
+}
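+
+/*
+ * Usage sketch (illustrative only, not called by the driver): pause a
+ * node and resume it later. __ocs_d_device_ready is used as the saved
+ * state purely as an example of an ocs_sm_function_t; any node state
+ * function may be supplied.
+ *
+ *	ocs_node_pause(node, __ocs_d_device_ready);
+ *	...
+ *	ocs_node_resume(node);	// posts OCS_EVT_RESUME; __ocs_node_paused()
+ *				// then transitions back to the saved state
+ */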
+
+/**
+ * @brief Paused node state
+ *
+ * This state is entered when a node is paused. When resumed, the node
+ * is transitioned to the previously saved state (node->nodedb_state).
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return returns NULL
+ */
+
+void *
+__ocs_node_paused(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_node_state_decl();
+
+ node_sm_trace();
+
+ switch(evt) {
+ case OCS_EVT_ENTER:
+ node_printf(node, "Paused\n");
+ break;
+
+ case OCS_EVT_RESUME: {
+ ocs_sm_function_t pf = node->nodedb_state;
+
+ node->nodedb_state = NULL;
+ ocs_node_transition(node, pf, NULL);
+ break;
+ }
+
+ case OCS_EVT_DOMAIN_ATTACH_OK:
+ break;
+
+ case OCS_EVT_SHUTDOWN:
+ node->req_free = 1;
+ break;
+
+ default:
+ __ocs_node_common(__func__, ctx, evt, arg);
+ break;
+ }
+ return NULL;
+}
+
+/**
+ * @brief Resume a paused state
+ *
+ * Posts a resume event to the paused node.
+ *
+ * @param node Pointer to node object
+ *
+ * @return returns 0 for success, a negative error code value for failure.
+ */
+
+int32_t
+ocs_node_resume(ocs_node_t *node)
+{
+ ocs_assert(node != NULL, -1);
+
+ ocs_node_post_event(node, OCS_EVT_RESUME, NULL);
+
+ return 0;
+}
+
+/**
+ * @ingroup node_common
+ * @brief Dispatch an ELS frame.
+ *
+ * <h3 class="desc">Description</h3>
+ * An ELS frame is dispatched to the \c node state machine.
+ * RQ Pair mode: this function is always called with a NULL hw
+ * io.
+ *
+ * @param node Node that originated the frame.
+ * @param seq header/payload sequence buffers
+ *
+ * @return Returns 0 if frame processed and RX buffers cleaned
+ * up appropriately, -1 if frame not handled and RX buffers need
+ * to be returned.
+ */
+
+int32_t
+ocs_node_recv_els_frame(ocs_node_t *node, ocs_hw_sequence_t *seq)
+{
+ struct {
+ uint32_t cmd;
+ ocs_sm_event_t evt;
+ uint32_t payload_size;
+ } els_cmd_list[] = {
+ {FC_ELS_CMD_PLOGI, OCS_EVT_PLOGI_RCVD, sizeof(fc_plogi_payload_t)},
+ {FC_ELS_CMD_FLOGI, OCS_EVT_FLOGI_RCVD, sizeof(fc_plogi_payload_t)},
+ {FC_ELS_CMD_LOGO, OCS_EVT_LOGO_RCVD, sizeof(fc_acc_payload_t)},
+ {FC_ELS_CMD_RRQ, OCS_EVT_RRQ_RCVD, sizeof(fc_acc_payload_t)},
+ {FC_ELS_CMD_PRLI, OCS_EVT_PRLI_RCVD, sizeof(fc_prli_payload_t)},
+ {FC_ELS_CMD_PRLO, OCS_EVT_PRLO_RCVD, sizeof(fc_prlo_payload_t)},
+ {FC_ELS_CMD_PDISC, OCS_EVT_PDISC_RCVD, MAX_ACC_REJECT_PAYLOAD},
+ {FC_ELS_CMD_FDISC, OCS_EVT_FDISC_RCVD, MAX_ACC_REJECT_PAYLOAD},
+ {FC_ELS_CMD_ADISC, OCS_EVT_ADISC_RCVD, sizeof(fc_adisc_payload_t)},
+ {FC_ELS_CMD_RSCN, OCS_EVT_RSCN_RCVD, MAX_ACC_REJECT_PAYLOAD},
+		{FC_ELS_CMD_SCR,	OCS_EVT_SCR_RCVD,	MAX_ACC_REJECT_PAYLOAD},
+ };
+ ocs_t *ocs = node->ocs;
+ ocs_node_cb_t cbdata;
+ fc_header_t *hdr = seq->header->dma.virt;
+ uint8_t *buf = seq->payload->dma.virt;
+ ocs_sm_event_t evt = OCS_EVT_ELS_RCVD;
+ uint32_t payload_size = MAX_ACC_REJECT_PAYLOAD;
+ uint32_t i;
+
+ ocs_memset(&cbdata, 0, sizeof(cbdata));
+ cbdata.header = seq->header;
+ cbdata.payload = seq->payload;
+
+ /* find a matching event for the ELS command */
+	for (i = 0; i < ARRAY_SIZE(els_cmd_list); i++) {
+ if (els_cmd_list[i].cmd == buf[0]) {
+ evt = els_cmd_list[i].evt;
+ payload_size = els_cmd_list[i].payload_size;
+ break;
+ }
+ }
+
+ switch(evt) {
+ case OCS_EVT_FLOGI_RCVD:
+ ocs_display_sparams(node->display_name, "flogi rcvd req", 0, NULL, ((uint8_t*)seq->payload->dma.virt)+4);
+ break;
+ case OCS_EVT_FDISC_RCVD:
+ ocs_display_sparams(node->display_name, "fdisc rcvd req", 0, NULL, ((uint8_t*)seq->payload->dma.virt)+4);
+ break;
+ case OCS_EVT_PLOGI_RCVD:
+ ocs_display_sparams(node->display_name, "plogi rcvd req", 0, NULL, ((uint8_t*)seq->payload->dma.virt)+4);
+ break;
+ default:
+ break;
+ }
+
+ cbdata.io = ocs_els_io_alloc(node, payload_size, OCS_ELS_ROLE_RESPONDER);
+
+ if (cbdata.io != NULL) {
+ cbdata.io->hw_priv = seq->hw_priv;
+ /* if we're here, sequence initiative has been transferred */
+ cbdata.io->seq_init = 1;
+
+ ocs_node_post_event(node, evt, &cbdata);
+ } else {
+ node_printf(node, "failure to allocate SCSI IO for ELS s_id %06x d_id %06x ox_id %04x rx_id %04x\n",
+ fc_be24toh(hdr->s_id), fc_be24toh(hdr->d_id), ocs_be16toh(hdr->ox_id), ocs_be16toh(hdr->rx_id));
+ }
+ ocs_hw_sequence_free(&ocs->hw, seq);
+ return 0;
+}
+
+/**
+ * @ingroup node_common
+ * @brief Dispatch an ABTS frame (RQ Pair/sequence coalescing).
+ *
+ * <h3 class="desc">Description</h3>
+ * An ABTS frame is dispatched to the node state machine. This
+ * function is used for both RQ Pair and sequence coalescing.
+ *
+ * @param node Node that originated the frame.
+ * @param seq Header/payload sequence buffers
+ *
+ * @return Returns 0 if frame processed and RX buffers cleaned
+ * up appropriately, -1 if frame not handled and RX buffers need
+ * to be returned.
+ */
+
+int32_t
+ocs_node_recv_abts_frame(ocs_node_t *node, ocs_hw_sequence_t *seq)
+{
+ ocs_t *ocs = node->ocs;
+ ocs_xport_t *xport = ocs->xport;
+ fc_header_t *hdr = seq->header->dma.virt;
+ uint16_t ox_id = ocs_be16toh(hdr->ox_id);
+ uint16_t rx_id = ocs_be16toh(hdr->rx_id);
+ ocs_node_cb_t cbdata;
+ int32_t rc = 0;
+
+ node->abort_cnt++;
+
+ /*
+	 * Check to see if the IO we want to abort is active; if it is not
+	 * active, then we can send the BA_ACC using the send frame option.
+ */
+ if (ocs_io_find_tgt_io(ocs, node, ox_id, rx_id) == NULL) {
+ uint32_t send_frame_capable;
+
+ ocs_log_debug(ocs, "IO not found (ox_id %04x)\n", ox_id);
+
+ /* If we have SEND_FRAME capability, then use it to send BA_ACC */
+ rc = ocs_hw_get(&ocs->hw, OCS_HW_SEND_FRAME_CAPABLE, &send_frame_capable);
+ if ((rc == 0) && send_frame_capable) {
+ rc = ocs_sframe_send_bls_acc(node, seq);
+ if (rc) {
+				ocs_log_test(ocs, "ocs_sframe_send_bls_acc failed\n");
+ }
+ return rc;
+ }
+ /* continuing */
+ }
+
+ ocs_memset(&cbdata, 0, sizeof(cbdata));
+ cbdata.header = seq->header;
+ cbdata.payload = seq->payload;
+
+ cbdata.io = ocs_scsi_io_alloc(node, OCS_SCSI_IO_ROLE_RESPONDER);
+ if (cbdata.io != NULL) {
+ cbdata.io->hw_priv = seq->hw_priv;
+ /* If we got this far, SIT=1 */
+ cbdata.io->seq_init = 1;
+
+ /* fill out generic fields */
+ cbdata.io->ocs = ocs;
+ cbdata.io->node = node;
+ cbdata.io->cmd_tgt = TRUE;
+
+ ocs_node_post_event(node, OCS_EVT_ABTS_RCVD, &cbdata);
+ } else {
+ ocs_atomic_add_return(&xport->io_alloc_failed_count, 1);
+ node_printf(node, "SCSI IO allocation failed for ABTS received s_id %06x d_id %06x ox_id %04x rx_id %04x\n",
+ fc_be24toh(hdr->s_id), fc_be24toh(hdr->d_id), ocs_be16toh(hdr->ox_id), ocs_be16toh(hdr->rx_id));
+ }
+
+ /* ABTS processed, return RX buffer to the chip */
+ ocs_hw_sequence_free(&ocs->hw, seq);
+ return 0;
+}
+
+/**
+ * @ingroup node_common
+ * @brief Dispatch a CT frame.
+ *
+ * <h3 class="desc">Description</h3>
+ * A CT frame is dispatched to the \c node state machine.
+ * RQ Pair mode: this function is always called with a NULL hw
+ * io.
+ *
+ * @param node Node that originated the frame.
+ * @param seq header/payload sequence buffers
+ *
+ * @return Returns 0 if frame processed and RX buffers cleaned
+ * up appropriately, -1 if frame not handled and RX buffers need
+ * to be returned.
+ */
+
+int32_t
+ocs_node_recv_ct_frame(ocs_node_t *node, ocs_hw_sequence_t *seq)
+{
+ ocs_t *ocs = node->ocs;
+ fc_header_t *hdr = seq->header->dma.virt;
+ fcct_iu_header_t *iu = seq->payload->dma.virt;
+ ocs_sm_event_t evt = OCS_EVT_ELS_RCVD;
+ uint32_t payload_size = MAX_ACC_REJECT_PAYLOAD;
+ uint16_t gscmd = ocs_be16toh(iu->cmd_rsp_code);
+ ocs_node_cb_t cbdata;
+ uint32_t i;
+ struct {
+ uint32_t cmd;
+ ocs_sm_event_t evt;
+ uint32_t payload_size;
+ } ct_cmd_list[] = {
+ {FC_GS_NAMESERVER_RFF_ID, OCS_EVT_RFF_ID_RCVD, 100},
+ {FC_GS_NAMESERVER_RFT_ID, OCS_EVT_RFT_ID_RCVD, 100},
+ {FC_GS_NAMESERVER_GNN_ID, OCS_EVT_GNN_ID_RCVD, 100},
+ {FC_GS_NAMESERVER_GPN_ID, OCS_EVT_GPN_ID_RCVD, 100},
+ {FC_GS_NAMESERVER_GFPN_ID, OCS_EVT_GFPN_ID_RCVD, 100},
+ {FC_GS_NAMESERVER_GFF_ID, OCS_EVT_GFF_ID_RCVD, 100},
+ {FC_GS_NAMESERVER_GID_FT, OCS_EVT_GID_FT_RCVD, 256},
+ {FC_GS_NAMESERVER_GID_PT, OCS_EVT_GID_PT_RCVD, 256},
+ {FC_GS_NAMESERVER_RPN_ID, OCS_EVT_RPN_ID_RCVD, 100},
+ {FC_GS_NAMESERVER_RNN_ID, OCS_EVT_RNN_ID_RCVD, 100},
+ {FC_GS_NAMESERVER_RCS_ID, OCS_EVT_RCS_ID_RCVD, 100},
+ {FC_GS_NAMESERVER_RSNN_NN, OCS_EVT_RSNN_NN_RCVD, 100},
+ {FC_GS_NAMESERVER_RSPN_ID, OCS_EVT_RSPN_ID_RCVD, 100},
+ {FC_GS_NAMESERVER_RHBA, OCS_EVT_RHBA_RCVD, 100},
+ {FC_GS_NAMESERVER_RPA, OCS_EVT_RPA_RCVD, 100},
+ };
+
+ ocs_memset(&cbdata, 0, sizeof(cbdata));
+ cbdata.header = seq->header;
+ cbdata.payload = seq->payload;
+
+ /* find a matching event for the ELS/GS command */
+	for (i = 0; i < ARRAY_SIZE(ct_cmd_list); i++) {
+ if (ct_cmd_list[i].cmd == gscmd) {
+ evt = ct_cmd_list[i].evt;
+ payload_size = ct_cmd_list[i].payload_size;
+ break;
+ }
+ }
+
+ /* Allocate an IO and send a reject */
+ cbdata.io = ocs_els_io_alloc(node, payload_size, OCS_ELS_ROLE_RESPONDER);
+ if (cbdata.io == NULL) {
+ node_printf(node, "GS IO failed for s_id %06x d_id %06x ox_id %04x rx_id %04x\n",
+ fc_be24toh(hdr->s_id), fc_be24toh(hdr->d_id),
+ ocs_be16toh(hdr->ox_id), ocs_be16toh(hdr->rx_id));
+ return -1;
+ }
+ cbdata.io->hw_priv = seq->hw_priv;
+ ocs_node_post_event(node, evt, &cbdata);
+
+ ocs_hw_sequence_free(&ocs->hw, seq);
+ return 0;
+}
+
+/**
+ * @ingroup node_common
+ * @brief Dispatch a FCP command frame when the node is not ready.
+ *
+ * <h3 class="desc">Description</h3>
+ * A frame is dispatched to the \c node state machine.
+ *
+ * @param node Node that originated the frame.
+ * @param seq header/payload sequence buffers
+ *
+ * @return Returns 0 if frame processed and RX buffers cleaned
+ * up appropriately, -1 if frame not handled.
+ */
+
+int32_t
+ocs_node_recv_fcp_cmd(ocs_node_t *node, ocs_hw_sequence_t *seq)
+{
+ ocs_node_cb_t cbdata;
+ ocs_t *ocs = node->ocs;
+
+ ocs_memset(&cbdata, 0, sizeof(cbdata));
+ cbdata.header = seq->header;
+ cbdata.payload = seq->payload;
+ ocs_node_post_event(node, OCS_EVT_FCP_CMD_RCVD, &cbdata);
+ ocs_hw_sequence_free(&ocs->hw, seq);
+ return 0;
+}
+
+/**
+ * @ingroup node_common
+ * @brief Stub handler for non-ABTS BLS frames
+ *
+ * <h3 class="desc">Description</h3>
+ * Log the frame and drop it. Customers can plumb this to their back end as needed.
+ *
+ * @param node Node that originated the frame.
+ * @param seq header/payload sequence buffers
+ *
+ * @return Returns -1; the frame is not consumed, so the caller must
+ * return the RX buffers.
+ */
+
+int32_t
+ocs_node_recv_bls_no_sit(ocs_node_t *node, ocs_hw_sequence_t *seq)
+{
+ fc_header_t *hdr = seq->header->dma.virt;
+
+ node_printf(node, "Dropping frame hdr = %08x %08x %08x %08x %08x %08x\n",
+ ocs_htobe32(((uint32_t *)hdr)[0]),
+ ocs_htobe32(((uint32_t *)hdr)[1]),
+ ocs_htobe32(((uint32_t *)hdr)[2]),
+ ocs_htobe32(((uint32_t *)hdr)[3]),
+ ocs_htobe32(((uint32_t *)hdr)[4]),
+ ocs_htobe32(((uint32_t *)hdr)[5]));
+
+ return -1;
+}
diff --git a/sys/dev/ocs_fc/ocs_node.h b/sys/dev/ocs_fc/ocs_node.h
new file mode 100644
index 000000000000..d173c763bfd3
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs_node.h
@@ -0,0 +1,240 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/**
+ * @file
+ * OCS FreeBSD driver remote node callback declarations
+ */
+
+#if !defined(__OCS_NODE_H__)
+#define __OCS_NODE_H__
+
+
+#define node_sm_trace() \
+ do { \
+ if (OCS_LOG_ENABLE_SM_TRACE(node->ocs)) \
+ ocs_log_info(node->ocs, "[%s] %-20s\n", node->display_name, ocs_sm_event_name(evt)); \
+ } while (0)
+
+#define node_printf(node, fmt, ...) ocs_log_debug(node->ocs, "[%s] " fmt, node->display_name, ##__VA_ARGS__)
+
+#define std_node_state_decl(...) \
+ ocs_node_t *node = NULL; \
+ ocs_t *ocs = NULL; \
+ node = ctx->app; \
+ ocs_assert(node, NULL); \
+ ocs = node->ocs; \
+ ocs_assert(ocs, NULL); \
+ if (evt == OCS_EVT_ENTER) { \
+ ocs_strncpy(node->current_state_name, __func__, sizeof(node->current_state_name)); \
+ } else if (evt == OCS_EVT_EXIT) { \
+ ocs_strncpy(node->prev_state_name, node->current_state_name, sizeof(node->prev_state_name)); \
+ ocs_strncpy(node->current_state_name, "invalid", sizeof(node->current_state_name)); \
+ } \
+ node->prev_evt = node->current_evt; \
+ node->current_evt = evt;
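+
+/*
+ * Usage sketch (illustrative only): the shape of a node state function
+ * built from the macros above. __ocs_d_example is a hypothetical state
+ * name; real states follow this same pattern.
+ *
+ *	void *
+ *	__ocs_d_example(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+ *	{
+ *		std_node_state_decl();	// declares/validates node and ocs
+ *
+ *		node_sm_trace();	// optional state machine trace
+ *
+ *		switch (evt) {
+ *		case OCS_EVT_ENTER:
+ *			node_printf(node, "entered example state\n");
+ *			break;
+ *		default:
+ *			// unhandled events go to the common handler
+ *			__ocs_node_common(__func__, ctx, evt, arg);
+ *			break;
+ *		}
+ *		return NULL;
+ *	}
+ */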
+
+#define OCS_NODEDB_PAUSE_FABRIC_LOGIN (1U << 0)
+#define OCS_NODEDB_PAUSE_NAMESERVER (1U << 1)
+#define OCS_NODEDB_PAUSE_NEW_NODES (1U << 2)
+
+/**
+ * @brief Node SM IO Context Callback structure
+ *
+ * Structure used as callback argument
+ */
+
+struct ocs_node_cb_s {
+ ocs_io_t *io; /**< SCSI IO for sending response */
+ int32_t status; /**< completion status */
+ int32_t ext_status; /**< extended completion status */
+ ocs_hw_rq_buffer_t *header; /**< completion header buffer */
+ ocs_hw_rq_buffer_t *payload; /**< completion payload buffers */
+ ocs_io_t *els; /**< ELS IO object */
+};
+
+/**
+ * @brief hold frames in pending frame list
+ *
+ * Unsolicited receive frames are held on the node pending frame list, rather than
+ * being processed.
+ *
+ * @param node pointer to node structure
+ *
+ * @return none
+ */
+
+static inline void
+ocs_node_hold_frames(ocs_node_t *node)
+{
+ ocs_assert(node);
+ node->hold_frames = TRUE;
+}
+
+/**
+ * @brief accept frames
+ *
+ * Unsolicited receive frames are processed rather than being held on the node
+ * pending frame list.
+ *
+ * @param node pointer to node structure
+ *
+ * @return none
+ */
+
+static inline void
+ocs_node_accept_frames(ocs_node_t *node)
+{
+ ocs_assert(node);
+ node->hold_frames = FALSE;
+}
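+
+/*
+ * Usage sketch (illustrative only): a state typically holds frames while
+ * a transition (for example a node attach) is in flight, then accepts
+ * them again once the node is ready:
+ *
+ *	ocs_node_hold_frames(node);	// queue unsolicited frames
+ *	... complete the transition ...
+ *	ocs_node_accept_frames(node);	// resume normal frame dispatch
+ */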
+
+extern int32_t ocs_node_create_pool(ocs_t *ocs, uint32_t node_count);
+extern void ocs_node_free_pool(ocs_t *ocs);
+extern ocs_node_t *ocs_node_get_instance(ocs_t *ocs, uint32_t index);
+
+static inline void
+ocs_node_lock_init(ocs_node_t *node)
+{
+ ocs_rlock_init(node->ocs, &node->lock, "node rlock");
+}
+
+static inline void
+ocs_node_lock_free(ocs_node_t *node)
+{
+ ocs_rlock_free(&node->lock);
+}
+
+static inline int32_t
+ocs_node_lock_try(ocs_node_t *node)
+{
+ return ocs_rlock_try(&node->lock);
+}
+
+static inline void
+ocs_node_lock(ocs_node_t *node)
+{
+ ocs_rlock_acquire(&node->lock);
+}
+static inline void
+ocs_node_unlock(ocs_node_t *node)
+{
+ ocs_rlock_release(&node->lock);
+}
+
+/**
+ * @brief Node initiator/target enable defines
+ *
+ * All combinations of the SLI port (sport) initiator/target enable, and remote
+ * node initiator/target enable are enumerated.
+ *
+ */
+
+typedef enum {
+ OCS_NODE_ENABLE_x_TO_x,
+ OCS_NODE_ENABLE_x_TO_T,
+ OCS_NODE_ENABLE_x_TO_I,
+ OCS_NODE_ENABLE_x_TO_IT,
+ OCS_NODE_ENABLE_T_TO_x,
+ OCS_NODE_ENABLE_T_TO_T,
+ OCS_NODE_ENABLE_T_TO_I,
+ OCS_NODE_ENABLE_T_TO_IT,
+ OCS_NODE_ENABLE_I_TO_x,
+ OCS_NODE_ENABLE_I_TO_T,
+ OCS_NODE_ENABLE_I_TO_I,
+ OCS_NODE_ENABLE_I_TO_IT,
+ OCS_NODE_ENABLE_IT_TO_x,
+ OCS_NODE_ENABLE_IT_TO_T,
+ OCS_NODE_ENABLE_IT_TO_I,
+ OCS_NODE_ENABLE_IT_TO_IT,
+} ocs_node_enable_e;
+
+static inline ocs_node_enable_e ocs_node_get_enable(ocs_node_t *node)
+{
+ uint32_t retval = 0;
+
+ if (node->sport->enable_ini) retval |= (1U << 3);
+ if (node->sport->enable_tgt) retval |= (1U << 2);
+ if (node->init) retval |= (1U << 1);
+ if (node->targ) retval |= (1U << 0);
+ return (ocs_node_enable_e) retval;
+}
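+
+/*
+ * Example encoding (follows directly from the bit assignments above): a
+ * sport with both initiator and target enabled (bits 3 and 2) paired
+ * with a remote node that is a target only (bit 0) yields 0xD, which is
+ * OCS_NODE_ENABLE_IT_TO_T.
+ */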
+
+typedef void* (*ocs_node_common_func_t)(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+
+extern int32_t node_check_els_req(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg, uint8_t cmd, ocs_node_common_func_t node_common_func, const char *funcname);
+extern int32_t node_check_ns_req(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg, uint32_t cmd, ocs_node_common_func_t node_common_func, const char *funcname);
+extern int32_t ocs_remote_node_cb(void *arg, ocs_hw_remote_node_event_e event, void *data);
+extern int32_t ocs_node_attach(ocs_node_t *node);
+extern ocs_node_t *ocs_node_find(ocs_sport_t *sport, uint32_t port_id);
+extern ocs_node_t *ocs_node_find_wwpn(ocs_sport_t *sport, uint64_t wwpn);
+extern void ocs_node_dump(ocs_t *ocs);
+extern ocs_node_t *ocs_node_alloc(ocs_sport_t *sport, uint32_t port_id, uint8_t init, uint8_t targ);
+extern int32_t ocs_node_free(ocs_node_t *node);
+extern void ocs_node_force_free(ocs_node_t *node);
+extern void ocs_node_fcid_display(uint32_t fc_id, char *buffer, uint32_t buffer_length);
+extern void ocs_node_update_display_name(ocs_node_t *node);
+
+extern void *__ocs_node_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void * __ocs_node_wait_node_free(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_node_wait_els_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_node_wait_ios_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void ocs_node_save_sparms(ocs_node_t *node, void *payload);
+extern void ocs_node_post_event(ocs_node_t *node, ocs_sm_event_t evt, void *arg);
+extern void ocs_node_transition(ocs_node_t *node, ocs_sm_function_t state, void *data);
+extern void *__ocs_node_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+
+
+extern void ocs_node_initiate_cleanup(ocs_node_t *node);
+extern int ocs_ddump_node(ocs_textbuf_t *textbuf, ocs_node_t *node);
+
+extern void ocs_node_build_eui_name(char *buffer, uint32_t buffer_len, uint64_t eui_name);
+extern uint64_t ocs_node_get_wwpn(ocs_node_t *node);
+extern uint64_t ocs_node_get_wwnn(ocs_node_t *node);
+extern void ocs_node_abort_all_els(ocs_node_t *node);
+
+extern void ocs_node_pause(ocs_node_t *node, ocs_sm_function_t state);
+extern int32_t ocs_node_resume(ocs_node_t *node);
+extern void *__ocs_node_paused(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+
+extern int ocs_node_active_ios_empty(ocs_node_t *node);
+extern void ocs_node_send_ls_io_cleanup(ocs_node_t *node);
+
+extern int32_t ocs_node_recv_link_services_frame(ocs_node_t *node, ocs_hw_sequence_t *seq);
+extern int32_t ocs_node_recv_abts_frame(ocs_node_t *node, ocs_hw_sequence_t *seq);
+extern int32_t ocs_node_recv_els_frame(ocs_node_t *node, ocs_hw_sequence_t *seq);
+extern int32_t ocs_node_recv_ct_frame(ocs_node_t *node, ocs_hw_sequence_t *seq);
+extern int32_t ocs_node_recv_fcp_cmd(ocs_node_t *node, ocs_hw_sequence_t *seq);
+extern int32_t ocs_node_recv_bls_no_sit(ocs_node_t *node, ocs_hw_sequence_t *seq);
+
+#endif
diff --git a/sys/dev/ocs_fc/ocs_os.c b/sys/dev/ocs_fc/ocs_os.c
new file mode 100644
index 000000000000..9e9045edd215
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs_os.c
@@ -0,0 +1,1046 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/**
+ * @file
+ * Implementation of common BSD OS abstraction functions
+ */
+
+#include "ocs.h"
+#include <sys/sysctl.h>
+#include <sys/malloc.h>
+#include <sys/linker.h> /* for debug of memory allocations */
+
+static MALLOC_DEFINE(M_OCS, "OCS", "OneCore Storage data");
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include <machine/bus.h>
+
+timeout_t __ocs_callout;
+
+uint32_t
+ocs_config_read32(ocs_os_handle_t os, uint32_t reg)
+{
+ return pci_read_config(os->dev, reg, 4);
+}
+
+uint16_t
+ocs_config_read16(ocs_os_handle_t os, uint32_t reg)
+{
+ return pci_read_config(os->dev, reg, 2);
+}
+
+uint8_t
+ocs_config_read8(ocs_os_handle_t os, uint32_t reg)
+{
+ return pci_read_config(os->dev, reg, 1);
+}
+
+void
+ocs_config_write8(ocs_os_handle_t os, uint32_t reg, uint8_t val)
+{
+	pci_write_config(os->dev, reg, val, 1);
+}
+
+void
+ocs_config_write16(ocs_os_handle_t os, uint32_t reg, uint16_t val)
+{
+	pci_write_config(os->dev, reg, val, 2);
+}
+
+void
+ocs_config_write32(ocs_os_handle_t os, uint32_t reg, uint32_t val)
+{
+	pci_write_config(os->dev, reg, val, 4);
+}
+
+/**
+ * @ingroup os
+ * @brief Read a 32-bit PCI register
+ *
+ * The SLI documentation uses the term "register set" to describe one or more
+ * PCI BARs which form a logical address. For example, a 64-bit address uses
+ * two BARs, and thus constitutes a register set.
+ *
+ * @param ocs Pointer to the driver's context
+ * @param rset Register Set to use
+ * @param off Offset from the base address of the Register Set
+ *
+ * @return register value
+ */
+uint32_t
+ocs_reg_read32(ocs_t *ocs, uint32_t rset, uint32_t off)
+{
+ ocs_pci_reg_t *reg = NULL;
+
+ reg = &ocs->reg[rset];
+
+ return bus_space_read_4(reg->btag, reg->bhandle, off);
+}
+
+/**
+ * @ingroup os
+ * @brief Read a 16-bit PCI register
+ *
+ * The SLI documentation uses the term "register set" to describe one or more
+ * PCI BARs which form a logical address. For example, a 64-bit address uses
+ * two BARs, and thus constitutes a register set.
+ *
+ * @param ocs Pointer to the driver's context
+ * @param rset Register Set to use
+ * @param off Offset from the base address of the Register Set
+ *
+ * @return register value
+ */
+uint16_t
+ocs_reg_read16(ocs_t *ocs, uint32_t rset, uint32_t off)
+{
+ ocs_pci_reg_t *reg = NULL;
+
+ reg = &ocs->reg[rset];
+
+ return bus_space_read_2(reg->btag, reg->bhandle, off);
+}
+
+/**
+ * @ingroup os
+ * @brief Read an 8-bit PCI register
+ *
+ * The SLI documentation uses the term "register set" to describe one or more
+ * PCI BARs which form a logical address. For example, a 64-bit address uses
+ * two BARs, and thus constitutes a register set.
+ *
+ * @param ocs Pointer to the driver's context
+ * @param rset Register Set to use
+ * @param off Offset from the base address of the Register Set
+ *
+ * @return register value
+ */
+uint8_t
+ocs_reg_read8(ocs_t *ocs, uint32_t rset, uint32_t off)
+{
+ ocs_pci_reg_t *reg = NULL;
+
+ reg = &ocs->reg[rset];
+
+ return bus_space_read_1(reg->btag, reg->bhandle, off);
+}
+
+/**
+ * @ingroup os
+ * @brief Write a 32-bit PCI register
+ *
+ * The SLI documentation uses the term "register set" to describe one or more
+ * PCI BARs which form a logical address. For example, a 64-bit address uses
+ * two BARs, and thus constitutes a register set.
+ *
+ * @param ocs Pointer to the driver's context
+ * @param rset Register Set to use
+ * @param off Offset from the base address of the Register Set
+ * @param val Value to write
+ *
+ * @return none
+ */
+void
+ocs_reg_write32(ocs_t *ocs, uint32_t rset, uint32_t off, uint32_t val)
+{
+ ocs_pci_reg_t *reg = NULL;
+
+ reg = &ocs->reg[rset];
+
+	bus_space_write_4(reg->btag, reg->bhandle, off, val);
+}
+
+/**
+ * @ingroup os
+ * @brief Write a 16-bit PCI register
+ *
+ * The SLI documentation uses the term "register set" to describe one or more
+ * PCI BARs which form a logical address. For example, a 64-bit address uses
+ * two BARs, and thus constitutes a register set.
+ *
+ * @param ocs Pointer to the driver's context
+ * @param rset Register Set to use
+ * @param off Offset from the base address of the Register Set
+ * @param val Value to write
+ *
+ * @return none
+ */
+void
+ocs_reg_write16(ocs_t *ocs, uint32_t rset, uint32_t off, uint16_t val)
+{
+ ocs_pci_reg_t *reg = NULL;
+
+ reg = &ocs->reg[rset];
+
+	bus_space_write_2(reg->btag, reg->bhandle, off, val);
+}
+
+/**
+ * @ingroup os
+ * @brief Write an 8-bit PCI register
+ *
+ * The SLI documentation uses the term "register set" to describe one or more
+ * PCI BARs which form a logical address. For example, a 64-bit address uses
+ * two BARs, and thus constitutes a register set.
+ *
+ * @param ocs Pointer to the driver's context
+ * @param rset Register Set to use
+ * @param off Offset from the base address of the Register Set
+ * @param val Value to write
+ *
+ * @return none
+ */
+void
+ocs_reg_write8(ocs_t *ocs, uint32_t rset, uint32_t off, uint8_t val)
+{
+ ocs_pci_reg_t *reg = NULL;
+
+ reg = &ocs->reg[rset];
+
+	bus_space_write_1(reg->btag, reg->bhandle, off, val);
+}
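+
+/*
+ * Usage sketch (illustrative only): a read-modify-write of a 32-bit
+ * register in register set 0. The offset and bit position are
+ * hypothetical values for illustration.
+ *
+ *	uint32_t v = ocs_reg_read32(ocs, 0, 0x20);
+ *	ocs_reg_write32(ocs, 0, 0x20, v | BIT(1));
+ */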
+
+/**
+ * @ingroup os
+ * @brief Allocate host memory
+ *
+ * @param os OS handle
+ * @param size number of bytes to allocate
+ * @param flags additional options
+ *
+ * @return pointer to allocated memory, NULL otherwise
+ */
+void *
+ocs_malloc(ocs_os_handle_t os, size_t size, int32_t flags)
+{
+ if ((flags & OCS_M_NOWAIT) == 0) {
+ flags |= M_WAITOK;
+ }
+
+#ifndef OCS_DEBUG_MEMORY
+ return malloc(size, M_OCS, flags);
+#else
+ char nameb[80];
+ long offset = 0;
+ void *addr = malloc(size, M_OCS, flags);
+
+ linker_ddb_search_symbol_name(__builtin_return_address(1), nameb, sizeof(nameb), &offset);
+	printf("A: %p %zu @ %s+%#lx\n", addr, size, nameb, offset);
+
+ return addr;
+#endif
+}
+
+/**
+ * @ingroup os
+ * @brief Free host memory
+ *
+ * @param os OS handle
+ * @param addr pointer to memory
+ * @param size bytes to free
+ *
+ * @note size ignored in BSD
+ */
+void
+ocs_free(ocs_os_handle_t os, void *addr, size_t size)
+{
+#ifndef OCS_DEBUG_MEMORY
+ free(addr, M_OCS);
+#else
+	printf("F: %p %zu\n", addr, size);
+ free(addr, M_OCS);
+#endif
+}
+
+/**
+ * @brief Callback function provided to bus_dmamap_load
+ *
+ * Function loads the physical / bus address into the DMA descriptor. The caller
+ * can detect a mapping failure if a descriptor's phys element is zero.
+ *
+ * @param arg Argument provided to bus_dmamap_load is an ocs_dma_t
+ * @param seg Array of DMA segment(s), each describing segment's address and length
+ * @param nseg Number of elements in array
+ * @param error Indicates success (0) or failure of mapping
+ */
+static void
+ocs_dma_load(void *arg, bus_dma_segment_t *seg, int nseg, int error)
+{
+ ocs_dma_t *dma = arg;
+
+ if (error) {
+ printf("%s: error=%d\n", __func__, error);
+ dma->phys = 0;
+ } else {
+ dma->phys = seg->ds_addr;
+ }
+}
+
+/**
+ * @ingroup os
+ * @brief Free a DMA capable block of memory
+ *
+ * @param os Device abstraction
+ * @param dma DMA descriptor for memory to be freed
+ *
+ * @return 0 if memory is de-allocated, -1 otherwise
+ */
+int32_t
+ocs_dma_free(ocs_os_handle_t os, ocs_dma_t *dma)
+{
+ struct ocs_softc *ocs = os;
+
+ if (!dma) {
+ device_printf(ocs->dev, "%s: bad parameter(s) dma=%p\n", __func__, dma);
+ return -1;
+ }
+
+ if (dma->size == 0) {
+ return 0;
+ }
+
+ if (dma->map) {
+ bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_POSTREAD |
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(dma->tag, dma->map);
+ }
+
+ if (dma->virt) {
+ bus_dmamem_free(dma->tag, dma->virt, dma->map);
+ bus_dmamap_destroy(dma->tag, dma->map);
+ }
+ bus_dma_tag_destroy(dma->tag);
+
+ bzero(dma, sizeof(ocs_dma_t));
+
+ return 0;
+}
+
+/**
+ * @ingroup os
+ * @brief Allocate a DMA capable block of memory
+ *
+ * @param os Device abstraction
+ * @param dma DMA descriptor containing results of memory allocation
+ * @param size Size in bytes of desired allocation
+ * @param align Alignment in bytes
+ *
+ * @return 0 on success, ENOMEM otherwise
+ */
+int32_t
+ocs_dma_alloc(ocs_os_handle_t os, ocs_dma_t *dma, size_t size, size_t align)
+{
+ struct ocs_softc *ocs = os;
+
+ if (!dma || !size) {
+ device_printf(ocs->dev, "%s bad parameter(s) dma=%p size=%zd\n",
+ __func__, dma, size);
+ return ENOMEM;
+ }
+
+ bzero(dma, sizeof(ocs_dma_t));
+
+ /* create a "tag" that describes the desired memory allocation */
+ if (bus_dma_tag_create(ocs->dmat, align, 0, BUS_SPACE_MAXADDR,
+ BUS_SPACE_MAXADDR, NULL, NULL,
+ size, 1, size, 0, NULL, NULL, &dma->tag)) {
+ device_printf(ocs->dev, "DMA tag allocation failed\n");
+ return ENOMEM;
+ }
+
+ dma->size = size;
+
+ /* allocate the memory */
+ if (bus_dmamem_alloc(dma->tag, &dma->virt, BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
+ &dma->map)) {
+ device_printf(ocs->dev, "DMA memory allocation failed s=%zd a=%zd\n", size, align);
+ ocs_dma_free(ocs, dma);
+ return ENOMEM;
+ }
+
+ dma->alloc = dma->virt;
+
+ /* map virtual address to device visible address */
+ if (bus_dmamap_load(dma->tag, dma->map, dma->virt, dma->size, ocs_dma_load,
+ dma, 0)) {
+ device_printf(ocs->dev, "DMA memory load failed\n");
+ ocs_dma_free(ocs, dma);
+ return ENOMEM;
+ }
+
+ /* if the DMA map load callback fails, it sets the physical address to zero */
+ if (0 == dma->phys) {
+ device_printf(ocs->dev, "ocs_dma_load failed\n");
+ ocs_dma_free(ocs, dma);
+ return ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * @ingroup os
+ * @brief Synchronize the DMA buffer memory
+ *
+ * Ensures memory coherency between the CPU and device
+ *
+ * @param dma DMA descriptor of memory to synchronize
+ * @param flags Describes direction of synchronization
+ * See BUS_DMA(9) for details
+ * - BUS_DMASYNC_PREWRITE
+ * - BUS_DMASYNC_POSTREAD
+ */
+void
+ocs_dma_sync(ocs_dma_t *dma, uint32_t flags)
+{
+ bus_dmamap_sync(dma->tag, dma->map, flags);
+}
+
+int32_t
+ocs_dma_copy_in(ocs_dma_t *dma, void *buffer, uint32_t buffer_length)
+{
+ if (!dma)
+ return -1;
+ if (!buffer)
+ return -1;
+ if (buffer_length == 0)
+ return 0;
+ if (buffer_length > dma->size)
+ buffer_length = dma->size;
+ ocs_memcpy(dma->virt, buffer, buffer_length);
+ dma->len = buffer_length;
+ return buffer_length;
+}
+
+int32_t
+ocs_dma_copy_out(ocs_dma_t *dma, void *buffer, uint32_t buffer_length)
+{
+ if (!dma)
+ return -1;
+ if (!buffer)
+ return -1;
+ if (buffer_length == 0)
+ return 0;
+ if (buffer_length > dma->len)
+ buffer_length = dma->len;
+ ocs_memcpy(buffer, dma->virt, buffer_length);
+ return buffer_length;
+}
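+
+/*
+ * Usage sketch (illustrative only) tying the DMA helpers together:
+ * allocate a buffer, stage a payload, sync it for the device, then
+ * release it. Error handling is abbreviated; "payload" and its length
+ * are caller-supplied in this example.
+ *
+ *	ocs_dma_t dma;
+ *
+ *	if (ocs_dma_alloc(ocs, &dma, 4096, OCS_MIN_DMA_ALIGNMENT))
+ *		return ENOMEM;
+ *	ocs_dma_copy_in(&dma, payload, payload_len);
+ *	ocs_dma_sync(&dma, BUS_DMASYNC_PREWRITE);	// CPU -> device
+ *	... hand dma.phys / dma.len to the hardware ...
+ *	ocs_dma_sync(&dma, BUS_DMASYNC_POSTREAD);	// device -> CPU
+ *	ocs_dma_free(ocs, &dma);
+ */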
+
+/**
+ * @ingroup os
+ * @brief Initialize a lock
+ *
+ * @param lock lock to initialize
+ * @param name string identifier for the lock
+ */
+void
+ocs_lock_init(void *os, ocs_lock_t *lock, const char *name, ...)
+{
+ va_list ap;
+
+ va_start(ap, name);
+ ocs_vsnprintf(lock->name, MAX_LOCK_DESC_LEN, name, ap);
+ va_end(ap);
+
+ mtx_init(&lock->lock, lock->name, NULL, MTX_DEF);
+}
+
+/**
+ * @brief Allocate a bit map
+ *
+ * For BSD, this is a simple character string
+ *
+ * @param n_bits number of bits in bit map
+ *
+ * @return pointer to the bit map, NULL on error
+ */
+ocs_bitmap_t *
+ocs_bitmap_alloc(uint32_t n_bits)
+{
+
+ return malloc(bitstr_size(n_bits), M_OCS, M_ZERO | M_NOWAIT);
+}
+
+/**
+ * @brief Free a bit map
+ *
+ * @param bitmap pointer to previously allocated bit map
+ */
+void
+ocs_bitmap_free(ocs_bitmap_t *bitmap)
+{
+
+ free(bitmap, M_OCS);
+}
+
+/**
+ * @brief find next unset bit and set it
+ *
+ * @param bitmap bit map to search
+ * @param n_bits number of bits in map
+ *
+ * @return bit position or -1 if map is full
+ */
+int32_t
+ocs_bitmap_find(ocs_bitmap_t *bitmap, uint32_t n_bits)
+{
+ int32_t position = -1;
+
+ bit_ffc(bitmap, n_bits, &position);
+
+ if (-1 != position) {
+ bit_set(bitmap, position);
+ }
+
+ return position;
+}
+
+/**
+ * @brief search for next (un)set bit
+ *
+ * @param bitmap bit map to search
+ * @param set search for a set or unset bit
+ * @param n_bits number of bits in map
+ *
+ * @return bit position or -1
+ */
+int32_t
+ocs_bitmap_search(ocs_bitmap_t *bitmap, uint8_t set, uint32_t n_bits)
+{
+ int32_t position;
+
+ if (!bitmap) {
+ return -1;
+ }
+
+ if (set) {
+ bit_ffs(bitmap, n_bits, &position);
+ } else {
+ bit_ffc(bitmap, n_bits, &position);
+ }
+
+ return position;
+}
+
+/**
+ * @brief clear the specified bit
+ *
+ * @param bitmap pointer to bit map
+ * @param bit bit number to clear
+ */
+void
+ocs_bitmap_clear(ocs_bitmap_t *bitmap, uint32_t bit)
+{
+ bit_clear(bitmap, bit);
+}
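+
+/*
+ * Usage sketch (illustrative only): allocate a map of 256 IDs, take the
+ * next free one, and return it when finished.
+ *
+ *	ocs_bitmap_t *map = ocs_bitmap_alloc(256);
+ *	int32_t id;
+ *
+ *	if (map == NULL)
+ *		return ENOMEM;
+ *	id = ocs_bitmap_find(map, 256);	// returns -1 if the map is full
+ *	...
+ *	if (id >= 0)
+ *		ocs_bitmap_clear(map, id);
+ *	ocs_bitmap_free(map);
+ */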
+
+void _ocs_log(ocs_t *ocs, const char *func_name, int line, const char *fmt, ...)
+{
+ va_list ap;
+ char buf[256];
+ char *p = buf;
+
+ va_start(ap, fmt);
+
+ /* TODO: Add Current PID info here. */
+
+ p += snprintf(p, sizeof(buf) - (p - buf), "%s: ", DRV_NAME);
+ p += snprintf(p, sizeof(buf) - (p - buf), "%s:", func_name);
+ p += snprintf(p, sizeof(buf) - (p - buf), "%i:", line);
+ p += snprintf(p, sizeof(buf) - (p - buf), "%s:", (ocs != NULL) ? device_get_nameunit(ocs->dev) : "");
+ p += vsnprintf(p, sizeof(buf) - (p - buf), fmt, ap);
+
+ va_end(ap);
+
+ printf("%s", buf);
+}
+
+/**
+ * @brief Common thread call function
+ *
+ * This is the common function called whenever a thread instantiated by
+ * ocs_thread_create() is started. It captures the return value from the
+ * actual thread function, stashes it in the thread object for later retrieval
+ * by ocs_thread_get_retval(), and then calls kthread_exit(), the prescribed
+ * way to terminate a kernel thread.
+ *
+ * @param arg a pointer to the thread object
+ *
+ * @return none
+ */
+
+static void
+ocs_thread_call_fctn(void *arg)
+{
+ ocs_thread_t *thread = arg;
+ thread->retval = (*thread->fctn)(thread->arg);
+	if (thread->name != NULL) {
+		ocs_free(NULL, thread->name, ocs_strlen(thread->name) + 1);
+	}
+ kthread_exit();
+}
+
+/**
+ * @brief Create a kernel thread
+ *
+ * Creates a kernel thread and optionally starts it. If the thread is not immediately
+ * started, ocs_thread_start() should be called at some later point.
+ *
+ * @param os OS handle
+ * @param thread pointer to thread object
+ * @param fctn function for thread to be begin executing
+ * @param name text name to identify thread
+ * @param arg application specific argument passed to thread function
+ * @param start start option, OCS_THREAD_RUN will start the thread immediately,
+ * OCS_THREAD_CREATE will create but not start the thread
+ *
+ * @return returns 0 for success, a negative error code value for failure.
+ */
+
+int32_t
+ocs_thread_create(ocs_os_handle_t os, ocs_thread_t *thread, ocs_thread_fctn fctn, const char *name, void *arg, ocs_thread_start_e start)
+{
+ int32_t rc = 0;
+
+	ocs_memset(thread, 0, sizeof(*thread));
+
+ thread->fctn = fctn;
+ thread->name = ocs_strdup(name);
+	if (thread->name == NULL) {
+		/* fall back to a heap copy so the name can always be freed */
+		thread->name = ocs_strdup("unknown");
+	}
+ thread->arg = arg;
+
+ ocs_atomic_set(&thread->terminate, 0);
+
+ rc = kthread_add(ocs_thread_call_fctn, thread, NULL, &thread->tcb, (start == OCS_THREAD_CREATE) ? RFSTOPPED : 0,
+ OCS_THREAD_DEFAULT_STACK_SIZE_PAGES, "%s", name);
+
+ return rc;
+}
+
+/**
+ * @brief Start a thread
+ *
+ * Starts a thread that was created with OCS_THREAD_CREATE rather than OCS_THREAD_RUN
+ *
+ * @param thread pointer to thread object
+ *
+ * @return returns 0 for success, a negative error code value for failure.
+ */
+
+int32_t ocs_thread_start(ocs_thread_t *thread)
+{
+ sched_add(thread->tcb, SRQ_BORING);
+ return 0;
+}
+
+/**
+ * @brief return thread argument
+ *
+ * Returns a pointer to the thread's application specific argument
+ *
+ * @param mythread pointer to the thread object
+ *
+ * @return pointer to application specific argument
+ */
+
+void *ocs_thread_get_arg(ocs_thread_t *mythread)
+{
+ return mythread->arg;
+}
+
+/**
+ * @brief Request thread stop
+ *
+ * A stop request is made to the thread. This is voluntary; the thread must
+ * periodically query its terminate request using ocs_thread_terminate_requested().
+ *
+ * @param thread pointer to thread object
+ *
+ * @return returns 0 for success, a negative error code value for failure.
+ */
+
+int32_t
+ocs_thread_terminate(ocs_thread_t *thread)
+{
+ ocs_atomic_set(&thread->terminate, 1);
+ return 0;
+}
+
+/**
+ * @brief See if a terminate request has been made
+ *
+ * Check to see if a stop request has been made to the current thread. This
+ * function would be used by a thread to see if it should terminate.
+ *
+ * @return returns non-zero if a stop has been requested
+ */
+
+int32_t ocs_thread_terminate_requested(ocs_thread_t *thread)
+{
+ return ocs_atomic_read(&thread->terminate);
+}
+
+/**
+ * @brief Retrieve threads return value
+ *
+ * After a thread has terminated, its return value may be retrieved with this function.
+ *
+ * @param thread pointer to thread object
+ *
+ * @return return value from thread function
+ */
+
+int32_t
+ocs_thread_get_retval(ocs_thread_t *thread)
+{
+ return thread->retval;
+}
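+
+/*
+ * Usage sketch (illustrative only): a cooperative worker built on the
+ * primitives above. my_worker, my_ctx, and worker_thread are
+ * hypothetical names; per ocs_thread_call_fctn(), the thread function
+ * receives the arg passed to ocs_thread_create().
+ *
+ *	static ocs_thread_t worker_thread;
+ *
+ *	static int32_t
+ *	my_worker(void *arg)
+ *	{
+ *		while (!ocs_thread_terminate_requested(&worker_thread)) {
+ *			... do one unit of work ...
+ *			ocs_thread_yield(&worker_thread);
+ *		}
+ *		return 0;
+ *	}
+ *
+ *	ocs_thread_create(ocs, &worker_thread, my_worker, "my_worker",
+ *			  my_ctx, OCS_THREAD_RUN);
+ *	...
+ *	ocs_thread_terminate(&worker_thread);
+ */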
+
+/**
+ * @brief Request that the currently running thread yield
+ *
+ * The currently running thread yields to the scheduler
+ *
+ * @param thread pointer to thread (ignored)
+ *
+ * @return none
+ */
+
+void
+ocs_thread_yield(ocs_thread_t *thread) {
+ pause("thread yield", 1);
+}
+
+ocs_thread_t *
+ocs_thread_self(void)
+{
+ ocs_printf(">>> %s not implemented\n", __func__);
+ ocs_abort();
+}
+
+int32_t
+ocs_thread_setcpu(ocs_thread_t *thread, uint32_t cpu)
+{
+ ocs_printf(">>> %s not implemented\n", __func__);
+ return -1;
+}
+
+int32_t
+ocs_thread_getcpu(void)
+{
+ return curcpu;
+}
+
+int
+ocs_sem_init(ocs_sem_t *sem, int val, const char *name, ...)
+{
+ va_list ap;
+
+ va_start(ap, name);
+ ocs_vsnprintf(sem->name, sizeof(sem->name), name, ap);
+ va_end(ap);
+
+ sema_init(&sem->sem, val, sem->name);
+ return 0;
+}
+
+/**
+ * @ingroup os
+ * @brief Copy user arguments in to kernel space for an ioctl
+ * @par Description
+ * This function is called at the beginning of an ioctl function
+ * to copy the ioctl argument from user space to kernel space.
+ *
+ * BSD handles this for us - arg is already in kernel space,
+ * so we just return it.
+ *
+ * @param os OS handle
+ * @param arg The argument passed to the ioctl function
+ * @param size The size of the structure pointed to by arg
+ *
+ * @return A pointer to a kernel space copy of the argument on
+ * success; NULL on failure
+ */
+void *ocs_ioctl_preprocess(ocs_os_handle_t os, void *arg, size_t size)
+{
+ return arg;
+}
+
+/**
+ * @ingroup os
+ * @brief Copy results of an ioctl back to user space
+ * @par Description
+ * This function is called at the end of ioctl processing to
+ * copy the argument back to user space.
+ *
+ * BSD handles this for us.
+ *
+ * @param os OS handle
+ * @param arg The argument passed to the ioctl function
+ * @param kern_ptr A pointer to the kernel space copy of the
+ * argument
+ * @param size The size of the structure pointed to by arg.
+ *
+ * @return Returns 0.
+ */
+int32_t ocs_ioctl_postprocess(ocs_os_handle_t os, void *arg, void *kern_ptr, size_t size)
+{
+ return 0;
+}
+
+/**
+ * @ingroup os
+ * @brief Free memory allocated by ocs_ioctl_preprocess
+ * @par Description
+ * This function is called in the event of an error in ioctl
+ * processing. For operating environments where ocs_ioctl_preprocess()
+ * allocates memory, this call frees the memory without copying
+ * results back to user space.
+ *
+ * For BSD, because no memory was allocated in ocs_ioctl_preprocess,
+ * nothing needs to be done here.
+ *
+ * @param os OS handle
+ * @param kern_ptr A pointer to the kernel space copy of the
+ * argument
+ * @param size The size of the structure pointed to by arg.
+ *
+ * @return Returns nothing.
+ */
+void ocs_ioctl_free(ocs_os_handle_t os, void *kern_ptr, size_t size)
+{
+ return;
+}
+
+void ocs_intr_disable(ocs_os_handle_t os)
+{
+}
+
+void ocs_intr_enable(ocs_os_handle_t os)
+{
+}
+
+void ocs_print_stack(void)
+{
+ struct stack st;
+
+ stack_zero(&st);
+ stack_save(&st);
+ stack_print(&st);
+}
+
+void ocs_abort(void)
+{
+ panic(">>> abort/panic\n");
+}
+
+const char *
+ocs_pci_model(uint16_t vendor, uint16_t device)
+{
+ switch (device) {
+ case PCI_PRODUCT_EMULEX_OCE16002: return "OCE16002";
+ case PCI_PRODUCT_EMULEX_OCE1600_VF: return "OCE1600_VF";
+ case PCI_PRODUCT_EMULEX_OCE50102: return "OCE50102";
+	case PCI_PRODUCT_EMULEX_OCE50102_VF: return "OCE50102_VF";
+ default:
+ break;
+ }
+
+ return "unknown";
+}
+
+int32_t
+ocs_get_bus_dev_func(ocs_t *ocs, uint8_t* bus, uint8_t* dev, uint8_t* func)
+{
+ *bus = pci_get_bus(ocs->dev);
+ *dev = pci_get_slot(ocs->dev);
+	*func = pci_get_function(ocs->dev);
+ return 0;
+}
+
+/**
+ * @brief return CPU information
+ *
+ * This function populates the ocs_cpuinfo_t buffer with CPU information
+ *
+ * @param cpuinfo pointer to ocs_cpuinfo_t buffer
+ *
+ * @return returns 0 for success, a negative error code value for failure.
+ */
+extern int mp_ncpus;
+int32_t
+ocs_get_cpuinfo(ocs_cpuinfo_t *cpuinfo)
+{
+ cpuinfo->num_cpus = mp_ncpus;
+ return 0;
+}
+
+uint32_t
+ocs_get_num_cpus(void)
+{
+ static ocs_cpuinfo_t cpuinfo;
+
+ if (cpuinfo.num_cpus == 0) {
+ ocs_get_cpuinfo(&cpuinfo);
+ }
+ return cpuinfo.num_cpus;
+}
+
+
+void
+__ocs_callout(void *t)
+{
+ ocs_timer_t *timer = t;
+
+ if (callout_pending(&timer->callout)) {
+ /* Callout was reset */
+ return;
+ }
+
+ if (!callout_active(&timer->callout)) {
+ /* Callout was stopped */
+ return;
+ }
+
+ callout_deactivate(&timer->callout);
+
+ if (timer->func) {
+ timer->func(timer->data);
+ }
+}
+
+int32_t
+ocs_setup_timer(ocs_os_handle_t os, ocs_timer_t *timer, void(*func)(void *arg), void *data, uint32_t timeout_ms)
+{
+ struct timeval tv;
+ int hz;
+
+ if (timer == NULL) {
+ ocs_log_err(NULL, "bad parameter\n");
+ return -1;
+ }
+
+ if (!mtx_initialized(&timer->lock)) {
+ mtx_init(&timer->lock, "ocs_timer", NULL, MTX_DEF);
+ }
+
+ callout_init_mtx(&timer->callout, &timer->lock, 0);
+
+ timer->func = func;
+ timer->data = data;
+
+ tv.tv_sec = timeout_ms / 1000;
+ tv.tv_usec = (timeout_ms % 1000) * 1000;
+
+ hz = tvtohz(&tv);
+ if (hz < 0)
+ hz = INT32_MAX;
+ if (hz == 0)
+ hz = 1;
+
+ mtx_lock(&timer->lock);
+ callout_reset(&timer->callout, hz, __ocs_callout, timer);
+ mtx_unlock(&timer->lock);
+
+ return 0;
+}
+
+int32_t
+ocs_mod_timer(ocs_timer_t *timer, uint32_t timeout_ms)
+{
+ struct timeval tv;
+ int hz;
+
+ if (timer == NULL) {
+ ocs_log_err(NULL, "bad parameter\n");
+ return -1;
+ }
+
+ tv.tv_sec = timeout_ms / 1000;
+ tv.tv_usec = (timeout_ms % 1000) * 1000;
+
+ hz = tvtohz(&tv);
+ if (hz < 0)
+ hz = INT32_MAX;
+ if (hz == 0)
+ hz = 1;
+
+ mtx_lock(&timer->lock);
+ callout_reset(&timer->callout, hz, __ocs_callout, timer);
+ mtx_unlock(&timer->lock);
+
+ return 0;
+}
+
+int32_t
+ocs_timer_pending(ocs_timer_t *timer)
+{
+ return callout_active(&timer->callout);
+}
+
+int32_t
+ocs_del_timer(ocs_timer_t *timer)
+{
+
+ mtx_lock(&timer->lock);
+ callout_stop(&timer->callout);
+ mtx_unlock(&timer->lock);
+
+ return 0;
+}
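+
+/*
+ * Usage sketch (illustrative only): arm a 5 second one-shot timer, push
+ * the deadline out, then cancel it. my_timeout_cb and my_ctx are
+ * hypothetical names.
+ *
+ *	static void
+ *	my_timeout_cb(void *arg)
+ *	{
+ *		... runs from callout context when the timer fires ...
+ *	}
+ *
+ *	ocs_setup_timer(ocs, &timer, my_timeout_cb, my_ctx, 5000);
+ *	ocs_mod_timer(&timer, 10000);	// restart with a 10 second timeout
+ *	ocs_del_timer(&timer);		// cancel if still pending
+ */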
+
+char *
+ocs_strdup(const char *s)
+{
+ uint32_t l = strlen(s);
+ char *d;
+
+ d = ocs_malloc(NULL, l+1, OCS_M_NOWAIT);
+ if (d != NULL) {
+ ocs_strcpy(d, s);
+ }
+ return d;
+}
+
+void
+_ocs_assert(const char *cond, const char *filename, int linenum)
+{
+	const char *fn = strrchr(filename, '/');
+
+ ocs_log_err(NULL, "%s(%d) assertion (%s) failed\n", (fn ? fn + 1 : filename), linenum, cond);
+ ocs_print_stack();
+ ocs_save_ddump_all(OCS_DDUMP_FLAGS_WQES|OCS_DDUMP_FLAGS_CQES|OCS_DDUMP_FLAGS_MQES, -1, TRUE);
+}
diff --git a/sys/dev/ocs_fc/ocs_os.h b/sys/dev/ocs_fc/ocs_os.h
new file mode 100644
index 000000000000..9c18f46ac1a5
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs_os.h
@@ -0,0 +1,1406 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/**
+ * @file
+ * BSD-specific headers common to the driver
+ */
+
+#ifndef _OCS_OS_H
+#define _OCS_OS_H
+
+typedef struct ocs_softc ocs_t;
+
+/***************************************************************************
+ * OS specific includes
+ */
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/endian.h>
+#include <sys/stddef.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/taskqueue.h>
+#include <sys/bitstring.h>
+#include <sys/stack.h>
+
+#include <machine/atomic.h>
+#include <machine/bus.h>
+#include <machine/stdarg.h>
+
+#include <dev/pci/pcivar.h>
+
+#include <sys/sema.h>
+#include <sys/time.h>
+
+#include <sys/proc.h>
+#include <sys/kthread.h>
+#include <sys/unistd.h>
+#include <sys/sched.h>
+
+#include <sys/conf.h>
+#include <sys/sysctl.h>
+#include <sys/ioccom.h>
+#include <sys/ctype.h>
+
+/* OCS_OS_MAX_ISR_TIME_MSEC - maximum time driver code should spend in an interrupt
+ * or kernel thread context without yielding
+ */
+#define OCS_OS_MAX_ISR_TIME_MSEC 1000
+
+/* BSD driver specific definitions */
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
+#define OCS_MAX_LUN 512
+#define OCS_NUM_UNSOLICITED_FRAMES 1024
+
+#define OCS_MAX_DOMAINS 1
+#define OCS_MAX_REMOTE_NODES 2048
+#define OCS_MAX_TARGETS 1024
+#define OCS_MAX_INITIATORS 1024
+/** Reserve this number of IOs for each initiator to return FULL/BUSY status */
+#define OCS_RSVD_INI_IO 8
+
+#define OCS_MIN_DMA_ALIGNMENT 16
+#define OCS_MAX_DMA_ALLOC	(64*1024)	/* maximum DMA allocation that is expected to reliably succeed */
+
+/*
+ * Macros used to size the CQ hash table. We want to round up to the next
+ * power of 2 for the hash.
+ */
+#define B2(x) ( (x) | ( (x) >> 1) )
+#define B4(x) ( B2(x) | ( B2(x) >> 2) )
+#define B8(x) ( B4(x) | ( B4(x) >> 4) )
+#define B16(x) ( B8(x) | ( B8(x) >> 8) )
+#define B32(x) (B16(x) | (B16(x) >>16) )
+#define B32_NEXT_POWER_OF_2(x) (B32((x)-1) + 1)
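+
+/*
+ * Example: B32_NEXT_POWER_OF_2(37) computes B32(36), which smears the
+ * highest set bit downward to give 63, then adds 1 to produce 64. Exact
+ * powers of two map to themselves: B32_NEXT_POWER_OF_2(64) == 64.
+ */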
+
+/*
+ * likely/unlikely - branch prediction hint
+ */
+#define likely(x) __builtin_expect(!!(x), 1)
+#define unlikely(x) __builtin_expect(!!(x), 0)
+
+/***************************************************************************
+ * OS abstraction
+ */
+
+/**
+ * @brief Min/Max macros
+ *
+ */
+#define OCS_MAX(x, y) ((x) > (y) ? (x) : (y))
+#define OCS_MIN(x, y) ((x) < (y) ? (x) : (y))
+
+#define PRIX64 "lX"
+#define PRIx64 "lx"
+#define PRId64 "ld"
+#define PRIu64 "lu"
+
+/**
+ * Enable optional features
+ * - OCS_INCLUDE_DEBUG include low-level SLI debug support
+ */
+#define OCS_INCLUDE_DEBUG
+
+/**
+ * @brief Set the Nth bit
+ *
+ * @todo move to a private file used internally?
+ */
+#ifndef BIT
+#define BIT(n) (1U << (n))
+#endif
+
+/***************************************************************************
+ * Platform specific operations
+ */
+
+/**
+ * @ingroup os
+ * @typedef ocs_os_handle_t
+ * @brief OS specific handle or driver context
+ *
+ * This can be anything from a void * to some other OS specific type. The lower
+ * layers make no assumption about its value and pass it back as the first
+ * parameter to most OS functions.
+ */
+typedef ocs_t * ocs_os_handle_t;
+
+/**
+ * @ingroup os
+ * @brief return the lower 32-bits of a bus address
+ *
+ * @param addr Physical or bus address to convert
+ * @return lower 32-bits of a bus address
+ *
+ * @note this may be a good candidate for an inline or macro
+ */
+static inline uint32_t ocs_addr32_lo(uintptr_t addr)
+{
+#if defined(__LP64__)
+ return (uint32_t)(addr & 0xffffffffUL);
+#else
+ return addr;
+#endif
+}
+
+/**
+ * @ingroup os
+ * @brief return the upper 32-bits of a bus address
+ *
+ * @param addr Physical or bus address to convert
+ * @return upper 32-bits of a bus address
+ *
+ * @note this may be a good candidate for an inline or macro
+ */
+static inline uint32_t ocs_addr32_hi(uintptr_t addr)
+{
+#if defined(__LP64__)
+ return (uint32_t)(addr >> 32);
+#else
+ return 0;
+#endif
+}
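+
+/*
+ * Usage sketch (illustrative only): splitting a DMA buffer's bus address
+ * into the two 32-bit halves a hardware queue entry expects.
+ *
+ *	uint32_t addr_lo = ocs_addr32_lo(dma->phys);
+ *	uint32_t addr_hi = ocs_addr32_hi(dma->phys);
+ */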
+
+/**
+ * @ingroup os
+ * @brief return the log2(val)
+ *
+ * @param val number to use (assumed to be exact power of 2)
+ *
+ * @return log base 2 of val
+ */
+static inline uint32_t ocs_lg2(uint32_t val)
+{
+#if defined(__GNUC__)
+ /*
+ * clz = "count leading zero's"
+ *
+ * Assuming val is an exact power of 2, the most significant bit
+ * will be the log base 2 of val
+ */
+ return 31 - __builtin_clz(val);
+#else
+#error You need to provide a non-GCC version of this function
+#endif
+}
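+
+/*
+ * Example: ocs_lg2(4096) == 12. The result is meaningless for inputs
+ * that are not exact powers of two, and undefined for 0 (where
+ * __builtin_clz itself is undefined).
+ */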
+
+/**
+ * @ingroup os
+ * @brief optimization barrier
+ *
+ * Optimization barrier. Prevents compiler re-ordering
+ * instructions across barrier.
+ *
+ * @return none
+ */
+#define ocs_barrier()	__asm __volatile("" : : : "memory")
+
+/**
+ * @ingroup os
+ * @brief convert a big endian 32 bit value to the host's native format
+ *
+ * @param val 32 bit big endian value
+ *
+ * @return value converted to the host's native endianness
+ */
+#define ocs_be32toh(val) be32toh(val)
+
+/**
+ * @ingroup os
+ * @brief convert a 32 bit value from the host's native format to big endian
+ *
+ * @param val 32 bit native endian value
+ *
+ * @return value converted to big endian
+ */
+#define ocs_htobe32(val) htobe32(val)
+
+/**
+ * @ingroup os
+ * @brief convert a 16 bit value from the host's native format to big endian
+ *
+ * @param v 16 bit native endian value
+ *
+ * @return value converted to big endian
+ */
+#define ocs_htobe16(v) htobe16(v)
+#define ocs_be16toh(v) be16toh(v)
+
+
+#define ocs_htobe64(v) htobe64(v)
+#define ocs_be64toh(v) be64toh(v)
+
+/**
+ * @ingroup os
+ * @brief Delay execution by the given number of micro-seconds
+ *
+ * @param usec number of micro-seconds to "busy-wait"
+ *
+ * @note The value of usec may be greater than 1,000,000
+ */
+#define ocs_udelay(usec) DELAY(usec)
+
+/**
+ * @ingroup os
+ * @brief Delay execution by the given number of milli-seconds
+ *
+ * @param msec number of milli-seconds to "busy-wait"
+ *
+ * @note The value of msec may be greater than 1,000; this busy-waits via ocs_udelay()
+ */
+#define ocs_msleep(msec) ocs_udelay((msec)*1000)
+
+/**
+ * @ingroup os
+ * @brief Get time of day in msec
+ *
+ * @return time of day in msec
+ */
+static inline time_t
+ocs_msectime(void)
+{
+ struct timeval tv;
+
+ getmicrotime(&tv);
+ return (tv.tv_sec*1000) + (tv.tv_usec / 1000);
+}
+
+/**
+ * @ingroup os
+ * @brief Copy length number of bytes from the source to destination address
+ *
+ * @param d pointer to the destination memory
+ * @param s pointer to the source memory
+ * @param l number of bytes to copy
+ *
+ * @return original value of dst pointer
+ */
+#define ocs_memcpy(d, s, l) memcpy(d, s, l)
+
+#define ocs_strlen(s) strlen(s)
+#define ocs_strcpy(d,s) strcpy(d, s)
+#define ocs_strncpy(d,s, n) strncpy(d, s, n)
+#define ocs_strcat(d, s) strcat(d, s)
+#define ocs_strtoul(s,ep,b) strtoul(s,ep,b)
+#define ocs_strtoull(s,ep,b) ((uint64_t)strtouq(s,ep,b))
+#define ocs_atoi(s) strtol(s, 0, 0)
+#define ocs_strcmp(d,s) strcmp(d,s)
+#define ocs_strcasecmp(d,s) strcasecmp(d,s)
+#define ocs_strncmp(d,s,n) strncmp(d,s,n)
+#define ocs_strstr(h,n) strstr(h,n)
+#define ocs_strsep(h, n) strsep(h, n)
+#define ocs_strchr(s,c) strchr(s,c)
+#define ocs_copy_from_user(dst, src, n) copyin(src, dst, n)
+#define ocs_copy_to_user(dst, src, n) copyout(src, dst, n)
+#define ocs_snprintf(buf, n, fmt, ...) snprintf(buf, n, fmt, ##__VA_ARGS__)
+#define ocs_vsnprintf(buf, n, fmt, ap) vsnprintf((char*)buf, n, fmt, ap)
+#define ocs_sscanf(buf,fmt, ...) sscanf(buf, fmt, ##__VA_ARGS__)
+#define ocs_printf printf
+#define ocs_isspace(c) isspace(c)
+#define ocs_isdigit(c) isdigit(c)
+#define ocs_isxdigit(c) isxdigit(c)
+
+extern uint64_t ocs_get_tsc(void);
+extern void *ocs_ioctl_preprocess(ocs_os_handle_t os, void *arg, size_t size);
+extern int32_t ocs_ioctl_postprocess(ocs_os_handle_t os, void *arg, void *kern_ptr, size_t size);
+extern void ocs_ioctl_free(ocs_os_handle_t os, void *kern_ptr, size_t size);
+extern char *ocs_strdup(const char *s);
+
+/**
+ * @ingroup os
+ * @brief Set the value of each byte in memory
+ *
+ * @param b pointer to the memory
+ * @param c value used to set memory
+ * @param l number of bytes to set
+ *
+ * @return original value of mem pointer
+ */
+#define ocs_memset(b, c, l) memset(b, c, l)
+
+#define LOG_CRIT 0
+#define LOG_ERR 1
+#define LOG_WARN 2
+#define LOG_INFO 3
+#define LOG_TEST 4
+#define LOG_DEBUG 5
+
+extern int loglevel;
+
+extern void _ocs_log(ocs_t *ocs, const char *func, int line, const char *fmt, ...);
+
+#define ocs_log_crit(os, fmt, ...)	ocs_log(os, LOG_CRIT, fmt, ##__VA_ARGS__)
+#define ocs_log_err(os, fmt, ...)	ocs_log(os, LOG_ERR, fmt, ##__VA_ARGS__)
+#define ocs_log_warn(os, fmt, ...)	ocs_log(os, LOG_WARN, fmt, ##__VA_ARGS__)
+#define ocs_log_info(os, fmt, ...)	ocs_log(os, LOG_INFO, fmt, ##__VA_ARGS__)
+#define ocs_log_test(os, fmt, ...)	ocs_log(os, LOG_TEST, fmt, ##__VA_ARGS__)
+#define ocs_log_debug(os, fmt, ...)	ocs_log(os, LOG_DEBUG, fmt, ##__VA_ARGS__)
+
+#define ocs_log(os, level, fmt, ...) \
+ do { \
+ if (level <= loglevel) { \
+ _ocs_log(os, __func__, __LINE__, fmt, ##__VA_ARGS__); \
+ } \
+ } while (0)
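+
+/*
+ * Illustrative usage (editor's sketch, not part of the driver): a message is
+ * emitted only when its level is at or below the global "loglevel". With the
+ * driver default of LOG_INFO (3), the first call below logs while the second
+ * is compiled in but suppressed at runtime:
+ *
+ *	ocs_log_err(ocs, "mailbox command failed: rc=%d\n", rc);
+ *	ocs_log_debug(ocs, "posted %d entries\n", n);
+ */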
+
+static inline uint32_t ocs_roundup(uint32_t x, uint32_t y)
+{
+ return (((x + y - 1) / y) * y);
+}
+
+static inline uint32_t ocs_rounddown(uint32_t x, uint32_t y)
+{
+ return ((x / y) * y);
+}
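+
+/*
+ * Worked example: ocs_roundup(1000, 512) == 1024 and
+ * ocs_rounddown(1000, 512) == 512; both return x unchanged when x is
+ * already a multiple of y.
+ */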
+
+/***************************************************************************
+ * Memory allocation interfaces
+ */
+
+#define OCS_M_ZERO M_ZERO
+#define OCS_M_NOWAIT M_NOWAIT
+
+/**
+ * @ingroup os
+ * @brief Allocate host memory
+ *
+ * @param os OS handle
+ * @param size number of bytes to allocate
+ * @param flags additional options
+ *
+ * Flags include
+ * - OCS_M_ZERO zero memory after allocating
+ * - OCS_M_NOWAIT do not block/sleep waiting for an allocation request
+ *
+ * @return pointer to allocated memory, NULL otherwise
+ */
+extern void *ocs_malloc(ocs_os_handle_t os, size_t size, int32_t flags);
+
+/**
+ * @ingroup os
+ * @brief Free host memory
+ *
+ * @param os OS handle
+ * @param addr pointer to memory
+ * @param size bytes to free
+ */
+extern void ocs_free(ocs_os_handle_t os, void *addr, size_t size);
+
+/**
+ * @ingroup os
+ * @brief generic DMA memory descriptor for driver allocations
+ *
+ * Memory regions ultimately used by the hardware are described using
+ * this structure. All implementations must include the structure members
+ * defined in the first section, and they may also add their own structure
+ * members in the second section.
+ *
+ * Note that each region described by ocs_dma_s is assumed to be physically
+ * contiguous.
+ */
+typedef struct ocs_dma_s {
+ /*
+ * OCS layer requires the following members
+ */
+ void *virt; /**< virtual address of the memory used by the CPU */
+ void *alloc; /**< originally allocated virtual address used to restore virt if modified */
+ uintptr_t phys; /**< physical or bus address of the memory used by the hardware */
+ size_t size; /**< size in bytes of the memory */
+ /*
+ * Implementation specific fields allowed here
+ */
+ size_t len; /**< application specific length */
+ bus_dma_tag_t tag;
+ bus_dmamap_t map;
+} ocs_dma_t;
+
+/**
+ * @ingroup os
+ * @brief Returns maximum supported DMA allocation size
+ *
+ * @param os OS specific handle or driver context
+ * @param align alignment requirement for DMA allocation
+ *
+ * Return maximum supported DMA allocation size, given alignment
+ * requirement.
+ *
+ * @return maximum supported DMA allocation size
+ */
+static inline uint32_t ocs_max_dma_alloc(ocs_os_handle_t os, size_t align)
+{
+ return ~((uint32_t)0); /* no max */
+}
+
+/**
+ * @ingroup os
+ * @brief Allocate a DMA capable block of memory
+ *
+ * @param os OS specific handle or driver context
+ * @param dma DMA descriptor containing results of memory allocation
+ * @param size Size in bytes of desired allocation
+ * @param align Alignment in bytes of the requested allocation
+ *
+ * @return 0 on success, non-zero otherwise
+ */
+extern int32_t ocs_dma_alloc(ocs_os_handle_t, ocs_dma_t *, size_t, size_t);
+
+/**
+ * @ingroup os
+ * @brief Free a DMA capable block of memory
+ *
+ * @param os OS specific handle or driver context
+ * @param dma DMA descriptor for memory to be freed
+ *
+ * @return 0 if memory is de-allocated, non-zero otherwise
+ */
+extern int32_t ocs_dma_free(ocs_os_handle_t, ocs_dma_t *);
+extern int32_t ocs_dma_copy_in(ocs_dma_t *dma, void *buffer, uint32_t buffer_length);
+extern int32_t ocs_dma_copy_out(ocs_dma_t *dma, void *buffer, uint32_t buffer_length);
+
+static inline int32_t ocs_dma_valid(ocs_dma_t *dma)
+{
+ return (dma->size != 0);
+}
+
+/**
+ * @ingroup os
+ * @brief Synchronize the DMA buffer memory
+ *
+ * Ensures memory coherency between the CPU and device
+ *
+ * @param dma DMA descriptor of memory to synchronize
+ * @param flags Describes direction of synchronization
+ * - OCS_DMASYNC_PREREAD sync needed before hardware updates host memory
+ * - OCS_DMASYNC_PREWRITE sync needed after CPU updates host memory but before hardware can access
+ * - OCS_DMASYNC_POSTREAD sync needed after hardware updates host memory but before CPU can access
+ * - OCS_DMASYNC_POSTWRITE sync needed after hardware updates host memory
+ */
+extern void ocs_dma_sync(ocs_dma_t *, uint32_t);
+
+#define OCS_DMASYNC_PREWRITE BUS_DMASYNC_PREWRITE
+#define OCS_DMASYNC_POSTREAD BUS_DMASYNC_POSTREAD
+
+
+/***************************************************************************
+ * Locking
+ */
+
+/**
+ * @ingroup os
+ * @typedef ocs_lock_t
+ * @brief Define the type used to implement locking
+ */
+#define MAX_LOCK_DESC_LEN 64
+typedef struct ocs_lock_s {
+ struct mtx lock;
+ char name[MAX_LOCK_DESC_LEN];
+} ocs_lock_t;
+
+/**
+ * @ingroup os
+ * @brief Initialize a lock
+ *
+ * @param os OS handle
+ * @param lock lock to initialize
+ * @param name printf-style format string identifier for the lock
+ */
+extern void ocs_lock_init(void *os, ocs_lock_t *lock, const char *name, ...);
+
+/**
+ * @ingroup os
+ * @brief Free a previously allocated lock
+ *
+ * @param lock lock to free
+ */
+static inline void
+ocs_lock_free(ocs_lock_t *lock)
+{
+
+ if (mtx_initialized(&(lock)->lock)) {
+ mtx_assert(&(lock)->lock, MA_NOTOWNED);
+ mtx_destroy(&(lock)->lock);
+ } else {
+ panic("XXX trying to free with un-initialized mtx!?!?\n");
+ }
+}
+
+/**
+ * @ingroup os
+ * @brief Acquire a lock
+ *
+ * @param lock lock to obtain
+ */
+static inline void
+ocs_lock(ocs_lock_t *lock)
+{
+
+ if (mtx_initialized(&(lock)->lock)) {
+ mtx_assert(&(lock)->lock, MA_NOTOWNED);
+ mtx_lock(&(lock)->lock);
+ } else {
+ panic("XXX trying to lock with un-initialized mtx!?!?\n");
+ }
+}
+
+/**
+ * @ingroup os
+ * @brief Release a lock
+ *
+ * @param lock lock to release
+ */
+static inline void
+ocs_unlock(ocs_lock_t *lock)
+{
+
+ if (mtx_initialized(&(lock)->lock)) {
+ mtx_assert(&(lock)->lock, MA_OWNED | MA_NOTRECURSED);
+ mtx_unlock(&(lock)->lock);
+ } else {
+ panic("XXX trying to unlock with un-initialized mtx!?!?\n");
+ }
+}
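+
+/*
+ * Illustrative lock lifecycle (editor's sketch): the name argument to
+ * ocs_lock_init() is a printf-style format; "unit" is a hypothetical
+ * placeholder:
+ *
+ *	ocs_lock_t lock;
+ *
+ *	ocs_lock_init(os, &lock, "example_lock_%d", unit);
+ *	ocs_lock(&lock);
+ *	... critical section ...
+ *	ocs_unlock(&lock);
+ *	ocs_lock_free(&lock);
+ */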
+
+/**
+ * @ingroup os
+ * @typedef ocs_rlock_t
+ * @brief Define the type used to implement recursive locking
+ */
+typedef struct ocs_lock_s ocs_rlock_t;
+
+/**
+ * @ingroup os
+ * @brief Initialize a recursive lock
+ *
+ * @param ocs pointer to ocs structure
+ * @param lock lock to initialize
+ * @param name string identifier for the lock
+ */
+static inline void
+ocs_rlock_init(ocs_t *ocs, ocs_rlock_t *lock, const char *name)
+{
+ ocs_strncpy(lock->name, name, MAX_LOCK_DESC_LEN);
+ mtx_init(&(lock)->lock, lock->name, NULL, MTX_DEF | MTX_RECURSE | MTX_DUPOK);
+}
+
+/**
+ * @ingroup os
+ * @brief Free a previously allocated recursive lock
+ *
+ * @param lock lock to free
+ */
+static inline void
+ocs_rlock_free(ocs_rlock_t *lock)
+{
+ if (mtx_initialized(&(lock)->lock)) {
+ mtx_destroy(&(lock)->lock);
+ } else {
+ panic("XXX trying to free with un-initialized mtx!?!?\n");
+ }
+}
+
+/**
+ * @brief try to acquire a recursive lock
+ *
+ * Attempt to acquire a recursive lock, return TRUE if successful
+ *
+ * @param lock pointer to recursive lock
+ *
+ * @return TRUE if lock was acquired, FALSE if not
+ */
+static inline int32_t
+ocs_rlock_try(ocs_rlock_t *lock)
+{
+ int rc = mtx_trylock(&(lock)->lock);
+
+ return rc != 0;
+}
+
+/**
+ * @ingroup os
+ * @brief Acquire a recursive lock
+ *
+ * @param lock lock to obtain
+ */
+static inline void
+ocs_rlock_acquire(ocs_rlock_t *lock)
+{
+ if (mtx_initialized(&(lock)->lock)) {
+ mtx_lock(&(lock)->lock);
+ } else {
+ panic("XXX trying to lock with un-initialized mtx!?!?\n");
+ }
+}
+
+/**
+ * @ingroup os
+ * @brief Release a recursive lock
+ *
+ * @param lock lock to release
+ */
+static inline void
+ocs_rlock_release(ocs_rlock_t *lock)
+{
+ if (mtx_initialized(&(lock)->lock)) {
+ mtx_assert(&(lock)->lock, MA_OWNED);
+ mtx_unlock(&(lock)->lock);
+ } else {
+ panic("XXX trying to unlock with un-initialized mtx!?!?\n");
+ }
+}
+
+/**
+ * @brief counting semaphore
+ *
+ * Declaration of the counting semaphore object
+ *
+ */
+typedef struct {
+ char name[32];
+ struct sema sem; /**< OS counting semaphore structure */
+} ocs_sem_t;
+
+#define OCS_SEM_FOREVER (-1)
+#define OCS_SEM_TRY (0)
+
+/**
+ * @brief Initialize a counting semaphore
+ *
+ * The semaphore is initialized to the given value
+ *
+ * @param sem pointer to semaphore
+ * @param val initial value
+ * @param name label for the semaphore
+ *
+ * @return returns 0 for success, a negative error code value for failure.
+ */
+
+extern int ocs_sem_init(ocs_sem_t *sem, int val, const char *name, ...) __attribute__((format(printf, 3, 4)));
+
+/**
+ * @brief execute a P (decrement) operation
+ *
+ * A P (decrement and block if negative) operation is performed on the semaphore.
+ *
+ * If timeout_usec is zero, a single attempt is made to acquire the semaphore,
+ * and 0 is returned if it was acquired. If timeout_usec is greater than zero,
+ * the call blocks until the semaphore is acquired or the timeout expires. If
+ * timeout_usec is less than zero, the call blocks until the semaphore is acquired.
+ *
+ * @param sem pointer to semaphore
+ * @param timeout_usec timeout in microseconds
+ *
+ * @return returns 0 for success, negative value if the semaphore was not acquired.
+ */
+
+static inline int
+ocs_sem_p(ocs_sem_t *sem, int timeout_usec)
+{
+ int32_t rc = 0;
+
+ if (timeout_usec == 0) {
+ rc = sema_trywait(&sem->sem);
+ if (rc == 0) {
+ rc = -1;
+ }
+ } else if (timeout_usec > 0) {
+ struct timeval tv;
+ uint32_t ticks;
+
+ tv.tv_sec = timeout_usec / 1000000;
+ tv.tv_usec = timeout_usec % 1000000;
+ ticks = tvtohz(&tv);
+ if (ticks == 0) {
+			ticks++;
+ }
+ rc = sema_timedwait(&sem->sem, ticks);
+ if (rc != 0) {
+ rc = -1;
+ }
+ } else {
+ sema_wait(&sem->sem);
+ }
+ if (rc)
+ rc = -1;
+
+ return rc;
+}
+
+/**
+ * @brief perform a V (increment) operation on a counting semaphore
+ *
+ * The semaphore is incremented, unblocking one thread that is waiting on the
+ * semaphore.
+ *
+ * @param sem pointer to the semaphore
+ *
+ * @return none
+ */
+
+static inline void
+ocs_sem_v(ocs_sem_t *sem)
+{
+ sema_post(&sem->sem);
+}
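+
+/*
+ * Illustrative use (editor's sketch): wait up to one second for a completion
+ * that another context signals with ocs_sem_v(). A timeout of OCS_SEM_FOREVER
+ * blocks indefinitely; OCS_SEM_TRY makes a single attempt:
+ *
+ *	ocs_sem_t done;
+ *
+ *	ocs_sem_init(&done, 0, "cmd_done");
+ *	... issue request; completion path calls ocs_sem_v(&done) ...
+ *	if (ocs_sem_p(&done, 1000000) != 0) {
+ *		... timed out ...
+ *	}
+ */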
+
+/***************************************************************************
+ * Bitmap
+ */
+
+/**
+ * @ingroup os
+ * @typedef ocs_bitmap_t
+ * @brief Define the type used to implement bit-maps
+ */
+typedef bitstr_t ocs_bitmap_t;
+
+/**
+ * @ingroup os
+ * @brief Allocate a bitmap
+ *
+ * @param n_bits Minimum number of entries in the bit-map
+ *
+ * @return pointer to the bit-map or NULL on error
+ */
+extern ocs_bitmap_t *ocs_bitmap_alloc(uint32_t n_bits);
+
+/**
+ * @ingroup os
+ * @brief Free a bit-map
+ *
+ * @param bitmap Bit-map to free
+ */
+extern void ocs_bitmap_free(ocs_bitmap_t *bitmap);
+
+/**
+ * @ingroup os
+ * @brief Find next unset bit and set it
+ *
+ * @param bitmap bit map to search
+ * @param n_bits number of bits in map
+ *
+ * @return bit position or -1 if map is full
+ */
+extern int32_t ocs_bitmap_find(ocs_bitmap_t *bitmap, uint32_t n_bits);
+
+/**
+ * @ingroup os
+ * @brief search for next (un)set bit
+ *
+ * @param bitmap bit map to search
+ * @param set search for a set or unset bit
+ * @param n_bits number of bits in map
+ *
+ * @return bit position or -1
+ */
+extern int32_t ocs_bitmap_search(ocs_bitmap_t *bitmap, uint8_t set, uint32_t n_bits);
+
+/**
+ * @ingroup os
+ * @brief clear the specified bit
+ *
+ * @param bitmap pointer to bit map
+ * @param bit bit number to clear
+ */
+extern void ocs_bitmap_clear(ocs_bitmap_t *bitmap, uint32_t bit);
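+
+/*
+ * Illustrative use (editor's sketch): allocate an ID from a pool of 256 and
+ * release it when done:
+ *
+ *	ocs_bitmap_t *map = ocs_bitmap_alloc(256);
+ *	int32_t id = ocs_bitmap_find(map, 256);
+ *
+ *	if (id >= 0) {
+ *		... use id ...
+ *		ocs_bitmap_clear(map, id);
+ *	}
+ *	ocs_bitmap_free(map);
+ */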
+
+extern int32_t ocs_get_property(const char *prop_name, char *buffer, uint32_t buffer_len);
+
+/***************************************************************************
+ * Timer Routines
+ *
+ * Functions for setting, querying and canceling timers.
+ */
+typedef struct {
+ struct callout callout;
+ struct mtx lock;
+
+ void (*func)(void *);
+ void *data;
+} ocs_timer_t;
+
+/**
+ * @ingroup os
+ * @brief Initialize and set a timer
+ *
+ * @param os OS handle
+ * @param timer pointer to the structure allocated for this timer
+ * @param func the function to call when the timer expires
+ * @param data Data to pass to the provided timer function when the timer
+ * expires.
+ * @param timeout_ms the timeout in milliseconds
+ */
+extern int32_t ocs_setup_timer(ocs_os_handle_t os, ocs_timer_t *timer, void(*func)(void *arg),
+ void *data, uint32_t timeout_ms);
+
+/**
+ * @ingroup os
+ * @brief Modify a timer's expiration
+ *
+ * @param timer pointer to the structure allocated for this timer
+ * @param timeout_ms the timeout in milliseconds
+ */
+extern int32_t ocs_mod_timer(ocs_timer_t *timer, uint32_t timeout_ms);
+
+/**
+ * @ingroup os
+ * @brief Queries to see if a timer is pending.
+ *
+ * @param timer pointer to the structure allocated for this timer
+ *
+ * @return non-zero if the timer is pending
+ */
+extern int32_t ocs_timer_pending(ocs_timer_t *timer);
+
+/**
+ * @ingroup os
+ * @brief Remove a pending timer
+ *
+ * @param timer pointer to the structure allocated for this timer
+ */
+extern int32_t ocs_del_timer(ocs_timer_t *timer);
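+
+/*
+ * Illustrative use (editor's sketch): arm a one-shot 500 ms timer and cancel
+ * it if the guarded event completes first; "my_timeout" and "ctx" are
+ * hypothetical placeholders:
+ *
+ *	ocs_timer_t t;
+ *
+ *	ocs_setup_timer(os, &t, my_timeout, ctx, 500);
+ *	...
+ *	if (ocs_timer_pending(&t))
+ *		ocs_del_timer(&t);
+ */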
+
+/***************************************************************************
+ * Atomics
+ *
+ */
+
+typedef uint32_t ocs_atomic_t;
+
+/**
+ * @ingroup os
+ * @brief initialize an atomic
+ *
+ * @param a pointer to the atomic object
+ * @param v initial value
+ *
+ * @return none
+ */
+#define ocs_atomic_init(a, v) ocs_atomic_set(a, v)
+
+/**
+ * @ingroup os
+ * @brief adds an integer to an atomic value
+ *
+ * @param a pointer to the atomic object
+ * @param v value to add
+ *
+ * @return the value of the atomic before incrementing.
+ */
+#define ocs_atomic_add_return(a, v) atomic_fetchadd_32(a, v)
+
+/**
+ * @ingroup os
+ * @brief subtracts an integer from an atomic value
+ *
+ * @param a pointer to the atomic object
+ * @param v value to subtract
+ *
+ * @return the value of the atomic before subtracting.
+ */
+#define ocs_atomic_sub_return(a, v) atomic_fetchadd_32(a, (-(v)))
+
+/**
+ * @ingroup os
+ * @brief returns the current value of an atomic object
+ *
+ * @param a pointer to the atomic object
+ *
+ * @return the value of the atomic.
+ */
+#define ocs_atomic_read(a) atomic_load_acq_32(a)
+
+/**
+ * @ingroup os
+ * @brief sets the current value of an atomic object
+ *
+ * @param a pointer to the atomic object
+ */
+#define ocs_atomic_set(a, v) atomic_store_rel_32(a, v)
+
+/**
+ * @ingroup os
+ * @brief Sets atomic to 0, returns previous value
+ *
+ * @param a pointer to the atomic object
+ *
+ * @return the value of the atomic before the operation.
+ */
+#define ocs_atomic_read_and_clear(a)	atomic_readandclear_32(a)
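+
+/*
+ * Illustrative use (editor's sketch): maintain an in-flight counter. Note the
+ * add/sub variants return the value *before* the operation, matching the
+ * underlying atomic_fetchadd_32() semantics:
+ *
+ *	ocs_atomic_t inflight;
+ *
+ *	ocs_atomic_init(&inflight, 0);
+ *	ocs_atomic_add_return(&inflight, 1);	returns 0
+ *	ocs_atomic_sub_return(&inflight, 1);	returns 1
+ */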
+
+/**
+ * @brief OCS thread structure
+ *
+ */
+
+typedef struct ocs_thread_s ocs_thread_t;
+
+typedef int32_t (*ocs_thread_fctn)(ocs_thread_t *mythread);
+
+struct ocs_thread_s {
+	struct thread *tcb;		/**< thread control block */
+	ocs_thread_fctn fctn;		/**< thread function */
+	char *name;			/**< name of thread */
+	void *arg;			/**< pointer to thread argument */
+	ocs_atomic_t terminate;		/**< terminate request */
+	int32_t retval;			/**< return value */
+	uint32_t cpu_affinity;		/**< cpu affinity */
+};
+#define OCS_THREAD_DEFAULT_STACK_SIZE_PAGES 8
+
+/**
+ * @brief OCS thread start options
+ *
+ */
+
+typedef enum {
+	OCS_THREAD_RUN,			/**< run immediately */
+	OCS_THREAD_CREATE,		/**< create and wait for start request */
+} ocs_thread_start_e;
+
+
+extern int32_t ocs_thread_create(ocs_os_handle_t os, ocs_thread_t *thread, ocs_thread_fctn fctn,
+ const char *name, void *arg, ocs_thread_start_e start_option);
+extern int32_t ocs_thread_start(ocs_thread_t *thread);
+extern void *ocs_thread_get_arg(ocs_thread_t *mythread);
+extern int32_t ocs_thread_terminate(ocs_thread_t *thread);
+extern int32_t ocs_thread_terminate_requested(ocs_thread_t *thread);
+extern int32_t ocs_thread_get_retval(ocs_thread_t *thread);
+extern void ocs_thread_yield(ocs_thread_t *thread);
+extern ocs_thread_t *ocs_thread_self(void);
+extern int32_t ocs_thread_setcpu(ocs_thread_t *thread, uint32_t cpu);
+extern int32_t ocs_thread_getcpu(void);
+
+
+/***************************************************************************
+ * PCI
+ *
+ * Several functions below refer to a "register set". This is one or
+ * more PCI BARs that constitute a PCI address. For example, if a MMIO
+ * region is described using both BAR[0] and BAR[1], the combination of
+ * BARs defines register set 0.
+ */
+
+/**
+ * @brief tracks mapped PCI memory regions
+ */
+typedef struct ocs_pci_reg_s {
+ uint32_t rid;
+ struct resource *res;
+ bus_space_tag_t btag;
+ bus_space_handle_t bhandle;
+} ocs_pci_reg_t;
+
+#define PCI_MAX_BAR 6
+#define PCI_64BIT_BAR0 0
+
+#define PCI_VENDOR_EMULEX 0x10df /* Emulex */
+
+#define PCI_PRODUCT_EMULEX_OCE16001 0xe200 /* OneCore 16Gb FC (lancer) */
+#define PCI_PRODUCT_EMULEX_OCE16002 0xe200 /* OneCore 16Gb FC (lancer) */
+#define PCI_PRODUCT_EMULEX_LPE31004 0xe300 /* LightPulse 16Gb x 4 FC (lancer-g6) */
+#define PCI_PRODUCT_EMULEX_LPE32002 0xe300 /* LightPulse 32Gb x 2 FC (lancer-g6) */
+#define PCI_PRODUCT_EMULEX_OCE1600_VF 0xe208
+#define PCI_PRODUCT_EMULEX_OCE50102 0xe260 /* OneCore FCoE (lancer) */
+#define PCI_PRODUCT_EMULEX_OCE50102_VF 0xe268
+
+/**
+ * @ingroup os
+ * @brief Get the PCI bus, device, and function values
+ *
+ * @param ocs OS specific handle or driver context
+ * @param bus Pointer to location to store the bus number.
+ * @param dev Pointer to location to store the device number.
+ * @param func Pointer to location to store the function number.
+ *
+ * @return Returns 0.
+ */
+extern int32_t
+ocs_get_bus_dev_func(ocs_t *ocs, uint8_t* bus, uint8_t* dev, uint8_t* func);
+
+extern ocs_t *ocs_get_instance(uint32_t index);
+extern uint32_t ocs_instance(void *os);
+
+
+/**
+ * @ingroup os
+ * @brief Read a 32 bit value from the specified configuration register
+ *
+ * @param os OS specific handle or driver context
+ * @param reg register offset
+ *
+ * @return The 32 bit value
+ */
+extern uint32_t ocs_config_read32(ocs_os_handle_t os, uint32_t reg);
+
+/**
+ * @ingroup os
+ * @brief Read a 16 bit value from the specified configuration
+ * register
+ *
+ * @param os OS specific handle or driver context
+ * @param reg register offset
+ *
+ * @return The 16 bit value
+ */
+extern uint16_t ocs_config_read16(ocs_os_handle_t os, uint32_t reg);
+
+/**
+ * @ingroup os
+ * @brief Read an 8 bit value from the specified configuration
+ * register
+ *
+ * @param os OS specific handle or driver context
+ * @param reg register offset
+ *
+ * @return The 8 bit value
+ */
+extern uint8_t ocs_config_read8(ocs_os_handle_t os, uint32_t reg);
+
+/**
+ * @ingroup os
+ * @brief Write an 8 bit value to the specified configuration
+ * register
+ *
+ * @param os OS specific handle or driver context
+ * @param reg register offset
+ * @param val value to write
+ *
+ * @return None
+ */
+extern void ocs_config_write8(ocs_os_handle_t os, uint32_t reg, uint8_t val);
+
+/**
+ * @ingroup os
+ * @brief Write a 16 bit value to the specified configuration
+ * register
+ *
+ * @param os OS specific handle or driver context
+ * @param reg register offset
+ * @param val value to write
+ *
+ * @return None
+ */
+extern void ocs_config_write16(ocs_os_handle_t os, uint32_t reg, uint16_t val);
+
+/**
+ * @ingroup os
+ * @brief Write a 32 bit value to the specified configuration
+ * register
+ *
+ * @param os OS specific handle or driver context
+ * @param reg register offset
+ * @param val value to write
+ *
+ * @return None
+ */
+extern void ocs_config_write32(ocs_os_handle_t os, uint32_t reg, uint32_t val);
+
+/**
+ * @ingroup os
+ * @brief Read a PCI register
+ *
+ * @param os OS specific handle or driver context
+ * @param rset Which "register set" to use
+ * @param off Register offset
+ *
+ * @return 32 bit contents of the register
+ */
+extern uint32_t ocs_reg_read32(ocs_os_handle_t os, uint32_t rset, uint32_t off);
+
+/**
+ * @ingroup os
+ * @brief Read a PCI register
+ *
+ * @param os OS specific handle or driver context
+ * @param rset Which "register set" to use
+ * @param off Register offset
+ *
+ * @return 16 bit contents of the register
+ */
+extern uint16_t ocs_reg_read16(ocs_os_handle_t os, uint32_t rset, uint32_t off);
+
+/**
+ * @ingroup os
+ * @brief Read a PCI register
+ *
+ * @param os OS specific handle or driver context
+ * @param rset Which "register set" to use
+ * @param off Register offset
+ *
+ * @return 8 bit contents of the register
+ */
+extern uint8_t ocs_reg_read8(ocs_os_handle_t os, uint32_t rset, uint32_t off);
+
+/**
+ * @ingroup os
+ * @brief Write a PCI register
+ *
+ * @param os OS specific handle or driver context
+ * @param rset Which "register set" to use
+ * @param off Register offset
+ * @param val 32-bit value to write
+ */
+extern void ocs_reg_write32(ocs_os_handle_t os, uint32_t rset, uint32_t off, uint32_t val);
+
+/**
+ * @ingroup os
+ * @brief Write a PCI register
+ *
+ * @param os OS specific handle or driver context
+ * @param rset Which "register set" to use
+ * @param off Register offset
+ * @param val 16-bit value to write
+ */
+extern void ocs_reg_write16(ocs_os_handle_t os, uint32_t rset, uint32_t off, uint16_t val);
+
+/**
+ * @ingroup os
+ * @brief Write a PCI register
+ *
+ * @param os OS specific handle or driver context
+ * @param rset Which "register set" to use
+ * @param off Register offset
+ * @param val 8-bit value to write
+ */
+extern void ocs_reg_write8(ocs_os_handle_t os, uint32_t rset, uint32_t off, uint8_t val);
+
+/**
+ * @ingroup os
+ * @brief Disable interrupts
+ *
+ * @param os OS specific handle or driver context
+ */
+extern void ocs_intr_disable(ocs_os_handle_t os);
+
+/**
+ * @ingroup os
+ * @brief Enable interrupts
+ *
+ * @param os OS specific handle or driver context
+ */
+extern void ocs_intr_enable(ocs_os_handle_t os);
+
+/**
+ * @ingroup os
+ * @brief Return model string
+ *
+ * @param vendor PCI vendor ID
+ * @param device PCI device ID
+ */
+extern const char *ocs_pci_model(uint16_t vendor, uint16_t device);
+
+extern void ocs_print_stack(void);
+
+extern void ocs_abort(void) __attribute__((noreturn));
+
+/***************************************************************************
+ * Reference counting
+ *
+ */
+
+/**
+ * @ingroup os
+ * @brief reference counter object
+ */
+typedef void (*ocs_ref_release_t)(void *arg);
+typedef struct ocs_ref_s {
+ ocs_ref_release_t release; /* release function to call */
+ void *arg;
+	uint32_t count;			/* reference count; updated with the ocs_atomic_* operations */
+} ocs_ref_t;
+
+/**
+ * @ingroup os
+ * @brief initialize given reference object
+ *
+ * @param ref Pointer to reference object
+ * @param release Function to be called when count is 0.
+ * @param arg Argument to be passed to release function.
+ */
+static inline void
+ocs_ref_init(ocs_ref_t *ref, ocs_ref_release_t release, void *arg)
+{
+ ref->release = release;
+ ref->arg = arg;
+ ocs_atomic_init(&ref->count, 1);
+}
+
+/**
+ * @ingroup os
+ * @brief Return reference count value
+ *
+ * @param ref Pointer to reference object
+ *
+ * @return Count value of given reference object
+ */
+static inline uint32_t
+ocs_ref_read_count(ocs_ref_t *ref)
+{
+ return ocs_atomic_read(&ref->count);
+}
+
+/**
+ * @ingroup os
+ * @brief Set count on given reference object to a value.
+ *
+ * @param ref Pointer to reference object
+ * @param i Set count to this value
+ */
+static inline void
+ocs_ref_set(ocs_ref_t *ref, int i)
+{
+ ocs_atomic_set(&ref->count, i);
+}
+
+/**
+ * @ingroup os
+ * @brief Take a reference on given object.
+ *
+ * @par Description
+ * This function takes a reference on an object.
+ *
+ * Note: this function should only be used if the caller can
+ * guarantee that the reference count is >= 1 and will stay >= 1
+ * for the duration of this call (i.e. won't go to zero). If it
+ * can't (the refcount may go to zero during this call),
+ * ocs_ref_get_unless_zero() should be used instead.
+ *
+ * @param ref Pointer to reference object
+ *
+ */
+static inline void
+ocs_ref_get(ocs_ref_t *ref)
+{
+ ocs_atomic_add_return(&ref->count, 1);
+}
+
+/**
+ * @ingroup os
+ * @brief Take a reference on given object if count is not zero.
+ *
+ * @par Description
+ * This function takes a reference on an object if and only if
+ * the given reference object is "active" or valid.
+ *
+ * @param ref Pointer to reference object
+ *
+ * @return non-zero if "get" succeeded; Return zero if ref count
+ * is zero.
+ */
+static inline uint32_t
+ocs_ref_get_unless_zero(ocs_ref_t *ref)
+{
+ uint32_t rc = 0;
+ rc = ocs_atomic_read(&ref->count);
+ if (rc != 0) {
+ ocs_atomic_add_return(&ref->count, 1);
+ }
+ return rc;
+}
+
+/**
+ * @ingroup os
+ * @brief Decrement reference on given object
+ *
+ * @par Description
+ * This function decrements the reference count on the given
+ * reference object. If the reference count becomes zero, the
+ * "release" function (set during "init" time) is called.
+ *
+ * @param ref Pointer to reference object
+ *
+ * @return non-zero if release function was called; zero
+ * otherwise.
+ */
+static inline uint32_t
+ocs_ref_put(ocs_ref_t *ref)
+{
+ uint32_t rc = 0;
+ if (ocs_atomic_sub_return(&ref->count, 1) == 1) {
+ ref->release(ref->arg);
+ rc = 1;
+ }
+ return rc;
+}
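+
+/*
+ * Illustrative object lifetime (editor's sketch): the count starts at 1 from
+ * ocs_ref_init(), and the final ocs_ref_put() invokes the release callback;
+ * "obj" and "obj_release" are hypothetical placeholders:
+ *
+ *	ocs_ref_init(&obj->ref, obj_release, obj);	count = 1
+ *	if (ocs_ref_get_unless_zero(&obj->ref)) {	count = 2
+ *		...
+ *		ocs_ref_put(&obj->ref);			count = 1
+ *	}
+ *	ocs_ref_put(&obj->ref);		count = 0; obj_release(obj) runs
+ */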
+
+/**
+ * @ingroup os
+ * @brief Get the OS system ticks
+ *
+ * @return number of ticks that have occurred since the system
+ * booted.
+ */
+static inline uint64_t
+ocs_get_os_ticks(void)
+{
+ return ticks;
+}
+
+/**
+ * @ingroup os
+ * @brief Get the OS system tick frequency
+ *
+ * @return frequency of system ticks.
+ */
+static inline uint32_t
+ocs_get_os_tick_freq(void)
+{
+ return hz;
+}
+
+/*****************************************************************************
+ *
+ * CPU topology API
+ */
+
+typedef struct {
+ uint32_t num_cpus; /* Number of CPU cores */
+ uint8_t hyper; /* TRUE if threaded CPUs */
+} ocs_cpuinfo_t;
+
+extern int32_t ocs_get_cpuinfo(ocs_cpuinfo_t *cpuinfo);
+extern uint32_t ocs_get_num_cpus(void);
+
+#include "ocs_list.h"
+#include "ocs_utils.h"
+#include "ocs_mgmt.h"
+#include "ocs_common.h"
+
+#endif /* !_OCS_OS_H */
diff --git a/sys/dev/ocs_fc/ocs_pci.c b/sys/dev/ocs_fc/ocs_pci.c
new file mode 100644
index 000000000000..03f1fa3d0cb0
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs_pci.c
@@ -0,0 +1,963 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#define OCS_COPYRIGHT "Copyright (C) 2017 Broadcom. All rights reserved."
+
+/**
+ * @file
+ * Implementation of required FreeBSD PCI interface functions
+ */
+
+#include "ocs.h"
+#include "version.h"
+#include <sys/sysctl.h>
+#include <sys/malloc.h>
+
+static MALLOC_DEFINE(M_OCS, "OCS", "OneCore Storage data");
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include <machine/bus.h>
+
+/**
+ * Tunable parameters for transport
+ */
+int logmask = 0;
+int ctrlmask = 2;
+int logdest = 1;
+int loglevel = LOG_INFO;
+int ramlog_size = 1*1024*1024;
+int ddump_saved_size = 0;
+static const char *queue_topology = "eq cq rq cq mq $nulp($nwq(cq wq:ulp=$rpt1)) cq wq:len=256:class=1";
+
+static void ocs_release_bus(struct ocs_softc *);
+static int32_t ocs_intr_alloc(struct ocs_softc *);
+static int32_t ocs_intr_setup(struct ocs_softc *);
+static int32_t ocs_intr_teardown(struct ocs_softc *);
+static int ocs_pci_intx_filter(void *);
+static void ocs_pci_intr(void *);
+static int32_t ocs_init_dma_tag(struct ocs_softc *ocs);
+
+static int32_t ocs_setup_fcports(ocs_t *ocs);
+
+ocs_t *ocs_devices[MAX_OCS_DEVICES];
+
+/**
+ * @brief Check support for the given device
+ *
+ * Determine support for a given device by examining the PCI vendor and
+ * device IDs
+ *
+ * @param dev device abstraction
+ *
+ * @return BUS_PROBE_DEFAULT if the device is supported, ENXIO otherwise
+ */
+static int
+ocs_pci_probe(device_t dev)
+{
+ char *desc = NULL;
+
+ if (pci_get_vendor(dev) != PCI_VENDOR_EMULEX) {
+ return ENXIO;
+ }
+
+ switch (pci_get_device(dev)) {
+ case PCI_PRODUCT_EMULEX_OCE16001:
+ desc = "Emulex LightPulse FC Adapter";
+ break;
+ case PCI_PRODUCT_EMULEX_LPE31004:
+ desc = "Emulex LightPulse FC Adapter";
+ break;
+ case PCI_PRODUCT_EMULEX_OCE50102:
+ desc = "Emulex LightPulse 10GbE FCoE/NIC Adapter";
+ break;
+ default:
+ return ENXIO;
+ }
+
+ device_set_desc(dev, desc);
+
+ return BUS_PROBE_DEFAULT;
+}
+
+static int
+ocs_map_bars(device_t dev, struct ocs_softc *ocs)
+{
+
+ /*
+ * Map PCI BAR0 register into the CPU's space.
+ */
+
+ ocs->reg[0].rid = PCIR_BAR(PCI_64BIT_BAR0);
+ ocs->reg[0].res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
+ &ocs->reg[0].rid, RF_ACTIVE);
+
+ if (ocs->reg[0].res == NULL) {
+ device_printf(dev, "bus_alloc_resource failed rid=%#x\n",
+ ocs->reg[0].rid);
+ return ENXIO;
+ }
+
+ ocs->reg[0].btag = rman_get_bustag(ocs->reg[0].res);
+ ocs->reg[0].bhandle = rman_get_bushandle(ocs->reg[0].res);
+ return 0;
+}
+
+
+static int
+ocs_setup_params(struct ocs_softc *ocs)
+{
+ int32_t i = 0;
+ const char *hw_war_version;
+ /* Setup tunable parameters */
+ ocs->ctrlmask = ctrlmask;
+ ocs->speed = 0;
+ ocs->topology = 0;
+ ocs->ethernet_license = 0;
+ ocs->num_scsi_ios = 8192;
+ ocs->enable_hlm = 0;
+ ocs->hlm_group_size = 8;
+ ocs->logmask = logmask;
+
+ ocs->config_tgt = FALSE;
+ if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
+ "target", &i)) {
+ if (1 == i) {
+ ocs->config_tgt = TRUE;
+ device_printf(ocs->dev, "Enabling target\n");
+ }
+ }
+
+ ocs->config_ini = TRUE;
+ if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
+ "initiator", &i)) {
+ if (0 == i) {
+ ocs->config_ini = FALSE;
+ device_printf(ocs->dev, "Disabling initiator\n");
+ }
+ }
+ ocs->enable_ini = ocs->config_ini;
+
+ if (!ocs->config_ini && !ocs->config_tgt) {
+ device_printf(ocs->dev, "Unsupported, both initiator and target mode disabled.\n");
+ return 1;
+
+ }
+
+ if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
+ "logmask", &logmask)) {
+ device_printf(ocs->dev, "logmask = %#x\n", logmask);
+ }
+
+ if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
+ "logdest", &logdest)) {
+ device_printf(ocs->dev, "logdest = %#x\n", logdest);
+ }
+
+ if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
+ "loglevel", &loglevel)) {
+ device_printf(ocs->dev, "loglevel = %#x\n", loglevel);
+ }
+
+ if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
+ "ramlog_size", &ramlog_size)) {
+ device_printf(ocs->dev, "ramlog_size = %#x\n", ramlog_size);
+ }
+
+ if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
+ "ddump_saved_size", &ddump_saved_size)) {
+		device_printf(ocs->dev, "ddump_saved_size = %#x\n", ddump_saved_size);
+ }
+
+	/* If enabled, initialize a RAM logging buffer */
+ if (logdest & 2) {
+ ocs->ramlog = ocs_ramlog_init(ocs, ramlog_size/OCS_RAMLOG_DEFAULT_BUFFERS,
+ OCS_RAMLOG_DEFAULT_BUFFERS);
+ /* If NULL was returned, then we'll simply skip using the ramlog but */
+ /* set logdest to 1 to ensure that we at least get default logging. */
+ if (ocs->ramlog == NULL) {
+ logdest = 1;
+ }
+ }
+
+ /* initialize a saved ddump */
+ if (ddump_saved_size) {
+ if (ocs_textbuf_alloc(ocs, &ocs->ddump_saved, ddump_saved_size)) {
+ ocs_log_err(ocs, "failed to allocate memory for saved ddump\n");
+ }
+ }
+
+ if (0 == resource_string_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
+ "hw_war_version", &hw_war_version)) {
+ device_printf(ocs->dev, "hw_war_version = %s\n", hw_war_version);
+ ocs->hw_war_version = strdup(hw_war_version, M_OCS);
+ }
+
+ if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
+ "explicit_buffer_list", &i)) {
+ ocs->explicit_buffer_list = i;
+ }
+
+ if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
+ "ethernet_license", &i)) {
+ ocs->ethernet_license = i;
+ }
+
+ if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
+ "speed", &i)) {
+ device_printf(ocs->dev, "speed = %d Mbps\n", i);
+ ocs->speed = i;
+ }
+ ocs->desc = device_get_desc(ocs->dev);
+
+ ocs_device_lock_init(ocs);
+ ocs->driver_version = STR_BE_MAJOR "." STR_BE_MINOR "." STR_BE_BUILD "." STR_BE_BRANCH;
+ ocs->model = ocs_pci_model(ocs->pci_vendor, ocs->pci_device);
+
+ if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
+ "enable_hlm", &i)) {
+ device_printf(ocs->dev, "enable_hlm = %d\n", i);
+ ocs->enable_hlm = i;
+ if (ocs->enable_hlm) {
+ ocs->hlm_group_size = 8;
+
+ if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
+ "hlm_group_size", &i)) {
+ ocs->hlm_group_size = i;
+ }
+ device_printf(ocs->dev, "hlm_group_size = %d\n", i);
+ }
+ }
+
+ if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
+ "num_scsi_ios", &i)) {
+ ocs->num_scsi_ios = i;
+ device_printf(ocs->dev, "num_scsi_ios = %d\n", ocs->num_scsi_ios);
+ } else {
+ ocs->num_scsi_ios = 8192;
+ }
+
+ if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
+ "topology", &i)) {
+ ocs->topology = i;
+ device_printf(ocs->dev, "Setting topology=%#x\n", i);
+ }
+
+ if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
+ "num_vports", &i)) {
+ if (i >= 0 && i <= 254) {
+ device_printf(ocs->dev, "num_vports = %d\n", i);
+ ocs->num_vports = i;
+ } else {
+ device_printf(ocs->dev, "num_vports: %d not supported \n", i);
+ }
+ }
+
+
+ if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
+ "external_loopback", &i)) {
+ device_printf(ocs->dev, "external_loopback = %d\n", i);
+ ocs->external_loopback = i;
+ }
+
+ if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
+ "tgt_rscn_delay", &i)) {
+ device_printf(ocs->dev, "tgt_rscn_delay = %d\n", i);
+ ocs->tgt_rscn_delay_msec = i * 1000;
+ }
+
+ if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
+ "tgt_rscn_period", &i)) {
+ device_printf(ocs->dev, "tgt_rscn_period = %d\n", i);
+ ocs->tgt_rscn_period_msec = i * 1000;
+ }
+
+ if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
+ "target_io_timer", &i)) {
+ device_printf(ocs->dev, "target_io_timer = %d\n", i);
+ ocs->target_io_timer_sec = i;
+ }
+
+ hw_global.queue_topology_string = queue_topology;
+ ocs->rq_selection_policy = 0;
+ ocs->rr_quanta = 1;
+ ocs->filter_def = "0,0,0,0";
+
+ return 0;
+}
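+
+/*
+ * The knobs read above come from the kernel environment / device.hints via
+ * resource_int_value() and resource_string_value(). An illustrative
+ * (editor's example) /boot/device.hints fragment for unit 0:
+ *
+ *	hint.ocs_fc.0.target="1"
+ *	hint.ocs_fc.0.initiator="1"
+ *	hint.ocs_fc.0.loglevel="5"
+ *	hint.ocs_fc.0.num_scsi_ios="8192"
+ */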
+
+static int32_t
+ocs_setup_fcports(ocs_t *ocs)
+{
+ uint32_t i = 0, role = 0;
+ uint64_t sli_wwpn, sli_wwnn;
+ size_t size;
+ ocs_xport_t *xport = ocs->xport;
+ ocs_vport_spec_t *vport;
+ ocs_fcport *fcp = NULL;
+
+ size = sizeof(ocs_fcport) * (ocs->num_vports + 1);
+
+ ocs->fcports = ocs_malloc(ocs, size, M_ZERO|M_NOWAIT);
+ if (ocs->fcports == NULL) {
+ device_printf(ocs->dev, "Can't allocate fcport \n");
+ return 1;
+ }
+
+	role = ((ocs->enable_ini) ? KNOB_ROLE_INITIATOR : 0) |
+		((ocs->enable_tgt) ? KNOB_ROLE_TARGET : 0);
+
+ fcp = FCPORT(ocs, i);
+ fcp->role = role;
+ i++;
+
+ ocs_list_foreach(&xport->vport_list, vport) {
+ fcp = FCPORT(ocs, i);
+ vport->tgt_data = fcp;
+ fcp->vport = vport;
+ fcp->role = role;
+
+ if (ocs_hw_get_def_wwn(ocs, i, &sli_wwpn, &sli_wwnn)) {
+ ocs_log_err(ocs, "Get default wwn failed \n");
+ i++;
+ continue;
+ }
+
+ vport->wwpn = ocs_be64toh(sli_wwpn);
+ vport->wwnn = ocs_be64toh(sli_wwnn);
+ i++;
+ ocs_log_debug(ocs, "VPort wwpn: %lx wwnn: %lx \n", vport->wwpn, vport->wwnn);
+ }
+
+ return 0;
+}
+
+int32_t
+ocs_device_attach(ocs_t *ocs)
+{
+ int32_t i;
+ ocs_io_t *io = NULL;
+
+ if (ocs->attached) {
+ ocs_log_warn(ocs, "%s: Device is already attached\n", __func__);
+ return -1;
+ }
+
+ /* Allocate transport object and bring online */
+ ocs->xport = ocs_xport_alloc(ocs);
+ if (ocs->xport == NULL) {
+ device_printf(ocs->dev, "failed to allocate transport object\n");
+ return ENOMEM;
+ } else if (ocs_xport_attach(ocs->xport) != 0) {
+ device_printf(ocs->dev, "%s: failed to attach transport object\n", __func__);
+ goto fail_xport_attach;
+ } else if (ocs_xport_initialize(ocs->xport) != 0) {
+ device_printf(ocs->dev, "%s: failed to initialize transport object\n", __func__);
+ goto fail_xport_init;
+ }
+
+ if (ocs_init_dma_tag(ocs)) {
+ goto fail_intr_setup;
+ }
+
+ for (i = 0; (io = ocs_io_get_instance(ocs, i)); i++) {
+ if (bus_dmamap_create(ocs->buf_dmat, 0, &io->tgt_io.dmap)) {
+ device_printf(ocs->dev, "%s: bad dma map create\n", __func__);
+ }
+
+ io->tgt_io.state = OCS_CAM_IO_FREE;
+ }
+
+ if (ocs_setup_fcports(ocs)) {
+ device_printf(ocs->dev, "FCports creation failed\n");
+ goto fail_intr_setup;
+ }
+
+ if(ocs_cam_attach(ocs)) {
+ device_printf(ocs->dev, "cam attach failed \n");
+ goto fail_intr_setup;
+ }
+
+ if (ocs_intr_setup(ocs)) {
+ device_printf(ocs->dev, "Interrupt setup failed\n");
+ goto fail_intr_setup;
+ }
+
+ if (ocs->enable_ini || ocs->enable_tgt) {
+ if (ocs_xport_control(ocs->xport, OCS_XPORT_PORT_ONLINE)) {
+ device_printf(ocs->dev, "Can't init port\n");
+ goto fail_xport_online;
+ }
+ }
+
+ ocs->attached = true;
+
+ return 0;
+
+fail_xport_online:
+ if (ocs_xport_control(ocs->xport, OCS_XPORT_SHUTDOWN)) {
+ device_printf(ocs->dev, "Transport Shutdown timed out\n");
+ }
+ ocs_intr_teardown(ocs);
+fail_intr_setup:
+fail_xport_init:
+ ocs_xport_detach(ocs->xport);
+ if (ocs->config_tgt)
+ ocs_scsi_tgt_del_device(ocs);
+
+ ocs_xport_free(ocs->xport);
+ ocs->xport = NULL;
+fail_xport_attach:
+ if (ocs->xport)
+ ocs_free(ocs, ocs->xport, sizeof(*(ocs->xport)));
+ ocs->xport = NULL;
+ return ENXIO;
+}
+
+/**
+ * @brief Connect the driver to the given device
+ *
+ * If the probe routine is successful, the OS will give the driver
+ * the opportunity to connect itself to the device. This routine
+ * maps PCI resources (memory BARs and interrupts) and initializes a
+ * hardware object.
+ *
+ * @param dev device abstraction
+ *
+ * @return 0 if the driver attaches to the device, ENXIO otherwise
+ */
+
+static int
+ocs_pci_attach(device_t dev)
+{
+ struct ocs_softc *ocs;
+ int instance;
+
+ instance = device_get_unit(dev);
+
+ ocs = (struct ocs_softc *)device_get_softc(dev);
+ if (NULL == ocs) {
+ device_printf(dev, "cannot allocate softc\n");
+ return ENOMEM;
+ }
+ memset(ocs, 0, sizeof(struct ocs_softc));
+
+ if (instance < ARRAY_SIZE(ocs_devices)) {
+ ocs_devices[instance] = ocs;
+ } else {
+ device_printf(dev, "got unexpected ocs instance number %d\n", instance);
+ }
+
+ ocs->instance_index = instance;
+
+ ocs->dev = dev;
+
+ pci_enable_io(dev, SYS_RES_MEMORY);
+ pci_enable_busmaster(dev);
+
+ ocs->pci_vendor = pci_get_vendor(dev);
+ ocs->pci_device = pci_get_device(dev);
+ snprintf(ocs->businfo, sizeof(ocs->businfo), "%02X:%02X:%02X",
+ pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
+
+ /* Map all memory BARs */
+ if (ocs_map_bars(dev, ocs)) {
+ device_printf(dev, "Failed to map pci bars\n");
+ goto release_bus;
+ }
+
+ /* create a root DMA tag for the device */
+ if (bus_dma_tag_create(bus_get_dma_tag(dev),
+ 1, /* byte alignment */
+ 0, /* no boundary restrictions */
+ BUS_SPACE_MAXADDR, /* no minimum low address */
+ BUS_SPACE_MAXADDR, /* no maximum high address */
+ NULL, /* no filter function */
+ NULL, /* or arguments */
+ BUS_SPACE_MAXSIZE, /* max size covered by tag */
+ BUS_SPACE_UNRESTRICTED, /* no segment count restrictions */
+ BUS_SPACE_MAXSIZE, /* no segment length restrictions */
+ 0, /* flags */
+ NULL, /* no lock manipulation function */
+ NULL, /* or arguments */
+ &ocs->dmat)) {
+ device_printf(dev, "parent DMA tag allocation failed\n");
+ goto release_bus;
+ }
+
+ if (ocs_intr_alloc(ocs)) {
+ device_printf(dev, "Interrupt allocation failed\n");
+ goto release_bus;
+ }
+
+ if (PCIC_SERIALBUS == pci_get_class(dev) &&
+ PCIS_SERIALBUS_FC == pci_get_subclass(dev))
+ ocs->ocs_xport = OCS_XPORT_FC;
+ else {
+ device_printf(dev, "unsupported class (%#x : %#x)\n",
+ pci_get_class(dev),
+			pci_get_subclass(dev));
+ goto release_bus;
+ }
+
+ /* Setup tunable parameters */
+ if (ocs_setup_params(ocs)) {
+ device_printf(ocs->dev, "failed to setup params\n");
+ goto release_bus;
+ }
+
+ if (ocs_device_attach(ocs)) {
+ device_printf(ocs->dev, "failed to attach device\n");
+ goto release_params;
+ }
+
+ ocs->fc_type = FC_TYPE_FCP;
+
+ ocs_debug_attach(ocs);
+
+ return 0;
+
+release_params:
+ ocs_ramlog_free(ocs, ocs->ramlog);
+ ocs_device_lock_free(ocs);
+ free(ocs->hw_war_version, M_OCS);
+release_bus:
+ ocs_release_bus(ocs);
+ return ENXIO;
+}
+
+/**
+ * @brief free resources when pci device detach
+ *
+ * @param ocs pointer to ocs structure
+ *
+ * @return 0 for success, a negative error code value for failure.
+ */
+
+int32_t
+ocs_device_detach(ocs_t *ocs)
+{
+ int32_t rc = 0, i;
+ ocs_io_t *io = NULL;
+
+ if (ocs != NULL) {
+ if (!ocs->attached) {
+ ocs_log_warn(ocs, "%s: Device is not attached\n", __func__);
+ return -1;
+ }
+
+ rc = ocs_xport_control(ocs->xport, OCS_XPORT_SHUTDOWN);
+ if (rc) {
+ ocs_log_err(ocs, "%s: Transport Shutdown timed out\n", __func__);
+ }
+
+ ocs_intr_teardown(ocs);
+
+ if (ocs_xport_detach(ocs->xport) != 0) {
+ ocs_log_err(ocs, "%s: Transport detach failed\n", __func__);
+ }
+
+ ocs_cam_detach(ocs);
+		ocs_free(ocs, ocs->fcports, sizeof(ocs_fcport) * (ocs->num_vports + 1));
+
+ for (i = 0; (io = ocs_io_get_instance(ocs, i)); i++) {
+ if (bus_dmamap_destroy(ocs->buf_dmat, io->tgt_io.dmap)) {
+ device_printf(ocs->dev, "%s: bad dma map destroy\n", __func__);
+ }
+ }
+ bus_dma_tag_destroy(ocs->dmat);
+ ocs_xport_free(ocs->xport);
+ ocs->xport = NULL;
+
+ ocs->attached = FALSE;
+
+ }
+
+ return 0;
+}
+
+
+/**
+ * @brief Detach the driver from the given device
+ *
+ * If the driver is a loadable module, this routine gets called at unload
+ * time. This routine will stop the device and free any allocated resources.
+ *
+ * @param dev device abstraction
+ *
+ * @return 0 if the driver detaches from the device, ENXIO otherwise
+ */
+static int
+ocs_pci_detach(device_t dev)
+{
+ struct ocs_softc *ocs;
+
+ ocs = (struct ocs_softc *)device_get_softc(dev);
+ if (!ocs) {
+ device_printf(dev, "no driver context?!?\n");
+ return -1;
+ }
+
+ if (ocs->config_tgt && ocs->enable_tgt) {
+ device_printf(dev, "can't detach with target mode enabled\n");
+ return EBUSY;
+ }
+
+ ocs_device_detach(ocs);
+
+ /*
+ * Workaround for OCS SCSI Transport quirk.
+ *
+ * CTL requires that target mode is disabled prior to unloading the
+	 * driver (i.e., ocs->enable_tgt = FALSE), but once the target is disabled,
+ * the transport will not call ocs_scsi_tgt_del_device() which deallocates
+ * CAM resources. The workaround is to explicitly make the call here.
+ */
+ if (ocs->config_tgt)
+ ocs_scsi_tgt_del_device(ocs);
+
+	/* Free the strdup-created buffer. */
+ free(ocs->hw_war_version, M_OCS);
+
+ ocs_device_lock_free(ocs);
+
+ ocs_debug_detach(ocs);
+
+ ocs_ramlog_free(ocs, ocs->ramlog);
+
+ ocs_release_bus(ocs);
+
+ return 0;
+}
+
+/**
+ * @brief Notify driver of system shutdown
+ *
+ * @param dev device abstraction
+ *
+ * @return Returns 0.
+ */
+static int
+ocs_pci_shutdown(device_t dev)
+{
+ device_printf(dev, "%s\n", __func__);
+ return 0;
+}
+
+/**
+ * @brief Release bus resources allocated within the soft context
+ *
+ * @param ocs Pointer to the driver's context
+ *
+ * @return none
+ */
+static void
+ocs_release_bus(struct ocs_softc *ocs)
+{
+
+ if (NULL != ocs) {
+ uint32_t i;
+
+ ocs_intr_teardown(ocs);
+
+ if (ocs->irq) {
+ bus_release_resource(ocs->dev, SYS_RES_IRQ,
+ rman_get_rid(ocs->irq), ocs->irq);
+
+ if (ocs->n_vec) {
+ pci_release_msi(ocs->dev);
+ ocs->n_vec = 0;
+ }
+
+ ocs->irq = NULL;
+ }
+
+ bus_dma_tag_destroy(ocs->dmat);
+
+ for (i = 0; i < PCI_MAX_BAR; i++) {
+ if (ocs->reg[i].res) {
+ bus_release_resource(ocs->dev, SYS_RES_MEMORY,
+ ocs->reg[i].rid,
+ ocs->reg[i].res);
+ }
+ }
+ }
+}
+
+/**
+ * @brief Allocate and initialize interrupts
+ *
+ * @param ocs Pointer to the driver's context
+ *
+ * @return 0 on success, non-zero otherwise
+ */
+static int32_t
+ocs_intr_alloc(struct ocs_softc *ocs)
+{
+
+ ocs->n_vec = 1;
+ if (pci_alloc_msix(ocs->dev, &ocs->n_vec)) {
+ device_printf(ocs->dev, "MSI-X allocation failed\n");
+ if (pci_alloc_msi(ocs->dev, &ocs->n_vec)) {
+ device_printf(ocs->dev, "MSI allocation failed \n");
+ ocs->irqid = 0;
+ ocs->n_vec = 0;
+ } else
+ ocs->irqid = 1;
+ } else {
+ ocs->irqid = 1;
+ }
+
+ ocs->irq = bus_alloc_resource_any(ocs->dev, SYS_RES_IRQ, &ocs->irqid,
+ RF_ACTIVE | RF_SHAREABLE);
+ if (NULL == ocs->irq) {
+ device_printf(ocs->dev, "could not allocate interrupt\n");
+ return -1;
+ }
+
+ ocs->intr_ctx.vec = 0;
+ ocs->intr_ctx.softc = ocs;
+ snprintf(ocs->intr_ctx.name, sizeof(ocs->intr_ctx.name),
+ "%s_intr_%d",
+ device_get_nameunit(ocs->dev),
+ ocs->intr_ctx.vec);
+
+ return 0;
+}
+
+/**
+ * @brief Create and attach an interrupt handler
+ *
+ * @param ocs Pointer to the driver's context
+ *
+ * @return 0 on success, non-zero otherwise
+ */
+static int32_t
+ocs_intr_setup(struct ocs_softc *ocs)
+{
+ driver_filter_t *filter = NULL;
+
+ if (0 == ocs->n_vec) {
+ filter = ocs_pci_intx_filter;
+ }
+
+ if (bus_setup_intr(ocs->dev, ocs->irq, INTR_MPSAFE | INTR_TYPE_CAM,
+ filter, ocs_pci_intr, &ocs->intr_ctx,
+ &ocs->tag)) {
+ device_printf(ocs->dev, "could not initialize interrupt\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+
+/**
+ * @brief Detach an interrupt handler
+ *
+ * @param ocs Pointer to the driver's context
+ *
+ * @return 0 on success, non-zero otherwise
+ */
+static int32_t
+ocs_intr_teardown(struct ocs_softc *ocs)
+{
+
+ if (!ocs) {
+ printf("%s: bad driver context?!?\n", __func__);
+ return -1;
+ }
+
+ if (ocs->tag) {
+ bus_teardown_intr(ocs->dev, ocs->irq, ocs->tag);
+ ocs->tag = NULL;
+ }
+
+ return 0;
+}
+
+/**
+ * @brief PCI interrupt handler
+ *
+ * @param arg pointer to the driver's software context
+ *
+ * @return FILTER_HANDLED if interrupt is processed, FILTER_STRAY otherwise
+ */
+static int
+ocs_pci_intx_filter(void *arg)
+{
+ ocs_intr_ctx_t *intr = arg;
+ struct ocs_softc *ocs = NULL;
+ uint16_t val = 0;
+
+ if (NULL == intr) {
+ return FILTER_STRAY;
+ }
+
+ ocs = intr->softc;
+#ifndef PCIM_STATUS_INTR
+#define PCIM_STATUS_INTR 0x0008
+#endif
+ val = pci_read_config(ocs->dev, PCIR_STATUS, 2);
+ if (0xffff == val) {
+ device_printf(ocs->dev, "%s: pci_read_config(PCIR_STATUS) failed\n", __func__);
+ return FILTER_STRAY;
+ }
+ if (0 == (val & PCIM_STATUS_INTR)) {
+ return FILTER_STRAY;
+ }
+
+ val = pci_read_config(ocs->dev, PCIR_COMMAND, 2);
+ val |= PCIM_CMD_INTxDIS;
+ pci_write_config(ocs->dev, PCIR_COMMAND, val, 2);
+
+ return FILTER_SCHEDULE_THREAD;
+}
+
+/**
+ * @brief interrupt handler
+ *
+ * @param context pointer to the interrupt context
+ */
+static void
+ocs_pci_intr(void *context)
+{
+ ocs_intr_ctx_t *intr = context;
+ struct ocs_softc *ocs = intr->softc;
+
+ mtx_lock(&ocs->sim_lock);
+ ocs_hw_process(&ocs->hw, intr->vec, OCS_OS_MAX_ISR_TIME_MSEC);
+ mtx_unlock(&ocs->sim_lock);
+}
+
+/**
+ * @brief Initialize DMA tag
+ *
+ * @param ocs the driver instance's software context
+ *
+ * @return 0 on success, non-zero otherwise
+ */
+static int32_t
+ocs_init_dma_tag(struct ocs_softc *ocs)
+{
+ uint32_t max_sgl = 0;
+ uint32_t max_sge = 0;
+
+ /*
+ * IOs can't use the parent DMA tag and must create their
+ * own, based primarily on a restricted number of DMA segments.
+ * This is more of a BSD requirement than a SLI Port requirement
+ */
+ ocs_hw_get(&ocs->hw, OCS_HW_N_SGL, &max_sgl);
+ ocs_hw_get(&ocs->hw, OCS_HW_MAX_SGE, &max_sge);
+
+ if (bus_dma_tag_create(ocs->dmat,
+ 1, /* byte alignment */
+ 0, /* no boundary restrictions */
+ BUS_SPACE_MAXADDR, /* no minimum low address */
+ BUS_SPACE_MAXADDR, /* no maximum high address */
+ NULL, /* no filter function */
+ NULL, /* or arguments */
+ BUS_SPACE_MAXSIZE, /* max size covered by tag */
+ max_sgl, /* segment count restrictions */
+ max_sge, /* segment length restrictions */
+ 0, /* flags */
+ NULL, /* no lock manipulation function */
+ NULL, /* or arguments */
+ &ocs->buf_dmat)) {
+ device_printf(ocs->dev, "%s: bad bus_dma_tag_create(buf_dmat)\n", __func__);
+ return -1;
+ }
+ return 0;
+}
+
+int32_t
+ocs_get_property(const char *prop_name, char *buffer, uint32_t buffer_len)
+{
+ return -1;
+}
+
+/**
+ * @brief return pointer to ocs structure given instance index
+ *
+ * A pointer to an ocs structure is returned given an instance index.
+ *
+ * @param index index to ocs_devices array
+ *
+ * @return ocs pointer
+ */
+
+ocs_t *ocs_get_instance(uint32_t index)
+{
+ if (index < ARRAY_SIZE(ocs_devices)) {
+ return ocs_devices[index];
+ }
+ return NULL;
+}
+
+/**
+ * @brief Return instance index of an opaque ocs structure
+ *
+ * Returns the ocs instance index
+ *
+ * @param os pointer to ocs instance
+ *
+ * @return ocs instance index
+ */
+uint32_t
+ocs_instance(void *os)
+{
+ ocs_t *ocs = os;
+ return ocs->instance_index;
+}
+
+static device_method_t ocs_methods[] = {
+ DEVMETHOD(device_probe, ocs_pci_probe),
+ DEVMETHOD(device_attach, ocs_pci_attach),
+ DEVMETHOD(device_detach, ocs_pci_detach),
+ DEVMETHOD(device_shutdown, ocs_pci_shutdown),
+ {0, 0}
+};
+
+static driver_t ocs_driver = {
+ "ocs_fc",
+ ocs_methods,
+ sizeof(struct ocs_softc)
+};
+
+static devclass_t ocs_devclass;
+
+DRIVER_MODULE(ocs_fc, pci, ocs_driver, ocs_devclass, 0, 0);
+MODULE_VERSION(ocs_fc, 1);
+
diff --git a/sys/dev/ocs_fc/ocs_scsi.c b/sys/dev/ocs_fc/ocs_scsi.c
new file mode 100644
index 000000000000..20be08eddf05
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs_scsi.c
@@ -0,0 +1,2960 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/**
+ * @file
+ * OCS FreeBSD SCSI API base driver implementation.
+ */
+
+/**
+ * @defgroup scsi_api_base SCSI Base Target/Initiator
+ */
+
+
+#include "ocs.h"
+#include "ocs_els.h"
+#include "ocs_scsi.h"
+#if defined(OCS_ENABLE_VPD_SUPPORT)
+#include "ocs_vpd.h"
+#endif
+#include "ocs_utils.h"
+#include "ocs_device.h"
+
+#define SCSI_IOFMT "[%04x][i:%0*x t:%0*x h:%04x]"
+#define SCSI_ITT_SIZE(ocs) ((ocs->ocs_xport == OCS_XPORT_FC) ? 4 : 8)
+
+#define SCSI_IOFMT_ARGS(io) io->instance_index, SCSI_ITT_SIZE(io->ocs), io->init_task_tag, SCSI_ITT_SIZE(io->ocs), io->tgt_task_tag, io->hw_tag
+
+#define enable_tsend_auto_resp(ocs) ((ocs->ctrlmask & OCS_CTRLMASK_XPORT_DISABLE_AUTORSP_TSEND) == 0)
+#define enable_treceive_auto_resp(ocs) ((ocs->ctrlmask & OCS_CTRLMASK_XPORT_DISABLE_AUTORSP_TRECEIVE) == 0)
+
+#define scsi_io_printf(io, fmt, ...) ocs_log_info(io->ocs, "[%s]" SCSI_IOFMT fmt, \
+ io->node->display_name, SCSI_IOFMT_ARGS(io), ##__VA_ARGS__)
+
+#define scsi_io_trace(io, fmt, ...) \
+ do { \
+ if (OCS_LOG_ENABLE_SCSI_TRACE(io->ocs)) \
+ scsi_io_printf(io, fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define scsi_log(ocs, fmt, ...) \
+ do { \
+ if (OCS_LOG_ENABLE_SCSI_TRACE(ocs)) \
+ ocs_log_info(ocs, fmt, ##__VA_ARGS__); \
+ } while (0)
+
+static int32_t ocs_target_send_bls_resp(ocs_io_t *io, ocs_scsi_io_cb_t cb, void *arg);
+static int32_t ocs_scsi_abort_io_cb(struct ocs_hw_io_s *hio, ocs_remote_node_t *rnode, uint32_t len, int32_t status,
+ uint32_t ext, void *arg);
+
+static void ocs_scsi_io_free_ovfl(ocs_io_t *io);
+static uint32_t ocs_scsi_count_sgls(ocs_hw_dif_info_t *hw_dif, ocs_scsi_sgl_t *sgl, uint32_t sgl_count);
+static int ocs_scsi_dif_guard_is_crc(uint8_t direction, ocs_hw_dif_info_t *dif_info);
+static ocs_scsi_io_status_e ocs_scsi_dif_check_unknown(ocs_io_t *io, uint32_t length, uint32_t check_length, int is_crc);
+static uint32_t ocs_scsi_dif_check_guard(ocs_hw_dif_info_t *dif_info, ocs_scsi_vaddr_len_t addrlen[],
+ uint32_t addrlen_count, ocs_dif_t *dif, int is_crc);
+static uint32_t ocs_scsi_dif_check_app_tag(ocs_t *ocs, ocs_hw_dif_info_t *dif_info, uint16_t exp_app_tag, ocs_dif_t *dif);
+static uint32_t ocs_scsi_dif_check_ref_tag(ocs_t *ocs, ocs_hw_dif_info_t *dif_info, uint32_t exp_ref_tag, ocs_dif_t *dif);
+static int32_t ocs_scsi_convert_dif_info(ocs_t *ocs, ocs_scsi_dif_info_t *scsi_dif_info,
+ ocs_hw_dif_info_t *hw_dif_info);
+static int32_t ocs_scsi_io_dispatch_hw_io(ocs_io_t *io, ocs_hw_io_t *hio);
+static int32_t ocs_scsi_io_dispatch_no_hw_io(ocs_io_t *io);
+static void _ocs_scsi_io_free(void *arg);
+
+
+/**
+ * @ingroup scsi_api_base
+ * @brief Returns a big-endian 32-bit value given a pointer.
+ *
+ * @param p Pointer to the 32-bit big-endian location.
+ *
+ * @return Returns the byte-swapped 32-bit value.
+ */
+
+static inline uint32_t
+ocs_fc_getbe32(void *p)
+{
+ return ocs_be32toh(*((uint32_t*)p));
+}
+
+/**
+ * @ingroup scsi_api_base
+ * @brief Enable IO allocation.
+ *
+ * @par Description
+ * The SCSI and Transport IO allocation functions are enabled. If the allocation functions
+ * are not enabled, then calls to ocs_scsi_io_alloc() (and ocs_els_io_alloc() for FC) will
+ * fail.
+ *
+ * @param node Pointer to node object.
+ *
+ * @return None.
+ */
+void
+ocs_scsi_io_alloc_enable(ocs_node_t *node)
+{
+ ocs_assert(node != NULL);
+ ocs_lock(&node->active_ios_lock);
+ node->io_alloc_enabled = TRUE;
+ ocs_unlock(&node->active_ios_lock);
+}
+
+/**
+ * @ingroup scsi_api_base
+ * @brief Disable IO allocation
+ *
+ * @par Description
+ * The SCSI and Transport IO allocation functions are disabled. If the allocation functions
+ * are not enabled, then calls to ocs_scsi_io_alloc() (and ocs_els_io_alloc() for FC) will
+ * fail.
+ *
+ * @param node Pointer to node object
+ *
+ * @return None.
+ */
+void
+ocs_scsi_io_alloc_disable(ocs_node_t *node)
+{
+ ocs_assert(node != NULL);
+ ocs_lock(&node->active_ios_lock);
+ node->io_alloc_enabled = FALSE;
+ ocs_unlock(&node->active_ios_lock);
+}
+
+/**
+ * @ingroup scsi_api_base
+ * @brief Allocate a SCSI IO context.
+ *
+ * @par Description
+ * A SCSI IO context is allocated and associated with a @c node. This function
+ * is called by an initiator-client when issuing SCSI commands to remote
+ * target devices. On completion, ocs_scsi_io_free() is called.
+ * @n @n
+ * The returned ocs_io_t structure has an element of type ocs_scsi_ini_io_t named
+ * "ini_io" that is declared and used by an initiator-client for private information.
+ *
+ * @param node Pointer to the associated node structure.
+ * @param role Role for IO (originator/responder).
+ *
+ * @return Returns the pointer to the IO context, or NULL.
+ *
+ */
+
+ocs_io_t *
+ocs_scsi_io_alloc(ocs_node_t *node, ocs_scsi_io_role_e role)
+{
+ ocs_t *ocs;
+ ocs_xport_t *xport;
+ ocs_io_t *io;
+
+ ocs_assert(node, NULL);
+ ocs_assert(node->ocs, NULL);
+
+ ocs = node->ocs;
+ ocs_assert(ocs->xport, NULL);
+ xport = ocs->xport;
+
+ ocs_lock(&node->active_ios_lock);
+
+ if (!node->io_alloc_enabled) {
+ ocs_unlock(&node->active_ios_lock);
+ return NULL;
+ }
+
+ io = ocs_io_alloc(ocs);
+ if (io == NULL) {
+ ocs_atomic_add_return(&xport->io_alloc_failed_count, 1);
+ ocs_unlock(&node->active_ios_lock);
+ return NULL;
+ }
+
+ /* initialize refcount */
+ ocs_ref_init(&io->ref, _ocs_scsi_io_free, io);
+
+ if (io->hio != NULL) {
+ ocs_log_err(node->ocs, "assertion failed: io->hio is not NULL\n");
+ ocs_unlock(&node->active_ios_lock);
+ return NULL;
+ }
+
+ /* set generic fields */
+ io->ocs = ocs;
+ io->node = node;
+
+ /* set type and name */
+ io->io_type = OCS_IO_TYPE_IO;
+ io->display_name = "scsi_io";
+
+ switch (role) {
+ case OCS_SCSI_IO_ROLE_ORIGINATOR:
+ io->cmd_ini = TRUE;
+ io->cmd_tgt = FALSE;
+ break;
+ case OCS_SCSI_IO_ROLE_RESPONDER:
+ io->cmd_ini = FALSE;
+ io->cmd_tgt = TRUE;
+ break;
+ }
+
+ /* Add to node's active_ios list */
+ ocs_list_add_tail(&node->active_ios, io);
+
+ ocs_unlock(&node->active_ios_lock);
+
+ return io;
+}
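+
+/*
+ * Illustrative usage sketch (not part of the driver): a back-end client pairs
+ * ocs_scsi_io_alloc() with ocs_scsi_io_free(). The allocation may legitimately
+ * fail when io_alloc_enabled is FALSE or the IO pool is exhausted, so the NULL
+ * check is mandatory. Here "node" is assumed to be a valid ocs_node_t.
+ *
+ *    ocs_io_t *io = ocs_scsi_io_alloc(node, OCS_SCSI_IO_ROLE_ORIGINATOR);
+ *    if (io == NULL)
+ *        return -1;              (allocation disabled or pool empty)
+ *    ... issue the command and wait for completion ...
+ *    ocs_scsi_io_free(io);       (drops the reference taken at allocation)
+ */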
+
+/**
+ * @ingroup scsi_api_base
+ * @brief Free a SCSI IO context (internal).
+ *
+ * @par Description
+ * The IO context previously allocated using ocs_scsi_io_alloc()
+ * is freed. This is called from within the transport layer,
+ * when the reference count goes to zero.
+ *
+ * @param arg Pointer to the IO context.
+ *
+ * @return None.
+ */
+static void
+_ocs_scsi_io_free(void *arg)
+{
+ ocs_io_t *io = (ocs_io_t *)arg;
+ ocs_t *ocs = io->ocs;
+ ocs_node_t *node = io->node;
+ int send_empty_event;
+
+ ocs_assert(io != NULL);
+
+ scsi_io_trace(io, "freeing io 0x%p %s\n", io, io->display_name);
+
+ ocs_assert(ocs_io_busy(io));
+
+ ocs_lock(&node->active_ios_lock);
+ ocs_list_remove(&node->active_ios, io);
+ send_empty_event = (!node->io_alloc_enabled) && ocs_list_empty(&node->active_ios);
+ ocs_unlock(&node->active_ios_lock);
+
+ if (send_empty_event) {
+ ocs_node_post_event(node, OCS_EVT_NODE_ACTIVE_IO_LIST_EMPTY, NULL);
+ }
+
+ io->node = NULL;
+ ocs_io_free(ocs, io);
+
+}
+
+/**
+ * @ingroup scsi_api_base
+ * @brief Free a SCSI IO context.
+ *
+ * @par Description
+ * The IO context previously allocated using ocs_scsi_io_alloc() is freed.
+ *
+ * @param io Pointer to the IO context.
+ *
+ * @return None.
+ */
+void
+ocs_scsi_io_free(ocs_io_t *io)
+{
+ scsi_io_trace(io, "freeing io 0x%p %s\n", io, io->display_name);
+ ocs_assert(ocs_ref_read_count(&io->ref) > 0);
+ ocs_ref_put(&io->ref); /* ocs_ref_get(): ocs_scsi_io_alloc() */
+}
+
+
+
+static int32_t
+ocs_scsi_send_io(ocs_hw_io_type_e type, ocs_node_t *node, ocs_io_t *io, uint64_t lun,
+ ocs_scsi_tmf_cmd_e tmf, uint8_t *cdb, uint32_t cdb_len,
+ ocs_scsi_dif_info_t *dif_info,
+ ocs_scsi_sgl_t *sgl, uint32_t sgl_count, uint32_t wire_len, uint32_t first_burst,
+ ocs_scsi_rsp_io_cb_t cb, void *arg);
+
+/**
+ * @brief Target response completion callback.
+ *
+ * @par Description
+ * Function is called upon the completion of a target IO request.
+ *
+ * @param hio Pointer to the HW IO structure.
+ * @param rnode Remote node associated with the IO that is completing.
+ * @param length Length of the response payload.
+ * @param status Completion status.
+ * @param ext_status Extended completion status.
+ * @param app Application-specific data (generally a pointer to the IO context).
+ *
+ * @return None.
+ */
+
+static void
+ocs_target_io_cb(ocs_hw_io_t *hio, ocs_remote_node_t *rnode, uint32_t length,
+ int32_t status, uint32_t ext_status, void *app)
+{
+ ocs_io_t *io = app;
+ ocs_t *ocs;
+ ocs_scsi_io_status_e scsi_status = OCS_SCSI_STATUS_GOOD;
+ uint16_t additional_length;
+ uint8_t edir;
+ uint8_t tdpv;
+ ocs_hw_dif_info_t *dif_info = &io->hw_dif;
+ int is_crc;
+
+ ocs_assert(io);
+
+ scsi_io_trace(io, "status x%x ext_status x%x\n", status, ext_status);
+
+ ocs = io->ocs;
+ ocs_assert(ocs);
+
+ ocs_scsi_io_free_ovfl(io);
+
+ io->transferred += length;
+
+ /* Call target server completion */
+ if (io->scsi_tgt_cb) {
+ ocs_scsi_io_cb_t cb = io->scsi_tgt_cb;
+ uint32_t flags = 0;
+
+ /* Clear the callback before invoking the callback */
+ io->scsi_tgt_cb = NULL;
+
+ /* if status was good, and auto-good-response was set, then call back the
+ * target-server with IO_CMPL_RSP_SENT, otherwise send IO_CMPL
+ */
+ if ((status == 0) && (io->auto_resp))
+ flags |= OCS_SCSI_IO_CMPL_RSP_SENT;
+ else
+ flags |= OCS_SCSI_IO_CMPL;
+
+ switch (status) {
+ case SLI4_FC_WCQE_STATUS_SUCCESS:
+ scsi_status = OCS_SCSI_STATUS_GOOD;
+ break;
+ case SLI4_FC_WCQE_STATUS_DI_ERROR:
+ if (ext_status & SLI4_FC_DI_ERROR_GE) {
+ scsi_status = OCS_SCSI_STATUS_DIF_GUARD_ERROR;
+ } else if (ext_status & SLI4_FC_DI_ERROR_AE) {
+ scsi_status = OCS_SCSI_STATUS_DIF_APP_TAG_ERROR;
+ } else if (ext_status & SLI4_FC_DI_ERROR_RE) {
+ scsi_status = OCS_SCSI_STATUS_DIF_REF_TAG_ERROR;
+ } else {
+ additional_length = ((ext_status >> 16) & 0xFFFF);
+
+ /* Capture the EDIR and TDPV bits as 0 or 1 for the checks below. */
+ edir = !!(ext_status & SLI4_FC_DI_ERROR_EDIR);
+ tdpv = !!(ext_status & SLI4_FC_DI_ERROR_TDPV);
+
+ is_crc = ocs_scsi_dif_guard_is_crc(edir, dif_info);
+
+ if (edir == 0) {
+ /* For reads, we have everything in memory. Start checking from beginning. */
+ scsi_status = ocs_scsi_dif_check_unknown(io, 0, io->wire_len, is_crc);
+ } else {
+ /* For writes, use the additional length to determine where to look for the error.
+ * The additional_length field is set to 0 if it is not supported.
+ * The additional length field is valid if:
+ * . additional_length is not zero
+ * . Total Data Placed is valid
+ * . Error Direction is RX (1)
+ * . Operation is a pass thru (CRC or CKSUM on IN, and CRC or CHKSUM on OUT) (all pass-thru cases except raw)
+ */
+ if ((additional_length != 0) && (tdpv != 0) &&
+ (dif_info->dif == SLI4_DIF_PASS_THROUGH) && (dif_info->dif_oper != OCS_HW_SGE_DIF_OP_IN_RAW_OUT_RAW) ) {
+ scsi_status = ocs_scsi_dif_check_unknown(io, length, additional_length, is_crc);
+ } else {
+ /* If we can't do additional checking, then fall-back to guard error */
+ scsi_status = OCS_SCSI_STATUS_DIF_GUARD_ERROR;
+ }
+ }
+ }
+ break;
+ case SLI4_FC_WCQE_STATUS_LOCAL_REJECT:
+ switch (ext_status) {
+ case SLI4_FC_LOCAL_REJECT_INVALID_RELOFFSET:
+ case SLI4_FC_LOCAL_REJECT_ABORT_REQUESTED:
+ scsi_status = OCS_SCSI_STATUS_ABORTED;
+ break;
+ case SLI4_FC_LOCAL_REJECT_INVALID_RPI:
+ scsi_status = OCS_SCSI_STATUS_NEXUS_LOST;
+ break;
+ case SLI4_FC_LOCAL_REJECT_NO_XRI:
+ scsi_status = OCS_SCSI_STATUS_NO_IO;
+ break;
+ default:
+ /* TODO: we have seen 0x0d (TX_DMA_FAILED error) */
+ scsi_status = OCS_SCSI_STATUS_ERROR;
+ break;
+ }
+ break;
+
+ case SLI4_FC_WCQE_STATUS_TARGET_WQE_TIMEOUT:
+ /* target IO timed out */
+ scsi_status = OCS_SCSI_STATUS_TIMEDOUT_AND_ABORTED;
+ break;
+
+ case SLI4_FC_WCQE_STATUS_SHUTDOWN:
+ /* Target IO cancelled by HW */
+ scsi_status = OCS_SCSI_STATUS_SHUTDOWN;
+ break;
+
+ default:
+ scsi_status = OCS_SCSI_STATUS_ERROR;
+ break;
+ }
+
+ cb(io, scsi_status, flags, io->scsi_tgt_cb_arg);
+
+ }
+ ocs_scsi_check_pending(ocs);
+}
+
+/**
+ * @brief Determine if an IO is using CRC for DIF guard format.
+ *
+ * @param direction IO direction: 1 for write, 0 for read.
+ * @param dif_info Pointer to HW DIF info data.
+ *
+ * @return Returns TRUE if using CRC, FALSE if not.
+ */
+static int
+ocs_scsi_dif_guard_is_crc(uint8_t direction, ocs_hw_dif_info_t *dif_info)
+{
+ int is_crc;
+
+ if (direction) {
+ /* For writes, check if operation is "OUT_CRC" or not */
+ switch(dif_info->dif_oper) {
+ case OCS_HW_SGE_DIF_OP_IN_NODIF_OUT_CRC:
+ case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CRC:
+ case OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_CRC:
+ is_crc = TRUE;
+ break;
+ default:
+ is_crc = FALSE;
+ break;
+ }
+ } else {
+ /* For reads, check if operation is "IN_CRC" or not */
+ switch(dif_info->dif_oper) {
+ case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_NODIF:
+ case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CRC:
+ case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CHKSUM:
+ is_crc = TRUE;
+ break;
+ default:
+ is_crc = FALSE;
+ break;
+ }
+ }
+
+ return is_crc;
+}
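+
+/*
+ * Worked example (illustrative): for a read (direction == 0), dif_oper ==
+ * OCS_HW_SGE_DIF_OP_IN_CRC_OUT_NODIF yields TRUE while
+ * OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF yields FALSE; for a write
+ * (direction == 1), only the "OUT_CRC" operations yield TRUE.
+ */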
+
+/**
+ * @brief Check a block and DIF data, computing the appropriate SCSI status.
+ *
+ * @par Description
+ * This function is used to check blocks and DIF when given an unknown DIF
+ * status using the following logic:
+ *
+ * Given the address of the last good block, and a length of bytes that includes
+ * the block with the DIF error, find the bad block. If a block is found with an
+ * app_tag or ref_tag error, then return the appropriate error. No block is expected
+ * to have a block guard error, since the hardware "fixes" the CRC. So if no block in
+ * the range of blocks has an error, the failure is presumed to be a BLOCK GUARD error.
+ *
+ * @param io Pointer to the IO object.
+ * @param length Length of bytes covering the good blocks.
+ * @param check_length Length of bytes that covers the bad block.
+ * @param is_crc True if guard is using CRC format.
+ *
+ * @return Returns SCSI status.
+ */
+
+static ocs_scsi_io_status_e
+ocs_scsi_dif_check_unknown(ocs_io_t *io, uint32_t length, uint32_t check_length, int is_crc)
+{
+ uint32_t i;
+ ocs_t *ocs = io->ocs;
+ ocs_hw_dif_info_t *dif_info = &io->hw_dif;
+ ocs_scsi_io_status_e scsi_status = OCS_SCSI_STATUS_DIF_GUARD_ERROR;
+ uint32_t blocksize; /* data block size */
+ uint64_t first_check_block; /* first block following total data placed */
+ uint64_t last_check_block; /* last block to check */
+ uint32_t check_count; /* count of blocks to check */
+ ocs_scsi_vaddr_len_t addrlen[4]; /* address-length pairs returned from target */
+ int32_t addrlen_count; /* count of address-length pairs */
+ ocs_dif_t *dif; /* pointer to DIF block returned from target */
+ ocs_scsi_dif_info_t scsi_dif_info = io->scsi_dif_info;
+
+ blocksize = ocs_hw_dif_mem_blocksize(&io->hw_dif, TRUE);
+ first_check_block = length / blocksize;
+ last_check_block = ((length + check_length) / blocksize);
+ check_count = last_check_block - first_check_block;
+
+ ocs_log_debug(ocs, "blocksize %d first check_block %" PRId64 " last_check_block %" PRId64 " check_count %d\n",
+ blocksize, first_check_block, last_check_block, check_count);
+
+ for (i = first_check_block; i < last_check_block; i++) {
+ addrlen_count = ocs_scsi_get_block_vaddr(io, (scsi_dif_info.lba + i), addrlen, ARRAY_SIZE(addrlen), (void**) &dif);
+ if (addrlen_count < 0) {
+ ocs_log_test(ocs, "ocs_scsi_get_block_vaddr() failed: %d\n", addrlen_count);
+ scsi_status = OCS_SCSI_STATUS_DIF_UNKNOWN_ERROR;
+ break;
+ }
+
+ if (! ocs_scsi_dif_check_guard(dif_info, addrlen, addrlen_count, dif, is_crc)) {
+ ocs_log_debug(ocs, "block guard check error, lba %" PRId64 "\n", scsi_dif_info.lba + i);
+ scsi_status = OCS_SCSI_STATUS_DIF_GUARD_ERROR;
+ break;
+ }
+ if (! ocs_scsi_dif_check_app_tag(ocs, dif_info, scsi_dif_info.app_tag, dif)) {
+ ocs_log_debug(ocs, "app tag check error, lba %" PRId64 "\n", scsi_dif_info.lba + i);
+ scsi_status = OCS_SCSI_STATUS_DIF_APP_TAG_ERROR;
+ break;
+ }
+ if (! ocs_scsi_dif_check_ref_tag(ocs, dif_info, (scsi_dif_info.ref_tag + i), dif)) {
+ ocs_log_debug(ocs, "ref tag check error, lba %" PRId64 "\n", scsi_dif_info.lba + i);
+ scsi_status = OCS_SCSI_STATUS_DIF_REF_TAG_ERROR;
+ break;
+ }
+
+ }
+ return scsi_status;
+}
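+
+/*
+ * Worked example (illustrative) of the block arithmetic above: with a
+ * 512-byte block size, length == 2048 and check_length == 1024 give
+ * first_check_block == 4, last_check_block == 6 and check_count == 2, so the
+ * blocks at (scsi_dif_info.lba + 4) and (scsi_dif_info.lba + 5) are fetched
+ * with ocs_scsi_get_block_vaddr() and checked.
+ */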
+
+/**
+ * @brief Check the block guard of block data
+ *
+ * @par Description
+ * Using the dif_info for the transfer, check the block guard value.
+ *
+ * @param dif_info Pointer to HW DIF info data.
+ * @param addrlen Array of address length pairs.
+ * @param addrlen_count Number of entries in the addrlen[] array.
+ * @param dif Pointer to the DIF data block being checked.
+ * @param is_crc True if guard is using CRC format.
+ *
+ * @return Returns TRUE if block guard check is ok.
+ */
+static uint32_t
+ocs_scsi_dif_check_guard(ocs_hw_dif_info_t *dif_info, ocs_scsi_vaddr_len_t addrlen[], uint32_t addrlen_count,
+ ocs_dif_t *dif, int is_crc)
+{
+ uint16_t crc = dif_info->dif_seed;
+ uint32_t i;
+ uint16_t checksum;
+
+ if ((dif == NULL) || !dif_info->check_guard) {
+ return TRUE;
+ }
+
+ if (is_crc) {
+ for (i = 0; i < addrlen_count; i++) {
+ crc = ocs_scsi_dif_calc_crc(addrlen[i].vaddr, addrlen[i].length, crc);
+ }
+ return (crc == ocs_be16toh(dif->crc));
+ } else {
+ checksum = ocs_scsi_dif_calc_checksum(addrlen, addrlen_count);
+
+ return (checksum == dif->crc);
+ }
+}
+
+/**
+ * @brief Check the app tag of dif data
+ *
+ * @par Description
+ * Using the dif_info for the transfer, check the app tag.
+ *
+ * @param ocs Pointer to the ocs structure for logging.
+ * @param dif_info Pointer to HW DIF info data.
+ * @param exp_app_tag The value the app tag is expected to be.
+ * @param dif Pointer to the DIF data block being checked.
+ *
+ * @return Returns TRUE if app tag check is ok.
+ */
+static uint32_t
+ocs_scsi_dif_check_app_tag(ocs_t *ocs, ocs_hw_dif_info_t *dif_info, uint16_t exp_app_tag, ocs_dif_t *dif)
+{
+ if ((dif == NULL) || !dif_info->check_app_tag) {
+ return TRUE;
+ }
+
+ ocs_log_debug(ocs, "expected app tag 0x%x, actual 0x%x\n",
+ exp_app_tag, ocs_be16toh(dif->app_tag));
+
+ return (exp_app_tag == ocs_be16toh(dif->app_tag));
+}
+
+/**
+ * @brief Check the ref tag of dif data
+ *
+ * @par Description
+ * Using the dif_info for the transfer, check the ref tag.
+ *
+ * @param ocs Pointer to the ocs structure for logging.
+ * @param dif_info Pointer to HW DIF info data.
+ * @param exp_ref_tag The value the ref tag is expected to be.
+ * @param dif Pointer to the DIF data block being checked.
+ *
+ * @return Returns TRUE if ref tag check is ok.
+ */
+static uint32_t
+ocs_scsi_dif_check_ref_tag(ocs_t *ocs, ocs_hw_dif_info_t *dif_info, uint32_t exp_ref_tag, ocs_dif_t *dif)
+{
+ if ((dif == NULL) || !dif_info->check_ref_tag) {
+ return TRUE;
+ }
+
+ if (exp_ref_tag != ocs_be32toh(dif->ref_tag)) {
+ ocs_log_debug(ocs, "expected ref tag 0x%x, actual 0x%x\n",
+ exp_ref_tag, ocs_be32toh(dif->ref_tag));
+ return FALSE;
+ } else {
+ return TRUE;
+ }
+}
+
+/**
+ * @brief Return the count of SGEs required for the request.
+ *
+ * @par Description
+ * An accurate count of SGEs is computed and returned.
+ *
+ * @param hw_dif Pointer to HW dif information.
+ * @param sgl Pointer to SGL from back end.
+ * @param sgl_count Count of SGEs in SGL.
+ *
+ * @return Count of SGEs.
+ */
+static uint32_t
+ocs_scsi_count_sgls(ocs_hw_dif_info_t *hw_dif, ocs_scsi_sgl_t *sgl, uint32_t sgl_count)
+{
+ uint32_t count = 0;
+ uint32_t i;
+
+ /* Account for the extra SGEs needed when DIF is enabled */
+ if (hw_dif->dif_oper != OCS_HW_DIF_OPER_DISABLED) {
+
+ /* If we're not DIF separate, then emit a seed SGE */
+ if (!hw_dif->dif_separate) {
+ count++;
+ }
+
+ for (i = 0; i < sgl_count; i++) {
+ /* If DIF is enabled, and DIF is separate, then append a SEED then DIF SGE */
+ if (hw_dif->dif_separate) {
+ count += 2;
+ }
+
+ count++;
+ }
+ } else {
+ count = sgl_count;
+ }
+ return count;
+}
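+
+/*
+ * Worked example (illustrative): for sgl_count == 3,
+ *    DIF disabled:               count == 3 (data SGEs only)
+ *    DIF enabled, interleaved:   count == 4 (one seed SGE + 3 data SGEs)
+ *    DIF enabled, dif_separate:  count == 9 (seed + DIF + data per entry)
+ */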
+
+static int32_t
+ocs_scsi_build_sgls(ocs_hw_t *hw, ocs_hw_io_t *hio, ocs_hw_dif_info_t *hw_dif, ocs_scsi_sgl_t *sgl, uint32_t sgl_count, ocs_hw_io_type_e type)
+{
+ int32_t rc;
+ uint32_t i;
+ ocs_t *ocs = hw->os;
+ uint32_t blocksize = 0;
+ uint32_t blockcount;
+
+ ocs_assert(hio, -1);
+
+ /* Initialize HW SGL */
+ rc = ocs_hw_io_init_sges(hw, hio, type);
+ if (rc) {
+ ocs_log_err(ocs, "ocs_hw_io_init_sges failed: %d\n", rc);
+ return -1;
+ }
+
+ /* Convert DIF Information */
+ if (hw_dif->dif_oper != OCS_HW_DIF_OPER_DISABLED) {
+
+ /* If we're not DIF separate, then emit a seed SGE */
+ if (!hw_dif->dif_separate) {
+ rc = ocs_hw_io_add_seed_sge(hw, hio, hw_dif);
+ if (rc) {
+ return rc;
+ }
+ }
+
+ /* if we are doing DIF separate, then figure out the block size so that we
+ * can update the ref tag in the DIF seed SGE. Also verify that the
+ * sgl lengths are all multiples of the blocksize.
+ */
+ if (hw_dif->dif_separate) {
+ switch(hw_dif->blk_size) {
+ case OCS_HW_DIF_BK_SIZE_512: blocksize = 512; break;
+ case OCS_HW_DIF_BK_SIZE_1024: blocksize = 1024; break;
+ case OCS_HW_DIF_BK_SIZE_2048: blocksize = 2048; break;
+ case OCS_HW_DIF_BK_SIZE_4096: blocksize = 4096; break;
+ case OCS_HW_DIF_BK_SIZE_520: blocksize = 520; break;
+ case OCS_HW_DIF_BK_SIZE_4104: blocksize = 4104; break;
+ default:
+ ocs_log_test(hw->os, "Inavlid hw_dif blocksize %d\n", hw_dif->blk_size);
+ return -1;
+ }
+ for (i = 0; i < sgl_count; i++) {
+ if ((sgl[i].len % blocksize) != 0) {
+ ocs_log_test(hw->os, "sgl[%d] len of %ld is not multiple of blocksize\n",
+ i, sgl[i].len);
+ return -1;
+ }
+ }
+ }
+
+ for (i = 0; i < sgl_count; i++) {
+ ocs_assert(sgl[i].addr, -1);
+ ocs_assert(sgl[i].len, -1);
+
+ /* If DIF is enabled, and DIF is separate, then append a SEED then DIF SGE */
+ if (hw_dif->dif_separate) {
+ rc = ocs_hw_io_add_seed_sge(hw, hio, hw_dif);
+ if (rc) {
+ return rc;
+ }
+ rc = ocs_hw_io_add_dif_sge(hw, hio, sgl[i].dif_addr);
+ if (rc) {
+ return rc;
+ }
+ /* Update the ref_tag for the next DIF seed SGE */
+ blockcount = sgl[i].len / blocksize;
+ if (hw_dif->dif_oper == OCS_HW_DIF_OPER_INSERT) {
+ hw_dif->ref_tag_repl += blockcount;
+ } else {
+ hw_dif->ref_tag_cmp += blockcount;
+ }
+ }
+
+ /* Add data SGE */
+ rc = ocs_hw_io_add_sge(hw, hio, sgl[i].addr, sgl[i].len);
+ if (rc) {
+ ocs_log_err(ocs, "ocs_hw_io_add_sge failed: count=%d rc=%d\n",
+ sgl_count, rc);
+ return rc;
+ }
+ }
+ } else {
+ for (i = 0; i < sgl_count; i++) {
+ ocs_assert(sgl[i].addr, -1);
+ ocs_assert(sgl[i].len, -1);
+
+ /* Add data SGE */
+ rc = ocs_hw_io_add_sge(hw, hio, sgl[i].addr, sgl[i].len);
+ if (rc) {
+ ocs_log_err(ocs, "ocs_hw_io_add_sge failed: count=%d rc=%d\n",
+ sgl_count, rc);
+ return rc;
+ }
+
+ }
+ }
+ return 0;
+}
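+
+/*
+ * Illustrative SGE layout produced above for a two-entry SGL with
+ * dif_separate set (the ref tag in each seed SGE advances by the block count
+ * of the preceding data SGE):
+ *
+ *    [SEED][DIF][DATA sgl[0]] [SEED][DIF][DATA sgl[1]]
+ *
+ * Without dif_separate, a single seed SGE is followed by the data SGEs.
+ */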
+
+/**
+ * @ingroup scsi_api_base
+ * @brief Convert SCSI API T10 DIF information into the FC HW format.
+ *
+ * @param ocs Pointer to the ocs structure for logging.
+ * @param scsi_dif_info Pointer to the SCSI API T10 DIF fields.
+ * @param hw_dif_info Pointer to the FC HW API T10 DIF fields.
+ *
+ * @return Returns 0 on success, or a negative error code value on failure.
+ */
+
+static int32_t
+ocs_scsi_convert_dif_info(ocs_t *ocs, ocs_scsi_dif_info_t *scsi_dif_info, ocs_hw_dif_info_t *hw_dif_info)
+{
+ uint32_t dif_seed;
+ ocs_memset(hw_dif_info, 0, sizeof(ocs_hw_dif_info_t));
+
+ if (scsi_dif_info == NULL) {
+ hw_dif_info->dif_oper = OCS_HW_DIF_OPER_DISABLED;
+ hw_dif_info->blk_size = OCS_HW_DIF_BK_SIZE_NA;
+ return 0;
+ }
+
+ /* Convert the DIF operation */
+ switch(scsi_dif_info->dif_oper) {
+ case OCS_SCSI_DIF_OPER_IN_NODIF_OUT_CRC:
+ hw_dif_info->dif_oper = OCS_HW_SGE_DIF_OP_IN_NODIF_OUT_CRC;
+ hw_dif_info->dif = SLI4_DIF_INSERT;
+ break;
+ case OCS_SCSI_DIF_OPER_IN_CRC_OUT_NODIF:
+ hw_dif_info->dif_oper = OCS_HW_SGE_DIF_OP_IN_CRC_OUT_NODIF;
+ hw_dif_info->dif = SLI4_DIF_STRIP;
+ break;
+ case OCS_SCSI_DIF_OPER_IN_NODIF_OUT_CHKSUM:
+ hw_dif_info->dif_oper = OCS_HW_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM;
+ hw_dif_info->dif = SLI4_DIF_INSERT;
+ break;
+ case OCS_SCSI_DIF_OPER_IN_CHKSUM_OUT_NODIF:
+ hw_dif_info->dif_oper = OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF;
+ hw_dif_info->dif = SLI4_DIF_STRIP;
+ break;
+ case OCS_SCSI_DIF_OPER_IN_CRC_OUT_CRC:
+ hw_dif_info->dif_oper = OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CRC;
+ hw_dif_info->dif = SLI4_DIF_PASS_THROUGH;
+ break;
+ case OCS_SCSI_DIF_OPER_IN_CHKSUM_OUT_CHKSUM:
+ hw_dif_info->dif_oper = OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM;
+ hw_dif_info->dif = SLI4_DIF_PASS_THROUGH;
+ break;
+ case OCS_SCSI_DIF_OPER_IN_CRC_OUT_CHKSUM:
+ hw_dif_info->dif_oper = OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CHKSUM;
+ hw_dif_info->dif = SLI4_DIF_PASS_THROUGH;
+ break;
+ case OCS_SCSI_DIF_OPER_IN_CHKSUM_OUT_CRC:
+ hw_dif_info->dif_oper = OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_CRC;
+ hw_dif_info->dif = SLI4_DIF_PASS_THROUGH;
+ break;
+ case OCS_SCSI_DIF_OPER_IN_RAW_OUT_RAW:
+ hw_dif_info->dif_oper = OCS_HW_SGE_DIF_OP_IN_RAW_OUT_RAW;
+ hw_dif_info->dif = SLI4_DIF_PASS_THROUGH;
+ break;
+ default:
+ ocs_log_test(ocs, "unhandled SCSI DIF operation %d\n",
+ scsi_dif_info->dif_oper);
+ return -1;
+ }
+
+ switch(scsi_dif_info->blk_size) {
+ case OCS_SCSI_DIF_BK_SIZE_512:
+ hw_dif_info->blk_size = OCS_HW_DIF_BK_SIZE_512;
+ break;
+ case OCS_SCSI_DIF_BK_SIZE_1024:
+ hw_dif_info->blk_size = OCS_HW_DIF_BK_SIZE_1024;
+ break;
+ case OCS_SCSI_DIF_BK_SIZE_2048:
+ hw_dif_info->blk_size = OCS_HW_DIF_BK_SIZE_2048;
+ break;
+ case OCS_SCSI_DIF_BK_SIZE_4096:
+ hw_dif_info->blk_size = OCS_HW_DIF_BK_SIZE_4096;
+ break;
+ case OCS_SCSI_DIF_BK_SIZE_520:
+ hw_dif_info->blk_size = OCS_HW_DIF_BK_SIZE_520;
+ break;
+ case OCS_SCSI_DIF_BK_SIZE_4104:
+ hw_dif_info->blk_size = OCS_HW_DIF_BK_SIZE_4104;
+ break;
+ default:
+ ocs_log_test(ocs, "unhandled SCSI DIF block size %d\n",
+ scsi_dif_info->blk_size);
+ return -1;
+ }
+
+ /* If the operation is an INSERT, the tags provided are the ones that should be
+ * inserted; otherwise, they are the ones to be checked against. */
+ if (hw_dif_info->dif == SLI4_DIF_INSERT) {
+ hw_dif_info->ref_tag_repl = scsi_dif_info->ref_tag;
+ hw_dif_info->app_tag_repl = scsi_dif_info->app_tag;
+ } else {
+ hw_dif_info->ref_tag_cmp = scsi_dif_info->ref_tag;
+ hw_dif_info->app_tag_cmp = scsi_dif_info->app_tag;
+ }
+
+ hw_dif_info->check_ref_tag = scsi_dif_info->check_ref_tag;
+ hw_dif_info->check_app_tag = scsi_dif_info->check_app_tag;
+ hw_dif_info->check_guard = scsi_dif_info->check_guard;
+ hw_dif_info->auto_incr_ref_tag = 1;
+ hw_dif_info->dif_separate = scsi_dif_info->dif_separate;
+ hw_dif_info->disable_app_ffff = scsi_dif_info->disable_app_ffff;
+ hw_dif_info->disable_app_ref_ffff = scsi_dif_info->disable_app_ref_ffff;
+
+ ocs_hw_get(&ocs->hw, OCS_HW_DIF_SEED, &dif_seed);
+ hw_dif_info->dif_seed = dif_seed;
+
+ return 0;
+}
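+
+/*
+ * Illustrative sketch (hypothetical values) of filling the SCSI-level DIF
+ * descriptor that this function converts:
+ *
+ *    ocs_scsi_dif_info_t dif;
+ *
+ *    ocs_memset(&dif, 0, sizeof(dif));
+ *    dif.dif_oper = OCS_SCSI_DIF_OPER_IN_CRC_OUT_NODIF;   (strip on read)
+ *    dif.blk_size = OCS_SCSI_DIF_BK_SIZE_512;
+ *    dif.ref_tag = lba;                       (lba is hypothetical)
+ *    dif.check_ref_tag = TRUE;
+ *    dif.check_guard = TRUE;
+ *
+ * Passing a NULL scsi_dif_info instead disables DIF (OCS_HW_DIF_OPER_DISABLED).
+ */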
+
+/**
+ * @ingroup scsi_api_base
+ * @brief This function logs the SGLs for an IO.
+ *
+ * @param io Pointer to the IO context.
+ */
+static void ocs_log_sgl(ocs_io_t *io)
+{
+ ocs_hw_io_t *hio = io->hio;
+ sli4_sge_t *data = NULL;
+ uint32_t *dword = NULL;
+ uint32_t i;
+ uint32_t n_sge;
+
+ scsi_io_trace(io, "def_sgl at 0x%x 0x%08x\n",
+ ocs_addr32_hi(hio->def_sgl.phys),
+ ocs_addr32_lo(hio->def_sgl.phys));
+ n_sge = (hio->sgl == &hio->def_sgl ? hio->n_sge : hio->def_sgl_count);
+ for (i = 0, data = hio->def_sgl.virt; i < n_sge; i++, data++) {
+ dword = (uint32_t*)data;
+
+ scsi_io_trace(io, "SGL %2d 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ i, dword[0], dword[1], dword[2], dword[3]);
+
+ if (dword[2] & (1U << 31)) {
+ break;
+ }
+ }
+
+ if (hio->ovfl_sgl != NULL &&
+ hio->sgl == hio->ovfl_sgl) {
+ scsi_io_trace(io, "Overflow at 0x%x 0x%08x\n",
+ ocs_addr32_hi(hio->ovfl_sgl->phys),
+ ocs_addr32_lo(hio->ovfl_sgl->phys));
+ for (i = 0, data = hio->ovfl_sgl->virt; i < hio->n_sge; i++, data++) {
+ dword = (uint32_t*)data;
+
+ scsi_io_trace(io, "SGL %2d 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ i, dword[0], dword[1], dword[2], dword[3]);
+ if (dword[2] & (1U << 31)) {
+ break;
+ }
+ }
+ }
+
+}
+
+
+/**
+ * @brief Check pending error asynchronous callback function.
+ *
+ * @par Description
+ * Invoke the HW callback function for a given IO. This function is called
+ * from the NOP mailbox completion context.
+ *
+ * @param hw Pointer to HW object.
+ * @param status Completion status.
+ * @param mqe Mailbox completion queue entry.
+ * @param arg General purpose argument.
+ *
+ * @return Returns 0.
+ */
+static int32_t
+ocs_scsi_check_pending_async_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
+{
+ ocs_io_t *io = arg;
+
+ if (io != NULL) {
+ if (io->hw_cb != NULL) {
+ ocs_hw_done_t cb = io->hw_cb;
+
+ io->hw_cb = NULL;
+ cb(io->hio, NULL, 0, SLI4_FC_WCQE_STATUS_DISPATCH_ERROR, 0, io);
+ }
+ }
+ return 0;
+}
+
+/**
+ * @brief Check for pending IOs to dispatch.
+ *
+ * @par Description
+ * If there are IOs on the pending list, and a HW IO is available, then
+ * dispatch the IOs.
+ *
+ * @param ocs Pointer to the OCS structure.
+ *
+ * @return None.
+ */
+
+void
+ocs_scsi_check_pending(ocs_t *ocs)
+{
+ ocs_xport_t *xport = ocs->xport;
+ ocs_io_t *io;
+ ocs_hw_io_t *hio;
+ int32_t status;
+ int count = 0;
+ int dispatch;
+
+ /* Guard against recursion */
+ if (ocs_atomic_add_return(&xport->io_pending_recursing, 1)) {
+ /* This function is already running. Decrement and return. */
+ ocs_atomic_sub_return(&xport->io_pending_recursing, 1);
+ return;
+ }
+
+ do {
+ ocs_lock(&xport->io_pending_lock);
+ status = 0;
+ hio = NULL;
+ io = ocs_list_remove_head(&xport->io_pending_list);
+ if (io != NULL) {
+ if (io->io_type == OCS_IO_TYPE_ABORT) {
+ hio = NULL;
+ } else {
+ hio = ocs_hw_io_alloc(&ocs->hw);
+ if (hio == NULL) {
+ /*
+ * No HW IO available.
+ * Put IO back on the front of pending list
+ */
+ ocs_list_add_head(&xport->io_pending_list, io);
+ io = NULL;
+ } else {
+ hio->eq = io->hw_priv;
+ }
+ }
+ }
+ /* Must drop the lock before dispatching the IO */
+ ocs_unlock(&xport->io_pending_lock);
+
+ if (io != NULL) {
+ count++;
+
+ /*
+ * We pulled an IO off the pending list,
+ * and either got an HW IO or don't need one
+ */
+ ocs_atomic_sub_return(&xport->io_pending_count, 1);
+ if (hio == NULL) {
+ status = ocs_scsi_io_dispatch_no_hw_io(io);
+ } else {
+ status = ocs_scsi_io_dispatch_hw_io(io, hio);
+ }
+ if (status) {
+ /*
+ * Invoke the HW callback, but do so in the separate execution context,
+ * provided by the NOP mailbox completion processing context by using
+ * ocs_hw_async_call()
+ */
+ if (ocs_hw_async_call(&ocs->hw, ocs_scsi_check_pending_async_cb, io)) {
+ ocs_log_test(ocs, "call to ocs_hw_async_call() failed\n");
+ }
+ }
+ }
+ } while (io != NULL);
+
+
+ /*
+ * If nothing was removed from the list,
+ * we might be in a case where we need to abort an
+ * active IO and the abort is on the pending list.
+ * Look for an abort we can dispatch.
+ */
+ if (count == 0) {
+ dispatch = 0;
+
+ ocs_lock(&xport->io_pending_lock);
+ ocs_list_foreach(&xport->io_pending_list, io) {
+ if (io->io_type == OCS_IO_TYPE_ABORT) {
+ if (io->io_to_abort->hio != NULL) {
+ /* This IO has a HW IO, so it is active. Dispatch the abort. */
+ dispatch = 1;
+ } else {
+ /* Leave this abort on the pending list and keep looking */
+ dispatch = 0;
+ }
+ }
+ if (dispatch) {
+ ocs_list_remove(&xport->io_pending_list, io);
+ ocs_atomic_sub_return(&xport->io_pending_count, 1);
+ break;
+ }
+ }
+ ocs_unlock(&xport->io_pending_lock);
+
+ if (dispatch) {
+ status = ocs_scsi_io_dispatch_no_hw_io(io);
+ if (status) {
+ if (ocs_hw_async_call(&ocs->hw, ocs_scsi_check_pending_async_cb, io)) {
+ ocs_log_test(ocs, "call to ocs_hw_async_call() failed\n");
+ }
+ }
+ }
+ }
+
+ ocs_atomic_sub_return(&xport->io_pending_recursing, 1);
+ return;
+}
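+
+/*
+ * The recursion guard at the top of ocs_scsi_check_pending() is a reusable
+ * pattern: ocs_atomic_add_return() yields the pre-increment value here, so a
+ * nonzero result means another invocation (possibly this thread, reentrantly
+ * via a dispatch callback) is already draining the list. Sketch:
+ *
+ *    if (ocs_atomic_add_return(&guard, 1)) {
+ *        ocs_atomic_sub_return(&guard, 1);    (someone else is running)
+ *        return;
+ *    }
+ *    ... drain the pending list ...
+ *    ocs_atomic_sub_return(&guard, 1);
+ */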
+
+/**
+ * @brief Attempt to dispatch a non-abort IO
+ *
+ * @par Description
+ * An IO is dispatched:
+ * - if the pending list is not empty, add IO to pending list
+ * and call a function to process the pending list.
+ * - if pending list is empty, try to allocate a HW IO. If none
+ * is available, place this IO at the tail of the pending IO
+ * list.
+ * - if HW IO is available, attach this IO to the HW IO and
+ * submit it.
+ *
+ * @param io Pointer to IO structure.
+ * @param cb Callback function.
+ *
+ * @return Returns 0 on success, a negative error code value on failure.
+ */
+
+int32_t
+ocs_scsi_io_dispatch(ocs_io_t *io, void *cb)
+{
+ ocs_hw_io_t *hio;
+ ocs_t *ocs = io->ocs;
+ ocs_xport_t *xport = ocs->xport;
+
+ ocs_assert(io->cmd_tgt || io->cmd_ini, -1);
+ ocs_assert((io->io_type != OCS_IO_TYPE_ABORT), -1);
+ io->hw_cb = cb;
+
+ /*
+ * if this IO already has a HW IO, then this is not the first phase of
+ * the IO; send it to the HW.
+ */
+ if (io->hio != NULL) {
+ return ocs_scsi_io_dispatch_hw_io(io, io->hio);
+ }
+
+ /*
+ * We don't already have a HW IO associated with the IO. First check
+ * the pending list. If not empty, add IO to the tail and process the
+ * pending list.
+ */
+ ocs_lock(&xport->io_pending_lock);
+ if (!ocs_list_empty(&xport->io_pending_list)) {
+ /*
+ * If this is a low latency request, then put it at the front of the IO
+ * pending queue, otherwise put it at the end of the queue.
+ */
+ if (io->low_latency) {
+ ocs_list_add_head(&xport->io_pending_list, io);
+ } else {
+ ocs_list_add_tail(&xport->io_pending_list, io);
+ }
+ ocs_unlock(&xport->io_pending_lock);
+ ocs_atomic_add_return(&xport->io_pending_count, 1);
+ ocs_atomic_add_return(&xport->io_total_pending, 1);
+
+ /* process pending list */
+ ocs_scsi_check_pending(ocs);
+ return 0;
+ }
+ ocs_unlock(&xport->io_pending_lock);
+
+ /*
+ * We don't have a HW IO associated with the IO and there's nothing
+ * on the pending list. Attempt to allocate a HW IO and dispatch it.
+ */
+ hio = ocs_hw_io_alloc(&io->ocs->hw);
+ if (hio == NULL) {
+
+ /* Couldn't get a HW IO. Save this IO on the pending list */
+ ocs_lock(&xport->io_pending_lock);
+ ocs_list_add_tail(&xport->io_pending_list, io);
+ ocs_unlock(&xport->io_pending_lock);
+
+ ocs_atomic_add_return(&xport->io_total_pending, 1);
+ ocs_atomic_add_return(&xport->io_pending_count, 1);
+ return 0;
+ }
+
+ /* We successfully allocated a HW IO; dispatch to HW */
+ return ocs_scsi_io_dispatch_hw_io(io, hio);
+}
+
+/**
+ * @brief Attempt to dispatch an Abort IO.
+ *
+ * @par Description
+ * An Abort IO is dispatched:
+ * - if the pending list is not empty, add IO to pending list
+ * and call a function to process the pending list.
+ * - if pending list is empty, send abort to the HW.
+ *
+ * @param io Pointer to IO structure.
+ * @param cb Callback function.
+ *
+ * @return Returns 0 on success, a negative error code value on failure.
+ */
+
+int32_t
+ocs_scsi_io_dispatch_abort(ocs_io_t *io, void *cb)
+{
+ ocs_t *ocs = io->ocs;
+ ocs_xport_t *xport = ocs->xport;
+
+ ocs_assert((io->io_type == OCS_IO_TYPE_ABORT), -1);
+ io->hw_cb = cb;
+
+ /*
+ * For aborts, we don't need a HW IO, but we still want to pass through
+ * the pending list to preserve ordering. Thus, if the pending list is
+ * not empty, add this abort to the pending list and process the pending list.
+ */
+ ocs_lock(&xport->io_pending_lock);
+ if (!ocs_list_empty(&xport->io_pending_list)) {
+ ocs_list_add_tail(&xport->io_pending_list, io);
+ ocs_unlock(&xport->io_pending_lock);
+ ocs_atomic_add_return(&xport->io_pending_count, 1);
+ ocs_atomic_add_return(&xport->io_total_pending, 1);
+
+ /* process pending list */
+ ocs_scsi_check_pending(ocs);
+ return 0;
+ }
+ ocs_unlock(&xport->io_pending_lock);
+
+ /* nothing on pending list, dispatch abort */
+ return ocs_scsi_io_dispatch_no_hw_io(io);
+
+}
+
+/**
+ * @brief Dispatch IO
+ *
+ * @par Description
+ * An IO and its associated HW IO is dispatched to the HW.
+ *
+ * @param io Pointer to IO structure.
+ * @param hio Pointer to HW IO structure from which IO will be
+ * dispatched.
+ *
+ * @return Returns 0 on success, a negative error code value on failure.
+ */
+
+static int32_t
+ocs_scsi_io_dispatch_hw_io(ocs_io_t *io, ocs_hw_io_t *hio)
+{
+ int32_t rc;
+ ocs_t *ocs = io->ocs;
+
+ /* Got a HW IO; update ini/tgt_task_tag with HW IO info and dispatch */
+ io->hio = hio;
+ if (io->cmd_tgt) {
+ io->tgt_task_tag = hio->indicator;
+ } else if (io->cmd_ini) {
+ io->init_task_tag = hio->indicator;
+ }
+ io->hw_tag = hio->reqtag;
+
+ hio->eq = io->hw_priv;
+
+ /* Copy WQ steering */
+ switch(io->wq_steering) {
+ case OCS_SCSI_WQ_STEERING_CLASS >> OCS_SCSI_WQ_STEERING_SHIFT:
+ hio->wq_steering = OCS_HW_WQ_STEERING_CLASS;
+ break;
+ case OCS_SCSI_WQ_STEERING_REQUEST >> OCS_SCSI_WQ_STEERING_SHIFT:
+ hio->wq_steering = OCS_HW_WQ_STEERING_REQUEST;
+ break;
+ case OCS_SCSI_WQ_STEERING_CPU >> OCS_SCSI_WQ_STEERING_SHIFT:
+ hio->wq_steering = OCS_HW_WQ_STEERING_CPU;
+ break;
+ }
+
+
+ switch (io->io_type) {
+ case OCS_IO_TYPE_IO: {
+ uint32_t max_sgl;
+ uint32_t total_count;
+ uint32_t host_allocated;
+
+ ocs_hw_get(&ocs->hw, OCS_HW_N_SGL, &max_sgl);
+ ocs_hw_get(&ocs->hw, OCS_HW_SGL_CHAINING_HOST_ALLOCATED, &host_allocated);
+
+ /*
+ * If the requested SGL is larger than the default size, then we can allocate
+ * an overflow SGL.
+ */
+ total_count = ocs_scsi_count_sgls(&io->hw_dif, io->sgl, io->sgl_count);
+
+ /*
+ * Lancer requires us to allocate the chained memory area, but
+ * Skyhawk must use the SGL list associated with another XRI.
+ */
+ if (host_allocated && total_count > max_sgl) {
+ /* Compute count needed, the number extra plus 1 for the link sge */
+ uint32_t count = total_count - max_sgl + 1;
+ rc = ocs_dma_alloc(ocs, &io->ovfl_sgl, count*sizeof(sli4_sge_t), 64);
+ if (rc) {
+ ocs_log_err(ocs, "ocs_dma_alloc overflow sgl failed\n");
+ break;
+ }
+ rc = ocs_hw_io_register_sgl(&ocs->hw, io->hio, &io->ovfl_sgl, count);
+ if (rc) {
+ ocs_scsi_io_free_ovfl(io);
+ ocs_log_err(ocs, "ocs_hw_io_register_sgl() failed\n");
+ break;
+ }
+ /* EVT: update chained_io_count */
+ io->node->chained_io_count++;
+ }
+
+ rc = ocs_scsi_build_sgls(&ocs->hw, io->hio, &io->hw_dif, io->sgl, io->sgl_count, io->hio_type);
+ if (rc) {
+ ocs_scsi_io_free_ovfl(io);
+ break;
+ }
+
+ if (OCS_LOG_ENABLE_SCSI_TRACE(ocs)) {
+ ocs_log_sgl(io);
+ }
+
+ if (io->app_id) {
+ io->iparam.fcp_tgt.app_id = io->app_id;
+ }
+
+ rc = ocs_hw_io_send(&io->ocs->hw, io->hio_type, io->hio, io->wire_len, &io->iparam, &io->node->rnode,
+ io->hw_cb, io);
+ break;
+ }
+ case OCS_IO_TYPE_ELS:
+ case OCS_IO_TYPE_CT: {
+ rc = ocs_hw_srrs_send(&ocs->hw, io->hio_type, io->hio,
+ &io->els_req, io->wire_len,
+ &io->els_rsp, &io->node->rnode, &io->iparam,
+ io->hw_cb, io);
+ break;
+ }
+ case OCS_IO_TYPE_CT_RESP: {
+ rc = ocs_hw_srrs_send(&ocs->hw, io->hio_type, io->hio,
+ &io->els_rsp, io->wire_len,
+ NULL, &io->node->rnode, &io->iparam,
+ io->hw_cb, io);
+ break;
+ }
+ case OCS_IO_TYPE_BLS_RESP: {
+ /* no need to update tgt_task_tag for BLS response since the RX_ID
+ * will be specified by the payload, not the XRI */
+ rc = ocs_hw_srrs_send(&ocs->hw, io->hio_type, io->hio,
+ NULL, 0, NULL, &io->node->rnode, &io->iparam, io->hw_cb, io);
+ break;
+ }
+ default:
+ scsi_io_printf(io, "Unknown IO type=%d\n", io->io_type);
+ rc = -1;
+ break;
+ }
+ return rc;
+}
+
+/**
+ * @brief Dispatch an IO that does not require a HW IO.
+ *
+ * @par Description
+ * An IO that does not require a HW IO (currently only an abort) is dispatched
+ * directly to the HW.
+ *
+ * @param io Pointer to IO structure.
+ *
+ * @return Returns 0 on success, or a negative error code value on failure.
+ */
+
+static int32_t
+ocs_scsi_io_dispatch_no_hw_io(ocs_io_t *io)
+{
+ int32_t rc;
+
+ switch (io->io_type) {
+ case OCS_IO_TYPE_ABORT: {
+ ocs_hw_io_t *hio_to_abort = NULL;
+ ocs_assert(io->io_to_abort, -1);
+ hio_to_abort = io->io_to_abort->hio;
+
+ if (hio_to_abort == NULL) {
+ /*
+ * If "IO to abort" does not have an associated HW IO, immediately
+ * make callback with success. The command must have been sent to
+ * the backend, but the data phase has not yet started, so we don't
+ * have a HW IO.
+ *
+ * Note: since the backend shims should be taking a reference
+ * on io_to_abort, it should not be possible to have been completed
+ * and freed by the backend before the abort got here.
+ */
+ scsi_io_printf(io, "IO: " SCSI_IOFMT " not active\n",
+ SCSI_IOFMT_ARGS(io->io_to_abort));
+ ((ocs_hw_done_t)io->hw_cb)(io->hio, NULL, 0, SLI4_FC_WCQE_STATUS_SUCCESS, 0, io);
+ rc = 0;
+ } else {
+ /* HW IO is valid, abort it */
+ scsi_io_printf(io, "aborting " SCSI_IOFMT "\n", SCSI_IOFMT_ARGS(io->io_to_abort));
+ rc = ocs_hw_io_abort(&io->ocs->hw, hio_to_abort, io->send_abts,
+ io->hw_cb, io);
+ if (rc) {
+ int status = SLI4_FC_WCQE_STATUS_SUCCESS;
+ if ((rc != OCS_HW_RTN_IO_NOT_ACTIVE) &&
+ (rc != OCS_HW_RTN_IO_ABORT_IN_PROGRESS)) {
+ status = -1;
+ scsi_io_printf(io, "Failed to abort IO: " SCSI_IOFMT " status=%d\n",
+ SCSI_IOFMT_ARGS(io->io_to_abort), rc);
+ }
+ ((ocs_hw_done_t)io->hw_cb)(io->hio, NULL, 0, status, 0, io);
+ rc = 0;
+ }
+ }
+
+ break;
+ }
+ default:
+ scsi_io_printf(io, "Unknown IO type=%d\n", io->io_type);
+ rc = -1;
+ break;
+ }
+ return rc;
+}
+
+/**
+ * @ingroup scsi_api_base
+ * @brief Send read/write data.
+ *
+ * @par Description
+ * This call is made by a target-server to initiate a SCSI read or write data phase, transferring
+ * data between the target to the remote initiator. The payload is specified by the
+ * scatter-gather list @c sgl of length @c sgl_count. The @c wire_len argument
+ * specifies the payload length (independent of the scatter-gather list cumulative length).
+ * @n @n
+ * The @c flags argument has one bit, OCS_SCSI_LAST_DATAPHASE, which is a hint to the base
+ * driver that it may use auto SCSI response features if the hardware supports it.
+ * @n @n
+ * Upon completion, the callback function @b cb is called with flags indicating that the
+ * IO has completed (OCS_SCSI_IO_COMPL) and another data phase or response may be sent;
+ * that the IO has completed and no response needs to be sent (OCS_SCSI_IO_COMPL_NO_RSP);
+ * or that the IO was aborted (OCS_SCSI_IO_ABORTED).
+ *
+ * @param io Pointer to the IO context.
+ * @param flags Flags controlling the sending of data.
+ * @param dif_info Pointer to T10 DIF fields, or NULL if no DIF.
+ * @param sgl Pointer to the payload scatter-gather list.
+ * @param sgl_count Count of the scatter-gather list elements.
+ * @param xwire_len Length of the payload on wire, in bytes.
+ * @param type HW IO type.
+ * @param enable_ar Enable auto-response if true.
+ * @param cb Completion callback.
+ * @param arg Application-supplied callback data.
+ *
+ * @return Returns 0 on success, or a negative error code value on failure.
+ */
+
+static inline int32_t
+ocs_scsi_xfer_data(ocs_io_t *io, uint32_t flags,
+ ocs_scsi_dif_info_t *dif_info,
+ ocs_scsi_sgl_t *sgl, uint32_t sgl_count, uint32_t xwire_len,
+ ocs_hw_io_type_e type, int enable_ar,
+ ocs_scsi_io_cb_t cb, void *arg)
+{
+ int32_t rc;
+ ocs_t *ocs;
+ uint32_t disable_ar_tgt_dif = FALSE;
+ size_t residual = 0;
+
+ if ((dif_info != NULL) && (dif_info->dif_oper == OCS_SCSI_DIF_OPER_DISABLED)) {
+ dif_info = NULL;
+ }
+
+ ocs_assert(io, -1);
+
+ if (dif_info != NULL) {
+ ocs_hw_get(&io->ocs->hw, OCS_HW_DISABLE_AR_TGT_DIF, &disable_ar_tgt_dif);
+ if (disable_ar_tgt_dif) {
+ enable_ar = FALSE;
+ }
+ }
+
+ io->sgl_count = sgl_count;
+
+ /* If needed, copy SGL */
+ if (sgl && (sgl != io->sgl)) {
+ ocs_assert(sgl_count <= io->sgl_allocated, -1);
+ ocs_memcpy(io->sgl, sgl, sgl_count*sizeof(*io->sgl));
+ }
+
+ ocs = io->ocs;
+ ocs_assert(ocs, -1);
+ ocs_assert(io->node, -1);
+
+ scsi_io_trace(io, "%s wire_len %d\n", (type == OCS_HW_IO_TARGET_READ) ? "send" : "recv", xwire_len);
+
+ ocs_assert(sgl, -1);
+ ocs_assert(sgl_count > 0, -1);
+ ocs_assert(io->exp_xfer_len > io->transferred, -1);
+
+ io->hio_type = type;
+
+ io->scsi_tgt_cb = cb;
+ io->scsi_tgt_cb_arg = arg;
+
+ rc = ocs_scsi_convert_dif_info(ocs, dif_info, &io->hw_dif);
+ if (rc) {
+ return rc;
+ }
+
+ /* If DIF is used, then save lba for error recovery */
+ if (dif_info) {
+ io->scsi_dif_info = *dif_info;
+ }
+
+ io->wire_len = MIN(xwire_len, io->exp_xfer_len - io->transferred);
+ residual = (xwire_len - io->wire_len);
+
+ ocs_memset(&io->iparam, 0, sizeof(io->iparam));
+ io->iparam.fcp_tgt.ox_id = io->init_task_tag;
+ io->iparam.fcp_tgt.offset = io->transferred;
+ io->iparam.fcp_tgt.dif_oper = io->hw_dif.dif;
+ io->iparam.fcp_tgt.blk_size = io->hw_dif.blk_size;
+ io->iparam.fcp_tgt.cs_ctl = io->cs_ctl;
+ io->iparam.fcp_tgt.timeout = io->timeout;
+
+ /* if this is the last data phase and there is no residual, enable
+ * auto-good-response
+ */
+ if (enable_ar && (flags & OCS_SCSI_LAST_DATAPHASE) &&
+ (residual == 0) && ((io->transferred + io->wire_len) == io->exp_xfer_len) && (!(flags & OCS_SCSI_NO_AUTO_RESPONSE))) {
+ io->iparam.fcp_tgt.flags |= SLI4_IO_AUTO_GOOD_RESPONSE;
+ io->auto_resp = TRUE;
+ } else {
+ io->auto_resp = FALSE;
+ }
+
+ /* save this transfer length */
+ io->xfer_req = io->wire_len;
+
+ /* Adjust the transferred count to account for overrun
+ * when the residual is calculated in ocs_scsi_send_resp
+ */
+ io->transferred += residual;
+
+ /* Adjust the SGL size if there is overrun */
+
+ if (residual) {
+ ocs_scsi_sgl_t *sgl_ptr = &io->sgl[sgl_count-1];
+
+ while (residual) {
+ size_t len = sgl_ptr->len;
+ if (len > residual) {
+ sgl_ptr->len = len - residual;
+ residual = 0;
+ } else {
+ sgl_ptr->len = 0;
+ residual -= len;
+ io->sgl_count--;
+ }
+ sgl_ptr--;
+ }
+ }
+
+ /* Set latency and WQ steering */
+ io->low_latency = (flags & OCS_SCSI_LOW_LATENCY) != 0;
+ io->wq_steering = (flags & OCS_SCSI_WQ_STEERING_MASK) >> OCS_SCSI_WQ_STEERING_SHIFT;
+ io->wq_class = (flags & OCS_SCSI_WQ_CLASS_MASK) >> OCS_SCSI_WQ_CLASS_SHIFT;
+
+ return ocs_scsi_io_dispatch(io, ocs_target_io_cb);
+}
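+
+/*
+ * Worked example (illustrative) of the overrun handling above: with
+ * exp_xfer_len == 4096, transferred == 0 and xwire_len == 8192, wire_len is
+ * clamped to 4096 and residual == 4096. The SGL is then shortened from the
+ * tail until 4096 bytes have been trimmed, and transferred is advanced by the
+ * residual so that ocs_scsi_send_resp() later computes the correct residual.
+ */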
+
+
+int32_t
+ocs_scsi_send_rd_data(ocs_io_t *io, uint32_t flags,
+ ocs_scsi_dif_info_t *dif_info,
+ ocs_scsi_sgl_t *sgl, uint32_t sgl_count, uint32_t len,
+ ocs_scsi_io_cb_t cb, void *arg)
+{
+ return ocs_scsi_xfer_data(io, flags, dif_info, sgl, sgl_count, len, OCS_HW_IO_TARGET_READ,
+ enable_tsend_auto_resp(io->ocs), cb, arg);
+}
+
+int32_t
+ocs_scsi_recv_wr_data(ocs_io_t *io, uint32_t flags,
+ ocs_scsi_dif_info_t *dif_info,
+ ocs_scsi_sgl_t *sgl, uint32_t sgl_count, uint32_t len,
+ ocs_scsi_io_cb_t cb, void *arg)
+{
+ return ocs_scsi_xfer_data(io, flags, dif_info, sgl, sgl_count, len, OCS_HW_IO_TARGET_WRITE,
+ enable_treceive_auto_resp(io->ocs), cb, arg);
+}
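+
+/*
+ * Illustrative target-server sketch (buf_phys, xfer_len and done_cb are
+ * hypothetical): send a single data buffer for a read command and let
+ * auto-response complete the IO where the hardware supports it:
+ *
+ *    ocs_scsi_sgl_t sgl[1];
+ *
+ *    sgl[0].addr = buf_phys;
+ *    sgl[0].dif_addr = 0;
+ *    sgl[0].len = xfer_len;
+ *    rc = ocs_scsi_send_rd_data(io, OCS_SCSI_LAST_DATAPHASE, NULL,
+ *                               sgl, 1, xfer_len, done_cb, NULL);
+ */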
+
+/**
+ * @ingroup scsi_api_base
+ * @brief Free overflow SGL.
+ *
+ * @par Description
+ * Free the overflow SGL if it is present.
+ *
+ * @param io Pointer to IO object.
+ *
+ * @return None.
+ */
+static void
+ocs_scsi_io_free_ovfl(ocs_io_t *io)
+{
+ if (io->ovfl_sgl.size) {
+ ocs_dma_free(io->ocs, &io->ovfl_sgl);
+ }
+}
+
+/**
+ * @ingroup scsi_api_base
+ * @brief Send response data.
+ *
+ * @par Description
+ * This function is used by a target-server to send the SCSI response data to a remote
+ * initiator node. The target-server populates the @c ocs_scsi_cmd_resp_t
+ * argument with scsi status, status qualifier, sense data, and response data, as
+ * needed.
+ * @n @n
+ * Upon completion, the callback function @c cb is invoked. The target-server will generally
+ * clean up its IO context resources and call ocs_scsi_io_complete().
+ *
+ * @param io Pointer to the IO context.
+ * @param flags Flags to control sending of the SCSI response.
+ * @param rsp Pointer to the response data populated by the caller.
+ * @param cb Completion callback.
+ * @param arg Application-specified completion callback argument.
+ *
+ * @return Returns 0 on success, or a negative error code value on failure.
+ */
+int32_t
+ocs_scsi_send_resp(ocs_io_t *io, uint32_t flags, ocs_scsi_cmd_resp_t *rsp, ocs_scsi_io_cb_t cb, void *arg)
+{
+ ocs_t *ocs;
+ int32_t residual;
+ int auto_resp = TRUE; /* Always try auto resp */
+ uint8_t scsi_status = 0;
+ uint16_t scsi_status_qualifier = 0;
+ uint8_t *sense_data = NULL;
+ uint32_t sense_data_length = 0;
+
+ ocs_assert(io, -1);
+
+ ocs = io->ocs;
+ ocs_assert(ocs, -1);
+
+ ocs_assert(io->node, -1);
+
+ ocs_scsi_convert_dif_info(ocs, NULL, &io->hw_dif);
+
+ if (rsp) {
+ scsi_status = rsp->scsi_status;
+ scsi_status_qualifier = rsp->scsi_status_qualifier;
+ sense_data = rsp->sense_data;
+ sense_data_length = rsp->sense_data_length;
+ residual = rsp->residual;
+ } else {
+ residual = io->exp_xfer_len - io->transferred;
+ }
+
+ io->wire_len = 0;
+ io->hio_type = OCS_HW_IO_TARGET_RSP;
+
+ io->scsi_tgt_cb = cb;
+ io->scsi_tgt_cb_arg = arg;
+
+ ocs_memset(&io->iparam, 0, sizeof(io->iparam));
+ io->iparam.fcp_tgt.ox_id = io->init_task_tag;
+ io->iparam.fcp_tgt.offset = 0;
+ io->iparam.fcp_tgt.cs_ctl = io->cs_ctl;
+ io->iparam.fcp_tgt.timeout = io->timeout;
+
+ /* Set low latency queueing request */
+ io->low_latency = (flags & OCS_SCSI_LOW_LATENCY) != 0;
+ io->wq_steering = (flags & OCS_SCSI_WQ_STEERING_MASK) >> OCS_SCSI_WQ_STEERING_SHIFT;
+ io->wq_class = (flags & OCS_SCSI_WQ_CLASS_MASK) >> OCS_SCSI_WQ_CLASS_SHIFT;
+
+ if ((scsi_status != 0) || residual || sense_data_length) {
+ fcp_rsp_iu_t *fcprsp = io->rspbuf.virt;
+
+ if (!fcprsp) {
+ ocs_log_err(ocs, "NULL response buffer\n");
+ return -1;
+ }
+
+ auto_resp = FALSE;
+
+ ocs_memset(fcprsp, 0, sizeof(*fcprsp));
+
+ io->wire_len += (sizeof(*fcprsp) - sizeof(fcprsp->data));
+
+ fcprsp->scsi_status = scsi_status;
+ *((uint16_t*)fcprsp->status_qualifier) = ocs_htobe16(scsi_status_qualifier);
+
+ /* set residual status if necessary */
+ if (residual != 0) {
+ /* FCP: if data transferred is less than the amount expected, then this is an
+ * underflow. If data transferred would have been greater than the amount expected
+ * then this is an overflow
+ */
+ if (residual > 0) {
+ fcprsp->flags |= FCP_RESID_UNDER;
+ *((uint32_t *)fcprsp->fcp_resid) = ocs_htobe32(residual);
+ } else {
+ fcprsp->flags |= FCP_RESID_OVER;
+ *((uint32_t *)fcprsp->fcp_resid) = ocs_htobe32(-residual);
+ }
+ }
+
+ if (sense_data && sense_data_length) {
+ ocs_assert(sense_data_length <= sizeof(fcprsp->data), -1);
+ fcprsp->flags |= FCP_SNS_LEN_VALID;
+ ocs_memcpy(fcprsp->data, sense_data, sense_data_length);
+ *((uint32_t*)fcprsp->fcp_sns_len) = ocs_htobe32(sense_data_length);
+ io->wire_len += sense_data_length;
+ }
+
+ io->sgl[0].addr = io->rspbuf.phys;
+ io->sgl[0].dif_addr = 0;
+ io->sgl[0].len = io->wire_len;
+ io->sgl_count = 1;
+ }
+
+ if (auto_resp) {
+ io->iparam.fcp_tgt.flags |= SLI4_IO_AUTO_GOOD_RESPONSE;
+ }
+
+ return ocs_scsi_io_dispatch(io, ocs_target_io_cb);
+}
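+
+/*
+ * Illustrative sketch (sense_buf, sense_len and done_cb are hypothetical):
+ * send a CHECK CONDITION (SCSI status 0x02) with sense data. A non-zero
+ * scsi_status forces the explicit FCP response path above rather than
+ * auto-good-response:
+ *
+ *    ocs_scsi_cmd_resp_t rsp;
+ *
+ *    ocs_memset(&rsp, 0, sizeof(rsp));
+ *    rsp.scsi_status = 0x02;
+ *    rsp.sense_data = sense_buf;
+ *    rsp.sense_data_length = sense_len;
+ *    rsp.residual = 0;
+ *    rc = ocs_scsi_send_resp(io, 0, &rsp, done_cb, NULL);
+ */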
+
+/**
+ * @ingroup scsi_api_base
+ * @brief Send TMF response data.
+ *
+ * @par Description
+ * This function is used by a target-server to send SCSI TMF response data to a remote
+ * initiator node.
+ * Upon completion, the callback function @c cb is invoked. The target-server will generally
+ * clean up its IO context resources and call ocs_scsi_io_complete().
+ *
+ * @param io Pointer to the IO context.
+ * @param rspcode TMF response code.
+ * @param addl_rsp_info Additional TMF response information (may be NULL for zero data).
+ * @param cb Completion callback.
+ * @param arg Application-specified completion callback argument.
+ *
+ * @return Returns 0 on success, or a negative error code value on failure.
+ */
+int32_t
+ocs_scsi_send_tmf_resp(ocs_io_t *io, ocs_scsi_tmf_resp_e rspcode, uint8_t addl_rsp_info[3],
+ ocs_scsi_io_cb_t cb, void *arg)
+{
+ int32_t rc = -1;
+ ocs_t *ocs = NULL;
+ fcp_rsp_iu_t *fcprsp = NULL;
+ fcp_rsp_info_t *rspinfo = NULL;
+ uint8_t fcp_rspcode;
+
+ ocs_assert(io, -1);
+ ocs_assert(io->ocs, -1);
+ ocs_assert(io->node, -1);
+
+ ocs = io->ocs;
+
+ io->wire_len = 0;
+ ocs_scsi_convert_dif_info(ocs, NULL, &io->hw_dif);
+
+ switch(rspcode) {
+ case OCS_SCSI_TMF_FUNCTION_COMPLETE:
+ fcp_rspcode = FCP_TMF_COMPLETE;
+ break;
+ case OCS_SCSI_TMF_FUNCTION_SUCCEEDED:
+ case OCS_SCSI_TMF_FUNCTION_IO_NOT_FOUND:
+ fcp_rspcode = FCP_TMF_SUCCEEDED;
+ break;
+ case OCS_SCSI_TMF_FUNCTION_REJECTED:
+ fcp_rspcode = FCP_TMF_REJECTED;
+ break;
+ case OCS_SCSI_TMF_INCORRECT_LOGICAL_UNIT_NUMBER:
+ fcp_rspcode = FCP_TMF_INCORRECT_LUN;
+ break;
+ case OCS_SCSI_TMF_SERVICE_DELIVERY:
+ fcp_rspcode = FCP_TMF_FAILED;
+ break;
+ default:
+ fcp_rspcode = FCP_TMF_REJECTED;
+ break;
+ }
+
+ io->hio_type = OCS_HW_IO_TARGET_RSP;
+
+ io->scsi_tgt_cb = cb;
+ io->scsi_tgt_cb_arg = arg;
+
+ if (io->tmf_cmd == OCS_SCSI_TMF_ABORT_TASK) {
+ rc = ocs_target_send_bls_resp(io, cb, arg);
+ return rc;
+ }
+
+ /* populate the FCP TMF response */
+ fcprsp = io->rspbuf.virt;
+ ocs_memset(fcprsp, 0, sizeof(*fcprsp));
+
+ fcprsp->flags |= FCP_RSP_LEN_VALID;
+
+ rspinfo = (fcp_rsp_info_t*) fcprsp->data;
+ if (addl_rsp_info != NULL) {
+ ocs_memcpy(rspinfo->addl_rsp_info, addl_rsp_info, sizeof(rspinfo->addl_rsp_info));
+ }
+ rspinfo->rsp_code = fcp_rspcode;
+
+ io->wire_len = sizeof(*fcprsp) - sizeof(fcprsp->data) + sizeof(*rspinfo);
+
+ *((uint32_t*)fcprsp->fcp_rsp_len) = ocs_htobe32(sizeof(*rspinfo));
+
+ io->sgl[0].addr = io->rspbuf.phys;
+ io->sgl[0].dif_addr = 0;
+ io->sgl[0].len = io->wire_len;
+ io->sgl_count = 1;
+
+ ocs_memset(&io->iparam, 0, sizeof(io->iparam));
+ io->iparam.fcp_tgt.ox_id = io->init_task_tag;
+ io->iparam.fcp_tgt.offset = 0;
+ io->iparam.fcp_tgt.cs_ctl = io->cs_ctl;
+ io->iparam.fcp_tgt.timeout = io->timeout;
+
+ rc = ocs_scsi_io_dispatch(io, ocs_target_io_cb);
+
+ return rc;
+}
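+
+/*
+ * Illustrative usage (done_cb is hypothetical): report a TMF as complete.
+ * For OCS_SCSI_TMF_ABORT_TASK the function sends a BA_ACC instead of an FCP
+ * response, as handled above.
+ *
+ *    rc = ocs_scsi_send_tmf_resp(io, OCS_SCSI_TMF_FUNCTION_COMPLETE,
+ *                                NULL, done_cb, NULL);
+ */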
+
+
+/**
+ * @brief Process target abort callback.
+ *
+ * @par Description
+ * Handles the completion of a HW abort request issued via ocs_scsi_io_dispatch_abort().
+ *
+ * @param hio HW IO context.
+ * @param rnode Remote node.
+ * @param length Length of response data.
+ * @param status Completion status.
+ * @param ext_status Extended completion status.
+ * @param app Application-specified callback data.
+ *
+ * @return Returns 0 on success, or a negative error code value on failure.
+ */
+
+static int32_t
+ocs_target_abort_cb(ocs_hw_io_t *hio, ocs_remote_node_t *rnode, uint32_t length, int32_t status, uint32_t ext_status, void *app)
+{
+ ocs_io_t *io = app;
+ ocs_t *ocs;
+ ocs_scsi_io_status_e scsi_status;
+
+ ocs_assert(io, -1);
+ ocs_assert(io->ocs, -1);
+
+ ocs = io->ocs;
+
+ if (io->abort_cb) {
+ ocs_scsi_io_cb_t abort_cb = io->abort_cb;
+ void *abort_cb_arg = io->abort_cb_arg;
+
+ io->abort_cb = NULL;
+ io->abort_cb_arg = NULL;
+
+ switch (status) {
+ case SLI4_FC_WCQE_STATUS_SUCCESS:
+ scsi_status = OCS_SCSI_STATUS_GOOD;
+ break;
+ case SLI4_FC_WCQE_STATUS_LOCAL_REJECT:
+ switch (ext_status) {
+ case SLI4_FC_LOCAL_REJECT_NO_XRI:
+ scsi_status = OCS_SCSI_STATUS_NO_IO;
+ break;
+ case SLI4_FC_LOCAL_REJECT_ABORT_IN_PROGRESS:
+ scsi_status = OCS_SCSI_STATUS_ABORT_IN_PROGRESS;
+ break;
+ default:
+ /* TODO: we have seen 0x15 (abort in progress) */
+ scsi_status = OCS_SCSI_STATUS_ERROR;
+ break;
+ }
+ break;
+ case SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE:
+ scsi_status = OCS_SCSI_STATUS_CHECK_RESPONSE;
+ break;
+ default:
+ scsi_status = OCS_SCSI_STATUS_ERROR;
+ break;
+ }
+ /* invoke callback */
+ abort_cb(io->io_to_abort, scsi_status, 0, abort_cb_arg);
+ }
+
+ ocs_assert(io != io->io_to_abort, -1);
+
+ /* done with IO to abort */
+ ocs_ref_put(&io->io_to_abort->ref); /* ocs_ref_get(): ocs_scsi_tgt_abort_io() */
+
+ ocs_io_free(ocs, io);
+
+ ocs_scsi_check_pending(ocs);
+ return 0;
+}
+
+/**
+ * @ingroup scsi_api_base
+ * @brief Abort a target IO.
+ *
+ * @par Description
+ * This routine is called from a SCSI target-server. It initiates an abort of a
+ * previously-issued target data phase or response request.
+ *
+ * @param io IO context.
+ * @param cb SCSI target server callback.
+ * @param arg SCSI target server supplied callback argument.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+int32_t
+ocs_scsi_tgt_abort_io(ocs_io_t *io, ocs_scsi_io_cb_t cb, void *arg)
+{
+ ocs_t *ocs;
+ ocs_xport_t *xport;
+ int32_t rc;
+
+ ocs_io_t *abort_io = NULL;
+ ocs_assert(io, -1);
+ ocs_assert(io->node, -1);
+ ocs_assert(io->ocs, -1);
+
+ ocs = io->ocs;
+ xport = ocs->xport;
+
+ /* take a reference on IO being aborted */
+ if ((ocs_ref_get_unless_zero(&io->ref) == 0)) {
+ /* command no longer active */
+ scsi_io_printf(io, "command no longer active\n");
+ return -1;
+ }
+
+ /*
+ * allocate a new IO to send the abort request. Use ocs_io_alloc() directly, as
+ * we need an IO object that will not fail allocation due to allocations being
+ * disabled (in ocs_scsi_io_alloc())
+ */
+ abort_io = ocs_io_alloc(ocs);
+ if (abort_io == NULL) {
+ ocs_atomic_add_return(&xport->io_alloc_failed_count, 1);
+ ocs_ref_put(&io->ref); /* ocs_ref_get(): same function */
+ return -1;
+ }
+
+ /* Save the target server callback and argument */
+ ocs_assert(abort_io->hio == NULL, -1);
+
+ /* set generic fields */
+ abort_io->cmd_tgt = TRUE;
+ abort_io->node = io->node;
+
+ /* set type and abort-specific fields */
+ abort_io->io_type = OCS_IO_TYPE_ABORT;
+ abort_io->display_name = "tgt_abort";
+ abort_io->io_to_abort = io;
+ abort_io->send_abts = FALSE;
+ abort_io->abort_cb = cb;
+ abort_io->abort_cb_arg = arg;
+
+ /* now dispatch IO */
+ rc = ocs_scsi_io_dispatch_abort(abort_io, ocs_target_abort_cb);
+ if (rc) {
+ ocs_ref_put(&io->ref); /* ocs_ref_get(): same function */
+ }
+ return rc;
+}
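+
+/*
+ * Illustrative usage (abort_done_cb is hypothetical): the caller does not
+ * need to take its own reference on the IO being aborted; this function takes
+ * one and ocs_target_abort_cb() releases it when the abort completes.
+ *
+ *    if (ocs_scsi_tgt_abort_io(io, abort_done_cb, NULL) != 0)
+ *        ... abort could not be issued (IO no longer active, or no
+ *            IO context was available) ...
+ */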
+
+/**
+ * @brief Process target BLS response callback.
+ *
+ * @par Description
+ * Handles the completion of a BLS response (BA_ACC) sent by ocs_target_send_bls_resp().
+ *
+ * @param hio HW IO context.
+ * @param rnode Remote node.
+ * @param length Length of response data.
+ * @param status Completion status.
+ * @param ext_status Extended completion status.
+ * @param app Application-specified callback data.
+ *
+ * @return Returns 0 on success, or a negative error code value on failure.
+ */
+
+static int32_t
+ocs_target_bls_resp_cb(ocs_hw_io_t *hio, ocs_remote_node_t *rnode, uint32_t length, int32_t status, uint32_t ext_status, void *app)
+{
+ ocs_io_t *io = app;
+ ocs_t *ocs;
+ ocs_scsi_io_status_e bls_status;
+
+ ocs_assert(io, -1);
+ ocs_assert(io->ocs, -1);
+
+ ocs = io->ocs;
+
+ /* BLS isn't really a "SCSI" concept, but use SCSI status */
+ if (status) {
+ io_error_log(io, "s=%#x x=%#x\n", status, ext_status);
+ bls_status = OCS_SCSI_STATUS_ERROR;
+ } else {
+ bls_status = OCS_SCSI_STATUS_GOOD;
+ }
+
+ if (io->bls_cb) {
+ ocs_scsi_io_cb_t bls_cb = io->bls_cb;
+ void *bls_cb_arg = io->bls_cb_arg;
+
+ io->bls_cb = NULL;
+ io->bls_cb_arg = NULL;
+
+ /* invoke callback */
+ bls_cb(io, bls_status, 0, bls_cb_arg);
+ }
+
+ ocs_scsi_check_pending(ocs);
+ return 0;
+}
+
+/**
+ * @brief Complete abort request.
+ *
+ * @par Description
+ * An abort request is completed by posting a BA_ACC for the IO that requested the abort.
+ *
+ * @param io Pointer to the IO context.
+ * @param cb Callback function to invoke upon completion.
+ * @param arg Application-specified completion callback argument.
+ *
+ * @return Returns 0 on success, or a negative error code value on failure.
+ */
+
+static int32_t
+ocs_target_send_bls_resp(ocs_io_t *io, ocs_scsi_io_cb_t cb, void *arg)
+{
+ int32_t rc;
+ fc_ba_acc_payload_t *acc;
+
+ ocs_assert(io, -1);
+
+ /* fill out IO structure with everything needed to send BA_ACC */
+ ocs_memset(&io->iparam, 0, sizeof(io->iparam));
+ io->iparam.bls.ox_id = io->init_task_tag;
+ io->iparam.bls.rx_id = io->abort_rx_id;
+
+ acc = (void *)io->iparam.bls.payload;
+
+ ocs_memset(io->iparam.bls.payload, 0, sizeof(io->iparam.bls.payload));
+ acc->ox_id = io->iparam.bls.ox_id;
+ acc->rx_id = io->iparam.bls.rx_id;
+ acc->high_seq_cnt = UINT16_MAX;
+
+ /* generic io fields have already been populated */
+
+ /* set type and BLS-specific fields */
+ io->io_type = OCS_IO_TYPE_BLS_RESP;
+ io->display_name = "bls_rsp";
+ io->hio_type = OCS_HW_BLS_ACC;
+ io->bls_cb = cb;
+ io->bls_cb_arg = arg;
+
+ /* dispatch IO */
+ rc = ocs_scsi_io_dispatch(io, ocs_target_bls_resp_cb);
+ return rc;
+}
+
+/**
+ * @ingroup scsi_api_base
+ * @brief Notify the base driver that the IO is complete.
+ *
+ * @par Description
+ * This function is called by a target-server to notify the base driver that an IO
+ * has completed, allowing for the base driver to free resources.
+ * @n
+ * @n @b Note: This function is not called by initiator-clients.
+ *
+ * @param io Pointer to IO context.
+ *
+ * @return None.
+ */
+void
+ocs_scsi_io_complete(ocs_io_t *io)
+{
+ ocs_assert(io);
+
+ if (!ocs_io_busy(io)) {
+ ocs_log_test(io->ocs, "Got completion for non-busy io with tag 0x%x\n", io->tag);
+ return;
+ }
+
+ scsi_io_trace(io, "freeing io 0x%p %s\n", io, io->display_name);
+ ocs_assert(ocs_ref_read_count(&io->ref) > 0);
+ ocs_ref_put(&io->ref); /* ocs_ref_get(): ocs_scsi_io_alloc() */
+}
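+
+/*
+ * Illustrative completion-callback shape (done_cb is hypothetical; assumes
+ * the ocs_scsi_io_cb_t signature used by the data-phase and response calls
+ * above):
+ *
+ *    static int32_t
+ *    done_cb(ocs_io_t *io, ocs_scsi_io_status_e status, uint32_t flags, void *arg)
+ *    {
+ *        ... release back-end resources for this command ...
+ *        ocs_scsi_io_complete(io);    (drops the ocs_scsi_io_alloc() reference)
+ *        return 0;
+ *    }
+ */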
+
+
+/**
+ * @brief Handle initiator IO completion.
+ *
+ * @par Description
+ * This callback is made upon completion of an initiator operation (initiator read/write command).
+ *
+ * @param hio HW IO context.
+ * @param rnode Remote node.
+ * @param length Length of completion data.
+ * @param status Completion status.
+ * @param ext_status Extended completion status.
+ * @param app Application-specified callback data.
+ *
+ * @return None.
+ */
+
+static void
+ocs_initiator_io_cb(ocs_hw_io_t *hio, ocs_remote_node_t *rnode, uint32_t length,
+ int32_t status, uint32_t ext_status, void *app)
+{
+ ocs_io_t *io = app;
+ ocs_t *ocs;
+ ocs_scsi_io_status_e scsi_status;
+
+ ocs_assert(io);
+ ocs_assert(io->scsi_ini_cb);
+
+ scsi_io_trace(io, "status x%x ext_status x%x\n", status, ext_status);
+
+ ocs = io->ocs;
+ ocs_assert(ocs);
+
+ ocs_scsi_io_free_ovfl(io);
+
+ /* Call target server completion */
+ if (io->scsi_ini_cb) {
+ fcp_rsp_iu_t *fcprsp = io->rspbuf.virt;
+ ocs_scsi_cmd_resp_t rsp;
+ ocs_scsi_rsp_io_cb_t cb = io->scsi_ini_cb;
+ uint32_t flags = 0;
+ uint8_t *pd = fcprsp->data;
+
+ /* Clear the callback before invoking the callback */
+ io->scsi_ini_cb = NULL;
+
+ ocs_memset(&rsp, 0, sizeof(rsp));
+
+ /* Unless status is FCP_RSP_FAILURE, fcprsp is not filled in */
+ switch (status) {
+ case SLI4_FC_WCQE_STATUS_SUCCESS:
+ scsi_status = OCS_SCSI_STATUS_GOOD;
+ break;
+ case SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE:
+ scsi_status = OCS_SCSI_STATUS_CHECK_RESPONSE;
+ rsp.scsi_status = fcprsp->scsi_status;
+ rsp.scsi_status_qualifier = ocs_be16toh(*((uint16_t*)fcprsp->status_qualifier));
+
+ if (fcprsp->flags & FCP_RSP_LEN_VALID) {
+ rsp.response_data = pd;
+ rsp.response_data_length = ocs_fc_getbe32(fcprsp->fcp_rsp_len);
+ pd += rsp.response_data_length;
+ }
+ if (fcprsp->flags & FCP_SNS_LEN_VALID) {
+ uint32_t sns_len = ocs_fc_getbe32(fcprsp->fcp_sns_len);
+ rsp.sense_data = pd;
+ rsp.sense_data_length = sns_len;
+ pd += sns_len;
+ }
+ /* Set residual */
+ if (fcprsp->flags & FCP_RESID_OVER) {
+ rsp.residual = -ocs_fc_getbe32(fcprsp->fcp_resid);
+ rsp.response_wire_length = length;
+ } else if (fcprsp->flags & FCP_RESID_UNDER) {
+ rsp.residual = ocs_fc_getbe32(fcprsp->fcp_resid);
+ rsp.response_wire_length = length;
+ }
+
+ /*
+ * Note: FCP_RSP_FAILURE can be returned for initiator IOs when the total data
+ * placed does not match the requested length, even if the status is good. If
+ * the response is all zeroes, we have to assume that one or more frames were
+ * dropped, and we change the status to LOCAL_REJECT/OUT_OF_ORDER_DATA.
+ */
+ if (length != io->wire_len) {
+ uint32_t rsp_len = ext_status;
+ uint8_t *rsp_bytes = io->rspbuf.virt;
+ uint32_t i;
+ uint8_t all_zeroes = (rsp_len > 0); /* an empty response does not count as all zeroes */
+ /* Scan the response payload for any non-zero byte */
+ for (i = 0; i < rsp_len; i++) {
+ if (rsp_bytes[i] != 0) {
+ all_zeroes = FALSE;
+ break;
+ }
+ }
+ if (all_zeroes) {
+ scsi_status = OCS_SCSI_STATUS_ERROR;
+ ocs_log_test(io->ocs, "[%s]" SCSI_IOFMT "local reject=0x%02x\n",
+ io->node->display_name, SCSI_IOFMT_ARGS(io),
+ SLI4_FC_LOCAL_REJECT_OUT_OF_ORDER_DATA);
+ }
+ }
+ break;
+ case SLI4_FC_WCQE_STATUS_LOCAL_REJECT:
+ if (ext_status == SLI4_FC_LOCAL_REJECT_SEQUENCE_TIMEOUT) {
+ scsi_status = OCS_SCSI_STATUS_COMMAND_TIMEOUT;
+ } else {
+ scsi_status = OCS_SCSI_STATUS_ERROR;
+ }
+ break;
+ case SLI4_FC_WCQE_STATUS_DI_ERROR:
+ if (ext_status & 0x01) {
+ scsi_status = OCS_SCSI_STATUS_DIF_GUARD_ERROR;
+ } else if (ext_status & 0x02) {
+ scsi_status = OCS_SCSI_STATUS_DIF_APP_TAG_ERROR;
+ } else if (ext_status & 0x04) {
+ scsi_status = OCS_SCSI_STATUS_DIF_REF_TAG_ERROR;
+ } else {
+ scsi_status = OCS_SCSI_STATUS_DIF_UNKNOWN_ERROR;
+ }
+ break;
+ default:
+ scsi_status = OCS_SCSI_STATUS_ERROR;
+ break;
+ }
+
+ cb(io, scsi_status, &rsp, flags, io->scsi_ini_cb_arg);
+
+ }
+ ocs_scsi_check_pending(ocs);
+}
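+
+/*
+ * Usage sketch (editor's illustration, not part of the driver): a minimal
+ * initiator-client completion callback of type ocs_scsi_rsp_io_cb_t, as
+ * invoked from ocs_initiator_io_cb() above. example_ini_done and
+ * handle_sense() are hypothetical names.
+ *
+ * static int32_t
+ * example_ini_done(ocs_io_t *io, ocs_scsi_io_status_e status,
+ * ocs_scsi_cmd_resp_t *rsp, uint32_t flags, void *arg)
+ * {
+ * if (status == OCS_SCSI_STATUS_CHECK_RESPONSE &&
+ * rsp->scsi_status == SCSI_STATUS_CHECK_CONDITION) {
+ * handle_sense(rsp->sense_data, rsp->sense_data_length);
+ * }
+ * ocs_scsi_io_free(io);
+ * return 0;
+ * }
+ *
+ * rsp->residual carries the data under-run (positive) or over-run (negative).
+ */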
+
+/**
+ * @ingroup scsi_api_base
+ * @brief Initiate initiator read IO.
+ *
+ * @par Description
+ * This call is made by an initiator-client to send a SCSI read command. The payload
+ * for the command is given by a scatter-gather list @c sgl for @c sgl_count
+ * entries.
+ * @n @n
+ * Upon completion, the callback @c cb is invoked and passed the request status.
+ * If the command completed successfully, the callback is given SCSI response data.
+ *
+ * @param node Pointer to the node.
+ * @param io Pointer to the IO context.
+ * @param lun LUN value.
+ * @param cdb Pointer to the CDB.
+ * @param cdb_len Length of the CDB.
+ * @param dif_info Pointer to the T10 DIF fields, or NULL if no DIF.
+ * @param sgl Pointer to the scatter-gather list.
+ * @param sgl_count Count of the scatter-gather list elements.
+ * @param wire_len Length of the payload.
+ * @param cb Completion callback.
+ * @param arg Application-specified completion callback argument.
+ *
+ * @return Returns 0 on success, or a negative error code value on failure.
+ */
+int32_t
+ocs_scsi_send_rd_io(ocs_node_t *node, ocs_io_t *io, uint64_t lun, void *cdb, uint32_t cdb_len,
+ ocs_scsi_dif_info_t *dif_info,
+ ocs_scsi_sgl_t *sgl, uint32_t sgl_count, uint32_t wire_len,
+ ocs_scsi_rsp_io_cb_t cb, void *arg)
+{
+ int32_t rc;
+
+ rc = ocs_scsi_send_io(OCS_HW_IO_INITIATOR_READ, node, io, lun, 0, cdb, cdb_len, dif_info, sgl, sgl_count,
+ wire_len, 0, cb, arg);
+
+ return rc;
+}
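+
+/*
+ * Usage sketch (editor's illustration, not part of the driver): issuing a
+ * single-segment READ(10). lba, xfer_len, buf_phys and example_ini_done are
+ * hypothetical; be32enc()/be16enc() are the sys/endian.h encoders already
+ * used elsewhere in this file (see be64enc() below).
+ *
+ * ocs_io_t *io = ocs_scsi_io_alloc(node, OCS_SCSI_IO_ROLE_ORIGINATOR);
+ * ocs_scsi_sgl_t sgl[1];
+ * uint8_t cdb[10] = { 0x28 };
+ *
+ * be32enc(&cdb[2], lba);
+ * be16enc(&cdb[7], xfer_len / 512);
+ * sgl[0].addr = buf_phys;
+ * sgl[0].dif_addr = 0;
+ * sgl[0].len = xfer_len;
+ * rc = ocs_scsi_send_rd_io(node, io, lun, cdb, sizeof(cdb), NULL,
+ * sgl, 1, xfer_len, example_ini_done, NULL);
+ */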
+
+/**
+ * @ingroup scsi_api_base
+ * @brief Initiate initiator write IO.
+ *
+ * @par Description
+ * This call is made by an initiator-client to send a SCSI write command. The payload
+ * for the command is given by a scatter-gather list @c sgl for @c sgl_count
+ * entries.
+ * @n @n
+ * Upon completion, the callback @c cb is invoked and passed request status. If the command
+ * completed successfully, the callback is given SCSI response data.
+ *
+ * @param node Pointer to the node.
+ * @param io Pointer to IO context.
+ * @param lun LUN value.
+ * @param cdb Pointer to the CDB.
+ * @param cdb_len Length of the CDB.
+ * @param dif_info Pointer to the T10 DIF fields, or NULL if no DIF.
+ * @param sgl Pointer to the scatter-gather list.
+ * @param sgl_count Count of the scatter-gather list elements.
+ * @param wire_len Length of the payload.
+ * @param cb Completion callback.
+ * @param arg Application-specified completion callback argument.
+ *
+ * @return Returns 0 on success, or a negative error code value on failure.
+ */
+int32_t ocs_scsi_send_wr_io(ocs_node_t *node, ocs_io_t *io, uint64_t lun, void *cdb, uint32_t cdb_len,
+ ocs_scsi_dif_info_t *dif_info,
+ ocs_scsi_sgl_t *sgl, uint32_t sgl_count, uint32_t wire_len,
+ ocs_scsi_rsp_io_cb_t cb, void *arg)
+{
+ int32_t rc;
+
+ rc = ocs_scsi_send_io(OCS_HW_IO_INITIATOR_WRITE, node, io, lun, 0, cdb, cdb_len, dif_info, sgl, sgl_count,
+ wire_len, 0, cb, arg);
+
+ return rc;
+}
+
+/**
+ * @ingroup scsi_api_base
+ * @brief Initiate initiator write IO with first burst data.
+ *
+ * @par Description
+ * This call is made by an initiator-client to send a SCSI write command that sends
+ * up to @c first_burst bytes of the payload as unsolicited first burst data. The
+ * payload for the command is given by a scatter-gather list @c sgl for @c sgl_count
+ * entries.
+ * @n @n
+ * Upon completion, the callback @c cb is invoked and passed request status. If the command
+ * completed successfully, the callback is given SCSI response data.
+ *
+ * @param node Pointer to the node.
+ * @param io Pointer to IO context.
+ * @param lun LUN value.
+ * @param cdb Pointer to the CDB.
+ * @param cdb_len Length of the CDB.
+ * @param dif_info Pointer to the T10 DIF fields, or NULL if no DIF.
+ * @param sgl Pointer to the scatter-gather list.
+ * @param sgl_count Count of the scatter-gather list elements.
+ * @param wire_len Length of the payload.
+ * @param first_burst Number of first burst bytes to send.
+ * @param cb Completion callback.
+ * @param arg Application-specified completion callback argument.
+ *
+ * @return Returns 0 on success, or a negative error code value on failure.
+ */
+int32_t
+ocs_scsi_send_wr_io_first_burst(ocs_node_t *node, ocs_io_t *io, uint64_t lun, void *cdb, uint32_t cdb_len,
+ ocs_scsi_dif_info_t *dif_info,
+ ocs_scsi_sgl_t *sgl, uint32_t sgl_count, uint32_t wire_len, uint32_t first_burst,
+ ocs_scsi_rsp_io_cb_t cb, void *arg)
+{
+ int32_t rc;
+
+ rc = ocs_scsi_send_io(OCS_HW_IO_INITIATOR_WRITE, node, io, lun, 0, cdb, cdb_len, dif_info, sgl, sgl_count,
+ wire_len, first_burst, cb, arg);
+
+ return rc;
+}
+
+/**
+ * @ingroup scsi_api_base
+ * @brief Initiate initiator SCSI command with no data.
+ *
+ * @par Description
+ * This call is made by an initiator-client to send a SCSI command with no data.
+ * @n @n
+ * Upon completion, the callback @c cb is invoked and passed request status. If the command
+ * completed successfully, the callback is given SCSI response data.
+ *
+ * @param node Pointer to the node.
+ * @param io Pointer to the IO context.
+ * @param lun LUN value.
+ * @param cdb Pointer to the CDB.
+ * @param cdb_len Length of the CDB.
+ * @param cb Completion callback.
+ * @param arg Application-specified completion callback argument.
+ *
+ * @return Returns 0 on success, or a negative error code value on failure.
+ */
+int32_t ocs_scsi_send_nodata_io(ocs_node_t *node, ocs_io_t *io, uint64_t lun, void *cdb, uint32_t cdb_len,
+ ocs_scsi_rsp_io_cb_t cb, void *arg)
+{
+ int32_t rc;
+
+ rc = ocs_scsi_send_io(OCS_HW_IO_INITIATOR_NODATA, node, io, lun, 0, cdb, cdb_len, NULL, NULL, 0, 0, 0, cb, arg);
+
+ return rc;
+}
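+
+/*
+ * Usage sketch (editor's illustration, not part of the driver): sending a
+ * command that moves no data. TEST UNIT READY is an all-zero 6-byte CDB;
+ * example_ini_done is a hypothetical completion callback.
+ *
+ * uint8_t cdb[6] = { 0 };
+ *
+ * rc = ocs_scsi_send_nodata_io(node, io, lun, cdb, sizeof(cdb),
+ * example_ini_done, NULL);
+ */
+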
+/**
+ * @ingroup scsi_api_base
+ * @brief Initiate initiator task management operation.
+ *
+ * @par Description
+ * This command is used to send a SCSI task management function command. If the command
+ * requires it (QUERY_TASK_SET for example), a payload may be associated with the command.
+ * If no payload is required, then @c sgl_count may be zero and @c sgl is ignored.
+ * @n @n
+ * Upon completion @c cb is invoked with status and SCSI response data.
+ *
+ * @param node Pointer to the node.
+ * @param io Pointer to the IO context.
+ * @param io_to_abort Pointer to the IO context to abort in the
+ * case of OCS_SCSI_TMF_ABORT_TASK. Note: this can point to the
+ * same ocs_io_t as @c io, provided that @c io does not
+ * have any outstanding work requests.
+ * @param lun LUN value.
+ * @param tmf Task management command.
+ * @param sgl Pointer to the scatter-gather list.
+ * @param sgl_count Count of the scatter-gather list elements.
+ * @param len Length of the payload.
+ * @param cb Completion callback.
+ * @param arg Application-specified completion callback argument.
+ *
+ * @return Returns 0 on success, or a negative error code value on failure.
+ */
+int32_t
+ocs_scsi_send_tmf(ocs_node_t *node, ocs_io_t *io, ocs_io_t *io_to_abort, uint64_t lun, ocs_scsi_tmf_cmd_e tmf,
+ ocs_scsi_sgl_t *sgl, uint32_t sgl_count, uint32_t len, ocs_scsi_rsp_io_cb_t cb, void *arg)
+{
+ int32_t rc;
+ ocs_assert(io, -1);
+
+ if (tmf == OCS_SCSI_TMF_ABORT_TASK) {
+ ocs_assert(io_to_abort, -1);
+
+ /* take a reference on IO being aborted */
+ if ((ocs_ref_get_unless_zero(&io_to_abort->ref) == 0)) {
+ /* command no longer active */
+ scsi_io_printf(io, "command no longer active\n");
+ return -1;
+ }
+ /* generic io fields have already been populated */
+
+ /* abort-specific fields */
+ io->io_type = OCS_IO_TYPE_ABORT;
+ io->display_name = "abort_task";
+ io->io_to_abort = io_to_abort;
+ io->send_abts = TRUE;
+ io->scsi_ini_cb = cb;
+ io->scsi_ini_cb_arg = arg;
+
+ /* now dispatch IO */
+ rc = ocs_scsi_io_dispatch_abort(io, ocs_scsi_abort_io_cb);
+ if (rc) {
+ scsi_io_printf(io, "Failed to dispatch abort\n");
+ ocs_ref_put(&io->ref); /* ocs_ref_get(): same function */
+ }
+ } else {
+ io->display_name = "tmf";
+ rc = ocs_scsi_send_io(OCS_HW_IO_INITIATOR_READ, node, io, lun, tmf, NULL, 0, NULL,
+ sgl, sgl_count, len, 0, cb, arg);
+ }
+
+ return rc;
+}
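+
+/*
+ * Usage sketches (editor's illustration, not part of the driver). A LUN reset
+ * needs no payload, so the SGL arguments are NULL/0; an ABORT_TASK names the
+ * outstanding IO to abort. tmf_io, io_to_abort and example_ini_done are
+ * hypothetical.
+ *
+ * rc = ocs_scsi_send_tmf(node, tmf_io, NULL, lun,
+ * OCS_SCSI_TMF_LOGICAL_UNIT_RESET, NULL, 0, 0, example_ini_done, NULL);
+ *
+ * rc = ocs_scsi_send_tmf(node, tmf_io, io_to_abort, lun,
+ * OCS_SCSI_TMF_ABORT_TASK, NULL, 0, 0, example_ini_done, NULL);
+ */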
+
+/**
+ * @ingroup scsi_api_base
+ * @brief Send an FCP IO.
+ *
+ * @par Description
+ * An FCP read/write IO command, with optional task management flags, is sent to @c node.
+ *
+ * @param type HW IO type to send.
+ * @param node Pointer to the node destination of the IO.
+ * @param io Pointer to the IO context.
+ * @param lun LUN value.
+ * @param tmf Task management command.
+ * @param cdb Pointer to the SCSI CDB.
+ * @param cdb_len Length of the CDB, in bytes.
+ * @param dif_info Pointer to the T10 DIF fields, or NULL if no DIF.
+ * @param sgl Pointer to the scatter-gather list.
+ * @param sgl_count Number of SGL entries in SGL.
+ * @param wire_len Payload length, in bytes, of data on wire.
+ * @param first_burst Number of first burst bytes to send.
+ * @param cb Completion callback.
+ * @param arg Application-specified completion callback argument.
+ *
+ * @return Returns 0 on success, or a negative error code value on failure.
+ */
+
+/* tc: could eliminate LUN, as it's part of the IO structure */
+
+static int32_t ocs_scsi_send_io(ocs_hw_io_type_e type, ocs_node_t *node, ocs_io_t *io, uint64_t lun,
+ ocs_scsi_tmf_cmd_e tmf, uint8_t *cdb, uint32_t cdb_len,
+ ocs_scsi_dif_info_t *dif_info,
+ ocs_scsi_sgl_t *sgl, uint32_t sgl_count, uint32_t wire_len, uint32_t first_burst,
+ ocs_scsi_rsp_io_cb_t cb, void *arg)
+{
+ int32_t rc;
+ ocs_t *ocs;
+ fcp_cmnd_iu_t *cmnd;
+ uint32_t cmnd_bytes = 0;
+ uint32_t *fcp_dl;
+ uint8_t tmf_flags = 0;
+
+ ocs_assert(io, -1);
+ ocs_assert(io->node, -1);
+ ocs_assert(io->node == node, -1);
+ ocs = io->ocs;
+ ocs_assert(cb, -1);
+
+ io->sgl_count = sgl_count;
+
+ /* Copy SGL if needed */
+ if (sgl != io->sgl) {
+ ocs_assert(sgl_count <= io->sgl_allocated, -1);
+ ocs_memcpy(io->sgl, sgl, sizeof(*io->sgl) * sgl_count);
+ }
+
+ /* save initiator and target task tags for debugging */
+ io->tgt_task_tag = 0xffff;
+
+ io->wire_len = wire_len;
+ io->hio_type = type;
+
+ if (OCS_LOG_ENABLE_SCSI_TRACE(ocs)) {
+ char buf[80];
+ ocs_textbuf_t txtbuf;
+ uint32_t i;
+
+ ocs_textbuf_init(ocs, &txtbuf, buf, sizeof(buf));
+
+ ocs_textbuf_printf(&txtbuf, "cdb%d: ", cdb_len);
+ for (i = 0; i < cdb_len; i++) {
+ ocs_textbuf_printf(&txtbuf, "%02X%s", cdb[i], (i == (cdb_len-1)) ? "" : " ");
+ }
+ scsi_io_printf(io, "%s len %d, %s\n", (io->hio_type == OCS_HW_IO_INITIATOR_READ) ? "read" :
+ (io->hio_type == OCS_HW_IO_INITIATOR_WRITE) ? "write" : "", io->wire_len,
+ ocs_textbuf_get_buffer(&txtbuf));
+ }
+
+
+ ocs_assert(io->cmdbuf.virt, -1);
+
+ cmnd = io->cmdbuf.virt;
+
+ ocs_assert(sizeof(*cmnd) <= io->cmdbuf.size, -1);
+
+ ocs_memset(cmnd, 0, sizeof(*cmnd));
+
+ /* Default FCP_CMND IU doesn't include additional CDB bytes but does include FCP_DL */
+ cmnd_bytes = sizeof(fcp_cmnd_iu_t) - sizeof(cmnd->fcp_cdb_and_dl) + sizeof(uint32_t);
+
+ fcp_dl = (uint32_t*)(&(cmnd->fcp_cdb_and_dl));
+
+ if (cdb) {
+ if (cdb_len <= 16) {
+ ocs_memcpy(cmnd->fcp_cdb, cdb, cdb_len);
+ } else {
+ uint32_t addl_cdb_bytes;
+
+ ocs_memcpy(cmnd->fcp_cdb, cdb, 16);
+ addl_cdb_bytes = cdb_len - 16;
+ ocs_memcpy(cmnd->fcp_cdb_and_dl, &(cdb[16]), addl_cdb_bytes);
+ /* additional_fcp_cdb_length is in words, not bytes */
+ cmnd->additional_fcp_cdb_length = (addl_cdb_bytes + 3) / 4;
+ fcp_dl += cmnd->additional_fcp_cdb_length;
+
+ /* Round up additional CDB bytes */
+ cmnd_bytes += (addl_cdb_bytes + 3) & ~0x3;
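+ /*
+ * Worked example: a 32-byte CDB leaves 16 additional bytes, so
+ * additional_fcp_cdb_length is 4 words, fcp_dl lands just past
+ * those 4 words, and cmnd_bytes grows by the rounded 16 bytes.
+ */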
+ }
+ }
+
+ be64enc(cmnd->fcp_lun, CAM_EXTLUN_BYTE_SWIZZLE(lun));
+
+ if (node->fcp2device) {
+ if (ocs_get_crn(node, &cmnd->command_reference_number, lun)) {
+ return -1;
+ }
+ }
+
+ switch (tmf) {
+ case OCS_SCSI_TMF_QUERY_TASK_SET:
+ tmf_flags = FCP_QUERY_TASK_SET;
+ break;
+ case OCS_SCSI_TMF_ABORT_TASK_SET:
+ tmf_flags = FCP_ABORT_TASK_SET;
+ break;
+ case OCS_SCSI_TMF_CLEAR_TASK_SET:
+ tmf_flags = FCP_CLEAR_TASK_SET;
+ break;
+ case OCS_SCSI_TMF_QUERY_ASYNCHRONOUS_EVENT:
+ tmf_flags = FCP_QUERY_ASYNCHRONOUS_EVENT;
+ break;
+ case OCS_SCSI_TMF_LOGICAL_UNIT_RESET:
+ tmf_flags = FCP_LOGICAL_UNIT_RESET;
+ break;
+ case OCS_SCSI_TMF_CLEAR_ACA:
+ tmf_flags = FCP_CLEAR_ACA;
+ break;
+ case OCS_SCSI_TMF_TARGET_RESET:
+ tmf_flags = FCP_TARGET_RESET;
+ break;
+ default:
+ tmf_flags = 0;
+ }
+ cmnd->task_management_flags = tmf_flags;
+
+ *fcp_dl = ocs_htobe32(io->wire_len);
+
+ switch (io->hio_type) {
+ case OCS_HW_IO_INITIATOR_READ:
+ cmnd->rddata = 1;
+ break;
+ case OCS_HW_IO_INITIATOR_WRITE:
+ cmnd->wrdata = 1;
+ break;
+ case OCS_HW_IO_INITIATOR_NODATA:
+ /* sets neither */
+ break;
+ default:
+ ocs_log_test(ocs, "bad IO type %d\n", io->hio_type);
+ return -1;
+ }
+
+ rc = ocs_scsi_convert_dif_info(ocs, dif_info, &io->hw_dif);
+ if (rc) {
+ return rc;
+ }
+
+ io->scsi_ini_cb = cb;
+ io->scsi_ini_cb_arg = arg;
+
+ /* set command and response buffers in the iparam */
+ io->iparam.fcp_ini.cmnd = &io->cmdbuf;
+ io->iparam.fcp_ini.cmnd_size = cmnd_bytes;
+ io->iparam.fcp_ini.rsp = &io->rspbuf;
+ io->iparam.fcp_ini.flags = 0;
+ io->iparam.fcp_ini.dif_oper = io->hw_dif.dif;
+ io->iparam.fcp_ini.blk_size = io->hw_dif.blk_size;
+ io->iparam.fcp_ini.timeout = io->timeout;
+ io->iparam.fcp_ini.first_burst = first_burst;
+
+ return ocs_scsi_io_dispatch(io, ocs_initiator_io_cb);
+}
+
+/**
+ * @ingroup scsi_api_base
+ * @brief Callback for an aborted IO.
+ *
+ * @par Description
+ * Callback function invoked upon completion of an IO abort request.
+ *
+ * @param hio HW IO context.
+ * @param rnode Remote node.
+ * @param len Response length.
+ * @param status Completion status.
+ * @param ext_status Extended completion status.
+ * @param arg Application-specific callback argument, usually the IO context.
+ *
+ * @return Returns 0 on success, or a negative error code value on failure.
+ */
+
+static int32_t
+ocs_scsi_abort_io_cb(struct ocs_hw_io_s *hio, ocs_remote_node_t *rnode, uint32_t len, int32_t status,
+ uint32_t ext_status, void *arg)
+{
+ ocs_io_t *io = arg;
+ ocs_t *ocs;
+ ocs_scsi_io_status_e scsi_status = OCS_SCSI_STATUS_GOOD;
+
+ ocs_assert(io, -1);
+ ocs_assert(ocs_io_busy(io), -1);
+ ocs_assert(io->ocs, -1);
+ ocs_assert(io->io_to_abort, -1);
+ ocs = io->ocs;
+
+ ocs_log_debug(ocs, "status %d ext %d\n", status, ext_status);
+
+ /* done with IO to abort */
+ ocs_ref_put(&io->io_to_abort->ref); /* ocs_ref_get(): ocs_scsi_send_tmf() */
+
+ ocs_scsi_io_free_ovfl(io);
+
+ switch (status) {
+ case SLI4_FC_WCQE_STATUS_SUCCESS:
+ scsi_status = OCS_SCSI_STATUS_GOOD;
+ break;
+ case SLI4_FC_WCQE_STATUS_LOCAL_REJECT:
+ if (ext_status == SLI4_FC_LOCAL_REJECT_ABORT_REQUESTED) {
+ scsi_status = OCS_SCSI_STATUS_ABORTED;
+ } else if (ext_status == SLI4_FC_LOCAL_REJECT_NO_XRI) {
+ scsi_status = OCS_SCSI_STATUS_NO_IO;
+ } else if (ext_status == SLI4_FC_LOCAL_REJECT_ABORT_IN_PROGRESS) {
+ scsi_status = OCS_SCSI_STATUS_ABORT_IN_PROGRESS;
+ } else {
+ ocs_log_test(ocs, "Unhandled local reject 0x%x/0x%x\n", status, ext_status);
+ scsi_status = OCS_SCSI_STATUS_ERROR;
+ }
+ break;
+ default:
+ scsi_status = OCS_SCSI_STATUS_ERROR;
+ break;
+ }
+
+ if (io->scsi_ini_cb) {
+ (*io->scsi_ini_cb)(io, scsi_status, NULL, 0, io->scsi_ini_cb_arg);
+ } else {
+ ocs_scsi_io_free(io);
+ }
+
+ ocs_scsi_check_pending(ocs);
+ return 0;
+}
+
+/**
+ * @ingroup scsi_api_base
+ * @brief Return SCSI API integer valued property.
+ *
+ * @par Description
+ * This function is called by a target-server or initiator-client to
+ * retrieve an integer valued property.
+ *
+ * @param ocs Pointer to the ocs.
+ * @param prop Property value to return.
+ *
+ * @return Returns the property value, or 0 if an invalid property was requested.
+ */
+uint32_t
+ocs_scsi_get_property(ocs_t *ocs, ocs_scsi_property_e prop)
+{
+ ocs_xport_t *xport = ocs->xport;
+ uint32_t val;
+
+ switch (prop) {
+ case OCS_SCSI_MAX_SGE:
+ if (0 == ocs_hw_get(&ocs->hw, OCS_HW_MAX_SGE, &val)) {
+ return val;
+ }
+ break;
+ case OCS_SCSI_MAX_SGL:
+ if (ocs->ctrlmask & OCS_CTRLMASK_TEST_CHAINED_SGLS) {
+ /*
+ * If chain SGL test-mode is enabled, the number of HW SGEs
+ * has been limited; report back original max.
+ */
+ return (OCS_FC_MAX_SGL);
+ }
+ if (0 == ocs_hw_get(&ocs->hw, OCS_HW_N_SGL, &val)) {
+ return val;
+ }
+ break;
+ case OCS_SCSI_MAX_IOS:
+ return ocs_io_pool_allocated(xport->io_pool);
+ case OCS_SCSI_DIF_CAPABLE:
+ if (0 == ocs_hw_get(&ocs->hw, OCS_HW_DIF_CAPABLE, &val)) {
+ return val;
+ }
+ break;
+ case OCS_SCSI_MAX_FIRST_BURST:
+ return 0;
+ case OCS_SCSI_DIF_MULTI_SEPARATE:
+ if (ocs_hw_get(&ocs->hw, OCS_HW_DIF_MULTI_SEPARATE, &val) == 0) {
+ return val;
+ }
+ break;
+ case OCS_SCSI_ENABLE_TASK_SET_FULL:
+ /* Return FALSE if we are send frame capable */
+ if (ocs_hw_get(&ocs->hw, OCS_HW_SEND_FRAME_CAPABLE, &val) == 0) {
+ return !val;
+ }
+ break;
+ default:
+ break;
+ }
+
+ ocs_log_debug(ocs, "invalid property request %d\n", prop);
+ return 0;
+}
+
+/**
+ * @ingroup scsi_api_base
+ * @brief Return a property pointer.
+ *
+ * @par Description
+ * This function is called by a target-server or initiator-client to
+ * retrieve a pointer to the requested property.
+ *
+ * @param ocs Pointer to the ocs.
+ * @param prop Property value to return.
+ *
+ * @return Returns pointer to the requested property, or NULL otherwise.
+ */
+void *ocs_scsi_get_property_ptr(ocs_t *ocs, ocs_scsi_property_e prop)
+{
+ void *rc = NULL;
+
+ switch (prop) {
+ case OCS_SCSI_WWNN:
+ rc = ocs_hw_get_ptr(&ocs->hw, OCS_HW_WWN_NODE);
+ break;
+ case OCS_SCSI_WWPN:
+ rc = ocs_hw_get_ptr(&ocs->hw, OCS_HW_WWN_PORT);
+ break;
+ case OCS_SCSI_PORTNUM:
+ rc = ocs_hw_get_ptr(&ocs->hw, OCS_HW_PORTNUM);
+ break;
+ case OCS_SCSI_BIOS_VERSION_STRING:
+ rc = ocs_hw_get_ptr(&ocs->hw, OCS_HW_BIOS_VERSION_STRING);
+ break;
+#if defined(OCS_ENABLE_VPD_SUPPORT)
+ case OCS_SCSI_SERIALNUMBER:
+ {
+ uint8_t *pvpd;
+ uint32_t vpd_len;
+
+ if (ocs_hw_get(&ocs->hw, OCS_HW_VPD_LEN, &vpd_len)) {
+ ocs_log_test(ocs, "Can't get VPD length\n");
+ rc = "\012sn-unknown";
+ break;
+ }
+
+ pvpd = ocs_hw_get_ptr(&ocs->hw, OCS_HW_VPD);
+ if (pvpd) {
+ rc = ocs_find_vpd(pvpd, vpd_len, "SN");
+ }
+
+ if (rc == NULL ||
+ ocs_strlen(rc) == 0) {
+ /* Note: VPD is missing, using wwnn for serial number */
+ scsi_log(ocs, "Note: VPD is missing, using wwnn for serial number\n");
+ /* Use the last 32 bits of the WWN */
+ if ((ocs == NULL) || (ocs->domain == NULL) || (ocs->domain->sport == NULL)) {
+ rc = "\011(Unknown)";
+ } else {
+ rc = &ocs->domain->sport->wwnn_str[8];
+ }
+ }
+ break;
+ }
+ case OCS_SCSI_PARTNUMBER:
+ {
+ uint8_t *pvpd;
+ uint32_t vpd_len;
+
+ if (ocs_hw_get(&ocs->hw, OCS_HW_VPD_LEN, &vpd_len)) {
+ ocs_log_test(ocs, "Can't get VPD length\n");
+ rc = "\012pn-unknown";
+ break;
+ }
+ pvpd = ocs_hw_get_ptr(&ocs->hw, OCS_HW_VPD);
+ if (pvpd) {
+ rc = ocs_find_vpd(pvpd, vpd_len, "PN");
+ if (rc == NULL) {
+ rc = "\012pn-unknown";
+ }
+ } else {
+ rc = "\012pn-unknown";
+ }
+ break;
+ }
+#endif
+ default:
+ break;
+ }
+
+ if (rc == NULL) {
+ ocs_log_debug(ocs, "invalid property request %d\n", prop);
+ }
+ return rc;
+}
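+
+/*
+ * Usage sketch (editor's illustration, not part of the driver): querying
+ * integer- and pointer-valued properties.
+ *
+ * uint32_t max_sgl = ocs_scsi_get_property(ocs, OCS_SCSI_MAX_SGL);
+ * uint32_t dif_ok = ocs_scsi_get_property(ocs, OCS_SCSI_DIF_CAPABLE);
+ * uint8_t *wwpn = ocs_scsi_get_property_ptr(ocs, OCS_SCSI_WWPN);
+ *
+ * The OCS_SCSI_WWPN pointer is assumed here to reference the 8-byte port
+ * name held by the HW layer.
+ */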
+
+/**
+ * @ingroup scsi_api_base
+ * @brief Notify that delete initiator is complete.
+ *
+ * @par Description
+ * Sent by the target-server to notify the base driver that the work started from
+ * ocs_scsi_del_initiator() is now complete and that it is safe for the node to
+ * release the rest of its resources.
+ *
+ * @param node Pointer to the node.
+ *
+ * @return None.
+ */
+void
+ocs_scsi_del_initiator_complete(ocs_node_t *node)
+{
+ /* Notify the node to resume */
+ ocs_node_post_event(node, OCS_EVT_NODE_DEL_INI_COMPLETE, NULL);
+}
+
+
+/**
+ * @ingroup scsi_api_base
+ * @brief Notify that delete target is complete.
+ *
+ * @par Description
+ * Sent by the initiator-client to notify the base driver that the work started from
+ * ocs_scsi_del_target() is now complete and that it is safe for the node to
+ * release the rest of its resources.
+ *
+ * @param node Pointer to the node.
+ *
+ * @return None.
+ */
+void
+ocs_scsi_del_target_complete(ocs_node_t *node)
+{
+ /* Notify the node to resume */
+ ocs_node_post_event(node, OCS_EVT_NODE_DEL_TGT_COMPLETE, NULL);
+}
+
+
+/**
+ * @brief Update transferred count
+ *
+ * @par Description
+ * Updates io->transferred, as required when using first burst, when the amount
+ * of first burst data processed differs from the amount of first burst
+ * data received.
+ *
+ * @param io Pointer to the io object.
+ * @param transferred Number of bytes transferred out of first burst buffers.
+ *
+ * @return None.
+ */
+void
+ocs_scsi_update_first_burst_transferred(ocs_io_t *io, uint32_t transferred)
+{
+ io->transferred = transferred;
+}
+
+/**
+ * @brief Register bounce callback for multi-threading.
+ *
+ * @par Description
+ * Register the back end bounce function.
+ *
+ * @param ocs Pointer to device object.
+ * @param fctn Function pointer of bounce function.
+ *
+ * @return None.
+ */
+void
+ocs_scsi_register_bounce(ocs_t *ocs, void(*fctn)(void(*fctn)(void *arg), void *arg, uint32_t s_id, uint32_t d_id,
+ uint32_t ox_id))
+{
+ ocs_hw_rtn_e rc;
+
+ rc = ocs_hw_callback(&ocs->hw, OCS_HW_CB_BOUNCE, fctn, NULL);
+ if (rc) {
+ ocs_log_test(ocs, "ocs_hw_callback(OCS_HW_CB_BOUNCE) failed: %d\n", rc);
+ }
+}
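+
+/*
+ * Usage sketch (editor's illustration, not part of the driver): the simplest
+ * possible bounce function runs the deferred work inline; a real back end
+ * would typically hash s_id/d_id/ox_id to choose a worker thread and queue
+ * the work there. example_bounce is hypothetical.
+ *
+ * static void
+ * example_bounce(void (*work)(void *arg), void *arg, uint32_t s_id,
+ * uint32_t d_id, uint32_t ox_id)
+ * {
+ * work(arg);
+ * }
+ *
+ * ocs_scsi_register_bounce(ocs, example_bounce);
+ */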
diff --git a/sys/dev/ocs_fc/ocs_scsi.h b/sys/dev/ocs_fc/ocs_scsi.h
new file mode 100644
index 000000000000..dedd43efcbc2
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs_scsi.h
@@ -0,0 +1,454 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/**
+ * @file
+ * OCS SCSI API declarations
+ *
+ */
+
+#if !defined(__OCS_SCSI_H__)
+#define __OCS_SCSI_H__
+
+#include "ocs_ddump.h"
+#include "ocs_mgmt.h"
+#include "ocs_utils.h"
+
+
+/* ocs_scsi_rcv_cmd() ocs_scsi_rcv_tmf() flags */
+#define OCS_SCSI_CMD_DIR_IN (1U << 0)
+#define OCS_SCSI_CMD_DIR_OUT (1U << 1)
+#define OCS_SCSI_CMD_SIMPLE (1U << 2)
+#define OCS_SCSI_CMD_HEAD_OF_QUEUE (1U << 3)
+#define OCS_SCSI_CMD_ORDERED (1U << 4)
+#define OCS_SCSI_CMD_UNTAGGED (1U << 5)
+#define OCS_SCSI_CMD_ACA (1U << 6)
+#define OCS_SCSI_FIRST_BURST_ERR (1U << 7)
+#define OCS_SCSI_FIRST_BURST_ABORTED (1U << 8)
+
+/* ocs_scsi_send_rd_data/recv_wr_data/send_resp flags */
+#define OCS_SCSI_LAST_DATAPHASE (1U << 0)
+#define OCS_SCSI_NO_AUTO_RESPONSE (1U << 1)
+#define OCS_SCSI_LOW_LATENCY (1U << 2)
+
+#define OCS_SCSI_WQ_STEERING_SHIFT (16)
+#define OCS_SCSI_WQ_STEERING_MASK (0xf << OCS_SCSI_WQ_STEERING_SHIFT)
+#define OCS_SCSI_WQ_STEERING_CLASS (0 << OCS_SCSI_WQ_STEERING_SHIFT)
+#define OCS_SCSI_WQ_STEERING_REQUEST (1 << OCS_SCSI_WQ_STEERING_SHIFT)
+#define OCS_SCSI_WQ_STEERING_CPU (2 << OCS_SCSI_WQ_STEERING_SHIFT)
+
+#define OCS_SCSI_WQ_CLASS_SHIFT (20)
+#define OCS_SCSI_WQ_CLASS_MASK (0xf << OCS_SCSI_WQ_CLASS_SHIFT)
+#define OCS_SCSI_WQ_CLASS(x) (((x) << OCS_SCSI_WQ_CLASS_SHIFT) & OCS_SCSI_WQ_CLASS_MASK)
+
+#define OCS_SCSI_WQ_CLASS_LOW_LATENCY (1)
+
+/*!
+ * @defgroup scsi_api_base SCSI Base Target/Initiator
+ * @defgroup scsi_api_target SCSI Target
+ * @defgroup scsi_api_initiator SCSI Initiator
+ */
+
+/**
+ * @brief SCSI command response.
+ *
+ * This structure is used by target-servers to specify SCSI status and
+ * sense data. In this case all but the @b residual element are used. For
+ * initiator-clients, this structure is used by the SCSI API to convey the
+ * response data for issued commands, including the residual element.
+ */
+typedef struct {
+ uint8_t scsi_status; /**< SCSI status */
+ uint16_t scsi_status_qualifier; /**< SCSI status qualifier */
+ uint8_t *response_data; /**< pointer to response data buffer */
+ uint32_t response_data_length; /**< length of response data buffer (bytes) */
+ uint8_t *sense_data; /**< pointer to sense data buffer */
+ uint32_t sense_data_length; /**< length of sense data buffer (bytes) */
+ int32_t residual; /**< command residual (not used for target), positive value
+ * indicates an underflow, negative value indicates overflow
+ */
+ uint32_t response_wire_length; /**< Command response length received in wcqe */
+} ocs_scsi_cmd_resp_t;
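+
+/*
+ * Usage sketch (editor's illustration, not part of the driver): a
+ * target-server returning CHECK CONDITION with sense data through
+ * ocs_scsi_send_resp() (declared below). sense, sense_len and
+ * example_tgt_done are hypothetical.
+ *
+ * ocs_scsi_cmd_resp_t rsp;
+ *
+ * ocs_memset(&rsp, 0, sizeof(rsp));
+ * rsp.scsi_status = SCSI_STATUS_CHECK_CONDITION;
+ * rsp.sense_data = sense;
+ * rsp.sense_data_length = sense_len;
+ * rc = ocs_scsi_send_resp(io, 0, &rsp, example_tgt_done, NULL);
+ */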
+
+/* Status values returned by IO callbacks */
+typedef enum {
+ OCS_SCSI_STATUS_GOOD = 0,
+ OCS_SCSI_STATUS_ABORTED,
+ OCS_SCSI_STATUS_ERROR,
+ OCS_SCSI_STATUS_DIF_GUARD_ERROR,
+ OCS_SCSI_STATUS_DIF_REF_TAG_ERROR,
+ OCS_SCSI_STATUS_DIF_APP_TAG_ERROR,
+ OCS_SCSI_STATUS_DIF_UNKNOWN_ERROR,
+ OCS_SCSI_STATUS_PROTOCOL_CRC_ERROR,
+ OCS_SCSI_STATUS_NO_IO,
+ OCS_SCSI_STATUS_ABORT_IN_PROGRESS,
+ OCS_SCSI_STATUS_CHECK_RESPONSE,
+ OCS_SCSI_STATUS_COMMAND_TIMEOUT,
+ OCS_SCSI_STATUS_TIMEDOUT_AND_ABORTED,
+ OCS_SCSI_STATUS_SHUTDOWN,
+ OCS_SCSI_STATUS_NEXUS_LOST,
+
+} ocs_scsi_io_status_e;
+
+/* SCSI command status */
+#define SCSI_STATUS_GOOD 0x00
+#define SCSI_STATUS_CHECK_CONDITION 0x02
+#define SCSI_STATUS_CONDITION_MET 0x04
+#define SCSI_STATUS_BUSY 0x08
+#define SCSI_STATUS_RESERVATION_CONFLICT 0x18
+#define SCSI_STATUS_TASK_SET_FULL 0x28
+#define SCSI_STATUS_ACA_ACTIVE 0x30
+#define SCSI_STATUS_TASK_ABORTED 0x40
+
+
+
+/* Callback used by send_rd_data(), recv_wr_data(), send_resp() */
+typedef int32_t (*ocs_scsi_io_cb_t)(ocs_io_t *io, ocs_scsi_io_status_e status, uint32_t flags,
+ void *arg);
+
+/* Callback used by send_rd_io(), send_wr_io() */
+typedef int32_t (*ocs_scsi_rsp_io_cb_t)(ocs_io_t *io, ocs_scsi_io_status_e status, ocs_scsi_cmd_resp_t *rsp,
+ uint32_t flags, void *arg);
+
+/* ocs_scsi_cb_t flags */
+#define OCS_SCSI_IO_CMPL (1U << 0) /* IO completed */
+#define OCS_SCSI_IO_CMPL_RSP_SENT (1U << 1) /* IO completed, response sent */
+#define OCS_SCSI_IO_ABORTED (1U << 2) /* IO was aborted */
+
+/* ocs_scsi_recv_tmf() request values */
+typedef enum {
+ OCS_SCSI_TMF_ABORT_TASK = 1,
+ OCS_SCSI_TMF_QUERY_TASK_SET,
+ OCS_SCSI_TMF_ABORT_TASK_SET,
+ OCS_SCSI_TMF_CLEAR_TASK_SET,
+ OCS_SCSI_TMF_QUERY_ASYNCHRONOUS_EVENT,
+ OCS_SCSI_TMF_LOGICAL_UNIT_RESET,
+ OCS_SCSI_TMF_CLEAR_ACA,
+ OCS_SCSI_TMF_TARGET_RESET,
+} ocs_scsi_tmf_cmd_e;
+
+/* ocs_scsi_send_tmf_resp() response values */
+typedef enum {
+ OCS_SCSI_TMF_FUNCTION_COMPLETE = 1,
+ OCS_SCSI_TMF_FUNCTION_SUCCEEDED,
+ OCS_SCSI_TMF_FUNCTION_IO_NOT_FOUND,
+ OCS_SCSI_TMF_FUNCTION_REJECTED,
+ OCS_SCSI_TMF_INCORRECT_LOGICAL_UNIT_NUMBER,
+ OCS_SCSI_TMF_SERVICE_DELIVERY,
+} ocs_scsi_tmf_resp_e;
+
+/**
+ * @brief property names for ocs_scsi_get_property() functions
+ */
+
+typedef enum {
+ OCS_SCSI_MAX_SGE,
+ OCS_SCSI_MAX_SGL,
+ OCS_SCSI_WWNN,
+ OCS_SCSI_WWPN,
+ OCS_SCSI_SERIALNUMBER,
+ OCS_SCSI_PARTNUMBER,
+ OCS_SCSI_PORTNUM,
+ OCS_SCSI_BIOS_VERSION_STRING,
+ OCS_SCSI_MAX_IOS,
+ OCS_SCSI_DIF_CAPABLE,
+ OCS_SCSI_DIF_MULTI_SEPARATE,
+ OCS_SCSI_MAX_FIRST_BURST,
+ OCS_SCSI_ENABLE_TASK_SET_FULL,
+} ocs_scsi_property_e;
+
+#define DIF_SIZE 8
+
+/**
+ * @brief T10 DIF operations
+ *
+ * WARNING: do not reorder or insert into this list without making appropriate changes in ocs_dif.c
+ */
+typedef enum {
+ OCS_SCSI_DIF_OPER_DISABLED,
+ OCS_SCSI_DIF_OPER_IN_NODIF_OUT_CRC,
+ OCS_SCSI_DIF_OPER_IN_CRC_OUT_NODIF,
+ OCS_SCSI_DIF_OPER_IN_NODIF_OUT_CHKSUM,
+ OCS_SCSI_DIF_OPER_IN_CHKSUM_OUT_NODIF,
+ OCS_SCSI_DIF_OPER_IN_CRC_OUT_CRC,
+ OCS_SCSI_DIF_OPER_IN_CHKSUM_OUT_CHKSUM,
+ OCS_SCSI_DIF_OPER_IN_CRC_OUT_CHKSUM,
+ OCS_SCSI_DIF_OPER_IN_CHKSUM_OUT_CRC,
+ OCS_SCSI_DIF_OPER_IN_RAW_OUT_RAW,
+} ocs_scsi_dif_oper_e;
+#define OCS_SCSI_DIF_OPER_PASS_THRU OCS_SCSI_DIF_OPER_IN_CRC_OUT_CRC
+#define OCS_SCSI_DIF_OPER_STRIP OCS_SCSI_DIF_OPER_IN_CRC_OUT_NODIF
+#define OCS_SCSI_DIF_OPER_INSERT OCS_SCSI_DIF_OPER_IN_NODIF_OUT_CRC
+
+/**
+ * @brief T10 DIF block sizes
+ */
+typedef enum {
+ OCS_SCSI_DIF_BK_SIZE_512,
+ OCS_SCSI_DIF_BK_SIZE_1024,
+ OCS_SCSI_DIF_BK_SIZE_2048,
+ OCS_SCSI_DIF_BK_SIZE_4096,
+ OCS_SCSI_DIF_BK_SIZE_520,
+ OCS_SCSI_DIF_BK_SIZE_4104
+} ocs_scsi_dif_blk_size_e;
+
+/**
+ * @brief generic scatter-gather list structure
+ */
+
+typedef struct ocs_scsi_sgl_s {
+ uintptr_t addr; /**< physical address */
+ uintptr_t dif_addr; /**< address of DIF segment, zero if DIF is interleaved */
+ size_t len; /**< length */
+} ocs_scsi_sgl_t;
+
+
+/**
+ * @brief T10 DIF information passed to the transport
+ */
+
+typedef struct ocs_scsi_dif_info_s {
+ ocs_scsi_dif_oper_e dif_oper;
+ ocs_scsi_dif_blk_size_e blk_size;
+ uint32_t ref_tag;
+ uint32_t app_tag:16,
+ check_ref_tag:1,
+ check_app_tag:1,
+ check_guard:1,
+ dif_separate:1,
+
+ /* If the APP TAG is 0xFFFF, disable checking the REF TAG and CRC fields */
+ disable_app_ffff:1,
+
+ /* if the APP TAG is 0xFFFF and REF TAG is 0xFFFF_FFFF, disable checking the received CRC field. */
+ disable_app_ref_ffff:1,
+ :10;
+ uint64_t lba;
+} ocs_scsi_dif_info_t;
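+
+/*
+ * Usage sketch (editor's illustration, not part of the driver): preparing
+ * protection information for a write whose host buffers carry no DIF, so
+ * DIF is inserted on the wire. Seeding ref_tag with the low 32 bits of the
+ * (hypothetical) starting lba follows Type 1 protection conventions.
+ *
+ * ocs_scsi_dif_info_t dif;
+ *
+ * ocs_memset(&dif, 0, sizeof(dif));
+ * dif.dif_oper = OCS_SCSI_DIF_OPER_INSERT;
+ * dif.blk_size = OCS_SCSI_DIF_BK_SIZE_512;
+ * dif.ref_tag = (uint32_t)lba;
+ * dif.lba = lba;
+ */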
+
+/* Return values for calls from base driver to target-server/initiator-client */
+#define OCS_SCSI_CALL_COMPLETE 0 /* All work is done */
+#define OCS_SCSI_CALL_ASYNC 1 /* Work will be completed asynchronously */
+
+/* Calls from target/initiator to base driver */
+
+typedef enum {
+ OCS_SCSI_IO_ROLE_ORIGINATOR,
+ OCS_SCSI_IO_ROLE_RESPONDER,
+} ocs_scsi_io_role_e;
+
+extern void ocs_scsi_io_alloc_enable(ocs_node_t *node);
+extern void ocs_scsi_io_alloc_disable(ocs_node_t *node);
+extern ocs_io_t *ocs_scsi_io_alloc(ocs_node_t *node, ocs_scsi_io_role_e role);
+extern void ocs_scsi_io_free(ocs_io_t *io);
+extern ocs_io_t *ocs_io_get_instance(ocs_t *ocs, uint32_t index);
+
+extern void ocs_scsi_register_bounce(ocs_t *ocs, void(*fctn)(void(*fctn)(void *arg), void *arg,
+ uint32_t s_id, uint32_t d_id, uint32_t ox_id));
+
+/* Calls from base driver to target-server */
+
+extern int32_t ocs_scsi_tgt_driver_init(void);
+extern int32_t ocs_scsi_tgt_driver_exit(void);
+extern int32_t ocs_scsi_tgt_io_init(ocs_io_t *io);
+extern int32_t ocs_scsi_tgt_io_exit(ocs_io_t *io);
+extern int32_t ocs_scsi_tgt_new_device(ocs_t *ocs);
+extern int32_t ocs_scsi_tgt_del_device(ocs_t *ocs);
+extern int32_t ocs_scsi_tgt_new_domain(ocs_domain_t *domain);
+extern void ocs_scsi_tgt_del_domain(ocs_domain_t *domain);
+extern int32_t ocs_scsi_tgt_new_sport(ocs_sport_t *sport);
+extern void ocs_scsi_tgt_del_sport(ocs_sport_t *sport);
+extern void ocs_scsi_sport_deleted(ocs_sport_t *sport);
+extern int32_t ocs_scsi_validate_initiator(ocs_node_t *node);
+extern int32_t ocs_scsi_new_initiator(ocs_node_t *node);
+typedef enum {
+ OCS_SCSI_INITIATOR_DELETED,
+ OCS_SCSI_INITIATOR_MISSING,
+} ocs_scsi_del_initiator_reason_e;
+extern int32_t ocs_scsi_del_initiator(ocs_node_t *node, ocs_scsi_del_initiator_reason_e reason);
+extern int32_t ocs_scsi_recv_cmd(ocs_io_t *io, uint64_t lun, uint8_t *cdb, uint32_t cdb_len, uint32_t flags);
+extern int32_t ocs_scsi_recv_cmd_first_burst(ocs_io_t *io, uint64_t lun, uint8_t *cdb, uint32_t cdb_len, uint32_t flags,
+ ocs_dma_t first_burst_buffers[], uint32_t first_burst_bytes);
+extern int32_t ocs_scsi_recv_tmf(ocs_io_t *tmfio, uint64_t lun, ocs_scsi_tmf_cmd_e cmd, ocs_io_t *abortio,
+ uint32_t flags);
+extern ocs_sport_t *ocs_sport_get_instance(ocs_domain_t *domain, uint32_t index);
+extern ocs_domain_t *ocs_domain_get_instance(ocs_t *ocs, uint32_t index);
+
+
+/* Calls from target-server to base driver */
+
+extern int32_t ocs_scsi_send_rd_data(ocs_io_t *io, uint32_t flags,
+ ocs_scsi_dif_info_t *dif_info,
+ ocs_scsi_sgl_t *sgl, uint32_t sgl_count,
+ uint32_t wire_len, ocs_scsi_io_cb_t cb, void *arg);
+extern int32_t ocs_scsi_recv_wr_data(ocs_io_t *io, uint32_t flags,
+ ocs_scsi_dif_info_t *dif_info,
+ ocs_scsi_sgl_t *sgl, uint32_t sgl_count,
+ uint32_t wire_len, ocs_scsi_io_cb_t cb, void *arg);
+extern int32_t ocs_scsi_send_resp(ocs_io_t *io, uint32_t flags, ocs_scsi_cmd_resp_t *rsp,
+ ocs_scsi_io_cb_t cb, void *arg);
+extern int32_t ocs_scsi_send_tmf_resp(ocs_io_t *io, ocs_scsi_tmf_resp_e rspcode, uint8_t addl_rsp_info[3],
+ ocs_scsi_io_cb_t cb, void *arg);
+extern int32_t ocs_scsi_tgt_abort_io(ocs_io_t *io, ocs_scsi_io_cb_t cb, void *arg);
+extern void ocs_scsi_io_complete(ocs_io_t *io);
+extern uint32_t ocs_scsi_get_property(ocs_t *ocs, ocs_scsi_property_e prop);
+extern void *ocs_scsi_get_property_ptr(ocs_t *ocs, ocs_scsi_property_e prop);
+
+extern void ocs_scsi_del_initiator_complete(ocs_node_t *node);
+
+extern void ocs_scsi_update_first_burst_transferred(ocs_io_t *io, uint32_t transferred);
+
+/* Calls from base driver to initiator-client */
+
+extern int32_t ocs_scsi_ini_driver_init(void);
+extern int32_t ocs_scsi_ini_driver_exit(void);
+extern int32_t ocs_scsi_ini_io_init(ocs_io_t *io);
+extern int32_t ocs_scsi_ini_io_exit(ocs_io_t *io);
+extern int32_t ocs_scsi_ini_new_device(ocs_t *ocs);
+extern int32_t ocs_scsi_ini_del_device(ocs_t *ocs);
+extern int32_t ocs_scsi_ini_new_domain(ocs_domain_t *domain);
+extern void ocs_scsi_ini_del_domain(ocs_domain_t *domain);
+extern int32_t ocs_scsi_ini_new_sport(ocs_sport_t *sport);
+extern void ocs_scsi_ini_del_sport(ocs_sport_t *sport);
+extern int32_t ocs_scsi_new_target(ocs_node_t *node);
+
+typedef enum {
+ OCS_SCSI_TARGET_DELETED,
+ OCS_SCSI_TARGET_MISSING,
+} ocs_scsi_del_target_reason_e;
+extern int32_t ocs_scsi_del_target(ocs_node_t *node, ocs_scsi_del_target_reason_e reason);
+
+/* Calls from the initiator-client to the base driver */
+
+extern int32_t ocs_scsi_send_rd_io(ocs_node_t *node, ocs_io_t *io, uint64_t lun, void *cdb, uint32_t cdb_len,
+ ocs_scsi_dif_info_t *dif_info,
+ ocs_scsi_sgl_t *sgl, uint32_t sgl_count, uint32_t wire_len, ocs_scsi_rsp_io_cb_t cb, void *arg);
+extern int32_t ocs_scsi_send_wr_io(ocs_node_t *node, ocs_io_t *io, uint64_t lun, void *cdb, uint32_t cdb_len,
+ ocs_scsi_dif_info_t *dif_info,
+ ocs_scsi_sgl_t *sgl, uint32_t sgl_count, uint32_t wire_len, ocs_scsi_rsp_io_cb_t cb, void *arg);
+extern int32_t ocs_scsi_send_wr_io_first_burst(ocs_node_t *node, ocs_io_t *io, uint64_t lun, void *cdb, uint32_t cdb_len,
+ ocs_scsi_dif_info_t *dif_info,
+ ocs_scsi_sgl_t *sgl, uint32_t sgl_count, uint32_t wire_len, uint32_t first_burst,
+ ocs_scsi_rsp_io_cb_t cb, void *arg);
+extern int32_t ocs_scsi_send_tmf(ocs_node_t *node, ocs_io_t *io, ocs_io_t *io_to_abort, uint64_t lun,
+ ocs_scsi_tmf_cmd_e tmf, ocs_scsi_sgl_t *sgl, uint32_t sgl_count, uint32_t len, ocs_scsi_rsp_io_cb_t cb, void *arg);
+extern int32_t ocs_scsi_send_nodata_io(ocs_node_t *node, ocs_io_t *io, uint64_t lun, void *cdb, uint32_t cdb_len, ocs_scsi_rsp_io_cb_t cb, void *arg);
+extern void ocs_scsi_del_target_complete(ocs_node_t *node);
+
+typedef enum {
+ OCS_SCSI_DDUMP_DEVICE,
+ OCS_SCSI_DDUMP_DOMAIN,
+ OCS_SCSI_DDUMP_SPORT,
+ OCS_SCSI_DDUMP_NODE,
+ OCS_SCSI_DDUMP_IO,
+} ocs_scsi_ddump_type_e;
+
+/* base driver to target/initiator */
+
+typedef struct ocs_scsi_vaddr_len_s {
+ void *vaddr;
+ uint32_t length;
+} ocs_scsi_vaddr_len_t;
+extern int32_t ocs_scsi_get_block_vaddr(ocs_io_t *io, uint64_t blocknumber, ocs_scsi_vaddr_len_t addrlen[],
+ uint32_t max_addrlen, void **dif_vaddr);
+
+extern void ocs_scsi_ini_ddump(ocs_textbuf_t *textbuf, ocs_scsi_ddump_type_e type, void *obj);
+extern void ocs_scsi_tgt_ddump(ocs_textbuf_t *textbuf, ocs_scsi_ddump_type_e type, void *obj);
+
+/* Calls within base driver */
+extern int32_t ocs_scsi_io_dispatch(ocs_io_t *io, void *cb);
+extern int32_t ocs_scsi_io_dispatch_abort(ocs_io_t *io, void *cb);
+extern void ocs_scsi_check_pending(ocs_t *ocs);
+
+extern uint32_t ocs_scsi_dif_blocksize(ocs_scsi_dif_info_t *dif_info);
+extern int32_t ocs_scsi_dif_set_blocksize(ocs_scsi_dif_info_t *dif_info, uint32_t blocksize);
+extern int32_t ocs_scsi_dif_mem_blocksize(ocs_scsi_dif_info_t *dif_info, int wiretomem);
+extern int32_t ocs_scsi_dif_wire_blocksize(ocs_scsi_dif_info_t *dif_info, int wiretomem);
+
+
+uint32_t ocs_get_crn(ocs_node_t *node, uint8_t *crn, uint64_t lun);
+void ocs_del_crn(ocs_node_t *node);
+void ocs_reset_crn(ocs_node_t *node, uint64_t lun);
+
+/**
+ * @brief Notification from base driver that domain is in force-free path.
+ *
+ * @par Description Domain is forcefully going away. Clean up any resources associated with it.
+ *
+ * @param domain Pointer to the domain being freed.
+ *
+ * @return None.
+ */
+
+static inline void
+ocs_scsi_notify_domain_force_free(ocs_domain_t *domain)
+{
+ /* Nothing to do */
+ return;
+}
+
+/**
+ * @brief Notification from base driver that sport is in force-free path.
+ *
+ * @par Description Sport is forcefully going away. Clean up any resources associated with it.
+ *
+ * @param sport Pointer to the sport being freed.
+ *
+ * @return None.
+ */
+
+static inline void
+ocs_scsi_notify_sport_force_free(ocs_sport_t *sport)
+{
+ /* Nothing to do */
+ return;
+}
+
+
+/**
+ * @brief Notification from base driver that node is in force-free path.
+ *
+ * @par Description Node is forcefully going away. Clean up any resources associated with it.
+ *
+ * @param node Pointer to the node being freed.
+ *
+ * @return None.
+ */
+
+static inline void
+ocs_scsi_notify_node_force_free(ocs_node_t *node)
+{
+ /* Nothing to do */
+ return;
+}
+#endif /* __OCS_SCSI_H__ */
diff --git a/sys/dev/ocs_fc/ocs_sm.c b/sys/dev/ocs_fc/ocs_sm.c
new file mode 100644
index 000000000000..d56240d5343a
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs_sm.c
@@ -0,0 +1,207 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/**
+ * @file
+ * Generic state machine framework.
+ */
+
+#include "ocs_os.h"
+#include "ocs_sm.h"
+
+const char *ocs_sm_id[] = {
+ "common",
+ "domain",
+ "port",
+ "login"
+};
+
+/**
+ * @brief Post an event to a context.
+ *
+ * @param ctx State machine context
+ * @param evt Event to post
+ * @param data Event-specific data (if any)
+ *
+ * @return 0 if successfully posted event; -1 if state machine
+ * is disabled
+ */
+int
+ocs_sm_post_event(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
+{
+ if (ctx->current_state) {
+ ctx->current_state(ctx, evt, data);
+ return 0;
+ } else {
+ return -1;
+ }
+}
+
+/**
+ * @brief Transition to a new state.
+ */
+void
+ocs_sm_transition(ocs_sm_ctx_t *ctx, ocs_sm_function_t state, void *data)
+{
+ if (ctx->current_state == state) {
+ ocs_sm_post_event(ctx, OCS_EVT_REENTER, data);
+ } else {
+ ocs_sm_post_event(ctx, OCS_EVT_EXIT, data);
+ ctx->current_state = state;
+ ocs_sm_post_event(ctx, OCS_EVT_ENTER, data);
+ }
+}
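+
+/*
+ * Usage sketch (editor's illustration, not part of the driver): a state
+ * function and how a context is started and driven. example_state_idle,
+ * example_state_busy and owning_object are hypothetical.
+ *
+ * static void *
+ * example_state_idle(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
+ * {
+ * switch (evt) {
+ * case OCS_EVT_ENTER:
+ * break;
+ * case OCS_EVT_RESUME:
+ * ocs_sm_transition(ctx, example_state_busy, data);
+ * break;
+ * default:
+ * break;
+ * }
+ * return NULL;
+ * }
+ *
+ * ctx->app = owning_object;
+ * ocs_sm_transition(ctx, example_state_idle, NULL);
+ * ocs_sm_post_event(ctx, OCS_EVT_RESUME, NULL);
+ *
+ * The first transition posts OCS_EVT_EXIT to the (NULL) previous state, which
+ * ocs_sm_post_event() ignores, then posts OCS_EVT_ENTER to the new state.
+ */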
+
+/**
+ * @brief Disable further state machine processing.
+ */
+void
+ocs_sm_disable(ocs_sm_ctx_t *ctx)
+{
+ ctx->current_state = NULL;
+}
+
+const char *ocs_sm_event_name(ocs_sm_event_t evt)
+{
+ switch (evt) {
+ #define RETEVT(x) case x: return #x;
+ RETEVT(OCS_EVT_ENTER)
+ RETEVT(OCS_EVT_REENTER)
+ RETEVT(OCS_EVT_EXIT)
+ RETEVT(OCS_EVT_SHUTDOWN)
+ RETEVT(OCS_EVT_RESPONSE)
+ RETEVT(OCS_EVT_RESUME)
+ RETEVT(OCS_EVT_TIMER_EXPIRED)
+ RETEVT(OCS_EVT_ERROR)
+ RETEVT(OCS_EVT_SRRS_ELS_REQ_OK)
+ RETEVT(OCS_EVT_SRRS_ELS_CMPL_OK)
+ RETEVT(OCS_EVT_SRRS_ELS_REQ_FAIL)
+ RETEVT(OCS_EVT_SRRS_ELS_CMPL_FAIL)
+ RETEVT(OCS_EVT_SRRS_ELS_REQ_RJT)
+ RETEVT(OCS_EVT_NODE_ATTACH_OK)
+ RETEVT(OCS_EVT_NODE_ATTACH_FAIL)
+ RETEVT(OCS_EVT_NODE_FREE_OK)
+ RETEVT(OCS_EVT_ELS_REQ_TIMEOUT)
+ RETEVT(OCS_EVT_ELS_REQ_ABORTED)
+ RETEVT(OCS_EVT_ABORT_ELS)
+ RETEVT(OCS_EVT_ELS_ABORT_CMPL)
+
+ RETEVT(OCS_EVT_DOMAIN_FOUND)
+ RETEVT(OCS_EVT_DOMAIN_ALLOC_OK)
+ RETEVT(OCS_EVT_DOMAIN_ALLOC_FAIL)
+ RETEVT(OCS_EVT_DOMAIN_REQ_ATTACH)
+ RETEVT(OCS_EVT_DOMAIN_ATTACH_OK)
+ RETEVT(OCS_EVT_DOMAIN_ATTACH_FAIL)
+ RETEVT(OCS_EVT_DOMAIN_LOST)
+ RETEVT(OCS_EVT_DOMAIN_FREE_OK)
+ RETEVT(OCS_EVT_DOMAIN_FREE_FAIL)
+ RETEVT(OCS_EVT_HW_DOMAIN_REQ_ATTACH)
+ RETEVT(OCS_EVT_HW_DOMAIN_REQ_FREE)
+ RETEVT(OCS_EVT_ALL_CHILD_NODES_FREE)
+
+ RETEVT(OCS_EVT_SPORT_ALLOC_OK)
+ RETEVT(OCS_EVT_SPORT_ALLOC_FAIL)
+ RETEVT(OCS_EVT_SPORT_ATTACH_OK)
+ RETEVT(OCS_EVT_SPORT_ATTACH_FAIL)
+ RETEVT(OCS_EVT_SPORT_FREE_OK)
+ RETEVT(OCS_EVT_SPORT_FREE_FAIL)
+ RETEVT(OCS_EVT_SPORT_TOPOLOGY_NOTIFY)
+ RETEVT(OCS_EVT_HW_PORT_ALLOC_OK)
+ RETEVT(OCS_EVT_HW_PORT_ALLOC_FAIL)
+ RETEVT(OCS_EVT_HW_PORT_ATTACH_OK)
+ RETEVT(OCS_EVT_HW_PORT_REQ_ATTACH)
+ RETEVT(OCS_EVT_HW_PORT_REQ_FREE)
+ RETEVT(OCS_EVT_HW_PORT_FREE_OK)
+
+ RETEVT(OCS_EVT_NODE_FREE_FAIL)
+
+ RETEVT(OCS_EVT_ABTS_RCVD)
+
+ RETEVT(OCS_EVT_NODE_MISSING)
+ RETEVT(OCS_EVT_NODE_REFOUND)
+ RETEVT(OCS_EVT_SHUTDOWN_IMPLICIT_LOGO)
+ RETEVT(OCS_EVT_SHUTDOWN_EXPLICIT_LOGO)
+
+ RETEVT(OCS_EVT_ELS_FRAME)
+ RETEVT(OCS_EVT_PLOGI_RCVD)
+ RETEVT(OCS_EVT_FLOGI_RCVD)
+ RETEVT(OCS_EVT_LOGO_RCVD)
+ RETEVT(OCS_EVT_PRLI_RCVD)
+ RETEVT(OCS_EVT_PRLO_RCVD)
+ RETEVT(OCS_EVT_PDISC_RCVD)
+ RETEVT(OCS_EVT_FDISC_RCVD)
+ RETEVT(OCS_EVT_ADISC_RCVD)
+ RETEVT(OCS_EVT_RSCN_RCVD)
+ RETEVT(OCS_EVT_SCR_RCVD)
+ RETEVT(OCS_EVT_ELS_RCVD)
+ RETEVT(OCS_EVT_LAST)
+ RETEVT(OCS_EVT_FCP_CMD_RCVD)
+
+ RETEVT(OCS_EVT_RFT_ID_RCVD)
+ RETEVT(OCS_EVT_RFF_ID_RCVD)
+ RETEVT(OCS_EVT_GNN_ID_RCVD)
+ RETEVT(OCS_EVT_GPN_ID_RCVD)
+ RETEVT(OCS_EVT_GFPN_ID_RCVD)
+ RETEVT(OCS_EVT_GFF_ID_RCVD)
+ RETEVT(OCS_EVT_GID_FT_RCVD)
+ RETEVT(OCS_EVT_GID_PT_RCVD)
+ RETEVT(OCS_EVT_RPN_ID_RCVD)
+ RETEVT(OCS_EVT_RNN_ID_RCVD)
+ RETEVT(OCS_EVT_RCS_ID_RCVD)
+ RETEVT(OCS_EVT_RSNN_NN_RCVD)
+ RETEVT(OCS_EVT_RSPN_ID_RCVD)
+ RETEVT(OCS_EVT_RHBA_RCVD)
+ RETEVT(OCS_EVT_RPA_RCVD)
+
+ RETEVT(OCS_EVT_GIDPT_DELAY_EXPIRED)
+
+ RETEVT(OCS_EVT_ABORT_IO)
+ RETEVT(OCS_EVT_ABORT_IO_NO_RESP)
+ RETEVT(OCS_EVT_IO_CMPL)
+ RETEVT(OCS_EVT_IO_CMPL_ERRORS)
+ RETEVT(OCS_EVT_RESP_CMPL)
+ RETEVT(OCS_EVT_ABORT_CMPL)
+ RETEVT(OCS_EVT_NODE_ACTIVE_IO_LIST_EMPTY)
+ RETEVT(OCS_EVT_NODE_DEL_INI_COMPLETE)
+ RETEVT(OCS_EVT_NODE_DEL_TGT_COMPLETE)
+ RETEVT(OCS_EVT_IO_ABORTED_BY_TMF)
+ RETEVT(OCS_EVT_IO_ABORT_IGNORED)
+ RETEVT(OCS_EVT_IO_FIRST_BURST)
+ RETEVT(OCS_EVT_IO_FIRST_BURST_ERR)
+ RETEVT(OCS_EVT_IO_FIRST_BURST_ABORTED)
+
+ default:
+ break;
+ #undef RETEVT
+ }
+ return "unknown";
+}
diff --git a/sys/dev/ocs_fc/ocs_sm.h b/sys/dev/ocs_fc/ocs_sm.h
new file mode 100644
index 000000000000..190c2df253e6
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs_sm.h
@@ -0,0 +1,203 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/**
+ * @file
+ * Generic state machine framework declarations.
+ */
+
+#ifndef _OCS_SM_H
+#define _OCS_SM_H
+
+/**
+ * State Machine (SM) IDs.
+ */
+enum {
+ OCS_SM_COMMON = 0,
+ OCS_SM_DOMAIN,
+ OCS_SM_PORT,
+ OCS_SM_LOGIN,
+ OCS_SM_LAST
+};
+
+#define OCS_SM_EVENT_SHIFT 24
+#define OCS_SM_EVENT_START(id) ((id) << OCS_SM_EVENT_SHIFT)
+
+extern const char *ocs_sm_id[]; /**< String format of the above enums. */
+
+/**
+ * State Machine events.
+ */
+typedef enum {
+ /* Common Events */
+ OCS_EVT_ENTER = OCS_SM_EVENT_START(OCS_SM_COMMON),
+ OCS_EVT_REENTER,
+ OCS_EVT_EXIT,
+ OCS_EVT_SHUTDOWN,
+ OCS_EVT_ALL_CHILD_NODES_FREE,
+ OCS_EVT_RESUME,
+ OCS_EVT_TIMER_EXPIRED,
+
+ /* Domain Events */
+ OCS_EVT_RESPONSE = OCS_SM_EVENT_START(OCS_SM_DOMAIN),
+ OCS_EVT_ERROR,
+
+ OCS_EVT_DOMAIN_FOUND,
+ OCS_EVT_DOMAIN_ALLOC_OK,
+ OCS_EVT_DOMAIN_ALLOC_FAIL,
+ OCS_EVT_DOMAIN_REQ_ATTACH,
+ OCS_EVT_DOMAIN_ATTACH_OK,
+ OCS_EVT_DOMAIN_ATTACH_FAIL,
+ OCS_EVT_DOMAIN_LOST,
+ OCS_EVT_DOMAIN_FREE_OK,
+ OCS_EVT_DOMAIN_FREE_FAIL,
+ OCS_EVT_HW_DOMAIN_REQ_ATTACH,
+ OCS_EVT_HW_DOMAIN_REQ_FREE,
+
+ /* Sport Events */
+ OCS_EVT_SPORT_ALLOC_OK = OCS_SM_EVENT_START(OCS_SM_PORT),
+ OCS_EVT_SPORT_ALLOC_FAIL,
+ OCS_EVT_SPORT_ATTACH_OK,
+ OCS_EVT_SPORT_ATTACH_FAIL,
+ OCS_EVT_SPORT_FREE_OK,
+ OCS_EVT_SPORT_FREE_FAIL,
+ OCS_EVT_SPORT_TOPOLOGY_NOTIFY,
+ OCS_EVT_HW_PORT_ALLOC_OK,
+ OCS_EVT_HW_PORT_ALLOC_FAIL,
+ OCS_EVT_HW_PORT_ATTACH_OK,
+ OCS_EVT_HW_PORT_REQ_ATTACH,
+ OCS_EVT_HW_PORT_REQ_FREE,
+ OCS_EVT_HW_PORT_FREE_OK,
+
+ /* Login Events */
+ OCS_EVT_SRRS_ELS_REQ_OK = OCS_SM_EVENT_START(OCS_SM_LOGIN),
+ OCS_EVT_SRRS_ELS_CMPL_OK,
+ OCS_EVT_SRRS_ELS_REQ_FAIL,
+ OCS_EVT_SRRS_ELS_CMPL_FAIL,
+ OCS_EVT_SRRS_ELS_REQ_RJT,
+ OCS_EVT_NODE_ATTACH_OK,
+ OCS_EVT_NODE_ATTACH_FAIL,
+ OCS_EVT_NODE_FREE_OK,
+ OCS_EVT_NODE_FREE_FAIL,
+ OCS_EVT_ELS_FRAME,
+ OCS_EVT_ELS_REQ_TIMEOUT,
+ OCS_EVT_ELS_REQ_ABORTED,
+ OCS_EVT_ABORT_ELS, /**< request an ELS IO be aborted */
+ OCS_EVT_ELS_ABORT_CMPL, /**< ELS abort process complete */
+
+ OCS_EVT_ABTS_RCVD,
+
+ OCS_EVT_NODE_MISSING, /**< node is not in the GID_PT payload */
+ OCS_EVT_NODE_REFOUND, /**< node is allocated and in the GID_PT payload */
+ OCS_EVT_SHUTDOWN_IMPLICIT_LOGO, /**< node shutting down due to PLOGI recvd (implicit logo) */
+ OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, /**< node shutting down due to LOGO recvd/sent (explicit logo) */
+
+ OCS_EVT_PLOGI_RCVD,
+ OCS_EVT_FLOGI_RCVD,
+ OCS_EVT_LOGO_RCVD,
+ OCS_EVT_RRQ_RCVD,
+ OCS_EVT_PRLI_RCVD,
+ OCS_EVT_PRLO_RCVD,
+ OCS_EVT_PDISC_RCVD,
+ OCS_EVT_FDISC_RCVD,
+ OCS_EVT_ADISC_RCVD,
+ OCS_EVT_RSCN_RCVD,
+ OCS_EVT_SCR_RCVD,
+ OCS_EVT_ELS_RCVD,
+
+ OCS_EVT_FCP_CMD_RCVD,
+
+ /* Used by fabric emulation */
+ OCS_EVT_RFT_ID_RCVD,
+ OCS_EVT_RFF_ID_RCVD,
+ OCS_EVT_GNN_ID_RCVD,
+ OCS_EVT_GPN_ID_RCVD,
+ OCS_EVT_GFPN_ID_RCVD,
+ OCS_EVT_GFF_ID_RCVD,
+ OCS_EVT_GID_FT_RCVD,
+ OCS_EVT_GID_PT_RCVD,
+ OCS_EVT_RPN_ID_RCVD,
+ OCS_EVT_RNN_ID_RCVD,
+ OCS_EVT_RCS_ID_RCVD,
+ OCS_EVT_RSNN_NN_RCVD,
+ OCS_EVT_RSPN_ID_RCVD,
+ OCS_EVT_RHBA_RCVD,
+ OCS_EVT_RPA_RCVD,
+
+ OCS_EVT_GIDPT_DELAY_EXPIRED,
+
+ /* SCSI Target Server events */
+ OCS_EVT_ABORT_IO,
+ OCS_EVT_ABORT_IO_NO_RESP,
+ OCS_EVT_IO_CMPL,
+ OCS_EVT_IO_CMPL_ERRORS,
+ OCS_EVT_RESP_CMPL,
+ OCS_EVT_ABORT_CMPL,
+ OCS_EVT_NODE_ACTIVE_IO_LIST_EMPTY,
+ OCS_EVT_NODE_DEL_INI_COMPLETE,
+ OCS_EVT_NODE_DEL_TGT_COMPLETE,
+ OCS_EVT_IO_ABORTED_BY_TMF,
+ OCS_EVT_IO_ABORT_IGNORED,
+ OCS_EVT_IO_FIRST_BURST,
+ OCS_EVT_IO_FIRST_BURST_ERR,
+ OCS_EVT_IO_FIRST_BURST_ABORTED,
+
+ /* Must be last */
+ OCS_EVT_LAST
+} ocs_sm_event_t;
+
+/* Declare ocs_sm_ctx_s */
+typedef struct ocs_sm_ctx_s ocs_sm_ctx_t;
+
+/* State machine state function */
+typedef void *(*ocs_sm_function_t)(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
+
+/* State machine context header */
+struct ocs_sm_ctx_s {
+ ocs_sm_function_t current_state;
+ const char *description;
+ void *app; /**< Application-specific handle. */
+};
+
+extern int ocs_sm_post_event(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
+extern void ocs_sm_transition(ocs_sm_ctx_t *, ocs_sm_function_t, void *);
+extern void ocs_sm_disable(ocs_sm_ctx_t *ctx);
+extern const char *ocs_sm_event_name(ocs_sm_event_t evt);
+
+#if 0
+#define smtrace(sm) ocs_log_debug(NULL, "%s: %-20s --> %s\n", sm, ocs_sm_event_name(evt), __func__)
+#else
+#define smtrace(...)
+#endif
+
+#endif /* ! _OCS_SM_H */
diff --git a/sys/dev/ocs_fc/ocs_sport.c b/sys/dev/ocs_fc/ocs_sport.c
new file mode 100644
index 000000000000..d39d45180723
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs_sport.c
@@ -0,0 +1,1926 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/**
+ * @file
+ * Details SLI port (sport) functions.
+ */
+
+
+#include "ocs.h"
+#include "ocs_fabric.h"
+#include "ocs_els.h"
+#include "ocs_device.h"
+
+static void ocs_vport_update_spec(ocs_sport_t *sport);
+static void ocs_vport_link_down(ocs_sport_t *sport);
+
+void ocs_mgmt_sport_list(ocs_textbuf_t *textbuf, void *sport);
+void ocs_mgmt_sport_get_all(ocs_textbuf_t *textbuf, void *sport);
+int ocs_mgmt_sport_get(ocs_textbuf_t *textbuf, char *parent, char *name, void *sport);
+int ocs_mgmt_sport_set(char *parent, char *name, char *value, void *sport);
+int ocs_mgmt_sport_exec(char *parent, char *action, void *arg_in, uint32_t arg_in_length,
+ void *arg_out, uint32_t arg_out_length, void *sport);
+static ocs_mgmt_functions_t sport_mgmt_functions = {
+ .get_list_handler = ocs_mgmt_sport_list,
+ .get_handler = ocs_mgmt_sport_get,
+ .get_all_handler = ocs_mgmt_sport_get_all,
+ .set_handler = ocs_mgmt_sport_set,
+ .exec_handler = ocs_mgmt_sport_exec,
+};
+
+/*!
+@defgroup sport_sm SLI Port (sport) State Machine: States
+*/
+
+/**
+ * @ingroup sport_sm
+ * @brief SLI port HW callback.
+ *
+ * @par Description
+ * This function is called in response to a HW sport event. This code resolves
+ * the reference to the sport object, and posts the corresponding event.
+ *
+ * @param arg Pointer to the OCS context.
+ * @param event HW sport event.
+ * @param data Application-specific event (pointer to the sport).
+ *
+ * @return Returns 0 on success, or a negative error value on failure.
+ */
+
+int32_t
+ocs_port_cb(void *arg, ocs_hw_port_event_e event, void *data)
+{
+ ocs_t *ocs = arg;
+ ocs_sli_port_t *sport = data;
+
+ switch (event) {
+ case OCS_HW_PORT_ALLOC_OK:
+ ocs_log_debug(ocs, "OCS_HW_PORT_ALLOC_OK\n");
+ ocs_sm_post_event(&sport->sm, OCS_EVT_SPORT_ALLOC_OK, NULL);
+ break;
+ case OCS_HW_PORT_ALLOC_FAIL:
+ ocs_log_debug(ocs, "OCS_HW_PORT_ALLOC_FAIL\n");
+ ocs_sm_post_event(&sport->sm, OCS_EVT_SPORT_ALLOC_FAIL, NULL);
+ break;
+ case OCS_HW_PORT_ATTACH_OK:
+ ocs_log_debug(ocs, "OCS_HW_PORT_ATTACH_OK\n");
+ ocs_sm_post_event(&sport->sm, OCS_EVT_SPORT_ATTACH_OK, NULL);
+ break;
+ case OCS_HW_PORT_ATTACH_FAIL:
+ ocs_log_debug(ocs, "OCS_HW_PORT_ATTACH_FAIL\n");
+ ocs_sm_post_event(&sport->sm, OCS_EVT_SPORT_ATTACH_FAIL, NULL);
+ break;
+ case OCS_HW_PORT_FREE_OK:
+ ocs_log_debug(ocs, "OCS_HW_PORT_FREE_OK\n");
+ ocs_sm_post_event(&sport->sm, OCS_EVT_SPORT_FREE_OK, NULL);
+ break;
+ case OCS_HW_PORT_FREE_FAIL:
+ ocs_log_debug(ocs, "OCS_HW_PORT_FREE_FAIL\n");
+ ocs_sm_post_event(&sport->sm, OCS_EVT_SPORT_FREE_FAIL, NULL);
+ break;
+ default:
+ ocs_log_test(ocs, "unknown event %#x\n", event);
+ }
+
+ return 0;
+}
+
+/**
+ * @ingroup sport_sm
+ * @brief Allocate a SLI port object.
+ *
+ * @par Description
+ * A sport object is allocated and associated with the domain. Various
+ * structure members are initialized.
+ *
+ * @param domain Pointer to the domain structure.
+ * @param wwpn World wide port name in host endian.
+ * @param wwnn World wide node name in host endian.
+ * @param fc_id Port ID of the sport; specify a value, or use UINT32_MAX to let the fabric choose.
+ * @param enable_ini Enables initiator capability on this port using a non-zero value.
+ * @param enable_tgt Enables target capability on this port using a non-zero value.
+ *
+ * @return Pointer to an ocs_sport_t object; or NULL.
+ */
+
+ocs_sport_t *
+ocs_sport_alloc(ocs_domain_t *domain, uint64_t wwpn, uint64_t wwnn, uint32_t fc_id, uint8_t enable_ini, uint8_t enable_tgt)
+{
+ ocs_sport_t *sport;
+
+ if (domain->ocs->ctrlmask & OCS_CTRLMASK_INHIBIT_INITIATOR) {
+ enable_ini = 0;
+ }
+
+ /* Return a failure if this sport has already been allocated */
+ if (wwpn != 0) {
+ sport = ocs_sport_find_wwn(domain, wwnn, wwpn);
+ if (sport != NULL) {
+ ocs_log_test(domain->ocs, "Failed: SPORT %016llx %016llx already allocated\n",
+ (unsigned long long)wwnn, (unsigned long long)wwpn);
+ return NULL;
+ }
+ }
+
+ sport = ocs_malloc(domain->ocs, sizeof(*sport), OCS_M_NOWAIT | OCS_M_ZERO);
+ if (sport) {
+ sport->ocs = domain->ocs;
+ ocs_snprintf(sport->display_name, sizeof(sport->display_name), "------");
+ sport->domain = domain;
+ sport->lookup = spv_new(domain->ocs);
+ sport->instance_index = domain->sport_instance_count++;
+ ocs_sport_lock_init(sport);
+ ocs_list_init(&sport->node_list, ocs_node_t, link);
+ sport->sm.app = sport;
+ sport->enable_ini = enable_ini;
+ sport->enable_tgt = enable_tgt;
+ sport->enable_rscn = (sport->enable_ini || (sport->enable_tgt && enable_target_rscn(sport->ocs)));
+
+ /* Copy service parameters from domain */
+ ocs_memcpy(sport->service_params, domain->service_params, sizeof(fc_plogi_payload_t));
+
+ /* Update requested fc_id */
+ sport->fc_id = fc_id;
+
+ /* Update the sport's service parameters for the new wwn's */
+ sport->wwpn = wwpn;
+ sport->wwnn = wwnn;
+ ocs_snprintf(sport->wwnn_str, sizeof(sport->wwnn_str), "%016llx" , (unsigned long long)wwnn);
+
+ /* Initialize node group list */
+ ocs_lock_init(sport->ocs, &sport->node_group_lock, "node_group_lock[%d]", sport->instance_index);
+ ocs_list_init(&sport->node_group_dir_list, ocs_node_group_dir_t, link);
+
+ /* if this is the "first" sport of the domain, then make it the "phys" sport */
+ ocs_domain_lock(domain);
+ if (ocs_list_empty(&domain->sport_list)) {
+ domain->sport = sport;
+ }
+
+ ocs_list_add_tail(&domain->sport_list, sport);
+ ocs_domain_unlock(domain);
+
+ sport->mgmt_functions = &sport_mgmt_functions;
+
+ ocs_log_debug(domain->ocs, "[%s] allocate sport\n", sport->display_name);
+ }
+ return sport;
+}
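+
+/*
+ * Usage sketch (hypothetical caller, not part of this file): allocate a
+ * sport on a domain and start its state machine. Passing UINT32_MAX as
+ * fc_id lets the fabric assign the port ID; wwpn/wwnn are example values.
+ *
+ *	ocs_sport_t *sport;
+ *
+ *	sport = ocs_sport_alloc(domain, wwpn, wwnn, UINT32_MAX, TRUE, FALSE);
+ *	if (sport != NULL)
+ *		ocs_sm_transition(&sport->sm, __ocs_sport_allocated, NULL);
+ */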
+
+/**
+ * @ingroup sport_sm
+ * @brief Free a SLI port object.
+ *
+ * @par Description
+ * The sport object is freed.
+ *
+ * @param sport Pointer to the SLI port object.
+ *
+ * @return None.
+ */
+
+void
+ocs_sport_free(ocs_sport_t *sport)
+{
+ ocs_domain_t *domain;
+ ocs_node_group_dir_t *node_group_dir;
+ ocs_node_group_dir_t *node_group_dir_next;
+ int post_all_free = FALSE;
+
+ if (sport) {
+ domain = sport->domain;
+ ocs_log_debug(domain->ocs, "[%s] free sport\n", sport->display_name);
+ ocs_domain_lock(domain);
+ ocs_list_remove(&domain->sport_list, sport);
+ ocs_sport_lock(sport);
+ spv_del(sport->lookup);
+ sport->lookup = NULL;
+
+ ocs_lock(&domain->lookup_lock);
+ /* Remove the sport from the domain's sparse vector lookup table */
+ spv_set(domain->lookup, sport->fc_id, NULL);
+ ocs_unlock(&domain->lookup_lock);
+
+ /* if this is the physical sport, then clear it out of the domain */
+ if (sport == domain->sport) {
+ domain->sport = NULL;
+ }
+
+ /*
+ * If the domain's sport_list is empty, then post the ALL_NODES_FREE event to the domain,
+ * after the lock is released. The domain may be free'd as a result of the event.
+ */
+ if (ocs_list_empty(&domain->sport_list)) {
+ post_all_free = TRUE;
+ }
+
+ /* Free any node group directories */
+ ocs_lock(&sport->node_group_lock);
+ ocs_list_foreach_safe(&sport->node_group_dir_list, node_group_dir, node_group_dir_next) {
+ ocs_unlock(&sport->node_group_lock);
+ ocs_node_group_dir_free(node_group_dir);
+ ocs_lock(&sport->node_group_lock);
+ }
+ ocs_unlock(&sport->node_group_lock);
+ ocs_sport_unlock(sport);
+ ocs_domain_unlock(domain);
+
+ if (post_all_free) {
+ ocs_domain_post_event(domain, OCS_EVT_ALL_CHILD_NODES_FREE, NULL);
+ }
+
+ ocs_sport_lock_free(sport);
+ ocs_lock_free(&sport->node_group_lock);
+ ocs_scsi_sport_deleted(sport);
+
+ ocs_free(domain->ocs, sport, sizeof(*sport));
+
+ }
+}
+
+/**
+ * @ingroup sport_sm
+ * @brief Free memory resources of a SLI port object.
+ *
+ * @par Description
+ * The sport's child nodes are force-freed first, and then the sport object itself is freed.
+ *
+ * @param sport Pointer to the SLI port object.
+ *
+ * @return None.
+ */
+
+void ocs_sport_force_free(ocs_sport_t *sport)
+{
+ ocs_node_t *node;
+ ocs_node_t *next;
+
+ /* shutdown sm processing */
+ ocs_sm_disable(&sport->sm);
+
+ ocs_scsi_notify_sport_force_free(sport);
+
+ ocs_sport_lock(sport);
+ ocs_list_foreach_safe(&sport->node_list, node, next) {
+ ocs_node_force_free(node);
+ }
+ ocs_sport_unlock(sport);
+ ocs_sport_free(sport);
+}
+
+/**
+ * @ingroup sport_sm
+ * @brief Return a SLI port object, given an instance index.
+ *
+ * @par Description
+ * A pointer to a sport object is returned, given its instance @c index.
+ *
+ * @param domain Pointer to the domain.
+ * @param index Instance index value to find.
+ *
+ * @return Returns a pointer to the ocs_sport_t object; or NULL.
+ */
+
+ocs_sport_t *
+ocs_sport_get_instance(ocs_domain_t *domain, uint32_t index)
+{
+ ocs_sport_t *sport;
+
+ ocs_domain_lock(domain);
+ ocs_list_foreach(&domain->sport_list, sport) {
+ if (sport->instance_index == index) {
+ ocs_domain_unlock(domain);
+ return sport;
+ }
+ }
+ ocs_domain_unlock(domain);
+ return NULL;
+}
+
+/**
+ * @ingroup sport_sm
+ * @brief Find a SLI port object, given an FC_ID.
+ *
+ * @par Description
+ * Returns a pointer to the sport object, given an FC_ID.
+ *
+ * @param domain Pointer to the domain.
+ * @param d_id FC_ID to find.
+ *
+ * @return Returns a pointer to the ocs_sport_t; or NULL.
+ */
+
+ocs_sport_t *
+ocs_sport_find(ocs_domain_t *domain, uint32_t d_id)
+{
+ ocs_sport_t *sport;
+
+ ocs_assert(domain, NULL);
+ ocs_lock(&domain->lookup_lock);
+ if (domain->lookup == NULL) {
+ ocs_log_test(domain->ocs, "assertion failed: domain->lookup is not valid\n");
+ ocs_unlock(&domain->lookup_lock);
+ return NULL;
+ }
+
+ sport = spv_get(domain->lookup, d_id);
+ ocs_unlock(&domain->lookup_lock);
+ return sport;
+}
+
+/**
+ * @ingroup sport_sm
+ * @brief Find a SLI port, given the WWNN and WWPN.
+ *
+ * @par Description
+ * Return a pointer to a sport, given the WWNN and WWPN.
+ *
+ * @param domain Pointer to the domain.
+ * @param wwnn World wide node name.
+ * @param wwpn World wide port name.
+ *
+ * @return Returns a pointer to a SLI port, if found; or NULL.
+ */
+
+ocs_sport_t *
+ocs_sport_find_wwn(ocs_domain_t *domain, uint64_t wwnn, uint64_t wwpn)
+{
+ ocs_sport_t *sport = NULL;
+
+ ocs_domain_lock(domain);
+ ocs_list_foreach(&domain->sport_list, sport) {
+ if ((sport->wwnn == wwnn) && (sport->wwpn == wwpn)) {
+ ocs_domain_unlock(domain);
+ return sport;
+ }
+ }
+ ocs_domain_unlock(domain);
+ return NULL;
+}
+
+/**
+ * @ingroup sport_sm
+ * @brief Request a SLI port attach.
+ *
+ * @par Description
+ * External call to request an attach for a sport, given an FC_ID.
+ *
+ * @param sport Pointer to the sport context.
+ * @param fc_id FC_ID with which to attach.
+ *
+ * @return Returns 0 on success, or a negative error value on failure.
+ */
+
+int32_t
+ocs_sport_attach(ocs_sport_t *sport, uint32_t fc_id)
+{
+ ocs_hw_rtn_e rc;
+ ocs_node_t *node;
+
+ /* Set our lookup */
+ ocs_lock(&sport->domain->lookup_lock);
+ spv_set(sport->domain->lookup, fc_id, sport);
+ ocs_unlock(&sport->domain->lookup_lock);
+
+ /* Update our display_name */
+ ocs_node_fcid_display(fc_id, sport->display_name, sizeof(sport->display_name));
+ ocs_sport_lock(sport);
+ ocs_list_foreach(&sport->node_list, node) {
+ ocs_node_update_display_name(node);
+ }
+ ocs_sport_unlock(sport);
+ ocs_log_debug(sport->ocs, "[%s] attach sport: fc_id x%06x\n", sport->display_name, fc_id);
+
+ rc = ocs_hw_port_attach(&sport->ocs->hw, sport, fc_id);
+ if (rc != OCS_HW_RTN_SUCCESS) {
+ ocs_log_err(sport->ocs, "ocs_hw_port_attach failed: %d\n", rc);
+ return -1;
+ }
+ return 0;
+}
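+
+/*
+ * Usage sketch (hypothetical): once the fabric has assigned a port ID (for
+ * example, from a FLOGI or FDISC accept), the caller attaches the sport;
+ * assigned_fc_id is a placeholder for that value.
+ *
+ *	if (ocs_sport_attach(sport, assigned_fc_id) != 0) {
+ *		... handle the failure; the HW port was not attached ...
+ *	}
+ */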
+
+/**
+ * @brief Common SLI port state machine declarations and initialization.
+ */
+#define std_sport_state_decl() \
+ ocs_sport_t *sport = NULL; \
+ ocs_domain_t *domain = NULL; \
+ ocs_t *ocs = NULL; \
+ \
+ ocs_assert(ctx, NULL); \
+ sport = ctx->app; \
+ ocs_assert(sport, NULL); \
+ \
+ domain = sport->domain; \
+ ocs_assert(domain, NULL); \
+ ocs = sport->ocs; \
+ ocs_assert(ocs, NULL);
+
+/**
+ * @brief Common SLI port state machine trace logging.
+ */
+#define sport_sm_trace(sport) \
+ do { \
+ if (OCS_LOG_ENABLE_DOMAIN_SM_TRACE(ocs)) \
+ ocs_log_debug(ocs, "[%s] %-20s\n", sport->display_name, ocs_sm_event_name(evt)); \
+ } while (0)
+
+
+/**
+ * @brief SLI port state machine: Common event handler.
+ *
+ * @par Description
+ * Handle common sport events.
+ *
+ * @param funcname Function name to display.
+ * @param ctx Sport state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+static void *
+__ocs_sport_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_sport_state_decl();
+
+ switch(evt) {
+ case OCS_EVT_ENTER:
+ case OCS_EVT_REENTER:
+ case OCS_EVT_EXIT:
+ case OCS_EVT_ALL_CHILD_NODES_FREE:
+ break;
+ case OCS_EVT_SPORT_ATTACH_OK:
+ ocs_sm_transition(ctx, __ocs_sport_attached, NULL);
+ break;
+ case OCS_EVT_SHUTDOWN: {
+ ocs_node_t *node;
+ ocs_node_t *node_next;
+ int node_list_empty;
+
+ /* Flag this sport as shutting down */
+ sport->shutting_down = 1;
+
+ if (sport->is_vport) {
+ ocs_vport_link_down(sport);
+ }
+
+ ocs_sport_lock(sport);
+ node_list_empty = ocs_list_empty(&sport->node_list);
+ ocs_sport_unlock(sport);
+
+ if (node_list_empty) {
+ /* sm: node list is empty / ocs_hw_port_free
+ * Remove the sport from the domain's sparse vector lookup table */
+ ocs_lock(&domain->lookup_lock);
+ spv_set(domain->lookup, sport->fc_id, NULL);
+ ocs_unlock(&domain->lookup_lock);
+ ocs_sm_transition(ctx, __ocs_sport_wait_port_free, NULL);
+ if (ocs_hw_port_free(&ocs->hw, sport)) {
+ ocs_log_test(sport->ocs, "ocs_hw_port_free failed\n");
+ /* Not much we can do; free the sport anyway */
+ ocs_sport_free(sport);
+ }
+ } else {
+ /* sm: node list is not empty / shutdown nodes */
+ ocs_sm_transition(ctx, __ocs_sport_wait_shutdown, NULL);
+ ocs_sport_lock(sport);
+ ocs_list_foreach_safe(&sport->node_list, node, node_next) {
+ /*
+ * If this is a vport, logout of the fabric controller so that it
+ * deletes the vport on the switch.
+ */
+ if((node->rnode.fc_id == FC_ADDR_FABRIC) && (sport->is_vport)) {
+ /* if link is down, don't send logo */
+ if (sport->ocs->hw.link.status == SLI_LINK_STATUS_DOWN) {
+ ocs_node_post_event(node, OCS_EVT_SHUTDOWN, NULL);
+ } else {
+ ocs_log_debug(ocs,"[%s] sport shutdown vport,sending logo to node\n",
+ node->display_name);
+
+ if (ocs_send_logo(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT,
+ 0, NULL, NULL) == NULL) {
+ /* failed to send LOGO, go ahead and cleanup node anyways */
+ node_printf(node, "Failed to send LOGO\n");
+ ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
+ } else {
+ /* sent LOGO, wait for response */
+ ocs_node_transition(node, __ocs_d_wait_logo_rsp, NULL);
+ }
+ }
+ } else {
+ ocs_node_post_event(node, OCS_EVT_SHUTDOWN, NULL);
+ }
+ }
+ ocs_sport_unlock(sport);
+ }
+ break;
+ }
+ default:
+ ocs_log_test(sport->ocs, "[%s] %-20s %-20s not handled\n", sport->display_name, funcname, ocs_sm_event_name(evt));
+ break;
+ }
+
+ return NULL;
+}
+
+/**
+ * @ingroup sport_sm
+ * @brief SLI port state machine: Physical sport allocated.
+ *
+ * @par Description
+ * This is the initial state for sport objects.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_sport_allocated(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_sport_state_decl();
+
+ sport_sm_trace(sport);
+
+ switch(evt) {
+ /* the physical sport is attached */
+ case OCS_EVT_SPORT_ATTACH_OK:
+ ocs_assert(sport == domain->sport, NULL);
+ ocs_sm_transition(ctx, __ocs_sport_attached, NULL);
+ break;
+
+ case OCS_EVT_SPORT_ALLOC_OK:
+ /* ignore */
+ break;
+ default:
+ __ocs_sport_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+ return NULL;
+}
+
+/**
+ * @ingroup sport_sm
+ * @brief SLI port state machine: Handle initial virtual port events.
+ *
+ * @par Description
+ * This state is entered when a virtual port is instantiated.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_sport_vport_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_sport_state_decl();
+
+ sport_sm_trace(sport);
+
+ switch(evt) {
+ case OCS_EVT_ENTER: {
+ uint64_t be_wwpn = ocs_htobe64(sport->wwpn);
+
+ if (sport->wwpn == 0) {
+ ocs_log_debug(ocs, "vport: letting f/w select WWN\n");
+ }
+
+ if (sport->fc_id != UINT32_MAX) {
+ ocs_log_debug(ocs, "vport: hard coding port id: %x\n", sport->fc_id);
+ }
+
+ ocs_sm_transition(ctx, __ocs_sport_vport_wait_alloc, NULL);
+ /* If wwpn is zero, let the firmware select the WWN */
+ if (ocs_hw_port_alloc(&ocs->hw, sport, sport->domain,
+ (sport->wwpn == 0) ? NULL : (uint8_t *)&be_wwpn)) {
+ ocs_log_err(ocs, "Can't allocate port\n");
+ break;
+ }
+
+ break;
+ }
+ default:
+ __ocs_sport_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+ return NULL;
+}
+
+/**
+ * @ingroup sport_sm
+ * @brief SLI port state machine: Wait for the HW SLI port allocation to complete.
+ *
+ * @par Description
+ * Waits for the HW sport allocation request to complete.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_sport_vport_wait_alloc(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_sport_state_decl();
+
+ sport_sm_trace(sport);
+
+ switch(evt) {
+ case OCS_EVT_SPORT_ALLOC_OK: {
+ fc_plogi_payload_t *sp = (fc_plogi_payload_t*) sport->service_params;
+ ocs_node_t *fabric;
+
+ /* If we let the f/w assign the WWNs, then update the sport WWNs with those returned by the HW */
+ if (sport->wwnn == 0) {
+ sport->wwnn = ocs_be64toh(sport->sli_wwnn);
+ sport->wwpn = ocs_be64toh(sport->sli_wwpn);
+ ocs_snprintf(sport->wwnn_str, sizeof(sport->wwnn_str), "%016llx", (unsigned long long)sport->wwnn);
+ }
+
+ /* Update the sport's service parameters */
+ sp->port_name_hi = ocs_htobe32((uint32_t) (sport->wwpn >> 32ll));
+ sp->port_name_lo = ocs_htobe32((uint32_t) sport->wwpn);
+ sp->node_name_hi = ocs_htobe32((uint32_t) (sport->wwnn >> 32ll));
+ sp->node_name_lo = ocs_htobe32((uint32_t) sport->wwnn);
+
+ /* if sport->fc_id is uninitialized, then request that the fabric node use FDISC
+ * to find an fc_id. Otherwise we're restoring vports, or we're in
+ * fabric emulation mode, so attach the fc_id
+ */
+ if (sport->fc_id == UINT32_MAX) {
+ fabric = ocs_node_alloc(sport, FC_ADDR_FABRIC, FALSE, FALSE);
+ if (fabric == NULL) {
+ ocs_log_err(ocs, "ocs_node_alloc() failed\n");
+ return NULL;
+ }
+ ocs_node_transition(fabric, __ocs_vport_fabric_init, NULL);
+ } else {
+ ocs_snprintf(sport->wwnn_str, sizeof(sport->wwnn_str), "%016llx", (unsigned long long)sport->wwnn);
+ ocs_sport_attach(sport, sport->fc_id);
+ }
+ ocs_sm_transition(ctx, __ocs_sport_vport_allocated, NULL);
+ break;
+ }
+ default:
+ __ocs_sport_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+ return NULL;
+}
+
+/**
+ * @ingroup sport_sm
+ * @brief SLI port state machine: virtual sport allocated.
+ *
+ * @par Description
+ * This state is entered after the sport is allocated; it then waits for a fabric node
+ * FDISC to complete, which requests a sport attach.
+ * The sport attach completion is handled in this state.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_sport_vport_allocated(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_sport_state_decl();
+
+ sport_sm_trace(sport);
+
+ switch(evt) {
+ case OCS_EVT_SPORT_ATTACH_OK: {
+ ocs_node_t *node;
+
+ if (!(domain->femul_enable)) {
+ /* Find our fabric node, and forward this event */
+ node = ocs_node_find(sport, FC_ADDR_FABRIC);
+ if (node == NULL) {
+ ocs_log_test(ocs, "can't find node %06x\n", FC_ADDR_FABRIC);
+ break;
+ }
+ /* sm: / forward sport attach to fabric node */
+ ocs_node_post_event(node, evt, NULL);
+ }
+ ocs_sm_transition(ctx, __ocs_sport_attached, NULL);
+ break;
+ }
+ default:
+ __ocs_sport_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+ return NULL;
+}
+
+/**
+ * @ingroup sport_sm
+ * @brief SLI port state machine: Attached.
+ *
+ * @par Description
+ * State entered after the sport attach has completed.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_sport_attached(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_sport_state_decl();
+
+ sport_sm_trace(sport);
+
+ switch(evt) {
+ case OCS_EVT_ENTER: {
+ ocs_node_t *node;
+
+ ocs_log_debug(ocs, "[%s] SPORT attached WWPN %016llx WWNN %016llx \n", (unsigned long long)sport->display_name,
+ sport->wwpn, sport->wwnn);
+ ocs_sport_lock(sport);
+ ocs_list_foreach(&sport->node_list, node) {
+ ocs_node_update_display_name(node);
+ }
+ ocs_sport_unlock(sport);
+ sport->tgt_id = sport->fc_id;
+ if (sport->enable_ini) {
+ ocs_scsi_ini_new_sport(sport);
+ }
+ if (sport->enable_tgt) {
+ ocs_scsi_tgt_new_sport(sport);
+ }
+
+ /* Update the vport parameters (if this is not the physical sport) */
+ if (sport->is_vport) {
+ ocs_vport_update_spec(sport);
+ }
+
+ break;
+ }
+
+ case OCS_EVT_EXIT:
+ ocs_log_debug(ocs, "[%s] SPORT deattached WWPN %016llx WWNN %016llx \n", (unsigned long long)sport->display_name,
+ sport->wwpn, sport->wwnn);
+ if (sport->enable_ini) {
+ ocs_scsi_ini_del_sport(sport);
+ }
+ if (sport->enable_tgt) {
+ ocs_scsi_tgt_del_sport(sport);
+ }
+ break;
+ default:
+ __ocs_sport_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+ return NULL;
+}
+
+/**
+ * @ingroup sport_sm
+ * @brief SLI port state machine: Wait for the node shutdowns to complete.
+ *
+ * @par Description
+ * Waits for the ALL_CHILD_NODES_FREE event to be posted from the node
+ * shutdown process.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_sport_wait_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_sport_state_decl();
+
+ sport_sm_trace(sport);
+
+ switch(evt) {
+ case OCS_EVT_SPORT_ALLOC_OK:
+ case OCS_EVT_SPORT_ALLOC_FAIL:
+ case OCS_EVT_SPORT_ATTACH_OK:
+ case OCS_EVT_SPORT_ATTACH_FAIL:
+ /* ignore these events - just wait for the all free event */
+ break;
+
+ case OCS_EVT_ALL_CHILD_NODES_FREE: {
+ /* Remove the sport from the domain's sparse vector lookup table */
+ ocs_lock(&domain->lookup_lock);
+ spv_set(domain->lookup, sport->fc_id, NULL);
+ ocs_unlock(&domain->lookup_lock);
+ ocs_sm_transition(ctx, __ocs_sport_wait_port_free, NULL);
+ if (ocs_hw_port_free(&ocs->hw, sport)) {
+ ocs_log_err(sport->ocs, "ocs_hw_port_free failed\n");
+ /* Not much we can do; free the sport anyway */
+ ocs_sport_free(sport);
+ }
+ break;
+ }
+ default:
+ __ocs_sport_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+ return NULL;
+}
+
+/**
+ * @ingroup sport_sm
+ * @brief SLI port state machine: Wait for the HW's port free to complete.
+ *
+ * @par Description
+ * Waits for the HW's port free to complete.
+ *
+ * @param ctx Remote node state machine context.
+ * @param evt Event to process.
+ * @param arg Per event optional argument.
+ *
+ * @return Returns NULL.
+ */
+
+void *
+__ocs_sport_wait_port_free(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
+{
+ std_sport_state_decl();
+
+ sport_sm_trace(sport);
+
+ switch(evt) {
+ case OCS_EVT_SPORT_ATTACH_OK:
+ /* Ignore as we are waiting for the free CB */
+ break;
+ case OCS_EVT_SPORT_FREE_OK: {
+ /* All done, free myself */
+ ocs_sport_free(sport);
+ break;
+ }
+ default:
+ __ocs_sport_common(__func__, ctx, evt, arg);
+ return NULL;
+ }
+ return NULL;
+}
+
+/**
+ * @ingroup sport_sm
+ * @brief Start the vports on a domain.
+ *
+ * @par Description
+ * Use the vport specification to find the associated vports and start them.
+ *
+ * @param domain Pointer to the domain context.
+ *
+ * @return Returns 0 on success, or a negative error value on failure.
+ */
+int32_t
+ocs_vport_start(ocs_domain_t *domain)
+{
+ ocs_t *ocs = domain->ocs;
+ ocs_xport_t *xport = ocs->xport;
+ ocs_vport_spec_t *vport;
+ ocs_vport_spec_t *next;
+ ocs_sport_t *sport;
+ int32_t rc = 0;
+
+ ocs_device_lock(ocs);
+ ocs_list_foreach_safe(&xport->vport_list, vport, next) {
+ if (vport->domain_instance == domain->instance_index &&
+ vport->sport == NULL) {
+ /* If role not set, skip this vport */
+ if (!(vport->enable_ini || vport->enable_tgt)) {
+ continue;
+ }
+
+ /* Allocate a sport */
+ vport->sport = sport = ocs_sport_alloc(domain, vport->wwpn, vport->wwnn, vport->fc_id,
+ vport->enable_ini, vport->enable_tgt);
+ if (sport == NULL) {
+ rc = -1;
+ } else {
+ sport->is_vport = 1;
+ sport->tgt_data = vport->tgt_data;
+ sport->ini_data = vport->ini_data;
+
+ /* Transition to vport_init */
+ ocs_sm_transition(&sport->sm, __ocs_sport_vport_init, NULL);
+ }
+ }
+ }
+ ocs_device_unlock(ocs);
+ return rc;
+}
+
+/**
+ * @ingroup sport_sm
+ * @brief Clear the sport reference in the vport specification.
+ *
+ * @par Description
+ * Clear the sport pointer on the vport specification when the vport is torn down. This allows it to be
+ * re-created when the link is re-established.
+ *
+ * @param sport Pointer to the sport context.
+ */
+static void
+ocs_vport_link_down(ocs_sport_t *sport)
+{
+ ocs_t *ocs = sport->ocs;
+ ocs_xport_t *xport = ocs->xport;
+ ocs_vport_spec_t *vport;
+
+ ocs_device_lock(ocs);
+ ocs_list_foreach(&xport->vport_list, vport) {
+ if (vport->sport == sport) {
+ vport->sport = NULL;
+ break;
+ }
+ }
+ ocs_device_unlock(ocs);
+}
+
+/**
+ * @ingroup sport_sm
+ * @brief Allocate a new virtual SLI port.
+ *
+ * @par Description
+ * A new sport is created, in response to an external management request.
+ *
+ * @n @b Note: If the WWPN is zero, the firmware will assign the WWNs.
+ *
+ * @param domain Pointer to the domain context.
+ * @param wwpn World wide port name.
+ * @param wwnn World wide node name.
+ * @param fc_id Requested port ID (used in fabric emulation mode).
+ * @param ini TRUE if the port is created as an initiator node.
+ * @param tgt TRUE if the port is created as a target node.
+ * @param tgt_data Pointer to target-specific data.
+ * @param ini_data Pointer to initiator-specific data.
+ * @param restore_vport If TRUE, then the vport will be re-created automatically
+ * on link disruption.
+ *
+ * @return Returns 0 on success; or a negative error value on failure.
+ */
+
+int32_t
+ocs_sport_vport_new(ocs_domain_t *domain, uint64_t wwpn, uint64_t wwnn,
+ uint32_t fc_id, uint8_t ini, uint8_t tgt, void *tgt_data,
+ void *ini_data, uint8_t restore_vport)
+{
+ ocs_sport_t *sport;
+
+ if (ini && (domain->ocs->enable_ini == 0)) {
+ ocs_log_test(domain->ocs, "driver initiator functionality not enabled\n");
+ return -1;
+ }
+
+ if (tgt && (domain->ocs->enable_tgt == 0)) {
+ ocs_log_test(domain->ocs, "driver target functionality not enabled\n");
+ return -1;
+ }
+
+ /* Create a vport spec if we need to recreate this vport after a link up event */
+ if (restore_vport) {
+ if (ocs_vport_create_spec(domain->ocs, wwnn, wwpn, fc_id, ini, tgt, tgt_data, ini_data)) {
+ ocs_log_test(domain->ocs, "failed to create vport object entry\n");
+ return -1;
+ }
+ return ocs_vport_start(domain);
+ }
+
+ /* Allocate a sport */
+ sport = ocs_sport_alloc(domain, wwpn, wwnn, fc_id, ini, tgt);
+
+ if (sport == NULL) {
+ return -1;
+ }
+
+ sport->is_vport = 1;
+ sport->tgt_data = tgt_data;
+ sport->ini_data = ini_data;
+
+ /* Transition to vport_init */
+ ocs_sm_transition(&sport->sm, __ocs_sport_vport_init, NULL);
+
+ return 0;
+}
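+
+/*
+ * Usage sketch (hypothetical values): create an initiator-only vport whose
+ * WWNs are selected by the firmware (zero wwpn/wwnn) and which is re-created
+ * automatically after a link bounce (restore_vport = TRUE):
+ *
+ *	ocs_sport_vport_new(domain, 0, 0, UINT32_MAX, TRUE, FALSE,
+ *			    NULL, NULL, TRUE);
+ */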
+
+int32_t
+ocs_sport_vport_alloc(ocs_domain_t *domain, ocs_vport_spec_t *vport)
+{
+ ocs_sport_t *sport = NULL;
+
+ if (domain == NULL) {
+ return (0);
+ }
+
+ ocs_assert((vport->sport == NULL), -1);
+
+ /* Allocate a sport */
+ vport->sport = sport = ocs_sport_alloc(domain, vport->wwpn, vport->wwnn, UINT32_MAX, vport->enable_ini, vport->enable_tgt);
+
+ if (sport == NULL) {
+ return -1;
+ }
+
+ sport->is_vport = 1;
+ sport->tgt_data = vport->tgt_data;
+ sport->ini_data = vport->ini_data;
+
+ /* Transition to vport_init */
+ ocs_sm_transition(&sport->sm, __ocs_sport_vport_init, NULL);
+
+ return (0);
+}
+
+/**
+ * @ingroup sport_sm
+ * @brief Remove a previously-allocated virtual port.
+ *
+ * @par Description
+ * A previously-allocated virtual port is removed by posting the shutdown event to the
+ * sport with a matching WWN.
+ *
+ * @param ocs Pointer to the device object.
+ * @param domain Pointer to the domain structure (may be NULL).
+ * @param wwpn World wide port name of the port to delete (host endian).
+ * @param wwnn World wide node name of the port to delete (host endian).
+ *
+ * @return Returns 0 on success, or a negative error value on failure.
+ */
+
+int32_t ocs_sport_vport_del(ocs_t *ocs, ocs_domain_t *domain, uint64_t wwpn, uint64_t wwnn)
+{
+ ocs_xport_t *xport = ocs->xport;
+ ocs_sport_t *sport;
+ int found = 0;
+ ocs_vport_spec_t *vport;
+ ocs_vport_spec_t *next;
+ uint32_t instance;
+
+ /* If no domain is given, use instance 0, otherwise use domain instance */
+ if (domain == NULL) {
+ instance = 0;
+ } else {
+ instance = domain->instance_index;
+ }
+
+ /* walk the ocs_vport_list and remove from there */
+
+ ocs_device_lock(ocs);
+ ocs_list_foreach_safe(&xport->vport_list, vport, next) {
+ if ((vport->domain_instance == instance) &&
+ (vport->wwpn == wwpn) && (vport->wwnn == wwnn)) {
+ vport->sport = NULL;
+ break;
+ }
+ }
+ ocs_device_unlock(ocs);
+
+ if (domain == NULL) {
+ /* No domain means no sport to look for */
+ return 0;
+ }
+
+ ocs_domain_lock(domain);
+ ocs_list_foreach(&domain->sport_list, sport) {
+ if ((sport->wwpn == wwpn) && (sport->wwnn == wwnn)) {
+ found = 1;
+ break;
+ }
+ }
+ if (found) {
+ /* Shutdown this SPORT */
+ ocs_sm_post_event(&sport->sm, OCS_EVT_SHUTDOWN, NULL);
+ }
+ ocs_domain_unlock(domain);
+ return 0;
+}
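+
+/*
+ * Usage sketch (hypothetical): remove a vport by WWN match. With a NULL
+ * domain only the saved vport spec is updated; no sport shutdown is posted.
+ *
+ *	ocs_sport_vport_del(ocs, domain, wwpn, wwnn);
+ */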
+
+/**
+ * @brief Force free all saved vports.
+ *
+ * @par Description
+ * Delete all device vports.
+ *
+ * @param ocs Pointer to the device object.
+ *
+ * @return None.
+ */
+
+void
+ocs_vport_del_all(ocs_t *ocs)
+{
+ ocs_xport_t *xport = ocs->xport;
+ ocs_vport_spec_t *vport;
+ ocs_vport_spec_t *next;
+
+ ocs_device_lock(ocs);
+ ocs_list_foreach_safe(&xport->vport_list, vport, next) {
+ ocs_list_remove(&xport->vport_list, vport);
+ ocs_free(ocs, vport, sizeof(*vport));
+ }
+ ocs_device_unlock(ocs);
+}
+
+/**
+ * @ingroup sport_sm
+ * @brief Generate a SLI port ddump.
+ *
+ * @par Description
+ * Generates the SLI port ddump data.
+ *
+ * @param textbuf Pointer to the text buffer.
+ * @param sport Pointer to the SLI-4 port.
+ *
+ * @return Returns 0 on success, or a negative value on failure.
+ */
+
+int
+ocs_ddump_sport(ocs_textbuf_t *textbuf, ocs_sli_port_t *sport)
+{
+ ocs_node_t *node;
+ ocs_node_group_dir_t *node_group_dir;
+ int retval = 0;
+
+ ocs_ddump_section(textbuf, "sport", sport->instance_index);
+ ocs_ddump_value(textbuf, "display_name", "%s", sport->display_name);
+
+ ocs_ddump_value(textbuf, "is_vport", "%d", sport->is_vport);
+ ocs_ddump_value(textbuf, "enable_ini", "%d", sport->enable_ini);
+ ocs_ddump_value(textbuf, "enable_tgt", "%d", sport->enable_tgt);
+ ocs_ddump_value(textbuf, "shutting_down", "%d", sport->shutting_down);
+ ocs_ddump_value(textbuf, "topology", "%d", sport->topology);
+ ocs_ddump_value(textbuf, "p2p_winner", "%d", sport->p2p_winner);
+ ocs_ddump_value(textbuf, "p2p_port_id", "%06x", sport->p2p_port_id);
+ ocs_ddump_value(textbuf, "p2p_remote_port_id", "%06x", sport->p2p_remote_port_id);
+ ocs_ddump_value(textbuf, "wwpn", "%016llx", (unsigned long long)sport->wwpn);
+ ocs_ddump_value(textbuf, "wwnn", "%016llx", (unsigned long long)sport->wwnn);
+ /*TODO: service_params */
+
+ ocs_ddump_value(textbuf, "indicator", "x%x", sport->indicator);
+ ocs_ddump_value(textbuf, "fc_id", "x%06x", sport->fc_id);
+ ocs_ddump_value(textbuf, "index", "%d", sport->index);
+
+ ocs_display_sparams(NULL, "sport_sparams", 1, textbuf, sport->service_params+4);
+
+ /* HLM dump */
+ ocs_ddump_section(textbuf, "hlm", sport->instance_index);
+ ocs_lock(&sport->node_group_lock);
+ ocs_list_foreach(&sport->node_group_dir_list, node_group_dir) {
+ ocs_remote_node_group_t *remote_node_group;
+
+ ocs_ddump_section(textbuf, "node_group_dir", node_group_dir->instance_index);
+
+ ocs_ddump_value(textbuf, "node_group_list_count", "%d", node_group_dir->node_group_list_count);
+ ocs_ddump_value(textbuf, "next_idx", "%d", node_group_dir->next_idx);
+ ocs_list_foreach(&node_group_dir->node_group_list, remote_node_group) {
+ ocs_ddump_section(textbuf, "node_group", remote_node_group->instance_index);
+ ocs_ddump_value(textbuf, "indicator", "x%x", remote_node_group->indicator);
+ ocs_ddump_value(textbuf, "index", "x%x", remote_node_group->index);
+ ocs_ddump_value(textbuf, "instance_index", "x%x", remote_node_group->instance_index);
+ ocs_ddump_endsection(textbuf, "node_group", 0);
+ }
+ ocs_ddump_endsection(textbuf, "node_group_dir", 0);
+ }
+ ocs_unlock(&sport->node_group_lock);
+ ocs_ddump_endsection(textbuf, "hlm", sport->instance_index);
+
+ ocs_scsi_ini_ddump(textbuf, OCS_SCSI_DDUMP_SPORT, sport);
+ ocs_scsi_tgt_ddump(textbuf, OCS_SCSI_DDUMP_SPORT, sport);
+
+ /* Dump all the nodes */
+ if (ocs_sport_lock_try(sport) != TRUE) {
+ /* Didn't get lock */
+ return -1;
+ }
+ /* Here the sport lock is held */
+ ocs_list_foreach(&sport->node_list, node) {
+ retval = ocs_ddump_node(textbuf, node);
+ if (retval != 0) {
+ break;
+ }
+ }
+ ocs_sport_unlock(sport);
+
+ ocs_ddump_endsection(textbuf, "sport", sport->index);
+
+ return retval;
+}
+
+
+void
+ocs_mgmt_sport_list(ocs_textbuf_t *textbuf, void *object)
+{
+ ocs_node_t *node;
+ ocs_sport_t *sport = (ocs_sport_t *)object;
+
+ ocs_mgmt_start_section(textbuf, "sport", sport->instance_index);
+
+ /* Add my status values to textbuf */
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "indicator");
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "fc_id");
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "index");
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "display_name");
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "is_vport");
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "enable_ini");
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "enable_tgt");
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "p2p");
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "p2p_winner");
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "p2p_port_id");
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "p2p_remote_port_id");
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "wwpn");
+ ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "wwnn");
+
+ if (ocs_sport_lock_try(sport) == TRUE) {
+
+ /* If we get here, then we are holding the sport lock */
+ ocs_list_foreach(&sport->node_list, node) {
+ if ((node->mgmt_functions) && (node->mgmt_functions->get_list_handler)) {
+ node->mgmt_functions->get_list_handler(textbuf, node);
+ }
+
+ }
+ ocs_sport_unlock(sport);
+ }
+
+ ocs_mgmt_end_section(textbuf, "sport", sport->instance_index);
+}
+
+int
+ocs_mgmt_sport_get(ocs_textbuf_t *textbuf, char *parent, char *name, void *object)
+{
+ ocs_node_t *node;
+ ocs_sport_t *sport = (ocs_sport_t *)object;
+ char qualifier[80];
+ int retval = -1;
+
+ ocs_mgmt_start_section(textbuf, "sport", sport->instance_index);
+
+ snprintf(qualifier, sizeof(qualifier), "%s/sport[%d]", parent, sport->instance_index);
+
+ /* If it doesn't start with my qualifier I don't know what to do with it */
+ if (ocs_strncmp(name, qualifier, strlen(qualifier)) == 0) {
+ char *unqualified_name = name + strlen(qualifier) + 1;
+
+ /* See if it's a value I can supply */
+ if (ocs_strcmp(unqualified_name, "indicator") == 0) {
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "indicator", "0x%x", sport->indicator);
+ retval = 0;
+ } else if (ocs_strcmp(unqualified_name, "fc_id") == 0) {
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "fc_id", "0x%06x", sport->fc_id);
+ retval = 0;
+ } else if (ocs_strcmp(unqualified_name, "index") == 0) {
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "index", "%d", sport->index);
+ retval = 0;
+ } else if (ocs_strcmp(unqualified_name, "display_name") == 0) {
+ ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "display_name", sport->display_name);
+ retval = 0;
+ } else if (ocs_strcmp(unqualified_name, "is_vport") == 0) {
+ ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "is_vport", sport->is_vport);
+ retval = 0;
+ } else if (ocs_strcmp(unqualified_name, "enable_ini") == 0) {
+ ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "enable_ini", sport->enable_ini);
+ retval = 0;
+ } else if (ocs_strcmp(unqualified_name, "enable_tgt") == 0) {
+ ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "enable_tgt", sport->enable_tgt);
+ retval = 0;
+ } else if (ocs_strcmp(unqualified_name, "p2p_winner") == 0) {
+ ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "p2p_winner", sport->p2p_winner);
+ retval = 0;
+ } else if (ocs_strcmp(unqualified_name, "p2p_port_id") == 0) {
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "p2p_port_id", "0x%06x", sport->p2p_port_id);
+ retval = 0;
+ } else if (ocs_strcmp(unqualified_name, "p2p_remote_port_id") == 0) {
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "p2p_remote_port_id", "0x%06x", sport->p2p_remote_port_id);
+ retval = 0;
+ } else if (ocs_strcmp(unqualified_name, "wwpn") == 0) {
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "wwpn", "0x%016llx", (unsigned long long)sport->wwpn);
+ retval = 0;
+ } else if (ocs_strcmp(unqualified_name, "wwnn") == 0) {
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "wwnn", "0x%016llx", (unsigned long long)sport->wwnn);
+ retval = 0;
+ } else {
+ /* If I didn't know the value of this status pass the request to each of my children */
+ ocs_sport_lock(sport);
+ ocs_list_foreach(&sport->node_list, node) {
+ if ((node->mgmt_functions) && (node->mgmt_functions->get_handler)) {
+ retval = node->mgmt_functions->get_handler(textbuf, qualifier, name, node);
+ }
+
+ if (retval == 0) {
+ break;
+ }
+ }
+ ocs_sport_unlock(sport);
+ }
+ }
+
+ ocs_mgmt_end_section(textbuf, "sport", sport->instance_index);
+
+ return retval;
+}
+
+void
+ocs_mgmt_sport_get_all(ocs_textbuf_t *textbuf, void *object)
+{
+ ocs_node_t *node;
+ ocs_sport_t *sport = (ocs_sport_t *)object;
+
+ ocs_mgmt_start_section(textbuf, "sport", sport->instance_index);
+
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "indicator", "0x%x", sport->indicator);
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "fc_id", "0x%06x", sport->fc_id);
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "index", "%d", sport->index);
+ ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "display_name", sport->display_name);
+ ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "is_vport", sport->is_vport);
+ ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "enable_ini", sport->enable_ini);
+ ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "enable_tgt", sport->enable_tgt);
+ ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "p2p_winner", sport->p2p_winner);
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "p2p_port_id", "0x%06x", sport->p2p_port_id);
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "p2p_remote_port_id", "0x%06x", sport->p2p_remote_port_id);
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "wwpn", "0x%016llx" , (unsigned long long)sport->wwpn);
+ ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "wwnn", "0x%016llx", (unsigned long long)sport->wwnn);
+
+ ocs_sport_lock(sport);
+ ocs_list_foreach(&sport->node_list, node) {
+ if ((node->mgmt_functions) && (node->mgmt_functions->get_all_handler)) {
+ node->mgmt_functions->get_all_handler(textbuf, node);
+ }
+ }
+ ocs_sport_unlock(sport);
+
+ ocs_mgmt_end_section(textbuf, "sport", sport->instance_index);
+}
+
+int
+ocs_mgmt_sport_set(char *parent, char *name, char *value, void *object)
+{
+ ocs_node_t *node;
+ ocs_sport_t *sport = (ocs_sport_t *)object;
+ char qualifier[80];
+ int retval = -1;
+
+ snprintf(qualifier, sizeof(qualifier), "%s/sport[%d]", parent, sport->instance_index);
+
+ /* If it doesn't start with my qualifier I don't know what to do with it */
+ if (ocs_strncmp(name, qualifier, strlen(qualifier)) == 0) {
+ /* The sport has no settable values. Pass the request to each node. */
+
+ ocs_sport_lock(sport);
+ ocs_list_foreach(&sport->node_list, node) {
+ if ((node->mgmt_functions) && (node->mgmt_functions->set_handler)) {
+ retval = node->mgmt_functions->set_handler(qualifier, name, value, node);
+ }
+ if (retval == 0) {
+ break;
+ }
+ }
+ ocs_sport_unlock(sport);
+ }
+
+ return retval;
+}
+
+
+int
+ocs_mgmt_sport_exec(char *parent, char *action, void *arg_in, uint32_t arg_in_length,
+ void *arg_out, uint32_t arg_out_length, void *object)
+{
+ ocs_node_t *node;
+ ocs_sport_t *sport = (ocs_sport_t *)object;
+ char qualifier[80];
+ int retval = -1;
+
+ snprintf(qualifier, sizeof(qualifier), "%s.sport%d", parent, sport->instance_index);
+
+ /* If it doesn't start with my qualifier I don't know what to do with it */
+ if (ocs_strncmp(action, qualifier, strlen(qualifier)) == 0) {
+
+ /* See if it's an action I can perform */
+
+ /* if (ocs_strcmp ....
+ * {
+ * } else
+ */
+
+ {
+ /* If I didn't know how to do this action pass the request to each of my children */
+ ocs_sport_lock(sport);
+ ocs_list_foreach(&sport->node_list, node) {
+ if ((node->mgmt_functions) && (node->mgmt_functions->exec_handler)) {
+ retval = node->mgmt_functions->exec_handler(qualifier, action, arg_in, arg_in_length,
+ arg_out, arg_out_length, node);
+ }
+
+ if (retval == 0) {
+ break;
+ }
+
+ }
+ ocs_sport_unlock(sport);
+ }
+ }
+
+ return retval;
+}
+
+/**
+ * @brief Save the virtual port's parameters.
+ *
+ * @par Description
+ * The information required to restore a virtual port is saved.
+ *
+ * @param sport Pointer to the sport context.
+ *
+ * @return None.
+ */
+
+static void
+ocs_vport_update_spec(ocs_sport_t *sport)
+{
+ ocs_t *ocs = sport->ocs;
+ ocs_xport_t *xport = ocs->xport;
+ ocs_vport_spec_t *vport;
+
+ ocs_device_lock(ocs);
+ ocs_list_foreach(&xport->vport_list, vport) {
+ if (vport->sport == sport) {
+ vport->wwnn = sport->wwnn;
+ vport->wwpn = sport->wwpn;
+ vport->tgt_data = sport->tgt_data;
+ vport->ini_data = sport->ini_data;
+ break;
+ }
+ }
+ ocs_device_unlock(ocs);
+}
+
+/**
+ * @brief Create a saved vport entry.
+ *
+ * A saved vport entry is added to the vport list, which is restored following
+ * a link up. This function is used to allow vports to be created the first time
+ * the link comes up without having to go through the ioctl() API.
+ *
+ * @param ocs Pointer to device context.
+ * @param wwnn World wide node name (may be zero for auto-select).
+ * @param wwpn World wide port name (may be zero for auto-select).
+ * @param fc_id Requested port ID (used in fabric emulation mode).
+ * @param enable_ini TRUE if vport is to be an initiator port.
+ * @param enable_tgt TRUE if vport is to be a target port.
+ * @param tgt_data Pointer to target specific data.
+ * @param ini_data Pointer to initiator specific data.
+ *
+ * @return Returns 0 on success, or a negative error value on failure.
+ */
+
+int8_t
+ocs_vport_create_spec(ocs_t *ocs, uint64_t wwnn, uint64_t wwpn, uint32_t fc_id, uint32_t enable_ini, uint32_t enable_tgt, void *tgt_data, void *ini_data)
+{
+ ocs_xport_t *xport = ocs->xport;
+ ocs_vport_spec_t *vport;
+
+ /* Walk the ocs_vport_list and return failure if a valid vport entry
+ * (one with a non-zero WWPN and WWNN) has already been created */
+ ocs_list_foreach(&xport->vport_list, vport) {
+ if ((wwpn && (vport->wwpn == wwpn)) && (wwnn && (vport->wwnn == wwnn))) {
+ ocs_log_test(ocs, "Failed: VPORT %016llx %016llx already allocated\n",
+ (unsigned long long)wwnn, (unsigned long long)wwpn);
+ return -1;
+ }
+ }
+
+ vport = ocs_malloc(ocs, sizeof(*vport), OCS_M_ZERO | OCS_M_NOWAIT);
+ if (vport == NULL) {
+ ocs_log_err(ocs, "ocs_malloc failed\n");
+ return -1;
+ }
+
+ vport->wwnn = wwnn;
+ vport->wwpn = wwpn;
+ vport->fc_id = fc_id;
+ vport->domain_instance = 0; /*TODO: may need to change this */
+ vport->enable_tgt = enable_tgt;
+ vport->enable_ini = enable_ini;
+ vport->tgt_data = tgt_data;
+ vport->ini_data = ini_data;
+
+ ocs_device_lock(ocs);
+ ocs_list_add_tail(&xport->vport_list, vport);
+ ocs_device_unlock(ocs);
+ return 0;
+}
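+
+/*
+ * Usage sketch (hypothetical): pre-register a target-mode vport before the
+ * first link up; a later ocs_vport_start() on the domain instantiates it.
+ *
+ *	if (ocs_vport_create_spec(ocs, wwnn, wwpn, UINT32_MAX,
+ *				  FALSE, TRUE, NULL, NULL) == 0) {
+ *		... spec is on xport->vport_list, applied by ocs_vport_start() ...
+ *	}
+ */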
+
+/* node group api */
+
+/**
+ * @brief Perform the AND operation on source vectors.
+ *
+ * @par Description
+ * Performs an AND operation on the 8-bit values in source vectors @c b and @c c.
+ * The resulting value is stored in @c a.
+ *
+ * @param a Destination-byte vector.
+ * @param b Source-byte vector.
+ * @param c Source-byte vector.
+ * @param n Byte count.
+ *
+ * @return None.
+ */
+
+static void
+and8(uint8_t *a, uint8_t *b, uint8_t *c, uint32_t n)
+{
+ uint32_t i;
+
+ for (i = 0; i < n; i ++) {
+ *a = *b & *c;
+ a++;
+ b++;
+ c++;
+ }
+}
+
+/**
+ * @brief Service parameters mask data.
+ */
+static fc_sparms_t sparms_cmp_mask = {
+ 0, /*uint32_t command_code: 8, */
+ 0, /* resv1: 24; */
+ {~0, ~0, ~0, ~0}, /* uint32_t common_service_parameters[4]; */
+ 0, /* uint32_t port_name_hi; */
+ 0, /* uint32_t port_name_lo; */
+ 0, /* uint32_t node_name_hi; */
+ 0, /* uint32_t node_name_lo; */
+ {~0, ~0, ~0, ~0}, /* uint32_t class1_service_parameters[4]; */
+ {~0, ~0, ~0, ~0}, /* uint32_t class2_service_parameters[4]; */
+ {~0, ~0, ~0, ~0}, /* uint32_t class3_service_parameters[4]; */
+ {~0, ~0, ~0, ~0}, /* uint32_t class4_service_parameters[4]; */
+ {~0, ~0, ~0, ~0}}; /* uint32_t vendor_version_level[4]; */
+
+/**
+ * @brief Compare service parameters.
+ *
+ * @par Description
+ * Returns 0 if the two service parameters are the same, excluding the port/node name
+ * elements.
+ *
+ * @param sp1 Pointer to service parameters 1.
+ * @param sp2 Pointer to service parameters 2.
+ *
+ * @return Returns 0 if parameters match; otherwise, returns a positive or negative value,
+ * depending on the arithmetic magnitude of the first mismatching byte.
+ */
+
+int
+ocs_sparm_cmp(uint8_t *sp1, uint8_t *sp2)
+{
+ int i;
+ int v;
+ uint8_t *sp3 = (uint8_t*) &sparms_cmp_mask;
+
+ for (i = 0; i < OCS_SERVICE_PARMS_LENGTH; i ++) {
+ v = ((int)(sp1[i] & sp3[i])) - ((int)(sp2[i] & sp3[i]));
+ if (v) {
+ break;
+ }
+ }
+ return v;
+}
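+
+/*
+ * Worked example: two PLOGI payloads that differ only in their port/node
+ * names compare equal, because sparms_cmp_mask zeroes the name words. This
+ * is how ocs_node_group_dir_find() below decides whether a node can share
+ * an existing node group directory entry (node_sparms stands for a node's
+ * raw service parameters):
+ *
+ *	if (ocs_sparm_cmp(node_sparms, node_dir->service_params) == 0)
+ *		... service parameters match; reuse this directory entry ...
+ */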
+
+/**
+ * @brief Allocate a node group directory entry.
+ *
+ * @par Description
+ * A node group directory entry is allocated, initialized, and added to the sport's
+ * node group directory list.
+ *
+ * @param sport Pointer to the sport object.
+ * @param sparms Pointer to the service parameters.
+ *
+ * @return Returns a pointer to the allocated ocs_node_group_dir_t; or NULL.
+ */
+
+ocs_node_group_dir_t *
+ocs_node_group_dir_alloc(ocs_sport_t *sport, uint8_t *sparms)
+{
+ ocs_node_group_dir_t *node_group_dir;
+
+ node_group_dir = ocs_malloc(sport->ocs, sizeof(*node_group_dir), OCS_M_ZERO | OCS_M_NOWAIT);
+ if (node_group_dir != NULL) {
+ node_group_dir->sport = sport;
+
+ ocs_lock(&sport->node_group_lock);
+ node_group_dir->instance_index = sport->node_group_dir_next_instance++;
+ and8(node_group_dir->service_params, sparms, (uint8_t*)&sparms_cmp_mask, OCS_SERVICE_PARMS_LENGTH);
+ ocs_list_init(&node_group_dir->node_group_list, ocs_remote_node_group_t, link);
+
+ node_group_dir->node_group_list_count = 0;
+ node_group_dir->next_idx = 0;
+ ocs_list_add_tail(&sport->node_group_dir_list, node_group_dir);
+ ocs_unlock(&sport->node_group_lock);
+
+ ocs_log_debug(sport->ocs, "[%s] [%d] allocating node group directory\n", sport->display_name,
+ node_group_dir->instance_index);
+ }
+ return node_group_dir;
+}
+
+/**
+ * @brief Free a node group directory entry.
+ *
+ * @par Description
+ * The node group directory entry @c node_group_dir is removed
+ * from the sport's node group directory list and freed.
+ *
+ * @param node_group_dir Pointer to the node group directory entry.
+ *
+ * @return None.
+ */
+
+void
+ocs_node_group_dir_free(ocs_node_group_dir_t *node_group_dir)
+{
+ ocs_sport_t *sport;
+ if (node_group_dir != NULL) {
+ sport = node_group_dir->sport;
+ ocs_log_debug(sport->ocs, "[%s] [%d] freeing node group directory\n", sport->display_name,
+ node_group_dir->instance_index);
+ ocs_lock(&sport->node_group_lock);
+ if (!ocs_list_empty(&node_group_dir->node_group_list)) {
+ ocs_log_test(sport->ocs, "[%s] WARNING: node group list not empty\n", sport->display_name);
+ }
+ ocs_list_remove(&sport->node_group_dir_list, node_group_dir);
+ ocs_unlock(&sport->node_group_lock);
+ ocs_free(sport->ocs, node_group_dir, sizeof(*node_group_dir));
+ }
+}
+
+/**
+ * @brief Find a matching node group directory entry.
+ *
+ * @par Description
+ * The sport's node group directory list is searched for a matching set of
+ * service parameters. The first matching entry is returned; otherwise
+ * NULL is returned.
+ *
+ * @param sport Pointer to the sport object.
+ * @param sparms Pointer to the sparams to match.
+ *
+ * @return Returns a pointer to the first matching entry found; or NULL.
+ */
+
+ocs_node_group_dir_t *
+ocs_node_group_dir_find(ocs_sport_t *sport, uint8_t *sparms)
+{
+ ocs_node_group_dir_t *node_dir = NULL;
+
+ ocs_lock(&sport->node_group_lock);
+ ocs_list_foreach(&sport->node_group_dir_list, node_dir) {
+ if (ocs_sparm_cmp(sparms, node_dir->service_params) == 0) {
+ ocs_unlock(&sport->node_group_lock);
+ return node_dir;
+ }
+ }
+ ocs_unlock(&sport->node_group_lock);
+ return NULL;
+}
+
+/**
+ * @brief Allocate a remote node group object.
+ *
+ * @par Description
+ * A remote node group object is allocated, initialized, and placed on the node group
+ * list of @c node_group_dir. The HW remote node group @b alloc function is called.
+ *
+ * @param node_group_dir Pointer to the node group directory.
+ *
+ * @return Returns a pointer to the allocated remote node group object; or NULL.
+ */
+
+ocs_remote_node_group_t *
+ocs_remote_node_group_alloc(ocs_node_group_dir_t *node_group_dir)
+{
+ ocs_t *ocs;
+ ocs_sport_t *sport;
+ ocs_remote_node_group_t *node_group;
+ ocs_hw_rtn_e hrc;
+
+ ocs_assert(node_group_dir, NULL);
+ ocs_assert(node_group_dir->sport, NULL);
+ ocs_assert(node_group_dir->sport->ocs, NULL);
+
+ sport = node_group_dir->sport;
+ ocs = sport->ocs;
+
+ node_group = ocs_malloc(ocs, sizeof(*node_group), OCS_M_ZERO | OCS_M_NOWAIT);
+ if (node_group != NULL) {
+
+ /* set pointer to node group directory */
+ node_group->node_group_dir = node_group_dir;
+
+ ocs_lock(&node_group_dir->sport->node_group_lock);
+ node_group->instance_index = sport->node_group_next_instance++;
+ ocs_unlock(&node_group_dir->sport->node_group_lock);
+
+ /* invoke HW node group initialization */
+ hrc = ocs_hw_node_group_alloc(&ocs->hw, node_group);
+ if (hrc != OCS_HW_RTN_SUCCESS) {
+ ocs_log_err(ocs, "ocs_hw_node_group_alloc() failed: %d\n", hrc);
+ ocs_free(ocs, node_group, sizeof(*node_group));
+ return NULL;
+ }
+
+ ocs_log_debug(ocs, "[%s] [%d] indicator x%03x allocating node group\n", sport->display_name,
+ node_group->indicator, node_group->instance_index);
+
+ /* add to the node group directory entry node group list */
+ ocs_lock(&node_group_dir->sport->node_group_lock);
+ ocs_list_add_tail(&node_group_dir->node_group_list, node_group);
+ node_group_dir->node_group_list_count ++;
+ ocs_unlock(&node_group_dir->sport->node_group_lock);
+ }
+ return node_group;
+}
+
+/**
+ * @brief Free a remote node group object.
+ *
+ * @par Description
+ * The remote node group object @c node_group is removed from its
+ * node group directory entry and freed.
+ *
+ * @param node_group Pointer to the remote node group object.
+ *
+ * @return None.
+ */
+
+void
+ocs_remote_node_group_free(ocs_remote_node_group_t *node_group)
+{
+ ocs_sport_t *sport;
+ ocs_node_group_dir_t *node_group_dir;
+
+ if (node_group != NULL) {
+
+ ocs_assert(node_group->node_group_dir);
+ ocs_assert(node_group->node_group_dir->sport);
+ ocs_assert(node_group->node_group_dir->sport->ocs);
+
+ node_group_dir = node_group->node_group_dir;
+ sport = node_group_dir->sport;
+
+ ocs_log_debug(sport->ocs, "[%s] [%d] freeing node group\n", sport->display_name, node_group->instance_index);
+
+ /* Remove from node group directory node group list */
+ ocs_lock(&sport->node_group_lock);
+ ocs_list_remove(&node_group_dir->node_group_list, node_group);
+ node_group_dir->node_group_list_count --;
+ /* TODO: note that the node_group_dir entry will persist forever; we could delete it
+ * when group_list_count goes to zero (or when the linked list is empty) */
+ ocs_unlock(&sport->node_group_lock);
+ ocs_free(sport->ocs, node_group, sizeof(*node_group));
+ }
+}
+
+/**
+ * @brief Initialize a node for high login mode.
+ *
+ * @par Description
+ * The @c node is initialized for high login mode. The following steps are performed:
+ * 1. The sport's node group directory is searched for a matching set of service parameters.
+ * 2. If a matching set is not found, a node group directory entry is allocated.
+ * 3. If less than the @c hlm_group_size number of remote node group objects is present in the
+ * node group directory, a new remote node group object is allocated and added to the list.
+ * 4. A remote node group object is selected, and the node is attached to the node group.
+ *
+ * @param node Pointer to the node.
+ *
+ * @return Returns 0 on success, or a negative error value on failure.
+ */
+
+int
+ocs_node_group_init(ocs_node_t *node)
+{
+ ocs_t *ocs;
+ ocs_sport_t *sport;
+ ocs_node_group_dir_t *node_group_dir;
+ ocs_remote_node_group_t *node_group;
+ ocs_hw_rtn_e hrc;
+
+ ocs_assert(node, -1);
+ ocs_assert(node->sport, -1);
+ ocs_assert(node->ocs, -1);
+
+ ocs = node->ocs;
+ sport = node->sport;
+
+ ocs_assert(ocs->enable_hlm, -1);
+
+ /* see if there's a node group directory allocated for this service parameter set */
+ node_group_dir = ocs_node_group_dir_find(sport, node->service_params);
+ if (node_group_dir == NULL) {
+ /* not found, so allocate one */
+ node_group_dir = ocs_node_group_dir_alloc(sport, node->service_params);
+ if (node_group_dir == NULL) {
+ /* node group directory allocation failed ... can't continue, however,
+ * the node will be allocated with a normal (not shared) RPI
+ */
+ ocs_log_err(ocs, "ocs_node_group_dir_alloc() failed\n");
+ return -1;
+ }
+ }
+
+ /* Check whether hlm_group_size node group structures have already been allocated for this
+ * directory entry; if not, allocate and use a new one, otherwise pick the next one round-robin.
+ */
+ ocs_lock(&node->sport->node_group_lock);
+ if (node_group_dir->node_group_list_count < ocs->hlm_group_size) {
+ ocs_unlock(&node->sport->node_group_lock);
+ node_group = ocs_remote_node_group_alloc(node_group_dir);
+ if (node_group == NULL) {
+ ocs_log_err(ocs, "ocs_remote_node_group_alloc() failed\n");
+ return -1;
+ }
+ ocs_lock(&node->sport->node_group_lock);
+ } else {
+ uint32_t idx = 0;
+
+ ocs_list_foreach(&node_group_dir->node_group_list, node_group) {
+ if (idx >= ocs->hlm_group_size) {
+ ocs_log_err(node->ocs, "assertion failed: idx >= ocs->hlm_group_size\n");
+ ocs_unlock(&node->sport->node_group_lock);
+ return -1;
+ }
+
+ if (idx == node_group_dir->next_idx) {
+ break;
+ }
+ idx ++;
+ }
+ if (idx == ocs->hlm_group_size) {
+ node_group = ocs_list_get_head(&node_group_dir->node_group_list);
+ }
+ if (++node_group_dir->next_idx >= node_group_dir->node_group_list_count) {
+ node_group_dir->next_idx = 0;
+ }
+ }
+ ocs_unlock(&node->sport->node_group_lock);
+
+ /* Initialize a pointer in the node back to the node group */
+ node->node_group = node_group;
+
+ /* Join this node into the group */
+ hrc = ocs_hw_node_group_attach(&ocs->hw, node_group, &node->rnode);
+
+ return (hrc == OCS_HW_RTN_SUCCESS) ? 0 : -1;
+}
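+
+/*
+ * Worked example of the round-robin selection above (hypothetical sizes):
+ * with hlm_group_size = 2, the first two nodes sharing a service-parameter
+ * set each allocate a new remote node group; the third node reuses group 0,
+ * the fourth reuses group 1, and so on, spreading logins across the shared
+ * RPI node groups.
+ */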
+
+
diff --git a/sys/dev/ocs_fc/ocs_sport.h b/sys/dev/ocs_fc/ocs_sport.h
new file mode 100644
index 000000000000..88bc5f5d949a
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs_sport.h
@@ -0,0 +1,113 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/**
+ * @file
+ * OCS FC SLI port (SPORT) exported declarations
+ *
+ */
+
+#if !defined(__OCS_SPORT_H__)
+#define __OCS_SPORT_H__
+
+extern int32_t ocs_port_cb(void *arg, ocs_hw_port_event_e event, void *data);
+extern ocs_sport_t *ocs_sport_alloc(ocs_domain_t *domain, uint64_t wwpn, uint64_t wwnn, uint32_t fc_id,
+ uint8_t enable_ini, uint8_t enable_tgt);
+extern void ocs_sport_free(ocs_sport_t *sport);
+extern void ocs_sport_force_free(ocs_sport_t *sport);
+
+static inline void
+ocs_sport_lock_init(ocs_sport_t *sport)
+{
+}
+
+static inline void
+ocs_sport_lock_free(ocs_sport_t *sport)
+{
+}
+
+static inline int32_t
+ocs_sport_lock_try(ocs_sport_t *sport)
+{
+ /* Use the device wide lock */
+ return ocs_device_lock_try(sport->ocs);
+}
+
+static inline void
+ocs_sport_lock(ocs_sport_t *sport)
+{
+ /* Use the device wide lock */
+ ocs_device_lock(sport->ocs);
+}
+
+static inline void
+ocs_sport_unlock(ocs_sport_t *sport)
+{
+ /* Use the device wide lock */
+ ocs_device_unlock(sport->ocs);
+}
+
+extern ocs_sport_t *ocs_sport_find(ocs_domain_t *domain, uint32_t d_id);
+extern ocs_sport_t *ocs_sport_find_wwn(ocs_domain_t *domain, uint64_t wwnn, uint64_t wwpn);
+extern int32_t ocs_sport_attach(ocs_sport_t *sport, uint32_t fc_id);
+
+extern void *__ocs_sport_allocated(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_sport_wait_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_sport_wait_port_free(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_sport_vport_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_sport_vport_wait_alloc(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_sport_vport_allocated(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+extern void *__ocs_sport_attached(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
+
+extern int32_t ocs_vport_start(ocs_domain_t *domain);
+extern int32_t ocs_sport_vport_new(ocs_domain_t *domain, uint64_t wwpn, uint64_t wwnn, uint32_t fc_id, uint8_t ini, uint8_t tgt, void *tgt_data, void *ini_data, uint8_t restore_vport);
+extern int32_t ocs_sport_vport_alloc(ocs_domain_t *domain, ocs_vport_spec_t *vport);
+extern int32_t ocs_sport_vport_del(ocs_t *ocs, ocs_domain_t *domain, uint64_t wwpn, uint64_t wwnn);
+extern void ocs_vport_del_all(ocs_t *ocs);
+extern int8_t ocs_vport_create_spec(ocs_t *ocs, uint64_t wwnn, uint64_t wwpn, uint32_t fc_id, uint32_t enable_ini, uint32_t enable_tgt, void *tgt_data, void *ini_data);
+
+extern int ocs_ddump_sport(ocs_textbuf_t *textbuf, ocs_sport_t *sport);
+
+/* Node group API */
+extern int ocs_sparm_cmp(uint8_t *sparms1, uint8_t *sparms2);
+extern ocs_node_group_dir_t *ocs_node_group_dir_alloc(ocs_sport_t *sport, uint8_t *sparms);
+extern void ocs_node_group_dir_free(ocs_node_group_dir_t *node_group_dir);
+extern ocs_node_group_dir_t *ocs_node_group_dir_find(ocs_sport_t *sport, uint8_t *sparms);
+extern ocs_remote_node_group_t *ocs_remote_node_group_alloc(ocs_node_group_dir_t *node_group_dir);
+extern void ocs_remote_node_group_free(ocs_remote_node_group_t *node_group);
+extern int ocs_node_group_init(ocs_node_t *node);
+extern void ocs_node_group_free(ocs_node_t *node);
+
+
+
+#endif
diff --git a/sys/dev/ocs_fc/ocs_stats.h b/sys/dev/ocs_fc/ocs_stats.h
new file mode 100644
index 000000000000..a7f324e099cc
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs_stats.h
@@ -0,0 +1,49 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/**
+ * @file
+ *
+ */
+
+
+#if !defined(__OCS_STATS_H__)
+#define __OCS_STATS_H__
+
+#define OCS_STAT_ENABLE 0
+#if OCS_STAT_ENABLE
+ #define OCS_STAT(x) x
+#else
+ #define OCS_STAT(x)
+#endif
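+
+/*
+ * Usage sketch (illustrative; the counter name is hypothetical):
+ *
+ *	OCS_STAT(unsol_frame_count++);
+ *
+ * The statement is compiled in when OCS_STAT_ENABLE is 1 and compiles away
+ * to nothing when it is 0, so statistics updates can be left in the code
+ * at no cost to production builds.
+ */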
+#endif
diff --git a/sys/dev/ocs_fc/ocs_unsol.c b/sys/dev/ocs_fc/ocs_unsol.c
new file mode 100644
index 000000000000..8c2ab2ad880d
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs_unsol.c
@@ -0,0 +1,1399 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/**
+ * @file
+ * Code to handle unsolicited received FC frames.
+ */
+
+/*!
+ * @defgroup unsol Unsolicited Frame Handling
+ */
+
+#include "ocs.h"
+#include "ocs_els.h"
+#include "ocs_fabric.h"
+#include "ocs_device.h"
+
+
+#define frame_printf(ocs, hdr, fmt, ...) \
+ do { \
+ char s_id_text[16]; \
+ ocs_node_fcid_display(fc_be24toh((hdr)->s_id), s_id_text, sizeof(s_id_text)); \
+ ocs_log_debug(ocs, "[%06x.%s] %02x/%04x/%04x: " fmt, fc_be24toh((hdr)->d_id), s_id_text, \
+ (hdr)->r_ctl, ocs_be16toh((hdr)->ox_id), ocs_be16toh((hdr)->rx_id), ##__VA_ARGS__); \
+ } while(0)
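+
+/*
+ * For example, frame_printf(ocs, hdr, "Holding frame\n") emits a debug
+ * message tagged with the frame's D_ID, S_ID, R_CTL, OX_ID and RX_ID.
+ */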
+
+static int32_t ocs_unsol_process(ocs_t *ocs, ocs_hw_sequence_t *seq);
+static int32_t ocs_dispatch_fcp_cmd(ocs_node_t *node, ocs_hw_sequence_t *seq);
+static int32_t ocs_dispatch_fcp_cmd_auto_xfer_rdy(ocs_node_t *node, ocs_hw_sequence_t *seq);
+static int32_t ocs_dispatch_fcp_data(ocs_node_t *node, ocs_hw_sequence_t *seq);
+static int32_t ocs_domain_dispatch_frame(void *arg, ocs_hw_sequence_t *seq);
+static int32_t ocs_node_dispatch_frame(void *arg, ocs_hw_sequence_t *seq);
+static int32_t ocs_fc_tmf_rejected_cb(ocs_io_t *io, ocs_scsi_io_status_e scsi_status, uint32_t flags, void *arg);
+static ocs_hw_sequence_t *ocs_frame_next(ocs_list_t *pend_list, ocs_lock_t *list_lock);
+static uint8_t ocs_node_frames_held(void *arg);
+static uint8_t ocs_domain_frames_held(void *arg);
+static int32_t ocs_purge_pending(ocs_t *ocs, ocs_list_t *pend_list, ocs_lock_t *list_lock);
+static int32_t ocs_sframe_send_task_set_full_or_busy(ocs_node_t *node, ocs_hw_sequence_t *seq);
+
+#define OCS_MAX_FRAMES_BEFORE_YIELDING 10000
+
+/**
+ * @brief Process the RQ circular buffer and process the incoming frames.
+ *
+ * @param mythread Pointer to thread object.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+int32_t
+ocs_unsol_rq_thread(ocs_thread_t *mythread)
+{
+ ocs_xport_rq_thread_info_t *thread_data = mythread->arg;
+ ocs_t *ocs = thread_data->ocs;
+ ocs_hw_sequence_t *seq;
+	uint32_t yield_count = OCS_MAX_FRAMES_BEFORE_YIELDING;
+
+ ocs_log_debug(ocs, "%s running\n", mythread->name);
+ while (!ocs_thread_terminate_requested(mythread)) {
+ seq = ocs_cbuf_get(thread_data->seq_cbuf, 100000);
+ if (seq == NULL) {
+ /* Prevent soft lockups by yielding the CPU */
+ ocs_thread_yield(&thread_data->thread);
+			yield_count = OCS_MAX_FRAMES_BEFORE_YIELDING;
+ continue;
+ }
+ /* Note: Always returns 0 */
+ ocs_unsol_process((ocs_t*)seq->hw->os, seq);
+
+		/* We have to prevent CPU soft lockups, so yield the CPU after processing a fixed number of frames. */
+		if (--yield_count == 0) {
+			ocs_thread_yield(&thread_data->thread);
+			yield_count = OCS_MAX_FRAMES_BEFORE_YIELDING;
+ }
+ }
+ ocs_log_debug(ocs, "%s exiting\n", mythread->name);
+ thread_data->thread_started = FALSE;
+ return 0;
+}
+
+/**
+ * @ingroup unsol
+ * @brief Callback function invoked when a port-owned XRI exchange
+ * abort completes.
+ *
+ * @return Returns 0.
+ */
+static int32_t
+ocs_unsol_abort_cb(ocs_hw_io_t *hio, ocs_remote_node_t *rnode, uint32_t len, int32_t status, uint32_t ext, void *arg)
+{
+ ocs_t *ocs = arg;
+ ocs_assert(hio, -1);
+ ocs_assert(arg, -1);
+ ocs_log_debug(ocs, "xri=0x%x tag=0x%x\n", hio->indicator, hio->reqtag);
+ ocs_hw_io_free(&ocs->hw, hio);
+ return 0;
+}
+
+
+/**
+ * @ingroup unsol
+ * @brief Abort a port-owned XRI (RQ Pair auto XFER RDY).
+ * @return Returns None.
+ */
+static void
+ocs_port_owned_abort(ocs_t *ocs, ocs_hw_io_t *hio)
+{
+ ocs_hw_rtn_e hw_rc;
+ hw_rc = ocs_hw_io_abort(&ocs->hw, hio, FALSE,
+ ocs_unsol_abort_cb, ocs);
+ if((hw_rc == OCS_HW_RTN_IO_ABORT_IN_PROGRESS) ||
+ (hw_rc == OCS_HW_RTN_IO_PORT_OWNED_ALREADY_ABORTED)) {
+ ocs_log_debug(ocs, "already aborted XRI 0x%x\n", hio->indicator);
+ } else if(hw_rc != OCS_HW_RTN_SUCCESS) {
+ ocs_log_debug(ocs, "Error aborting XRI 0x%x status %d\n",
+ hio->indicator, hw_rc);
+ }
+}
+
+/**
+ * @ingroup unsol
+ * @brief Handle unsolicited FC frames.
+ *
+ * <h3 class="desc">Description</h3>
+ * This function is called from the HW with unsolicited FC frames (FCP, ELS, BLS, etc.).
+ *
+ * @param arg Application-specified callback data.
+ * @param seq Header/payload sequence buffers.
+ *
+ * @return Returns 0 on success; or a negative error value on failure.
+ */
+
+int32_t
+ocs_unsolicited_cb(void *arg, ocs_hw_sequence_t *seq)
+{
+ ocs_t *ocs = arg;
+ ocs_xport_t *xport = ocs->xport;
+ int32_t rc;
+
+ CPUTRACE("");
+
+ if (ocs->rq_threads == 0) {
+ rc = ocs_unsol_process(ocs, seq);
+ } else {
+ /* use the ox_id to dispatch this IO to a thread */
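+		/* Hashing on OX_ID keeps every frame of an exchange on the
+		 * same RQ thread, preserving per-IO frame ordering.
+		 */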
+ fc_header_t *hdr = seq->header->dma.virt;
+ uint32_t ox_id = ocs_be16toh(hdr->ox_id);
+ uint32_t thr_index = ox_id % ocs->rq_threads;
+
+ rc = ocs_cbuf_put(xport->rq_thread_info[thr_index].seq_cbuf, seq);
+ }
+
+ if (rc) {
+ ocs_hw_sequence_free(&ocs->hw, seq);
+ }
+
+ return 0;
+}
+
+/**
+ * @ingroup unsol
+ * @brief Handle unsolicited FC frames.
+ *
+ * <h3 class="desc">Description</h3>
+ * This function is called either from ocs_unsolicited_cb() or ocs_unsol_rq_thread().
+ *
+ * @param ocs Pointer to the ocs structure.
+ * @param seq Header/payload sequence buffers.
+ *
+ * @return Returns 0 on success, or a negative error value on failure.
+ */
+static int32_t
+ocs_unsol_process(ocs_t *ocs, ocs_hw_sequence_t *seq)
+{
+ ocs_xport_fcfi_t *xport_fcfi = NULL;
+ ocs_domain_t *domain;
+ uint8_t seq_fcfi = seq->fcfi;
+
+ /* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */
+ if (ocs->hw.workaround.override_fcfi) {
+ if (ocs->hw.first_domain_idx > -1) {
+ seq_fcfi = ocs->hw.first_domain_idx;
+ }
+ }
+
+ /* Range check seq->fcfi */
+ if (seq_fcfi < ARRAY_SIZE(ocs->xport->fcfi)) {
+ xport_fcfi = &ocs->xport->fcfi[seq_fcfi];
+ }
+
+ /* If the transport FCFI entry is NULL, then drop the frame */
+ if (xport_fcfi == NULL) {
+ ocs_log_test(ocs, "FCFI %d is not valid, dropping frame\n", seq->fcfi);
+ if (seq->hio != NULL) {
+ ocs_port_owned_abort(ocs, seq->hio);
+ }
+
+ ocs_hw_sequence_free(&ocs->hw, seq);
+ return 0;
+ }
+ domain = ocs_hw_domain_get(&ocs->hw, seq_fcfi);
+
+ /*
+ * If we are holding frames or the domain is not yet registered or
+ * there's already frames on the pending list,
+ * then add the new frame to pending list
+ */
+ if (domain == NULL ||
+ xport_fcfi->hold_frames ||
+ !ocs_list_empty(&xport_fcfi->pend_frames)) {
+ ocs_lock(&xport_fcfi->pend_frames_lock);
+ ocs_list_add_tail(&xport_fcfi->pend_frames, seq);
+ ocs_unlock(&xport_fcfi->pend_frames_lock);
+
+ if (domain != NULL) {
+ /* immediately process pending frames */
+ ocs_domain_process_pending(domain);
+ }
+ } else {
+ /*
+ * We are not holding frames and pending list is empty, just process frame.
+ * A non-zero return means the frame was not handled - so cleanup
+ */
+ if (ocs_domain_dispatch_frame(domain, seq)) {
+ if (seq->hio != NULL) {
+ ocs_port_owned_abort(ocs, seq->hio);
+ }
+ ocs_hw_sequence_free(&ocs->hw, seq);
+ }
+ }
+ return 0;
+}
+
+/**
+ * @ingroup unsol
+ * @brief Process pending frames queued to the given node.
+ *
+ * <h3 class="desc">Description</h3>
+ * Frames that are queued for the \c node are dispatched and returned
+ * to the RQ.
+ *
+ * @param node Node of the queued frames that are to be dispatched.
+ *
+ * @return Returns 0 on success, or a negative error value on failure.
+ */
+
+int32_t
+ocs_process_node_pending(ocs_node_t *node)
+{
+ ocs_t *ocs = node->ocs;
+ ocs_hw_sequence_t *seq = NULL;
+ uint32_t pend_frames_processed = 0;
+
+ for (;;) {
+ /* need to check for hold frames condition after each frame processed
+ * because any given frame could cause a transition to a state that
+ * holds frames
+ */
+ if (ocs_node_frames_held(node)) {
+ break;
+ }
+
+ /* Get next frame/sequence */
+ ocs_lock(&node->pend_frames_lock);
+ seq = ocs_list_remove_head(&node->pend_frames);
+ if (seq == NULL) {
+ pend_frames_processed = node->pend_frames_processed;
+ node->pend_frames_processed = 0;
+ ocs_unlock(&node->pend_frames_lock);
+ break;
+ }
+ node->pend_frames_processed++;
+ ocs_unlock(&node->pend_frames_lock);
+
+ /* now dispatch frame(s) to dispatch function */
+ if (ocs_node_dispatch_frame(node, seq)) {
+ if (seq->hio != NULL) {
+ ocs_port_owned_abort(ocs, seq->hio);
+ }
+ ocs_hw_sequence_free(&ocs->hw, seq);
+ }
+ }
+
+ if (pend_frames_processed != 0) {
+ ocs_log_debug(ocs, "%u node frames held and processed\n", pend_frames_processed);
+ }
+
+ return 0;
+}
+
+/**
+ * @ingroup unsol
+ * @brief Process pending frames queued to the given domain.
+ *
+ * <h3 class="desc">Description</h3>
+ * Frames that are queued for the \c domain are dispatched and
+ * returned to the RQ.
+ *
+ * @param domain Domain of the queued frames that are to be
+ * dispatched.
+ *
+ * @return Returns 0 on success, or a negative error value on failure.
+ */
+
+int32_t
+ocs_domain_process_pending(ocs_domain_t *domain)
+{
+ ocs_t *ocs = domain->ocs;
+ ocs_xport_fcfi_t *xport_fcfi;
+ ocs_hw_sequence_t *seq = NULL;
+ uint32_t pend_frames_processed = 0;
+
+ ocs_assert(domain->fcf_indicator < SLI4_MAX_FCFI, -1);
+ xport_fcfi = &ocs->xport->fcfi[domain->fcf_indicator];
+
+ for (;;) {
+ /* need to check for hold frames condition after each frame processed
+ * because any given frame could cause a transition to a state that
+ * holds frames
+ */
+ if (ocs_domain_frames_held(domain)) {
+ break;
+ }
+
+ /* Get next frame/sequence */
+ ocs_lock(&xport_fcfi->pend_frames_lock);
+ seq = ocs_list_remove_head(&xport_fcfi->pend_frames);
+ if (seq == NULL) {
+ pend_frames_processed = xport_fcfi->pend_frames_processed;
+ xport_fcfi->pend_frames_processed = 0;
+ ocs_unlock(&xport_fcfi->pend_frames_lock);
+ break;
+ }
+ xport_fcfi->pend_frames_processed++;
+ ocs_unlock(&xport_fcfi->pend_frames_lock);
+
+ /* now dispatch frame(s) to dispatch function */
+ if (ocs_domain_dispatch_frame(domain, seq)) {
+ if (seq->hio != NULL) {
+ ocs_port_owned_abort(ocs, seq->hio);
+ }
+ ocs_hw_sequence_free(&ocs->hw, seq);
+ }
+ }
+ if (pend_frames_processed != 0) {
+ ocs_log_debug(ocs, "%u domain frames held and processed\n", pend_frames_processed);
+ }
+ return 0;
+}
+
+/**
+ * @ingroup unsol
+ * @brief Purge given pending list
+ *
+ * <h3 class="desc">Description</h3>
+ * Frames that are queued on the given pending list are
+ * discarded and returned to the RQ.
+ *
+ * @param ocs Pointer to ocs object.
+ * @param pend_list Pending list to be purged.
+ * @param list_lock Lock that protects pending list.
+ *
+ * @return Returns 0 on success, or a negative error value on failure.
+ */
+
+static int32_t
+ocs_purge_pending(ocs_t *ocs, ocs_list_t *pend_list, ocs_lock_t *list_lock)
+{
+ ocs_hw_sequence_t *frame;
+
+ for (;;) {
+ frame = ocs_frame_next(pend_list, list_lock);
+ if (frame == NULL) {
+ break;
+ }
+
+ frame_printf(ocs, (fc_header_t*) frame->header->dma.virt, "Discarding held frame\n");
+ if (frame->hio != NULL) {
+ ocs_port_owned_abort(ocs, frame->hio);
+ }
+ ocs_hw_sequence_free(&ocs->hw, frame);
+ }
+
+ return 0;
+}
+
+/**
+ * @ingroup unsol
+ * @brief Purge node's pending (queued) frames.
+ *
+ * <h3 class="desc">Description</h3>
+ * Frames that are queued for the \c node are discarded and returned
+ * to the RQ.
+ *
+ * @param node Node of the queued frames that are to be discarded.
+ *
+ * @return Returns 0 on success, or a negative error value on failure.
+ */
+
+int32_t
+ocs_node_purge_pending(ocs_node_t *node)
+{
+ return ocs_purge_pending(node->ocs, &node->pend_frames, &node->pend_frames_lock);
+}
+
+/**
+ * @ingroup unsol
+ * @brief Purge xport's pending (queued) frames.
+ *
+ * <h3 class="desc">Description</h3>
+ * Frames that are queued for the \c xport are discarded and
+ * returned to the RQ.
+ *
+ * @param domain Pointer to domain object.
+ *
+ * @return Returns 0 on success; or a negative error value on failure.
+ */
+
+int32_t
+ocs_domain_purge_pending(ocs_domain_t *domain)
+{
+ ocs_t *ocs = domain->ocs;
+ ocs_xport_fcfi_t *xport_fcfi;
+
+ ocs_assert(domain->fcf_indicator < SLI4_MAX_FCFI, -1);
+ xport_fcfi = &ocs->xport->fcfi[domain->fcf_indicator];
+ return ocs_purge_pending(domain->ocs,
+ &xport_fcfi->pend_frames,
+ &xport_fcfi->pend_frames_lock);
+}
+
+/**
+ * @ingroup unsol
+ * @brief Check if node's pending frames are held.
+ *
+ * @param arg Node for which the pending frame hold condition is
+ * checked.
+ *
+ * @return Returns 1 if node is holding pending frames, or 0
+ * if not.
+ */
+
+static uint8_t
+ocs_node_frames_held(void *arg)
+{
+ ocs_node_t *node = (ocs_node_t *)arg;
+ return node->hold_frames;
+}
+
+/**
+ * @ingroup unsol
+ * @brief Check if domain's pending frames are held.
+ *
+ * @param arg Domain for which the pending frame hold condition is
+ * checked.
+ *
+ * @return Returns 1 if domain is holding pending frames, or 0
+ * if not.
+ */
+
+static uint8_t
+ocs_domain_frames_held(void *arg)
+{
+ ocs_domain_t *domain = (ocs_domain_t *)arg;
+ ocs_t *ocs = domain->ocs;
+ ocs_xport_fcfi_t *xport_fcfi;
+
+ ocs_assert(domain != NULL, 1);
+ ocs_assert(domain->fcf_indicator < SLI4_MAX_FCFI, 1);
+ xport_fcfi = &ocs->xport->fcfi[domain->fcf_indicator];
+ return xport_fcfi->hold_frames;
+}
+
+/**
+ * @ingroup unsol
+ * @brief Globally (at xport level) hold unsolicited frames.
+ *
+ * <h3 class="desc">Description</h3>
+ * This function places a hold on processing unsolicited FC
+ * frames queued to the xport pending list.
+ *
+ * @param domain Pointer to domain object.
+ *
+ * @return Returns None.
+ */
+
+void
+ocs_domain_hold_frames(ocs_domain_t *domain)
+{
+ ocs_t *ocs = domain->ocs;
+ ocs_xport_fcfi_t *xport_fcfi;
+
+ ocs_assert(domain->fcf_indicator < SLI4_MAX_FCFI);
+ xport_fcfi = &ocs->xport->fcfi[domain->fcf_indicator];
+ if (!xport_fcfi->hold_frames) {
+ ocs_log_debug(domain->ocs, "hold frames set for FCFI %d\n",
+ domain->fcf_indicator);
+ xport_fcfi->hold_frames = 1;
+ }
+}
+
+/**
+ * @ingroup unsol
+ * @brief Clear hold on unsolicited frames.
+ *
+ * <h3 class="desc">Description</h3>
+ * This function clears the hold on processing unsolicited FC
+ * frames queued to the domain pending list.
+ *
+ * @param domain Pointer to domain object.
+ *
+ * @return Returns None.
+ */
+
+void
+ocs_domain_accept_frames(ocs_domain_t *domain)
+{
+ ocs_t *ocs = domain->ocs;
+ ocs_xport_fcfi_t *xport_fcfi;
+
+ ocs_assert(domain->fcf_indicator < SLI4_MAX_FCFI);
+ xport_fcfi = &ocs->xport->fcfi[domain->fcf_indicator];
+ if (xport_fcfi->hold_frames == 1) {
+ ocs_log_debug(domain->ocs, "hold frames cleared for FCFI %d\n",
+ domain->fcf_indicator);
+ }
+ xport_fcfi->hold_frames = 0;
+ ocs_domain_process_pending(domain);
+}
+
+
+/**
+ * @ingroup unsol
+ * @brief Dispatch unsolicited FC frame.
+ *
+ * <h3 class="desc">Description</h3>
+ * This function processes an unsolicited FC frame queued at the
+ * domain level.
+ *
+ * @param arg Pointer to ocs object.
+ * @param seq Header/payload sequence buffers.
+ *
+ * @return Returns 0 if frame processed and RX buffers cleaned
+ * up appropriately, -1 if frame not handled.
+ */
+
+static __inline int32_t
+ocs_domain_dispatch_frame(void *arg, ocs_hw_sequence_t *seq)
+{
+ ocs_domain_t *domain = (ocs_domain_t *)arg;
+ ocs_t *ocs = domain->ocs;
+ fc_header_t *hdr;
+ uint32_t s_id;
+ uint32_t d_id;
+ ocs_node_t *node = NULL;
+ ocs_sport_t *sport = NULL;
+
+ ocs_assert(seq->header, -1);
+ ocs_assert(seq->header->dma.virt, -1);
+ ocs_assert(seq->payload->dma.virt, -1);
+
+ hdr = seq->header->dma.virt;
+
+ /* extract the s_id and d_id */
+ s_id = fc_be24toh(hdr->s_id);
+ d_id = fc_be24toh(hdr->d_id);
+
+ sport = domain->sport;
+ if (sport == NULL) {
+ frame_printf(ocs, hdr, "phy sport for FC ID 0x%06x is NULL, dropping frame\n", d_id);
+ return -1;
+ }
+
+ if (sport->fc_id != d_id) {
+		/* Not addressed to the physical port; look up the sport associated with the NPIV port */
+ sport = ocs_sport_find(domain, d_id); /* Look up without lock */
+ if (sport == NULL) {
+ if (hdr->type == FC_TYPE_FCP) {
+ /* Drop frame */
+ ocs_log_warn(ocs, "unsolicited FCP frame with invalid d_id x%x, dropping\n",
+ d_id);
+ return -1;
+ } else {
+ /* p2p will use this case */
+ sport = domain->sport;
+ }
+ }
+ }
+
+ /* Lookup the node given the remote s_id */
+ node = ocs_node_find(sport, s_id);
+
+ /* If not found, then create a new node */
+ if (node == NULL) {
+ /* If this is solicited data or control based on R_CTL and there is no node context,
+ * then we can drop the frame
+ */
+		if ((hdr->r_ctl == FC_RCTL_FC4_DATA) &&
+		    ((hdr->info == FC_RCTL_INFO_SOL_DATA) || (hdr->info == FC_RCTL_INFO_SOL_CTRL))) {
+ ocs_log_debug(ocs, "solicited data/ctrl frame without node, dropping\n");
+ return -1;
+ }
+ node = ocs_node_alloc(sport, s_id, FALSE, FALSE);
+ if (node == NULL) {
+ ocs_log_err(ocs, "ocs_node_alloc() failed\n");
+ return -1;
+ }
+ /* don't send PLOGI on ocs_d_init entry */
+ ocs_node_init_device(node, FALSE);
+ }
+
+	if (node->hold_frames || !ocs_list_empty(&node->pend_frames)) {
+ /* TODO: info log level
+ frame_printf(ocs, hdr, "Holding frame\n");
+ */
+ /* add frame to node's pending list */
+ ocs_lock(&node->pend_frames_lock);
+ ocs_list_add_tail(&node->pend_frames, seq);
+ ocs_unlock(&node->pend_frames_lock);
+
+ return 0;
+ }
+
+ /* now dispatch frame to the node frame handler */
+ return ocs_node_dispatch_frame(node, seq);
+}
+
+/**
+ * @ingroup unsol
+ * @brief Dispatch a frame.
+ *
+ * <h3 class="desc">Description</h3>
+ * A frame is dispatched from the \c node to the handler.
+ *
+ * @param arg Node that originated the frame.
+ * @param seq Header/payload sequence buffers.
+ *
+ * @return Returns 0 if frame processed and RX buffers cleaned
+ * up appropriately, -1 if frame not handled.
+ */
+static int32_t
+ocs_node_dispatch_frame(void *arg, ocs_hw_sequence_t *seq)
+{
+
+ fc_header_t *hdr = seq->header->dma.virt;
+ uint32_t port_id;
+ ocs_node_t *node = (ocs_node_t *)arg;
+ int32_t rc = -1;
+ int32_t sit_set = 0;
+
+ port_id = fc_be24toh(hdr->s_id);
+ ocs_assert(port_id == node->rnode.fc_id, -1);
+
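+	/* Only sequences whose final frame (END_SEQUENCE set) has arrived are
+	 * dispatched; anything else is dropped and logged below.
+	 */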
+ if (fc_be24toh(hdr->f_ctl) & FC_FCTL_END_SEQUENCE) {
+		/* note whether sequence initiative (SIT) was transferred with this frame */
+ if (fc_be24toh(hdr->f_ctl) & FC_FCTL_SEQUENCE_INITIATIVE) {
+ sit_set = 1;
+ }
+ switch (hdr->r_ctl) {
+ case FC_RCTL_ELS:
+ if (sit_set) {
+ rc = ocs_node_recv_els_frame(node, seq);
+ }
+ break;
+
+ case FC_RCTL_BLS:
+ if (sit_set) {
+ rc = ocs_node_recv_abts_frame(node, seq);
+			} else {
+ rc = ocs_node_recv_bls_no_sit(node, seq);
+ }
+ break;
+
+ case FC_RCTL_FC4_DATA:
+ switch(hdr->type) {
+ case FC_TYPE_FCP:
+ if (hdr->info == FC_RCTL_INFO_UNSOL_CMD) {
+ if (node->fcp_enabled) {
+ if (sit_set) {
+ rc = ocs_dispatch_fcp_cmd(node, seq);
+					} else {
+ /* send the auto xfer ready command */
+ rc = ocs_dispatch_fcp_cmd_auto_xfer_rdy(node, seq);
+ }
+ } else {
+ rc = ocs_node_recv_fcp_cmd(node, seq);
+ }
+ } else if (hdr->info == FC_RCTL_INFO_SOL_DATA) {
+ if (sit_set) {
+ rc = ocs_dispatch_fcp_data(node, seq);
+ }
+ }
+ break;
+ case FC_TYPE_GS:
+ if (sit_set) {
+ rc = ocs_node_recv_ct_frame(node, seq);
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+ } else {
+ node_printf(node, "Dropping frame hdr = %08x %08x %08x %08x %08x %08x\n",
+ ocs_htobe32(((uint32_t *)hdr)[0]),
+ ocs_htobe32(((uint32_t *)hdr)[1]),
+ ocs_htobe32(((uint32_t *)hdr)[2]),
+ ocs_htobe32(((uint32_t *)hdr)[3]),
+ ocs_htobe32(((uint32_t *)hdr)[4]),
+ ocs_htobe32(((uint32_t *)hdr)[5]));
+ }
+ return rc;
+}
+
+/**
+ * @ingroup unsol
+ * @brief Dispatch unsolicited FCP frames (RQ Pair).
+ *
+ * <h3 class="desc">Description</h3>
+ * Dispatch unsolicited FCP frames (called from the device node state machine).
+ *
+ * @param io Pointer to the IO context.
+ * @param task_management_flags Task management flags from the FCP_CMND frame.
+ * @param node Node that originated the frame.
+ * @param lun 32-bit LUN from FCP_CMND frame.
+ *
+ * @return Returns None.
+ */
+
+static void
+ocs_dispatch_unsolicited_tmf(ocs_io_t *io, uint8_t task_management_flags, ocs_node_t *node, uint64_t lun)
+{
+ uint32_t i;
+ struct {
+ uint32_t mask;
+ ocs_scsi_tmf_cmd_e cmd;
+ } tmflist[] = {
+ {FCP_QUERY_TASK_SET, OCS_SCSI_TMF_QUERY_TASK_SET},
+ {FCP_ABORT_TASK_SET, OCS_SCSI_TMF_ABORT_TASK_SET},
+ {FCP_CLEAR_TASK_SET, OCS_SCSI_TMF_CLEAR_TASK_SET},
+ {FCP_QUERY_ASYNCHRONOUS_EVENT, OCS_SCSI_TMF_QUERY_ASYNCHRONOUS_EVENT},
+ {FCP_LOGICAL_UNIT_RESET, OCS_SCSI_TMF_LOGICAL_UNIT_RESET},
+ {FCP_TARGET_RESET, OCS_SCSI_TMF_TARGET_RESET},
+ {FCP_CLEAR_ACA, OCS_SCSI_TMF_CLEAR_ACA}};
+
+ io->exp_xfer_len = 0; /* BUG 32235 */
+
+ for (i = 0; i < ARRAY_SIZE(tmflist); i ++) {
+ if (tmflist[i].mask & task_management_flags) {
+ io->tmf_cmd = tmflist[i].cmd;
+ ocs_scsi_recv_tmf(io, lun, tmflist[i].cmd, NULL, 0);
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(tmflist)) {
+ /* Not handled */
+ node_printf(node, "TMF x%x rejected\n", task_management_flags);
+ ocs_scsi_send_tmf_resp(io, OCS_SCSI_TMF_FUNCTION_REJECTED, NULL, ocs_fc_tmf_rejected_cb, NULL);
+ }
+}
+
+static int32_t
+ocs_validate_fcp_cmd(ocs_t *ocs, ocs_hw_sequence_t *seq)
+{
+ size_t exp_payload_len = 0;
+ fcp_cmnd_iu_t *cmnd = seq->payload->dma.virt;
+ exp_payload_len = sizeof(fcp_cmnd_iu_t) - 16 + cmnd->additional_fcp_cdb_length;
+
+ /*
+ * If we received less than FCP_CMND_IU bytes, assume that the frame is
+ * corrupted in some way and drop it. This was seen when jamming the FCTL
+ * fill bytes field.
+ */
+ if (seq->payload->dma.len < exp_payload_len) {
+ fc_header_t *fchdr = seq->header->dma.virt;
+ ocs_log_debug(ocs, "dropping ox_id %04x with payload length (%zd) less than expected (%zd)\n",
+ ocs_be16toh(fchdr->ox_id), seq->payload->dma.len,
+ exp_payload_len);
+ return -1;
+ }
+ return 0;
+
+}
+
+static void
+ocs_populate_io_fcp_cmd(ocs_io_t *io, fcp_cmnd_iu_t *cmnd, fc_header_t *fchdr, uint8_t sit)
+{
+ uint32_t *fcp_dl;
+ io->init_task_tag = ocs_be16toh(fchdr->ox_id);
+ /* note, tgt_task_tag, hw_tag set when HW io is allocated */
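+	/* additional_fcp_cdb_length is expressed in 32-bit words, so
+	 * advancing the uint32_t pointer by that count skips the additional
+	 * CDB bytes and lands on the FCP_DL field.
+	 */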
+ fcp_dl = (uint32_t*)(&(cmnd->fcp_cdb_and_dl));
+ fcp_dl += cmnd->additional_fcp_cdb_length;
+ io->exp_xfer_len = ocs_be32toh(*fcp_dl);
+ io->transferred = 0;
+
+	/* The upper 7 bits of CS_CTL carry the frame priority through the SAN.
+	 * The assumption here is that the priority given to the frame containing
+	 * the FCP command applies to ALL frames of that IO, so the incoming
+	 * CS_CTL is saved here.
+	 */
+ if (fc_be24toh(fchdr->f_ctl) & FC_FCTL_PRIORITY_ENABLE) {
+ io->cs_ctl = fchdr->cs_ctl;
+ } else {
+ io->cs_ctl = 0;
+ }
+ io->seq_init = sit;
+}
+
+static uint32_t
+ocs_get_flags_fcp_cmd(fcp_cmnd_iu_t *cmnd)
+{
+ uint32_t flags = 0;
+ switch (cmnd->task_attribute) {
+ case FCP_TASK_ATTR_SIMPLE:
+ flags |= OCS_SCSI_CMD_SIMPLE;
+ break;
+ case FCP_TASK_ATTR_HEAD_OF_QUEUE:
+ flags |= OCS_SCSI_CMD_HEAD_OF_QUEUE;
+ break;
+ case FCP_TASK_ATTR_ORDERED:
+ flags |= OCS_SCSI_CMD_ORDERED;
+ break;
+ case FCP_TASK_ATTR_ACA:
+ flags |= OCS_SCSI_CMD_ACA;
+ break;
+ case FCP_TASK_ATTR_UNTAGGED:
+ flags |= OCS_SCSI_CMD_UNTAGGED;
+ break;
+ }
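+	/* Direction flags are from the target's point of view: a write
+	 * command (wrdata) moves data in to us, a read (rddata) moves
+	 * data out.
+	 */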
+ if (cmnd->wrdata)
+ flags |= OCS_SCSI_CMD_DIR_IN;
+ if (cmnd->rddata)
+ flags |= OCS_SCSI_CMD_DIR_OUT;
+
+ return flags;
+}
+
+/**
+ * @ingroup unsol
+ * @brief Dispatch unsolicited FCP_CMND frame.
+ *
+ * <h3 class="desc">Description</h3>
+ * Dispatch unsolicited FCP_CMND frame. RQ Pair mode - always
+ * used for RQ Pair mode since first burst is not supported.
+ *
+ * @param node Node that originated the frame.
+ * @param seq Header/payload sequence buffers.
+ *
+ * @return Returns 0 if frame processed and RX buffers cleaned
+ * up appropriately, -1 if frame not handled and RX buffers need
+ * to be returned.
+ */
+static int32_t
+ocs_dispatch_fcp_cmd(ocs_node_t *node, ocs_hw_sequence_t *seq)
+{
+ ocs_t *ocs = node->ocs;
+ fc_header_t *fchdr = seq->header->dma.virt;
+ fcp_cmnd_iu_t *cmnd = NULL;
+ ocs_io_t *io = NULL;
+ fc_vm_header_t *vhdr;
+ uint8_t df_ctl;
+ uint64_t lun = UINT64_MAX;
+ int32_t rc = 0;
+
+ ocs_assert(seq->payload, -1);
+ cmnd = seq->payload->dma.virt;
+
+ /* perform FCP_CMND validation check(s) */
+ if (ocs_validate_fcp_cmd(ocs, seq)) {
+ return -1;
+ }
+
+ lun = CAM_EXTLUN_BYTE_SWIZZLE(be64dec(cmnd->fcp_lun));
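+	/* an all-ones LUN cannot be decoded; drop the command */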
+ if (lun == UINT64_MAX) {
+ return -1;
+ }
+
+ io = ocs_scsi_io_alloc(node, OCS_SCSI_IO_ROLE_RESPONDER);
+ if (io == NULL) {
+ uint32_t send_frame_capable;
+
+ /* If we have SEND_FRAME capability, then use it to send task set full or busy */
+ rc = ocs_hw_get(&ocs->hw, OCS_HW_SEND_FRAME_CAPABLE, &send_frame_capable);
+ if ((rc == 0) && send_frame_capable) {
+ rc = ocs_sframe_send_task_set_full_or_busy(node, seq);
+ if (rc) {
+ ocs_log_test(ocs, "ocs_sframe_send_task_set_full_or_busy failed: %d\n", rc);
+ }
+ return rc;
+ }
+
+ ocs_log_err(ocs, "IO allocation failed ox_id %04x\n", ocs_be16toh(fchdr->ox_id));
+ return -1;
+ }
+ io->hw_priv = seq->hw_priv;
+
+ /* Check if the CMD has vmheader. */
+ io->app_id = 0;
+ df_ctl = fchdr->df_ctl;
+ if (df_ctl & FC_DFCTL_DEVICE_HDR_16_MASK) {
+ uint32_t vmhdr_offset = 0;
+ /* Presence of VMID. Get the vm header offset. */
+ if (df_ctl & FC_DFCTL_ESP_HDR_MASK) {
+ vmhdr_offset += FC_DFCTL_ESP_HDR_SIZE;
+ ocs_log_err(ocs, "ESP Header present. Fix ESP Size.\n");
+ }
+
+ if (df_ctl & FC_DFCTL_NETWORK_HDR_MASK) {
+ vmhdr_offset += FC_DFCTL_NETWORK_HDR_SIZE;
+ }
+ vhdr = (fc_vm_header_t *) ((char *)fchdr + sizeof(fc_header_t) + vmhdr_offset);
+ io->app_id = ocs_be32toh(vhdr->src_vmid);
+ }
+
+ /* RQ pair, if we got here, SIT=1 */
+ ocs_populate_io_fcp_cmd(io, cmnd, fchdr, TRUE);
+
+ if (cmnd->task_management_flags) {
+ ocs_dispatch_unsolicited_tmf(io, cmnd->task_management_flags, node, lun);
+ } else {
+ uint32_t flags = ocs_get_flags_fcp_cmd(cmnd);
+
+ /* can return failure for things like task set full and UAs,
+ * no need to treat as a dropped frame if rc != 0
+ */
+ ocs_scsi_recv_cmd(io, lun, cmnd->fcp_cdb,
+ sizeof(cmnd->fcp_cdb) +
+ (cmnd->additional_fcp_cdb_length * sizeof(uint32_t)),
+ flags);
+ }
+
+ /* successfully processed, now return RX buffer to the chip */
+ ocs_hw_sequence_free(&ocs->hw, seq);
+ return 0;
+}
+
+/**
+ * @ingroup unsol
+ * @brief Dispatch unsolicited FCP_CMND frame (auto xfer rdy).
+ *
+ * <h3 class="desc">Description</h3>
+ * Dispatch unsolicited FCP_CMND frame that is assisted with auto xfer ready.
+ *
+ * @param node Node that originated the frame.
+ * @param seq Header/payload sequence buffers.
+ *
+ * @return Returns 0 if frame processed and RX buffers cleaned
+ * up appropriately, -1 if frame not handled and RX buffers need
+ * to be returned.
+ */
+static int32_t
+ocs_dispatch_fcp_cmd_auto_xfer_rdy(ocs_node_t *node, ocs_hw_sequence_t *seq)
+{
+ ocs_t *ocs = node->ocs;
+ fc_header_t *fchdr = seq->header->dma.virt;
+ fcp_cmnd_iu_t *cmnd = NULL;
+ ocs_io_t *io = NULL;
+ uint64_t lun = UINT64_MAX;
+ int32_t rc = 0;
+
+ ocs_assert(seq->payload, -1);
+ cmnd = seq->payload->dma.virt;
+
+ /* perform FCP_CMND validation check(s) */
+ if (ocs_validate_fcp_cmd(ocs, seq)) {
+ return -1;
+ }
+
+ /* make sure first burst or auto xfer_rdy is enabled */
+ if (!seq->auto_xrdy) {
+ node_printf(node, "IO is not Auto Xfr Rdy assisted, dropping FCP_CMND\n");
+ return -1;
+ }
+
+ lun = CAM_EXTLUN_BYTE_SWIZZLE(be64dec(cmnd->fcp_lun));
+
+ /* TODO should there be a check here for an error? Why do any of the
+ * below if the LUN decode failed? */
+ io = ocs_scsi_io_alloc(node, OCS_SCSI_IO_ROLE_RESPONDER);
+ if (io == NULL) {
+ uint32_t send_frame_capable;
+
+ /* If we have SEND_FRAME capability, then use it to send task set full or busy */
+ rc = ocs_hw_get(&ocs->hw, OCS_HW_SEND_FRAME_CAPABLE, &send_frame_capable);
+ if ((rc == 0) && send_frame_capable) {
+ rc = ocs_sframe_send_task_set_full_or_busy(node, seq);
+ if (rc) {
+ ocs_log_test(ocs, "ocs_sframe_send_task_set_full_or_busy failed: %d\n", rc);
+ }
+ return rc;
+ }
+
+ ocs_log_err(ocs, "IO allocation failed ox_id %04x\n", ocs_be16toh(fchdr->ox_id));
+ return -1;
+ }
+ io->hw_priv = seq->hw_priv;
+
+ /* RQ pair, if we got here, SIT=0 */
+ ocs_populate_io_fcp_cmd(io, cmnd, fchdr, FALSE);
+
+ if (cmnd->task_management_flags) {
+ /* first burst command better not be a TMF */
+ ocs_log_err(ocs, "TMF flags set 0x%x\n", cmnd->task_management_flags);
+ ocs_scsi_io_free(io);
+ return -1;
+ } else {
+ uint32_t flags = ocs_get_flags_fcp_cmd(cmnd);
+
+ /* activate HW IO */
+ ocs_hw_io_activate_port_owned(&ocs->hw, seq->hio);
+ io->hio = seq->hio;
+ seq->hio->ul_io = io;
+ io->tgt_task_tag = seq->hio->indicator;
+
+ /* Note: Data buffers are received in another call */
+ ocs_scsi_recv_cmd_first_burst(io, lun, cmnd->fcp_cdb,
+ sizeof(cmnd->fcp_cdb) +
+ (cmnd->additional_fcp_cdb_length * sizeof(uint32_t)),
+ flags, NULL, 0);
+ }
+
+ /* FCP_CMND processed, return RX buffer to the chip */
+ ocs_hw_sequence_free(&ocs->hw, seq);
+ return 0;
+}
+
+/**
+ * @ingroup unsol
+ * @brief Dispatch FCP data frames for auto xfer ready.
+ *
+ * <h3 class="desc">Description</h3>
+ * Dispatch unsolicited FCP data frames (auto xfer ready)
+ * containing sequence initiative transferred (SIT=1).
+ *
+ * @param node Node that originated the frame.
+ * @param seq Header/payload sequence buffers.
+ *
+ * @return Returns 0 if frame processed and RX buffers cleaned
+ * up appropriately, -1 if frame not handled.
+ */
+
+static int32_t
+ocs_dispatch_fcp_data(ocs_node_t *node, ocs_hw_sequence_t *seq)
+{
+ ocs_t *ocs = node->ocs;
+ ocs_hw_t *hw = &ocs->hw;
+ ocs_hw_io_t *hio = seq->hio;
+ ocs_io_t *io;
+ ocs_dma_t fburst[1];
+
+ ocs_assert(seq->payload, -1);
+ ocs_assert(hio, -1);
+
+ io = hio->ul_io;
+ if (io == NULL) {
+ ocs_log_err(ocs, "data received for NULL io, xri=0x%x\n",
+ hio->indicator);
+ return -1;
+ }
+
+ /*
+ * We only support data completions for auto xfer ready. Make sure
+ * this is a port owned XRI.
+ */
+ if (!ocs_hw_is_io_port_owned(hw, seq->hio)) {
+ ocs_log_err(ocs, "data received for host owned XRI, xri=0x%x\n",
+ hio->indicator);
+ return -1;
+ }
+
+ /* For error statuses, pass the error to the target back end */
+ if (seq->status != OCS_HW_UNSOL_SUCCESS) {
+ ocs_log_err(ocs, "data with status 0x%x received, xri=0x%x\n",
+ seq->status, hio->indicator);
+
+ /*
+ * In this case, there is an existing, in-use HW IO that
+ * first may need to be aborted. Then, the backend will be
+ * notified of the error while waiting for the data.
+ */
+ ocs_port_owned_abort(ocs, seq->hio);
+
+ /*
+ * HW IO has already been allocated and is waiting for data.
+ * Need to tell backend that an error has occurred.
+ */
+ ocs_scsi_recv_cmd_first_burst(io, 0, NULL, 0, OCS_SCSI_FIRST_BURST_ERR, NULL, 0);
+ return -1;
+ }
+
+ /* sequence initiative has been transferred */
+ io->seq_init = 1;
+
+ /* convert the array of pointers to the correct type, to send to backend */
+ fburst[0] = seq->payload->dma;
+
+	/* the amount of first burst data was saved as the "accumulated sequence length" */
+ io->transferred = seq->payload->dma.len;
+
+ if (ocs_scsi_recv_cmd_first_burst(io, 0, NULL, 0, 0,
+ fburst, io->transferred)) {
+ ocs_log_err(ocs, "error passing first burst, xri=0x%x, oxid=0x%x\n",
+ hio->indicator, io->init_task_tag);
+ }
+
+ /* Free the header and all the accumulated payload buffers */
+ ocs_hw_sequence_free(&ocs->hw, seq);
+ return 0;
+}
+
+
+/**
+ * @ingroup unsol
+ * @brief Handle the callback for the TMF FUNCTION_REJECTED response.
+ *
+ * <h3 class="desc">Description</h3>
+ * Handle the callback of a send TMF FUNCTION_REJECTED response request.
+ *
+ * @param io Pointer to the IO context.
+ * @param scsi_status Status of the response.
+ * @param flags Callback flags.
+ * @param arg Callback argument.
+ *
+ * @return Returns 0 on success, or a negative error value on failure.
+ */
+
+static int32_t
+ocs_fc_tmf_rejected_cb(ocs_io_t *io, ocs_scsi_io_status_e scsi_status, uint32_t flags, void *arg)
+{
+ ocs_scsi_io_free(io);
+ return 0;
+}
+
+/**
+ * @brief Return next FC frame on node->pend_frames list
+ *
+ * The next FC frame on the node->pend_frames list is returned, or NULL
+ * if the list is empty.
+ *
+ * @param pend_list Pending list to be purged.
+ * @param list_lock Lock that protects pending list.
+ *
+ * @return Returns pointer to the next FC frame, or NULL if the pending frame list
+ * is empty.
+ */
+static ocs_hw_sequence_t *
+ocs_frame_next(ocs_list_t *pend_list, ocs_lock_t *list_lock)
+{
+ ocs_hw_sequence_t *frame = NULL;
+
+ ocs_lock(list_lock);
+ frame = ocs_list_remove_head(pend_list);
+ ocs_unlock(list_lock);
+ return frame;
+}
+
+/**
+ * @brief Process send fcp response frame callback
+ *
+ * The function is called when the send FCP response posting has completed. Regardless
+ * of the outcome, the sequence is freed.
+ *
+ * @param arg Pointer to originator frame sequence.
+ * @param cqe Pointer to completion queue entry.
+ * @param status Status of operation.
+ *
+ * @return None.
+ */
+static void
+ocs_sframe_common_send_cb(void *arg, uint8_t *cqe, int32_t status)
+{
+ ocs_hw_send_frame_context_t *ctx = arg;
+ ocs_hw_t *hw = ctx->hw;
+
+ /* Free WQ completion callback */
+ ocs_hw_reqtag_free(hw, ctx->wqcb);
+
+ /* Free sequence */
+ ocs_hw_sequence_free(hw, ctx->seq);
+}
+
+/**
+ * @brief Send a frame, common code
+ *
+ * A frame is sent using SEND_FRAME; the R_CTL/F_CTL/TYPE values may be specified, and the
+ * payload is sent as a single frame.
+ *
+ * Memory resources are allocated from RQ buffers contained in the passed in sequence data.
+ *
+ * @param node Pointer to node object.
+ * @param seq Pointer to sequence object.
+ * @param r_ctl R_CTL value to place in FC header.
+ * @param info INFO value to place in FC header.
+ * @param f_ctl F_CTL value to place in FC header.
+ * @param type TYPE value to place in FC header.
+ * @param payload Pointer to payload data
+ * @param payload_len Length of payload in bytes.
+ *
+ * @return Returns 0 on success, or a negative error code value on failure.
+ */
+static int32_t
+ocs_sframe_common_send(ocs_node_t *node, ocs_hw_sequence_t *seq, uint8_t r_ctl, uint8_t info, uint32_t f_ctl,
+ uint8_t type, void *payload, uint32_t payload_len)
+{
+ ocs_t *ocs = node->ocs;
+ ocs_hw_t *hw = &ocs->hw;
+ ocs_hw_rtn_e rc = 0;
+ fc_header_t *behdr = seq->header->dma.virt;
+ fc_header_le_t hdr;
+ uint32_t s_id = fc_be24toh(behdr->s_id);
+ uint32_t d_id = fc_be24toh(behdr->d_id);
+ uint16_t ox_id = ocs_be16toh(behdr->ox_id);
+ uint16_t rx_id = ocs_be16toh(behdr->rx_id);
+ ocs_hw_send_frame_context_t *ctx;
+
+ uint32_t heap_size = seq->payload->dma.size;
+ uintptr_t heap_phys_base = seq->payload->dma.phys;
+ uint8_t *heap_virt_base = seq->payload->dma.virt;
+ uint32_t heap_offset = 0;
+
+	/* Build the FC header on the stack; the request context and response
+	 * payload below are carved from the RQ payload DMA buffer
+	 */
+ ocs_memset(&hdr, 0, sizeof(hdr));
+	hdr.d_id = s_id;		/* send it back to whoever sent it to us */
+ hdr.r_ctl = r_ctl;
+ hdr.info = info;
+ hdr.s_id = d_id;
+ hdr.cs_ctl = 0;
+ hdr.f_ctl = f_ctl;
+ hdr.type = type;
+ hdr.seq_cnt = 0;
+ hdr.df_ctl = 0;
+
+ /*
+ * send_frame_seq_id is an atomic, we just let it increment, while storing only
+ * the low 8 bits to hdr->seq_id
+ */
+ hdr.seq_id = (uint8_t) ocs_atomic_add_return(&hw->send_frame_seq_id, 1);
+
+ hdr.rx_id = rx_id;
+ hdr.ox_id = ox_id;
+ hdr.parameter = 0;
+
+ /* Allocate and fill in the send frame request context */
+ ctx = (void*)(heap_virt_base + heap_offset);
+ heap_offset += sizeof(*ctx);
+ ocs_assert(heap_offset < heap_size, -1);
+ ocs_memset(ctx, 0, sizeof(*ctx));
+
+ /* Save sequence */
+ ctx->seq = seq;
+
+ /* Allocate a response payload DMA buffer from the heap */
+ ctx->payload.phys = heap_phys_base + heap_offset;
+ ctx->payload.virt = heap_virt_base + heap_offset;
+ ctx->payload.size = payload_len;
+ ctx->payload.len = payload_len;
+ heap_offset += payload_len;
+ ocs_assert(heap_offset <= heap_size, -1);
+
+ /* Copy the payload in */
+ ocs_memcpy(ctx->payload.virt, payload, payload_len);
+
+ /* Send */
+ rc = ocs_hw_send_frame(&ocs->hw, (void*)&hdr, FC_SOFI3, FC_EOFT, &ctx->payload, ctx,
+ ocs_sframe_common_send_cb, ctx);
+ if (rc) {
+ ocs_log_test(ocs, "ocs_hw_send_frame failed: %d\n", rc);
+ }
+
+ return rc ? -1 : 0;
+}
+
+/**
+ * @brief Send FCP response using SEND_FRAME
+ *
+ * The FCP response is sent using the SEND_FRAME function.
+ *
+ * @param node Pointer to node object.
+ * @param seq Pointer to inbound sequence.
+ * @param rsp Pointer to response data.
+ * @param rsp_len Length of response data, in bytes.
+ *
+ * @return Returns 0 on success, or a negative error code value on failure.
+ */
+static int32_t
+ocs_sframe_send_fcp_rsp(ocs_node_t *node, ocs_hw_sequence_t *seq, void *rsp, uint32_t rsp_len)
+{
+ return ocs_sframe_common_send(node, seq,
+ FC_RCTL_FC4_DATA,
+ FC_RCTL_INFO_CMD_STATUS,
+ FC_FCTL_EXCHANGE_RESPONDER |
+ FC_FCTL_LAST_SEQUENCE |
+ FC_FCTL_END_SEQUENCE |
+ FC_FCTL_SEQUENCE_INITIATIVE,
+ FC_TYPE_FCP,
+ rsp, rsp_len);
+}
+
+/**
+ * @brief Send task set full response
+ *
+ * Return a task set full or busy response using send frame.
+ *
+ * @param node Pointer to node object.
+ * @param seq Pointer to originator frame sequence.
+ *
+ * @return Returns 0 on success, or a negative error code value on failure.
+ */
+static int32_t
+ocs_sframe_send_task_set_full_or_busy(ocs_node_t *node, ocs_hw_sequence_t *seq)
+{
+ fcp_rsp_iu_t fcprsp;
+ fcp_cmnd_iu_t *fcpcmd = seq->payload->dma.virt;
+ uint32_t *fcp_dl_ptr;
+ uint32_t fcp_dl;
+ int32_t rc = 0;
+
+	/* extract FCP_DL from the FCP command */
+ fcp_dl_ptr = (uint32_t*)(&(fcpcmd->fcp_cdb_and_dl));
+ fcp_dl_ptr += fcpcmd->additional_fcp_cdb_length;
+ fcp_dl = ocs_be32toh(*fcp_dl_ptr);
+
+ /* construct task set full or busy response */
+ ocs_memset(&fcprsp, 0, sizeof(fcprsp));
+ ocs_lock(&node->active_ios_lock);
+ fcprsp.scsi_status = ocs_list_empty(&node->active_ios) ? SCSI_STATUS_BUSY : SCSI_STATUS_TASK_SET_FULL;
+ ocs_unlock(&node->active_ios_lock);
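+	/* No data is transferred for a BUSY/TASK SET FULL response, so the
+	 * entire FCP_DL is reported back as the residual.
+	 */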
+ *((uint32_t*)&fcprsp.fcp_resid) = fcp_dl;
+
+ /* send it using send_frame */
+ rc = ocs_sframe_send_fcp_rsp(node, seq, &fcprsp, sizeof(fcprsp) - sizeof(fcprsp.data));
+ if (rc) {
+ ocs_log_test(node->ocs, "ocs_sframe_send_fcp_rsp failed: %d\n", rc);
+ }
+ return rc;
+}
+
+/**
+ * @brief Send BA_ACC using SEND_FRAME
+ *
+ * A BA_ACC is sent using SEND_FRAME
+ *
+ * @param node Pointer to node object.
+ * @param seq Pointer to originator frame sequence.
+ *
+ * @return Returns 0 on success, or a negative error code value on failure.
+ */
+int32_t
+ocs_sframe_send_bls_acc(ocs_node_t *node, ocs_hw_sequence_t *seq)
+{
+ fc_header_t *behdr = seq->header->dma.virt;
+ uint16_t ox_id = ocs_be16toh(behdr->ox_id);
+ uint16_t rx_id = ocs_be16toh(behdr->rx_id);
+ fc_ba_acc_payload_t acc = {0};
+
+ acc.ox_id = ocs_htobe16(ox_id);
+ acc.rx_id = ocs_htobe16(rx_id);
+ acc.low_seq_cnt = UINT16_MAX;
+ acc.high_seq_cnt = UINT16_MAX;
+
+ return ocs_sframe_common_send(node, seq,
+ FC_RCTL_BLS,
+ FC_RCTL_INFO_UNSOL_DATA,
+ FC_FCTL_EXCHANGE_RESPONDER |
+ FC_FCTL_LAST_SEQUENCE |
+ FC_FCTL_END_SEQUENCE,
+ FC_TYPE_BASIC_LINK,
+ &acc, sizeof(acc));
+}
diff --git a/sys/dev/ocs_fc/ocs_unsol.h b/sys/dev/ocs_fc/ocs_unsol.h
new file mode 100644
index 000000000000..a36b2299f4eb
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs_unsol.h
@@ -0,0 +1,53 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/**
+ * @file
+ * Declarations for the interface exported by ocs_unsol.c
+ */
+
+#if !defined(__OCS_UNSOL_H__)
+#define __OCS_UNSOL_H__
+
+extern int32_t ocs_unsol_rq_thread(ocs_thread_t *mythread);
+extern int32_t ocs_unsolicited_cb(void *arg, ocs_hw_sequence_t *seq);
+extern int32_t ocs_node_purge_pending(ocs_node_t *node);
+extern int32_t ocs_process_node_pending(ocs_node_t *node);
+extern int32_t ocs_domain_process_pending(ocs_domain_t *domain);
+extern int32_t ocs_domain_purge_pending(ocs_domain_t *domain);
+extern int32_t ocs_dispatch_unsolicited_bls(ocs_node_t *node, ocs_hw_sequence_t *seq);
+extern void ocs_domain_hold_frames(ocs_domain_t *domain);
+extern void ocs_domain_accept_frames(ocs_domain_t *domain);
+extern void ocs_seq_coalesce_cleanup(ocs_hw_io_t *hio, uint8_t abort_io);
+extern int32_t ocs_sframe_send_bls_acc(ocs_node_t *node, ocs_hw_sequence_t *seq);
+#endif
diff --git a/sys/dev/ocs_fc/ocs_utils.c b/sys/dev/ocs_fc/ocs_utils.c
new file mode 100644
index 000000000000..f6f74302835e
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs_utils.c
@@ -0,0 +1,2826 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/**
+ * @file
+ *
+ */
+
+#include "ocs.h"
+#include "ocs_os.h"
+
+#define DEFAULT_SLAB_LEN (64*1024)
+
+struct ocs_array_s {
+ ocs_os_handle_t os;
+
+ uint32_t size;
+ uint32_t count;
+
+ uint32_t n_rows;
+ uint32_t elems_per_row;
+ uint32_t bytes_per_row;
+
+ void **array_rows;
+ uint32_t array_rows_len;
+};
+
+static uint32_t slab_len = DEFAULT_SLAB_LEN;
+
+/**
+ * @brief Set array slab allocation length
+ *
+ * The slab length is the maximum allocation length that the array uses.
+ * The default 64k slab length may be overridden using this function.
+ *
+ * @param len new slab length.
+ *
+ * @return none
+ */
+void
+ocs_array_set_slablen(uint32_t len)
+{
+ slab_len = len;
+}
+
+/**
+ * @brief Allocate an array object
+ *
+ * An array object of size and number of elements is allocated
+ *
+ * @param os OS handle
+ * @param size size of array elements in bytes
+ * @param count number of elements in array
+ *
+ * @return pointer to array object or NULL
+ */
+ocs_array_t *
+ocs_array_alloc(ocs_os_handle_t os, uint32_t size, uint32_t count)
+{
+ ocs_array_t *array = NULL;
+ uint32_t i;
+
+	/* Fail if the item size exceeds slab_len - the caller should increase
+	 * the slab length via ocs_array_set_slablen(), or not use this API.
+ */
+ if (size > slab_len) {
+ ocs_log_err(NULL, "Error: size exceeds slab length\n");
+ return NULL;
+ }
+
+ array = ocs_malloc(os, sizeof(*array), OCS_M_ZERO | OCS_M_NOWAIT);
+ if (array == NULL) {
+ return NULL;
+ }
+
+ array->os = os;
+ array->size = size;
+ array->count = count;
+ array->elems_per_row = slab_len / size;
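+	/* ceiling division: enough rows to hold all 'count' elements */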
+ array->n_rows = (count + array->elems_per_row - 1) / array->elems_per_row;
+ array->bytes_per_row = array->elems_per_row * array->size;
+
+ array->array_rows_len = array->n_rows * sizeof(*array->array_rows);
+ array->array_rows = ocs_malloc(os, array->array_rows_len, OCS_M_ZERO | OCS_M_NOWAIT);
+ if (array->array_rows == NULL) {
+ ocs_array_free(array);
+ return NULL;
+ }
+ for (i = 0; i < array->n_rows; i++) {
+ array->array_rows[i] = ocs_malloc(os, array->bytes_per_row, OCS_M_ZERO | OCS_M_NOWAIT);
+ if (array->array_rows[i] == NULL) {
+ ocs_array_free(array);
+ return NULL;
+ }
+ }
+
+ return array;
+}
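+
+/*
+ * Usage sketch (illustrative; foo_t is a hypothetical element type):
+ *
+ *	ocs_array_t *a = ocs_array_alloc(os, sizeof(foo_t), 1024);
+ *	if (a != NULL) {
+ *		foo_t *elem = ocs_array_get(a, 42);	-- NULL if index >= count
+ *		...
+ *		ocs_array_free(a);
+ *	}
+ */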
+
+/**
+ * @brief Free an array object
+ *
+ * Frees a previously allocated array object
+ *
+ * @param array pointer to array object
+ *
+ * @return none
+ */
+void
+ocs_array_free(ocs_array_t *array)
+{
+ uint32_t i;
+
+ if (array != NULL) {
+ if (array->array_rows != NULL) {
+ for (i = 0; i < array->n_rows; i++) {
+ if (array->array_rows[i] != NULL) {
+ ocs_free(array->os, array->array_rows[i], array->bytes_per_row);
+ }
+ }
+ ocs_free(array->os, array->array_rows, array->array_rows_len);
+ }
+ ocs_free(array->os, array, sizeof(*array));
+ }
+}
+
+/**
+ * @brief Return reference to an element of an array object
+ *
+ * Return the address of an array element given an index
+ *
+ * @param array pointer to array object
+ * @param idx array element index
+ *
+ * @return pointer to array element, or NULL if index out of range
+ */
+void *ocs_array_get(ocs_array_t *array, uint32_t idx)
+{
+ void *entry = NULL;
+
+ if (idx < array->count) {
+ uint32_t row = idx / array->elems_per_row;
+ uint32_t offset = idx % array->elems_per_row;
+ entry = ((uint8_t*)array->array_rows[row]) + (offset * array->size);
+ }
+ return entry;
+}
+
+/**
+ * @brief Return number of elements in an array
+ *
+ * Return the number of elements in an array
+ *
+ * @param array pointer to array object
+ *
+ * @return returns count of elements in an array
+ */
+uint32_t
+ocs_array_get_count(ocs_array_t *array)
+{
+ return array->count;
+}
+
+/**
+ * @brief Return size of array elements in bytes
+ *
+ * Returns the size in bytes of each array element
+ *
+ * @param array pointer to array object
+ *
+ * @return size of array element
+ */
+uint32_t
+ocs_array_get_size(ocs_array_t *array)
+{
+ return array->size;
+}
+
+/**
+ * @brief Void pointer array structure
+ *
+ * This structure describes an object consisting of an array of void
+ * pointers. The object is allocated with a maximum array size; entries
+ * are then added to the array while an entry count is maintained. A set of
+ * iterator APIs is included to facilitate cycling through the array
+ * entries in a circular fashion.
+ *
+ */
+struct ocs_varray_s {
+ ocs_os_handle_t os;
+	uint32_t array_count;		/*<< maximum entry count in array */
+	void **array;			/*<< pointer to allocated array memory */
+	uint32_t entry_count;		/*<< number of entries added to the array */
+	uint32_t next_index;		/*<< iterator next index */
+	ocs_lock_t lock;		/*<< iterator lock */
+};
+
+/**
+ * @brief Allocate a void pointer array
+ *
+ * A void pointer array of given length is allocated.
+ *
+ * @param os OS handle
+ * @param array_count Array size
+ *
+ * @return returns a pointer to the ocs_varray_t object, or NULL on error
+ */
+ocs_varray_t *
+ocs_varray_alloc(ocs_os_handle_t os, uint32_t array_count)
+{
+ ocs_varray_t *va;
+
+ va = ocs_malloc(os, sizeof(*va), OCS_M_ZERO | OCS_M_NOWAIT);
+ if (va != NULL) {
+ va->os = os;
+ va->array_count = array_count;
+ va->array = ocs_malloc(os, sizeof(*va->array) * va->array_count, OCS_M_ZERO | OCS_M_NOWAIT);
+ if (va->array != NULL) {
+ va->next_index = 0;
+ ocs_lock_init(os, &va->lock, "varray:%p", va);
+ } else {
+ ocs_free(os, va, sizeof(*va));
+ va = NULL;
+ }
+ }
+ return va;
+}
+
+/**
+ * @brief Free a void pointer array
+ *
+ * The void pointer array object is freed
+ *
+ * @param va Pointer to void pointer array
+ *
+ * @return none
+ */
+void
+ocs_varray_free(ocs_varray_t *va)
+{
+ if (va != NULL) {
+ ocs_lock_free(&va->lock);
+ if (va->array != NULL) {
+ ocs_free(va->os, va->array, sizeof(*va->array) * va->array_count);
+ }
+ ocs_free(va->os, va, sizeof(*va));
+ }
+}
+
+/**
+ * @brief Add an entry to a void pointer array
+ *
+ * An entry is added to the void pointer array
+ *
+ * @param va Pointer to void pointer array
+ * @param entry Pointer to entry to add
+ *
+ * @return returns 0 if entry was added, -1 if there is no more space in the array
+ */
+int32_t
+ocs_varray_add(ocs_varray_t *va, void *entry)
+{
+ uint32_t rc = -1;
+
+ ocs_lock(&va->lock);
+ if (va->entry_count < va->array_count) {
+ va->array[va->entry_count++] = entry;
+ rc = 0;
+ }
+ ocs_unlock(&va->lock);
+
+ return rc;
+}
+
+/**
+ * @brief Reset the void pointer array iterator
+ *
+ * The next index value of the void pointer array iterator is cleared.
+ *
+ * @param va Pointer to void pointer array
+ *
+ * @return none
+ */
+void
+ocs_varray_iter_reset(ocs_varray_t *va)
+{
+ ocs_lock(&va->lock);
+ va->next_index = 0;
+ ocs_unlock(&va->lock);
+}
+
+/**
+ * @brief Return next entry from a void pointer array
+ *
+ * The next entry in the void pointer array is returned.
+ *
+ * @param va Pointer to void pointer array
+ *
+ * Note: takes the void pointer array lock
+ *
+ * @return returns next void pointer entry
+ */
+void *
+ocs_varray_iter_next(ocs_varray_t *va)
+{
+ void *rval = NULL;
+
+ if (va != NULL) {
+ ocs_lock(&va->lock);
+ rval = _ocs_varray_iter_next(va);
+ ocs_unlock(&va->lock);
+ }
+ return rval;
+}
+
+/**
+ * @brief Return next entry from a void pointer array
+ *
+ * The next entry in the void pointer array is returned.
+ *
+ * @param va Pointer to void pointer array
+ *
+ * Note: doesn't take the void pointer array lock
+ *
+ * @return returns next void pointer entry
+ */
+void *
+_ocs_varray_iter_next(ocs_varray_t *va)
+{
+ void *rval;
+
+ rval = va->array[va->next_index];
+ if (++va->next_index >= va->entry_count) {
+ va->next_index = 0;
+ }
+ return rval;
+}
+
+/**
+ * @brief Take void pointer array lock
+ *
+ * Takes the lock for the given void pointer array
+ *
+ * @param va Pointer to void pointer array
+ *
+ * @return none
+ */
+void
+ocs_varray_lock(ocs_varray_t *va)
+{
+ ocs_lock(&va->lock);
+}
+
+/**
+ * @brief Release void pointer array lock
+ *
+ * Releases the lock for the given void pointer array
+ *
+ * @param va Pointer to void pointer array
+ *
+ * @return none
+ */
+void
+ocs_varray_unlock(ocs_varray_t *va)
+{
+ ocs_unlock(&va->lock);
+}
+
+/**
+ * @brief Return entry count for a void pointer array
+ *
+ * The entry count for a void pointer array is returned
+ *
+ * @param va Pointer to void pointer array
+ *
+ * @return returns entry count
+ */
+uint32_t
+ocs_varray_get_count(ocs_varray_t *va)
+{
+ uint32_t rc;
+
+ ocs_lock(&va->lock);
+ rc = va->entry_count;
+ ocs_unlock(&va->lock);
+ return rc;
+}
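+
+/*
+ * Example usage (a hypothetical sketch; "os" is an OS handle, "obj" an
+ * arbitrary object pointer, and error handling is elided):
+ *
+ *	ocs_varray_t *va = ocs_varray_alloc(os, 16);
+ *	ocs_varray_add(va, obj);
+ *	ocs_varray_iter_reset(va);
+ *	obj = ocs_varray_iter_next(va);	(cycles round-robin through entries)
+ *	ocs_varray_free(va);
+ */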
+
+
+struct ocs_cbuf_s {
+ ocs_os_handle_t os; /*<< OS handle */
+ uint32_t entry_count; /*<< entry count */
+ void **array; /*<< pointer to array of cbuf pointers */
+ uint32_t pidx; /*<< producer index */
+ uint32_t cidx; /*<< consumer index */
+ ocs_lock_t cbuf_plock; /*<< idx lock */
+ ocs_lock_t cbuf_clock; /*<< idx lock */
+ ocs_sem_t cbuf_psem; /*<< cbuf producer counting semaphore */
+ ocs_sem_t cbuf_csem; /*<< cbuf consumer counting semaphore */
+};
+
+/**
+ * @brief Initialize a circular buffer queue
+ *
+ * A circular buffer with producer/consumer API is allocated
+ *
+ * @param os OS handle
+ * @param entry_count count of entries
+ *
+ * @return returns pointer to circular buffer, or NULL
+ */
+ocs_cbuf_t*
+ocs_cbuf_alloc(ocs_os_handle_t os, uint32_t entry_count)
+{
+ ocs_cbuf_t *cbuf;
+
+ cbuf = ocs_malloc(os, sizeof(*cbuf), OCS_M_NOWAIT | OCS_M_ZERO);
+ if (cbuf == NULL) {
+ return NULL;
+ }
+
+ cbuf->os = os;
+ cbuf->entry_count = entry_count;
+ cbuf->pidx = 0;
+ cbuf->cidx = 0;
+
+ ocs_lock_init(NULL, &cbuf->cbuf_clock, "cbuf_c:%p", cbuf);
+ ocs_lock_init(NULL, &cbuf->cbuf_plock, "cbuf_p:%p", cbuf);
+ ocs_sem_init(&cbuf->cbuf_csem, 0, "cbuf:%p", cbuf);
+ ocs_sem_init(&cbuf->cbuf_psem, cbuf->entry_count, "cbuf:%p", cbuf);
+
+ cbuf->array = ocs_malloc(os, entry_count * sizeof(*cbuf->array), OCS_M_NOWAIT | OCS_M_ZERO);
+ if (cbuf->array == NULL) {
+ ocs_cbuf_free(cbuf);
+ return NULL;
+ }
+
+ return cbuf;
+}
+
+/**
+ * @brief Free a circular buffer
+ *
+ * The memory resources of a circular buffer are free'd
+ *
+ * @param cbuf pointer to circular buffer
+ *
+ * @return none
+ */
+void
+ocs_cbuf_free(ocs_cbuf_t *cbuf)
+{
+ if (cbuf != NULL) {
+ if (cbuf->array != NULL) {
+ ocs_free(cbuf->os, cbuf->array, sizeof(*cbuf->array) * cbuf->entry_count);
+ }
+ ocs_lock_free(&cbuf->cbuf_clock);
+ ocs_lock_free(&cbuf->cbuf_plock);
+ ocs_free(cbuf->os, cbuf, sizeof(*cbuf));
+ }
+}
+
+/**
+ * @brief Get pointer to buffer
+ *
+ * Wait for a buffer to become available, and return a pointer to the buffer.
+ *
+ * @param cbuf pointer to circular buffer
+ * @param timeout_usec timeout in microseconds
+ *
+ * @return pointer to buffer, or NULL if timeout
+ */
+void*
+ocs_cbuf_get(ocs_cbuf_t *cbuf, int32_t timeout_usec)
+{
+ void *ret = NULL;
+
+ if (likely(ocs_sem_p(&cbuf->cbuf_csem, timeout_usec) == 0)) {
+ ocs_lock(&cbuf->cbuf_clock);
+ ret = cbuf->array[cbuf->cidx];
+ if (unlikely(++cbuf->cidx >= cbuf->entry_count)) {
+ cbuf->cidx = 0;
+ }
+ ocs_unlock(&cbuf->cbuf_clock);
+ ocs_sem_v(&cbuf->cbuf_psem);
+ }
+ return ret;
+}
+
+/**
+ * @brief write a buffer
+ *
+ * The buffer is written to the circular buffer.
+ *
+ * @param cbuf pointer to circular buffer
+ * @param elem pointer to entry
+ *
+ * @return returns 0 for success, a negative error code value for failure.
+ */
+int32_t
+ocs_cbuf_put(ocs_cbuf_t *cbuf, void *elem)
+{
+ int32_t rc = 0;
+
+ if (likely(ocs_sem_p(&cbuf->cbuf_psem, -1) == 0)) {
+ ocs_lock(&cbuf->cbuf_plock);
+ cbuf->array[cbuf->pidx] = elem;
+ if (unlikely(++cbuf->pidx >= cbuf->entry_count)) {
+ cbuf->pidx = 0;
+ }
+ ocs_unlock(&cbuf->cbuf_plock);
+ ocs_sem_v(&cbuf->cbuf_csem);
+ } else {
+ rc = -1;
+ }
+ return rc;
+}
+
+/**
+ * @brief Prime a circular buffer with data
+ *
+ * Post array buffers to a circular buffer
+ *
+ * @param cbuf pointer to circular buffer
+ * @param array pointer to buffer array
+ *
+ * @return returns 0 for success, a negative error code value for failure.
+ */
+int32_t
+ocs_cbuf_prime(ocs_cbuf_t *cbuf, ocs_array_t *array)
+{
+ uint32_t i;
+ uint32_t count = MIN(ocs_array_get_count(array), cbuf->entry_count);
+
+ for (i = 0; i < count; i++) {
+ ocs_cbuf_put(cbuf, ocs_array_get(array, i));
+ }
+ return 0;
+}
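+
+/*
+ * Example usage (hypothetical sketch): buffers are primed into the circular
+ * buffer once, then repeatedly consumed and returned by producer/consumer
+ * contexts ("os", "buf_array" and the 10000 us timeout are illustrative):
+ *
+ *	ocs_cbuf_t *cbuf = ocs_cbuf_alloc(os, 32);
+ *	ocs_cbuf_prime(cbuf, buf_array);	(buf_array is an ocs_array_t)
+ *	buf = ocs_cbuf_get(cbuf, 10000);	(wait up to 10000 us)
+ *	... use buf ...
+ *	ocs_cbuf_put(cbuf, buf);		(return buf for reuse)
+ */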
+
+/**
+ * @brief Generate driver dump start of file information
+ *
+ * The start of file information is added to 'textbuf'
+ *
+ * @param textbuf pointer to driver dump text buffer
+ *
+ * @return none
+ */
+
+void
+ocs_ddump_startfile(ocs_textbuf_t *textbuf)
+{
+ ocs_textbuf_printf(textbuf, "<?xml version=\"1.0\" encoding=\"ISO-8859-1\" ?>\n");
+}
+
+/**
+ * @brief Generate driver dump end of file information
+ *
+ * The end of file information is added to 'textbuf'
+ *
+ * @param textbuf pointer to driver dump text buffer
+ *
+ * @return none
+ */
+
+void
+ocs_ddump_endfile(ocs_textbuf_t *textbuf)
+{
+}
+
+/**
+ * @brief Generate driver dump section start data
+ *
+ * The driver section start information is added to textbuf
+ *
+ * @param textbuf pointer to text buffer
+ * @param name name of section
+ * @param instance instance number of this section
+ *
+ * @return none
+ */
+
+void
+ocs_ddump_section(ocs_textbuf_t *textbuf, const char *name, uint32_t instance)
+{
+ ocs_textbuf_printf(textbuf, "<%s type=\"section\" instance=\"%d\">\n", name, instance);
+}
+
+/**
+ * @brief Generate driver dump section end data
+ *
+ * The driver section end information is added to textbuf
+ *
+ * @param textbuf pointer to text buffer
+ * @param name name of section
+ * @param instance instance number of this section
+ *
+ * @return none
+ */
+
+void
+ocs_ddump_endsection(ocs_textbuf_t *textbuf, const char *name, uint32_t instance)
+{
+ ocs_textbuf_printf(textbuf, "</%s>\n", name);
+}
+
+/**
+ * @brief Generate driver dump data for a given value
+ *
+ * A value is added to textbuf
+ *
+ * @param textbuf pointer to text buffer
+ * @param name name of variable
+ * @param fmt snprintf format specifier
+ *
+ * @return none
+ */
+
+void
+ocs_ddump_value(ocs_textbuf_t *textbuf, const char *name, const char *fmt, ...)
+{
+ va_list ap;
+ char valuebuf[64];
+
+ va_start(ap, fmt);
+ vsnprintf(valuebuf, sizeof(valuebuf), fmt, ap);
+ va_end(ap);
+
+ ocs_textbuf_printf(textbuf, "<%s>%s</%s>\n", name, valuebuf, name);
+}
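+
+/*
+ * Example (illustrative names and values): the calls
+ *
+ *	ocs_ddump_section(textbuf, "node", 2);
+ *	ocs_ddump_value(textbuf, "display_name", "%s", "node-2");
+ *	ocs_ddump_endsection(textbuf, "node", 2);
+ *
+ * emit XML of the form:
+ *
+ *	<node type="section" instance="2">
+ *	<display_name>node-2</display_name>
+ *	</node>
+ */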
+
+
+/**
+ * @brief Generate driver dump data for an arbitrary buffer of DWORDS
+ *
+ * A status value is added to textbuf
+ *
+ * @param textbuf pointer to text buffer
+ * @param name name of status variable
+ * @param instance instance number of this section
+ * @param buffer buffer to print
+ * @param size size of buffer in bytes
+ *
+ * @return none
+ */
+
+void
+ocs_ddump_buffer(ocs_textbuf_t *textbuf, const char *name, uint32_t instance, void *buffer, uint32_t size)
+{
+ uint32_t *dword;
+ uint32_t i;
+ uint32_t count;
+
+ count = size / sizeof(uint32_t);
+
+ if (count == 0) {
+ return;
+ }
+
+ ocs_textbuf_printf(textbuf, "<%s type=\"buffer\" instance=\"%d\">\n", name, instance);
+
+ dword = buffer;
+ for (i = 0; i < count; i++) {
+#define OCS_NEWLINE_MOD 8
+ ocs_textbuf_printf(textbuf, "%08x ", *dword++);
+ if ((i % OCS_NEWLINE_MOD) == (OCS_NEWLINE_MOD - 1)) {
+ ocs_textbuf_printf(textbuf, "\n");
+ }
+ }
+
+ ocs_textbuf_printf(textbuf, "</%s>\n", name);
+}
+
+/**
+ * @brief Generate driver dump for queue
+ *
+ * Add queue elements to text buffer
+ *
+ * @param textbuf pointer to driver dump text buffer
+ * @param q_addr address of start of queue
+ * @param size size of each queue entry
+ * @param length number of queue entries in the queue
+ * @param index current index of queue
+ * @param qentries number of most recent queue entries to dump
+ *
+ * @return none
+ */
+
+void
+ocs_ddump_queue_entries(ocs_textbuf_t *textbuf, void *q_addr, uint32_t size,
+ uint32_t length, int32_t index, uint32_t qentries)
+{
+ uint32_t i;
+ uint32_t j;
+ uint8_t *entry;
+ uint32_t *dword;
+ uint32_t entry_count = 0;
+ uint32_t entry_words = size / sizeof(uint32_t);
+
+ if ((qentries == (uint32_t)-1) || (qentries > length)) {
+ /* if qentries is -1 or larger than queue size, dump entire queue */
+ entry_count = length;
+ index = 0;
+ } else {
+ entry_count = qentries;
+
+ index -= (qentries - 1);
+ if (index < 0) {
+ index += length;
+ }
+	}
+#define OCS_NEWLINE_MOD 8
+ ocs_textbuf_printf(textbuf, "<qentries>\n");
+ for (i = 0; i < entry_count; i++){
+ entry = q_addr;
+ entry += index * size;
+ dword = (uint32_t *)entry;
+
+ ocs_textbuf_printf(textbuf, "[%04x] ", index);
+ for (j = 0; j < entry_words; j++) {
+ ocs_textbuf_printf(textbuf, "%08x ", *dword++);
+ if (((j+1) == entry_words) ||
+ ((j % OCS_NEWLINE_MOD) == (OCS_NEWLINE_MOD - 1))) {
+ ocs_textbuf_printf(textbuf, "\n");
+ if ((j+1) < entry_words) {
+ ocs_textbuf_printf(textbuf, " ");
+ }
+ }
+ }
+
+ index++;
+ if ((uint32_t)index >= length) {
+ index = 0;
+ }
+ }
+ ocs_textbuf_printf(textbuf, "</qentries>\n");
+}
+
+
+#define OCS_DEBUG_ENABLE(x) (x ? ~0 : 0)
+
+#define OCS_DEBUG_MASK \
+ (OCS_DEBUG_ENABLE(1) & OCS_DEBUG_ALWAYS) | \
+ (OCS_DEBUG_ENABLE(0) & OCS_DEBUG_ENABLE_MQ_DUMP) | \
+ (OCS_DEBUG_ENABLE(0) & OCS_DEBUG_ENABLE_CQ_DUMP) | \
+ (OCS_DEBUG_ENABLE(0) & OCS_DEBUG_ENABLE_WQ_DUMP) | \
+ (OCS_DEBUG_ENABLE(0) & OCS_DEBUG_ENABLE_EQ_DUMP) | \
+ (OCS_DEBUG_ENABLE(0) & OCS_DEBUG_ENABLE_SPARAM_DUMP)
+
+static uint32_t ocs_debug_mask = OCS_DEBUG_MASK;
+
+static int
+_isprint(int c) {
+ return ((c > 32) && (c < 127));
+}
+
+/**
+ * @ingroup debug
+ * @brief enable debug options
+ *
+ * Enables debug options by or-ing <b>mask</b> into the currently enabled
+ * debug mask.
+ *
+ * @param mask mask bits to enable
+ *
+ * @return none
+ */
+
+void ocs_debug_enable(uint32_t mask) {
+ ocs_debug_mask |= mask;
+}
+
+/**
+ * @ingroup debug
+ * @brief disable debug options
+ *
+ * Disables debug options by clearing the bits in <b>mask</b> from the
+ * currently enabled debug mask.
+ *
+ * @param mask mask bits to disable
+ *
+ * @return none
+ */
+
+void ocs_debug_disable(uint32_t mask) {
+ ocs_debug_mask &= ~mask;
+}
+
+/**
+ * @ingroup debug
+ * @brief return true if debug bits are enabled
+ *
+ * Returns true if the requested debug bits are set.
+ *
+ * @param mask debug bit mask
+ *
+ * @return true if corresponding bits are set
+ *
+ * @note Passing in a mask value of zero always returns true
+ */
+
+int ocs_debug_is_enabled(uint32_t mask) {
+ return (ocs_debug_mask & mask) == mask;
+}
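+
+/*
+ * Example (sketch): enable completion queue dumps at runtime, then test for
+ * them before doing any expensive formatting:
+ *
+ *	ocs_debug_enable(OCS_DEBUG_ENABLE_CQ_DUMP);
+ *	if (ocs_debug_is_enabled(OCS_DEBUG_ENABLE_CQ_DUMP)) {
+ *		... format and log the CQ entry ...
+ *	}
+ */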
+
+
+/**
+ * @ingroup debug
+ * @brief Dump 32 bit hex/ascii data
+ *
+ * Dumps a buffer of data as 32-bit hex and ASCII using ocs_log
+ *
+ * @param mask debug enable bits
+ * @param os os handle
+ * @param label text label for the display (may be NULL)
+ * @param buf pointer to data buffer
+ * @param buf_length length of data buffer
+ *
+ * @return none
+ *
+ */
+
+void
+ocs_dump32(uint32_t mask, ocs_os_handle_t os, const char *label, void *buf, uint32_t buf_length)
+{
+ uint32_t word_count = buf_length / sizeof(uint32_t);
+ uint32_t i;
+ uint32_t columns = 8;
+ uint32_t n;
+ uint32_t *wbuf;
+ char *cbuf;
+ uint32_t addr = 0;
+ char linebuf[200];
+ char *pbuf = linebuf;
+
+ if (!ocs_debug_is_enabled(mask))
+ return;
+
+ if (label)
+ ocs_log_debug(os, "%s\n", label);
+
+ wbuf = buf;
+ while (word_count > 0) {
+ pbuf = linebuf;
+ pbuf += ocs_snprintf(pbuf, sizeof(linebuf) - (pbuf-linebuf), "%08X: ", addr);
+
+ n = word_count;
+ if (n > columns)
+ n = columns;
+
+ for (i = 0; i < n; i ++)
+ pbuf += ocs_snprintf(pbuf, sizeof(linebuf) - (pbuf-linebuf), "%08X ", wbuf[i]);
+
+ for (; i < columns; i ++)
+ pbuf += ocs_snprintf(pbuf, sizeof(linebuf) - (pbuf-linebuf), "%8s ", "");
+
+ pbuf += ocs_snprintf(pbuf, sizeof(linebuf) - (pbuf-linebuf), " ");
+ cbuf = (char*)wbuf;
+ for (i = 0; i < n*sizeof(uint32_t); i ++)
+ pbuf += ocs_snprintf(pbuf, sizeof(linebuf) - (pbuf-linebuf), "%c", _isprint(cbuf[i]) ? cbuf[i] : '.');
+ pbuf += ocs_snprintf(pbuf, sizeof(linebuf) - (pbuf-linebuf), "\n");
+
+ ocs_log_debug(os, "%s", linebuf);
+
+ wbuf += n;
+ word_count -= n;
+ addr += n*sizeof(uint32_t);
+ }
+}
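+
+/*
+ * Example (sketch): dump received service parameters when SPARAM dumps are
+ * enabled. Each logged line has the form "AAAAAAAA: w0 w1 ... w7  ascii",
+ * i.e. a byte offset, up to eight hex words, then their ASCII rendering:
+ *
+ *	ocs_dump32(OCS_DEBUG_ENABLE_SPARAM_DUMP, ocs, "sparams", buf, len);
+ */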
+
+
+#if defined(OCS_DEBUG_QUEUE_HISTORY)
+
+/* each bit corresponds to word to capture */
+#define OCS_Q_HIST_WQE_WORD_MASK_DEFAULT (BIT(4) | BIT(6) | BIT(7) | BIT(9) | BIT(12))
+#define OCS_Q_HIST_TRECV_CONT_WQE_WORD_MASK (BIT(4) | BIT(5) | BIT(6) | BIT(7) | BIT(9) | BIT(12))
+#define OCS_Q_HIST_IWRITE_WQE_WORD_MASK (BIT(4) | BIT(5) | BIT(6) | BIT(7) | BIT(9))
+#define OCS_Q_HIST_IREAD_WQE_WORD_MASK (BIT(4) | BIT(6) | BIT(7) | BIT(9))
+#define OCS_Q_HIST_ABORT_WQE_WORD_MASK (BIT(3) | BIT(7) | BIT(8) | BIT(9))
+#define OCS_Q_HIST_WCQE_WORD_MASK (BIT(0) | BIT(3))
+#define OCS_Q_HIST_WCQE_WORD_MASK_ERR (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+#define OCS_Q_HIST_CQXABT_WORD_MASK (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+
+/* if set, will provide extra queue information in each entry */
+#define OCS_Q_HIST_ENABLE_Q_INFO 0
+uint8_t ocs_queue_history_q_info_enabled(void)
+{
+ return OCS_Q_HIST_ENABLE_Q_INFO;
+}
+
+/* if set, will provide timestamps in each entry */
+#define OCS_Q_HIST_ENABLE_TIMESTAMPS 0
+uint8_t ocs_queue_history_timestamp_enabled(void)
+{
+ return OCS_Q_HIST_ENABLE_TIMESTAMPS;
+}
+
+/* Add WQEs and masks to override default WQE mask */
+ocs_q_hist_wqe_mask_t ocs_q_hist_wqe_masks[] = {
+ /* WQE command Word mask */
+ {SLI4_WQE_ABORT, OCS_Q_HIST_ABORT_WQE_WORD_MASK},
+ {SLI4_WQE_FCP_IREAD64, OCS_Q_HIST_IREAD_WQE_WORD_MASK},
+ {SLI4_WQE_FCP_IWRITE64, OCS_Q_HIST_IWRITE_WQE_WORD_MASK},
+ {SLI4_WQE_FCP_CONT_TRECEIVE64, OCS_Q_HIST_TRECV_CONT_WQE_WORD_MASK},
+};
+
+/* CQE masks */
+ocs_q_hist_cqe_mask_t ocs_q_hist_cqe_masks[] = {
+ /* CQE type Q_hist_type mask (success) mask (non-success) */
+ {SLI_QENTRY_WQ, OCS_Q_HIST_TYPE_CWQE, OCS_Q_HIST_WCQE_WORD_MASK, OCS_Q_HIST_WCQE_WORD_MASK_ERR},
+ {SLI_QENTRY_XABT, OCS_Q_HIST_TYPE_CXABT, OCS_Q_HIST_CQXABT_WORD_MASK, OCS_Q_HIST_WCQE_WORD_MASK},
+};
+
+static uint32_t ocs_q_hist_get_wqe_mask(sli4_generic_wqe_t *wqe)
+{
+ uint32_t i;
+ for (i = 0; i < ARRAY_SIZE(ocs_q_hist_wqe_masks); i++) {
+ if (ocs_q_hist_wqe_masks[i].command == wqe->command) {
+ return ocs_q_hist_wqe_masks[i].mask;
+ }
+ }
+ /* return default WQE mask */
+ return OCS_Q_HIST_WQE_WORD_MASK_DEFAULT;
+}
+
+/**
+ * @ingroup debug
+ * @brief Initialize resources for queue history
+ *
+ * @param ocs Pointer to the driver structure.
+ * @param q_hist Pointer to the queue history object.
+ *
+ * @return none
+ */
+void
+ocs_queue_history_init(ocs_t *ocs, ocs_hw_q_hist_t *q_hist)
+{
+ q_hist->ocs = ocs;
+ if (q_hist->q_hist != NULL) {
+ /* Setup is already done */
+ ocs_log_debug(ocs, "q_hist not NULL, skipping init\n");
+ return;
+ }
+
+ q_hist->q_hist = ocs_malloc(ocs, sizeof(*q_hist->q_hist)*OCS_Q_HIST_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
+
+ if (q_hist->q_hist == NULL) {
+ ocs_log_err(ocs, "Could not allocate queue history buffer\n");
+ } else {
+ ocs_lock_init(ocs, &q_hist->q_hist_lock, "queue history lock[%d]", ocs_instance(ocs));
+ }
+
+ q_hist->q_hist_index = 0;
+}
+
+/**
+ * @ingroup debug
+ * @brief Free resources for queue history
+ *
+ * @param q_hist Pointer to the queue history object.
+ *
+ * @return none
+ */
+void
+ocs_queue_history_free(ocs_hw_q_hist_t *q_hist)
+{
+ ocs_t *ocs = q_hist->ocs;
+
+ if (q_hist->q_hist != NULL) {
+ ocs_free(ocs, q_hist->q_hist, sizeof(*q_hist->q_hist)*OCS_Q_HIST_SIZE);
+ ocs_lock_free(&q_hist->q_hist_lock);
+ q_hist->q_hist = NULL;
+ }
+}
+
+static void
+ocs_queue_history_add_q_info(ocs_hw_q_hist_t *q_hist, uint32_t qid, uint32_t qindex)
+{
+ if (ocs_queue_history_q_info_enabled()) {
+ /* write qid, index */
+ q_hist->q_hist[q_hist->q_hist_index] = (qid << 16) | qindex;
+ q_hist->q_hist_index++;
+ q_hist->q_hist_index = q_hist->q_hist_index % OCS_Q_HIST_SIZE;
+ }
+}
+
+static void
+ocs_queue_history_add_timestamp(ocs_hw_q_hist_t *q_hist)
+{
+ if (ocs_queue_history_timestamp_enabled()) {
+ /* write tsc */
+ uint64_t tsc_value;
+ tsc_value = get_cyclecount();
+ q_hist->q_hist[q_hist->q_hist_index] = ((tsc_value >> 32 ) & 0xFFFFFFFF);
+ q_hist->q_hist_index++;
+ q_hist->q_hist_index = q_hist->q_hist_index % OCS_Q_HIST_SIZE;
+ q_hist->q_hist[q_hist->q_hist_index] = (tsc_value & 0xFFFFFFFF);
+ q_hist->q_hist_index++;
+ q_hist->q_hist_index = q_hist->q_hist_index % OCS_Q_HIST_SIZE;
+ }
+}
+
+/**
+ * @ingroup debug
+ * @brief Log work queue entry (WQE) into history array
+ *
+ * @param q_hist Pointer to the queue history object.
+ * @param entryw Work queue entry in words
+ * @param qid Queue ID
+ * @param qindex Queue index
+ *
+ * @return none
+ */
+void
+ocs_queue_history_wq(ocs_hw_q_hist_t *q_hist, uint32_t *entryw, uint32_t qid, uint32_t qindex)
+{
+ int i;
+ ocs_q_hist_ftr_t ftr;
+ uint32_t wqe_word_mask = ocs_q_hist_get_wqe_mask((sli4_generic_wqe_t *)entryw);
+
+ if (q_hist->q_hist == NULL) {
+ /* Can't save anything */
+ return;
+ }
+
+ ftr.word = 0;
+ ftr.s.type = OCS_Q_HIST_TYPE_WQE;
+ ocs_lock(&q_hist->q_hist_lock);
+	/* Capture words in reverse order since we'll be interpreting them LIFO */
+ for (i = ((sizeof(wqe_word_mask)*8) - 1); i >= 0; i--){
+ if ((wqe_word_mask >> i) & 1) {
+ q_hist->q_hist[q_hist->q_hist_index] = entryw[i];
+ q_hist->q_hist_index++;
+ q_hist->q_hist_index = q_hist->q_hist_index % OCS_Q_HIST_SIZE;
+ }
+ }
+
+ ocs_queue_history_add_q_info(q_hist, qid, qindex);
+ ocs_queue_history_add_timestamp(q_hist);
+
+ /* write footer */
+ if (wqe_word_mask) {
+ ftr.s.mask = wqe_word_mask;
+ q_hist->q_hist[q_hist->q_hist_index] = ftr.word;
+ q_hist->q_hist_index++;
+ q_hist->q_hist_index = q_hist->q_hist_index % OCS_Q_HIST_SIZE;
+ }
+
+ ocs_unlock(&q_hist->q_hist_lock);
+}
+
+/**
+ * @ingroup debug
+ * @brief Log misc words
+ *
+ * @param q_hist Pointer to the queue history object.
+ * @param entryw array of words
+ * @param num_words number of words in entryw
+ *
+ * @return none
+ */
+void
+ocs_queue_history_misc(ocs_hw_q_hist_t *q_hist, uint32_t *entryw, uint32_t num_words)
+{
+ int i;
+ ocs_q_hist_ftr_t ftr;
+ uint32_t mask = 0;
+
+ if (q_hist->q_hist == NULL) {
+ /* Can't save anything */
+ return;
+ }
+
+ ftr.word = 0;
+ ftr.s.type = OCS_Q_HIST_TYPE_MISC;
+ ocs_lock(&q_hist->q_hist_lock);
+	/* Capture words in reverse order since we'll be interpreting them LIFO */
+ for (i = num_words-1; i >= 0; i--) {
+ q_hist->q_hist[q_hist->q_hist_index] = entryw[i];
+ q_hist->q_hist_index++;
+ q_hist->q_hist_index = q_hist->q_hist_index % OCS_Q_HIST_SIZE;
+ mask |= BIT(i);
+ }
+
+ ocs_queue_history_add_timestamp(q_hist);
+
+ /* write footer */
+ if (num_words) {
+ ftr.s.mask = mask;
+ q_hist->q_hist[q_hist->q_hist_index] = ftr.word;
+ q_hist->q_hist_index++;
+ q_hist->q_hist_index = q_hist->q_hist_index % OCS_Q_HIST_SIZE;
+ }
+
+ ocs_unlock(&q_hist->q_hist_lock);
+}
+
+/**
+ * @ingroup debug
+ * @brief Log work queue completion (CQE) entry into history
+ * array
+ *
+ * @param q_hist Pointer to the queue history object.
+ * @param ctype Type of completion entry
+ * @param entryw Completion queue entry in words
+ * @param status Completion queue status
+ * @param qid Queue ID
+ * @param qindex Queue index
+ *
+ * @return none
+ */
+void
+ocs_queue_history_cqe(ocs_hw_q_hist_t *q_hist, uint8_t ctype, uint32_t *entryw, uint8_t status, uint32_t qid, uint32_t qindex)
+{
+ int i;
+ unsigned j;
+ uint32_t cqe_word_mask = 0;
+ ocs_q_hist_ftr_t ftr;
+
+ if (q_hist->q_hist == NULL) {
+ /* Can't save anything */
+ return;
+ }
+
+ ftr.word = 0;
+ for (j = 0; j < ARRAY_SIZE(ocs_q_hist_cqe_masks); j++) {
+ if (ocs_q_hist_cqe_masks[j].ctype == ctype) {
+ ftr.s.type = ocs_q_hist_cqe_masks[j].type;
+ if (status != 0) {
+ cqe_word_mask = ocs_q_hist_cqe_masks[j].mask_err;
+ } else {
+ cqe_word_mask = ocs_q_hist_cqe_masks[j].mask;
+ }
+ }
+ }
+ ocs_lock(&q_hist->q_hist_lock);
+	/* Capture words in reverse order since we'll be interpreting them LIFO */
+ for (i = ((sizeof(cqe_word_mask)*8) - 1); i >= 0; i--){
+ if ((cqe_word_mask >> i) & 1) {
+ q_hist->q_hist[q_hist->q_hist_index] = entryw[i];
+ q_hist->q_hist_index++;
+ q_hist->q_hist_index = q_hist->q_hist_index % OCS_Q_HIST_SIZE;
+ }
+ }
+ ocs_queue_history_add_q_info(q_hist, qid, qindex);
+ ocs_queue_history_add_timestamp(q_hist);
+
+ /* write footer */
+ if (cqe_word_mask) {
+ ftr.s.mask = cqe_word_mask;
+ q_hist->q_hist[q_hist->q_hist_index] = ftr.word;
+ q_hist->q_hist_index++;
+ q_hist->q_hist_index = q_hist->q_hist_index % OCS_Q_HIST_SIZE;
+ }
+
+ ocs_unlock(&q_hist->q_hist_lock);
+}
+
+/**
+ * @brief Get previous index
+ *
+ * @param index Index from which previous index is derived.
+ */
+uint32_t
+ocs_queue_history_prev_index(uint32_t index)
+{
+ if (index == 0) {
+ return OCS_Q_HIST_SIZE - 1;
+ } else {
+ return index - 1;
+ }
+}
+
+#endif /* OCS_DEBUG_QUEUE_HISTORY */
+
+/**
+ * @brief Display service parameters
+ *
+ * The service parameters are displayed, either to the driver log or to a
+ * driver dump text buffer.
+ *
+ * @param prelabel leading display label
+ * @param reqlabel display label
+ * @param dest destination 0=ocs_log, 1=textbuf
+ * @param textbuf text buffer destination (if dest==1)
+ * @param sparams pointer to service parameter
+ *
+ * @return none
+ */
+
+void
+ocs_display_sparams(const char *prelabel, const char *reqlabel, int dest, void *textbuf, void *sparams)
+{
+ char label[64];
+
+ if (sparams == NULL) {
+ return;
+ }
+
+ switch(dest) {
+ case 0:
+ if (prelabel != NULL) {
+ ocs_snprintf(label, sizeof(label), "[%s] sparam: %s", prelabel, reqlabel);
+ } else {
+ ocs_snprintf(label, sizeof(label), "sparam: %s", reqlabel);
+ }
+
+ ocs_dump32(OCS_DEBUG_ENABLE_SPARAM_DUMP, NULL, label, sparams, sizeof(fc_plogi_payload_t));
+ break;
+ case 1:
+ ocs_ddump_buffer((ocs_textbuf_t*) textbuf, reqlabel, 0, sparams, sizeof(fc_plogi_payload_t));
+ break;
+ }
+}
+
+/**
+ * @brief Calculate the T10 PI CRC guard value for a block.
+ *
+ * @param buffer Pointer to the data buffer.
+ * @param size Number of bytes.
+ * @param crc Previously-calculated CRC, or 0 for a new block.
+ *
+ * @return Returns the calculated CRC, which may be passed back in for partial blocks.
+ *
+ */
+
+uint16_t
+ocs_scsi_dif_calc_crc(const uint8_t *buffer, uint32_t size, uint16_t crc)
+{
+ return t10crc16(buffer, size, crc);
+}
+
+/**
+ * @brief Calculate the IP-checksum guard value for a block.
+ *
+ * @param addrlen array of address length pairs
+ * @param addrlen_count number of entries in the addrlen[] array
+ *
+ * Algorithm:
+ * Sum all the 16-bit words in the block
+ * Add in the "carry", which is everything in excess of 16 bits
+ * Flip all the bits
+ *
+ * @return Returns the calculated checksum
+ */
+
+uint16_t
+ocs_scsi_dif_calc_checksum(ocs_scsi_vaddr_len_t addrlen[], uint32_t addrlen_count)
+{
+ uint32_t i, j;
+ uint16_t checksum;
+ uint32_t intermediate; /* Use an intermediate to hold more than 16 bits during calculations */
+ uint32_t count;
+ uint16_t *buffer;
+
+ intermediate = 0;
+ for (j = 0; j < addrlen_count; j++) {
+ buffer = addrlen[j].vaddr;
+ count = addrlen[j].length / 2;
+ for (i=0; i < count; i++) {
+ intermediate += buffer[i];
+ }
+ }
+
+ /* Carry is everything over 16 bits */
+ intermediate += ((intermediate & 0xffff0000) >> 16);
+
+ /* Flip all the bits */
+ intermediate = ~intermediate;
+
+ checksum = intermediate;
+
+ return checksum;
+}
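+
+/*
+ * Worked example of the fold: for the two 16-bit words 0x1234 and 0xf00d,
+ *
+ *	sum       = 0x1234 + 0xf00d   = 0x10241
+ *	add carry = 0x10241 + 0x1     = 0x10242
+ *	flip bits = ~0x10242          = 0xfffefdbd
+ *	checksum  = low 16 bits       = 0xfdbd
+ */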
+
+/**
+ * @brief Return blocksize given SCSI API DIF block size
+ *
+ * Given the DIF block size enumerated value, return the block size value. (e.g.
+ * OCS_SCSI_DIF_BK_SIZE_512 returns 512)
+ *
+ * @param dif_info Pointer to SCSI API DIF info block
+ *
+ * @return returns block size, or 0 if SCSI API DIF blocksize is invalid
+ */
+
+uint32_t
+ocs_scsi_dif_blocksize(ocs_scsi_dif_info_t *dif_info)
+{
+ uint32_t blocksize = 0;
+
+ switch(dif_info->blk_size) {
+ case OCS_SCSI_DIF_BK_SIZE_512: blocksize = 512; break;
+ case OCS_SCSI_DIF_BK_SIZE_1024: blocksize = 1024; break;
+ case OCS_SCSI_DIF_BK_SIZE_2048: blocksize = 2048; break;
+ case OCS_SCSI_DIF_BK_SIZE_4096: blocksize = 4096; break;
+ case OCS_SCSI_DIF_BK_SIZE_520: blocksize = 520; break;
+ case OCS_SCSI_DIF_BK_SIZE_4104: blocksize = 4104; break;
+ default:
+ break;
+ }
+
+ return blocksize;
+}
+
+/**
+ * @brief Set SCSI API DIF blocksize
+ *
+ * Given a blocksize value (512, 1024, etc.), set the SCSI API DIF blocksize
+ * in the DIF info block
+ *
+ * @param dif_info Pointer to the SCSI API DIF info block
+ * @param blocksize Block size
+ *
+ * @return returns 0 for success, a negative error code value for failure.
+ */
+
+int32_t
+ocs_scsi_dif_set_blocksize(ocs_scsi_dif_info_t *dif_info, uint32_t blocksize)
+{
+ int32_t rc = 0;
+
+ switch(blocksize) {
+ case 512: dif_info->blk_size = OCS_SCSI_DIF_BK_SIZE_512; break;
+ case 1024: dif_info->blk_size = OCS_SCSI_DIF_BK_SIZE_1024; break;
+ case 2048: dif_info->blk_size = OCS_SCSI_DIF_BK_SIZE_2048; break;
+ case 4096: dif_info->blk_size = OCS_SCSI_DIF_BK_SIZE_4096; break;
+ case 520: dif_info->blk_size = OCS_SCSI_DIF_BK_SIZE_520; break;
+ case 4104: dif_info->blk_size = OCS_SCSI_DIF_BK_SIZE_4104; break;
+ default:
+ rc = -1;
+ break;
+ }
+ return rc;
+}
+
+/**
+ * @brief Return memory block size given SCSI DIF API
+ *
+ * The blocksize in memory for the DIF transfer is returned, given the SCSI DIF info
+ * block and the direction of transfer.
+ *
+ * @param dif_info Pointer to DIF info block
+ * @param wiretomem Transfer direction, 1 is wire to memory, 0 is memory to wire
+ *
+ * @return Memory blocksize, or negative error value
+ *
+ * WARNING: the order of initialization of the adj[] arrays MUST match the declarations
+ * of OCS_SCSI_DIF_OPER_*
+ */
+
+int32_t
+ocs_scsi_dif_mem_blocksize(ocs_scsi_dif_info_t *dif_info, int wiretomem)
+{
+ uint32_t blocksize;
+ uint8_t wiretomem_adj[] = {
+ 0, /* OCS_SCSI_DIF_OPER_DISABLED, */
+ DIF_SIZE, /* OCS_SCSI_DIF_OPER_IN_NODIF_OUT_CRC, */
+ 0, /* OCS_SCSI_DIF_OPER_IN_CRC_OUT_NODIF, */
+ DIF_SIZE, /* OCS_SCSI_DIF_OPER_IN_NODIF_OUT_CHKSUM, */
+ 0, /* OCS_SCSI_DIF_OPER_IN_CHKSUM_OUT_NODIF, */
+ DIF_SIZE, /* OCS_SCSI_DIF_OPER_IN_CRC_OUT_CRC, */
+ DIF_SIZE, /* OCS_SCSI_DIF_OPER_IN_CHKSUM_OUT_CHKSUM, */
+ DIF_SIZE, /* OCS_SCSI_DIF_OPER_IN_CRC_OUT_CHKSUM, */
+ DIF_SIZE, /* OCS_SCSI_DIF_OPER_IN_CHKSUM_OUT_CRC, */
+ DIF_SIZE}; /* OCS_SCSI_DIF_OPER_IN_RAW_OUT_RAW, */
+ uint8_t memtowire_adj[] = {
+ 0, /* OCS_SCSI_DIF_OPER_DISABLED, */
+ 0, /* OCS_SCSI_DIF_OPER_IN_NODIF_OUT_CRC, */
+ DIF_SIZE, /* OCS_SCSI_DIF_OPER_IN_CRC_OUT_NODIF, */
+ 0, /* OCS_SCSI_DIF_OPER_IN_NODIF_OUT_CHKSUM, */
+ DIF_SIZE, /* OCS_SCSI_DIF_OPER_IN_CHKSUM_OUT_NODIF, */
+ DIF_SIZE, /* OCS_SCSI_DIF_OPER_IN_CRC_OUT_CRC, */
+ DIF_SIZE, /* OCS_SCSI_DIF_OPER_IN_CHKSUM_OUT_CHKSUM, */
+ DIF_SIZE, /* OCS_SCSI_DIF_OPER_IN_CRC_OUT_CHKSUM, */
+ DIF_SIZE, /* OCS_SCSI_DIF_OPER_IN_CHKSUM_OUT_CRC, */
+ DIF_SIZE}; /* OCS_SCSI_DIF_OPER_IN_RAW_OUT_RAW, */
+
+ blocksize = ocs_scsi_dif_blocksize(dif_info);
+ if (blocksize == 0) {
+ return -1;
+ }
+
+ if (wiretomem) {
+ ocs_assert(dif_info->dif_oper < ARRAY_SIZE(wiretomem_adj), 0);
+ blocksize += wiretomem_adj[dif_info->dif_oper];
+ } else { /* mem to wire */
+ ocs_assert(dif_info->dif_oper < ARRAY_SIZE(memtowire_adj), 0);
+ blocksize += memtowire_adj[dif_info->dif_oper];
+ }
+ return blocksize;
+}
+
+/**
+ * @brief Return wire block size given SCSI DIF API
+ *
+ * The blocksize on the wire for the DIF transfer is returned, given the SCSI DIF info
+ * block and the direction of transfer.
+ *
+ * @param dif_info Pointer to DIF info block
+ * @param wiretomem Transfer direction, 1 is wire to memory, 0 is memory to wire
+ *
+ * @return Wire blocksize or negative error value
+ *
+ * WARNING: the order of initialization of the adj[] arrays MUST match the declarations
+ * of OCS_SCSI_DIF_OPER_*
+ */
+
+int32_t
+ocs_scsi_dif_wire_blocksize(ocs_scsi_dif_info_t *dif_info, int wiretomem)
+{
+ uint32_t blocksize;
+ uint8_t wiretomem_adj[] = {
+ 0, /* OCS_SCSI_DIF_OPER_DISABLED, */
+ 0, /* OCS_SCSI_DIF_OPER_IN_NODIF_OUT_CRC, */
+ DIF_SIZE, /* OCS_SCSI_DIF_OPER_IN_CRC_OUT_NODIF, */
+ 0, /* OCS_SCSI_DIF_OPER_IN_NODIF_OUT_CHKSUM, */
+ DIF_SIZE, /* OCS_SCSI_DIF_OPER_IN_CHKSUM_OUT_NODIF, */
+ DIF_SIZE, /* OCS_SCSI_DIF_OPER_IN_CRC_OUT_CRC, */
+ DIF_SIZE, /* OCS_SCSI_DIF_OPER_IN_CHKSUM_OUT_CHKSUM, */
+ DIF_SIZE, /* OCS_SCSI_DIF_OPER_IN_CRC_OUT_CHKSUM, */
+ DIF_SIZE, /* OCS_SCSI_DIF_OPER_IN_CHKSUM_OUT_CRC, */
+ DIF_SIZE}; /* OCS_SCSI_DIF_OPER_IN_RAW_OUT_RAW, */
+ uint8_t memtowire_adj[] = {
+ 0, /* OCS_SCSI_DIF_OPER_DISABLED, */
+ DIF_SIZE, /* OCS_SCSI_DIF_OPER_IN_NODIF_OUT_CRC, */
+ 0, /* OCS_SCSI_DIF_OPER_IN_CRC_OUT_NODIF, */
+ DIF_SIZE, /* OCS_SCSI_DIF_OPER_IN_NODIF_OUT_CHKSUM, */
+ 0, /* OCS_SCSI_DIF_OPER_IN_CHKSUM_OUT_NODIF, */
+ DIF_SIZE, /* OCS_SCSI_DIF_OPER_IN_CRC_OUT_CRC, */
+ DIF_SIZE, /* OCS_SCSI_DIF_OPER_IN_CHKSUM_OUT_CHKSUM, */
+ DIF_SIZE, /* OCS_SCSI_DIF_OPER_IN_CRC_OUT_CHKSUM, */
+ DIF_SIZE, /* OCS_SCSI_DIF_OPER_IN_CHKSUM_OUT_CRC, */
+ DIF_SIZE}; /* OCS_SCSI_DIF_OPER_IN_RAW_OUT_RAW, */
+
+
+ blocksize = ocs_scsi_dif_blocksize(dif_info);
+ if (blocksize == 0) {
+ return -1;
+ }
+
+ if (wiretomem) {
+ ocs_assert(dif_info->dif_oper < ARRAY_SIZE(wiretomem_adj), 0);
+ blocksize += wiretomem_adj[dif_info->dif_oper];
+ } else { /* mem to wire */
+ ocs_assert(dif_info->dif_oper < ARRAY_SIZE(memtowire_adj), 0);
+ blocksize += memtowire_adj[dif_info->dif_oper];
+ }
+
+ return blocksize;
+}
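+
+/*
+ * Example (assuming DIF_SIZE is the 8-byte T10 protection tuple): with
+ * 512-byte blocks and OCS_SCSI_DIF_OPER_IN_NODIF_OUT_CRC, a memory-to-wire
+ * transfer (wiretomem == 0) carries no DIF in memory but gains it on the
+ * wire:
+ *
+ *	ocs_scsi_dif_mem_blocksize(dif_info, 0)  returns 512
+ *	ocs_scsi_dif_wire_blocksize(dif_info, 0) returns 520
+ */
+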
+/**
+ * @brief Return blocksize given HW API DIF block size
+ *
+ * Given the DIF block size enumerated value, return the block size value. (e.g.
+ * OCS_HW_DIF_BK_SIZE_512 returns 512)
+ *
+ * @param dif_info Pointer to HW API DIF info block
+ *
+ * @return returns block size, or 0 if HW API DIF blocksize is invalid
+ */
+
+uint32_t
+ocs_hw_dif_blocksize(ocs_hw_dif_info_t *dif_info)
+{
+ uint32_t blocksize = 0;
+
+ switch(dif_info->blk_size) {
+ case OCS_HW_DIF_BK_SIZE_512: blocksize = 512; break;
+ case OCS_HW_DIF_BK_SIZE_1024: blocksize = 1024; break;
+ case OCS_HW_DIF_BK_SIZE_2048: blocksize = 2048; break;
+ case OCS_HW_DIF_BK_SIZE_4096: blocksize = 4096; break;
+ case OCS_HW_DIF_BK_SIZE_520: blocksize = 520; break;
+ case OCS_HW_DIF_BK_SIZE_4104: blocksize = 4104; break;
+ default:
+ break;
+ }
+
+ return blocksize;
+}
+
+/**
+ * @brief Return memory block size given HW DIF API
+ *
+ * The blocksize in memory for the DIF transfer is returned, given the HW DIF info
+ * block and the direction of transfer.
+ *
+ * @param dif_info Pointer to DIF info block
+ * @param wiretomem Transfer direction, 1 is wire to memory, 0 is memory to wire
+ *
+ * @return Memory blocksize, or negative error value
+ *
+ * WARNING: the order of initialization of the adj[] arrays MUST match the declarations
+ * of OCS_HW_DIF_OPER_*
+ */
+
+int32_t
+ocs_hw_dif_mem_blocksize(ocs_hw_dif_info_t *dif_info, int wiretomem)
+{
+ uint32_t blocksize;
+ uint8_t wiretomem_adj[] = {
+ 0, /* OCS_HW_DIF_OPER_DISABLED, */
+ DIF_SIZE, /* OCS_HW_DIF_OPER_IN_NODIF_OUT_CRC, */
+ 0, /* OCS_HW_DIF_OPER_IN_CRC_OUT_NODIF, */
+ DIF_SIZE, /* OCS_HW_DIF_OPER_IN_NODIF_OUT_CHKSUM, */
+ 0, /* OCS_HW_DIF_OPER_IN_CHKSUM_OUT_NODIF, */
+ DIF_SIZE, /* OCS_HW_DIF_OPER_IN_CRC_OUT_CRC, */
+ DIF_SIZE, /* OCS_HW_DIF_OPER_IN_CHKSUM_OUT_CHKSUM, */
+ DIF_SIZE, /* OCS_HW_DIF_OPER_IN_CRC_OUT_CHKSUM, */
+ DIF_SIZE, /* OCS_HW_DIF_OPER_IN_CHKSUM_OUT_CRC, */
+ DIF_SIZE}; /* OCS_HW_DIF_OPER_IN_RAW_OUT_RAW, */
+ uint8_t memtowire_adj[] = {
+ 0, /* OCS_HW_DIF_OPER_DISABLED, */
+ 0, /* OCS_HW_DIF_OPER_IN_NODIF_OUT_CRC, */
+ DIF_SIZE, /* OCS_HW_DIF_OPER_IN_CRC_OUT_NODIF, */
+ 0, /* OCS_HW_DIF_OPER_IN_NODIF_OUT_CHKSUM, */
+ DIF_SIZE, /* OCS_HW_DIF_OPER_IN_CHKSUM_OUT_NODIF, */
+ DIF_SIZE, /* OCS_HW_DIF_OPER_IN_CRC_OUT_CRC, */
+ DIF_SIZE, /* OCS_HW_DIF_OPER_IN_CHKSUM_OUT_CHKSUM, */
+ DIF_SIZE, /* OCS_HW_DIF_OPER_IN_CRC_OUT_CHKSUM, */
+ DIF_SIZE, /* OCS_HW_DIF_OPER_IN_CHKSUM_OUT_CRC, */
+ DIF_SIZE}; /* OCS_HW_DIF_OPER_IN_RAW_OUT_RAW, */
+
+ blocksize = ocs_hw_dif_blocksize(dif_info);
+ if (blocksize == 0) {
+ return -1;
+ }
+
+ if (wiretomem) {
+ ocs_assert(dif_info->dif_oper < ARRAY_SIZE(wiretomem_adj), 0);
+ blocksize += wiretomem_adj[dif_info->dif_oper];
+ } else { /* mem to wire */
+ ocs_assert(dif_info->dif_oper < ARRAY_SIZE(memtowire_adj), 0);
+ blocksize += memtowire_adj[dif_info->dif_oper];
+ }
+ return blocksize;
+}
+
+/**
+ * @brief Return wire block size given HW DIF API
+ *
+ * The blocksize on the wire for the DIF transfer is returned, given the HW DIF info
+ * block and the direction of transfer.
+ *
+ * @param dif_info Pointer to DIF info block
+ * @param wiretomem Transfer direction, 1 is wire to memory, 0 is memory to wire
+ *
+ * @return Wire blocksize or negative error value
+ *
+ * WARNING: the order of initialization of the adj[] arrays MUST match the declarations
+ * of OCS_HW_DIF_OPER_*
+ */
+
+int32_t
+ocs_hw_dif_wire_blocksize(ocs_hw_dif_info_t *dif_info, int wiretomem)
+{
+ uint32_t blocksize;
+ uint8_t wiretomem_adj[] = {
+ 0, /* OCS_HW_DIF_OPER_DISABLED, */
+ 0, /* OCS_HW_DIF_OPER_IN_NODIF_OUT_CRC, */
+ DIF_SIZE, /* OCS_HW_DIF_OPER_IN_CRC_OUT_NODIF, */
+ 0, /* OCS_HW_DIF_OPER_IN_NODIF_OUT_CHKSUM, */
+ DIF_SIZE, /* OCS_HW_DIF_OPER_IN_CHKSUM_OUT_NODIF, */
+ DIF_SIZE, /* OCS_HW_DIF_OPER_IN_CRC_OUT_CRC, */
+ DIF_SIZE, /* OCS_HW_DIF_OPER_IN_CHKSUM_OUT_CHKSUM, */
+ DIF_SIZE, /* OCS_HW_DIF_OPER_IN_CRC_OUT_CHKSUM, */
+ DIF_SIZE, /* OCS_HW_DIF_OPER_IN_CHKSUM_OUT_CRC, */
+ DIF_SIZE}; /* OCS_HW_DIF_OPER_IN_RAW_OUT_RAW, */
+ uint8_t memtowire_adj[] = {
+ 0, /* OCS_HW_DIF_OPER_DISABLED, */
+ DIF_SIZE, /* OCS_HW_DIF_OPER_IN_NODIF_OUT_CRC, */
+ 0, /* OCS_HW_DIF_OPER_IN_CRC_OUT_NODIF, */
+ DIF_SIZE, /* OCS_HW_DIF_OPER_IN_NODIF_OUT_CHKSUM, */
+ 0, /* OCS_HW_DIF_OPER_IN_CHKSUM_OUT_NODIF, */
+ DIF_SIZE, /* OCS_HW_DIF_OPER_IN_CRC_OUT_CRC, */
+ DIF_SIZE, /* OCS_HW_DIF_OPER_IN_CHKSUM_OUT_CHKSUM, */
+ DIF_SIZE, /* OCS_HW_DIF_OPER_IN_CRC_OUT_CHKSUM, */
+ DIF_SIZE, /* OCS_HW_DIF_OPER_IN_CHKSUM_OUT_CRC, */
+ DIF_SIZE}; /* OCS_HW_DIF_OPER_IN_RAW_OUT_RAW, */
+
+
+ blocksize = ocs_hw_dif_blocksize(dif_info);
+ if (blocksize == 0) {
+ return -1;
+ }
+
+ if (wiretomem) {
+ ocs_assert(dif_info->dif_oper < ARRAY_SIZE(wiretomem_adj), 0);
+ blocksize += wiretomem_adj[dif_info->dif_oper];
+ } else { /* mem to wire */
+ ocs_assert(dif_info->dif_oper < ARRAY_SIZE(memtowire_adj), 0);
+ blocksize += memtowire_adj[dif_info->dif_oper];
+ }
+
+ return blocksize;
+}
+
+static int32_t ocs_segment_remaining(ocs_textbuf_segment_t *segment);
+static ocs_textbuf_segment_t *ocs_textbuf_segment_alloc(ocs_textbuf_t *textbuf);
+static void ocs_textbuf_segment_free(ocs_t *ocs, ocs_textbuf_segment_t *segment);
+static ocs_textbuf_segment_t *ocs_textbuf_get_segment(ocs_textbuf_t *textbuf, uint32_t idx);
+
+uint8_t *
+ocs_textbuf_get_buffer(ocs_textbuf_t *textbuf)
+{
+ return ocs_textbuf_ext_get_buffer(textbuf, 0);
+}
+
+int32_t
+ocs_textbuf_get_length(ocs_textbuf_t *textbuf)
+{
+ return ocs_textbuf_ext_get_length(textbuf, 0);
+}
+
+int32_t
+ocs_textbuf_get_written(ocs_textbuf_t *textbuf)
+{
+ uint32_t idx;
+ int32_t n;
+ int32_t total = 0;
+
+ for (idx = 0; (n = ocs_textbuf_ext_get_written(textbuf, idx)) >= 0; idx++) {
+ total += n;
+ }
+ return total;
+}
+
+uint8_t *ocs_textbuf_ext_get_buffer(ocs_textbuf_t *textbuf, uint32_t idx)
+{
+ ocs_textbuf_segment_t *segment = ocs_textbuf_get_segment(textbuf, idx);
+ if (segment == NULL) {
+ return NULL;
+ }
+ return segment->buffer;
+}
+
+int32_t ocs_textbuf_ext_get_length(ocs_textbuf_t *textbuf, uint32_t idx)
+{
+ ocs_textbuf_segment_t *segment = ocs_textbuf_get_segment(textbuf, idx);
+ if (segment == NULL) {
+ return -1;
+ }
+ return segment->buffer_length;
+}
+
+int32_t ocs_textbuf_ext_get_written(ocs_textbuf_t *textbuf, uint32_t idx)
+{
+ ocs_textbuf_segment_t *segment = ocs_textbuf_get_segment(textbuf, idx);
+ if (segment == NULL) {
+ return -1;
+ }
+ return segment->buffer_written;
+}
+
+uint32_t
+ocs_textbuf_initialized(ocs_textbuf_t *textbuf)
+{
+ return (textbuf->ocs != NULL);
+}
+
+int32_t
+ocs_textbuf_alloc(ocs_t *ocs, ocs_textbuf_t *textbuf, uint32_t length)
+{
+ ocs_memset(textbuf, 0, sizeof(*textbuf));
+
+ textbuf->ocs = ocs;
+ ocs_list_init(&textbuf->segment_list, ocs_textbuf_segment_t, link);
+
+ if (length > OCS_TEXTBUF_MAX_ALLOC_LEN) {
+ textbuf->allocation_length = OCS_TEXTBUF_MAX_ALLOC_LEN;
+ } else {
+ textbuf->allocation_length = length;
+ }
+
+ /* mark as extendable */
+ textbuf->extendable = TRUE;
+
+ /* save maximum allocation length */
+ textbuf->max_allocation_length = length;
+
+ /* Add first segment */
+ return (ocs_textbuf_segment_alloc(textbuf) == NULL) ? -1 : 0;
+}
+
+static ocs_textbuf_segment_t *
+ocs_textbuf_segment_alloc(ocs_textbuf_t *textbuf)
+{
+ ocs_textbuf_segment_t *segment = NULL;
+
+ if (textbuf->extendable) {
+ segment = ocs_malloc(textbuf->ocs, sizeof(*segment), OCS_M_ZERO | OCS_M_NOWAIT);
+ if (segment != NULL) {
+ segment->buffer = ocs_malloc(textbuf->ocs, textbuf->allocation_length, OCS_M_ZERO | OCS_M_NOWAIT);
+ if (segment->buffer != NULL) {
+ segment->buffer_length = textbuf->allocation_length;
+ segment->buffer_written = 0;
+ ocs_list_add_tail(&textbuf->segment_list, segment);
+ textbuf->total_allocation_length += textbuf->allocation_length;
+
+ /* If we've allocated our limit, then mark as not extendable */
+ if (textbuf->total_allocation_length >= textbuf->max_allocation_length) {
+ textbuf->extendable = 0;
+ }
+
+ } else {
+ ocs_textbuf_segment_free(textbuf->ocs, segment);
+ segment = NULL;
+ }
+ }
+ }
+ return segment;
+}
+
+static void
+ocs_textbuf_segment_free(ocs_t *ocs, ocs_textbuf_segment_t *segment)
+{
+ if (segment) {
+ if (segment->buffer && !segment->user_allocated) {
+ ocs_free(ocs, segment->buffer, segment->buffer_length);
+ }
+ ocs_free(ocs, segment, sizeof(*segment));
+ }
+}
+
+static ocs_textbuf_segment_t *
+ocs_textbuf_get_segment(ocs_textbuf_t *textbuf, uint32_t idx)
+{
+ uint32_t i;
+ ocs_textbuf_segment_t *segment;
+
+ if (ocs_textbuf_initialized(textbuf)) {
+ i = 0;
+ ocs_list_foreach(&textbuf->segment_list, segment) {
+ if (i == idx) {
+ return segment;
+ }
+ i++;
+ }
+ }
+ return NULL;
+}
+
+int32_t
+ocs_textbuf_init(ocs_t *ocs, ocs_textbuf_t *textbuf, void *buffer, uint32_t length)
+{
+ int32_t rc = -1;
+ ocs_textbuf_segment_t *segment;
+
+ ocs_memset(textbuf, 0, sizeof(*textbuf));
+
+ textbuf->ocs = ocs;
+ ocs_list_init(&textbuf->segment_list, ocs_textbuf_segment_t, link);
+ segment = ocs_malloc(ocs, sizeof(*segment), OCS_M_ZERO | OCS_M_NOWAIT);
+ if (segment) {
+ segment->buffer = buffer;
+ segment->buffer_length = length;
+ segment->buffer_written = 0;
+ segment->user_allocated = 1;
+ ocs_list_add_tail(&textbuf->segment_list, segment);
+ rc = 0;
+ }
+
+ return rc;
+}
+
+void
+ocs_textbuf_free(ocs_t *ocs, ocs_textbuf_t *textbuf)
+{
+ ocs_textbuf_segment_t *segment;
+ ocs_textbuf_segment_t *n;
+
+ if (ocs_textbuf_initialized(textbuf)) {
+ ocs_list_foreach_safe(&textbuf->segment_list, segment, n) {
+ ocs_list_remove(&textbuf->segment_list, segment);
+ ocs_textbuf_segment_free(ocs, segment);
+ }
+
+ ocs_memset(textbuf, 0, sizeof(*textbuf));
+ }
+}
+
+void
+ocs_textbuf_printf(ocs_textbuf_t *textbuf, const char *fmt, ...)
+{
+ va_list ap;
+
+ if (ocs_textbuf_initialized(textbuf)) {
+ va_start(ap, fmt);
+ ocs_textbuf_vprintf(textbuf, fmt, ap);
+ va_end(ap);
+ }
+}
+
+void
+ocs_textbuf_vprintf(ocs_textbuf_t *textbuf, const char *fmt, va_list ap)
+{
+ int avail;
+ int written;
+ ocs_textbuf_segment_t *segment;
+ va_list save_ap;
+
+ if (!ocs_textbuf_initialized(textbuf)) {
+ return;
+ }
+
+ va_copy(save_ap, ap);
+
+ /* fetch last segment */
+ segment = ocs_list_get_tail(&textbuf->segment_list);
+
+ avail = ocs_segment_remaining(segment);
+ if (avail == 0) {
+ if ((segment = ocs_textbuf_segment_alloc(textbuf)) == NULL) {
+ goto out;
+ }
+ avail = ocs_segment_remaining(segment);
+ }
+
+ written = ocs_vsnprintf(segment->buffer + segment->buffer_written, avail, fmt, ap);
+
+ /* See if data was truncated */
+ if (written >= avail) {
+
+ written = avail;
+
+ if (textbuf->extendable) {
+
+ /* revert the partially written data */
+ *(segment->buffer + segment->buffer_written) = 0;
+
+ /* Allocate a new segment */
+ if ((segment = ocs_textbuf_segment_alloc(textbuf)) == NULL) {
+ ocs_log_err(textbuf->ocs, "alloc segment failed\n");
+ goto out;
+ }
+ avail = ocs_segment_remaining(segment);
+
+ /* Retry the write */
+ written = ocs_vsnprintf(segment->buffer + segment->buffer_written, avail, fmt, save_ap);
+ }
+ }
+ segment->buffer_written += written;
+
+out:
+ va_end(save_ap);
+}
+
+void
+ocs_textbuf_putc(ocs_textbuf_t *textbuf, uint8_t c)
+{
+ ocs_textbuf_segment_t *segment;
+
+ if (ocs_textbuf_initialized(textbuf)) {
+ segment = ocs_list_get_tail(&textbuf->segment_list);
+
+ if (ocs_segment_remaining(segment)) {
+ *(segment->buffer + segment->buffer_written++) = c;
+ }
+ if (ocs_segment_remaining(segment) == 0) {
+ ocs_textbuf_segment_alloc(textbuf);
+ }
+ }
+}
+
+void
+ocs_textbuf_puts(ocs_textbuf_t *textbuf, char *s)
+{
+ if (ocs_textbuf_initialized(textbuf)) {
+ while(*s) {
+ ocs_textbuf_putc(textbuf, *s++);
+ }
+ }
+}
+
+void
+ocs_textbuf_buffer(ocs_textbuf_t *textbuf, uint8_t *buffer, uint32_t buffer_length)
+{
+ char *s;
+
+ if (!ocs_textbuf_initialized(textbuf)) {
+ return;
+ }
+
+ s = (char*) buffer;
+ while(*s) {
+
+ /*
+ * XML escapes
+ *
+ * " &quot;
+ * ' &apos;
+ * < &lt;
+ * > &gt;
+ * & &amp;
+ */
+
+ switch(*s) {
+ case '"': ocs_textbuf_puts(textbuf, "&quot;"); break;
+ case '\'': ocs_textbuf_puts(textbuf, "&apos;"); break;
+ case '<': ocs_textbuf_puts(textbuf, "&lt;"); break;
+ case '>': ocs_textbuf_puts(textbuf, "&gt;"); break;
+ case '&': ocs_textbuf_puts(textbuf, "&amp;"); break;
+ default: ocs_textbuf_putc(textbuf, *s); break;
+ }
+ s++;
+ }
+}
+
+void
+ocs_textbuf_copy(ocs_textbuf_t *textbuf, uint8_t *buffer, uint32_t buffer_length)
+{
+ char *s;
+
+ if (!ocs_textbuf_initialized(textbuf)) {
+ return;
+ }
+
+ s = (char*) buffer;
+ while(*s) {
+ ocs_textbuf_putc(textbuf, *s++);
+ }
+}
+
+int32_t
+ocs_textbuf_remaining(ocs_textbuf_t *textbuf)
+{
+ if (ocs_textbuf_initialized(textbuf)) {
+ return ocs_segment_remaining(ocs_list_get_head(&textbuf->segment_list));
+ } else {
+ return 0;
+ }
+}
+
+static int32_t
+ocs_segment_remaining(ocs_textbuf_segment_t *segment)
+{
+ return segment->buffer_length - segment->buffer_written;
+}
+
+void
+ocs_textbuf_reset(ocs_textbuf_t *textbuf)
+{
+ uint32_t i = 0;
+ ocs_textbuf_segment_t *segment;
+ ocs_textbuf_segment_t *n;
+
+ if (ocs_textbuf_initialized(textbuf)) {
+ /* zero written on the first segment, free the rest */
+ ocs_list_foreach_safe(&textbuf->segment_list, segment, n) {
+ if (i++ == 0) {
+ segment->buffer_written = 0;
+ } else {
+ ocs_list_remove(&textbuf->segment_list, segment);
+ ocs_textbuf_segment_free(textbuf->ocs, segment);
+ }
+ }
+ }
+}
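+
+/*
+ * Example usage (hypothetical sketch): an extendable text buffer grows a
+ * segment at a time as it is written:
+ *
+ *	ocs_textbuf_t tb;
+ *	ocs_textbuf_alloc(ocs, &tb, 64*1024);
+ *	ocs_textbuf_printf(&tb, "count=%d\n", count);
+ *	written = ocs_textbuf_get_written(&tb);
+ *	ocs_textbuf_free(ocs, &tb);
+ */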
+
+/**
+ * @brief Sparse Vector API.
+ *
+ * This is a trimmed down sparse vector implementation tuned to the problem of
+ * 24-bit FC_IDs. In this case, the 24-bit index value is broken down into three
+ * 8-bit values. These values are used to index up to three 256-element arrays.
+ * Arrays are allocated only when needed. @n @n
+ * The lookup completes in constant time (3 indexed array references). @n @n
+ * A typical use case would be that the fabric/directory FC_IDs cause two rows to be
+ * allocated, and the fabric-assigned remote nodes cause two more rows to be allocated,
+ * with the root row always allocated. This gives five rows of 256 x sizeof(void*),
+ * resulting in about 10 KB.
+ */
+
+
+
+/**
+ * @ingroup spv
+ * @brief Allocate a new sparse vector row.
+ *
+ * @par Description
+ * A new sparse vector row is allocated.
+ *
+ * @param os OS handle
+ * @param rowcount Number of elements in a row.
+ *
+ * @return Returns the pointer to a row.
+ */
+static void **
+spv_new_row(ocs_os_handle_t os, uint32_t rowcount)
+{
+ return ocs_malloc(os, sizeof(void*) * rowcount, OCS_M_ZERO | OCS_M_NOWAIT);
+}
+
+
+
+/**
+ * @ingroup spv
+ * @brief Delete row recursively.
+ *
+ * @par Description
+ * This function recursively deletes the rows in this sparse vector
+ *
+ * @param os OS handle
+ * @param a Pointer to the row.
+ * @param n Number of elements in the row.
+ * @param depth Depth of deleting.
+ *
+ * @return None.
+ */
+static void
+_spv_del(ocs_os_handle_t os, void **a, uint32_t n, uint32_t depth)
+{
+ if (a) {
+ if (depth) {
+ uint32_t i;
+
+ for (i = 0; i < n; i ++) {
+ _spv_del(os, a[i], n, depth-1);
+ }
+
+ ocs_free(os, a, SPV_ROWLEN*sizeof(*a));
+ }
+ }
+}
+
+/**
+ * @ingroup spv
+ * @brief Delete a sparse vector.
+ *
+ * @par Description
+ * The sparse vector is freed.
+ *
+ * @param spv Pointer to the sparse vector object.
+ */
+void
+spv_del(sparse_vector_t spv)
+{
+ if (spv) {
+ _spv_del(spv->os, spv->array, SPV_ROWLEN, SPV_DIM);
+ ocs_free(spv->os, spv, sizeof(*spv));
+ }
+}
+
+/**
+ * @ingroup spv
+ * @brief Instantiate a new sparse vector object.
+ *
+ * @par Description
+ * A new sparse vector is allocated.
+ *
+ * @param os OS handle
+ *
+ * @return Returns the pointer to the sparse vector, or NULL.
+ */
+sparse_vector_t
+spv_new(ocs_os_handle_t os)
+{
+ sparse_vector_t spv;
+ uint32_t i;
+
+ spv = ocs_malloc(os, sizeof(*spv), OCS_M_ZERO | OCS_M_NOWAIT);
+ if (!spv) {
+ return NULL;
+ }
+
+ spv->os = os;
+ spv->max_idx = 1;
+ for (i = 0; i < SPV_DIM; i ++) {
+ spv->max_idx *= SPV_ROWLEN;
+ }
+
+ return spv;
+}
+
+/**
+ * @ingroup spv
+ * @brief Return the address of a cell.
+ *
+ * @par Description
+ * Returns the address of a cell, allocates sparse rows as needed if the
+ * alloc_new_rows parameter is set.
+ *
+ * @param sv Pointer to the sparse vector.
+ * @param idx Index of which to return the address.
+ * @param alloc_new_rows If TRUE, then new rows may be allocated to set values;
+ * set to FALSE when retrieving values.
+ *
+ * @return Returns the pointer to the cell, or NULL.
+ */
+static void *
+spv_new_cell(sparse_vector_t sv, uint32_t idx, uint8_t alloc_new_rows)
+{
+ uint32_t a = (idx >> 16) & 0xff;
+ uint32_t b = (idx >> 8) & 0xff;
+ uint32_t c = (idx >> 0) & 0xff;
+ void **p;
+
+ if (idx >= sv->max_idx) {
+ return NULL;
+ }
+
+ if (sv->array == NULL) {
+ sv->array = (alloc_new_rows ? spv_new_row(sv->os, SPV_ROWLEN) : NULL);
+ if (sv->array == NULL) {
+ return NULL;
+ }
+ }
+ p = sv->array;
+ if (p[a] == NULL) {
+ p[a] = (alloc_new_rows ? spv_new_row(sv->os, SPV_ROWLEN) : NULL);
+ if (p[a] == NULL) {
+ return NULL;
+ }
+ }
+ p = p[a];
+ if (p[b] == NULL) {
+ p[b] = (alloc_new_rows ? spv_new_row(sv->os, SPV_ROWLEN) : NULL);
+ if (p[b] == NULL) {
+ return NULL;
+ }
+ }
+ p = p[b];
+
+ return &p[c];
+}
+
+/**
+ * @ingroup spv
+ * @brief Set the sparse vector cell value.
+ *
+ * @par Description
+ * Sets the sparse vector at @c idx to @c value.
+ *
+ * @param sv Pointer to the sparse vector.
+ * @param idx Index of which to store.
+ * @param value Value to store.
+ *
+ * @return None.
+ */
+void
+spv_set(sparse_vector_t sv, uint32_t idx, void *value)
+{
+ void **ref = spv_new_cell(sv, idx, TRUE);
+ if (ref) {
+ *ref = value;
+ }
+}
+
+/**
+ * @ingroup spv
+ * @brief Return the sparse vector cell value.
+ *
+ * @par Description
+ * Returns the value at @c idx.
+ *
+ * @param sv Pointer to the sparse vector.
+ * @param idx Index of which to return the value.
+ *
+ * @return Returns the cell value, or NULL.
+ */
+void *
+spv_get(sparse_vector_t sv, uint32_t idx)
+{
+ void **ref = spv_new_cell(sv, idx, FALSE);
+ if (ref) {
+ return *ref;
+ }
+ return NULL;
+}
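+
+/*
+ * Example usage (sketch): map a 24-bit FC_ID to a node pointer (0xfffffe,
+ * the fabric F_Port well-known address, is illustrative):
+ *
+ *	sparse_vector_t sv = spv_new(os);
+ *	spv_set(sv, 0xfffffe, node);
+ *	node = spv_get(sv, 0xfffffe);
+ *	spv_del(sv);
+ */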
+
+/*****************************************************************/
+/* */
+/* CRC LOOKUP TABLE */
+/* ================ */
+/* The following CRC lookup table was generated automagically */
+/* by the Rocksoft^tm Model CRC Algorithm Table Generation */
+/* Program V1.0 using the following model parameters: */
+/* */
+/* Width : 2 bytes. */
+/* Poly : 0x8BB7 */
+/* Reverse : FALSE. */
+/* */
+/* For more information on the Rocksoft^tm Model CRC Algorithm, */
+/* see the document titled "A Painless Guide to CRC Error */
+/* Detection Algorithms" by Ross Williams */
+/* (ross@guest.adelaide.edu.au.). This document is likely to be */
+/* in the FTP archive "ftp.adelaide.edu.au/pub/rocksoft". */
+/* */
+/*****************************************************************/
+/*
+ * Emulex Inc, changes:
+ * - minor syntax changes for successful compilation with contemporary
+ * C compilers, and OCS SDK API
+ * - crctable[] generated using Rocksoft public domain code
+ *
+ * Used in the Emulex SDK, the generated file crctable.out is cut and pasted into
+ * applicable SDK sources.
+ */
+
+
+static unsigned short crctable[256] =
+{
+ 0x0000, 0x8BB7, 0x9CD9, 0x176E, 0xB205, 0x39B2, 0x2EDC, 0xA56B,
+ 0xEFBD, 0x640A, 0x7364, 0xF8D3, 0x5DB8, 0xD60F, 0xC161, 0x4AD6,
+ 0x54CD, 0xDF7A, 0xC814, 0x43A3, 0xE6C8, 0x6D7F, 0x7A11, 0xF1A6,
+ 0xBB70, 0x30C7, 0x27A9, 0xAC1E, 0x0975, 0x82C2, 0x95AC, 0x1E1B,
+ 0xA99A, 0x222D, 0x3543, 0xBEF4, 0x1B9F, 0x9028, 0x8746, 0x0CF1,
+ 0x4627, 0xCD90, 0xDAFE, 0x5149, 0xF422, 0x7F95, 0x68FB, 0xE34C,
+ 0xFD57, 0x76E0, 0x618E, 0xEA39, 0x4F52, 0xC4E5, 0xD38B, 0x583C,
+ 0x12EA, 0x995D, 0x8E33, 0x0584, 0xA0EF, 0x2B58, 0x3C36, 0xB781,
+ 0xD883, 0x5334, 0x445A, 0xCFED, 0x6A86, 0xE131, 0xF65F, 0x7DE8,
+ 0x373E, 0xBC89, 0xABE7, 0x2050, 0x853B, 0x0E8C, 0x19E2, 0x9255,
+ 0x8C4E, 0x07F9, 0x1097, 0x9B20, 0x3E4B, 0xB5FC, 0xA292, 0x2925,
+ 0x63F3, 0xE844, 0xFF2A, 0x749D, 0xD1F6, 0x5A41, 0x4D2F, 0xC698,
+ 0x7119, 0xFAAE, 0xEDC0, 0x6677, 0xC31C, 0x48AB, 0x5FC5, 0xD472,
+ 0x9EA4, 0x1513, 0x027D, 0x89CA, 0x2CA1, 0xA716, 0xB078, 0x3BCF,
+ 0x25D4, 0xAE63, 0xB90D, 0x32BA, 0x97D1, 0x1C66, 0x0B08, 0x80BF,
+ 0xCA69, 0x41DE, 0x56B0, 0xDD07, 0x786C, 0xF3DB, 0xE4B5, 0x6F02,
+ 0x3AB1, 0xB106, 0xA668, 0x2DDF, 0x88B4, 0x0303, 0x146D, 0x9FDA,
+ 0xD50C, 0x5EBB, 0x49D5, 0xC262, 0x6709, 0xECBE, 0xFBD0, 0x7067,
+ 0x6E7C, 0xE5CB, 0xF2A5, 0x7912, 0xDC79, 0x57CE, 0x40A0, 0xCB17,
+ 0x81C1, 0x0A76, 0x1D18, 0x96AF, 0x33C4, 0xB873, 0xAF1D, 0x24AA,
+ 0x932B, 0x189C, 0x0FF2, 0x8445, 0x212E, 0xAA99, 0xBDF7, 0x3640,
+ 0x7C96, 0xF721, 0xE04F, 0x6BF8, 0xCE93, 0x4524, 0x524A, 0xD9FD,
+ 0xC7E6, 0x4C51, 0x5B3F, 0xD088, 0x75E3, 0xFE54, 0xE93A, 0x628D,
+ 0x285B, 0xA3EC, 0xB482, 0x3F35, 0x9A5E, 0x11E9, 0x0687, 0x8D30,
+ 0xE232, 0x6985, 0x7EEB, 0xF55C, 0x5037, 0xDB80, 0xCCEE, 0x4759,
+ 0x0D8F, 0x8638, 0x9156, 0x1AE1, 0xBF8A, 0x343D, 0x2353, 0xA8E4,
+ 0xB6FF, 0x3D48, 0x2A26, 0xA191, 0x04FA, 0x8F4D, 0x9823, 0x1394,
+ 0x5942, 0xD2F5, 0xC59B, 0x4E2C, 0xEB47, 0x60F0, 0x779E, 0xFC29,
+ 0x4BA8, 0xC01F, 0xD771, 0x5CC6, 0xF9AD, 0x721A, 0x6574, 0xEEC3,
+ 0xA415, 0x2FA2, 0x38CC, 0xB37B, 0x1610, 0x9DA7, 0x8AC9, 0x017E,
+ 0x1F65, 0x94D2, 0x83BC, 0x080B, 0xAD60, 0x26D7, 0x31B9, 0xBA0E,
+ 0xF0D8, 0x7B6F, 0x6C01, 0xE7B6, 0x42DD, 0xC96A, 0xDE04, 0x55B3
+};
+
+/*****************************************************************/
+/* End of CRC Lookup Table */
+/*****************************************************************/
+
+/**
+ * @brief Calculate the T10 PI CRC guard value for a block.
+ *
+ * Code based on Rocksoft's public domain CRC code, refer to
+ * http://www.ross.net/crc/download/crc_v3.txt. Minimally altered
+ * to work with the ocs_dif API.
+ *
+ * @param blk_adr Pointer to the data buffer.
+ * @param blk_len Number of bytes.
+ * @param crc Previously-calculated CRC, or crcseed for a new block.
+ *
+ * @return Returns the calculated CRC, which may be passed back in for partial blocks.
+ *
+ */
+
+unsigned short
+t10crc16(const unsigned char *blk_adr, unsigned long blk_len, unsigned short crc)
+{
+ if (blk_len > 0) {
+ while (blk_len--) {
+ crc = crctable[((crc>>8) ^ *blk_adr++) & 0xFFL] ^ (crc << 8);
+ }
+ }
+ return crc;
+}
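+
+/*
+ * Example (sketch): guard for one 512-byte block, seeded with zero; a block
+ * processed in two halves chains the intermediate CRC:
+ *
+ *	crc = t10crc16(block, 512, 0);
+ *
+ *	crc = t10crc16(block, 256, 0);
+ *	crc = t10crc16(block + 256, 256, crc);
+ */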
+
+struct ocs_ramlog_s {
+ uint32_t initialized;
+ uint32_t textbuf_count;
+ uint32_t textbuf_base;
+ ocs_textbuf_t *textbufs;
+ uint32_t cur_textbuf_idx;
+ ocs_textbuf_t *cur_textbuf;
+ ocs_lock_t lock;
+};
+
+static uint32_t ocs_ramlog_next_idx(ocs_ramlog_t *ramlog, uint32_t idx);
+
+/**
+ * @brief Allocate a ramlog buffer.
+ *
+ * Initialize a RAM logging buffer with buffer_count text buffers, each of length buffer_len.
+ *
+ * @param ocs Pointer to driver structure.
+ * @param buffer_len Length of each RAM log text buffer.
+ * @param buffer_count Number of text buffers to allocate.
+ *
+ * @return Returns pointer to ocs_ramlog_t instance, or NULL.
+ */
+ocs_ramlog_t *
+ocs_ramlog_init(ocs_t *ocs, uint32_t buffer_len, uint32_t buffer_count)
+{
+ uint32_t i;
+ uint32_t rc;
+ ocs_ramlog_t *ramlog;
+
+ ramlog = ocs_malloc(ocs, sizeof(*ramlog), OCS_M_ZERO | OCS_M_NOWAIT);
+ if (ramlog == NULL) {
+ ocs_log_err(ocs, "ocs_malloc ramlog failed\n");
+ return NULL;
+ }
+
+ ramlog->textbuf_count = buffer_count;
+
+ ramlog->textbufs = ocs_malloc(ocs, sizeof(*ramlog->textbufs)*buffer_count, OCS_M_ZERO | OCS_M_NOWAIT);
+ if (ramlog->textbufs == NULL) {
+ ocs_log_err(ocs, "ocs_malloc textbufs failed\n");
+ ocs_ramlog_free(ocs, ramlog);
+ return NULL;
+ }
+
+ for (i = 0; i < buffer_count; i ++) {
+ rc = ocs_textbuf_alloc(ocs, &ramlog->textbufs[i], buffer_len);
+ if (rc) {
+ ocs_log_err(ocs, "ocs_textbuf_alloc failed\n");
+ ocs_ramlog_free(ocs, ramlog);
+ return NULL;
+ }
+ }
+
+ ramlog->cur_textbuf_idx = 0;
+ ramlog->textbuf_base = 1;
+ ramlog->cur_textbuf = &ramlog->textbufs[0];
+ ramlog->initialized = TRUE;
+ ocs_lock_init(ocs, &ramlog->lock, "ramlog_lock[%d]", ocs_instance(ocs));
+ return ramlog;
+}
+
+/**
+ * @brief Free a ramlog buffer.
+ *
+ * A previously allocated RAM logging buffer is freed.
+ *
+ * @param ocs Pointer to driver structure.
+ * @param ramlog Pointer to RAM logging buffer structure.
+ *
+ * @return None.
+ */
+
+void
+ocs_ramlog_free(ocs_t *ocs, ocs_ramlog_t *ramlog)
+{
+ uint32_t i;
+
+ if (ramlog != NULL) {
+ ocs_lock_free(&ramlog->lock);
+ if (ramlog->textbufs) {
+ for (i = 0; i < ramlog->textbuf_count; i ++) {
+ ocs_textbuf_free(ocs, &ramlog->textbufs[i]);
+ }
+
+ ocs_free(ocs, ramlog->textbufs, ramlog->textbuf_count*sizeof(*ramlog->textbufs));
+ ramlog->textbufs = NULL;
+ }
+ ocs_free(ocs, ramlog, sizeof(*ramlog));
+ }
+}
+
+/**
+ * @brief Clear a ramlog buffer.
+ *
+ * The text in the start of day and/or recent ramlog text buffers is cleared.
+ *
+ * @param ocs Pointer to driver structure.
+ * @param ramlog Pointer to RAM logging buffer structure.
+ * @param clear_start_of_day Clear the start of day (driver init) portion of the ramlog.
+ * @param clear_recent Clear the recent messages portion of the ramlog.
+ *
+ * @return None.
+ */
+
+void
+ocs_ramlog_clear(ocs_t *ocs, ocs_ramlog_t *ramlog, int clear_start_of_day, int clear_recent)
+{
+ uint32_t i;
+
+ if (clear_recent) {
+ for (i = ramlog->textbuf_base; i < ramlog->textbuf_count; i ++) {
+ ocs_textbuf_reset(&ramlog->textbufs[i]);
+ }
+ ramlog->cur_textbuf_idx = 1;
+ }
+ if (clear_start_of_day && ramlog->textbuf_base) {
+ ocs_textbuf_reset(&ramlog->textbufs[0]);
+ /* Set textbuf_base to 0, so that all buffers are available for
+ * recent logs
+ */
+ ramlog->textbuf_base = 0;
+ }
+}
+
+/**
+ * @brief Append formatted printf data to a ramlog buffer.
+ *
+ * Formatted data is appended to a RAM logging buffer.
+ *
+ * @param os Pointer to driver structure.
+ * @param fmt Pointer to printf style format specifier.
+ *
+ * @return Returns 0 on success, or a negative error code value on failure.
+ */
+
+int32_t
+ocs_ramlog_printf(void *os, const char *fmt, ...)
+{
+ ocs_t *ocs = os;
+ va_list ap;
+ int32_t res;
+
+ if (ocs == NULL || ocs->ramlog == NULL) {
+ return -1;
+ }
+
+ va_start(ap, fmt);
+ res = ocs_ramlog_vprintf(ocs->ramlog, fmt, ap);
+ va_end(ap);
+
+ return res;
+}
+
+/**
+ * @brief Append formatted text to a ramlog using variable arguments.
+ *
+ * Formatted data is appended to the RAM logging buffer, using variable arguments.
+ *
+ * @param ramlog Pointer to RAM logging buffer.
+ * @param fmt Pointer to printf style formatting string.
+ * @param ap Variable argument pointer.
+ *
+ * @return Returns 0 on success, or a negative error code value on failure.
+ */
+
+int32_t
+ocs_ramlog_vprintf(ocs_ramlog_t *ramlog, const char *fmt, va_list ap)
+{
+ if (ramlog == NULL || !ramlog->initialized) {
+ return -1;
+ }
+
+	/* Check the current text buffer; if it is almost full (fewer than 120
+	 * characters remaining), roll to the next one.
+	 */
+ ocs_lock(&ramlog->lock);
+ if (ocs_textbuf_remaining(ramlog->cur_textbuf) < 120) {
+ ramlog->cur_textbuf_idx = ocs_ramlog_next_idx(ramlog, ramlog->cur_textbuf_idx);
+ ramlog->cur_textbuf = &ramlog->textbufs[ramlog->cur_textbuf_idx];
+ ocs_textbuf_reset(ramlog->cur_textbuf);
+ }
+
+ ocs_textbuf_vprintf(ramlog->cur_textbuf, fmt, ap);
+ ocs_unlock(&ramlog->lock);
+
+ return 0;
+}
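+
+/*
+ * Example usage (hypothetical sketch): allocate a ramlog of four 16 KB text
+ * buffers; the first holds start-of-day (driver init) messages, the rest
+ * form a circular set of recent-message buffers:
+ *
+ *	ocs->ramlog = ocs_ramlog_init(ocs, 16*1024, 4);
+ *	ocs_ramlog_printf(ocs, "link up\n");
+ */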
+
+/**
+ * @brief Return next ramlog buffer index.
+ *
+ * Given a RAM logging buffer index, return the next index.
+ *
+ * @param ramlog Pointer to RAM logging buffer.
+ * @param idx Index value.
+ *
+ * @return Returns next index value.
+ */
+
+static uint32_t
+ocs_ramlog_next_idx(ocs_ramlog_t *ramlog, uint32_t idx)
+{
+ idx = idx + 1;
+
+ if (idx >= ramlog->textbuf_count) {
+ idx = ramlog->textbuf_base;
+ }
+
+ return idx;
+}
+
+/**
+ * @brief Perform ramlog buffer driver dump.
+ *
+ * The RAM logging buffer is appended to the driver dump data.
+ *
+ * @param textbuf Pointer to the driver dump text buffer.
+ * @param ramlog Pointer to the RAM logging buffer.
+ *
+ * @return Returns 0 on success, or a negative error code value on failure.
+ */
+
+int32_t
+ocs_ddump_ramlog(ocs_textbuf_t *textbuf, ocs_ramlog_t *ramlog)
+{
+ uint32_t i;
+ ocs_textbuf_t *rltextbuf;
+ int idx;
+
+ if ((ramlog == NULL) || (ramlog->textbufs == NULL)) {
+ return -1;
+ }
+
+ ocs_ddump_section(textbuf, "driver-log", 0);
+
+ /* Dump the start of day buffer */
+ ocs_ddump_section(textbuf, "startofday", 0);
+ /* If textbuf_base is 0, then all buffers are used for recent */
+ if (ramlog->textbuf_base) {
+ rltextbuf = &ramlog->textbufs[0];
+ ocs_textbuf_buffer(textbuf, ocs_textbuf_get_buffer(rltextbuf), ocs_textbuf_get_written(rltextbuf));
+ }
+ ocs_ddump_endsection(textbuf, "startofday", 0);
+
+ /* Dump the most recent buffers */
+ ocs_ddump_section(textbuf, "recent", 0);
+
+ /* start with the next textbuf */
+ idx = ocs_ramlog_next_idx(ramlog, ramlog->textbuf_count);
+
+ for (i = ramlog->textbuf_base; i < ramlog->textbuf_count; i ++) {
+ rltextbuf = &ramlog->textbufs[idx];
+ ocs_textbuf_buffer(textbuf, ocs_textbuf_get_buffer(rltextbuf), ocs_textbuf_get_written(rltextbuf));
+ idx = ocs_ramlog_next_idx(ramlog, idx);
+ }
+ ocs_ddump_endsection(textbuf, "recent", 0);
+ ocs_ddump_endsection(textbuf, "driver-log", 0);
+
+ return 0;
+}
+
+struct ocs_pool_s {
+ ocs_os_handle_t os;
+ ocs_array_t *a;
+ ocs_list_t freelist;
+ uint32_t use_lock:1;
+ ocs_lock_t lock;
+};
+
+typedef struct {
+ ocs_list_link_t link;
+} pool_hdr_t;
+
+
+/**
+ * @brief Allocate a memory pool.
+ *
+ * A memory pool of given size and item count is allocated.
+ *
+ * @param os OS handle.
+ * @param size Size in bytes of item.
+ * @param count Number of items in a memory pool.
+ * @param use_lock TRUE to enable locking of pool.
+ *
+ * @return Returns pointer to allocated memory pool, or NULL.
+ */
+ocs_pool_t *
+ocs_pool_alloc(ocs_os_handle_t os, uint32_t size, uint32_t count, uint32_t use_lock)
+{
+ ocs_pool_t *pool;
+ uint32_t i;
+
+ pool = ocs_malloc(os, sizeof(*pool), OCS_M_ZERO | OCS_M_NOWAIT);
+ if (pool == NULL) {
+ return NULL;
+ }
+
+ pool->os = os;
+ pool->use_lock = use_lock;
+
+ /* Allocate an array where each array item is the size of a pool_hdr_t plus
+ * the requested memory item size (size)
+ */
+ pool->a = ocs_array_alloc(os, size + sizeof(pool_hdr_t), count);
+ if (pool->a == NULL) {
+ ocs_pool_free(pool);
+ return NULL;
+ }
+
+ ocs_list_init(&pool->freelist, pool_hdr_t, link);
+ for (i = 0; i < count; i++) {
+ ocs_list_add_tail(&pool->freelist, ocs_array_get(pool->a, i));
+ }
+
+ if (pool->use_lock) {
+ ocs_lock_init(os, &pool->lock, "ocs_pool:%p", pool);
+ }
+
+ return pool;
+}
+
+/**
+ * @brief Reset a memory pool.
+ *
+ * Place all pool elements on the free list, and zero them.
+ *
+ * @param pool Pointer to the pool object.
+ *
+ * @return None.
+ */
+void
+ocs_pool_reset(ocs_pool_t *pool)
+{
+ uint32_t i;
+ uint32_t count = ocs_array_get_count(pool->a);
+ uint32_t size = ocs_array_get_size(pool->a);
+
+ if (pool->use_lock) {
+ ocs_lock(&pool->lock);
+ }
+
+ /*
+ * Remove all the entries from the free list, otherwise we will
+	 * encounter linked list asserts when they are re-added.
+ */
+ while (!ocs_list_empty(&pool->freelist)) {
+ ocs_list_remove_head(&pool->freelist);
+ }
+
+ /* Reset the free list */
+ ocs_list_init(&pool->freelist, pool_hdr_t, link);
+
+ /* Return all elements to the free list and zero the elements */
+ for (i = 0; i < count; i++) {
+ ocs_memset(ocs_pool_get_instance(pool, i), 0, size - sizeof(pool_hdr_t));
+ ocs_list_add_tail(&pool->freelist, ocs_array_get(pool->a, i));
+ }
+ if (pool->use_lock) {
+ ocs_unlock(&pool->lock);
+ }
+
+}
+
+/**
+ * @brief Free a previously allocated memory pool.
+ *
+ * The memory pool is freed.
+ *
+ * @param pool Pointer to memory pool.
+ *
+ * @return None.
+ */
+void
+ocs_pool_free(ocs_pool_t *pool)
+{
+ if (pool != NULL) {
+ if (pool->a != NULL) {
+ ocs_array_free(pool->a);
+ }
+ if (pool->use_lock) {
+ ocs_lock_free(&pool->lock);
+ }
+ ocs_free(pool->os, pool, sizeof(*pool));
+ }
+}
+
+/**
+ * @brief Allocate a memory pool item
+ *
+ * A memory pool item is taken from the free list and returned.
+ *
+ * @param pool Pointer to memory pool.
+ *
+ * @return Returns pointer to the allocated item, or NULL if no free items remain.
+ */
+void *
+ocs_pool_get(ocs_pool_t *pool)
+{
+ pool_hdr_t *h;
+ void *item = NULL;
+
+ if (pool->use_lock) {
+ ocs_lock(&pool->lock);
+ }
+
+ h = ocs_list_remove_head(&pool->freelist);
+
+ if (h != NULL) {
+ /* Return the array item address offset by the size of pool_hdr_t */
+ item = &h[1];
+ }
+
+ if (pool->use_lock) {
+ ocs_unlock(&pool->lock);
+ }
+ return item;
+}
+
+/**
+ * @brief Free a memory pool item
+ *
+ * A memory pool item is freed.
+ *
+ * @param pool Pointer to memory pool.
+ * @param item Pointer to item to free.
+ *
+ * @return None.
+ */
+void
+ocs_pool_put(ocs_pool_t *pool, void *item)
+{
+ pool_hdr_t *h;
+
+ if (pool->use_lock) {
+ ocs_lock(&pool->lock);
+ }
+
+	/* Fetch the address of the array item, which is the item address
+	 * negatively offset by the size of pool_hdr_t (note the [-1] index).
+	 */
+ h = &((pool_hdr_t*)item)[-1];
+
+ ocs_list_add_tail(&pool->freelist, h);
+
+ if (pool->use_lock) {
+ ocs_unlock(&pool->lock);
+ }
+
+}
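
The get/put pair relies on every array element being laid out as a pool_hdr_t immediately followed by the caller's item, so the two addresses interconvert with plain pointer arithmetic. A self-contained sketch of the same trick in userland C (illustrative only; struct hdr stands in for pool_hdr_t):

#include <stdlib.h>

struct hdr { struct hdr *next; };	/* stand-in for pool_hdr_t */

/* Allocate header plus item in one block; hand back the item address. */
static void *
item_alloc(size_t item_size)
{
	struct hdr *h = calloc(1, sizeof(*h) + item_size);
	return (h != NULL) ? (void *)&h[1] : NULL;	/* skip the header */
}

/* Recover the header from an item address, as ocs_pool_put() does. */
static struct hdr *
item_to_hdr(void *item)
{
	return &((struct hdr *)item)[-1];	/* step back one header */
}
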
+
+/**
+ * @brief Return memory pool item count.
+ *
+ * Returns the allocated number of items.
+ *
+ * @param pool Pointer to memory pool.
+ *
+ * @return Returns count of allocated items.
+ */
+uint32_t
+ocs_pool_get_count(ocs_pool_t *pool)
+{
+ uint32_t count;
+ if (pool->use_lock) {
+ ocs_lock(&pool->lock);
+ }
+ count = ocs_array_get_count(pool->a);
+ if (pool->use_lock) {
+ ocs_unlock(&pool->lock);
+ }
+ return count;
+}
+
+/**
+ * @brief Return item given an index.
+ *
+ * A pointer to a memory pool item is returned given an index.
+ *
+ * @param pool Pointer to memory pool.
+ * @param idx Index.
+ *
+ * @return Returns pointer to item, or NULL if index is invalid.
+ */
+void *
+ocs_pool_get_instance(ocs_pool_t *pool, uint32_t idx)
+{
+ pool_hdr_t *h = ocs_array_get(pool->a, idx);
+
+ if (h == NULL) {
+ return NULL;
+ }
+ return &h[1];
+}
+
+/**
+ * @brief Return count of free objects in a pool.
+ *
+ * The number of objects on a pool's free list.
+ *
+ * @param pool Pointer to memory pool.
+ *
+ * @return Returns count of objects on free list.
+ */
+uint32_t
+ocs_pool_get_freelist_count(ocs_pool_t *pool)
+{
+ uint32_t count = 0;
+ void *item;
+
+ if (pool->use_lock) {
+ ocs_lock(&pool->lock);
+ }
+
+ ocs_list_foreach(&pool->freelist, item) {
+ count++;
+ }
+
+ if (pool->use_lock) {
+ ocs_unlock(&pool->lock);
+ }
+ return count;
+}
diff --git a/sys/dev/ocs_fc/ocs_utils.h b/sys/dev/ocs_fc/ocs_utils.h
new file mode 100644
index 000000000000..b4f6ec0635fa
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs_utils.h
@@ -0,0 +1,345 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/**
+ * @file
+ *
+ */
+
+#ifndef __OCS_UTILS_H
+#define __OCS_UTILS_H
+
+#include "ocs_list.h"
+typedef struct ocs_array_s ocs_array_t;
+
+extern void ocs_array_set_slablen(uint32_t len);
+extern ocs_array_t *ocs_array_alloc(ocs_os_handle_t os, uint32_t size, uint32_t count);
+extern void ocs_array_free(ocs_array_t *array);
+extern void *ocs_array_get(ocs_array_t *array, uint32_t idx);
+extern uint32_t ocs_array_get_count(ocs_array_t *array);
+extern uint32_t ocs_array_get_size(ocs_array_t *array);
+
+/* Void pointer array and iterator */
+typedef struct ocs_varray_s ocs_varray_t;
+
+extern ocs_varray_t *ocs_varray_alloc(ocs_os_handle_t os, uint32_t entry_count);
+extern void ocs_varray_free(ocs_varray_t *ai);
+extern int32_t ocs_varray_add(ocs_varray_t *ai, void *entry);
+extern void ocs_varray_iter_reset(ocs_varray_t *ai);
+extern void *ocs_varray_iter_next(ocs_varray_t *ai);
+extern void *_ocs_varray_iter_next(ocs_varray_t *ai);
+extern void ocs_varray_lock(ocs_varray_t *ai);
+extern void ocs_varray_unlock(ocs_varray_t *ai);
+extern uint32_t ocs_varray_get_count(ocs_varray_t *ai);
+
+
+/***************************************************************************
+ * Circular buffer
+ *
+ */
+
+typedef struct ocs_cbuf_s ocs_cbuf_t;
+
+extern ocs_cbuf_t *ocs_cbuf_alloc(ocs_os_handle_t os, uint32_t entry_count);
+extern void ocs_cbuf_free(ocs_cbuf_t *cbuf);
+extern void *ocs_cbuf_get(ocs_cbuf_t *cbuf, int32_t timeout_usec);
+extern int32_t ocs_cbuf_put(ocs_cbuf_t *cbuf, void *elem);
+extern int32_t ocs_cbuf_prime(ocs_cbuf_t *cbuf, ocs_array_t *array);
+
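This circular buffer is the hand-off between the interrupt path and the RQ threads created in ocs_xport.c. A hedged producer/consumer sketch, assuming ocs_cbuf_get() returns NULL when the timeout expires (the prototypes alone do not spell that out):

/* Producer (e.g. interrupt context): queue a received sequence. */
static int32_t
example_post_seq(ocs_cbuf_t *cbuf, void *seq)
{
	return ocs_cbuf_put(cbuf, seq);		/* non-zero on failure */
}

/* Consumer (RQ thread): wait up to 100 ms for the next sequence. */
static void *
example_wait_seq(ocs_cbuf_t *cbuf)
{
	return ocs_cbuf_get(cbuf, 100000);	/* assumed NULL on timeout */
}
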
+typedef struct {
+ void *vaddr;
+ uint32_t length;
+} ocs_scsi_vaddr_len_t;
+
+
+#define OCS_TEXTBUF_MAX_ALLOC_LEN (256*1024)
+
+typedef struct {
+ ocs_list_link_t link;
+ uint8_t user_allocated:1;
+ uint8_t *buffer;
+ uint32_t buffer_length;
+ uint32_t buffer_written;
+} ocs_textbuf_segment_t;
+
+typedef struct {
+ ocs_t *ocs;
+ ocs_list_t segment_list;
+ uint8_t extendable:1;
+ uint32_t allocation_length;
+ uint32_t total_allocation_length;
+ uint32_t max_allocation_length;
+} ocs_textbuf_t;
+
+extern int32_t ocs_textbuf_alloc(ocs_t *ocs, ocs_textbuf_t *textbuf, uint32_t length);
+extern uint32_t ocs_textbuf_initialized(ocs_textbuf_t *textbuf);
+extern int32_t ocs_textbuf_init(ocs_t *ocs, ocs_textbuf_t *textbuf, void *buffer, uint32_t length);
+extern void ocs_textbuf_free(ocs_t *ocs, ocs_textbuf_t *textbuf);
+extern void ocs_textbuf_putc(ocs_textbuf_t *textbuf, uint8_t c);
+extern void ocs_textbuf_puts(ocs_textbuf_t *textbuf, char *s);
+__attribute__((format(printf,2,3)))
+extern void ocs_textbuf_printf(ocs_textbuf_t *textbuf, const char *fmt, ...);
+__attribute__((format(printf,2,0)))
+extern void ocs_textbuf_vprintf(ocs_textbuf_t *textbuf, const char *fmt, va_list ap);
+extern void ocs_textbuf_buffer(ocs_textbuf_t *textbuf, uint8_t *buffer, uint32_t buffer_length);
+extern void ocs_textbuf_copy(ocs_textbuf_t *textbuf, uint8_t *buffer, uint32_t buffer_length);
+extern int32_t ocs_textbuf_remaining(ocs_textbuf_t *textbuf);
+extern void ocs_textbuf_reset(ocs_textbuf_t *textbuf);
+extern uint8_t *ocs_textbuf_get_buffer(ocs_textbuf_t *textbuf);
+extern int32_t ocs_textbuf_get_length(ocs_textbuf_t *textbuf);
+extern int32_t ocs_textbuf_get_written(ocs_textbuf_t *textbuf);
+extern uint8_t *ocs_textbuf_ext_get_buffer(ocs_textbuf_t *textbuf, uint32_t idx);
+extern int32_t ocs_textbuf_ext_get_length(ocs_textbuf_t *textbuf, uint32_t idx);
+extern int32_t ocs_textbuf_ext_get_written(ocs_textbuf_t *textbuf, uint32_t idx);
+
+
+typedef struct ocs_pool_s ocs_pool_t;
+
+extern ocs_pool_t *ocs_pool_alloc(ocs_os_handle_t os, uint32_t size, uint32_t count, uint32_t use_lock);
+extern void ocs_pool_reset(ocs_pool_t *pool);
+extern void ocs_pool_free(ocs_pool_t *pool);
+extern void *ocs_pool_get(ocs_pool_t *pool);
+extern void ocs_pool_put(ocs_pool_t *pool, void *item);
+extern uint32_t ocs_pool_get_count(ocs_pool_t *pool);
+extern void *ocs_pool_get_instance(ocs_pool_t *pool, uint32_t idx);
+extern uint32_t ocs_pool_get_freelist_count(ocs_pool_t *pool);
+
+
+/* Uncomment this line to enable logging extended queue history
+ */
+//#define OCS_DEBUG_QUEUE_HISTORY
+
+
+/* Allocate maximum allowed (4M) */
+#if defined(OCS_DEBUG_QUEUE_HISTORY)
+#define OCS_Q_HIST_SIZE (1000000UL) /* Size in words */
+#endif
+
+#define OCS_LOG_ENABLE_SM_TRACE(ocs) (((ocs) != NULL) ? (((ocs)->logmask & (1U << 0)) != 0) : 0)
+#define OCS_LOG_ENABLE_ELS_TRACE(ocs) (((ocs) != NULL) ? (((ocs)->logmask & (1U << 1)) != 0) : 0)
+#define OCS_LOG_ENABLE_SCSI_TRACE(ocs) (((ocs) != NULL) ? (((ocs)->logmask & (1U << 2)) != 0) : 0)
+#define OCS_LOG_ENABLE_SCSI_TGT_TRACE(ocs) (((ocs) != NULL) ? (((ocs)->logmask & (1U << 3)) != 0) : 0)
+#define OCS_LOG_ENABLE_DOMAIN_SM_TRACE(ocs) (((ocs) != NULL) ? (((ocs)->logmask & (1U << 4)) != 0) : 0)
+#define OCS_LOG_ENABLE_Q_FULL_BUSY_MSG(ocs) (((ocs) != NULL) ? (((ocs)->logmask & (1U << 5)) != 0) : 0)
+#define OCS_LOG_ENABLE_IO_ERRORS(ocs) (((ocs) != NULL) ? (((ocs)->logmask & (1U << 6)) != 0) : 0)
+
+
+extern void ocs_dump32(uint32_t, ocs_os_handle_t, const char *, void *, uint32_t);
+extern void ocs_debug_enable(uint32_t mask);
+extern void ocs_debug_disable(uint32_t mask);
+extern int ocs_debug_is_enabled(uint32_t mask);
+extern void ocs_debug_attach(void *);
+extern void ocs_debug_detach(void *);
+
+#if defined(OCS_DEBUG_QUEUE_HISTORY)
+
+/**
+ * @brief Queue history footer
+ */
+typedef union ocs_q_hist_ftr_u {
+ uint32_t word;
+ struct {
+#define Q_HIST_TYPE_LEN 3
+#define Q_HIST_MASK_LEN 29
+ uint32_t mask:Q_HIST_MASK_LEN,
+ type:Q_HIST_TYPE_LEN;
+ } s;
+} ocs_q_hist_ftr_t;
+
+
+/**
+ * @brief WQE command mask lookup
+ */
+typedef struct ocs_q_hist_wqe_mask_s {
+ uint8_t command;
+ uint32_t mask;
+} ocs_q_hist_wqe_mask_t;
+
+/**
+ * @brief CQE mask lookup
+ */
+typedef struct ocs_q_hist_cqe_mask_s {
+ uint8_t ctype;
+ uint32_t :Q_HIST_MASK_LEN,
+ type:Q_HIST_TYPE_LEN;
+ uint32_t mask;
+ uint32_t mask_err;
+} ocs_q_hist_cqe_mask_t;
+
+/**
+ * @brief Queue history type
+ */
+typedef enum {
+ /* changes need to be made to ocs_queue_history_type_name() as well */
+ OCS_Q_HIST_TYPE_WQE = 0,
+ OCS_Q_HIST_TYPE_CWQE,
+ OCS_Q_HIST_TYPE_CXABT,
+ OCS_Q_HIST_TYPE_MISC,
+} ocs_q_hist_type_t;
+
+static __inline const char *
+ocs_queue_history_type_name(ocs_q_hist_type_t type)
+{
+ switch (type) {
+ case OCS_Q_HIST_TYPE_WQE: return "wqe"; break;
+ case OCS_Q_HIST_TYPE_CWQE: return "wcqe"; break;
+ case OCS_Q_HIST_TYPE_CXABT: return "xacqe"; break;
+ case OCS_Q_HIST_TYPE_MISC: return "misc"; break;
+ default: return "unknown"; break;
+ }
+}
+
+typedef struct {
+ ocs_t *ocs;
+ uint32_t *q_hist;
+ uint32_t q_hist_index;
+ ocs_lock_t q_hist_lock;
+} ocs_hw_q_hist_t;
+
+extern void ocs_queue_history_cqe(ocs_hw_q_hist_t*, uint8_t, uint32_t *, uint8_t, uint32_t, uint32_t);
+extern void ocs_queue_history_wq(ocs_hw_q_hist_t*, uint32_t *, uint32_t, uint32_t);
+extern void ocs_queue_history_misc(ocs_hw_q_hist_t*, uint32_t *, uint32_t);
+extern void ocs_queue_history_init(ocs_t *, ocs_hw_q_hist_t*);
+extern void ocs_queue_history_free(ocs_hw_q_hist_t*);
+extern uint32_t ocs_queue_history_prev_index(uint32_t);
+extern uint8_t ocs_queue_history_q_info_enabled(void);
+extern uint8_t ocs_queue_history_timestamp_enabled(void);
+#else
+#define ocs_queue_history_wq(...)
+#define ocs_queue_history_cqe(...)
+#define ocs_queue_history_misc(...)
+#define ocs_queue_history_init(...)
+#define ocs_queue_history_free(...)
+#endif
+
+#define OCS_DEBUG_ALWAYS (1U << 0)
+#define OCS_DEBUG_ENABLE_MQ_DUMP (1U << 1)
+#define OCS_DEBUG_ENABLE_CQ_DUMP (1U << 2)
+#define OCS_DEBUG_ENABLE_WQ_DUMP (1U << 3)
+#define OCS_DEBUG_ENABLE_EQ_DUMP (1U << 4)
+#define OCS_DEBUG_ENABLE_SPARAM_DUMP (1U << 5)
+
+extern void _ocs_assert(const char *cond, const char *filename, int linenum);
+
+#define ocs_assert(cond, ...) \
+ do { \
+ if (!(cond)) { \
+ _ocs_assert(#cond, __FILE__, __LINE__); \
+ return __VA_ARGS__; \
+ } \
+ } while (0)
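
Note that a failed ocs_assert() returns from the enclosing function: the variadic argument is the value to return, and it is omitted in void functions. Hypothetical callers:

/* In a function returning int32_t, bail out with -1 on failure. */
static int32_t
example_validate(ocs_t *ocs)
{
	ocs_assert(ocs, -1);	/* expands to "return -1;" if ocs is NULL */
	return 0;
}

/* In a void function, the extra argument is simply omitted. */
static void
example_validate_void(ocs_t *ocs)
{
	ocs_assert(ocs);	/* expands to a bare "return;" */
}
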
+
+extern void ocs_dump_service_params(const char *label, void *sparms);
+extern void ocs_display_sparams(const char *prelabel, const char *reqlabel, int dest, void *textbuf, void *sparams);
+
+
+typedef struct {
+ uint16_t crc;
+ uint16_t app_tag;
+ uint32_t ref_tag;
+} ocs_dif_t;
+
+/* DIF guard calculations */
+extern uint16_t ocs_scsi_dif_calc_crc(const uint8_t *, uint32_t size, uint16_t crc);
+extern uint16_t ocs_scsi_dif_calc_checksum(ocs_scsi_vaddr_len_t addrlen[], uint32_t addrlen_count);
+
+/**
+ * @brief Power State change message types
+ *
+ */
+typedef enum {
+ OCS_PM_PREPARE = 1,
+ OCS_PM_SLEEP,
+ OCS_PM_HIBERNATE,
+ OCS_PM_RESUME,
+} ocs_pm_msg_e;
+
+/**
+ * @brief Power State values
+ *
+ */
+typedef enum {
+ OCS_PM_STATE_S0 = 0,
+ OCS_PM_STATE_S1,
+ OCS_PM_STATE_S2,
+ OCS_PM_STATE_S3,
+ OCS_PM_STATE_S4,
+} ocs_pm_state_e;
+
+typedef struct {
+	ocs_pm_state_e pm_state;		/**< Current PM state */
+} ocs_pm_context_t;
+
+extern int32_t ocs_pm_request(ocs_t *ocs, ocs_pm_msg_e msg, int32_t (*callback)(ocs_t *ocs, int32_t status, void *arg),
+ void *arg);
+extern ocs_pm_state_e ocs_pm_get_state(ocs_t *ocs);
+extern const char *ocs_pm_get_state_string(ocs_t *ocs);
+
+#define SPV_ROWLEN 256
+#define SPV_DIM 3
+
+
+/*!
+ * @defgroup spv Sparse Vector
+ */
+
+/**
+ * @brief Sparse vector structure.
+ */
+typedef struct sparse_vector_s {
+ ocs_os_handle_t os;
+ uint32_t max_idx; /**< maximum index value */
+ void **array; /**< pointer to 3D array */
+} *sparse_vector_t;
+
+extern void spv_del(sparse_vector_t spv);
+extern sparse_vector_t spv_new(ocs_os_handle_t os);
+extern void spv_set(sparse_vector_t sv, uint32_t idx, void *value);
+extern void *spv_get(sparse_vector_t sv, uint32_t idx);
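
With SPV_ROWLEN 256 and SPV_DIM 3 the vector spans 256^3 = 16M slots, which matches the 24-bit FC port ID space, so a port ID can be mapped to a pointer without a flat 16M-entry table. A hedged usage sketch (the lookup scenario is illustrative):

/* Map a 24-bit FC port ID to an opaque node pointer, sparsely. */
static void *
example_spv(ocs_os_handle_t os, uint32_t fc_id, void *node)
{
	sparse_vector_t sv = spv_new(os);
	void *found = NULL;

	if (sv != NULL) {
		spv_set(sv, fc_id, node);	/* store at index fc_id */
		found = spv_get(sv, fc_id);	/* look it up again */
		spv_del(sv);			/* frees all levels */
	}
	return found;
}
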
+
+extern unsigned short t10crc16(const unsigned char *blk_adr, unsigned long blk_len, unsigned short crc);
+
+typedef struct ocs_ramlog_s ocs_ramlog_t;
+
+#define OCS_RAMLOG_DEFAULT_BUFFERS 5
+
+extern ocs_ramlog_t *ocs_ramlog_init(ocs_t *ocs, uint32_t buffer_len, uint32_t buffer_count);
+extern void ocs_ramlog_free(ocs_t *ocs, ocs_ramlog_t *ramlog);
+extern void ocs_ramlog_clear(ocs_t *ocs, ocs_ramlog_t *ramlog, int clear_start_of_day, int clear_recent);
+__attribute__((format(printf,2,3)))
+extern int32_t ocs_ramlog_printf(void *os, const char *fmt, ...);
+__attribute__((format(printf,2,0)))
+extern int32_t ocs_ramlog_vprintf(ocs_ramlog_t *ramlog, const char *fmt, va_list ap);
+extern int32_t ocs_ddump_ramlog(ocs_textbuf_t *textbuf, ocs_ramlog_t *ramlog);
+
+#endif
diff --git a/sys/dev/ocs_fc/ocs_vpd.h b/sys/dev/ocs_fc/ocs_vpd.h
new file mode 100644
index 000000000000..c5c0341320e4
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs_vpd.h
@@ -0,0 +1,203 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/**
+ * @file
+ * OCS VPD parser
+ */
+
+#if !defined(__OCS_VPD_H__)
+#define __OCS_VPD_H__
+
+/**
+ * @brief VPD buffer structure
+ */
+
+typedef struct {
+ uint8_t *buffer;
+ uint32_t length;
+ uint32_t offset;
+ uint8_t checksum;
+ } vpdbuf_t;
+
+/**
+ * @brief return next VPD byte
+ *
+ * Returns next VPD byte and updates accumulated checksum
+ *
+ * @param vpd pointer to vpd buffer
+ *
+ * @return returns next byte for success, or a negative error code value for failure.
+ *
+ */
+
+static inline int
+vpdnext(vpdbuf_t *vpd)
+{
+ int rc = -1;
+ if (vpd->offset < vpd->length) {
+ rc = vpd->buffer[vpd->offset++];
+ vpd->checksum += rc;
+ }
+ return rc;
+}
+
+/**
+ * @brief return true if no more vpd buffer data
+ *
+ * return true if the vpd buffer data has been completely consumed
+ *
+ * @param vpd pointer to vpd buffer
+ *
+ * @return returns true if no more data
+ *
+ */
+static inline int
+vpddone(vpdbuf_t *vpd)
+{
+ return vpd->offset >= vpd->length;
+}
+/**
+ * @brief return pointer to current VPD data location
+ *
+ * Returns a pointer to the current location in the VPD data
+ *
+ * @param vpd pointer to vpd buffer
+ *
+ * @return pointer to current VPD data location
+ */
+
+static inline uint8_t *
+vpdref(vpdbuf_t *vpd)
+{
+ return &vpd->buffer[vpd->offset];
+}
+
+#define VPD_LARGE_RESOURCE_TYPE_ID_STRING_TAG 0x82
+#define VPD_LARGE_RESOURCE_TYPE_R_TAG 0x90
+#define VPD_LARGE_RESOURCE_TYPE_W_TAG 0x91
+#define VPD_SMALL_RESOURCE_TYPE_END_TAG 0x78
+
+/**
+ * @brief find a VPD entry
+ *
+ * Finds a VPD entry given the two character code
+ *
+ * @param vpddata pointer to raw vpd data buffer
+ * @param vpddata_length length of vpddata buffer in bytes
+ * @param key key to look up
+ *
+ * @return returns a pointer to the key location or NULL if not found or checksum error
+ */
+
+static inline uint8_t *
+ocs_find_vpd(uint8_t *vpddata, uint32_t vpddata_length, const char *key)
+{
+ vpdbuf_t vpdbuf;
+ uint8_t *pret = NULL;
+ uint8_t c0 = key[0];
+ uint8_t c1 = key[1];
+
+ vpdbuf.buffer = (uint8_t*) vpddata;
+ vpdbuf.length = vpddata_length;
+ vpdbuf.offset = 0;
+ vpdbuf.checksum = 0;
+
+ while (!vpddone(&vpdbuf)) {
+ int type = vpdnext(&vpdbuf);
+ int len_lo;
+ int len_hi;
+ int len;
+ int i;
+
+ if (type == VPD_SMALL_RESOURCE_TYPE_END_TAG) {
+ break;
+ }
+
+ len_lo = vpdnext(&vpdbuf);
+ len_hi = vpdnext(&vpdbuf);
+ len = len_lo + (len_hi << 8);
+
+ if ((type == VPD_LARGE_RESOURCE_TYPE_R_TAG) || (type == VPD_LARGE_RESOURCE_TYPE_W_TAG)) {
+ while (len > 0) {
+ int rc0;
+ int rc1;
+ int sublen;
+ uint8_t *pstart;
+
+ rc0 = vpdnext(&vpdbuf);
+ rc1 = vpdnext(&vpdbuf);
+
+ /* Mark this location */
+ pstart = vpdref(&vpdbuf);
+
+ sublen = vpdnext(&vpdbuf);
+
+ /* Adjust remaining len */
+ len -= (sublen + 3);
+
+ /* check for match with request */
+ if ((c0 == rc0) && (c1 == rc1)) {
+ pret = pstart;
+ for (i = 0; i < sublen; i++) {
+ vpdnext(&vpdbuf);
+ }
+ /* check for "RV" end */
+ } else if ('R' == rc0 && 'V' == rc1) {
+
+ /* Read the checksum */
+ for (i = 0; i < sublen; i++) {
+ vpdnext(&vpdbuf);
+ }
+
+ /* The accumulated checksum should be zero here */
+ if (vpdbuf.checksum != 0) {
+ ocs_log_test(NULL, "checksum error\n");
+ return NULL;
+ }
+				} else {
+					for (i = 0; i < sublen; i++) {
+						vpdnext(&vpdbuf);
+					}
+				}
+ }
+ }
+
+ for (i = 0; i < len; i++) {
+ vpdnext(&vpdbuf);
+ }
+ }
+
+ return pret;
+}
+#endif
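
A usage sketch for the parser above; "SN" is the standard PCI VPD serial-number keyword. As ocs_find_vpd() captures it, the returned pointer references the one-byte sub-field length, with the field data immediately after it. The wrapper below is illustrative:

/* Extract the serial number from a raw VPD image, if present. */
static int32_t
example_get_serial(uint8_t *vpd, uint32_t vpd_len, char *out, uint32_t outlen)
{
	uint8_t *p = ocs_find_vpd(vpd, vpd_len, "SN");

	if (p == NULL || p[0] == 0 || (uint32_t)p[0] + 1 > outlen) {
		return -1;		/* not found, empty, or too large */
	}
	ocs_memcpy(out, &p[1], p[0]);	/* p[0] is the sub-field length */
	out[p[0]] = '\0';
	return 0;
}
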
diff --git a/sys/dev/ocs_fc/ocs_xport.c b/sys/dev/ocs_fc/ocs_xport.c
new file mode 100644
index 000000000000..b7211c9851d2
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs_xport.c
@@ -0,0 +1,1105 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/**
+ * @file
+ * FC transport API
+ *
+ */
+
+#include "ocs.h"
+#include "ocs_device.h"
+
+static void ocs_xport_link_stats_cb(int32_t status, uint32_t num_counters, ocs_hw_link_stat_counts_t *counters, void *arg);
+static void ocs_xport_host_stats_cb(int32_t status, uint32_t num_counters, ocs_hw_host_stat_counts_t *counters, void *arg);
+/**
+ * @brief Post node event callback argument.
+ */
+typedef struct {
+ ocs_sem_t sem;
+ ocs_node_t *node;
+ ocs_sm_event_t evt;
+ void *context;
+} ocs_xport_post_node_event_t;
+
+/**
+ * @brief Allocate a transport object.
+ *
+ * @par Description
+ * A transport object is allocated, and associated with a device instance.
+ *
+ * @param ocs Pointer to device instance.
+ *
+ * @return Returns the pointer to the allocated transport object, or NULL if failed.
+ */
+ocs_xport_t *
+ocs_xport_alloc(ocs_t *ocs)
+{
+ ocs_xport_t *xport;
+
+ ocs_assert(ocs, NULL);
+ xport = ocs_malloc(ocs, sizeof(*xport), OCS_M_ZERO);
+ if (xport != NULL) {
+ xport->ocs = ocs;
+ }
+ return xport;
+}
+
+/**
+ * @brief Tear down the RQ threads and the circular buffers used to pass sequences.
+ *
+ * @par Description
+ * Terminates the RQ servicing threads and frees their circular buffers.
+ *
+ * @param xport Pointer to transport object.
+ *
+ * @return None.
+ */
+static void
+ocs_xport_rq_threads_teardown(ocs_xport_t *xport)
+{
+ ocs_t *ocs = xport->ocs;
+ uint32_t i;
+
+ if (xport->num_rq_threads == 0 ||
+ xport->rq_thread_info == NULL) {
+ return;
+ }
+
+ /* Abort any threads */
+ for (i = 0; i < xport->num_rq_threads; i++) {
+ if (xport->rq_thread_info[i].thread_started) {
+ ocs_thread_terminate(&xport->rq_thread_info[i].thread);
+ /* wait for the thread to exit */
+ ocs_log_debug(ocs, "wait for thread %d to exit\n", i);
+ while (xport->rq_thread_info[i].thread_started) {
+ ocs_udelay(10000);
+ }
+			ocs_log_debug(ocs, "thread %d exited\n", i);
+ }
+ if (xport->rq_thread_info[i].seq_cbuf != NULL) {
+ ocs_cbuf_free(xport->rq_thread_info[i].seq_cbuf);
+ xport->rq_thread_info[i].seq_cbuf = NULL;
+ }
+ }
+}
+
+/**
+ * @brief Create the RQ threads and the circular buffers used to pass sequences.
+ *
+ * @par Description
+ * Creates the circular buffers and the servicing threads for RQ processing.
+ *
+ * @param xport Pointer to transport object.
+ * @param num_rq_threads Number of RQ processing threads that the
+ * driver creates.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+static int32_t
+ocs_xport_rq_threads_create(ocs_xport_t *xport, uint32_t num_rq_threads)
+{
+ ocs_t *ocs = xport->ocs;
+ int32_t rc = 0;
+ uint32_t i;
+
+ xport->num_rq_threads = num_rq_threads;
+ ocs_log_debug(ocs, "number of RQ threads %d\n", num_rq_threads);
+ if (num_rq_threads == 0) {
+ return 0;
+ }
+
+ /* Allocate the space for the thread objects */
+ xport->rq_thread_info = ocs_malloc(ocs, sizeof(ocs_xport_rq_thread_info_t) * num_rq_threads, OCS_M_ZERO);
+ if (xport->rq_thread_info == NULL) {
+ ocs_log_err(ocs, "memory allocation failure\n");
+ return -1;
+ }
+
+ /* Create the circular buffers and threads. */
+ for (i = 0; i < num_rq_threads; i++) {
+ xport->rq_thread_info[i].ocs = ocs;
+ xport->rq_thread_info[i].seq_cbuf = ocs_cbuf_alloc(ocs, OCS_HW_RQ_NUM_HDR);
+ if (xport->rq_thread_info[i].seq_cbuf == NULL) {
+ goto ocs_xport_rq_threads_create_error;
+ }
+
+ ocs_snprintf(xport->rq_thread_info[i].thread_name,
+ sizeof(xport->rq_thread_info[i].thread_name),
+ "ocs_unsol_rq:%d:%d", ocs->instance_index, i);
+ rc = ocs_thread_create(ocs, &xport->rq_thread_info[i].thread, ocs_unsol_rq_thread,
+ xport->rq_thread_info[i].thread_name,
+ &xport->rq_thread_info[i], OCS_THREAD_RUN);
+ if (rc) {
+ ocs_log_err(ocs, "ocs_thread_create failed: %d\n", rc);
+ goto ocs_xport_rq_threads_create_error;
+ }
+ xport->rq_thread_info[i].thread_started = TRUE;
+ }
+ return 0;
+
+ocs_xport_rq_threads_create_error:
+ ocs_xport_rq_threads_teardown(xport);
+ return -1;
+}
+
+/**
+ * @brief Do as much allocation as possible, but do not initialize the device.
+ *
+ * @par Description
+ * Performs the functions required to get a device ready to run.
+ *
+ * @param xport Pointer to transport object.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+int32_t
+ocs_xport_attach(ocs_xport_t *xport)
+{
+ ocs_t *ocs = xport->ocs;
+ int32_t rc;
+ uint32_t max_sgl;
+ uint32_t n_sgl;
+ uint32_t i;
+ uint32_t value;
+ uint32_t max_remote_nodes;
+
+ /* booleans used for cleanup if initialization fails */
+ uint8_t io_pool_created = FALSE;
+ uint8_t node_pool_created = FALSE;
+ uint8_t rq_threads_created = FALSE;
+
+ ocs_list_init(&ocs->domain_list, ocs_domain_t, link);
+
+ for (i = 0; i < SLI4_MAX_FCFI; i++) {
+ xport->fcfi[i].hold_frames = 1;
+ ocs_lock_init(ocs, &xport->fcfi[i].pend_frames_lock, "xport pend_frames[%d]", i);
+ ocs_list_init(&xport->fcfi[i].pend_frames, ocs_hw_sequence_t, link);
+ }
+
+ rc = ocs_hw_set_ptr(&ocs->hw, OCS_HW_WAR_VERSION, ocs->hw_war_version);
+ if (rc) {
+ ocs_log_test(ocs, "can't set OCS_HW_WAR_VERSION\n");
+ return -1;
+ }
+
+ rc = ocs_hw_setup(&ocs->hw, ocs, SLI4_PORT_TYPE_FC);
+ if (rc) {
+ ocs_log_err(ocs, "%s: Can't setup hardware\n", ocs->desc);
+ return -1;
+ } else if (ocs->ctrlmask & OCS_CTRLMASK_CRASH_RESET) {
+ ocs_log_debug(ocs, "stopping after ocs_hw_setup\n");
+ return -1;
+ }
+
+ ocs_hw_set(&ocs->hw, OCS_HW_BOUNCE, ocs->hw_bounce);
+ ocs_log_debug(ocs, "HW bounce: %d\n", ocs->hw_bounce);
+
+ ocs_hw_set(&ocs->hw, OCS_HW_RQ_SELECTION_POLICY, ocs->rq_selection_policy);
+ ocs_hw_set(&ocs->hw, OCS_HW_RR_QUANTA, ocs->rr_quanta);
+ ocs_hw_get(&ocs->hw, OCS_HW_RQ_SELECTION_POLICY, &value);
+ ocs_log_debug(ocs, "RQ Selection Policy: %d\n", value);
+
+ ocs_hw_set_ptr(&ocs->hw, OCS_HW_FILTER_DEF, (void*) ocs->filter_def);
+
+ ocs_hw_get(&ocs->hw, OCS_HW_MAX_SGL, &max_sgl);
+ max_sgl -= SLI4_SGE_MAX_RESERVED;
+ n_sgl = MIN(OCS_FC_MAX_SGL, max_sgl);
+
+ /* EVT: For chained SGL testing */
+ if (ocs->ctrlmask & OCS_CTRLMASK_TEST_CHAINED_SGLS) {
+ n_sgl = 4;
+ }
+
+ /* Note: number of SGLs must be set for ocs_node_create_pool */
+ if (ocs_hw_set(&ocs->hw, OCS_HW_N_SGL, n_sgl) != OCS_HW_RTN_SUCCESS) {
+ ocs_log_err(ocs, "%s: Can't set number of SGLs\n", ocs->desc);
+ return -1;
+ } else {
+ ocs_log_debug(ocs, "%s: Configured for %d SGLs\n", ocs->desc, n_sgl);
+ }
+
+ ocs_hw_get(&ocs->hw, OCS_HW_MAX_NODES, &max_remote_nodes);
+
+ rc = ocs_node_create_pool(ocs, (ocs->max_remote_nodes) ?
+ ocs->max_remote_nodes : max_remote_nodes);
+ if (rc) {
+ ocs_log_err(ocs, "Can't allocate node pool\n");
+ goto ocs_xport_attach_cleanup;
+ } else {
+ node_pool_created = TRUE;
+ }
+
+ /* EVT: if testing chained SGLs allocate OCS_FC_MAX_SGL SGE's in the IO */
+ xport->io_pool = ocs_io_pool_create(ocs, ocs->num_scsi_ios,
+ (ocs->ctrlmask & OCS_CTRLMASK_TEST_CHAINED_SGLS) ? OCS_FC_MAX_SGL : n_sgl);
+ if (xport->io_pool == NULL) {
+ ocs_log_err(ocs, "Can't allocate IO pool\n");
+ goto ocs_xport_attach_cleanup;
+ } else {
+ io_pool_created = TRUE;
+ }
+
+ /*
+ * setup the RQ processing threads
+ */
+ if (ocs_xport_rq_threads_create(xport, ocs->rq_threads) != 0) {
+ ocs_log_err(ocs, "failure creating RQ threads\n");
+ goto ocs_xport_attach_cleanup;
+ }
+ rq_threads_created = TRUE;
+
+ return 0;
+
+ocs_xport_attach_cleanup:
+ if (io_pool_created) {
+ ocs_io_pool_free(xport->io_pool);
+ }
+
+ if (node_pool_created) {
+ ocs_node_free_pool(ocs);
+ }
+
+ if (rq_threads_created) {
+ ocs_xport_rq_threads_teardown(xport);
+ }
+
+ return -1;
+}
+
+/**
+ * @brief Determine how to set up auto xfer ready.
+ *
+ * @par Description
+ * Queries the hardware for auto xfer ready support and, if capable,
+ * configures the feature, including the optional T10 DIF settings.
+ *
+ * @param xport Pointer to transport object.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+static int32_t
+ocs_xport_initialize_auto_xfer_ready(ocs_xport_t *xport)
+{
+ ocs_t *ocs = xport->ocs;
+ uint32_t auto_xfer_rdy;
+ char prop_buf[32];
+ uint32_t ramdisc_blocksize = 512;
+ uint8_t p_type = 0;
+
+ ocs_hw_get(&ocs->hw, OCS_HW_AUTO_XFER_RDY_CAPABLE, &auto_xfer_rdy);
+ if (!auto_xfer_rdy) {
+ ocs->auto_xfer_rdy_size = 0;
+ ocs_log_test(ocs, "Cannot enable auto xfer rdy for this port\n");
+ return 0;
+ }
+
+ if (ocs_hw_set(&ocs->hw, OCS_HW_AUTO_XFER_RDY_SIZE, ocs->auto_xfer_rdy_size)) {
+ ocs_log_test(ocs, "%s: Can't set auto xfer rdy mode\n", ocs->desc);
+ return -1;
+ }
+
+ /*
+ * Determine if we are doing protection in the backend. We are looking
+	 * at the module's parameters here. The backend cannot allow a format
+ * command to change the protection mode when using this feature,
+ * otherwise the firmware will not do the proper thing.
+ */
+ if (ocs_get_property("p_type", prop_buf, sizeof(prop_buf)) == 0) {
+ p_type = ocs_strtoul(prop_buf, 0, 0);
+ }
+ if (ocs_get_property("ramdisc_blocksize", prop_buf, sizeof(prop_buf)) == 0) {
+ ramdisc_blocksize = ocs_strtoul(prop_buf, 0, 0);
+ }
+ if (ocs_get_property("external_dif", prop_buf, sizeof(prop_buf)) == 0) {
+ if(ocs_strlen(prop_buf)) {
+ if (p_type == 0) {
+ p_type = 1;
+ }
+ }
+ }
+
+ if (p_type != 0) {
+ if (ocs_hw_set(&ocs->hw, OCS_HW_AUTO_XFER_RDY_T10_ENABLE, TRUE)) {
+ ocs_log_test(ocs, "%s: Can't set auto xfer rdy mode\n", ocs->desc);
+ return -1;
+ }
+ if (ocs_hw_set(&ocs->hw, OCS_HW_AUTO_XFER_RDY_BLK_SIZE, ramdisc_blocksize)) {
+ ocs_log_test(ocs, "%s: Can't set auto xfer rdy blk size\n", ocs->desc);
+ return -1;
+ }
+ if (ocs_hw_set(&ocs->hw, OCS_HW_AUTO_XFER_RDY_P_TYPE, p_type)) {
+ ocs_log_test(ocs, "%s: Can't set auto xfer rdy mode\n", ocs->desc);
+ return -1;
+ }
+ if (ocs_hw_set(&ocs->hw, OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA, TRUE)) {
+ ocs_log_test(ocs, "%s: Can't set auto xfer rdy ref tag\n", ocs->desc);
+ return -1;
+ }
+ if (ocs_hw_set(&ocs->hw, OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID, FALSE)) {
+ ocs_log_test(ocs, "%s: Can't set auto xfer rdy app tag valid\n", ocs->desc);
+ return -1;
+ }
+ }
+ ocs_log_debug(ocs, "Auto xfer rdy is enabled, p_type=%d, blksize=%d\n",
+ p_type, ramdisc_blocksize);
+ return 0;
+}
+
+/**
+ * @brief Initializes the device.
+ *
+ * @par Description
+ * Performs the functions required to make a device functional.
+ *
+ * @param xport Pointer to transport object.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+int32_t
+ocs_xport_initialize(ocs_xport_t *xport)
+{
+ ocs_t *ocs = xport->ocs;
+ int32_t rc;
+ uint32_t i;
+ uint32_t max_hw_io;
+ uint32_t max_sgl;
+ uint32_t hlm;
+ uint32_t rq_limit;
+ uint32_t dif_capable;
+ uint8_t dif_separate = 0;
+ char prop_buf[32];
+
+ /* booleans used for cleanup if initialization fails */
+ uint8_t ini_device_set = FALSE;
+ uint8_t tgt_device_set = FALSE;
+ uint8_t hw_initialized = FALSE;
+
+ ocs_hw_get(&ocs->hw, OCS_HW_MAX_IO, &max_hw_io);
+ if (ocs_hw_set(&ocs->hw, OCS_HW_N_IO, max_hw_io) != OCS_HW_RTN_SUCCESS) {
+ ocs_log_err(ocs, "%s: Can't set number of IOs\n", ocs->desc);
+ return -1;
+ }
+
+ ocs_hw_get(&ocs->hw, OCS_HW_MAX_SGL, &max_sgl);
+ max_sgl -= SLI4_SGE_MAX_RESERVED;
+
+ if (ocs->enable_hlm) {
+ ocs_hw_get(&ocs->hw, OCS_HW_HIGH_LOGIN_MODE, &hlm);
+ if (!hlm) {
+ ocs->enable_hlm = FALSE;
+ ocs_log_err(ocs, "Cannot enable high login mode for this port\n");
+ } else {
+ ocs_log_debug(ocs, "High login mode is enabled\n");
+ if (ocs_hw_set(&ocs->hw, OCS_HW_HIGH_LOGIN_MODE, TRUE)) {
+ ocs_log_err(ocs, "%s: Can't set high login mode\n", ocs->desc);
+ return -1;
+ }
+ }
+ }
+
+ /* validate the auto xfer_rdy size */
+ if (ocs->auto_xfer_rdy_size > 0 &&
+ (ocs->auto_xfer_rdy_size < 2048 ||
+ ocs->auto_xfer_rdy_size > 65536)) {
+ ocs_log_err(ocs, "Auto XFER_RDY size is out of range (2K-64K)\n");
+ return -1;
+ }
+
+ ocs_hw_get(&ocs->hw, OCS_HW_MAX_IO, &max_hw_io);
+
+ if (ocs->auto_xfer_rdy_size > 0) {
+ if (ocs_xport_initialize_auto_xfer_ready(xport)) {
+ ocs_log_err(ocs, "%s: Failed auto xfer ready setup\n", ocs->desc);
+ return -1;
+ }
+ if (ocs->esoc){
+ ocs_hw_set(&ocs->hw, OCS_ESOC, TRUE);
+ }
+ }
+
+ if (ocs->explicit_buffer_list) {
+ /* Are pre-registered SGL's required? */
+ ocs_hw_get(&ocs->hw, OCS_HW_PREREGISTER_SGL, &i);
+ if (i == TRUE) {
+ ocs_log_err(ocs, "Explicit Buffer List not supported on this device, not enabled\n");
+ } else {
+ ocs_hw_set(&ocs->hw, OCS_HW_PREREGISTER_SGL, FALSE);
+ }
+ }
+
+ if (ocs_hw_set(&ocs->hw, OCS_HW_TOPOLOGY, ocs->topology) != OCS_HW_RTN_SUCCESS) {
+		ocs_log_err(ocs, "%s: Can't set the topology\n", ocs->desc);
+ return -1;
+ }
+ ocs_hw_set(&ocs->hw, OCS_HW_RQ_DEFAULT_BUFFER_SIZE, OCS_FC_RQ_SIZE_DEFAULT);
+
+ if (ocs_hw_set(&ocs->hw, OCS_HW_LINK_SPEED, ocs->speed) != OCS_HW_RTN_SUCCESS) {
+ ocs_log_err(ocs, "%s: Can't set the link speed\n", ocs->desc);
+ return -1;
+ }
+
+ if (ocs_hw_set(&ocs->hw, OCS_HW_ETH_LICENSE, ocs->ethernet_license) != OCS_HW_RTN_SUCCESS) {
+ ocs_log_err(ocs, "%s: Can't set the ethernet license\n", ocs->desc);
+ return -1;
+ }
+
+ /* currently only lancer support setting the CRC seed value */
+ if (ocs->hw.sli.asic_type == SLI4_ASIC_TYPE_LANCER) {
+ if (ocs_hw_set(&ocs->hw, OCS_HW_DIF_SEED, OCS_FC_DIF_SEED) != OCS_HW_RTN_SUCCESS) {
+ ocs_log_err(ocs, "%s: Can't set the DIF seed\n", ocs->desc);
+ return -1;
+ }
+ }
+
+ /* Set the Dif mode */
+ if (0 == ocs_hw_get(&ocs->hw, OCS_HW_DIF_CAPABLE, &dif_capable)) {
+ if (dif_capable) {
+ if (ocs_get_property("dif_separate", prop_buf, sizeof(prop_buf)) == 0) {
+ dif_separate = ocs_strtoul(prop_buf, 0, 0);
+ }
+
+ if ((rc = ocs_hw_set(&ocs->hw, OCS_HW_DIF_MODE,
+ (dif_separate == 0 ? OCS_HW_DIF_MODE_INLINE : OCS_HW_DIF_MODE_SEPARATE)))) {
+ ocs_log_err(ocs, "Requested DIF MODE not supported\n");
+ }
+ }
+ }
+
+ if (ocs->target_io_timer_sec) {
+ ocs_log_debug(ocs, "setting target io timer=%d\n", ocs->target_io_timer_sec);
+ ocs_hw_set(&ocs->hw, OCS_HW_EMULATE_TARGET_WQE_TIMEOUT, TRUE);
+ }
+
+ ocs_hw_callback(&ocs->hw, OCS_HW_CB_DOMAIN, ocs_domain_cb, ocs);
+ ocs_hw_callback(&ocs->hw, OCS_HW_CB_REMOTE_NODE, ocs_remote_node_cb, ocs);
+ ocs_hw_callback(&ocs->hw, OCS_HW_CB_UNSOLICITED, ocs_unsolicited_cb, ocs);
+ ocs_hw_callback(&ocs->hw, OCS_HW_CB_PORT, ocs_port_cb, ocs);
+
+ ocs->fw_version = (const char*) ocs_hw_get_ptr(&ocs->hw, OCS_HW_FW_REV);
+
+ /* Initialize vport list */
+ ocs_list_init(&xport->vport_list, ocs_vport_spec_t, link);
+ ocs_lock_init(ocs, &xport->io_pending_lock, "io_pending_lock[%d]", ocs->instance_index);
+ ocs_list_init(&xport->io_pending_list, ocs_io_t, io_pending_link);
+ ocs_atomic_init(&xport->io_active_count, 0);
+ ocs_atomic_init(&xport->io_pending_count, 0);
+ ocs_atomic_init(&xport->io_total_free, 0);
+ ocs_atomic_init(&xport->io_total_pending, 0);
+ ocs_atomic_init(&xport->io_alloc_failed_count, 0);
+ ocs_atomic_init(&xport->io_pending_recursing, 0);
+ ocs_lock_init(ocs, &ocs->hw.watchdog_lock, " Watchdog Lock[%d]", ocs_instance(ocs));
+ rc = ocs_hw_init(&ocs->hw);
+ if (rc) {
+ ocs_log_err(ocs, "ocs_hw_init failure\n");
+ goto ocs_xport_init_cleanup;
+ } else {
+ hw_initialized = TRUE;
+ }
+
+ rq_limit = max_hw_io/2;
+ if (ocs_hw_set(&ocs->hw, OCS_HW_RQ_PROCESS_LIMIT, rq_limit) != OCS_HW_RTN_SUCCESS) {
+ ocs_log_err(ocs, "%s: Can't set the RQ process limit\n", ocs->desc);
+ }
+
+ if (ocs->config_tgt) {
+ rc = ocs_scsi_tgt_new_device(ocs);
+ if (rc) {
+ ocs_log_err(ocs, "failed to initialize target\n");
+ goto ocs_xport_init_cleanup;
+ } else {
+ tgt_device_set = TRUE;
+ }
+ }
+
+ if (ocs->enable_ini) {
+ rc = ocs_scsi_ini_new_device(ocs);
+ if (rc) {
+ ocs_log_err(ocs, "failed to initialize initiator\n");
+ goto ocs_xport_init_cleanup;
+ } else {
+ ini_device_set = TRUE;
+ }
+
+ }
+
+ /* Add vports */
+ if (ocs->num_vports != 0) {
+
+ uint32_t max_vports;
+ ocs_hw_get(&ocs->hw, OCS_HW_MAX_VPORTS, &max_vports);
+
+ if (ocs->num_vports < max_vports) {
+ ocs_log_debug(ocs, "Provisioning %d vports\n", ocs->num_vports);
+ for (i = 0; i < ocs->num_vports; i++) {
+ ocs_vport_create_spec(ocs, 0, 0, UINT32_MAX, ocs->enable_ini, ocs->enable_tgt, NULL, NULL);
+ }
+ } else {
+ ocs_log_err(ocs, "failed to create vports. num_vports range should be (1-%d) \n", max_vports-1);
+ goto ocs_xport_init_cleanup;
+ }
+ }
+
+ return 0;
+
+ocs_xport_init_cleanup:
+ if (ini_device_set) {
+ ocs_scsi_ini_del_device(ocs);
+ }
+
+ if (tgt_device_set) {
+ ocs_scsi_tgt_del_device(ocs);
+ }
+
+ if (hw_initialized) {
+ /* ocs_hw_teardown can only execute after ocs_hw_init */
+ ocs_hw_teardown(&ocs->hw);
+ }
+
+ return -1;
+}
+
+/**
+ * @brief Detaches the transport from the device.
+ *
+ * @par Description
+ * Performs the functions required to shut down a device.
+ *
+ * @param xport Pointer to transport object.
+ *
+ * @return Returns 0 on success or a non-zero value on failure.
+ */
+int32_t
+ocs_xport_detach(ocs_xport_t *xport)
+{
+ ocs_t *ocs = xport->ocs;
+
+ /* free resources associated with target-server and initiator-client */
+ if (ocs->config_tgt)
+ ocs_scsi_tgt_del_device(ocs);
+
+ if (ocs->enable_ini) {
+ ocs_scsi_ini_del_device(ocs);
+
+ /*Shutdown FC Statistics timer*/
+ if (ocs_timer_pending(&ocs->xport->stats_timer))
+ ocs_del_timer(&ocs->xport->stats_timer);
+ }
+
+ ocs_hw_teardown(&ocs->hw);
+
+ return 0;
+}
+
+/**
+ * @brief domain list empty callback
+ *
+ * @par Description
+ * Function is invoked when the device domain list goes empty. By convention
+ * @c arg points to an ocs_sem_t instance, that is incremented.
+ *
+ * @param ocs Pointer to device object.
+ * @param arg Pointer to semaphore instance.
+ *
+ * @return None.
+ */
+
+static void
+ocs_xport_domain_list_empty_cb(ocs_t *ocs, void *arg)
+{
+ ocs_sem_t *sem = arg;
+
+ ocs_assert(ocs);
+ ocs_assert(sem);
+
+ ocs_sem_v(sem);
+}
+
+/**
+ * @brief post node event callback
+ *
+ * @par Description
+ * This function is called from the mailbox completion interrupt context to post an
+ * event to a node object. By doing this in the interrupt context, it has
+ * the benefit of only posting events in the interrupt context, deferring the need to
+ * create a per event node lock.
+ *
+ * @param hw Pointer to HW structure.
+ * @param status Completion status for mailbox command.
+ * @param mqe Mailbox queue completion entry.
+ * @param arg Callback argument.
+ *
+ * @return Returns 0 on success, a negative error code value on failure.
+ */
+
+static int32_t
+ocs_xport_post_node_event_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
+{
+ ocs_xport_post_node_event_t *payload = arg;
+
+ if (payload != NULL) {
+ ocs_node_post_event(payload->node, payload->evt, payload->context);
+ ocs_sem_v(&payload->sem);
+ }
+
+ return 0;
+}
+
+/**
+ * @brief Initiate force free.
+ *
+ * @par Description
+ * Perform force free of OCS.
+ *
+ * @param xport Pointer to transport object.
+ *
+ * @return None.
+ */
+
+static void
+ocs_xport_force_free(ocs_xport_t *xport)
+{
+ ocs_t *ocs = xport->ocs;
+ ocs_domain_t *domain;
+ ocs_domain_t *next;
+
+ ocs_log_debug(ocs, "reset required, do force shutdown\n");
+ ocs_device_lock(ocs);
+ ocs_list_foreach_safe(&ocs->domain_list, domain, next) {
+ ocs_domain_force_free(domain);
+ }
+ ocs_device_unlock(ocs);
+}
+
+/**
+ * @brief Perform transport attach function.
+ *
+ * @par Description
+ * Perform the attach function, which for the FC transport makes a HW call
+ * to bring up the link.
+ *
+ * @param xport pointer to transport object.
+ * @param cmd command to execute.
+ *
+ * ocs_xport_control(ocs_xport_t *xport, OCS_XPORT_PORT_ONLINE)
+ * ocs_xport_control(ocs_xport_t *xport, OCS_XPORT_PORT_OFFLINE)
+ * ocs_xport_control(ocs_xport_t *xport, OCS_XPORT_PORT_SHUTDOWN)
+ * ocs_xport_control(ocs_xport_t *xport, OCS_XPORT_POST_NODE_EVENT, ocs_node_t *node, ocs_sm_event_t, void *context)
+ *
+ * @return Returns 0 on success, or a negative error code value on failure.
+ */
+
+int32_t
+ocs_xport_control(ocs_xport_t *xport, ocs_xport_ctrl_e cmd, ...)
+{
+ uint32_t rc = 0;
+ ocs_t *ocs = NULL;
+ va_list argp;
+
+ ocs_assert(xport, -1);
+ ocs_assert(xport->ocs, -1);
+ ocs = xport->ocs;
+
+ switch (cmd) {
+ case OCS_XPORT_PORT_ONLINE: {
+ /* Bring the port on-line */
+ rc = ocs_hw_port_control(&ocs->hw, OCS_HW_PORT_INIT, 0, NULL, NULL);
+ if (rc) {
+ ocs_log_err(ocs, "%s: Can't init port\n", ocs->desc);
+ } else {
+ xport->configured_link_state = cmd;
+ }
+ break;
+ }
+ case OCS_XPORT_PORT_OFFLINE: {
+ if (ocs_hw_port_control(&ocs->hw, OCS_HW_PORT_SHUTDOWN, 0, NULL, NULL)) {
+ ocs_log_err(ocs, "port shutdown failed\n");
+ } else {
+ xport->configured_link_state = cmd;
+ }
+ break;
+ }
+
+ case OCS_XPORT_SHUTDOWN: {
+ ocs_sem_t sem;
+ uint32_t reset_required;
+
+		/* If a PHYSDEV reset was performed (e.g. hw dump), it will
+		 * affect all PCI functions; an orderly shutdown won't work,
+		 * so just force free.
+		 */
+ /* TODO: need to poll this regularly... */
+ if (ocs_hw_get(&ocs->hw, OCS_HW_RESET_REQUIRED, &reset_required) != OCS_HW_RTN_SUCCESS) {
+ reset_required = 0;
+ }
+
+ if (reset_required) {
+ ocs_log_debug(ocs, "reset required, do force shutdown\n");
+ ocs_xport_force_free(xport);
+ break;
+ }
+ ocs_sem_init(&sem, 0, "domain_list_sem");
+ ocs_register_domain_list_empty_cb(ocs, ocs_xport_domain_list_empty_cb, &sem);
+
+ if (ocs_hw_port_control(&ocs->hw, OCS_HW_PORT_SHUTDOWN, 0, NULL, NULL)) {
+ ocs_log_debug(ocs, "port shutdown failed, do force shutdown\n");
+ ocs_xport_force_free(xport);
+ } else {
+ ocs_log_debug(ocs, "Waiting %d seconds for domain shutdown.\n", (OCS_FC_DOMAIN_SHUTDOWN_TIMEOUT_USEC/1000000));
+
+ rc = ocs_sem_p(&sem, OCS_FC_DOMAIN_SHUTDOWN_TIMEOUT_USEC);
+ if (rc) {
+ ocs_log_debug(ocs, "Note: Domain shutdown timed out\n");
+ ocs_xport_force_free(xport);
+ }
+ }
+
+ ocs_register_domain_list_empty_cb(ocs, NULL, NULL);
+
+ /* Free up any saved virtual ports */
+ ocs_vport_del_all(ocs);
+ break;
+ }
+
+ /*
+ * POST_NODE_EVENT: post an event to a node object
+ *
+ * This transport function is used to post an event to a node object. It does
+ * this by submitting a NOP mailbox command to defer execution to the
+ * interrupt context (thereby enforcing the serialized execution of event posting
+ * to the node state machine instances)
+ *
+	 * A counting semaphore is used to make the call synchronous: we wait
+	 * until the callback increments the semaphore (or the wait times out)
+	 * before returning.
+	 */
+ case OCS_XPORT_POST_NODE_EVENT: {
+ ocs_node_t *node;
+ ocs_sm_event_t evt;
+ void *context;
+ ocs_xport_post_node_event_t payload;
+ ocs_t *ocs;
+ ocs_hw_t *hw;
+
+ /* Retrieve arguments */
+ va_start(argp, cmd);
+ node = va_arg(argp, ocs_node_t*);
+ evt = va_arg(argp, ocs_sm_event_t);
+ context = va_arg(argp, void *);
+ va_end(argp);
+
+ ocs_assert(node, -1);
+ ocs_assert(node->ocs, -1);
+
+ ocs = node->ocs;
+ hw = &ocs->hw;
+
+ /* if node's state machine is disabled, don't bother continuing */
+ if (!node->sm.current_state) {
+ ocs_log_test(ocs, "node %p state machine disabled\n", node);
+ return -1;
+ }
+
+ /* Setup payload */
+ ocs_memset(&payload, 0, sizeof(payload));
+ ocs_sem_init(&payload.sem, 0, "xport_post_node_Event");
+ payload.node = node;
+ payload.evt = evt;
+ payload.context = context;
+
+ if (ocs_hw_async_call(hw, ocs_xport_post_node_event_cb, &payload)) {
+ ocs_log_test(ocs, "ocs_hw_async_call failed\n");
+ rc = -1;
+ break;
+ }
+
+ /* Wait for completion */
+ if (ocs_sem_p(&payload.sem, OCS_SEM_FOREVER)) {
+ ocs_log_test(ocs, "POST_NODE_EVENT: sem wait failed\n");
+ rc = -1;
+ }
+
+ break;
+ }
+ /*
+ * Set wwnn for the port. This will be used instead of the default provided by FW.
+ */
+ case OCS_XPORT_WWNN_SET: {
+ uint64_t wwnn;
+
+ /* Retrieve arguments */
+ va_start(argp, cmd);
+ wwnn = va_arg(argp, uint64_t);
+ va_end(argp);
+
+ ocs_log_debug(ocs, " WWNN %016" PRIx64 "\n", wwnn);
+ xport->req_wwnn = wwnn;
+
+ break;
+ }
+ /*
+ * Set wwpn for the port. This will be used instead of the default provided by FW.
+ */
+ case OCS_XPORT_WWPN_SET: {
+ uint64_t wwpn;
+
+ /* Retrieve arguments */
+ va_start(argp, cmd);
+ wwpn = va_arg(argp, uint64_t);
+ va_end(argp);
+
+ ocs_log_debug(ocs, " WWPN %016" PRIx64 "\n", wwpn);
+ xport->req_wwpn = wwpn;
+
+ break;
+ }
+
+
+ default:
+ break;
+ }
+ return rc;
+}
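
Hedged usage sketches for the control entry points above; the WWPN value and wrapper names are illustrative:

/* Bring the link up after attach/initialize have completed. */
static int32_t
example_port_online(ocs_xport_t *xport)
{
	return ocs_xport_control(xport, OCS_XPORT_PORT_ONLINE);
}

/* Override the factory WWPN before the port is brought online. */
static int32_t
example_set_wwpn(ocs_xport_t *xport)
{
	return ocs_xport_control(xport, OCS_XPORT_WWPN_SET,
	    (uint64_t)0x10000090fa000001ULL);
}

/* Post an event to a node, serialized through the mailbox path. */
static int32_t
example_post_node_event(ocs_xport_t *xport, ocs_node_t *node,
    ocs_sm_event_t evt)
{
	return ocs_xport_control(xport, OCS_XPORT_POST_NODE_EVENT,
	    node, evt, (void *)NULL);
}
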
+
+/**
+ * @brief Return status on a link.
+ *
+ * @par Description
+ * Returns status information about a link.
+ *
+ * @param xport Pointer to transport object.
+ * @param cmd Command to execute.
+ * @param result Pointer to result value.
+ *
+ * ocs_xport_status(ocs_xport_t *xport, OCS_XPORT_PORT_STATUS)
+ * ocs_xport_status(ocs_xport_t *xport, OCS_XPORT_LINK_SPEED, ocs_xport_stats_t *result)
+ *	return link speed in Mb/s
+ * ocs_xport_status(ocs_xport_t *xport, OCS_XPORT_IS_SUPPORTED_LINK_SPEED, ocs_xport_stats_t *result)
+ *	[in] *result is speed to check in Mb/s
+ * returns 1 if supported, 0 if not
+ * ocs_xport_status(ocs_xport_t *xport, OCS_XPORT_LINK_STATISTICS, ocs_xport_stats_t *result)
+ * return link/host port stats
+ * ocs_xport_status(ocs_xport_t *xport, OCS_XPORT_LINK_STAT_RESET, ocs_xport_stats_t *result)
+ * resets link/host stats
+ *
+ *
+ * @return Returns 0 on success, or a negative error code value on failure.
+ */
+
+int32_t
+ocs_xport_status(ocs_xport_t *xport, ocs_xport_status_e cmd, ocs_xport_stats_t *result)
+{
+ uint32_t rc = 0;
+ ocs_t *ocs = NULL;
+ ocs_xport_stats_t value;
+ ocs_hw_rtn_e hw_rc;
+
+ ocs_assert(xport, -1);
+ ocs_assert(xport->ocs, -1);
+
+ ocs = xport->ocs;
+
+ switch (cmd) {
+ case OCS_XPORT_CONFIG_PORT_STATUS:
+ ocs_assert(result, -1);
+ if (xport->configured_link_state == 0) {
+ /* Initial state is offline. configured_link_state is */
+ /* set to online explicitly when port is brought online. */
+ xport->configured_link_state = OCS_XPORT_PORT_OFFLINE;
+ }
+ result->value = xport->configured_link_state;
+ break;
+
+ case OCS_XPORT_PORT_STATUS:
+ ocs_assert(result, -1);
+ /* Determine port status based on link speed. */
+ hw_rc = ocs_hw_get(&(ocs->hw), OCS_HW_LINK_SPEED, &value.value);
+ if (hw_rc == OCS_HW_RTN_SUCCESS) {
+ if (value.value == 0) {
+ result->value = 0;
+ } else {
+ result->value = 1;
+ }
+ rc = 0;
+ } else {
+ rc = -1;
+ }
+ break;
+
+ case OCS_XPORT_LINK_SPEED: {
+ uint32_t speed;
+
+ ocs_assert(result, -1);
+ result->value = 0;
+
+ rc = ocs_hw_get(&ocs->hw, OCS_HW_LINK_SPEED, &speed);
+ if (rc == 0) {
+ result->value = speed;
+ }
+ break;
+ }
+
+ case OCS_XPORT_IS_SUPPORTED_LINK_SPEED: {
+ uint32_t speed;
+ uint32_t link_module_type;
+
+ ocs_assert(result, -1);
+ speed = result->value;
+
+ rc = ocs_hw_get(&ocs->hw, OCS_HW_LINK_MODULE_TYPE, &link_module_type);
+ if (rc == 0) {
+ switch(speed) {
+ case 1000: rc = (link_module_type & OCS_HW_LINK_MODULE_TYPE_1GB) != 0; break;
+ case 2000: rc = (link_module_type & OCS_HW_LINK_MODULE_TYPE_2GB) != 0; break;
+ case 4000: rc = (link_module_type & OCS_HW_LINK_MODULE_TYPE_4GB) != 0; break;
+ case 8000: rc = (link_module_type & OCS_HW_LINK_MODULE_TYPE_8GB) != 0; break;
+ case 10000: rc = (link_module_type & OCS_HW_LINK_MODULE_TYPE_10GB) != 0; break;
+ case 16000: rc = (link_module_type & OCS_HW_LINK_MODULE_TYPE_16GB) != 0; break;
+ case 32000: rc = (link_module_type & OCS_HW_LINK_MODULE_TYPE_32GB) != 0; break;
+ default: rc = 0; break;
+ }
+ } else {
+ rc = 0;
+ }
+ break;
+ }
+ case OCS_XPORT_LINK_STATISTICS:
+ ocs_device_lock(ocs);
+ ocs_memcpy((void *)result, &ocs->xport->fc_xport_stats, sizeof(ocs_xport_stats_t));
+ ocs_device_unlock(ocs);
+ break;
+ case OCS_XPORT_LINK_STAT_RESET: {
+ /* Create a semaphore to synchronize the stat reset process. */
+ ocs_sem_init(&(result->stats.semaphore), 0, "fc_stats_reset");
+
+ /* First reset the link stats */
+ if ((rc = ocs_hw_get_link_stats(&ocs->hw, 0, 1, 1, ocs_xport_link_stats_cb, result)) != 0) {
+ ocs_log_err(ocs, "%s: Failed to reset link statistics\n", __func__);
+ break;
+ }
+
+ /* Wait for semaphore to be signaled when the command completes */
+ /* TODO: Should there be a timeout on this? If so, how long? */
+ if (ocs_sem_p(&(result->stats.semaphore), OCS_SEM_FOREVER) != 0) {
+ /* Undefined failure */
+ ocs_log_test(ocs, "ocs_sem_p failed\n");
+ rc = -ENXIO;
+ break;
+ }
+
+ /* Next reset the host stats */
+ if ((rc = ocs_hw_get_host_stats(&ocs->hw, 1, ocs_xport_host_stats_cb, result)) != 0) {
+ ocs_log_err(ocs, "%s: Failed to reset host statistics\n", __func__);
+ break;
+ }
+
+ /* Wait for semaphore to be signaled when the command completes */
+ if (ocs_sem_p(&(result->stats.semaphore), OCS_SEM_FOREVER) != 0) {
+ /* Undefined failure */
+ ocs_log_test(ocs, "ocs_sem_p failed\n");
+ rc = -ENXIO;
+ break;
+ }
+ break;
+ }
+ case OCS_XPORT_IS_QUIESCED:
+ ocs_device_lock(ocs);
+ result->value = ocs_list_empty(&ocs->domain_list);
+ ocs_device_unlock(ocs);
+ break;
+ default:
+ rc = -1;
+ break;
+ }
+
+ return rc;
+
+}
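
A sketch of the query side: read the current link speed, then ask whether a given speed is supported. Note that OCS_XPORT_IS_SUPPORTED_LINK_SPEED takes its input in result->value and reports the answer through the return value (1 if supported):

/* Report the current speed and whether 16G is supported on this port. */
static void
example_query_link(ocs_xport_t *xport, ocs_t *ocs)
{
	ocs_xport_stats_t result;

	if (ocs_xport_status(xport, OCS_XPORT_LINK_SPEED, &result) == 0) {
		ocs_log_debug(ocs, "link speed %d Mb/s\n", result.value);
	}

	result.value = 16000;	/* speed to check, in Mb/s */
	if (ocs_xport_status(xport, OCS_XPORT_IS_SUPPORTED_LINK_SPEED,
	    &result) == 1) {
		ocs_log_debug(ocs, "16G is supported\n");
	}
}
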
+
+static void
+ocs_xport_link_stats_cb(int32_t status, uint32_t num_counters, ocs_hw_link_stat_counts_t *counters, void *arg)
+{
+ ocs_xport_stats_t *result = arg;
+
+ result->stats.link_stats.link_failure_error_count = counters[OCS_HW_LINK_STAT_LINK_FAILURE_COUNT].counter;
+ result->stats.link_stats.loss_of_sync_error_count = counters[OCS_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].counter;
+ result->stats.link_stats.primitive_sequence_error_count = counters[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].counter;
+ result->stats.link_stats.invalid_transmission_word_error_count = counters[OCS_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].counter;
+ result->stats.link_stats.crc_error_count = counters[OCS_HW_LINK_STAT_CRC_COUNT].counter;
+
+ ocs_sem_v(&(result->stats.semaphore));
+}
+
+
+static void
+ocs_xport_host_stats_cb(int32_t status, uint32_t num_counters, ocs_hw_host_stat_counts_t *counters, void *arg)
+{
+ ocs_xport_stats_t *result = arg;
+
+ result->stats.host_stats.transmit_kbyte_count = counters[OCS_HW_HOST_STAT_TX_KBYTE_COUNT].counter;
+ result->stats.host_stats.receive_kbyte_count = counters[OCS_HW_HOST_STAT_RX_KBYTE_COUNT].counter;
+ result->stats.host_stats.transmit_frame_count = counters[OCS_HW_HOST_STAT_TX_FRAME_COUNT].counter;
+ result->stats.host_stats.receive_frame_count = counters[OCS_HW_HOST_STAT_RX_FRAME_COUNT].counter;
+
+ ocs_sem_v(&(result->stats.semaphore));
+}
+
+
+/**
+ * @brief Free a transport object.
+ *
+ * @par Description
+ * The transport object is freed.
+ *
+ * @param xport Pointer to transport object.
+ *
+ * @return None.
+ */
+
+void
+ocs_xport_free(ocs_xport_t *xport)
+{
+ ocs_t *ocs;
+ uint32_t i;
+
+ if (xport) {
+ ocs = xport->ocs;
+ ocs_io_pool_free(xport->io_pool);
+ ocs_node_free_pool(ocs);
+ if(mtx_initialized(&xport->io_pending_lock.lock))
+ ocs_lock_free(&xport->io_pending_lock);
+
+ for (i = 0; i < SLI4_MAX_FCFI; i++) {
+ ocs_lock_free(&xport->fcfi[i].pend_frames_lock);
+ }
+
+ ocs_xport_rq_threads_teardown(xport);
+
+ ocs_free(ocs, xport, sizeof(*xport));
+ }
+}
diff --git a/sys/dev/ocs_fc/ocs_xport.h b/sys/dev/ocs_fc/ocs_xport.h
new file mode 100644
index 000000000000..c7841cd5122e
--- /dev/null
+++ b/sys/dev/ocs_fc/ocs_xport.h
@@ -0,0 +1,213 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/**
+ * @file
+ * Declarations for the OCS transport (xport) layer.
+ */
+
+#if !defined(__OCS_XPORT_H__)
+#define __OCS_XPORT_H__
+
+/**
+ * @brief FCFI lookup/pending frames
+ */
+typedef struct ocs_xport_fcfi_s {
+ ocs_lock_t pend_frames_lock;
+ ocs_list_t pend_frames;
+	uint32_t hold_frames:1; /**< hold pending frames */
+	uint32_t pend_frames_processed; /**< count of pending frames that were processed */
+} ocs_xport_fcfi_t;
+
+/**
+ * @brief Structure to hold the information related to an RQ processing thread used
+ * to increase 40G performance.
+ */
+typedef struct ocs_xport_rq_thread_info_s {
+ ocs_t *ocs;
+ uint8_t thread_started;
+ ocs_thread_t thread;
+	ocs_cbuf_t *seq_cbuf;
+ char thread_name[64];
+} ocs_xport_rq_thread_info_t;
+
+typedef enum {
+ OCS_XPORT_PORT_ONLINE=1,
+ OCS_XPORT_PORT_OFFLINE,
+ OCS_XPORT_SHUTDOWN,
+ OCS_XPORT_POST_NODE_EVENT,
+ OCS_XPORT_WWNN_SET,
+ OCS_XPORT_WWPN_SET,
+} ocs_xport_ctrl_e;
+
+typedef enum {
+ OCS_XPORT_PORT_STATUS,
+ OCS_XPORT_CONFIG_PORT_STATUS,
+ OCS_XPORT_LINK_SPEED,
+ OCS_XPORT_IS_SUPPORTED_LINK_SPEED,
+ OCS_XPORT_LINK_STATISTICS,
+ OCS_XPORT_LINK_STAT_RESET,
+ OCS_XPORT_IS_QUIESCED
+} ocs_xport_status_e;
+
+typedef struct ocs_xport_link_stats_s {
+ uint32_t rec:1,
+ gec:1,
+ w02of:1,
+ w03of:1,
+ w04of:1,
+ w05of:1,
+ w06of:1,
+ w07of:1,
+ w08of:1,
+ w09of:1,
+ w10of:1,
+ w11of:1,
+ w12of:1,
+ w13of:1,
+ w14of:1,
+ w15of:1,
+ w16of:1,
+ w17of:1,
+ w18of:1,
+ w19of:1,
+ w20of:1,
+ w21of:1,
+ resv0:8,
+ clrc:1,
+ clof:1;
+ uint32_t link_failure_error_count;
+ uint32_t loss_of_sync_error_count;
+ uint32_t loss_of_signal_error_count;
+ uint32_t primitive_sequence_error_count;
+ uint32_t invalid_transmission_word_error_count;
+ uint32_t crc_error_count;
+ uint32_t primitive_sequence_event_timeout_count;
+ uint32_t elastic_buffer_overrun_error_count;
+ uint32_t arbitration_fc_al_timout_count;
+ uint32_t advertised_receive_bufftor_to_buffer_credit;
+ uint32_t current_receive_buffer_to_buffer_credit;
+ uint32_t advertised_transmit_buffer_to_buffer_credit;
+ uint32_t current_transmit_buffer_to_buffer_credit;
+ uint32_t received_eofa_count;
+ uint32_t received_eofdti_count;
+ uint32_t received_eofni_count;
+ uint32_t received_soff_count;
+ uint32_t received_dropped_no_aer_count;
+ uint32_t received_dropped_no_available_rpi_resources_count;
+ uint32_t received_dropped_no_available_xri_resources_count;
+} ocs_xport_link_stats_t;
+
+typedef struct ocs_xport_host_stats_s {
+ uint32_t cc:1,
+ :31;
+ uint32_t transmit_kbyte_count;
+ uint32_t receive_kbyte_count;
+ uint32_t transmit_frame_count;
+ uint32_t receive_frame_count;
+ uint32_t transmit_sequence_count;
+ uint32_t receive_sequence_count;
+ uint32_t total_exchanges_originator;
+ uint32_t total_exchanges_responder;
+ uint32_t receive_p_bsy_count;
+ uint32_t receive_f_bsy_count;
+ uint32_t dropped_frames_due_to_no_rq_buffer_count;
+ uint32_t empty_rq_timeout_count;
+ uint32_t dropped_frames_due_to_no_xri_count;
+ uint32_t empty_xri_pool_count;
+} ocs_xport_host_stats_t;
+
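+/**
+ * @brief Statistics result holder.
+ *
+ * The semaphore is posted by the statistics callbacks
+ * (ocs_xport_link_stats_cb()/ocs_xport_host_stats_cb()) once the counters
+ * have been copied out; the requester blocks on it with ocs_sem_p().
+ */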
+typedef struct ocs_xport_host_statistics_s {
+ ocs_sem_t semaphore;
+ ocs_xport_link_stats_t link_stats;
+ ocs_xport_host_stats_t host_stats;
+} ocs_xport_host_statistics_t;
+
+typedef union ocs_xport {
+ uint32_t value;
+ ocs_xport_host_statistics_t stats;
+} ocs_xport_stats_t;
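+
+/*
+ * Example (sketch): callers typically retrieve statistics through
+ * ocs_xport_status() with this union as the result buffer, e.g.:
+ *
+ *	ocs_xport_stats_t result;
+ *
+ *	if (ocs_xport_status(xport, OCS_XPORT_LINK_STATISTICS, &result) == 0)
+ *		printf("CRC errors: %u\n",
+ *		    result.stats.link_stats.crc_error_count);
+ *
+ * Scalar queries (e.g. OCS_XPORT_IS_QUIESCED) fill in only "value".
+ */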
+/**
+ * @brief Transport private values
+ */
+struct ocs_xport_s {
+ ocs_t *ocs;
+	uint64_t req_wwpn; /**< WWPN requested by user for primary sport */
+	uint64_t req_wwnn; /**< WWNN requested by user for primary sport */
+
+ ocs_xport_fcfi_t fcfi[SLI4_MAX_FCFI];
+
+ /* Nodes */
+ uint32_t nodes_count; /**< number of allocated nodes */
+ ocs_node_t **nodes; /**< array of pointers to nodes */
+ ocs_list_t nodes_free_list; /**< linked list of free nodes */
+
+ /* Io pool and counts */
+ ocs_io_pool_t *io_pool; /**< pointer to IO pool */
+ ocs_atomic_t io_alloc_failed_count; /**< used to track how often IO pool is empty */
+ ocs_lock_t io_pending_lock; /**< lock for io_pending_list */
+ ocs_list_t io_pending_list; /**< list of IOs waiting for HW resources
+ ** lock: xport->io_pending_lock
+ ** link: ocs_io_t->io_pending_link
+ */
+	ocs_atomic_t io_total_alloc; /**< total number of IOs allocated */
+	ocs_atomic_t io_total_free; /**< total number of IOs freed */
+	ocs_atomic_t io_total_pending; /**< total number of IOs that were pended */
+	ocs_atomic_t io_active_count; /**< count of active IOs */
+	ocs_atomic_t io_pending_count; /**< count of pending IOs */
+ ocs_atomic_t io_pending_recursing; /**< non-zero if ocs_scsi_check_pending is executing */
+
+ /* vport */
+ ocs_list_t vport_list; /**< list of VPORTS (NPIV) */
+
+ /* Port */
+ uint32_t configured_link_state; /**< requested link state */
+
+ /* RQ processing threads */
+ uint32_t num_rq_threads;
+ ocs_xport_rq_thread_info_t *rq_thread_info;
+
+ ocs_timer_t stats_timer; /**< Timer for Statistics */
+ ocs_xport_stats_t fc_xport_stats;
+};
+
+
+extern ocs_xport_t *ocs_xport_alloc(ocs_t *ocs);
+extern int32_t ocs_xport_attach(ocs_xport_t *xport);
+extern int32_t ocs_xport_initialize(ocs_xport_t *xport);
+extern int32_t ocs_xport_detach(ocs_xport_t *xport);
+extern int32_t ocs_xport_control(ocs_xport_t *xport, ocs_xport_ctrl_e cmd, ...);
+extern int32_t ocs_xport_status(ocs_xport_t *xport, ocs_xport_status_e cmd, ocs_xport_stats_t *result);
+extern void ocs_xport_free(ocs_xport_t *xport);
+
+#endif
diff --git a/sys/dev/ocs_fc/sli4.c b/sys/dev/ocs_fc/sli4.c
new file mode 100644
index 000000000000..c346fb2e0d8c
--- /dev/null
+++ b/sys/dev/ocs_fc/sli4.c
@@ -0,0 +1,8648 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/**
+ * @defgroup sli SLI-4 Base APIs
+ */
+
+/**
+ * @file
+ * All common (i.e. transport-independent) SLI-4 functions are implemented
+ * in this file.
+ */
+
+#include "sli4.h"
+
+#if defined(OCS_INCLUDE_DEBUG)
+#include "ocs_utils.h"
+#endif
+
+#define SLI4_BMBX_DELAY_US 1000 /* 1 ms */
+#define SLI4_INIT_PORT_DELAY_US 10000 /* 10 ms */
+
+static int32_t sli_fw_init(sli4_t *);
+static int32_t sli_fw_term(sli4_t *);
+static int32_t sli_sliport_control(sli4_t *sli4, uint32_t endian);
+static int32_t sli_cmd_fw_deinitialize(sli4_t *, void *, size_t);
+static int32_t sli_cmd_fw_initialize(sli4_t *, void *, size_t);
+static int32_t sli_queue_doorbell(sli4_t *, sli4_queue_t *);
+static uint8_t sli_queue_entry_is_valid(sli4_queue_t *, uint8_t *, uint8_t);
+
+const uint8_t sli4_fw_initialize[] = {
+ 0xff, 0x12, 0x34, 0xff,
+ 0xff, 0x56, 0x78, 0xff,
+};
+
+const uint8_t sli4_fw_deinitialize[] = {
+ 0xff, 0xaa, 0xbb, 0xff,
+ 0xff, 0xcc, 0xdd, 0xff,
+};
+
+typedef struct {
+ uint32_t rev_id;
+ uint32_t family; /* generation */
+ sli4_asic_type_e type;
+ sli4_asic_rev_e rev;
+} sli4_asic_entry_t;
+
+sli4_asic_entry_t sli4_asic_table[] = {
+ { 0x00, 1, SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_A0},
+ { 0x01, 1, SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_A1},
+ { 0x02, 1, SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_A2},
+ { 0x00, 4, SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_A0},
+ { 0x00, 2, SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_A0},
+ { 0x10, 1, SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_B0},
+ { 0x10, 0x04, SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_B0},
+ { 0x11, 0x04, SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_B1},
+ { 0x0, 0x0a, SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_A0},
+ { 0x10, 0x0b, SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_B0},
+ { 0x30, 0x0b, SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_D0},
+ { 0x3, 0x0b, SLI4_ASIC_TYPE_LANCERG6,SLI4_ASIC_REV_A3},
+ { 0x0, 0x0c, SLI4_ASIC_TYPE_LANCERG6,SLI4_ASIC_REV_A0},
+ { 0x1, 0x0c, SLI4_ASIC_TYPE_LANCERG6,SLI4_ASIC_REV_A1},
+ { 0x3, 0x0c, SLI4_ASIC_TYPE_LANCERG6,SLI4_ASIC_REV_A3},
+
+ { 0x00, 0x05, SLI4_ASIC_TYPE_CORSAIR, SLI4_ASIC_REV_A0},
+};
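+
+/*
+ * Note (sketch): this table is presumably consulted during initialization by
+ * matching the PCI revision ID and SLI family read from the hardware (see
+ * sli_pci_rev_id() and sli_intf_sli_family() below), conceptually:
+ *
+ *	for (i = 0; i < nitems(sli4_asic_table); i++)
+ *		if (rev_id == sli4_asic_table[i].rev_id &&
+ *		    family == sli4_asic_table[i].family)
+ *			break;
+ */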
+
+/**
+ * @brief Human-readable names for each queue type enum (SLI_QTYPE_*) value.
+ */
+const char *SLI_QNAME[] = {
+ "Event Queue",
+ "Completion Queue",
+ "Mailbox Queue",
+ "Work Queue",
+ "Receive Queue",
+ "Undefined"
+};
+
+/**
+ * @brief Define the mapping of registers to their BAR and offset.
+ *
+ * @par Description
+ * Although SLI-4 specification defines a common set of registers, their locations
+ * (both BAR and offset) depend on the interface type. This array maps a register
+ * enum to an array of BAR/offset pairs indexed by the interface type. For
+ * example, to access the bootstrap mailbox register on an interface type 0
+ * device, code can refer to the offset using regmap[SLI4_REG_BMBX][0].offset.
+ *
+ * @b Note: A value of UINT32_MAX for either the register set (rset) or offset (off)
+ * indicates an invalid mapping.
+ */
+const sli4_reg_t regmap[SLI4_REG_MAX][SLI4_MAX_IF_TYPES] = {
+ /* SLI4_REG_BMBX */
+ {
+ { 2, SLI4_BMBX_REG }, { 0, SLI4_BMBX_REG }, { 0, SLI4_BMBX_REG }, { 0, SLI4_BMBX_REG },
+ },
+ /* SLI4_REG_EQCQ_DOORBELL */
+ {
+ { 2, SLI4_EQCQ_DOORBELL_REG }, { 0, SLI4_EQCQ_DOORBELL_REG },
+ { 0, SLI4_EQCQ_DOORBELL_REG }, { 0, SLI4_EQCQ_DOORBELL_REG },
+ },
+ /* SLI4_REG_FCOE_RQ_DOORBELL */
+ {
+ { 2, SLI4_RQ_DOORBELL_REG }, { 0, SLI4_RQ_DOORBELL_REG },
+ { 0, SLI4_RQ_DOORBELL_REG }, { UINT32_MAX, UINT32_MAX },
+ },
+ /* SLI4_REG_IO_WQ_DOORBELL */
+ {
+ { 2, SLI4_IO_WQ_DOORBELL_REG }, { 0, SLI4_IO_WQ_DOORBELL_REG }, { 0, SLI4_IO_WQ_DOORBELL_REG }, { UINT32_MAX, UINT32_MAX },
+ },
+ /* SLI4_REG_MQ_DOORBELL */
+ {
+ { 2, SLI4_MQ_DOORBELL_REG }, { 0, SLI4_MQ_DOORBELL_REG },
+ { 0, SLI4_MQ_DOORBELL_REG }, { 0, SLI4_MQ_DOORBELL_REG },
+ },
+ /* SLI4_REG_PHYSDEV_CONTROL */
+ {
+ { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { 0, SLI4_PHSDEV_CONTROL_REG_23 }, { 0, SLI4_PHSDEV_CONTROL_REG_23 },
+ },
+ /* SLI4_REG_SLIPORT_CONTROL */
+ {
+ { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { 0, SLI4_SLIPORT_CONTROL_REG }, { UINT32_MAX, UINT32_MAX },
+ },
+ /* SLI4_REG_SLIPORT_ERROR1 */
+ {
+ { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { 0, SLI4_SLIPORT_ERROR1 }, { UINT32_MAX, UINT32_MAX },
+ },
+ /* SLI4_REG_SLIPORT_ERROR2 */
+ {
+ { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { 0, SLI4_SLIPORT_ERROR2 }, { UINT32_MAX, UINT32_MAX },
+ },
+ /* SLI4_REG_SLIPORT_SEMAPHORE */
+ {
+ { 1, SLI4_PORT_SEMAPHORE_REG_0 }, { 0, SLI4_PORT_SEMAPHORE_REG_1 },
+ { 0, SLI4_PORT_SEMAPHORE_REG_23 }, { 0, SLI4_PORT_SEMAPHORE_REG_23 },
+ },
+ /* SLI4_REG_SLIPORT_STATUS */
+ {
+ { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { 0, SLI4_PORT_STATUS_REG_23 }, { 0, SLI4_PORT_STATUS_REG_23 },
+ },
+ /* SLI4_REG_UERR_MASK_HI */
+ {
+ { 0, SLI4_UERR_MASK_HIGH_REG }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
+ },
+ /* SLI4_REG_UERR_MASK_LO */
+ {
+ { 0, SLI4_UERR_MASK_LOW_REG }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
+ },
+ /* SLI4_REG_UERR_STATUS_HI */
+ {
+ { 0, SLI4_UERR_STATUS_HIGH_REG }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
+ },
+ /* SLI4_REG_UERR_STATUS_LO */
+ {
+ { 0, SLI4_UERR_STATUS_LOW_REG }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
+ },
+ /* SLI4_REG_SW_UE_CSR1 */
+ {
+ { 1, SLI4_SW_UE_CSR1}, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
+ },
+ /* SLI4_REG_SW_UE_CSR2 */
+ {
+ { 1, SLI4_SW_UE_CSR2}, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX }, { UINT32_MAX, UINT32_MAX },
+ },
+};
+
+/**
+ * @brief Read the given SLI register.
+ *
+ * @param sli Pointer to the SLI context.
+ * @param reg Register name enum.
+ *
+ * @return Returns the register value.
+ */
+uint32_t
+sli_reg_read(sli4_t *sli, sli4_regname_e reg)
+{
+ const sli4_reg_t *r = &(regmap[reg][sli->if_type]);
+
+ if ((UINT32_MAX == r->rset) || (UINT32_MAX == r->off)) {
+ ocs_log_err(sli->os, "regname %d not defined for if_type %d\n", reg, sli->if_type);
+ return UINT32_MAX;
+ }
+
+ return ocs_reg_read32(sli->os, r->rset, r->off);
+}
+
+/**
+ * @brief Write the value to the given SLI register.
+ *
+ * @param sli Pointer to the SLI context.
+ * @param reg Register name enum.
+ * @param val Value to write.
+ *
+ * @return None.
+ */
+void
+sli_reg_write(sli4_t *sli, sli4_regname_e reg, uint32_t val)
+{
+ const sli4_reg_t *r = &(regmap[reg][sli->if_type]);
+
+ if ((UINT32_MAX == r->rset) || (UINT32_MAX == r->off)) {
+ ocs_log_err(sli->os, "regname %d not defined for if_type %d\n", reg, sli->if_type);
+ return;
+ }
+
+ ocs_reg_write32(sli->os, r->rset, r->off, val);
+}
+
+/**
+ * @brief Check if the SLI_INTF register is valid.
+ *
+ * @param val 32-bit SLI_INTF register value.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+static uint8_t
+sli_intf_valid_check(uint32_t val)
+{
+ return ((val >> SLI4_INTF_VALID_SHIFT) & SLI4_INTF_VALID_MASK) != SLI4_INTF_VALID;
+}
+
+/**
+ * @brief Retrieve the SLI revision level.
+ *
+ * @param val 32-bit SLI_INTF register value.
+ *
+ * @return Returns the SLI revision level.
+ */
+static uint8_t
+sli_intf_sli_revision(uint32_t val)
+{
+ return ((val >> SLI4_INTF_SLI_REVISION_SHIFT) & SLI4_INTF_SLI_REVISION_MASK);
+}
+
+static uint8_t
+sli_intf_sli_family(uint32_t val)
+{
+ return ((val >> SLI4_INTF_SLI_FAMILY_SHIFT) & SLI4_INTF_SLI_FAMILY_MASK);
+}
+
+/**
+ * @brief Retrieve the SLI interface type.
+ *
+ * @param val 32-bit SLI_INTF register value.
+ *
+ * @return Returns the SLI interface type.
+ */
+static uint8_t
+sli_intf_if_type(uint32_t val)
+{
+ return ((val >> SLI4_INTF_IF_TYPE_SHIFT) & SLI4_INTF_IF_TYPE_MASK);
+}
+
+/**
+ * @brief Retrieve PCI revision ID.
+ *
+ * @param val 32-bit PCI CLASS_REVISION register value.
+ *
+ * @return Returns the PCI revision ID.
+ */
+static uint8_t
+sli_pci_rev_id(uint32_t val)
+{
+ return ((val >> SLI4_PCI_REV_ID_SHIFT) & SLI4_PCI_REV_ID_MASK);
+}
+
+/**
+ * @brief retrieve SLI ASIC generation
+ *
+ * @param val 32-bit SLI_ASIC_ID register value
+ *
+ * @return SLI ASIC generation
+ */
+static uint8_t
+sli_asic_gen(uint32_t val)
+{
+ return ((val >> SLI4_ASIC_GEN_SHIFT) & SLI4_ASIC_GEN_MASK);
+}
+
+/**
+ * @brief Wait for the bootstrap mailbox to report "ready".
+ *
+ * @param sli4 SLI context pointer.
+ * @param msec Number of milliseconds to wait.
+ *
+ * @return Returns 0 if the BMBX is ready, or non-zero otherwise (i.e., a timeout occurred).
+ */
+static int32_t
+sli_bmbx_wait(sli4_t *sli4, uint32_t msec)
+{
+ uint32_t val = 0;
+
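+	/* Each iteration below delays SLI4_BMBX_DELAY_US (1 ms), so "msec"
+	 * approximates the total wait in milliseconds. */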
+ do {
+ ocs_udelay(SLI4_BMBX_DELAY_US);
+ val = sli_reg_read(sli4, SLI4_REG_BMBX);
+ msec--;
+ } while(msec && !(val & SLI4_BMBX_RDY));
+
+ return(!(val & SLI4_BMBX_RDY));
+}
+
+/**
+ * @brief Write bootstrap mailbox.
+ *
+ * @param sli4 SLI context pointer.
+ *
+ * @return Returns 0 if command succeeded, or non-zero otherwise.
+ */
+static int32_t
+sli_bmbx_write(sli4_t *sli4)
+{
+ uint32_t val = 0;
+
+ /* write buffer location to bootstrap mailbox register */
+ ocs_dma_sync(&sli4->bmbx, OCS_DMASYNC_PREWRITE);
+ val = SLI4_BMBX_WRITE_HI(sli4->bmbx.phys);
+ sli_reg_write(sli4, SLI4_REG_BMBX, val);
+
+ if (sli_bmbx_wait(sli4, SLI4_BMBX_DELAY_US)) {
+ ocs_log_crit(sli4->os, "BMBX WRITE_HI failed\n");
+ return -1;
+ }
+ val = SLI4_BMBX_WRITE_LO(sli4->bmbx.phys);
+ sli_reg_write(sli4, SLI4_REG_BMBX, val);
+
+ /* wait for SLI Port to set ready bit */
+ return sli_bmbx_wait(sli4, SLI4_BMBX_TIMEOUT_MSEC/*XXX*/);
+}
+
+#if defined(OCS_INCLUDE_DEBUG)
+/**
+ * @ingroup sli
+ * @brief Dump BMBX mailbox command.
+ *
+ * @par Description
+ * Convenience function for dumping BMBX mailbox commands. Takes
+ * into account which mailbox command is given since SLI_CONFIG
+ * commands are special.
+ *
+ * @b Note: This function takes advantage of
+ * the one-command-at-a-time nature of the BMBX to be able to
+ * display non-embedded SLI_CONFIG commands. This will not work
+ * for mailbox commands on the MQ. Luckily, all current non-emb
+ * mailbox commands go through the BMBX.
+ *
+ * @param sli4 SLI context pointer.
+ * @param mbx Pointer to mailbox command to dump.
+ * @param prefix Prefix for dump label.
+ *
+ * @return None.
+ */
+static void
+sli_dump_bmbx_command(sli4_t *sli4, void *mbx, const char *prefix)
+{
+ uint32_t size = 0;
+ char label[64];
+ uint32_t i;
+ /* Mailbox diagnostic logging */
+ sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mbx;
+
+ if (!ocs_debug_is_enabled(OCS_DEBUG_ENABLE_MQ_DUMP)) {
+ return;
+ }
+
+ if (hdr->command == SLI4_MBOX_COMMAND_SLI_CONFIG) {
+ sli4_cmd_sli_config_t *sli_config = (sli4_cmd_sli_config_t *)hdr;
+ sli4_req_hdr_t *sli_config_hdr;
+ if (sli_config->emb) {
+ ocs_snprintf(label, sizeof(label), "%s (emb)", prefix);
+
+ /* if embedded, dump entire command */
+ sli_config_hdr = (sli4_req_hdr_t *)sli_config->payload.embed;
+ size = sizeof(*sli_config) - sizeof(sli_config->payload) +
+ sli_config_hdr->request_length + (4*sizeof(uint32_t));
+ ocs_dump32(OCS_DEBUG_ENABLE_MQ_DUMP, sli4->os, label,
+ (uint8_t *)sli4->bmbx.virt, size);
+ } else {
+ sli4_sli_config_pmd_t *pmd;
+ ocs_snprintf(label, sizeof(label), "%s (non-emb hdr)", prefix);
+
+ /* if non-embedded, break up into two parts: SLI_CONFIG hdr
+ and the payload(s) */
+ size = sizeof(*sli_config) - sizeof(sli_config->payload) + (12 * sli_config->pmd_count);
+ ocs_dump32(OCS_DEBUG_ENABLE_MQ_DUMP, sli4->os, label,
+ (uint8_t *)sli4->bmbx.virt, size);
+
+ /* as sanity check, make sure first PMD matches what was saved */
+ pmd = &sli_config->payload.mem;
+ if ((pmd->address_high == ocs_addr32_hi(sli4->bmbx_non_emb_pmd->phys)) &&
+ (pmd->address_low == ocs_addr32_lo(sli4->bmbx_non_emb_pmd->phys))) {
+ for (i = 0; i < sli_config->pmd_count; i++, pmd++) {
+ sli_config_hdr = sli4->bmbx_non_emb_pmd->virt;
+ ocs_snprintf(label, sizeof(label), "%s (non-emb pay[%d])",
+ prefix, i);
+ ocs_dump32(OCS_DEBUG_ENABLE_MQ_DUMP, sli4->os, label,
+ (uint8_t *)sli4->bmbx_non_emb_pmd->virt,
+ sli_config_hdr->request_length + (4*sizeof(uint32_t)));
+ }
+ } else {
+ ocs_log_debug(sli4->os, "pmd addr does not match pmd:%x %x (%x %x)\n",
+ pmd->address_high, pmd->address_low,
+ ocs_addr32_hi(sli4->bmbx_non_emb_pmd->phys),
+ ocs_addr32_lo(sli4->bmbx_non_emb_pmd->phys));
+ }
+
+ }
+ } else {
+ /* not an SLI_CONFIG command, just display first 64 bytes, like we do
+ for MQEs */
+ size = 64;
+ ocs_dump32(OCS_DEBUG_ENABLE_MQ_DUMP, sli4->os, prefix,
+ (uint8_t *)mbx, size);
+ }
+}
+#endif
+
+/**
+ * @ingroup sli
+ * @brief Submit a command to the bootstrap mailbox and check the status.
+ *
+ * @param sli4 SLI context pointer.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+int32_t
+sli_bmbx_command(sli4_t *sli4)
+{
+ void *cqe = (uint8_t *)sli4->bmbx.virt + SLI4_BMBX_SIZE;
+
+#if defined(OCS_INCLUDE_DEBUG)
+ sli_dump_bmbx_command(sli4, sli4->bmbx.virt, "bmbx cmd");
+#endif
+
+ if (sli_fw_error_status(sli4) > 0) {
+ ocs_log_crit(sli4->os, "Chip is in an error state - Mailbox "
+ "command rejected status=%#x error1=%#x error2=%#x\n",
+ sli_reg_read(sli4, SLI4_REG_SLIPORT_STATUS),
+ sli_reg_read(sli4, SLI4_REG_SLIPORT_ERROR1),
+ sli_reg_read(sli4, SLI4_REG_SLIPORT_ERROR2));
+ return -1;
+ }
+
+ if (sli_bmbx_write(sli4)) {
+ ocs_log_crit(sli4->os, "bootstrap mailbox write fail phys=%p reg=%#x\n",
+ (void*)sli4->bmbx.phys,
+ sli_reg_read(sli4, SLI4_REG_BMBX));
+ return -1;
+ }
+
+ /* check completion queue entry status */
+ ocs_dma_sync(&sli4->bmbx, OCS_DMASYNC_POSTREAD);
+ if (((sli4_mcqe_t *)cqe)->val) {
+#if defined(OCS_INCLUDE_DEBUG)
+ sli_dump_bmbx_command(sli4, sli4->bmbx.virt, "bmbx cmpl");
+ ocs_dump32(OCS_DEBUG_ENABLE_CQ_DUMP, sli4->os, "bmbx cqe", cqe, sizeof(sli4_mcqe_t));
+#endif
+ return sli_cqe_mq(cqe);
+ } else {
+ ocs_log_err(sli4->os, "invalid or wrong type\n");
+ return -1;
+ }
+}
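+
+/*
+ * Example (sketch): the usual bootstrap mailbox sequence is to format a
+ * command into the BMBX DMA buffer with one of the sli_cmd_*() writers and
+ * then submit it:
+ *
+ *	if (sli_cmd_read_nvparms(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE) &&
+ *	    sli_bmbx_command(sli4) == 0) {
+ *		... parse the response in sli4->bmbx.virt ...
+ *	}
+ */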
+
+/****************************************************************************
+ * Messages
+ */
+
+/**
+ * @ingroup sli
+ * @brief Write a CONFIG_LINK command to the provided buffer.
+ *
+ * @param sli4 SLI context pointer.
+ * @param buf Virtual pointer to the destination buffer.
+ * @param size Buffer size, in bytes.
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_config_link(sli4_t *sli4, void *buf, size_t size)
+{
+ sli4_cmd_config_link_t *config_link = buf;
+
+ ocs_memset(buf, 0, size);
+
+ config_link->hdr.command = SLI4_MBOX_COMMAND_CONFIG_LINK;
+
+ /* Port interprets zero in a field as "use default value" */
+
+ return sizeof(sli4_cmd_config_link_t);
+}
+
+/**
+ * @ingroup sli
+ * @brief Write a DOWN_LINK command to the provided buffer.
+ *
+ * @param sli4 SLI context pointer.
+ * @param buf Virtual pointer to the destination buffer.
+ * @param size Buffer size, in bytes.
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_down_link(sli4_t *sli4, void *buf, size_t size)
+{
+ sli4_mbox_command_header_t *hdr = buf;
+
+ ocs_memset(buf, 0, size);
+
+ hdr->command = SLI4_MBOX_COMMAND_DOWN_LINK;
+
+ /* Port interprets zero in a field as "use default value" */
+
+ return sizeof(sli4_mbox_command_header_t);
+}
+
+/**
+ * @ingroup sli
+ * @brief Write a DUMP Type 4 command to the provided buffer.
+ *
+ * @param sli4 SLI context pointer.
+ * @param buf Virtual pointer to the destination buffer.
+ * @param size Buffer size, in bytes.
+ * @param wki The well-known item ID.
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_dump_type4(sli4_t *sli4, void *buf, size_t size, uint16_t wki)
+{
+ sli4_cmd_dump4_t *cmd = buf;
+
+ ocs_memset(buf, 0, size);
+
+ cmd->hdr.command = SLI4_MBOX_COMMAND_DUMP;
+ cmd->type = 4;
+ cmd->wki_selection = wki;
+ return sizeof(sli4_cmd_dump4_t);
+}
+
+/**
+ * @ingroup sli
+ * @brief Write a COMMON_READ_TRANSCEIVER_DATA command.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the command.
+ * @param size Buffer size, in bytes.
+ * @param page_num The page of SFP data to retrieve (0xa0 or 0xa2).
+ * @param dma DMA structure from which the data will be copied.
+ *
+ * @note This creates a Version 0 message.
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_common_read_transceiver_data(sli4_t *sli4, void *buf, size_t size, uint32_t page_num,
+ ocs_dma_t *dma)
+{
+ sli4_req_common_read_transceiver_data_t *req = NULL;
+ uint32_t sli_config_off = 0;
+ uint32_t payload_size;
+
+ if (dma == NULL) {
+ /* Payload length must accommodate both request and response */
+ payload_size = max(sizeof(sli4_req_common_read_transceiver_data_t),
+ sizeof(sli4_res_common_read_transceiver_data_t));
+ } else {
+ payload_size = dma->size;
+ }
+
+ if (sli4->port_type == SLI4_PORT_TYPE_FC) {
+ sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size, dma);
+ }
+
+ if (dma == NULL) {
+ req = (sli4_req_common_read_transceiver_data_t *)((uint8_t *)buf + sli_config_off);
+ } else {
+ req = (sli4_req_common_read_transceiver_data_t *)dma->virt;
+ ocs_memset(req, 0, dma->size);
+ }
+
+ req->hdr.opcode = SLI4_OPC_COMMON_READ_TRANSCEIVER_DATA;
+ req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
+ req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t);
+
+ req->page_number = page_num;
+ req->port = sli4->physical_port;
+
+ return(sli_config_off + sizeof(sli4_req_common_read_transceiver_data_t));
+}
+
+/**
+ * @ingroup sli
+ * @brief Write a READ_LINK_STAT command to the provided buffer.
+ *
+ * @param sli4 SLI context pointer.
+ * @param buf Virtual pointer to the destination buffer.
+ * @param size Buffer size, in bytes.
+ * @param req_ext_counters If TRUE, then the extended counters will be requested.
+ * @param clear_overflow_flags If TRUE, then overflow flags will be cleared.
+ * @param clear_all_counters If TRUE, the counters will be cleared.
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_read_link_stats(sli4_t *sli4, void *buf, size_t size,
+ uint8_t req_ext_counters,
+ uint8_t clear_overflow_flags,
+ uint8_t clear_all_counters)
+{
+ sli4_cmd_read_link_stats_t *cmd = buf;
+
+ ocs_memset(buf, 0, size);
+
+ cmd->hdr.command = SLI4_MBOX_COMMAND_READ_LNK_STAT;
+ cmd->rec = req_ext_counters;
+ cmd->clrc = clear_all_counters;
+ cmd->clof = clear_overflow_flags;
+ return sizeof(sli4_cmd_read_link_stats_t);
+}
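+
+/*
+ * Example (sketch): requesting the extended counters while clearing all
+ * counters and their overflow flags in a single command:
+ *
+ *	sli_cmd_read_link_stats(sli4, buf, size, TRUE, TRUE, TRUE);
+ */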
+
+/**
+ * @ingroup sli
+ * @brief Write a READ_STATUS command to the provided buffer.
+ *
+ * @param sli4 SLI context pointer.
+ * @param buf Virtual pointer to the destination buffer.
+ * @param size Buffer size, in bytes.
+ * @param clear_counters If TRUE, the counters will be cleared.
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_read_status(sli4_t *sli4, void *buf, size_t size,
+ uint8_t clear_counters)
+{
+ sli4_cmd_read_status_t *cmd = buf;
+
+ ocs_memset(buf, 0, size);
+
+ cmd->hdr.command = SLI4_MBOX_COMMAND_READ_STATUS;
+ cmd->cc = clear_counters;
+ return sizeof(sli4_cmd_read_status_t);
+}
+
+/**
+ * @brief Write a FW_DEINITIALIZE command to the provided buffer.
+ *
+ * @param sli4 SLI context pointer.
+ * @param buf Virtual pointer to the destination buffer.
+ * @param size Buffer size, in bytes.
+ *
+ * @return Returns the number of bytes written.
+ */
+static int32_t
+sli_cmd_fw_deinitialize(sli4_t *sli4, void *buf, size_t size)
+{
+
+ ocs_memset(buf, 0, size);
+ ocs_memcpy(buf, sli4_fw_deinitialize, sizeof(sli4_fw_deinitialize));
+
+ return sizeof(sli4_fw_deinitialize);
+}
+
+/**
+ * @brief Write a FW_INITIALIZE command to the provided buffer.
+ *
+ * @param sli4 SLI context pointer.
+ * @param buf Virtual pointer to the destination buffer.
+ * @param size Buffer size, in bytes.
+ *
+ * @return Returns the number of bytes written.
+ */
+static int32_t
+sli_cmd_fw_initialize(sli4_t *sli4, void *buf, size_t size)
+{
+
+ ocs_memset(buf, 0, size);
+ ocs_memcpy(buf, sli4_fw_initialize, sizeof(sli4_fw_initialize));
+
+ return sizeof(sli4_fw_initialize);
+}
+
+/**
+ * @ingroup sli
+ * @brief Write an INIT_LINK command to the provided buffer.
+ *
+ * @param sli4 SLI context pointer.
+ * @param buf Virtual pointer to the destination buffer.
+ * @param size Buffer size, in bytes.
+ * @param speed Link speed.
+ * @param reset_alpa For native FC, this is the selective reset AL_PA.
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_init_link(sli4_t *sli4, void *buf, size_t size, uint32_t speed, uint8_t reset_alpa)
+{
+ sli4_cmd_init_link_t *init_link = buf;
+
+ ocs_memset(buf, 0, size);
+
+ init_link->hdr.command = SLI4_MBOX_COMMAND_INIT_LINK;
+
+ /* Most fields only have meaning for FC links */
+ if (sli4->config.topology != SLI4_READ_CFG_TOPO_FCOE) {
+ init_link->selective_reset_al_pa = reset_alpa;
+ init_link->link_flags.loopback = FALSE;
+
+ init_link->link_speed_selection_code = speed;
+ switch (speed) {
+ case FC_LINK_SPEED_1G:
+ case FC_LINK_SPEED_2G:
+ case FC_LINK_SPEED_4G:
+ case FC_LINK_SPEED_8G:
+ case FC_LINK_SPEED_16G:
+ case FC_LINK_SPEED_32G:
+ init_link->link_flags.fixed_speed = TRUE;
+ break;
+ case FC_LINK_SPEED_10G:
+ ocs_log_test(sli4->os, "unsupported FC speed %d\n", speed);
+ return 0;
+ }
+
+ switch (sli4->config.topology) {
+ case SLI4_READ_CFG_TOPO_FC:
+ /* Attempt P2P but failover to FC-AL */
+ init_link->link_flags.enable_topology_failover = TRUE;
+
+ if (sli_get_asic_type(sli4) == SLI4_ASIC_TYPE_LANCER)
+ init_link->link_flags.topology = SLI4_INIT_LINK_F_FCAL_FAIL_OVER;
+ else
+ init_link->link_flags.topology = SLI4_INIT_LINK_F_P2P_FAIL_OVER;
+
+ break;
+ case SLI4_READ_CFG_TOPO_FC_AL:
+ init_link->link_flags.topology = SLI4_INIT_LINK_F_FCAL_ONLY;
+ if ((init_link->link_speed_selection_code == FC_LINK_SPEED_16G) ||
+ (init_link->link_speed_selection_code == FC_LINK_SPEED_32G)) {
+ ocs_log_test(sli4->os, "unsupported FC-AL speed %d\n", speed);
+ return 0;
+ }
+ break;
+ case SLI4_READ_CFG_TOPO_FC_DA:
+ init_link->link_flags.topology = FC_TOPOLOGY_P2P;
+ break;
+ default:
+ ocs_log_test(sli4->os, "unsupported topology %#x\n", sli4->config.topology);
+ return 0;
+ }
+
+ init_link->link_flags.unfair = FALSE;
+ init_link->link_flags.skip_lirp_lilp = FALSE;
+ init_link->link_flags.gen_loop_validity_check = FALSE;
+ init_link->link_flags.skip_lisa = FALSE;
+ init_link->link_flags.select_hightest_al_pa = FALSE;
+ }
+
+ return sizeof(sli4_cmd_init_link_t);
+}
+
+/**
+ * @ingroup sli
+ * @brief Write an INIT_VFI command to the provided buffer.
+ *
+ * @param sli4 SLI context pointer.
+ * @param buf Virtual pointer to the destination buffer.
+ * @param size Buffer size, in bytes.
+ * @param vfi VFI value.
+ * @param fcfi FCFI value.
+ * @param vpi VPI value (set to 0xffff if unused).
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_init_vfi(sli4_t *sli4, void *buf, size_t size, uint16_t vfi,
+ uint16_t fcfi, uint16_t vpi)
+{
+ sli4_cmd_init_vfi_t *init_vfi = buf;
+
+ ocs_memset(buf, 0, size);
+
+ init_vfi->hdr.command = SLI4_MBOX_COMMAND_INIT_VFI;
+
+ init_vfi->vfi = vfi;
+ init_vfi->fcfi = fcfi;
+
+ /*
+ * If the VPI is valid, initialize it at the same time as
+ * the VFI
+ */
+ if (0xffff != vpi) {
+ init_vfi->vp = TRUE;
+ init_vfi->vpi = vpi;
+ }
+
+ return sizeof(sli4_cmd_init_vfi_t);
+}
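+
+/*
+ * Example (sketch): initializing a VFI without a VPI is done by passing
+ * 0xffff, which the command writer above treats as "no VPI":
+ *
+ *	sli_cmd_init_vfi(sli4, buf, size, vfi, fcfi, 0xffff);
+ */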
+
+/**
+ * @ingroup sli
+ * @brief Write an INIT_VPI command to the provided buffer.
+ *
+ * @param sli4 SLI context pointer.
+ * @param buf Virtual pointer to the destination buffer.
+ * @param size Buffer size, in bytes.
+ * @param vpi VPI allocated.
+ * @param vfi VFI associated with this VPI.
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_init_vpi(sli4_t *sli4, void *buf, size_t size, uint16_t vpi, uint16_t vfi)
+{
+ sli4_cmd_init_vpi_t *init_vpi = buf;
+
+ ocs_memset(buf, 0, size);
+
+ init_vpi->hdr.command = SLI4_MBOX_COMMAND_INIT_VPI;
+ init_vpi->vpi = vpi;
+ init_vpi->vfi = vfi;
+
+ return sizeof(sli4_cmd_init_vpi_t);
+}
+
+/**
+ * @ingroup sli
+ * @brief Write a POST_XRI command to the provided buffer.
+ *
+ * @param sli4 SLI context pointer.
+ * @param buf Virtual pointer to the destination buffer.
+ * @param size Buffer size, in bytes.
+ * @param xri_base Starting XRI value for range of XRI given to SLI Port.
+ * @param xri_count Number of XRIs provided to the SLI Port.
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_post_xri(sli4_t *sli4, void *buf, size_t size, uint16_t xri_base, uint16_t xri_count)
+{
+ sli4_cmd_post_xri_t *post_xri = buf;
+
+ ocs_memset(buf, 0, size);
+
+ post_xri->hdr.command = SLI4_MBOX_COMMAND_POST_XRI;
+ post_xri->xri_base = xri_base;
+ post_xri->xri_count = xri_count;
+
+ if (sli4->config.auto_xfer_rdy == 0) {
+ post_xri->enx = TRUE;
+ post_xri->val = TRUE;
+ }
+
+ return sizeof(sli4_cmd_post_xri_t);
+}
+
+/**
+ * @ingroup sli
+ * @brief Write a RELEASE_XRI command to the provided buffer.
+ *
+ * @param sli4 SLI context pointer.
+ * @param buf Virtual pointer to the destination buffer.
+ * @param size Buffer size, in bytes.
+ * @param num_xri The number of XRIs to be released.
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_release_xri(sli4_t *sli4, void *buf, size_t size, uint8_t num_xri)
+{
+ sli4_cmd_release_xri_t *release_xri = buf;
+
+ ocs_memset(buf, 0, size);
+
+ release_xri->hdr.command = SLI4_MBOX_COMMAND_RELEASE_XRI;
+ release_xri->xri_count = num_xri;
+
+ return sizeof(sli4_cmd_release_xri_t);
+}
+
+/**
+ * @brief Write a READ_CONFIG command to the provided buffer.
+ *
+ * @param sli4 SLI context pointer.
+ * @param buf Virtual pointer to the destination buffer.
+ * @param size Buffer size, in bytes.
+ *
+ * @return Returns the number of bytes written.
+ */
+static int32_t
+sli_cmd_read_config(sli4_t *sli4, void *buf, size_t size)
+{
+ sli4_cmd_read_config_t *read_config = buf;
+
+ ocs_memset(buf, 0, size);
+
+ read_config->hdr.command = SLI4_MBOX_COMMAND_READ_CONFIG;
+
+ return sizeof(sli4_cmd_read_config_t);
+}
+
+/**
+ * @brief Write a READ_NVPARMS command to the provided buffer.
+ *
+ * @param sli4 SLI context pointer.
+ * @param buf Virtual pointer to the destination buffer.
+ * @param size Buffer size, in bytes.
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_read_nvparms(sli4_t *sli4, void *buf, size_t size)
+{
+ sli4_cmd_read_nvparms_t *read_nvparms = buf;
+
+ ocs_memset(buf, 0, size);
+
+ read_nvparms->hdr.command = SLI4_MBOX_COMMAND_READ_NVPARMS;
+
+ return sizeof(sli4_cmd_read_nvparms_t);
+}
+
+/**
+ * @brief Write a WRITE_NVPARMS command to the provided buffer.
+ *
+ * @param sli4 SLI context pointer.
+ * @param buf Virtual pointer to the destination buffer.
+ * @param size Buffer size, in bytes.
+ * @param wwpn WWPN to write - pointer to array of 8 uint8_t.
+ * @param wwnn WWNN to write - pointer to array of 8 uint8_t.
+ * @param hard_alpa Hard ALPA to write.
+ * @param preferred_d_id Preferred D_ID to write.
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_write_nvparms(sli4_t *sli4, void *buf, size_t size, uint8_t *wwpn, uint8_t *wwnn, uint8_t hard_alpa,
+ uint32_t preferred_d_id)
+{
+ sli4_cmd_write_nvparms_t *write_nvparms = buf;
+
+ ocs_memset(buf, 0, size);
+
+ write_nvparms->hdr.command = SLI4_MBOX_COMMAND_WRITE_NVPARMS;
+ ocs_memcpy(write_nvparms->wwpn, wwpn, 8);
+ ocs_memcpy(write_nvparms->wwnn, wwnn, 8);
+ write_nvparms->hard_alpa = hard_alpa;
+ write_nvparms->preferred_d_id = preferred_d_id;
+
+ return sizeof(sli4_cmd_write_nvparms_t);
+}
+
+/**
+ * @brief Write a READ_REV command to the provided buffer.
+ *
+ * @param sli4 SLI context pointer.
+ * @param buf Virtual pointer to the destination buffer.
+ * @param size Buffer size, in bytes.
+ * @param vpd Pointer to the buffer.
+ *
+ * @return Returns the number of bytes written.
+ */
+static int32_t
+sli_cmd_read_rev(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *vpd)
+{
+ sli4_cmd_read_rev_t *read_rev = buf;
+
+ ocs_memset(buf, 0, size);
+
+ read_rev->hdr.command = SLI4_MBOX_COMMAND_READ_REV;
+
+ if (vpd && vpd->size) {
+ read_rev->vpd = TRUE;
+
+ read_rev->available_length = vpd->size;
+
+ read_rev->physical_address_low = ocs_addr32_lo(vpd->phys);
+ read_rev->physical_address_high = ocs_addr32_hi(vpd->phys);
+ }
+
+ return sizeof(sli4_cmd_read_rev_t);
+}
+
+/**
+ * @ingroup sli
+ * @brief Write a READ_SPARM64 command to the provided buffer.
+ *
+ * @param sli4 SLI context pointer.
+ * @param buf Virtual pointer to the destination buffer.
+ * @param size Buffer size, in bytes.
+ * @param dma DMA buffer for the service parameters.
+ * @param vpi VPI used to determine the WWN.
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_read_sparm64(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma,
+ uint16_t vpi)
+{
+ sli4_cmd_read_sparm64_t *read_sparm64 = buf;
+
+ ocs_memset(buf, 0, size);
+
+ if (SLI4_READ_SPARM64_VPI_SPECIAL == vpi) {
+ ocs_log_test(sli4->os, "special VPI not supported!!!\n");
+ return -1;
+ }
+
+ if (!dma || !dma->phys) {
+ ocs_log_test(sli4->os, "bad DMA buffer\n");
+ return -1;
+ }
+
+ read_sparm64->hdr.command = SLI4_MBOX_COMMAND_READ_SPARM64;
+
+ read_sparm64->bde_64.bde_type = SLI4_BDE_TYPE_BDE_64;
+ read_sparm64->bde_64.buffer_length = dma->size;
+ read_sparm64->bde_64.u.data.buffer_address_low = ocs_addr32_lo(dma->phys);
+ read_sparm64->bde_64.u.data.buffer_address_high = ocs_addr32_hi(dma->phys);
+
+ read_sparm64->vpi = vpi;
+
+ return sizeof(sli4_cmd_read_sparm64_t);
+}
+
+/**
+ * @ingroup sli
+ * @brief Write a READ_TOPOLOGY command to the provided buffer.
+ *
+ * @param sli4 SLI context pointer.
+ * @param buf Virtual pointer to the destination buffer.
+ * @param size Buffer size, in bytes.
+ * @param dma DMA buffer for loop map (optional).
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_read_topology(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma)
+{
+ sli4_cmd_read_topology_t *read_topo = buf;
+
+ ocs_memset(buf, 0, size);
+
+ read_topo->hdr.command = SLI4_MBOX_COMMAND_READ_TOPOLOGY;
+
+ if (dma && dma->size) {
+ if (dma->size < SLI4_MIN_LOOP_MAP_BYTES) {
+ ocs_log_test(sli4->os, "loop map buffer too small %jd\n",
+ dma->size);
+ return 0;
+ }
+
+ ocs_memset(dma->virt, 0, dma->size);
+
+ read_topo->bde_loop_map.bde_type = SLI4_BDE_TYPE_BDE_64;
+ read_topo->bde_loop_map.buffer_length = dma->size;
+ read_topo->bde_loop_map.u.data.buffer_address_low = ocs_addr32_lo(dma->phys);
+ read_topo->bde_loop_map.u.data.buffer_address_high = ocs_addr32_hi(dma->phys);
+ }
+
+ return sizeof(sli4_cmd_read_topology_t);
+}
+
+/**
+ * @ingroup sli
+ * @brief Write a REG_FCFI command to the provided buffer.
+ *
+ * @param sli4 SLI context pointer.
+ * @param buf Virtual pointer to the destination buffer.
+ * @param size Buffer size, in bytes.
+ * @param index FCF index returned by READ_FCF_TABLE.
+ * @param rq_cfg RQ_ID/R_CTL/TYPE routing information.
+ * @param vlan_id VLAN ID tag.
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_reg_fcfi(sli4_t *sli4, void *buf, size_t size, uint16_t index, sli4_cmd_rq_cfg_t rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG], uint16_t vlan_id)
+{
+ sli4_cmd_reg_fcfi_t *reg_fcfi = buf;
+ uint32_t i;
+
+ ocs_memset(buf, 0, size);
+
+ reg_fcfi->hdr.command = SLI4_MBOX_COMMAND_REG_FCFI;
+
+ reg_fcfi->fcf_index = index;
+
+ for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
+ switch(i) {
+ case 0: reg_fcfi->rq_id_0 = rq_cfg[0].rq_id; break;
+ case 1: reg_fcfi->rq_id_1 = rq_cfg[1].rq_id; break;
+ case 2: reg_fcfi->rq_id_2 = rq_cfg[2].rq_id; break;
+ case 3: reg_fcfi->rq_id_3 = rq_cfg[3].rq_id; break;
+ }
+ reg_fcfi->rq_cfg[i].r_ctl_mask = rq_cfg[i].r_ctl_mask;
+ reg_fcfi->rq_cfg[i].r_ctl_match = rq_cfg[i].r_ctl_match;
+ reg_fcfi->rq_cfg[i].type_mask = rq_cfg[i].type_mask;
+ reg_fcfi->rq_cfg[i].type_match = rq_cfg[i].type_match;
+ }
+
+ if (vlan_id) {
+ reg_fcfi->vv = TRUE;
+ reg_fcfi->vlan_tag = vlan_id;
+ }
+
+ return sizeof(sli4_cmd_reg_fcfi_t);
+}
+
+/**
+ * @brief Write a REG_FCFI_MRQ command to the provided buffer.
+ *
+ * @param sli4 SLI context pointer.
+ * @param buf Virtual pointer to the destination buffer.
+ * @param size Buffer size, in bytes.
+ * @param mode Registration mode; SLI4_CMD_REG_FCFI_SET_FCFI_MODE registers
+ * the FCF index only, otherwise the MRQ configuration is programmed.
+ * @param fcf_index FCF index returned by READ_FCF_TABLE.
+ * @param vlan_id VLAN ID tag.
+ * @param rq_selection_policy RQ selection policy.
+ * @param mrq_bit_mask MRQ filter bitmask.
+ * @param num_mrqs Number of MRQ pairs.
+ * @param rq_cfg RQ_ID/R_CTL/TYPE routing information.
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_reg_fcfi_mrq(sli4_t *sli4, void *buf, size_t size, uint8_t mode,
+ uint16_t fcf_index, uint16_t vlan_id, uint8_t rq_selection_policy,
+ uint8_t mrq_bit_mask, uint16_t num_mrqs,
+ sli4_cmd_rq_cfg_t rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG])
+{
+ sli4_cmd_reg_fcfi_mrq_t *reg_fcfi_mrq = buf;
+ uint32_t i;
+
+ ocs_memset(buf, 0, size);
+
+ reg_fcfi_mrq->hdr.command = SLI4_MBOX_COMMAND_REG_FCFI_MRQ;
+ if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE) {
+ reg_fcfi_mrq->fcf_index = fcf_index;
+ if (vlan_id) {
+ reg_fcfi_mrq->vv = TRUE;
+ reg_fcfi_mrq->vlan_tag = vlan_id;
+ }
+ goto done;
+ }
+
+ reg_fcfi_mrq->mode = mode;
+ for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
+ reg_fcfi_mrq->rq_cfg[i].r_ctl_mask = rq_cfg[i].r_ctl_mask;
+ reg_fcfi_mrq->rq_cfg[i].r_ctl_match = rq_cfg[i].r_ctl_match;
+ reg_fcfi_mrq->rq_cfg[i].type_mask = rq_cfg[i].type_mask;
+ reg_fcfi_mrq->rq_cfg[i].type_match = rq_cfg[i].type_match;
+
+ switch(i) {
+ case 3: reg_fcfi_mrq->rq_id_3 = rq_cfg[i].rq_id; break;
+ case 2: reg_fcfi_mrq->rq_id_2 = rq_cfg[i].rq_id; break;
+ case 1: reg_fcfi_mrq->rq_id_1 = rq_cfg[i].rq_id; break;
+ case 0: reg_fcfi_mrq->rq_id_0 = rq_cfg[i].rq_id; break;
+ }
+ }
+
+ reg_fcfi_mrq->rq_selection_policy = rq_selection_policy;
+ reg_fcfi_mrq->mrq_filter_bitmask = mrq_bit_mask;
+ reg_fcfi_mrq->num_mrq_pairs = num_mrqs;
+done:
+ return sizeof(sli4_cmd_reg_fcfi_mrq_t);
+}
+
+/**
+ * @ingroup sli
+ * @brief Write a REG_RPI command to the provided buffer.
+ *
+ * @param sli4 SLI context pointer.
+ * @param buf Virtual pointer to the destination buffer.
+ * @param size Buffer size, in bytes.
+ * @param nport_id Remote F/N_Port_ID.
+ * @param rpi Previously-allocated Remote Port Indicator.
+ * @param vpi Previously-allocated Virtual Port Indicator.
+ * @param dma DMA buffer that contains the remote port's service parameters.
+ * @param update Boolean indicating an update to an existing RPI (TRUE)
+ * or a new registration (FALSE).
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_reg_rpi(sli4_t *sli4, void *buf, size_t size, uint32_t nport_id, uint16_t rpi,
+ uint16_t vpi, ocs_dma_t *dma, uint8_t update, uint8_t enable_t10_pi)
+{
+ sli4_cmd_reg_rpi_t *reg_rpi = buf;
+
+ ocs_memset(buf, 0, size);
+
+ reg_rpi->hdr.command = SLI4_MBOX_COMMAND_REG_RPI;
+
+ reg_rpi->rpi = rpi;
+ reg_rpi->remote_n_port_id = nport_id;
+ reg_rpi->upd = update;
+ reg_rpi->etow = enable_t10_pi;
+
+ reg_rpi->bde_64.bde_type = SLI4_BDE_TYPE_BDE_64;
+ reg_rpi->bde_64.buffer_length = SLI4_REG_RPI_BUF_LEN;
+ reg_rpi->bde_64.u.data.buffer_address_low = ocs_addr32_lo(dma->phys);
+ reg_rpi->bde_64.u.data.buffer_address_high = ocs_addr32_hi(dma->phys);
+
+ reg_rpi->vpi = vpi;
+
+ return sizeof(sli4_cmd_reg_rpi_t);
+}
+
+/**
+ * @ingroup sli
+ * @brief Write a REG_VFI command to the provided buffer.
+ *
+ * @param sli4 SLI context pointer.
+ * @param buf Virtual pointer to the destination buffer.
+ * @param size Buffer size, in bytes.
+ * @param domain Pointer to the domain object.
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_reg_vfi(sli4_t *sli4, void *buf, size_t size, ocs_domain_t *domain)
+{
+ sli4_cmd_reg_vfi_t *reg_vfi = buf;
+
+ if (!sli4 || !buf || !domain) {
+ return 0;
+ }
+
+ ocs_memset(buf, 0, size);
+
+ reg_vfi->hdr.command = SLI4_MBOX_COMMAND_REG_VFI;
+
+ reg_vfi->vfi = domain->indicator;
+
+ reg_vfi->fcfi = domain->fcf_indicator;
+
+ /* TODO contents of domain->dma only valid if topo == FABRIC */
+ reg_vfi->sparm.bde_type = SLI4_BDE_TYPE_BDE_64;
+ reg_vfi->sparm.buffer_length = 0x70;
+ reg_vfi->sparm.u.data.buffer_address_low = ocs_addr32_lo(domain->dma.phys);
+ reg_vfi->sparm.u.data.buffer_address_high = ocs_addr32_hi(domain->dma.phys);
+
+ reg_vfi->e_d_tov = sli4->config.e_d_tov;
+ reg_vfi->r_a_tov = sli4->config.r_a_tov;
+
+ reg_vfi->vp = TRUE;
+ reg_vfi->vpi = domain->sport->indicator;
+ ocs_memcpy(reg_vfi->wwpn, &domain->sport->sli_wwpn, sizeof(reg_vfi->wwpn));
+ reg_vfi->local_n_port_id = domain->sport->fc_id;
+
+ return sizeof(sli4_cmd_reg_vfi_t);
+}
+
+/**
+ * @ingroup sli
+ * @brief Write a REG_VPI command to the provided buffer.
+ *
+ * @param sli4 SLI context pointer.
+ * @param buf Virtual pointer to the destination buffer.
+ * @param size Buffer size, in bytes.
+ * @param sport Pointer to the SLI port object.
+ * @param update Boolean indicating whether to update the existing VPI (true)
+ * or create a new VPI (false).
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_reg_vpi(sli4_t *sli4, void *buf, size_t size, ocs_sli_port_t *sport, uint8_t update)
+{
+ sli4_cmd_reg_vpi_t *reg_vpi = buf;
+
+ if (!sli4 || !buf || !sport) {
+ return 0;
+ }
+
+ ocs_memset(buf, 0, size);
+
+ reg_vpi->hdr.command = SLI4_MBOX_COMMAND_REG_VPI;
+
+ reg_vpi->local_n_port_id = sport->fc_id;
+ reg_vpi->upd = update != 0;
+ ocs_memcpy(reg_vpi->wwpn, &sport->sli_wwpn, sizeof(reg_vpi->wwpn));
+ reg_vpi->vpi = sport->indicator;
+ reg_vpi->vfi = sport->domain->indicator;
+
+ return sizeof(sli4_cmd_reg_vpi_t);
+}
+
+/**
+ * @brief Write a REQUEST_FEATURES command to the provided buffer.
+ *
+ * @param sli4 SLI context pointer.
+ * @param buf Virtual pointer to the destination buffer.
+ * @param size Buffer size, in bytes.
+ * @param mask Features to request.
+ * @param query Use feature query mode (does not change FW).
+ *
+ * @return Returns the number of bytes written.
+ */
+static int32_t
+sli_cmd_request_features(sli4_t *sli4, void *buf, size_t size, sli4_features_t mask, uint8_t query)
+{
+ sli4_cmd_request_features_t *features = buf;
+
+ ocs_memset(buf, 0, size);
+
+ features->hdr.command = SLI4_MBOX_COMMAND_REQUEST_FEATURES;
+
+ if (query) {
+ features->qry = TRUE;
+ }
+ features->command.dword = mask.dword;
+
+ return sizeof(sli4_cmd_request_features_t);
+}
+
+/**
+ * @ingroup sli
+ * @brief Write a SLI_CONFIG command to the provided buffer.
+ *
+ * @param sli4 SLI context pointer.
+ * @param buf Virtual pointer to the destination buffer.
+ * @param size Buffer size, in bytes.
+ * @param length Length in bytes of attached command.
+ * @param dma DMA buffer for non-embedded commands.
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_sli_config(sli4_t *sli4, void *buf, size_t size, uint32_t length, ocs_dma_t *dma)
+{
+ sli4_cmd_sli_config_t *sli_config = NULL;
+
+ if ((length > sizeof(sli_config->payload.embed)) && (dma == NULL)) {
+		ocs_log_test(sli4->os, "length(%u) > payload(%zu)\n",
+ length, sizeof(sli_config->payload.embed));
+ return -1;
+ }
+
+ sli_config = buf;
+
+ ocs_memset(buf, 0, size);
+
+ sli_config->hdr.command = SLI4_MBOX_COMMAND_SLI_CONFIG;
+ if (NULL == dma) {
+ sli_config->emb = TRUE;
+ sli_config->payload_length = length;
+ } else {
+ sli_config->emb = FALSE;
+
+ sli_config->pmd_count = 1;
+
+ sli_config->payload.mem.address_low = ocs_addr32_lo(dma->phys);
+ sli_config->payload.mem.address_high = ocs_addr32_hi(dma->phys);
+ sli_config->payload.mem.length = dma->size;
+ sli_config->payload_length = dma->size;
+#if defined(OCS_INCLUDE_DEBUG)
+ /* save pointer to DMA for BMBX dumping purposes */
+ sli4->bmbx_non_emb_pmd = dma;
+#endif
+
+ }
+
+ return offsetof(sli4_cmd_sli_config_t, payload.embed);
+}
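+
+/*
+ * Example (sketch): an embedded SLI_CONFIG request is built by writing the
+ * wrapper first and then placing the request at the returned offset, as the
+ * sli_cmd_common_*() helpers in this file do:
+ *
+ *	uint32_t off = sli_cmd_sli_config(sli4, buf, size, payload_size, NULL);
+ *	sli4_req_hdr_t *hdr = (sli4_req_hdr_t *)((uint8_t *)buf + off);
+ *
+ *	hdr->opcode = SLI4_OPC_COMMON_FUNCTION_RESET;
+ *	hdr->subsystem = SLI4_SUBSYSTEM_COMMON;
+ */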
+
+/**
+ * @brief Initialize SLI Port control register.
+ *
+ * @param sli4 SLI context pointer.
+ * @param endian Endian value to write.
+ *
+ * @return Returns 0 on success, or a negative error code value on failure.
+ */
+
+static int32_t
+sli_sliport_control(sli4_t *sli4, uint32_t endian)
+{
+ uint32_t iter;
+ int32_t rc;
+
+ rc = -1;
+
+ /* Initialize port, endian */
+ sli_reg_write(sli4, SLI4_REG_SLIPORT_CONTROL, endian | SLI4_SLIPORT_CONTROL_IP);
+
+ for (iter = 0; iter < 3000; iter ++) {
+ ocs_udelay(SLI4_INIT_PORT_DELAY_US);
+ if (sli_fw_ready(sli4) == 1) {
+ rc = 0;
+ break;
+ }
+ }
+
+ if (rc != 0) {
+ ocs_log_crit(sli4->os, "port failed to become ready after initialization\n");
+ }
+
+ return rc;
+}
+
+/**
+ * @ingroup sli
+ * @brief Write a UNREG_FCFI command to the provided buffer.
+ *
+ * @param sli4 SLI context pointer.
+ * @param buf Virtual pointer to the destination buffer.
+ * @param size Buffer size, in bytes.
+ * @param indicator Indicator value.
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_unreg_fcfi(sli4_t *sli4, void *buf, size_t size, uint16_t indicator)
+{
+ sli4_cmd_unreg_fcfi_t *unreg_fcfi = buf;
+
+ if (!sli4 || !buf) {
+ return 0;
+ }
+
+ ocs_memset(buf, 0, size);
+
+ unreg_fcfi->hdr.command = SLI4_MBOX_COMMAND_UNREG_FCFI;
+
+ unreg_fcfi->fcfi = indicator;
+
+ return sizeof(sli4_cmd_unreg_fcfi_t);
+}
+
+/**
+ * @ingroup sli
+ * @brief Write an UNREG_RPI command to the provided buffer.
+ *
+ * @param sli4 SLI context pointer.
+ * @param buf Virtual pointer to the destination buffer.
+ * @param size Buffer size, in bytes.
+ * @param indicator Indicator value.
+ * @param which Type of unregister, such as node, port, domain, or FCF.
+ * @param fc_id FC address.
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_unreg_rpi(sli4_t *sli4, void *buf, size_t size, uint16_t indicator, sli4_resource_e which,
+ uint32_t fc_id)
+{
+ sli4_cmd_unreg_rpi_t *unreg_rpi = buf;
+ uint8_t index_indicator = 0;
+
+ if (!sli4 || !buf) {
+ return 0;
+ }
+
+ ocs_memset(buf, 0, size);
+
+ unreg_rpi->hdr.command = SLI4_MBOX_COMMAND_UNREG_RPI;
+
+ switch (which) {
+ case SLI_RSRC_FCOE_RPI:
+ index_indicator = SLI4_UNREG_RPI_II_RPI;
+ if (fc_id != UINT32_MAX) {
+ unreg_rpi->dp = TRUE;
+ unreg_rpi->destination_n_port_id = fc_id & 0x00ffffff;
+ }
+ break;
+ case SLI_RSRC_FCOE_VPI:
+ index_indicator = SLI4_UNREG_RPI_II_VPI;
+ break;
+ case SLI_RSRC_FCOE_VFI:
+ index_indicator = SLI4_UNREG_RPI_II_VFI;
+ break;
+ case SLI_RSRC_FCOE_FCFI:
+ index_indicator = SLI4_UNREG_RPI_II_FCFI;
+ break;
+ default:
+ ocs_log_test(sli4->os, "unknown type %#x\n", which);
+ return 0;
+ }
+
+ unreg_rpi->ii = index_indicator;
+ unreg_rpi->index = indicator;
+
+ return sizeof(sli4_cmd_unreg_rpi_t);
+}
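+
+/*
+ * Example (sketch): unregistering a single RPI with no destination
+ * N_Port_ID supplied (UINT32_MAX is the sentinel used above):
+ *
+ *	sli_cmd_unreg_rpi(sli4, buf, size, rpi, SLI_RSRC_FCOE_RPI, UINT32_MAX);
+ */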
+
+/**
+ * @ingroup sli
+ * @brief Write an UNREG_VFI command to the provided buffer.
+ *
+ * @param sli4 SLI context pointer.
+ * @param buf Virtual pointer to the destination buffer.
+ * @param size Buffer size, in bytes.
+ * @param domain Pointer to the domain object.
+ * @param which Type of unregister, such as domain, FCFI, or everything.
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_unreg_vfi(sli4_t *sli4, void *buf, size_t size, ocs_domain_t *domain, uint32_t which)
+{
+ sli4_cmd_unreg_vfi_t *unreg_vfi = buf;
+
+ if (!sli4 || !buf || !domain) {
+ return 0;
+ }
+
+ ocs_memset(buf, 0, size);
+
+ unreg_vfi->hdr.command = SLI4_MBOX_COMMAND_UNREG_VFI;
+ switch (which) {
+ case SLI4_UNREG_TYPE_DOMAIN:
+ unreg_vfi->index = domain->indicator;
+ break;
+ case SLI4_UNREG_TYPE_FCF:
+ unreg_vfi->index = domain->fcf_indicator;
+ break;
+ case SLI4_UNREG_TYPE_ALL:
+ unreg_vfi->index = UINT16_MAX;
+ break;
+ default:
+ return 0;
+ }
+
+ if (SLI4_UNREG_TYPE_DOMAIN != which) {
+ unreg_vfi->ii = SLI4_UNREG_VFI_II_FCFI;
+ }
+
+ return sizeof(sli4_cmd_unreg_vfi_t);
+}
+
+/**
+ * @ingroup sli
+ * @brief Write an UNREG_VPI command to the provided buffer.
+ *
+ * @param sli4 SLI context pointer.
+ * @param buf Virtual pointer to the destination buffer.
+ * @param size Buffer size, in bytes.
+ * @param indicator Indicator value.
+ * @param which Type of unregister: port, domain, FCFI, or everything.
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_unreg_vpi(sli4_t *sli4, void *buf, size_t size, uint16_t indicator, uint32_t which)
+{
+ sli4_cmd_unreg_vpi_t *unreg_vpi = buf;
+
+ if (!sli4 || !buf) {
+ return 0;
+ }
+
+ ocs_memset(buf, 0, size);
+
+ unreg_vpi->hdr.command = SLI4_MBOX_COMMAND_UNREG_VPI;
+ unreg_vpi->index = indicator;
+ switch (which) {
+ case SLI4_UNREG_TYPE_PORT:
+ unreg_vpi->ii = SLI4_UNREG_VPI_II_VPI;
+ break;
+ case SLI4_UNREG_TYPE_DOMAIN:
+ unreg_vpi->ii = SLI4_UNREG_VPI_II_VFI;
+ break;
+ case SLI4_UNREG_TYPE_FCF:
+ unreg_vpi->ii = SLI4_UNREG_VPI_II_FCFI;
+ break;
+ case SLI4_UNREG_TYPE_ALL:
+ unreg_vpi->index = UINT16_MAX; /* override indicator */
+ unreg_vpi->ii = SLI4_UNREG_VPI_II_FCFI;
+ break;
+ default:
+ return 0;
+ }
+
+ return sizeof(sli4_cmd_unreg_vpi_t);
+}
+
+
+/**
+ * @ingroup sli
+ * @brief Write a CONFIG_AUTO_XFER_RDY command to the provided buffer.
+ *
+ * @param sli4 SLI context pointer.
+ * @param buf Virtual pointer to the destination buffer.
+ * @param size Buffer size, in bytes.
+ * @param max_burst_len If the write FCP_DL is less than this size,
+ * then the SLI port will generate the auto XFER_RDY.
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_config_auto_xfer_rdy(sli4_t *sli4, void *buf, size_t size, uint32_t max_burst_len)
+{
+ sli4_cmd_config_auto_xfer_rdy_t *req = buf;
+
+ if (!sli4 || !buf) {
+ return 0;
+ }
+
+ ocs_memset(buf, 0, size);
+
+ req->hdr.command = SLI4_MBOX_COMMAND_CONFIG_AUTO_XFER_RDY;
+ req->max_burst_len = max_burst_len;
+
+ return sizeof(sli4_cmd_config_auto_xfer_rdy_t);
+}
+
+/**
+ * @ingroup sli
+ * @brief Write a CONFIG_AUTO_XFER_RDY_HP command to the provided buffer.
+ *
+ * @param sli4 SLI context pointer.
+ * @param buf Virtual pointer to the destination buffer.
+ * @param size Buffer size, in bytes.
+ * @param max_burst_len If the write FCP_DL is less than this size,
+ * then the SLI port will generate the auto XFER_RDY.
+ * @param esoc Enable start offset computation.
+ * @param block_size Block size.
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_config_auto_xfer_rdy_hp(sli4_t *sli4, void *buf, size_t size, uint32_t max_burst_len,
+ uint32_t esoc, uint32_t block_size )
+{
+ sli4_cmd_config_auto_xfer_rdy_hp_t *req = buf;
+
+ if (!sli4 || !buf) {
+ return 0;
+ }
+
+ ocs_memset(buf, 0, size);
+
+ req->hdr.command = SLI4_MBOX_COMMAND_CONFIG_AUTO_XFER_RDY_HP;
+ req->max_burst_len = max_burst_len;
+ req->esoc = esoc;
+ req->block_size = block_size;
+ return sizeof(sli4_cmd_config_auto_xfer_rdy_hp_t);
+}
+
+/**
+ * @brief Write a COMMON_FUNCTION_RESET command.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the command.
+ * @param size Buffer size, in bytes.
+ *
+ * @return Returns the number of bytes written.
+ */
+static int32_t
+sli_cmd_common_function_reset(sli4_t *sli4, void *buf, size_t size)
+{
+ sli4_req_common_function_reset_t *reset = NULL;
+ uint32_t sli_config_off = 0;
+
+ if (SLI4_PORT_TYPE_FC == sli4->port_type) {
+ uint32_t payload_size;
+
+ /* Payload length must accommodate both request and response */
+ payload_size = max(sizeof(sli4_req_common_function_reset_t),
+ sizeof(sli4_res_common_function_reset_t));
+
+ sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size,
+ NULL);
+ }
+ reset = (sli4_req_common_function_reset_t *)((uint8_t *)buf + sli_config_off);
+
+ reset->hdr.opcode = SLI4_OPC_COMMON_FUNCTION_RESET;
+ reset->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
+
+ return(sli_config_off + sizeof(sli4_req_common_function_reset_t));
+}
+
+/**
+ * @brief Write a COMMON_CREATE_CQ command.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the command.
+ * @param size Buffer size, in bytes.
+ * @param qmem DMA memory for the queue.
+ * @param eq_id Associated EQ_ID
+ * @param ignored This parameter carries the ULP which is only used for WQ and RQs
+ *
+ * @note This creates a Version 0 message.
+ *
+ * @return Returns the number of bytes written.
+ */
+static int32_t
+sli_cmd_common_create_cq(sli4_t *sli4, void *buf, size_t size,
+ ocs_dma_t *qmem, uint16_t eq_id, uint16_t ignored)
+{
+ sli4_req_common_create_cq_v0_t *cqv0 = NULL;
+ sli4_req_common_create_cq_v2_t *cqv2 = NULL;
+ uint32_t sli_config_off = 0;
+ uint32_t p;
+ uintptr_t addr;
+ uint32_t if_type = sli4->if_type;
+ uint32_t page_bytes = 0;
+ uint32_t num_pages = 0;
+ uint32_t cmd_size = 0;
+ uint32_t page_size = 0;
+ uint32_t n_cqe = 0;
+
+ /* First calculate number of pages and the mailbox cmd length */
+ switch (if_type)
+ {
+ case SLI4_IF_TYPE_BE3_SKH_PF:
+ page_bytes = SLI_PAGE_SIZE;
+ num_pages = sli_page_count(qmem->size, page_bytes);
+ cmd_size = sizeof(sli4_req_common_create_cq_v0_t) + (8 * num_pages);
+ break;
+ case SLI4_IF_TYPE_LANCER_FC_ETH:
+ n_cqe = qmem->size / SLI4_CQE_BYTES;
+ switch (n_cqe) {
+ case 256:
+ case 512:
+ case 1024:
+ case 2048:
+ page_size = 1;
+ break;
+ case 4096:
+ page_size = 2;
+ break;
+ default:
+ return 0;
+ }
+ page_bytes = page_size * SLI_PAGE_SIZE;
+ num_pages = sli_page_count(qmem->size, page_bytes);
+ cmd_size = sizeof(sli4_req_common_create_cq_v2_t) + (8 * num_pages);
+ break;
+ default:
+ ocs_log_test(sli4->os, "unsupported IF_TYPE %d\n", if_type);
+ return -1;
+ }
+
+ /* now that we have the mailbox command size, we can set SLI_CONFIG fields */
+ if (SLI4_PORT_TYPE_FC == sli4->port_type) {
+ uint32_t payload_size;
+
+ /* Payload length must accommodate both request and response */
+ payload_size = max((size_t)cmd_size, sizeof(sli4_res_common_create_queue_t));
+
+ sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size,
+ NULL);
+ }
+
+ switch (if_type)
+ {
+ case SLI4_IF_TYPE_BE3_SKH_PF:
+ cqv0 = (sli4_req_common_create_cq_v0_t *)((uint8_t *)buf + sli_config_off);
+ cqv0->hdr.opcode = SLI4_OPC_COMMON_CREATE_CQ;
+ cqv0->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
+ cqv0->hdr.version = 0;
+ cqv0->hdr.request_length = cmd_size - sizeof(sli4_req_hdr_t);
+
+ /* valid values for number of pages: 1, 2, 4 (sec 4.4.3) */
+ cqv0->num_pages = num_pages;
+ switch (cqv0->num_pages) {
+ case 1:
+ cqv0->cqecnt = SLI4_CQ_CNT_256;
+ break;
+ case 2:
+ cqv0->cqecnt = SLI4_CQ_CNT_512;
+ break;
+ case 4:
+ cqv0->cqecnt = SLI4_CQ_CNT_1024;
+ break;
+ default:
+ ocs_log_test(sli4->os, "num_pages %d not valid\n", cqv0->num_pages);
+ return -1;
+ }
+ cqv0->evt = TRUE;
+ cqv0->valid = TRUE;
+ /* TODO cq->nodelay = ???; */
+ /* TODO cq->clswm = ???; */
+ cqv0->arm = FALSE;
+ cqv0->eq_id = eq_id;
+
+ for (p = 0, addr = qmem->phys;
+ p < cqv0->num_pages;
+ p++, addr += page_bytes) {
+ cqv0->page_physical_address[p].low = ocs_addr32_lo(addr);
+ cqv0->page_physical_address[p].high = ocs_addr32_hi(addr);
+ }
+
+ break;
+ case SLI4_IF_TYPE_LANCER_FC_ETH:
+ {
+ cqv2 = (sli4_req_common_create_cq_v2_t *)((uint8_t *)buf + sli_config_off);
+ cqv2->hdr.opcode = SLI4_OPC_COMMON_CREATE_CQ;
+ cqv2->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
+ cqv2->hdr.version = 2;
+ cqv2->hdr.request_length = cmd_size - sizeof(sli4_req_hdr_t);
+
+ cqv2->page_size = page_size;
+
+ /* valid values for number of pages: 1, 2, 4, 8 (sec 4.4.3) */
+ cqv2->num_pages = num_pages;
+ if (!cqv2->num_pages || (cqv2->num_pages > SLI4_COMMON_CREATE_CQ_V2_MAX_PAGES)) {
+ return 0;
+ }
+
+ switch (cqv2->num_pages) {
+ case 1:
+ cqv2->cqecnt = SLI4_CQ_CNT_256;
+ break;
+ case 2:
+ cqv2->cqecnt = SLI4_CQ_CNT_512;
+ break;
+ case 4:
+ cqv2->cqecnt = SLI4_CQ_CNT_1024;
+ break;
+ case 8:
+ cqv2->cqecnt = SLI4_CQ_CNT_LARGE;
+ cqv2->cqe_count = n_cqe;
+ break;
+ default:
+ ocs_log_test(sli4->os, "num_pages %d not valid\n", cqv2->num_pages);
+ return -1;
+ }
+
+ cqv2->evt = TRUE;
+ cqv2->valid = TRUE;
+ /* TODO cq->nodelay = ???; */
+ /* TODO cq->clswm = ???; */
+ cqv2->arm = FALSE;
+ cqv2->eq_id = eq_id;
+
+ for (p = 0, addr = qmem->phys;
+ p < cqv2->num_pages;
+ p++, addr += page_bytes) {
+ cqv2->page_physical_address[p].low = ocs_addr32_lo(addr);
+ cqv2->page_physical_address[p].high = ocs_addr32_hi(addr);
+ }
+ }
+ break;
+ default:
+ ocs_log_test(sli4->os, "unsupported IF_TYPE %d\n", if_type);
+ return -1;
+ }
+
+ return (sli_config_off + cmd_size);
+}
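+
+/*
+ * Worked example (illustrative; assumes 16-byte CQEs, i.e.
+ * SLI4_CQE_BYTES == 16, and 4 KiB SLI pages): a 16 KiB qmem on a Lancer port
+ * gives n_cqe = 16384 / 16 = 1024, so page_size = 1, page_bytes = 4096,
+ * num_pages = 4, and cqecnt = SLI4_CQ_CNT_1024.
+ */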
+
+/**
+ * @brief Write a COMMON_DESTROY_CQ command.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the command.
+ * @param size Buffer size, in bytes.
+ * @param cq_id CQ ID
+ *
+ * @note This creates a Version 0 message.
+ *
+ * @return Returns the number of bytes written.
+ */
+static int32_t
+sli_cmd_common_destroy_cq(sli4_t *sli4, void *buf, size_t size, uint16_t cq_id)
+{
+ sli4_req_common_destroy_cq_t *cq = NULL;
+ uint32_t sli_config_off = 0;
+
+ if (SLI4_PORT_TYPE_FC == sli4->port_type) {
+ sli_config_off = sli_cmd_sli_config(sli4, buf, size,
+ /* Payload length must accommodate both request and response */
+ max(sizeof(sli4_req_common_destroy_cq_t),
+ sizeof(sli4_res_hdr_t)),
+ NULL);
+ }
+ cq = (sli4_req_common_destroy_cq_t *)((uint8_t *)buf + sli_config_off);
+
+ cq->hdr.opcode = SLI4_OPC_COMMON_DESTROY_CQ;
+ cq->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
+ cq->hdr.request_length = sizeof(sli4_req_common_destroy_cq_t) -
+ sizeof(sli4_req_hdr_t);
+ cq->cq_id = cq_id;
+
+ return(sli_config_off + sizeof(sli4_req_common_destroy_cq_t));
+}
+
+/**
+ * @brief Write a COMMON_MODIFY_EQ_DELAY command.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the command.
+ * @param size Buffer size, in bytes.
+ * @param q Queue object array.
+ * @param num_q Queue object array count.
+ * @param shift Phase shift for staggering interrupts.
+ * @param delay_mult Delay multiplier for limiting interrupt frequency.
+ *
+ * @return Returns the number of bytes written.
+ */
+static int32_t
+sli_cmd_common_modify_eq_delay(sli4_t *sli4, void *buf, size_t size, sli4_queue_t *q, int num_q, uint32_t shift,
+ uint32_t delay_mult)
+{
+ sli4_req_common_modify_eq_delay_t *modify_delay = NULL;
+ uint32_t sli_config_off = 0;
+ int i;
+
+ if (SLI4_PORT_TYPE_FC == sli4->port_type) {
+ sli_config_off = sli_cmd_sli_config(sli4, buf, size,
+ /* Payload length must accommodate both request and response */
+ max(sizeof(sli4_req_common_modify_eq_delay_t), sizeof(sli4_res_hdr_t)),
+ NULL);
+ }
+
+ modify_delay = (sli4_req_common_modify_eq_delay_t *)((uint8_t *)buf + sli_config_off);
+
+ modify_delay->hdr.opcode = SLI4_OPC_COMMON_MODIFY_EQ_DELAY;
+ modify_delay->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
+ modify_delay->hdr.request_length = sizeof(sli4_req_common_modify_eq_delay_t) -
+ sizeof(sli4_req_hdr_t);
+
+ modify_delay->num_eq = num_q;
+
+	for (i = 0; i < num_q; i++) {
+ modify_delay->eq_delay_record[i].eq_id = q[i].id;
+ modify_delay->eq_delay_record[i].phase = shift;
+ modify_delay->eq_delay_record[i].delay_multiplier = delay_mult;
+ }
+
+ return(sli_config_off + sizeof(sli4_req_common_modify_eq_delay_t));
+}
+
+/**
+ * @brief Write a COMMON_CREATE_EQ command.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the command.
+ * @param size Buffer size, in bytes.
+ * @param qmem DMA memory for the queue.
+ * @param ignored1 Ignored (used for consistency among queue creation functions).
+ * @param ignored2 Ignored (used for consistency among queue creation functions).
+ *
+ * @note Other queue creation routines use the last parameter to pass in
+ * the associated Q_ID and ULP. EQ doesn't have an associated queue or ULP,
+ * so these parameters are ignored.
+ *
+ * @note This creates a Version 0 message.
+ *
+ * @return Returns the number of bytes written.
+ */
+static int32_t
+sli_cmd_common_create_eq(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *qmem,
+ uint16_t ignored1, uint16_t ignored2)
+{
+ sli4_req_common_create_eq_t *eq = NULL;
+ uint32_t sli_config_off = 0;
+ uint32_t p;
+ uintptr_t addr;
+
+ if (SLI4_PORT_TYPE_FC == sli4->port_type) {
+ uint32_t payload_size;
+
+ /* Payload length must accommodate both request and response */
+ payload_size = max(sizeof(sli4_req_common_create_eq_t),
+ sizeof(sli4_res_common_create_queue_t));
+
+ sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size,
+ NULL);
+ }
+ eq = (sli4_req_common_create_eq_t *)((uint8_t *)buf + sli_config_off);
+
+ eq->hdr.opcode = SLI4_OPC_COMMON_CREATE_EQ;
+ eq->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
+ eq->hdr.request_length = sizeof(sli4_req_common_create_eq_t) -
+ sizeof(sli4_req_hdr_t);
+ /* valid values for number of pages: 1, 2, 4 (sec 4.4.3) */
+ eq->num_pages = qmem->size / SLI_PAGE_SIZE;
+ switch (eq->num_pages) {
+ case 1:
+ eq->eqesz = SLI4_EQE_SIZE_4;
+ eq->count = SLI4_EQ_CNT_1024;
+ break;
+ case 2:
+ eq->eqesz = SLI4_EQE_SIZE_4;
+ eq->count = SLI4_EQ_CNT_2048;
+ break;
+ case 4:
+ eq->eqesz = SLI4_EQE_SIZE_4;
+ eq->count = SLI4_EQ_CNT_4096;
+ break;
+ default:
+ ocs_log_test(sli4->os, "num_pages %d not valid\n", eq->num_pages);
+ return -1;
+ }
+ eq->valid = TRUE;
+ eq->arm = FALSE;
+ eq->delay_multiplier = 32;
+
+ for (p = 0, addr = qmem->phys;
+ p < eq->num_pages;
+ p++, addr += SLI_PAGE_SIZE) {
+ eq->page_address[p].low = ocs_addr32_lo(addr);
+ eq->page_address[p].high = ocs_addr32_hi(addr);
+ }
+
+ return(sli_config_off + sizeof(sli4_req_common_create_eq_t));
+}
+
+
+/**
+ * @brief Write a COMMON_DESTROY_EQ command.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the command.
+ * @param size Buffer size, in bytes.
+ * @param eq_id Queue ID to destroy.
+ *
+ * @note Other queue creation routines use the last parameter to pass in
+ * the associated Q_ID. EQ doesn't have an associated queue so this
+ * parameter is ignored.
+ *
+ * @note This creates a Version 0 message.
+ *
+ * @return Returns the number of bytes written.
+ */
+static int32_t
+sli_cmd_common_destroy_eq(sli4_t *sli4, void *buf, size_t size, uint16_t eq_id)
+{
+ sli4_req_common_destroy_eq_t *eq = NULL;
+ uint32_t sli_config_off = 0;
+
+ if (SLI4_PORT_TYPE_FC == sli4->port_type) {
+ sli_config_off = sli_cmd_sli_config(sli4, buf, size,
+ /* Payload length must accommodate both request and response */
+ max(sizeof(sli4_req_common_destroy_eq_t),
+ sizeof(sli4_res_hdr_t)),
+ NULL);
+ }
+ eq = (sli4_req_common_destroy_eq_t *)((uint8_t *)buf + sli_config_off);
+
+ eq->hdr.opcode = SLI4_OPC_COMMON_DESTROY_EQ;
+ eq->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
+ eq->hdr.request_length = sizeof(sli4_req_common_destroy_eq_t) -
+ sizeof(sli4_req_hdr_t);
+
+ eq->eq_id = eq_id;
+
+ return(sli_config_off + sizeof(sli4_req_common_destroy_eq_t));
+}
+
+/**
+ * @brief Write a LOWLEVEL_SET_WATCHDOG command.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the command.
+ * @param size Buffer size, in bytes.
+ * @param timeout Watchdog timer timeout, in seconds.
+ *
+ * @return void
+ */
+void
+sli4_cmd_lowlevel_set_watchdog(sli4_t *sli4, void *buf, size_t size, uint16_t timeout)
+{
+ sli4_req_lowlevel_set_watchdog_t *req = NULL;
+ uint32_t sli_config_off = 0;
+
+ if (SLI4_PORT_TYPE_FC == sli4->port_type) {
+ sli_config_off = sli_cmd_sli_config(sli4, buf, size,
+ /* Payload length must accommodate both request and response */
+ max(sizeof(sli4_req_lowlevel_set_watchdog_t),
+ sizeof(sli4_res_lowlevel_set_watchdog_t)),
+ NULL);
+ }
+ req = (sli4_req_lowlevel_set_watchdog_t *)((uint8_t *)buf + sli_config_off);
+
+ req->hdr.opcode = SLI4_OPC_LOWLEVEL_SET_WATCHDOG;
+ req->hdr.subsystem = SLI4_SUBSYSTEM_LOWLEVEL;
+ req->hdr.request_length = sizeof(sli4_req_lowlevel_set_watchdog_t) - sizeof(sli4_req_hdr_t);
+ req->watchdog_timeout = timeout;
+
+ return;
+}
+
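+/**
+ * @brief Write a COMMON_GET_CNTL_ATTRIBUTES command.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the command.
+ * @param size Buffer size, in bytes.
+ * @param dma DMA buffer used to carry the request and receive the response.
+ *
+ * @return Returns the number of bytes written.
+ */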
+static int32_t
+sli_cmd_common_get_cntl_attributes(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma)
+{
+ sli4_req_hdr_t *hdr = NULL;
+ uint32_t sli_config_off = 0;
+
+ if (SLI4_PORT_TYPE_FC == sli4->port_type) {
+ sli_config_off = sli_cmd_sli_config(sli4, buf, size,
+ sizeof(sli4_req_hdr_t),
+ dma);
+ }
+
+ if (dma == NULL) {
+ return 0;
+ }
+
+ ocs_memset(dma->virt, 0, dma->size);
+
+ hdr = dma->virt;
+
+ hdr->opcode = SLI4_OPC_COMMON_GET_CNTL_ATTRIBUTES;
+ hdr->subsystem = SLI4_SUBSYSTEM_COMMON;
+ hdr->request_length = dma->size;
+
+ return(sli_config_off + sizeof(sli4_req_hdr_t));
+}
+
+/**
+ * @brief Write a COMMON_GET_CNTL_ADDL_ATTRIBUTES command.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the command.
+ * @param size Buffer size, in bytes.
+ * @param dma DMA buffer used to carry the request and receive the response.
+ *
+ * @note This creates a Version 0 message.
+ *
+ * @return Returns the number of bytes written.
+ */
+static int32_t
+sli_cmd_common_get_cntl_addl_attributes(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma)
+{
+ sli4_req_hdr_t *hdr = NULL;
+ uint32_t sli_config_off = 0;
+
+ if (SLI4_PORT_TYPE_FC == sli4->port_type) {
+ sli_config_off = sli_cmd_sli_config(sli4, buf, size, sizeof(sli4_req_hdr_t), dma);
+ }
+
+ if (dma == NULL) {
+ return 0;
+ }
+
+ ocs_memset(dma->virt, 0, dma->size);
+
+ hdr = dma->virt;
+
+ hdr->opcode = SLI4_OPC_COMMON_GET_CNTL_ADDL_ATTRIBUTES;
+ hdr->subsystem = SLI4_SUBSYSTEM_COMMON;
+ hdr->request_length = dma->size;
+
+ return(sli_config_off + sizeof(sli4_req_hdr_t));
+}
+
+/**
+ * @brief Write a COMMON_CREATE_MQ_EXT command.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the command.
+ * @param size Buffer size, in bytes.
+ * @param qmem DMA memory for the queue.
+ * @param cq_id Associated CQ_ID.
+ * @param ignored This parameter carries the ULP, which is used only for WQs and RQs.
+ *
+ * @note This creates a Version 0 message.
+ *
+ * @return Returns the number of bytes written.
+ */
+static int32_t
+sli_cmd_common_create_mq_ext(sli4_t *sli4, void *buf, size_t size,
+ ocs_dma_t *qmem, uint16_t cq_id, uint16_t ignored)
+{
+ sli4_req_common_create_mq_ext_t *mq = NULL;
+ uint32_t sli_config_off = 0;
+ uint32_t p;
+ uintptr_t addr;
+
+ if (SLI4_PORT_TYPE_FC == sli4->port_type) {
+ uint32_t payload_size;
+
+ /* Payload length must accommodate both request and response */
+ payload_size = max(sizeof(sli4_req_common_create_mq_ext_t),
+ sizeof(sli4_res_common_create_queue_t));
+
+ sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size,
+ NULL);
+ }
+ mq = (sli4_req_common_create_mq_ext_t *)((uint8_t *)buf + sli_config_off);
+
+ mq->hdr.opcode = SLI4_OPC_COMMON_CREATE_MQ_EXT;
+ mq->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
+ mq->hdr.request_length = sizeof(sli4_req_common_create_mq_ext_t) -
+ sizeof(sli4_req_hdr_t);
+ /* valid values for number of pages: 1, 2, 4, 8 (sec 4.4.12) */
+ mq->num_pages = qmem->size / SLI_PAGE_SIZE;
+ switch (mq->num_pages) {
+ case 1:
+ mq->ring_size = SLI4_MQE_SIZE_16;
+ break;
+ case 2:
+ mq->ring_size = SLI4_MQE_SIZE_32;
+ break;
+ case 4:
+ mq->ring_size = SLI4_MQE_SIZE_64;
+ break;
+ case 8:
+ mq->ring_size = SLI4_MQE_SIZE_128;
+ break;
+ default:
+ ocs_log_test(sli4->os, "num_pages %d not valid\n", mq->num_pages);
+ return -1;
+ }
+
+ /* TODO break this down by sli4->config.topology */
+ mq->async_event_bitmap = SLI4_ASYNC_EVT_FC_FCOE;
+
+ if (sli4->config.mq_create_version) {
+ mq->cq_id_v1 = cq_id;
+ mq->hdr.version = 1;
+ }
+	} else {
+ }
+ mq->val = TRUE;
+
+ for (p = 0, addr = qmem->phys;
+ p < mq->num_pages;
+ p++, addr += SLI_PAGE_SIZE) {
+ mq->page_physical_address[p].low = ocs_addr32_lo(addr);
+ mq->page_physical_address[p].high = ocs_addr32_hi(addr);
+ }
+
+ return(sli_config_off + sizeof(sli4_req_common_create_mq_ext_t));
+}
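+
+/*
+ * Worked example (illustrative; assumes 256-byte MQEs and 4 KiB SLI pages):
+ * one page holds 4096 / 256 = 16 entries, so qmem sizes of 1, 2, 4, and 8
+ * pages map to ring sizes of 16, 32, 64, and 128 entries respectively,
+ * matching the switch above.
+ */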
+
+/**
+ * @brief Write a COMMON_DESTROY_MQ command.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the command.
+ * @param size Buffer size, in bytes.
+ * @param mq_id MQ ID
+ *
+ * @note This creates a Version 0 message.
+ *
+ * @return Returns the number of bytes written.
+ */
+static int32_t
+sli_cmd_common_destroy_mq(sli4_t *sli4, void *buf, size_t size, uint16_t mq_id)
+{
+ sli4_req_common_destroy_mq_t *mq = NULL;
+ uint32_t sli_config_off = 0;
+
+ if (SLI4_PORT_TYPE_FC == sli4->port_type) {
+ sli_config_off = sli_cmd_sli_config(sli4, buf, size,
+ /* Payload length must accommodate both request and response */
+ max(sizeof(sli4_req_common_destroy_mq_t),
+ sizeof(sli4_res_hdr_t)),
+ NULL);
+ }
+ mq = (sli4_req_common_destroy_mq_t *)((uint8_t *)buf + sli_config_off);
+
+ mq->hdr.opcode = SLI4_OPC_COMMON_DESTROY_MQ;
+ mq->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
+ mq->hdr.request_length = sizeof(sli4_req_common_destroy_mq_t) -
+ sizeof(sli4_req_hdr_t);
+
+ mq->mq_id = mq_id;
+
+ return(sli_config_off + sizeof(sli4_req_common_destroy_mq_t));
+}
+
+/**
+ * @ingroup sli
+ * @brief Write a COMMON_NOP command
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the command.
+ * @param size Buffer size, in bytes.
+ * @param context NOP context value (passed to response, except on FC/FCoE).
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_common_nop(sli4_t *sli4, void *buf, size_t size, uint64_t context)
+{
+ sli4_req_common_nop_t *nop = NULL;
+ uint32_t sli_config_off = 0;
+
+ if (SLI4_PORT_TYPE_FC == sli4->port_type) {
+ sli_config_off = sli_cmd_sli_config(sli4, buf, size,
+ /* Payload length must accommodate both request and response */
+ max(sizeof(sli4_req_common_nop_t), sizeof(sli4_res_common_nop_t)),
+ NULL);
+ }
+
+ nop = (sli4_req_common_nop_t *)((uint8_t *)buf + sli_config_off);
+
+ nop->hdr.opcode = SLI4_OPC_COMMON_NOP;
+ nop->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
+ nop->hdr.request_length = 8;
+
+ ocs_memcpy(&nop->context, &context, sizeof(context));
+
+ return(sli_config_off + sizeof(sli4_req_common_nop_t));
+}
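+
+/*
+ * Usage sketch (illustrative only): NOP can serve as a simple mailbox
+ * liveness check over the bootstrap mailbox:
+ *
+ *	if (sli_cmd_common_nop(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, 0xa5a5)) {
+ *		if (sli_bmbx_command(sli4))
+ *			ocs_log_err(sli4->os, "NOP failed\n");
+ *	}
+ */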
+
+/**
+ * @ingroup sli
+ * @brief Write a COMMON_GET_RESOURCE_EXTENT_INFO command.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the command.
+ * @param size Buffer size, in bytes.
+ * @param rtype Resource type (for example, XRI, VFI, VPI, and RPI).
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_common_get_resource_extent_info(sli4_t *sli4, void *buf, size_t size, uint16_t rtype)
+{
+ sli4_req_common_get_resource_extent_info_t *extent = NULL;
+ uint32_t sli_config_off = 0;
+
+ if (SLI4_PORT_TYPE_FC == sli4->port_type) {
+ sli_config_off = sli_cmd_sli_config(sli4, buf, size,
+ sizeof(sli4_req_common_get_resource_extent_info_t),
+ NULL);
+ }
+
+ extent = (sli4_req_common_get_resource_extent_info_t *)((uint8_t *)buf + sli_config_off);
+
+ extent->hdr.opcode = SLI4_OPC_COMMON_GET_RESOURCE_EXTENT_INFO;
+ extent->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
+ extent->hdr.request_length = 4;
+
+ extent->resource_type = rtype;
+
+ return(sli_config_off + sizeof(sli4_req_common_get_resource_extent_info_t));
+}
+
+/**
+ * @ingroup sli
+ * @brief Write a COMMON_GET_SLI4_PARAMETERS command.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the command.
+ * @param size Buffer size, in bytes.
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_common_get_sli4_parameters(sli4_t *sli4, void *buf, size_t size)
+{
+ sli4_req_hdr_t *hdr = NULL;
+ uint32_t sli_config_off = 0;
+
+ if (SLI4_PORT_TYPE_FC == sli4->port_type) {
+ sli_config_off = sli_cmd_sli_config(sli4, buf, size,
+ sizeof(sli4_res_common_get_sli4_parameters_t),
+ NULL);
+ }
+
+ hdr = (sli4_req_hdr_t *)((uint8_t *)buf + sli_config_off);
+
+ hdr->opcode = SLI4_OPC_COMMON_GET_SLI4_PARAMETERS;
+ hdr->subsystem = SLI4_SUBSYSTEM_COMMON;
+ hdr->request_length = 0x50;
+
+ return(sli_config_off + sizeof(sli4_req_hdr_t));
+}
+
+/**
+ * @brief Write a COMMON_QUERY_FW_CONFIG command to the provided buffer.
+ *
+ * @param sli4 SLI context pointer.
+ * @param buf Virtual pointer to destination buffer.
+ * @param size Buffer size in bytes.
+ *
+ * @return Returns the number of bytes written
+ */
+static int32_t
+sli_cmd_common_query_fw_config(sli4_t *sli4, void *buf, size_t size)
+{
+ sli4_req_common_query_fw_config_t *fw_config;
+ uint32_t sli_config_off = 0;
+ uint32_t payload_size;
+
+ /* Payload length must accommodate both request and response */
+ payload_size = max(sizeof(sli4_req_common_query_fw_config_t),
+ sizeof(sli4_res_common_query_fw_config_t));
+
+ if (SLI4_PORT_TYPE_FC == sli4->port_type) {
+ sli_config_off = sli_cmd_sli_config(sli4, buf, size,
+ payload_size,
+ NULL);
+ }
+
+ fw_config = (sli4_req_common_query_fw_config_t*)((uint8_t*)buf + sli_config_off);
+ fw_config->hdr.opcode = SLI4_OPC_COMMON_QUERY_FW_CONFIG;
+ fw_config->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
+ fw_config->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t);
+ return sli_config_off + sizeof(sli4_req_common_query_fw_config_t);
+}
+
+/**
+ * @brief Write a COMMON_GET_PORT_NAME command to the provided buffer.
+ *
+ * @param sli4 SLI context pointer.
+ * @param buf Virtual pointer to destination buffer.
+ * @param size Buffer size in bytes.
+ *
+ * @note This function supports both the Version 0 and Version 1 forms of
+ * this command, selected by IF_TYPE.
+ *
+ * @return Returns the number of bytes written.
+ */
+static int32_t
+sli_cmd_common_get_port_name(sli4_t *sli4, void *buf, size_t size)
+{
+ sli4_req_common_get_port_name_t *port_name;
+ uint32_t sli_config_off = 0;
+ uint32_t payload_size;
+ uint8_t version = 0;
+ uint8_t pt = 0;
+
+ /* Select command version according to IF_TYPE */
+ switch (sli4->if_type) {
+ case SLI4_IF_TYPE_BE3_SKH_PF:
+ case SLI4_IF_TYPE_BE3_SKH_VF:
+ version = 0;
+ break;
+ case SLI4_IF_TYPE_LANCER_FC_ETH:
+ case SLI4_IF_TYPE_LANCER_RDMA:
+ version = 1;
+ break;
+ default:
+ ocs_log_test(sli4->os, "unsupported IF_TYPE %d\n", sli4->if_type);
+ return 0;
+ }
+
+ /* Payload length must accommodate both request and response */
+ payload_size = max(sizeof(sli4_req_common_get_port_name_t),
+ sizeof(sli4_res_common_get_port_name_t));
+
+ if (SLI4_PORT_TYPE_FC == sli4->port_type) {
+ sli_config_off = sli_cmd_sli_config(sli4, buf, size,
+ payload_size,
+ NULL);
+
+ pt = 1;
+ }
+
+ port_name = (sli4_req_common_get_port_name_t *)((uint8_t *)buf + sli_config_off);
+
+ port_name->hdr.opcode = SLI4_OPC_COMMON_GET_PORT_NAME;
+ port_name->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
+ port_name->hdr.request_length = sizeof(sli4_req_hdr_t) + (version * sizeof(uint32_t));
+ port_name->hdr.version = version;
+
+ /* Set the port type value (ethernet=0, FC=1) for V1 commands */
+ if (version == 1) {
+ port_name->pt = pt;
+ }
+
+ return sli_config_off + port_name->hdr.request_length;
+}
+
+
+/**
+ * @ingroup sli
+ * @brief Write a COMMON_WRITE_OBJECT command.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the command.
+ * @param size Buffer size, in bytes.
+ * @param noc True if the object should be written but not committed to flash.
+ * @param eof True if this is the last write for this object.
+ * @param desired_write_length Number of bytes of data to write to the object.
+ * @param offset Offset, in bytes, from the start of the object.
+ * @param object_name Name of the object to write.
+ * @param dma DMA structure from which the data will be copied.
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_common_write_object(sli4_t *sli4, void *buf, size_t size,
+ uint16_t noc, uint16_t eof, uint32_t desired_write_length,
+ uint32_t offset,
+ char *object_name,
+ ocs_dma_t *dma)
+{
+ sli4_req_common_write_object_t *wr_obj = NULL;
+ uint32_t sli_config_off = 0;
+ sli4_bde_t *host_buffer;
+
+ if (SLI4_PORT_TYPE_FC == sli4->port_type) {
+ sli_config_off = sli_cmd_sli_config(sli4, buf, size,
+ sizeof (sli4_req_common_write_object_t) + sizeof (sli4_bde_t),
+ NULL);
+ }
+
+ wr_obj = (sli4_req_common_write_object_t *)((uint8_t *)buf + sli_config_off);
+
+ wr_obj->hdr.opcode = SLI4_OPC_COMMON_WRITE_OBJECT;
+ wr_obj->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
+ wr_obj->hdr.request_length = sizeof(*wr_obj) - 4*sizeof(uint32_t) + sizeof(sli4_bde_t);
+ wr_obj->hdr.timeout = 0;
+ wr_obj->hdr.version = 0;
+
+ wr_obj->noc = noc;
+ wr_obj->eof = eof;
+ wr_obj->desired_write_length = desired_write_length;
+ wr_obj->write_offset = offset;
+ ocs_strncpy(wr_obj->object_name, object_name, sizeof(wr_obj->object_name));
+ wr_obj->host_buffer_descriptor_count = 1;
+
+ host_buffer = (sli4_bde_t *)wr_obj->host_buffer_descriptor;
+
+	/* Set up to transfer desired_write_length bytes to the device */
+ host_buffer->bde_type = SLI4_BDE_TYPE_BDE_64;
+ host_buffer->buffer_length = desired_write_length;
+ host_buffer->u.data.buffer_address_low = ocs_addr32_lo(dma->phys);
+ host_buffer->u.data.buffer_address_high = ocs_addr32_hi(dma->phys);
+
+ return(sli_config_off + sizeof(sli4_req_common_write_object_t) + sizeof (sli4_bde_t));
+}
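+
+/*
+ * Usage sketch (illustrative only; the object name and commit policy below
+ * are examples, not prescribed by this driver): an object larger than one
+ * DMA buffer is written in chunks, with eof set only on the final chunk and
+ * noc deferring the commit until then:
+ *
+ *	for (off = 0; off < img_len; off += chunk) {
+ *		chunk = MIN(img_len - off, dma.size);
+ *		last = (off + chunk >= img_len);
+ *		sli_cmd_common_write_object(sli4, buf, size, !last, last,
+ *					    chunk, off, "/prg/", &dma);
+ *		... issue the command and check the status ...
+ *	}
+ */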
+
+
+/**
+ * @ingroup sli
+ * @brief Write a COMMON_DELETE_OBJECT command.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the command.
+ * @param size Buffer size, in bytes.
+ * @param object_name Name of the object to write.
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_common_delete_object(sli4_t *sli4, void *buf, size_t size,
+ char *object_name)
+{
+ sli4_req_common_delete_object_t *del_obj = NULL;
+ uint32_t sli_config_off = 0;
+
+ if (SLI4_PORT_TYPE_FC == sli4->port_type) {
+ sli_config_off = sli_cmd_sli_config(sli4, buf, size,
+ sizeof (sli4_req_common_delete_object_t),
+ NULL);
+ }
+
+ del_obj = (sli4_req_common_delete_object_t *)((uint8_t *)buf + sli_config_off);
+
+ del_obj->hdr.opcode = SLI4_OPC_COMMON_DELETE_OBJECT;
+ del_obj->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
+ del_obj->hdr.request_length = sizeof(*del_obj);
+ del_obj->hdr.timeout = 0;
+ del_obj->hdr.version = 0;
+
+ ocs_strncpy(del_obj->object_name, object_name, sizeof(del_obj->object_name));
+ return(sli_config_off + sizeof(sli4_req_common_delete_object_t));
+}
+
+/**
+ * @ingroup sli
+ * @brief Write a COMMON_READ_OBJECT command.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the command.
+ * @param size Buffer size, in bytes.
+ * @param desired_read_length Number of bytes of data to read from the object.
+ * @param offset Offset, in bytes, from the start of the object.
+ * @param object_name Name of the object to read.
+ * @param dma DMA structure into which the data will be copied.
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_common_read_object(sli4_t *sli4, void *buf, size_t size,
+ uint32_t desired_read_length,
+ uint32_t offset,
+ char *object_name,
+ ocs_dma_t *dma)
+{
+ sli4_req_common_read_object_t *rd_obj = NULL;
+ uint32_t sli_config_off = 0;
+ sli4_bde_t *host_buffer;
+
+ if (SLI4_PORT_TYPE_FC == sli4->port_type) {
+ sli_config_off = sli_cmd_sli_config(sli4, buf, size,
+ sizeof (sli4_req_common_read_object_t) + sizeof (sli4_bde_t),
+ NULL);
+ }
+
+ rd_obj = (sli4_req_common_read_object_t *)((uint8_t *)buf + sli_config_off);
+
+ rd_obj->hdr.opcode = SLI4_OPC_COMMON_READ_OBJECT;
+ rd_obj->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
+ rd_obj->hdr.request_length = sizeof(*rd_obj) - 4*sizeof(uint32_t) + sizeof(sli4_bde_t);
+ rd_obj->hdr.timeout = 0;
+ rd_obj->hdr.version = 0;
+
+ rd_obj->desired_read_length = desired_read_length;
+ rd_obj->read_offset = offset;
+ ocs_strncpy(rd_obj->object_name, object_name, sizeof(rd_obj->object_name));
+ rd_obj->host_buffer_descriptor_count = 1;
+
+ host_buffer = (sli4_bde_t *)rd_obj->host_buffer_descriptor;
+
+	/* Set up to receive desired_read_length bytes from the device */
+ host_buffer->bde_type = SLI4_BDE_TYPE_BDE_64;
+ host_buffer->buffer_length = desired_read_length;
+ if (dma != NULL) {
+ host_buffer->u.data.buffer_address_low = ocs_addr32_lo(dma->phys);
+ host_buffer->u.data.buffer_address_high = ocs_addr32_hi(dma->phys);
+ } else {
+ host_buffer->u.data.buffer_address_low = 0;
+ host_buffer->u.data.buffer_address_high = 0;
+ }
+
+ return(sli_config_off + sizeof(sli4_req_common_read_object_t) + sizeof (sli4_bde_t));
+}
+
+/**
+ * @ingroup sli
+ * @brief Write a DMTF_EXEC_CLP_CMD command.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the command.
+ * @param size Buffer size, in bytes.
+ * @param cmd DMA structure that describes the buffer for the command.
+ * @param resp DMA structure that describes the buffer for the response.
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_dmtf_exec_clp_cmd(sli4_t *sli4, void *buf, size_t size,
+ ocs_dma_t *cmd,
+ ocs_dma_t *resp)
+{
+ sli4_req_dmtf_exec_clp_cmd_t *clp_cmd = NULL;
+ uint32_t sli_config_off = 0;
+
+ if (SLI4_PORT_TYPE_FC == sli4->port_type) {
+ sli_config_off = sli_cmd_sli_config(sli4, buf, size,
+ sizeof (sli4_req_dmtf_exec_clp_cmd_t),
+ NULL);
+ }
+
+ clp_cmd = (sli4_req_dmtf_exec_clp_cmd_t*)((uint8_t *)buf + sli_config_off);
+
+ clp_cmd->hdr.opcode = SLI4_OPC_DMTF_EXEC_CLP_CMD;
+ clp_cmd->hdr.subsystem = SLI4_SUBSYSTEM_DMTF;
+ clp_cmd->hdr.request_length = sizeof(sli4_req_dmtf_exec_clp_cmd_t) -
+ sizeof(sli4_req_hdr_t);
+ clp_cmd->hdr.timeout = 0;
+ clp_cmd->hdr.version = 0;
+ clp_cmd->cmd_buf_length = cmd->size;
+ clp_cmd->cmd_buf_addr_low = ocs_addr32_lo(cmd->phys);
+ clp_cmd->cmd_buf_addr_high = ocs_addr32_hi(cmd->phys);
+ clp_cmd->resp_buf_length = resp->size;
+ clp_cmd->resp_buf_addr_low = ocs_addr32_lo(resp->phys);
+ clp_cmd->resp_buf_addr_high = ocs_addr32_hi(resp->phys);
+
+ return(sli_config_off + sizeof(sli4_req_dmtf_exec_clp_cmd_t));
+}
+
+/**
+ * @ingroup sli
+ * @brief Write a COMMON_SET_DUMP_LOCATION command.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the command.
+ * @param size Buffer size, in bytes.
+ * @param query Zero to set the dump location, non-zero to query the dump size.
+ * @param is_buffer_list Set to 1 if the buffer is a list of buffer descriptors,
+ * or 0 if the buffer is a contiguous dump area.
+ * @param buffer DMA structure to which the dump will be copied.
+ * @param fdb Function dump buffer flag, written to the command's fdb field.
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_common_set_dump_location(sli4_t *sli4, void *buf, size_t size,
+ uint8_t query, uint8_t is_buffer_list,
+ ocs_dma_t *buffer, uint8_t fdb)
+{
+ sli4_req_common_set_dump_location_t *set_dump_loc = NULL;
+ uint32_t sli_config_off = 0;
+
+ if (SLI4_PORT_TYPE_FC == sli4->port_type) {
+ sli_config_off = sli_cmd_sli_config(sli4, buf, size,
+ sizeof (sli4_req_common_set_dump_location_t),
+ NULL);
+ }
+
+ set_dump_loc = (sli4_req_common_set_dump_location_t *)((uint8_t *)buf + sli_config_off);
+
+ set_dump_loc->hdr.opcode = SLI4_OPC_COMMON_SET_DUMP_LOCATION;
+ set_dump_loc->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
+ set_dump_loc->hdr.request_length = sizeof(sli4_req_common_set_dump_location_t) - sizeof(sli4_req_hdr_t);
+ set_dump_loc->hdr.timeout = 0;
+ set_dump_loc->hdr.version = 0;
+
+ set_dump_loc->blp = is_buffer_list;
+ set_dump_loc->qry = query;
+ set_dump_loc->fdb = fdb;
+
+ if (buffer) {
+ set_dump_loc->buf_addr_low = ocs_addr32_lo(buffer->phys);
+ set_dump_loc->buf_addr_high = ocs_addr32_hi(buffer->phys);
+ set_dump_loc->buffer_length = buffer->len;
+ } else {
+ set_dump_loc->buf_addr_low = 0;
+ set_dump_loc->buf_addr_high = 0;
+ set_dump_loc->buffer_length = 0;
+ }
+
+ return(sli_config_off + sizeof(sli4_req_common_set_dump_location_t));
+}
+
+
+/**
+ * @ingroup sli
+ * @brief Write a COMMON_SET_FEATURES command.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the command.
+ * @param size Buffer size, in bytes.
+ * @param feature Feature to set.
+ * @param param_len Length of the parameter (must be a multiple of 4 bytes).
+ * @param parameter Pointer to the parameter value.
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_common_set_features(sli4_t *sli4, void *buf, size_t size,
+ uint32_t feature,
+ uint32_t param_len,
+ void* parameter)
+{
+ sli4_req_common_set_features_t *cmd = NULL;
+ uint32_t sli_config_off = 0;
+
+ if (SLI4_PORT_TYPE_FC == sli4->port_type) {
+ sli_config_off = sli_cmd_sli_config(sli4, buf, size,
+ sizeof (sli4_req_common_set_features_t),
+ NULL);
+ }
+
+ cmd = (sli4_req_common_set_features_t *)((uint8_t *)buf + sli_config_off);
+
+ cmd->hdr.opcode = SLI4_OPC_COMMON_SET_FEATURES;
+ cmd->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
+ cmd->hdr.request_length = sizeof(sli4_req_common_set_features_t) - sizeof(sli4_req_hdr_t);
+ cmd->hdr.timeout = 0;
+ cmd->hdr.version = 0;
+
+ cmd->feature = feature;
+ cmd->param_len = param_len;
+ ocs_memcpy(cmd->params, parameter, param_len);
+
+ return(sli_config_off + sizeof(sli4_req_common_set_features_t));
+}
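+
+/*
+ * Usage sketch (illustrative only; SLI4_SET_FEATURES_XXX is a hypothetical
+ * feature code): the parameter is copied verbatim into the command's
+ * dword-aligned params area, hence the multiple-of-4 length requirement:
+ *
+ *	uint32_t param = 1;
+ *	sli_cmd_common_set_features(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
+ *				    SLI4_SET_FEATURES_XXX, sizeof(param), &param);
+ */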
+
+/**
+ * @ingroup sli
+ * @brief Write a COMMON_GET_PROFILE_CONFIG command.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the command.
+ * @param size Buffer size in bytes.
+ * @param dma DMA capable memory used to retrieve profile.
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_common_get_profile_config(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma)
+{
+ sli4_req_common_get_profile_config_t *req = NULL;
+ uint32_t sli_config_off = 0;
+ uint32_t payload_size;
+
+ if (SLI4_PORT_TYPE_FC == sli4->port_type) {
+ sli_config_off = sli_cmd_sli_config(sli4, buf, size,
+ sizeof (sli4_req_common_get_profile_config_t),
+ dma);
+ }
+
+ if (dma != NULL) {
+ req = dma->virt;
+ ocs_memset(req, 0, dma->size);
+ payload_size = dma->size;
+ } else {
+ req = (sli4_req_common_get_profile_config_t *)((uint8_t *)buf + sli_config_off);
+ payload_size = sizeof(sli4_req_common_get_profile_config_t);
+ }
+
+ req->hdr.opcode = SLI4_OPC_COMMON_GET_PROFILE_CONFIG;
+ req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
+ req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t);
+ req->hdr.version = 1;
+
+ return(sli_config_off + sizeof(sli4_req_common_get_profile_config_t));
+}
+
+/**
+ * @ingroup sli
+ * @brief Write a COMMON_SET_PROFILE_CONFIG command.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the command.
+ * @param size Buffer size, in bytes.
+ * @param dma DMA capable memory containing profile.
+ * @param profile_id Profile ID to configure.
+ * @param descriptor_count Number of descriptors in DMA buffer.
+ * @param isap Implicit Set Active Profile value to use.
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_common_set_profile_config(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma,
+ uint8_t profile_id, uint32_t descriptor_count, uint8_t isap)
+{
+ sli4_req_common_set_profile_config_t *req = NULL;
+ uint32_t cmd_off = 0;
+ uint32_t payload_size;
+
+ if (SLI4_PORT_TYPE_FC == sli4->port_type) {
+ cmd_off = sli_cmd_sli_config(sli4, buf, size,
+ sizeof (sli4_req_common_set_profile_config_t),
+ dma);
+ }
+
+ if (dma != NULL) {
+ req = dma->virt;
+ ocs_memset(req, 0, dma->size);
+ payload_size = dma->size;
+ } else {
+ req = (sli4_req_common_set_profile_config_t *)((uint8_t *)buf + cmd_off);
+ payload_size = sizeof(sli4_req_common_set_profile_config_t);
+ }
+
+ req->hdr.opcode = SLI4_OPC_COMMON_SET_PROFILE_CONFIG;
+ req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
+ req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t);
+ req->hdr.version = 1;
+ req->profile_id = profile_id;
+ req->desc_count = descriptor_count;
+ req->isap = isap;
+
+ return(cmd_off + sizeof(sli4_req_common_set_profile_config_t));
+}
+
+/**
+ * @ingroup sli
+ * @brief Write a COMMON_GET_PROFILE_LIST command.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the command.
+ * @param size Buffer size in bytes.
+ * @param start_profile_index First profile index to return.
+ * @param dma Buffer into which the list will be written.
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_common_get_profile_list(sli4_t *sli4, void *buf, size_t size,
+ uint32_t start_profile_index, ocs_dma_t *dma)
+{
+ sli4_req_common_get_profile_list_t *req = NULL;
+ uint32_t cmd_off = 0;
+ uint32_t payload_size;
+
+ if (SLI4_PORT_TYPE_FC == sli4->port_type) {
+ cmd_off = sli_cmd_sli_config(sli4, buf, size,
+ sizeof (sli4_req_common_get_profile_list_t),
+ dma);
+ }
+
+ if (dma != NULL) {
+ req = dma->virt;
+ ocs_memset(req, 0, dma->size);
+ payload_size = dma->size;
+ } else {
+ req = (sli4_req_common_get_profile_list_t *)((uint8_t *)buf + cmd_off);
+ payload_size = sizeof(sli4_req_common_get_profile_list_t);
+ }
+
+ req->hdr.opcode = SLI4_OPC_COMMON_GET_PROFILE_LIST;
+ req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
+ req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t);
+ req->hdr.version = 0;
+
+ req->start_profile_index = start_profile_index;
+
+ return(cmd_off + sizeof(sli4_req_common_get_profile_list_t));
+}
+
+/**
+ * @ingroup sli
+ * @brief Write a COMMON_GET_ACTIVE_PROFILE command.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the command.
+ * @param size Buffer size in bytes.
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_common_get_active_profile(sli4_t *sli4, void *buf, size_t size)
+{
+ sli4_req_common_get_active_profile_t *req = NULL;
+ uint32_t cmd_off = 0;
+ uint32_t payload_size;
+
+ /* Payload length must accommodate both request and response */
+ payload_size = max(sizeof(sli4_req_common_get_active_profile_t),
+ sizeof(sli4_res_common_get_active_profile_t));
+
+ if (SLI4_PORT_TYPE_FC == sli4->port_type) {
+ cmd_off = sli_cmd_sli_config(sli4, buf, size,
+ payload_size,
+ NULL);
+ }
+
+ req = (sli4_req_common_get_active_profile_t *)
+ ((uint8_t*)buf + cmd_off);
+
+ req->hdr.opcode = SLI4_OPC_COMMON_GET_ACTIVE_PROFILE;
+ req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
+ req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t);
+ req->hdr.version = 0;
+
+ return(cmd_off + sizeof(sli4_req_common_get_active_profile_t));
+}
+
+/**
+ * @ingroup sli
+ * @brief Write a COMMON_SET_ACTIVE_PROFILE command.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the command.
+ * @param size Buffer size in bytes.
+ * @param fd If non-zero, set profile to factory default.
+ * @param active_profile_id ID of new active profile.
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_common_set_active_profile(sli4_t *sli4, void *buf, size_t size,
+ uint32_t fd, uint32_t active_profile_id)
+{
+ sli4_req_common_set_active_profile_t *req = NULL;
+ uint32_t cmd_off = 0;
+ uint32_t payload_size;
+
+ /* Payload length must accommodate both request and response */
+ payload_size = max(sizeof(sli4_req_common_set_active_profile_t),
+ sizeof(sli4_res_common_set_active_profile_t));
+
+ if (SLI4_PORT_TYPE_FC == sli4->port_type) {
+ cmd_off = sli_cmd_sli_config(sli4, buf, size,
+ payload_size,
+ NULL);
+ }
+
+ req = (sli4_req_common_set_active_profile_t *)
+ ((uint8_t*)buf + cmd_off);
+
+ req->hdr.opcode = SLI4_OPC_COMMON_SET_ACTIVE_PROFILE;
+ req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
+ req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t);
+ req->hdr.version = 0;
+ req->fd = fd;
+ req->active_profile_id = active_profile_id;
+
+ return(cmd_off + sizeof(sli4_req_common_set_active_profile_t));
+}
+
+/**
+ * @ingroup sli
+ * @brief Write a COMMON_GET_RECONFIG_LINK_INFO command.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the command.
+ * @param size Buffer size in bytes.
+ * @param dma Buffer in which to store the supported link configuration modes reported by the physical device.
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_common_get_reconfig_link_info(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma)
+{
+ sli4_req_common_get_reconfig_link_info_t *req = NULL;
+ uint32_t cmd_off = 0;
+ uint32_t payload_size;
+
+ /* Payload length must accommodate both request and response */
+ payload_size = max(sizeof(sli4_req_common_get_reconfig_link_info_t),
+ sizeof(sli4_res_common_get_reconfig_link_info_t));
+
+ if (SLI4_PORT_TYPE_FC == sli4->port_type) {
+ cmd_off = sli_cmd_sli_config(sli4, buf, size,
+ payload_size,
+ dma);
+ }
+
+ if (dma != NULL) {
+ req = dma->virt;
+ ocs_memset(req, 0, dma->size);
+ payload_size = dma->size;
+ } else {
+ req = (sli4_req_common_get_reconfig_link_info_t *)((uint8_t *)buf + cmd_off);
+ payload_size = sizeof(sli4_req_common_get_reconfig_link_info_t);
+ }
+
+ req->hdr.opcode = SLI4_OPC_COMMON_GET_RECONFIG_LINK_INFO;
+ req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
+ req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t);
+ req->hdr.version = 0;
+
+ return(cmd_off + sizeof(sli4_req_common_get_reconfig_link_info_t));
+}
+
+/**
+ * @ingroup sli
+ * @brief Write a COMMON_SET_RECONFIG_LINK_ID command.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the command.
+ * @param size Buffer size, in bytes.
+ * @param dma Buffer carrying the link configuration mode that is to become active on the physical device.
+ * @param fd If non-zero, set the link configuration to the factory default.
+ * @param active_link_config_id ID of the new active link configuration.
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_common_set_reconfig_link_id(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma,
+ uint32_t fd, uint32_t active_link_config_id)
+{
+ sli4_req_common_set_reconfig_link_id_t *req = NULL;
+ uint32_t cmd_off = 0;
+ uint32_t payload_size;
+
+ /* Payload length must accommodate both request and response */
+ payload_size = max(sizeof(sli4_req_common_set_reconfig_link_id_t),
+ sizeof(sli4_res_common_set_reconfig_link_id_t));
+
+ if (SLI4_PORT_TYPE_FC == sli4->port_type) {
+ cmd_off = sli_cmd_sli_config(sli4, buf, size,
+ payload_size,
+ NULL);
+ }
+
+ if (dma != NULL) {
+ req = dma->virt;
+ ocs_memset(req, 0, dma->size);
+ payload_size = dma->size;
+ } else {
+ req = (sli4_req_common_set_reconfig_link_id_t *)((uint8_t *)buf + cmd_off);
+ payload_size = sizeof(sli4_req_common_set_reconfig_link_id_t);
+ }
+
+ req->hdr.opcode = SLI4_OPC_COMMON_SET_RECONFIG_LINK_ID;
+ req->hdr.subsystem = SLI4_SUBSYSTEM_COMMON;
+ req->hdr.request_length = payload_size - sizeof(sli4_req_hdr_t);
+ req->hdr.version = 0;
+ req->fd = fd;
+ req->next_link_config_id = active_link_config_id;
+
+ return(cmd_off + sizeof(sli4_req_common_set_reconfig_link_id_t));
+}
+
+
+/**
+ * @ingroup sli
+ * @brief Check the mailbox/queue completion entry.
+ *
+ * @param buf Pointer to the MCQE.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+int32_t
+sli_cqe_mq(void *buf)
+{
+ sli4_mcqe_t *mcqe = buf;
+
+ /*
+ * Firmware can split mbx completions into two MCQEs: first with only
+ * the "consumed" bit set and a second with the "complete" bit set.
+ * Thus, ignore MCQE unless "complete" is set.
+ */
+ if (!mcqe->cmp) {
+ return -2;
+ }
+
+ if (mcqe->completion_status) {
+ ocs_log_debug(NULL, "bad status (cmpl=%#x ext=%#x con=%d cmp=%d ae=%d val=%d)\n",
+ mcqe->completion_status,
+ mcqe->extended_status,
+ mcqe->con,
+ mcqe->cmp,
+ mcqe->ae,
+ mcqe->val);
+ }
+
+ return mcqe->completion_status;
+}
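+
+/*
+ * Caller sketch (illustrative only): a return of -2 means "consumed but not
+ * complete", so a polling loop should skip the entry rather than treat it
+ * as an error:
+ *
+ *	status = sli_cqe_mq(cqe);
+ *	if (status == -2)
+ *		continue;	(wait for the MCQE with cmp set)
+ */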
+
+/**
+ * @ingroup sli
+ * @brief Check the asynchronous event completion entry.
+ *
+ * @param sli4 SLI context.
+ * @param buf Pointer to the ACQE.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+int32_t
+sli_cqe_async(sli4_t *sli4, void *buf)
+{
+ sli4_acqe_t *acqe = buf;
+ int32_t rc = -1;
+
+ if (!sli4 || !buf) {
+ ocs_log_err(NULL, "bad parameter sli4=%p buf=%p\n", sli4, buf);
+ return -1;
+ }
+
+ switch (acqe->event_code) {
+ case SLI4_ACQE_EVENT_CODE_LINK_STATE:
+ rc = sli_fc_process_link_state(sli4, buf);
+ break;
+ case SLI4_ACQE_EVENT_CODE_FCOE_FIP:
+ rc = sli_fc_process_fcoe(sli4, buf);
+ break;
+ case SLI4_ACQE_EVENT_CODE_GRP_5:
+ /*TODO*/ocs_log_debug(sli4->os, "ACQE GRP5\n");
+ break;
+ case SLI4_ACQE_EVENT_CODE_SLI_PORT_EVENT:
+		ocs_log_debug(sli4->os, "ACQE SLI Port, type=0x%x, data1,2=0x%08x,0x%08x\n",
+ acqe->event_type, acqe->event_data[0], acqe->event_data[1]);
+#if defined(OCS_INCLUDE_DEBUG)
+ ocs_dump32(OCS_DEBUG_ALWAYS, sli4->os, "acq", acqe, sizeof(*acqe));
+#endif
+ break;
+ case SLI4_ACQE_EVENT_CODE_FC_LINK_EVENT:
+ rc = sli_fc_process_link_attention(sli4, buf);
+ break;
+ default:
+ /*TODO*/ocs_log_test(sli4->os, "ACQE unknown=%#x\n", acqe->event_code);
+ }
+
+ return rc;
+}
+
+/**
+ * @brief Check the SLI_CONFIG response.
+ *
+ * @par Description
+ * Function checks the SLI_CONFIG response and the payload status.
+ *
+ * @param buf Pointer to SLI_CONFIG response.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+static int32_t
+sli_res_sli_config(void *buf)
+{
+ sli4_cmd_sli_config_t *sli_config = buf;
+
+ if (!buf || (SLI4_MBOX_COMMAND_SLI_CONFIG != sli_config->hdr.command)) {
+ ocs_log_err(NULL, "bad parameter buf=%p cmd=%#x\n", buf,
+ buf ? sli_config->hdr.command : -1);
+ return -1;
+ }
+
+ if (sli_config->hdr.status) {
+ return sli_config->hdr.status;
+ }
+
+ if (sli_config->emb) {
+ return sli_config->payload.embed[4];
+ } else {
+ ocs_log_test(NULL, "external buffers not supported\n");
+ return -1;
+ }
+}
+
+/**
+ * @brief Issue a COMMON_FUNCTION_RESET command.
+ *
+ * @param sli4 SLI context.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+static int32_t
+sli_common_function_reset(sli4_t *sli4)
+{
+ if (sli_cmd_common_function_reset(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) {
+ if (sli_bmbx_command(sli4)) {
+ ocs_log_crit(sli4->os, "bootstrap mailbox write fail (COM_FUNC_RESET)\n");
+ return -1;
+ }
+ if (sli_res_sli_config(sli4->bmbx.virt)) {
+ ocs_log_err(sli4->os, "bad status COM_FUNC_RESET\n");
+ return -1;
+ }
+ } else {
+ ocs_log_err(sli4->os, "bad COM_FUNC_RESET write\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+
+/**
+ * @brief Check to see if the firmware is ready.
+ *
+ * @par Description
+ * Based on the <i>SLI-4 Architecture Specification, Revision 4.x0-13 (2012)</i>.
+ *
+ * @param sli4 SLI context.
+ * @param timeout_ms Time, in milliseconds, to wait for the port to be ready
+ * before failing.
+ *
+ * @return Returns TRUE for ready, or FALSE otherwise.
+ */
+static int32_t
+sli_wait_for_fw_ready(sli4_t *sli4, uint32_t timeout_ms)
+{
+ uint32_t iter = timeout_ms / (SLI4_INIT_PORT_DELAY_US / 1000);
+ uint32_t ready = FALSE;
+
+ do {
+ iter--;
+ ocs_udelay(SLI4_INIT_PORT_DELAY_US);
+ if (sli_fw_ready(sli4) == 1) {
+ ready = TRUE;
+ }
+ } while (!ready && (iter > 0));
+
+ return ready;
+}
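+
+/*
+ * Worked example (illustrative; assumes SLI4_INIT_PORT_DELAY_US == 10000):
+ * with timeout_ms == 3000, iter = 3000 / 10 = 300, so the loop polls up to
+ * 300 times with a 10 ms delay per iteration.
+ */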
+
+/**
+ * @brief Initialize the firmware.
+ *
+ * @par Description
+ * Based on the <i>SLI-4 Architecture Specification, Revision 4.x0-13 (2012)</i>.
+ *
+ * @param sli4 SLI context.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+static int32_t
+sli_fw_init(sli4_t *sli4)
+{
+ uint32_t ready;
+ uint32_t endian;
+
+ /*
+ * Is firmware ready for operation?
+ */
+ ready = sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC);
+ if (!ready) {
+ ocs_log_crit(sli4->os, "FW status is NOT ready\n");
+ return -1;
+ }
+
+ /*
+ * Reset port to a known state
+ */
+ switch (sli4->if_type) {
+ case SLI4_IF_TYPE_BE3_SKH_PF:
+ case SLI4_IF_TYPE_BE3_SKH_VF:
+ /* No SLIPORT_CONTROL register so use command sequence instead */
+ if (sli_bmbx_wait(sli4, SLI4_BMBX_DELAY_US)) {
+ ocs_log_crit(sli4->os, "bootstrap mailbox not ready\n");
+ return -1;
+ }
+
+ if (sli_cmd_fw_initialize(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) {
+ if (sli_bmbx_command(sli4)) {
+ ocs_log_crit(sli4->os, "bootstrap mailbox write fail (FW_INIT)\n");
+ return -1;
+ }
+ } else {
+ ocs_log_crit(sli4->os, "bad FW_INIT write\n");
+ return -1;
+ }
+
+ if (sli_common_function_reset(sli4)) {
+ ocs_log_err(sli4->os, "bad COM_FUNC_RESET write\n");
+ return -1;
+ }
+ break;
+ case SLI4_IF_TYPE_LANCER_FC_ETH:
+#if BYTE_ORDER == LITTLE_ENDIAN
+ endian = SLI4_SLIPORT_CONTROL_LITTLE_ENDIAN;
+#else
+ endian = SLI4_SLIPORT_CONTROL_BIG_ENDIAN;
+#endif
+
+ if (sli_sliport_control(sli4, endian))
+ return -1;
+ break;
+ default:
+ ocs_log_test(sli4->os, "if_type %d not supported\n", sli4->if_type);
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * @brief Terminate the firmware.
+ *
+ * @param sli4 SLI context.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+static int32_t
+sli_fw_term(sli4_t *sli4)
+{
+ uint32_t endian;
+
+ if (sli4->if_type == SLI4_IF_TYPE_BE3_SKH_PF ||
+ sli4->if_type == SLI4_IF_TYPE_BE3_SKH_VF) {
+ /* No SLIPORT_CONTROL register so use command sequence instead */
+ if (sli_bmbx_wait(sli4, SLI4_BMBX_DELAY_US)) {
+ ocs_log_crit(sli4->os, "bootstrap mailbox not ready\n");
+ return -1;
+ }
+
+ if (sli_common_function_reset(sli4)) {
+ ocs_log_err(sli4->os, "bad COM_FUNC_RESET write\n");
+ return -1;
+ }
+
+ if (sli_cmd_fw_deinitialize(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) {
+ if (sli_bmbx_command(sli4)) {
+ ocs_log_crit(sli4->os, "bootstrap mailbox write fail (FW_DEINIT)\n");
+ return -1;
+ }
+ } else {
+ ocs_log_test(sli4->os, "bad FW_DEINIT write\n");
+ return -1;
+ }
+ } else {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ endian = SLI4_SLIPORT_CONTROL_LITTLE_ENDIAN;
+#else
+ endian = SLI4_SLIPORT_CONTROL_BIG_ENDIAN;
+#endif
+ /* type 2 etc. use SLIPORT_CONTROL to initialize port */
+ sli_sliport_control(sli4, endian);
+ }
+ return 0;
+}
+
+/**
+ * @brief Write the doorbell register associated with the queue object.
+ *
+ * @param sli4 SLI context.
+ * @param q Queue object.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+static int32_t
+sli_queue_doorbell(sli4_t *sli4, sli4_queue_t *q)
+{
+ uint32_t val = 0;
+
+ switch (q->type) {
+ case SLI_QTYPE_EQ:
+ val = sli_eq_doorbell(q->n_posted, q->id, FALSE);
+ ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val);
+ break;
+ case SLI_QTYPE_CQ:
+ val = sli_cq_doorbell(q->n_posted, q->id, FALSE);
+ ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val);
+ break;
+ case SLI_QTYPE_MQ:
+ val = SLI4_MQ_DOORBELL(q->n_posted, q->id);
+ ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val);
+ break;
+ case SLI_QTYPE_RQ:
+ {
+ uint32_t n_posted = q->n_posted;
+ /*
+ * FC/FCoE has different rules for Receive Queues. The host
+ * should only update the doorbell of the RQ-pair containing
+ * the headers since the header / payload RQs are treated
+ * as a matched unit.
+ */
+ if (SLI4_PORT_TYPE_FC == sli4->port_type) {
+ /*
+ * In RQ-pair, an RQ either contains the FC header
+ * (i.e. is_hdr == TRUE) or the payload.
+ *
+ * Don't ring doorbell for payload RQ
+ */
+ if (!q->u.flag.is_hdr) {
+ break;
+ }
+ /*
+ * Some RQ cannot be incremented one entry at a time. Instead,
+ * the driver collects a number of entries and updates the
+ * RQ in batches.
+ */
+ if (q->u.flag.rq_batch) {
+ if (((q->index + q->n_posted) % SLI4_QUEUE_RQ_BATCH)) {
+ break;
+ }
+ n_posted = SLI4_QUEUE_RQ_BATCH;
+ }
+ }
+
+ val = SLI4_RQ_DOORBELL(n_posted, q->id);
+ ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val);
+ break;
+ }
+ case SLI_QTYPE_WQ:
+ val = SLI4_WQ_DOORBELL(q->n_posted, q->index, q->id);
+ ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val);
+ break;
+ default:
+ ocs_log_test(sli4->os, "bad queue type %d\n", q->type);
+ return -1;
+ }
+
+ return 0;
+}
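+
+/*
+ * Worked example (illustrative; assumes SLI4_QUEUE_RQ_BATCH == 8): with
+ * rq_batch set, posting header-RQ entries one at a time rings the doorbell
+ * only when (index + n_posted) reaches a multiple of 8, crediting the full
+ * batch of 8 entries in a single register write.
+ */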
+
+static int32_t
+sli_request_features(sli4_t *sli4, sli4_features_t *features, uint8_t query)
+{
+ if (sli_cmd_request_features(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
+ *features, query)) {
+ sli4_cmd_request_features_t *req_features = sli4->bmbx.virt;
+
+ if (sli_bmbx_command(sli4)) {
+ ocs_log_crit(sli4->os, "bootstrap mailbox write fail (REQUEST_FEATURES)\n");
+ return -1;
+ }
+ if (req_features->hdr.status) {
+ ocs_log_err(sli4->os, "REQUEST_FEATURES bad status %#x\n",
+ req_features->hdr.status);
+ return -1;
+ }
+ features->dword = req_features->response.dword;
+ } else {
+ ocs_log_err(sli4->os, "bad REQUEST_FEATURES write\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * @brief Calculate max queue entries.
+ *
+ * @param sli4 SLI context.
+ *
+ * @return None.
+ */
+void
+sli_calc_max_qentries(sli4_t *sli4)
+{
+ sli4_qtype_e q;
+ uint32_t alloc_size, qentries, qentry_size;
+
+ for (q = SLI_QTYPE_EQ; q < SLI_QTYPE_MAX; q++) {
+ sli4->config.max_qentries[q] = sli_convert_mask_to_count(sli4->config.count_method[q],
+ sli4->config.count_mask[q]);
+ }
+
+	/* A single, contiguous DMA allocation of size
+	 * (max_qentries * queue entry size) will be made for each queue;
+	 * since these can be large, check against the OS max DMA allocation size.
+	 */
+ for (q = SLI_QTYPE_EQ; q < SLI_QTYPE_MAX; q++) {
+ qentries = sli4->config.max_qentries[q];
+ qentry_size = sli_get_queue_entry_size(sli4, q);
+ alloc_size = qentries * qentry_size;
+ if (alloc_size > ocs_max_dma_alloc(sli4->os, SLI_PAGE_SIZE)) {
+ while (alloc_size > ocs_max_dma_alloc(sli4->os, SLI_PAGE_SIZE)) {
+				/* cut the qentries in half until alloc_size <= max DMA alloc size */
+ qentries >>= 1;
+ alloc_size = qentries * qentry_size;
+ }
+ ocs_log_debug(sli4->os, "[%s]: max_qentries from %d to %d (max dma %d)\n",
+ SLI_QNAME[q], sli4->config.max_qentries[q],
+ qentries, ocs_max_dma_alloc(sli4->os, SLI_PAGE_SIZE));
+ sli4->config.max_qentries[q] = qentries;
+ }
+ }
+}
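+
+/*
+ * Worked example (illustrative): if the OS caps DMA allocations at 256 KiB
+ * and a queue reports 16384 max entries of 64 bytes each (1 MiB), the loop
+ * halves the entry count twice and settles on 4096 entries (256 KiB).
+ */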
+
+/**
+ * @brief Issue a FW_CONFIG mailbox command and store the results.
+ *
+ * @param sli4 SLI context.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+static int32_t
+sli_query_fw_config(sli4_t *sli4)
+{
+ /*
+ * Read the device configuration
+ *
+ * Note: Only ulp0 fields contain values
+ */
+ if (sli_cmd_common_query_fw_config(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) {
+ sli4_res_common_query_fw_config_t *fw_config =
+ (sli4_res_common_query_fw_config_t *)
+ (((uint8_t *)sli4->bmbx.virt) + offsetof(sli4_cmd_sli_config_t, payload.embed));
+
+ if (sli_bmbx_command(sli4)) {
+ ocs_log_crit(sli4->os, "bootstrap mailbox write fail (QUERY_FW_CONFIG)\n");
+ return -1;
+ }
+ if (fw_config->hdr.status) {
+ ocs_log_err(sli4->os, "COMMON_QUERY_FW_CONFIG bad status %#x\n",
+ fw_config->hdr.status);
+ return -1;
+ }
+
+ sli4->physical_port = fw_config->physical_port;
+ sli4->config.dual_ulp_capable = ((fw_config->function_mode & SLI4_FUNCTION_MODE_DUA_MODE) == 0 ? 0 : 1);
+ sli4->config.is_ulp_fc[0] = ((fw_config->ulp0_mode &
+ (SLI4_ULP_MODE_FCOE_INI |
+ SLI4_ULP_MODE_FCOE_TGT)) == 0 ? 0 : 1);
+ sli4->config.is_ulp_fc[1] = ((fw_config->ulp1_mode &
+ (SLI4_ULP_MODE_FCOE_INI |
+ SLI4_ULP_MODE_FCOE_TGT)) == 0 ? 0 : 1);
+
+ if (sli4->config.dual_ulp_capable) {
+ /*
+ * Lancer will not support this, so we use the values
+ * from the READ_CONFIG.
+ */
+ if (sli4->config.is_ulp_fc[0] &&
+ sli4->config.is_ulp_fc[1]) {
+ sli4->config.max_qcount[SLI_QTYPE_WQ] = fw_config->ulp0_toe_wq_total + fw_config->ulp1_toe_wq_total;
+ sli4->config.max_qcount[SLI_QTYPE_RQ] = fw_config->ulp0_toe_defrq_total + fw_config->ulp1_toe_defrq_total;
+ } else if (sli4->config.is_ulp_fc[0]) {
+ sli4->config.max_qcount[SLI_QTYPE_WQ] = fw_config->ulp0_toe_wq_total;
+ sli4->config.max_qcount[SLI_QTYPE_RQ] = fw_config->ulp0_toe_defrq_total;
+ } else {
+ sli4->config.max_qcount[SLI_QTYPE_WQ] = fw_config->ulp1_toe_wq_total;
+ sli4->config.max_qcount[SLI_QTYPE_RQ] = fw_config->ulp1_toe_defrq_total;
+ }
+ }
+ } else {
+ ocs_log_err(sli4->os, "bad QUERY_FW_CONFIG write\n");
+ return -1;
+ }
+ return 0;
+}
+
+
+static int32_t
+sli_get_config(sli4_t *sli4)
+{
+ ocs_dma_t get_cntl_addl_data;
+
+ /*
+ * Read the device configuration
+ */
+ if (sli_cmd_read_config(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) {
+ sli4_res_read_config_t *read_config = sli4->bmbx.virt;
+ uint32_t i;
+ uint32_t total;
+
+ if (sli_bmbx_command(sli4)) {
+ ocs_log_crit(sli4->os, "bootstrap mailbox write fail (READ_CONFIG)\n");
+ return -1;
+ }
+ if (read_config->hdr.status) {
+ ocs_log_err(sli4->os, "READ_CONFIG bad status %#x\n",
+ read_config->hdr.status);
+ return -1;
+ }
+
+ sli4->config.has_extents = read_config->ext;
+ if (FALSE == sli4->config.has_extents) {
+ uint32_t i = 0;
+ uint32_t *base = sli4->config.extent[0].base;
+
+ if (!base) {
+ if (NULL == (base = ocs_malloc(sli4->os, SLI_RSRC_MAX * sizeof(uint32_t),
+ OCS_M_ZERO | OCS_M_NOWAIT))) {
+ ocs_log_err(sli4->os, "memory allocation failed for sli4_resource_t\n");
+ return -1;
+ }
+ }
+
+ for (i = 0; i < SLI_RSRC_MAX; i++) {
+ sli4->config.extent[i].number = 1;
+ sli4->config.extent[i].n_alloc = 0;
+ sli4->config.extent[i].base = &base[i];
+ }
+
+ sli4->config.extent[SLI_RSRC_FCOE_VFI].base[0] = read_config->vfi_base;
+ sli4->config.extent[SLI_RSRC_FCOE_VFI].size = read_config->vfi_count;
+
+ sli4->config.extent[SLI_RSRC_FCOE_VPI].base[0] = read_config->vpi_base;
+ sli4->config.extent[SLI_RSRC_FCOE_VPI].size = read_config->vpi_count;
+
+ sli4->config.extent[SLI_RSRC_FCOE_RPI].base[0] = read_config->rpi_base;
+ sli4->config.extent[SLI_RSRC_FCOE_RPI].size = read_config->rpi_count;
+
+ sli4->config.extent[SLI_RSRC_FCOE_XRI].base[0] = read_config->xri_base;
+ sli4->config.extent[SLI_RSRC_FCOE_XRI].size = read_config->xri_count;
+
+ sli4->config.extent[SLI_RSRC_FCOE_FCFI].base[0] = 0;
+ sli4->config.extent[SLI_RSRC_FCOE_FCFI].size = read_config->fcfi_count;
+ } else {
+ /* TODO extents*/
+ ;
+ }
+
+ for (i = 0; i < SLI_RSRC_MAX; i++) {
+ total = sli4->config.extent[i].number * sli4->config.extent[i].size;
+ sli4->config.extent[i].use_map = ocs_bitmap_alloc(total);
+ if (NULL == sli4->config.extent[i].use_map) {
+ ocs_log_err(sli4->os, "bitmap memory allocation failed "
+ "resource %d\n", i);
+ return -1;
+ }
+ sli4->config.extent[i].map_size = total;
+ }
+
+ sli4->config.topology = read_config->topology;
+ switch (sli4->config.topology) {
+ case SLI4_READ_CFG_TOPO_FCOE:
+ ocs_log_debug(sli4->os, "FCoE\n");
+ break;
+ case SLI4_READ_CFG_TOPO_FC:
+ ocs_log_debug(sli4->os, "FC (unknown)\n");
+ break;
+ case SLI4_READ_CFG_TOPO_FC_DA:
+ ocs_log_debug(sli4->os, "FC (direct attach)\n");
+ break;
+ case SLI4_READ_CFG_TOPO_FC_AL:
+ ocs_log_debug(sli4->os, "FC (arbitrated loop)\n");
+ break;
+ default:
+ ocs_log_test(sli4->os, "bad topology %#x\n", sli4->config.topology);
+ }
+
+ sli4->config.e_d_tov = read_config->e_d_tov;
+ sli4->config.r_a_tov = read_config->r_a_tov;
+
+ sli4->config.link_module_type = read_config->lmt;
+
+ sli4->config.max_qcount[SLI_QTYPE_EQ] = read_config->eq_count;
+ sli4->config.max_qcount[SLI_QTYPE_CQ] = read_config->cq_count;
+ sli4->config.max_qcount[SLI_QTYPE_WQ] = read_config->wq_count;
+ sli4->config.max_qcount[SLI_QTYPE_RQ] = read_config->rq_count;
+
+ /*
+ * READ_CONFIG doesn't give the max number of MQ. Applications
+ * will typically want 1, but we may need another at some future
+ * date. Dummy up a "max" MQ count here.
+ */
+ sli4->config.max_qcount[SLI_QTYPE_MQ] = SLI_USER_MQ_COUNT;
+ } else {
+ ocs_log_err(sli4->os, "bad READ_CONFIG write\n");
+ return -1;
+ }
+
+ if (sli_cmd_common_get_sli4_parameters(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) {
+ sli4_res_common_get_sli4_parameters_t *parms = (sli4_res_common_get_sli4_parameters_t *)
+ (((uint8_t *)sli4->bmbx.virt) + offsetof(sli4_cmd_sli_config_t, payload.embed));
+
+ if (sli_bmbx_command(sli4)) {
+ ocs_log_crit(sli4->os, "bootstrap mailbox write fail (COMMON_GET_SLI4_PARAMETERS)\n");
+ return -1;
+ } else if (parms->hdr.status) {
+ ocs_log_err(sli4->os, "COMMON_GET_SLI4_PARAMETERS bad status %#x att'l %#x\n",
+ parms->hdr.status, parms->hdr.additional_status);
+ return -1;
+ }
+
+ sli4->config.auto_reg = parms->areg;
+ sli4->config.auto_xfer_rdy = parms->agxf;
+ sli4->config.hdr_template_req = parms->hdrr;
+ sli4->config.t10_dif_inline_capable = parms->timm;
+ sli4->config.t10_dif_separate_capable = parms->tsmm;
+
+ sli4->config.mq_create_version = parms->mqv;
+ sli4->config.cq_create_version = parms->cqv;
+ sli4->config.rq_min_buf_size = parms->min_rq_buffer_size;
+ sli4->config.rq_max_buf_size = parms->max_rq_buffer_size;
+
+ sli4->config.qpage_count[SLI_QTYPE_EQ] = parms->eq_page_cnt;
+ sli4->config.qpage_count[SLI_QTYPE_CQ] = parms->cq_page_cnt;
+ sli4->config.qpage_count[SLI_QTYPE_MQ] = parms->mq_page_cnt;
+ sli4->config.qpage_count[SLI_QTYPE_WQ] = parms->wq_page_cnt;
+ sli4->config.qpage_count[SLI_QTYPE_RQ] = parms->rq_page_cnt;
+
+ /* save count methods and masks for each queue type */
+ sli4->config.count_mask[SLI_QTYPE_EQ] = parms->eqe_count_mask;
+ sli4->config.count_method[SLI_QTYPE_EQ] = parms->eqe_count_method;
+ sli4->config.count_mask[SLI_QTYPE_CQ] = parms->cqe_count_mask;
+ sli4->config.count_method[SLI_QTYPE_CQ] = parms->cqe_count_method;
+ sli4->config.count_mask[SLI_QTYPE_MQ] = parms->mqe_count_mask;
+ sli4->config.count_method[SLI_QTYPE_MQ] = parms->mqe_count_method;
+ sli4->config.count_mask[SLI_QTYPE_WQ] = parms->wqe_count_mask;
+ sli4->config.count_method[SLI_QTYPE_WQ] = parms->wqe_count_method;
+ sli4->config.count_mask[SLI_QTYPE_RQ] = parms->rqe_count_mask;
+ sli4->config.count_method[SLI_QTYPE_RQ] = parms->rqe_count_method;
+
+ /* now calculate max queue entries */
+ sli_calc_max_qentries(sli4);
+
+ sli4->config.max_sgl_pages = parms->sgl_page_cnt; /* max # of pages */
+ sli4->config.sgl_page_sizes = parms->sgl_page_sizes; /* bit map of available sizes */
+ /* ignore HLM here. Use value from REQUEST_FEATURES */
+
+ sli4->config.sge_supported_length = parms->sge_supported_length;
+ if (sli4->config.sge_supported_length > OCS_MAX_SGE_SIZE)
+ sli4->config.sge_supported_length = OCS_MAX_SGE_SIZE;
+
+ sli4->config.sgl_pre_registration_required = parms->sglr;
+ /* default to using pre-registered SGL's */
+ sli4->config.sgl_pre_registered = TRUE;
+
+ sli4->config.perf_hint = parms->phon;
+ sli4->config.perf_wq_id_association = parms->phwq;
+
+ sli4->config.rq_batch = parms->rq_db_window;
+
+ /* save the fields for skyhawk SGL chaining */
+ sli4->config.sgl_chaining_params.chaining_capable =
+ (parms->sglc == 1);
+ sli4->config.sgl_chaining_params.frag_num_field_offset =
+ parms->frag_num_field_offset;
+ sli4->config.sgl_chaining_params.frag_num_field_mask =
+ (1ull << parms->frag_num_field_size) - 1;
+ sli4->config.sgl_chaining_params.sgl_index_field_offset =
+ parms->sgl_index_field_offset;
+ sli4->config.sgl_chaining_params.sgl_index_field_mask =
+ (1ull << parms->sgl_index_field_size) - 1;
+ sli4->config.sgl_chaining_params.chain_sge_initial_value_lo =
+ parms->chain_sge_initial_value_lo;
+ sli4->config.sgl_chaining_params.chain_sge_initial_value_hi =
+ parms->chain_sge_initial_value_hi;
+
+ /* Use the highest available WQE size. */
+ if (parms->wqe_sizes & SLI4_128BYTE_WQE_SUPPORT) {
+ sli4->config.wqe_size = SLI4_WQE_EXT_BYTES;
+ } else {
+ sli4->config.wqe_size = SLI4_WQE_BYTES;
+ }
+ }
+
+ if (sli_query_fw_config(sli4)) {
+ ocs_log_err(sli4->os, "Error sending QUERY_FW_CONFIG\n");
+ return -1;
+ }
+
+ sli4->config.port_number = 0;
+
+ /*
+ * Issue COMMON_GET_CNTL_ATTRIBUTES to get port_number. Temporarily
+ * uses VPD DMA buffer as the response won't fit in the embedded
+ * buffer.
+ */
+ if (sli_cmd_common_get_cntl_attributes(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, &sli4->vpd.data)) {
+ sli4_res_common_get_cntl_attributes_t *attr = sli4->vpd.data.virt;
+
+ if (sli_bmbx_command(sli4)) {
+ ocs_log_crit(sli4->os, "bootstrap mailbox write fail (COMMON_GET_CNTL_ATTRIBUTES)\n");
+ return -1;
+ } else if (attr->hdr.status) {
+ ocs_log_err(sli4->os, "COMMON_GET_CNTL_ATTRIBUTES bad status %#x att'l %#x\n",
+ attr->hdr.status, attr->hdr.additional_status);
+ return -1;
+ }
+
+ sli4->config.port_number = attr->port_number;
+
+ ocs_memcpy(sli4->config.bios_version_string, attr->bios_version_string,
+ sizeof(sli4->config.bios_version_string));
+ } else {
+ ocs_log_err(sli4->os, "bad COMMON_GET_CNTL_ATTRIBUTES write\n");
+ return -1;
+ }
+
+ if (ocs_dma_alloc(sli4->os, &get_cntl_addl_data, sizeof(sli4_res_common_get_cntl_addl_attributes_t),
+ OCS_MIN_DMA_ALIGNMENT)) {
+ ocs_log_err(sli4->os, "Failed to allocate memory for GET_CNTL_ADDL_ATTR data\n");
+ } else {
+ if (sli_cmd_common_get_cntl_addl_attributes(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
+ &get_cntl_addl_data)) {
+ sli4_res_common_get_cntl_addl_attributes_t *attr = get_cntl_addl_data.virt;
+
+ if (sli_bmbx_command(sli4)) {
+ ocs_log_crit(sli4->os,
+ "bootstrap mailbox write fail (COMMON_GET_CNTL_ADDL_ATTRIBUTES)\n");
+ ocs_dma_free(sli4->os, &get_cntl_addl_data);
+ return -1;
+ }
+ if (attr->hdr.status) {
+ ocs_log_err(sli4->os, "COMMON_GET_CNTL_ADDL_ATTRIBUTES bad status %#x\n",
+ attr->hdr.status);
+ ocs_dma_free(sli4->os, &get_cntl_addl_data);
+ return -1;
+ }
+
+ ocs_memcpy(sli4->config.ipl_name, attr->ipl_file_name, sizeof(sli4->config.ipl_name));
+
+ ocs_log_debug(sli4->os, "IPL:%s \n", (char*)sli4->config.ipl_name);
+ } else {
+ ocs_log_err(sli4->os, "bad COMMON_GET_CNTL_ADDL_ATTRIBUTES write\n");
+ ocs_dma_free(sli4->os, &get_cntl_addl_data);
+ return -1;
+ }
+
+ ocs_dma_free(sli4->os, &get_cntl_addl_data);
+ }
+
+ if (sli_cmd_common_get_port_name(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) {
+ sli4_res_common_get_port_name_t *port_name = (sli4_res_common_get_port_name_t *)(((uint8_t *)sli4->bmbx.virt) +
+ offsetof(sli4_cmd_sli_config_t, payload.embed));
+
+ if (sli_bmbx_command(sli4)) {
+ ocs_log_crit(sli4->os, "bootstrap mailbox write fail (COMMON_GET_PORT_NAME)\n");
+ return -1;
+ }
+
+ sli4->config.port_name[0] = port_name->port_name[sli4->config.port_number];
+ }
+ sli4->config.port_name[1] = '\0';
+
+ if (sli_cmd_read_rev(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, &sli4->vpd.data)) {
+ sli4_cmd_read_rev_t *read_rev = sli4->bmbx.virt;
+
+ if (sli_bmbx_command(sli4)) {
+ ocs_log_crit(sli4->os, "bootstrap mailbox write fail (READ_REV)\n");
+ return -1;
+ }
+ if (read_rev->hdr.status) {
+ ocs_log_err(sli4->os, "READ_REV bad status %#x\n",
+ read_rev->hdr.status);
+ return -1;
+ }
+
+ sli4->config.fw_rev[0] = read_rev->first_fw_id;
+ ocs_memcpy(sli4->config.fw_name[0],read_rev->first_fw_name, sizeof(sli4->config.fw_name[0]));
+
+ sli4->config.fw_rev[1] = read_rev->second_fw_id;
+ ocs_memcpy(sli4->config.fw_name[1],read_rev->second_fw_name, sizeof(sli4->config.fw_name[1]));
+
+ sli4->config.hw_rev[0] = read_rev->first_hw_revision;
+ sli4->config.hw_rev[1] = read_rev->second_hw_revision;
+ sli4->config.hw_rev[2] = read_rev->third_hw_revision;
+
+ ocs_log_debug(sli4->os, "FW1:%s (%08x) / FW2:%s (%08x)\n",
+ read_rev->first_fw_name, read_rev->first_fw_id,
+ read_rev->second_fw_name, read_rev->second_fw_id);
+
+ ocs_log_debug(sli4->os, "HW1: %08x / HW2: %08x\n", read_rev->first_hw_revision,
+ read_rev->second_hw_revision);
+
+ /* Check that all VPD data was returned */
+ if (read_rev->returned_vpd_length != read_rev->actual_vpd_length) {
+ ocs_log_test(sli4->os, "VPD length: available=%d returned=%d actual=%d\n",
+ read_rev->available_length,
+ read_rev->returned_vpd_length,
+ read_rev->actual_vpd_length);
+ }
+ sli4->vpd.length = read_rev->returned_vpd_length;
+ } else {
+ ocs_log_err(sli4->os, "bad READ_REV write\n");
+ return -1;
+ }
+
+ if (sli_cmd_read_nvparms(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE)) {
+ sli4_cmd_read_nvparms_t *read_nvparms = sli4->bmbx.virt;
+
+ if (sli_bmbx_command(sli4)) {
+ ocs_log_crit(sli4->os, "bootstrap mailbox write fail (READ_NVPARMS)\n");
+ return -1;
+ }
+ if (read_nvparms->hdr.status) {
+ ocs_log_err(sli4->os, "READ_NVPARMS bad status %#x\n",
+ read_nvparms->hdr.status);
+ return -1;
+ }
+
+ ocs_memcpy(sli4->config.wwpn, read_nvparms->wwpn, sizeof(sli4->config.wwpn));
+ ocs_memcpy(sli4->config.wwnn, read_nvparms->wwnn, sizeof(sli4->config.wwnn));
+
+ ocs_log_debug(sli4->os, "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ sli4->config.wwpn[0],
+ sli4->config.wwpn[1],
+ sli4->config.wwpn[2],
+ sli4->config.wwpn[3],
+ sli4->config.wwpn[4],
+ sli4->config.wwpn[5],
+ sli4->config.wwpn[6],
+ sli4->config.wwpn[7]);
+ ocs_log_debug(sli4->os, "WWNN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ sli4->config.wwnn[0],
+ sli4->config.wwnn[1],
+ sli4->config.wwnn[2],
+ sli4->config.wwnn[3],
+ sli4->config.wwnn[4],
+ sli4->config.wwnn[5],
+ sli4->config.wwnn[6],
+ sli4->config.wwnn[7]);
+ } else {
+ ocs_log_err(sli4->os, "bad READ_NVPARMS write\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/****************************************************************************
+ * Public functions
+ */
+
+/**
+ * @ingroup sli
+ * @brief Set up the SLI context.
+ *
+ * @param sli4 SLI context.
+ * @param os Device abstraction.
+ * @param port_type Protocol type of port (for example, FC and NIC).
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+int32_t
+sli_setup(sli4_t *sli4, ocs_os_handle_t os, sli4_port_type_e port_type)
+{
+ uint32_t sli_intf = UINT32_MAX;
+ uint32_t pci_class_rev = 0;
+ uint32_t rev_id = 0;
+ uint32_t family = 0;
+ uint32_t i;
+ sli4_asic_entry_t *asic;
+
+ ocs_memset(sli4, 0, sizeof(sli4_t));
+
+ sli4->os = os;
+ sli4->port_type = port_type;
+
+ /*
+ * Read the SLI_INTF register to discover the register layout
+ * and other capability information
+ */
+ sli_intf = ocs_config_read32(os, SLI4_INTF_REG);
+
+ if (sli_intf_valid_check(sli_intf)) {
+ ocs_log_err(os, "SLI_INTF is not valid\n");
+ return -1;
+ }
+
+ /* driver only support SLI-4 */
+ sli4->sli_rev = sli_intf_sli_revision(sli_intf);
+ if (4 != sli4->sli_rev) {
+ ocs_log_err(os, "Unsupported SLI revision (intf=%#x)\n",
+ sli_intf);
+ return -1;
+ }
+
+ sli4->sli_family = sli_intf_sli_family(sli_intf);
+
+ sli4->if_type = sli_intf_if_type(sli_intf);
+
+ if (SLI4_IF_TYPE_LANCER_FC_ETH == sli4->if_type) {
+ ocs_log_debug(os, "status=%#x error1=%#x error2=%#x\n",
+ sli_reg_read(sli4, SLI4_REG_SLIPORT_STATUS),
+ sli_reg_read(sli4, SLI4_REG_SLIPORT_ERROR1),
+ sli_reg_read(sli4, SLI4_REG_SLIPORT_ERROR2));
+ }
+
+ /*
+ * set the ASIC type and revision
+ */
+ pci_class_rev = ocs_config_read32(os, SLI4_PCI_CLASS_REVISION);
+ rev_id = sli_pci_rev_id(pci_class_rev);
+ family = sli4->sli_family;
+ if (family == SLI4_FAMILY_CHECK_ASIC_TYPE) {
+ uint32_t asic_id = ocs_config_read32(os, SLI4_ASIC_ID_REG);
+ family = sli_asic_gen(asic_id);
+ }
+
+ for (i = 0, asic = sli4_asic_table; i < ARRAY_SIZE(sli4_asic_table); i++, asic++) {
+ if ((rev_id == asic->rev_id) && (family == asic->family)) {
+ sli4->asic_type = asic->type;
+ sli4->asic_rev = asic->rev;
+ break;
+ }
+ }
+ /* Fail if no matching asic type/rev was found */
+	if ((sli4->asic_type == 0) || (sli4->asic_rev == 0)) {
+ ocs_log_err(os, "no matching asic family/rev found: %02x/%02x\n", family, rev_id);
+ return -1;
+ }
+
+ /*
+ * The bootstrap mailbox is equivalent to a MQ with a single 256 byte
+ * entry, a CQ with a single 16 byte entry, and no event queue.
+ * Alignment must be 16 bytes as the low order address bits in the
+ * address register are also control / status.
+ */
+ if (ocs_dma_alloc(sli4->os, &sli4->bmbx, SLI4_BMBX_SIZE +
+ sizeof(sli4_mcqe_t), 16)) {
+ ocs_log_err(os, "bootstrap mailbox allocation failed\n");
+ return -1;
+ }
+
+ if (sli4->bmbx.phys & SLI4_BMBX_MASK_LO) {
+ ocs_log_err(os, "bad alignment for bootstrap mailbox\n");
+ return -1;
+ }
+
+ ocs_log_debug(os, "bmbx v=%p p=0x%x %08x s=%zd\n", sli4->bmbx.virt,
+ ocs_addr32_hi(sli4->bmbx.phys),
+ ocs_addr32_lo(sli4->bmbx.phys),
+ sli4->bmbx.size);
+
+ /* TODO 4096 is arbitrary. What should this value actually be? */
+ if (ocs_dma_alloc(sli4->os, &sli4->vpd.data, 4096/*TODO*/, 4096)) {
+ /* Note that failure isn't fatal in this specific case */
+ sli4->vpd.data.size = 0;
+ ocs_log_test(os, "VPD buffer allocation failed\n");
+ }
+
+ if (sli_fw_init(sli4)) {
+ ocs_log_err(sli4->os, "FW initialization failed\n");
+ return -1;
+ }
+
+ /*
+ * Set one of fcpi(initiator), fcpt(target), fcpc(combined) to true
+ * in addition to any other desired features
+ */
+ sli4->config.features.flag.iaab = TRUE;
+ sli4->config.features.flag.npiv = TRUE;
+ sli4->config.features.flag.dif = TRUE;
+ sli4->config.features.flag.vf = TRUE;
+ sli4->config.features.flag.fcpc = TRUE;
+ sli4->config.features.flag.iaar = TRUE;
+ sli4->config.features.flag.hlm = TRUE;
+ sli4->config.features.flag.perfh = TRUE;
+ sli4->config.features.flag.rxseq = TRUE;
+ sli4->config.features.flag.rxri = TRUE;
+ sli4->config.features.flag.mrqp = TRUE;
+
+ /* use performance hints if available */
+ if (sli4->config.perf_hint) {
+ sli4->config.features.flag.perfh = TRUE;
+ }
+
+ if (sli_request_features(sli4, &sli4->config.features, TRUE)) {
+ return -1;
+ }
+
+ if (sli_get_config(sli4)) {
+ return -1;
+ }
+
+ return 0;
+}
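+
+/*
+ * Illustrative lifecycle sketch (not driver code): how the routines above
+ * are intended to nest, assuming a valid ocs_os_handle_t "os" from the OS
+ * abstraction layer. Error handling is abbreviated.
+ *
+ *	sli4_t sli4;
+ *
+ *	if (sli_setup(&sli4, os, SLI4_PORT_TYPE_FC))
+ *		return -1;		// probe/FW init/config read failed
+ *	if (sli_init(&sli4)) {		// second REQUEST_FEATURES pass
+ *		sli_teardown(&sli4);
+ *		return -1;
+ *	}
+ *	// ... allocate queues and run I/O ...
+ *	sli_teardown(&sli4);		// frees bmbx, VPD, and extent state
+ */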
+
+int32_t
+sli_init(sli4_t *sli4)
+{
+
+ if (sli4->config.has_extents) {
+ /* TODO COMMON_ALLOC_RESOURCE_EXTENTS */;
+ ocs_log_test(sli4->os, "XXX need to implement extent allocation\n");
+ return -1;
+ }
+
+ sli4->config.features.flag.hlm = sli4->config.high_login_mode;
+ sli4->config.features.flag.rxseq = FALSE;
+ sli4->config.features.flag.rxri = FALSE;
+
+ if (sli_request_features(sli4, &sli4->config.features, FALSE)) {
+ return -1;
+ }
+
+ return 0;
+}
+
+int32_t
+sli_reset(sli4_t *sli4)
+{
+ uint32_t i;
+
+ if (sli_fw_init(sli4)) {
+ ocs_log_crit(sli4->os, "FW initialization failed\n");
+ return -1;
+ }
+
+ if (sli4->config.extent[0].base) {
+ ocs_free(sli4->os, sli4->config.extent[0].base, SLI_RSRC_MAX * sizeof(uint32_t));
+ sli4->config.extent[0].base = NULL;
+ }
+
+ for (i = 0; i < SLI_RSRC_MAX; i++) {
+ if (sli4->config.extent[i].use_map) {
+ ocs_bitmap_free(sli4->config.extent[i].use_map);
+ sli4->config.extent[i].use_map = NULL;
+ }
+ sli4->config.extent[i].base = NULL;
+ }
+
+ if (sli_get_config(sli4)) {
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * @ingroup sli
+ * @brief Issue a Firmware Reset.
+ *
+ * @par Description
+ * Issues a Firmware Reset to the chip. This reset affects the entire chip,
+ * so all PCI functions on the same PCI bus and device are affected.
+ * @n @n This type of reset can be used to activate newly downloaded firmware.
+ * @n @n The driver should be considered to be in an unknown state after this
+ * reset and should be reloaded.
+ *
+ * @param sli4 SLI context.
+ *
+ * @return Returns 0 on success, or -1 otherwise.
+ */
+
+int32_t
+sli_fw_reset(sli4_t *sli4)
+{
+ uint32_t val;
+ uint32_t ready;
+
+ /*
+ * Firmware must be ready before issuing the reset.
+ */
+ ready = sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC);
+ if (!ready) {
+ ocs_log_crit(sli4->os, "FW status is NOT ready\n");
+ return -1;
+ }
+ switch(sli4->if_type) {
+ case SLI4_IF_TYPE_BE3_SKH_PF:
+ /* BE3 / Skyhawk use PCICFG_SOFT_RESET_CSR */
+ val = ocs_config_read32(sli4->os, SLI4_PCI_SOFT_RESET_CSR);
+ val |= SLI4_PCI_SOFT_RESET_MASK;
+ ocs_config_write32(sli4->os, SLI4_PCI_SOFT_RESET_CSR, val);
+ break;
+ case SLI4_IF_TYPE_LANCER_FC_ETH:
+ /* Lancer uses PHYDEV_CONTROL */
+
+ val = SLI4_PHYDEV_CONTROL_FRST;
+ sli_reg_write(sli4, SLI4_REG_PHYSDEV_CONTROL, val);
+ break;
+ default:
+ ocs_log_test(sli4->os, "Unexpected iftype %d\n", sli4->if_type);
+		return -1;
+ }
+
+ /* wait for the FW to become ready after the reset */
+ ready = sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC);
+ if (!ready) {
+ ocs_log_crit(sli4->os, "Failed to become ready after firmware reset\n");
+ return -1;
+ }
+ return 0;
+}
+
+/**
+ * @ingroup sli
+ * @brief Tear down a SLI context.
+ *
+ * @param sli4 SLI context.
+ *
+ * @return Returns 0 on success, or non-zero otherwise.
+ */
+int32_t
+sli_teardown(sli4_t *sli4)
+{
+ uint32_t i;
+
+ if (sli4->config.extent[0].base) {
+ ocs_free(sli4->os, sli4->config.extent[0].base, SLI_RSRC_MAX * sizeof(uint32_t));
+ sli4->config.extent[0].base = NULL;
+ }
+
+ for (i = 0; i < SLI_RSRC_MAX; i++) {
+ if (sli4->config.has_extents) {
+ /* TODO COMMON_DEALLOC_RESOURCE_EXTENTS */;
+ }
+
+ sli4->config.extent[i].base = NULL;
+
+ ocs_bitmap_free(sli4->config.extent[i].use_map);
+ sli4->config.extent[i].use_map = NULL;
+ }
+
+ if (sli_fw_term(sli4)) {
+ ocs_log_err(sli4->os, "FW deinitialization failed\n");
+ }
+
+ ocs_dma_free(sli4->os, &sli4->vpd.data);
+ ocs_dma_free(sli4->os, &sli4->bmbx);
+
+ return 0;
+}
+
+/**
+ * @ingroup sli
+ * @brief Register a callback for the given event.
+ *
+ * @param sli4 SLI context.
+ * @param which Event of interest.
+ * @param func Function to call when the event occurs.
+ * @param arg Argument passed to the callback function.
+ *
+ * @return Returns 0 on success, or non-zero otherwise.
+ */
+int32_t
+sli_callback(sli4_t *sli4, sli4_callback_e which, void *func, void *arg)
+{
+
+ if (!sli4 || !func || (which >= SLI4_CB_MAX)) {
+ ocs_log_err(NULL, "bad parameter sli4=%p which=%#x func=%p\n",
+ sli4, which, func);
+ return -1;
+ }
+
+ switch (which) {
+ case SLI4_CB_LINK:
+ sli4->link = func;
+ sli4->link_arg = arg;
+ break;
+ case SLI4_CB_FIP:
+ sli4->fip = func;
+ sli4->fip_arg = arg;
+ break;
+ default:
+ ocs_log_test(sli4->os, "unknown callback %#x\n", which);
+ return -1;
+ }
+
+ return 0;
+}
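+
+/*
+ * Usage sketch (illustrative): registering a link-event callback with
+ * sli_callback() above. "my_link_cb" and "my_ctx" are hypothetical names
+ * supplied by the caller, not part of this driver.
+ *
+ *	static int32_t
+ *	my_link_cb(void *ctx, void *event)
+ *	{
+ *		// decode the link event and notify the transport
+ *		return 0;
+ *	}
+ *
+ *	sli_callback(&sli4, SLI4_CB_LINK, my_link_cb, my_ctx);
+ */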
+
+/**
+ * @ingroup sli
+ * @brief Initialize a queue object.
+ *
+ * @par Description
+ * This initializes the sli4_queue_t object members, including the underlying
+ * DMA memory.
+ *
+ * @param sli4 SLI context.
+ * @param q Pointer to queue object.
+ * @param qtype Type of queue to create.
+ * @param size Size of each entry.
+ * @param n_entries Number of entries to allocate.
+ * @param align Starting memory address alignment.
+ *
+ * @note Checks if using the existing DMA memory (if any) is possible. If not,
+ * it frees the existing memory and re-allocates.
+ *
+ * @return Returns 0 on success, or non-zero otherwise.
+ */
+int32_t
+__sli_queue_init(sli4_t *sli4, sli4_queue_t *q, uint32_t qtype,
+ size_t size, uint32_t n_entries, uint32_t align)
+{
+
+ if ((q->dma.virt == NULL) || (size != q->size) || (n_entries != q->length)) {
+ if (q->dma.size) {
+ ocs_dma_free(sli4->os, &q->dma);
+ }
+
+ ocs_memset(q, 0, sizeof(sli4_queue_t));
+
+ if (ocs_dma_alloc(sli4->os, &q->dma, size * n_entries, align)) {
+ ocs_log_err(sli4->os, "%s allocation failed\n", SLI_QNAME[qtype]);
+ return -1;
+ }
+
+ ocs_memset(q->dma.virt, 0, size * n_entries);
+
+ ocs_lock_init(sli4->os, &q->lock, "%s lock[%d:%p]",
+ SLI_QNAME[qtype], ocs_instance(sli4->os), &q->lock);
+
+ q->type = qtype;
+ q->size = size;
+ q->length = n_entries;
+
+		/* Limit to half the queue size per interrupt */
+ q->proc_limit = n_entries / 2;
+
+ switch(q->type) {
+ case SLI_QTYPE_EQ:
+ q->posted_limit = q->length / 2;
+ break;
+ default:
+ if ((sli4->if_type == SLI4_IF_TYPE_BE3_SKH_PF) ||
+ (sli4->if_type == SLI4_IF_TYPE_BE3_SKH_VF)) {
+ /* For Skyhawk, ring the doorbell more often */
+ q->posted_limit = 8;
+ } else {
+ q->posted_limit = 64;
+ }
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * @ingroup sli
+ * @brief Issue the command to create a queue.
+ *
+ * @param sli4 SLI context.
+ * @param q Pointer to queue object.
+ *
+ * @return Returns 0 on success, or non-zero otherwise.
+ */
+int32_t
+__sli_create_queue(sli4_t *sli4, sli4_queue_t *q)
+{
+ sli4_res_common_create_queue_t *res_q = NULL;
+
+ if (sli_bmbx_command(sli4)){
+ ocs_log_crit(sli4->os, "bootstrap mailbox write fail %s\n",
+ SLI_QNAME[q->type]);
+ ocs_dma_free(sli4->os, &q->dma);
+ return -1;
+ }
+ if (sli_res_sli_config(sli4->bmbx.virt)) {
+ ocs_log_err(sli4->os, "bad status create %s\n", SLI_QNAME[q->type]);
+ ocs_dma_free(sli4->os, &q->dma);
+ return -1;
+ }
+ res_q = (void *)((uint8_t *)sli4->bmbx.virt +
+ offsetof(sli4_cmd_sli_config_t, payload));
+
+ if (res_q->hdr.status) {
+ ocs_log_err(sli4->os, "bad create %s status=%#x addl=%#x\n",
+ SLI_QNAME[q->type],
+ res_q->hdr.status, res_q->hdr.additional_status);
+ ocs_dma_free(sli4->os, &q->dma);
+ return -1;
+ } else {
+ q->id = res_q->q_id;
+ q->doorbell_offset = res_q->db_offset;
+ q->doorbell_rset = res_q->db_rs;
+
+ switch (q->type) {
+ case SLI_QTYPE_EQ:
+ /* No doorbell information in response for EQs */
+ q->doorbell_offset = regmap[SLI4_REG_EQCQ_DOORBELL][sli4->if_type].off;
+ q->doorbell_rset = regmap[SLI4_REG_EQCQ_DOORBELL][sli4->if_type].rset;
+ break;
+ case SLI_QTYPE_CQ:
+ /* No doorbell information in response for CQs */
+ q->doorbell_offset = regmap[SLI4_REG_EQCQ_DOORBELL][sli4->if_type].off;
+ q->doorbell_rset = regmap[SLI4_REG_EQCQ_DOORBELL][sli4->if_type].rset;
+ break;
+ case SLI_QTYPE_MQ:
+ /* No doorbell information in response for MQs */
+ q->doorbell_offset = regmap[SLI4_REG_MQ_DOORBELL][sli4->if_type].off;
+ q->doorbell_rset = regmap[SLI4_REG_MQ_DOORBELL][sli4->if_type].rset;
+ break;
+ case SLI_QTYPE_RQ:
+ /* set the doorbell for non-skyhawks */
+ if (!sli4->config.dual_ulp_capable) {
+ q->doorbell_offset = regmap[SLI4_REG_FCOE_RQ_DOORBELL][sli4->if_type].off;
+ q->doorbell_rset = regmap[SLI4_REG_FCOE_RQ_DOORBELL][sli4->if_type].rset;
+ }
+ break;
+ case SLI_QTYPE_WQ:
+ /* set the doorbell for non-skyhawks */
+ if (!sli4->config.dual_ulp_capable) {
+ q->doorbell_offset = regmap[SLI4_REG_IO_WQ_DOORBELL][sli4->if_type].off;
+ q->doorbell_rset = regmap[SLI4_REG_IO_WQ_DOORBELL][sli4->if_type].rset;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * @ingroup sli
+ * @brief Get queue entry size.
+ *
+ * Get queue entry size given queue type.
+ *
+ * @param sli4 SLI context
+ * @param qtype Type for which the entry size is returned.
+ *
+ * @return Returns > 0 on success (queue entry size), or a negative value on failure.
+ */
+int32_t
+sli_get_queue_entry_size(sli4_t *sli4, uint32_t qtype)
+{
+ uint32_t size = 0;
+
+ if (!sli4) {
+ ocs_log_err(NULL, "bad parameter sli4=%p\n", sli4);
+ return -1;
+ }
+
+ switch (qtype) {
+ case SLI_QTYPE_EQ:
+ size = sizeof(uint32_t);
+ break;
+ case SLI_QTYPE_CQ:
+ size = 16;
+ break;
+ case SLI_QTYPE_MQ:
+ size = 256;
+ break;
+ case SLI_QTYPE_WQ:
+ if (SLI4_PORT_TYPE_FC == sli4->port_type) {
+ size = sli4->config.wqe_size;
+ } else {
+ /* TODO */
+ ocs_log_test(sli4->os, "unsupported queue entry size\n");
+ return -1;
+ }
+ break;
+ case SLI_QTYPE_RQ:
+ size = SLI4_FCOE_RQE_SIZE;
+ break;
+ default:
+ ocs_log_test(sli4->os, "unknown queue type %d\n", qtype);
+ return -1;
+ }
+ return size;
+}
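+
+/*
+ * Example (illustrative): sizing a queue before allocation. For a
+ * 1024-entry CQ this gives 1024 * 16 = 16384 bytes, which
+ * __sli_queue_init() then allocates as SLI_PAGE_SIZE-aligned DMA memory.
+ *
+ *	int32_t esize = sli_get_queue_entry_size(&sli4, SLI_QTYPE_CQ);
+ *	size_t qbytes = (esize > 0) ? (size_t)esize * 1024 : 0;
+ */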
+
+/**
+ * @ingroup sli
+ * @brief Modify the delay timer for all the EQs
+ *
+ * @param sli4 SLI context.
+ * @param eq Array of EQs.
+ * @param num_eq Count of EQs.
+ * @param shift Phase shift for staggering interrupts.
+ * @param delay_mult Delay multiplier for limiting interrupt frequency.
+ *
+ * @return Returns 0 on success, or -1 otherwise.
+ */
+int32_t
+sli_eq_modify_delay(sli4_t *sli4, sli4_queue_t *eq, uint32_t num_eq, uint32_t shift, uint32_t delay_mult)
+{
+
+ sli_cmd_common_modify_eq_delay(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, eq, num_eq, shift, delay_mult);
+
+ if (sli_bmbx_command(sli4)) {
+ ocs_log_crit(sli4->os, "bootstrap mailbox write fail (MODIFY EQ DELAY)\n");
+ return -1;
+ }
+ if (sli_res_sli_config(sli4->bmbx.virt)) {
+ ocs_log_err(sli4->os, "bad status MODIFY EQ DELAY\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * @ingroup sli
+ * @brief Allocate a queue.
+ *
+ * @par Description
+ * Allocates DMA memory and configures the requested queue type.
+ *
+ * @param sli4 SLI context.
+ * @param qtype Type of queue to create.
+ * @param q Pointer to the queue object.
+ * @param n_entries Number of entries to allocate.
+ * @param assoc Associated queue (that is, the EQ for a CQ, the CQ for a MQ, and so on).
+ * @param ulp The ULP to bind; only used for WQs and RQs.
+ *
+ * @return Returns 0 on success, or -1 otherwise.
+ */
+int32_t
+sli_queue_alloc(sli4_t *sli4, uint32_t qtype, sli4_queue_t *q, uint32_t n_entries,
+ sli4_queue_t *assoc, uint16_t ulp)
+{
+ int32_t size;
+ uint32_t align = 0;
+ sli4_create_q_fn_t create = NULL;
+
+ if (!sli4 || !q) {
+ ocs_log_err(NULL, "bad parameter sli4=%p q=%p\n", sli4, q);
+ return -1;
+ }
+
+ /* get queue size */
+ size = sli_get_queue_entry_size(sli4, qtype);
+ if (size < 0)
+ return -1;
+ align = SLI_PAGE_SIZE;
+
+ switch (qtype) {
+ case SLI_QTYPE_EQ:
+ create = sli_cmd_common_create_eq;
+ break;
+ case SLI_QTYPE_CQ:
+ create = sli_cmd_common_create_cq;
+ break;
+ case SLI_QTYPE_MQ:
+ /* Validate the number of entries */
+ switch (n_entries) {
+ case 16:
+ case 32:
+ case 64:
+ case 128:
+ break;
+ default:
+ ocs_log_test(sli4->os, "illegal n_entries value %d for MQ\n", n_entries);
+ return -1;
+ }
+ assoc->u.flag.is_mq = TRUE;
+ create = sli_cmd_common_create_mq_ext;
+ break;
+ case SLI_QTYPE_WQ:
+ if (SLI4_PORT_TYPE_FC == sli4->port_type) {
+ if (sli4->if_type == SLI4_IF_TYPE_BE3_SKH_PF) {
+ create = sli_cmd_fcoe_wq_create;
+ } else {
+ create = sli_cmd_fcoe_wq_create_v1;
+ }
+ } else {
+ /* TODO */
+ ocs_log_test(sli4->os, "unsupported WQ create\n");
+ return -1;
+ }
+ break;
+ default:
+ ocs_log_test(sli4->os, "unknown queue type %d\n", qtype);
+ return -1;
+ }
+
+
+ if (__sli_queue_init(sli4, q, qtype, size, n_entries, align)) {
+ ocs_log_err(sli4->os, "%s allocation failed\n", SLI_QNAME[qtype]);
+ return -1;
+ }
+
+ if (create(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, &q->dma, assoc ? assoc->id : 0, ulp)) {
+ if (__sli_create_queue(sli4, q)) {
+ ocs_log_err(sli4->os, "create %s failed\n", SLI_QNAME[qtype]);
+ return -1;
+ }
+ q->ulp = ulp;
+ } else {
+ ocs_log_err(sli4->os, "cannot create %s\n", SLI_QNAME[qtype]);
+ return -1;
+ }
+
+ return 0;
+}
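+
+/*
+ * Usage sketch (illustrative only): building a minimal EQ -> CQ -> MQ
+ * chain with sli_queue_alloc(). Entry counts are assumptions; the MQ
+ * depth must be one of 16/32/64/128 per the validation above, and the
+ * "assoc" argument ties each queue to its parent.
+ *
+ *	sli4_queue_t eq, cq, mq;
+ *
+ *	if (sli_queue_alloc(&sli4, SLI_QTYPE_EQ, &eq, 1024, NULL, 0) ||
+ *	    sli_queue_alloc(&sli4, SLI_QTYPE_CQ, &cq, 1024, &eq, 0) ||
+ *	    sli_queue_alloc(&sli4, SLI_QTYPE_MQ, &mq, 16, &cq, 0))
+ *		return -1;
+ */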
+
+
+/**
+ * @ingroup sli
+ * @brief Allocate a CQ set.
+ *
+ * @param sli4 SLI context.
+ * @param qs Pointers to the queue objects.
+ * @param num_cqs Number of CQs to create.
+ * @param n_entries Number of entries to allocate per CQ.
+ * @param eqs Associated event queues, one per CQ.
+ *
+ * @return Returns 0 on success, or -1 otherwise.
+ */
+int32_t
+sli_cq_alloc_set(sli4_t *sli4, sli4_queue_t *qs[], uint32_t num_cqs,
+ uint32_t n_entries, sli4_queue_t *eqs[])
+{
+ uint32_t i, offset = 0, page_bytes = 0, payload_size, cmd_size = 0;
+ uint32_t p = 0, page_size = 0, n_cqe = 0, num_pages_cq;
+ uintptr_t addr;
+	ocs_dma_t dma = {0};	/* zeroed so the error path can safely test dma.size */
+ sli4_req_common_create_cq_set_v0_t *req = NULL;
+ sli4_res_common_create_queue_set_t *res = NULL;
+
+ if (!sli4) {
+ ocs_log_err(NULL, "bad parameter sli4=%p\n", sli4);
+ return -1;
+ }
+
+ /* Align the queue DMA memory */
+ for (i = 0; i < num_cqs; i++) {
+ if (__sli_queue_init(sli4, qs[i], SLI_QTYPE_CQ, SLI4_CQE_BYTES,
+ n_entries, SLI_PAGE_SIZE)) {
+ ocs_log_err(sli4->os, "Queue init failed.\n");
+ goto error;
+ }
+ }
+
+ n_cqe = qs[0]->dma.size / SLI4_CQE_BYTES;
+ switch (n_cqe) {
+ case 256:
+ case 512:
+ case 1024:
+ case 2048:
+ page_size = 1;
+ break;
+ case 4096:
+ page_size = 2;
+ break;
+ default:
+ return -1;
+ }
+
+ page_bytes = page_size * SLI_PAGE_SIZE;
+ num_pages_cq = sli_page_count(qs[0]->dma.size, page_bytes);
+ cmd_size = sizeof(sli4_req_common_create_cq_set_v0_t) + (8 * num_pages_cq * num_cqs);
+ payload_size = max((size_t)cmd_size, sizeof(sli4_res_common_create_queue_set_t));
+
+ if (ocs_dma_alloc(sli4->os, &dma, payload_size, SLI_PAGE_SIZE)) {
+ ocs_log_err(sli4->os, "DMA allocation failed\n");
+ goto error;
+ }
+ ocs_memset(dma.virt, 0, payload_size);
+
+ if (sli_cmd_sli_config(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
+ payload_size, &dma) == -1) {
+ goto error;
+ }
+
+ /* Fill the request structure */
+
+ req = (sli4_req_common_create_cq_set_v0_t *)((uint8_t *)dma.virt);
+ req->hdr.opcode = SLI4_OPC_COMMON_CREATE_CQ_SET;
+ req->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
+ req->hdr.version = 0;
+ req->hdr.request_length = cmd_size - sizeof(sli4_req_hdr_t);
+ req->page_size = page_size;
+
+ req->num_pages = num_pages_cq;
+ switch (req->num_pages) {
+ case 1:
+ req->cqecnt = SLI4_CQ_CNT_256;
+ break;
+ case 2:
+ req->cqecnt = SLI4_CQ_CNT_512;
+ break;
+ case 4:
+ req->cqecnt = SLI4_CQ_CNT_1024;
+ break;
+ case 8:
+ req->cqecnt = SLI4_CQ_CNT_LARGE;
+ req->cqe_count = n_cqe;
+ break;
+ default:
+ ocs_log_test(sli4->os, "num_pages %d not valid\n", req->num_pages);
+ goto error;
+ }
+
+ req->evt = TRUE;
+ req->valid = TRUE;
+ req->arm = FALSE;
+ req->num_cq_req = num_cqs;
+
+ /* Fill page addresses of all the CQs. */
+ for (i = 0; i < num_cqs; i++) {
+ req->eq_id[i] = eqs[i]->id;
+ for (p = 0, addr = qs[i]->dma.phys; p < req->num_pages; p++, addr += page_bytes) {
+ req->page_physical_address[offset].low = ocs_addr32_lo(addr);
+ req->page_physical_address[offset].high = ocs_addr32_hi(addr);
+ offset++;
+ }
+ }
+
+ if (sli_bmbx_command(sli4)) {
+ ocs_log_crit(sli4->os, "bootstrap mailbox write fail CQSet\n");
+ goto error;
+ }
+
+ res = (void *)((uint8_t *)dma.virt);
+ if (res->hdr.status) {
+ ocs_log_err(sli4->os, "bad create CQSet status=%#x addl=%#x\n",
+ res->hdr.status, res->hdr.additional_status);
+ goto error;
+ } else {
+ /* Check if we got all requested CQs. */
+ if (res->num_q_allocated != num_cqs) {
+			ocs_log_crit(sli4->os, "Requested CQ count doesn't match allocated count.\n");
+ goto error;
+ }
+
+ /* Fill the resp cq ids. */
+ for (i = 0; i < num_cqs; i++) {
+ qs[i]->id = res->q_id + i;
+ qs[i]->doorbell_offset = regmap[SLI4_REG_EQCQ_DOORBELL][sli4->if_type].off;
+ qs[i]->doorbell_rset = regmap[SLI4_REG_EQCQ_DOORBELL][sli4->if_type].rset;
+ }
+ }
+
+ ocs_dma_free(sli4->os, &dma);
+
+ return 0;
+
+error:
+ for (i = 0; i < num_cqs; i++) {
+ if (qs[i]->dma.size) {
+ ocs_dma_free(sli4->os, &qs[i]->dma);
+ }
+ }
+
+ if (dma.size) {
+ ocs_dma_free(sli4->os, &dma);
+ }
+
+ return -1;
+}
+
+
+
+/**
+ * @ingroup sli
+ * @brief Free a queue.
+ *
+ * @par Description
+ * Frees DMA memory and de-registers the requested queue.
+ *
+ * @param sli4 SLI context.
+ * @param q Pointer to the queue object.
+ * @param destroy_queues Non-zero if the mailbox commands should be sent to destroy the queues.
+ * @param free_memory Non-zero if the DMA memory associated with the queue should be freed.
+ *
+ * @return Returns 0 on success, or -1 otherwise.
+ */
+int32_t
+sli_queue_free(sli4_t *sli4, sli4_queue_t *q, uint32_t destroy_queues, uint32_t free_memory)
+{
+ sli4_destroy_q_fn_t destroy = NULL;
+ int32_t rc = -1;
+
+ if (!sli4 || !q) {
+ ocs_log_err(NULL, "bad parameter sli4=%p q=%p\n", sli4, q);
+ return -1;
+ }
+
+ if (destroy_queues) {
+ switch (q->type) {
+ case SLI_QTYPE_EQ:
+ destroy = sli_cmd_common_destroy_eq;
+ break;
+ case SLI_QTYPE_CQ:
+ destroy = sli_cmd_common_destroy_cq;
+ break;
+ case SLI_QTYPE_MQ:
+ destroy = sli_cmd_common_destroy_mq;
+ break;
+ case SLI_QTYPE_WQ:
+ if (SLI4_PORT_TYPE_FC == sli4->port_type) {
+ destroy = sli_cmd_fcoe_wq_destroy;
+ } else {
+ /* TODO */
+ ocs_log_test(sli4->os, "unsupported WQ destroy\n");
+ return -1;
+ }
+ break;
+ case SLI_QTYPE_RQ:
+ if (SLI4_PORT_TYPE_FC == sli4->port_type) {
+ destroy = sli_cmd_fcoe_rq_destroy;
+ } else {
+ /* TODO */
+ ocs_log_test(sli4->os, "unsupported RQ destroy\n");
+ return -1;
+ }
+ break;
+ default:
+ ocs_log_test(sli4->os, "bad queue type %d\n",
+ q->type);
+ return -1;
+ }
+
+ /*
+ * Destroying queues makes BE3 sad (version 0 interface type). Rely
+ * on COMMON_FUNCTION_RESET to free host allocated queue resources
+ * inside the SLI Port.
+ */
+ if (SLI4_IF_TYPE_BE3_SKH_PF == sli4->if_type) {
+ destroy = NULL;
+ }
+
+ /* Destroy the queue if the operation is defined */
+ if (destroy && destroy(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, q->id)) {
+ sli4_res_hdr_t *res = NULL;
+
+ if (sli_bmbx_command(sli4)){
+ ocs_log_crit(sli4->os, "bootstrap mailbox write fail destroy %s\n",
+ SLI_QNAME[q->type]);
+ } else if (sli_res_sli_config(sli4->bmbx.virt)) {
+ ocs_log_err(sli4->os, "bad status destroy %s\n", SLI_QNAME[q->type]);
+ } else {
+ res = (void *)((uint8_t *)sli4->bmbx.virt +
+ offsetof(sli4_cmd_sli_config_t, payload));
+
+ if (res->status) {
+ ocs_log_err(sli4->os, "bad destroy %s status=%#x addl=%#x\n",
+ SLI_QNAME[q->type],
+ res->status, res->additional_status);
+ } else {
+ rc = 0;
+ }
+ }
+ }
+ }
+
+ if (free_memory) {
+ ocs_lock_free(&q->lock);
+
+ if (ocs_dma_free(sli4->os, &q->dma)) {
+ ocs_log_err(sli4->os, "%s queue ID %d free failed\n",
+ SLI_QNAME[q->type], q->id);
+ rc = -1;
+ }
+ }
+
+ return rc;
+}
+
+int32_t
+sli_queue_reset(sli4_t *sli4, sli4_queue_t *q)
+{
+
+ ocs_lock(&q->lock);
+
+ q->index = 0;
+ q->n_posted = 0;
+
+ if (SLI_QTYPE_MQ == q->type) {
+ q->u.r_idx = 0;
+ }
+
+ if (q->dma.virt != NULL) {
+ ocs_memset(q->dma.virt, 0, (q->size * q->length));
+ }
+
+ ocs_unlock(&q->lock);
+
+ return 0;
+}
+
+/**
+ * @ingroup sli
+ * @brief Check if the given queue is empty.
+ *
+ * @par Description
+ * If the valid bit of the current entry is unset, the queue is empty.
+ *
+ * @param sli4 SLI context.
+ * @param q Pointer to the queue object.
+ *
+ * @return Returns TRUE if empty, or FALSE otherwise.
+ */
+int32_t
+sli_queue_is_empty(sli4_t *sli4, sli4_queue_t *q)
+{
+ int32_t rc = TRUE;
+ uint8_t *qe = q->dma.virt;
+
+ ocs_lock(&q->lock);
+
+ ocs_dma_sync(&q->dma, OCS_DMASYNC_POSTREAD);
+
+ qe += q->index * q->size;
+
+ rc = !sli_queue_entry_is_valid(q, qe, FALSE);
+
+ ocs_unlock(&q->lock);
+
+ return rc;
+}
+
+/**
+ * @ingroup sli
+ * @brief Arm an EQ.
+ *
+ * @param sli4 SLI context.
+ * @param q Pointer to queue object.
+ * @param arm If TRUE, arm the EQ.
+ *
+ * @return Returns 0 on success, or non-zero otherwise.
+ */
+int32_t
+sli_queue_eq_arm(sli4_t *sli4, sli4_queue_t *q, uint8_t arm)
+{
+ uint32_t val = 0;
+
+ ocs_lock(&q->lock);
+ val = sli_eq_doorbell(q->n_posted, q->id, arm);
+ ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val);
+ q->n_posted = 0;
+ ocs_unlock(&q->lock);
+
+ return 0;
+}
+
+/**
+ * @ingroup sli
+ * @brief Arm a queue.
+ *
+ * @param sli4 SLI context.
+ * @param q Pointer to queue object.
+ * @param arm If TRUE, arm the queue.
+ *
+ * @return Returns 0 on success, or non-zero otherwise.
+ */
+int32_t
+sli_queue_arm(sli4_t *sli4, sli4_queue_t *q, uint8_t arm)
+{
+ uint32_t val = 0;
+
+ ocs_lock(&q->lock);
+
+ switch (q->type) {
+ case SLI_QTYPE_EQ:
+ val = sli_eq_doorbell(q->n_posted, q->id, arm);
+ ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val);
+ q->n_posted = 0;
+ break;
+ case SLI_QTYPE_CQ:
+ val = sli_cq_doorbell(q->n_posted, q->id, arm);
+ ocs_reg_write32(sli4->os, q->doorbell_rset, q->doorbell_offset, val);
+ q->n_posted = 0;
+ break;
+ default:
+ ocs_log_test(sli4->os, "should only be used for EQ/CQ, not %s\n",
+ SLI_QNAME[q->type]);
+ }
+
+ ocs_unlock(&q->lock);
+
+ return 0;
+}
+
+/**
+ * @ingroup sli
+ * @brief Write an entry to the queue object.
+ *
+ * Note: Assumes the caller holds q->lock and releases it afterward.
+ *
+ * @param sli4 SLI context.
+ * @param q Pointer to the queue object.
+ * @param entry Pointer to the entry contents.
+ *
+ * @return Returns queue index on success, or negative error value otherwise.
+ */
+int32_t
+_sli_queue_write(sli4_t *sli4, sli4_queue_t *q, uint8_t *entry)
+{
+ int32_t rc = 0;
+ uint8_t *qe = q->dma.virt;
+ uint32_t qindex;
+
+ qindex = q->index;
+ qe += q->index * q->size;
+
+ if (entry) {
+ if ((SLI_QTYPE_WQ == q->type) && sli4->config.perf_wq_id_association) {
+ sli_set_wq_id_association(entry, q->id);
+ }
+#if defined(OCS_INCLUDE_DEBUG)
+ switch (q->type) {
+ case SLI_QTYPE_WQ: {
+ ocs_dump32(OCS_DEBUG_ENABLE_WQ_DUMP, sli4->os, "wqe", entry, q->size);
+ break;
+
+ }
+ case SLI_QTYPE_MQ:
+ /* Note: we don't really need to dump the whole
+ * 256 bytes, just do 64 */
+ ocs_dump32(OCS_DEBUG_ENABLE_MQ_DUMP, sli4->os, "mqe outbound", entry, 64);
+ break;
+
+ default:
+ break;
+ }
+#endif
+ ocs_memcpy(qe, entry, q->size);
+ q->n_posted = 1;
+ }
+
+ ocs_dma_sync(&q->dma, OCS_DMASYNC_PREWRITE);
+
+ rc = sli_queue_doorbell(sli4, q);
+
+ q->index = (q->index + q->n_posted) & (q->length - 1);
+ q->n_posted = 0;
+
+ if (rc < 0) {
+ /* failure */
+ return rc;
+ } else if (rc > 0) {
+ /* failure, but we need to return a negative value on failure */
+ return -rc;
+ } else {
+ return qindex;
+ }
+}
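+
+/*
+ * Note on the index arithmetic above: "(q->index + q->n_posted) &
+ * (q->length - 1)" is a power-of-two ring wrap, so q->length must be a
+ * power of two. Worked example (illustrative): length = 1024, index =
+ * 1023, one entry posted -> (1023 + 1) & 1023 = 0, wrapping to the start.
+ */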
+
+/**
+ * @ingroup sli
+ * @brief Write an entry to the queue object.
+ *
+ * Note: Acquires and releases q->lock internally; do not call with the lock held.
+ *
+ * @param sli4 SLI context.
+ * @param q Pointer to the queue object.
+ * @param entry Pointer to the entry contents.
+ *
+ * @return Returns queue index on success, or negative error value otherwise.
+ */
+int32_t
+sli_queue_write(sli4_t *sli4, sli4_queue_t *q, uint8_t *entry)
+{
+ int32_t rc;
+
+ ocs_lock(&q->lock);
+ rc = _sli_queue_write(sli4, q, entry);
+ ocs_unlock(&q->lock);
+
+ return rc;
+}
+
+/**
+ * @brief Check if the current queue entry is valid.
+ *
+ * @param q Pointer to the queue object.
+ * @param qe Pointer to the queue entry.
+ * @param clear Boolean to clear valid bit.
+ *
+ * @return Returns TRUE if the entry is valid, or FALSE otherwise.
+ */
+static uint8_t
+sli_queue_entry_is_valid(sli4_queue_t *q, uint8_t *qe, uint8_t clear)
+{
+ uint8_t valid = FALSE;
+
+ switch (q->type) {
+ case SLI_QTYPE_EQ:
+ valid = ((sli4_eqe_t *)qe)->vld;
+ if (valid && clear) {
+ ((sli4_eqe_t *)qe)->vld = 0;
+ }
+ break;
+ case SLI_QTYPE_CQ:
+ /*
+ * For both MCQE and WCQE/RCQE, the valid bit
+ * is bit 31 of dword 3 (0 based)
+ */
+ valid = (qe[15] & 0x80) != 0;
+		if (valid && clear) {
+ qe[15] &= ~0x80;
+ }
+ break;
+ case SLI_QTYPE_MQ:
+ valid = q->index != q->u.r_idx;
+ break;
+ case SLI_QTYPE_RQ:
+ valid = TRUE;
+ clear = FALSE;
+ break;
+ default:
+ ocs_log_test(NULL, "doesn't handle type=%#x\n", q->type);
+ }
+
+ if (clear) {
+ ocs_dma_sync(&q->dma, OCS_DMASYNC_PREWRITE);
+ }
+
+ return valid;
+}
+
+/**
+ * @ingroup sli
+ * @brief Read an entry from the queue object.
+ *
+ * @param sli4 SLI context.
+ * @param q Pointer to the queue object.
+ * @param entry Destination pointer for the queue entry contents.
+ *
+ * @return Returns 0 on success, or non-zero otherwise.
+ */
+int32_t
+sli_queue_read(sli4_t *sli4, sli4_queue_t *q, uint8_t *entry)
+{
+ int32_t rc = 0;
+ uint8_t *qe = q->dma.virt;
+ uint32_t *qindex = NULL;
+
+ if (SLI_QTYPE_MQ == q->type) {
+ qindex = &q->u.r_idx;
+ } else {
+ qindex = &q->index;
+ }
+
+ ocs_lock(&q->lock);
+
+ ocs_dma_sync(&q->dma, OCS_DMASYNC_POSTREAD);
+
+ qe += *qindex * q->size;
+
+ if (!sli_queue_entry_is_valid(q, qe, TRUE)) {
+ ocs_unlock(&q->lock);
+ return -1;
+ }
+
+ if (entry) {
+ ocs_memcpy(entry, qe, q->size);
+#if defined(OCS_INCLUDE_DEBUG)
+ switch(q->type) {
+ case SLI_QTYPE_CQ:
+ ocs_dump32(OCS_DEBUG_ENABLE_CQ_DUMP, sli4->os, "cq", entry, q->size);
+ break;
+ case SLI_QTYPE_MQ:
+ ocs_dump32(OCS_DEBUG_ENABLE_MQ_DUMP, sli4->os, "mq Compl", entry, 64);
+ break;
+ case SLI_QTYPE_EQ:
+ ocs_dump32(OCS_DEBUG_ENABLE_EQ_DUMP, sli4->os, "eq Compl", entry, q->size);
+ break;
+ default:
+ break;
+ }
+#endif
+ }
+
+ switch (q->type) {
+ case SLI_QTYPE_EQ:
+ case SLI_QTYPE_CQ:
+ case SLI_QTYPE_MQ:
+ *qindex = (*qindex + 1) & (q->length - 1);
+ if (SLI_QTYPE_MQ != q->type) {
+ q->n_posted++;
+ }
+ break;
+ default:
+ /* reads don't update the index */
+ break;
+ }
+
+ ocs_unlock(&q->lock);
+
+ return rc;
+}
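+
+/*
+ * Usage sketch (illustrative): draining a CQ with sli_queue_read(), which
+ * returns non-zero once the current entry's valid bit is clear:
+ *
+ *	uint8_t cqe[SLI4_CQE_BYTES];
+ *
+ *	while (sli_queue_read(&sli4, cq, cqe) == 0) {
+ *		// decode via sli_cq_parse() and dispatch the completion
+ *	}
+ *	sli_queue_arm(&sli4, cq, TRUE);	// re-arm after draining
+ */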
+
+int32_t
+sli_queue_index(sli4_t *sli4, sli4_queue_t *q)
+{
+
+ if (q) {
+ return q->index;
+ } else {
+ return -1;
+ }
+}
+
+int32_t
+sli_queue_poke(sli4_t *sli4, sli4_queue_t *q, uint32_t index, uint8_t *entry)
+{
+ int32_t rc;
+
+ ocs_lock(&q->lock);
+ rc = _sli_queue_poke(sli4, q, index, entry);
+ ocs_unlock(&q->lock);
+
+ return rc;
+}
+
+int32_t
+_sli_queue_poke(sli4_t *sli4, sli4_queue_t *q, uint32_t index, uint8_t *entry)
+{
+ int32_t rc = 0;
+ uint8_t *qe = q->dma.virt;
+
+ if (index >= q->length) {
+ return -1;
+ }
+
+ qe += index * q->size;
+
+ if (entry) {
+ ocs_memcpy(qe, entry, q->size);
+ }
+
+ ocs_dma_sync(&q->dma, OCS_DMASYNC_PREWRITE);
+
+ return rc;
+}
+
+/**
+ * @ingroup sli
+ * @brief Allocate SLI Port resources.
+ *
+ * @par Description
+ * Allocate port-related resources, such as VFI, RPI, XRI, and so on.
+ * Resources are modeled using extents, regardless of whether the underlying
+ * device implements resource extents. If the device does not implement
+ * extents, the SLI layer models this as a single (albeit large) extent.
+ *
+ * @param sli4 SLI context.
+ * @param rtype Resource type (for example, RPI or XRI)
+ * @param rid Allocated resource ID.
+ * @param index Index into the bitmap.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+int32_t
+sli_resource_alloc(sli4_t *sli4, sli4_resource_e rtype, uint32_t *rid, uint32_t *index)
+{
+ int32_t rc = 0;
+ uint32_t size;
+ uint32_t extent_idx;
+ uint32_t item_idx;
+ int status;
+
+ *rid = UINT32_MAX;
+ *index = UINT32_MAX;
+
+ switch (rtype) {
+ case SLI_RSRC_FCOE_VFI:
+ case SLI_RSRC_FCOE_VPI:
+ case SLI_RSRC_FCOE_RPI:
+ case SLI_RSRC_FCOE_XRI:
+ status = ocs_bitmap_find(sli4->config.extent[rtype].use_map,
+ sli4->config.extent[rtype].map_size);
+ if (status < 0) {
+ ocs_log_err(sli4->os, "out of resource %d (alloc=%d)\n",
+ rtype, sli4->config.extent[rtype].n_alloc);
+ rc = -1;
+ break;
+ } else {
+ *index = status;
+ }
+
+ size = sli4->config.extent[rtype].size;
+
+ extent_idx = *index / size;
+ item_idx = *index % size;
+
+ *rid = sli4->config.extent[rtype].base[extent_idx] + item_idx;
+
+ sli4->config.extent[rtype].n_alloc++;
+ break;
+ default:
+ rc = -1;
+ }
+
+ return rc;
+}
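+
+/*
+ * Worked example (illustrative) of the extent math above: with two RPI
+ * extents of size 64 and bases {100, 500}, a bitmap hit at *index = 70
+ * yields extent_idx = 70 / 64 = 1 and item_idx = 70 % 64 = 6, so the
+ * returned resource ID is base[1] + 6 = 506. On the non-extent path set
+ * up in sli_get_config(), number == 1 and this degenerates to
+ * base[0] + *index.
+ */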
+
+/**
+ * @ingroup sli
+ * @brief Free the SLI Port resources.
+ *
+ * @par Description
+ * Free port-related resources, such as VFI, RPI, XRI, and so on. See the discussion of
+ * "extent" usage in sli_resource_alloc.
+ *
+ * @param sli4 SLI context.
+ * @param rtype Resource type (for example, RPI or XRI).
+ * @param rid Allocated resource ID.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+int32_t
+sli_resource_free(sli4_t *sli4, sli4_resource_e rtype, uint32_t rid)
+{
+ int32_t rc = -1;
+ uint32_t x;
+ uint32_t size, *base;
+
+ switch (rtype) {
+ case SLI_RSRC_FCOE_VFI:
+ case SLI_RSRC_FCOE_VPI:
+ case SLI_RSRC_FCOE_RPI:
+ case SLI_RSRC_FCOE_XRI:
+ /*
+ * Figure out which extent contains the resource ID. I.e. find
+ * the extent such that
+ * extent->base <= resource ID < extent->base + extent->size
+ */
+ base = sli4->config.extent[rtype].base;
+ size = sli4->config.extent[rtype].size;
+
+ /*
+ * In the case of FW reset, this may be cleared but the force_free path will
+ * still attempt to free the resource. Prevent a NULL pointer access.
+ */
+ if (base != NULL) {
+ for (x = 0; x < sli4->config.extent[rtype].number; x++) {
+ if ((rid >= base[x]) && (rid < (base[x] + size))) {
+ rid -= base[x];
+ ocs_bitmap_clear(sli4->config.extent[rtype].use_map,
+ (x * size) + rid);
+ rc = 0;
+ break;
+ }
+ }
+ }
+ break;
+ default:
+ ;
+ }
+
+ return rc;
+}
+
+int32_t
+sli_resource_reset(sli4_t *sli4, sli4_resource_e rtype)
+{
+ int32_t rc = -1;
+ uint32_t i;
+
+ switch (rtype) {
+ case SLI_RSRC_FCOE_VFI:
+ case SLI_RSRC_FCOE_VPI:
+ case SLI_RSRC_FCOE_RPI:
+ case SLI_RSRC_FCOE_XRI:
+ for (i = 0; i < sli4->config.extent[rtype].map_size; i++) {
+ ocs_bitmap_clear(sli4->config.extent[rtype].use_map, i);
+ }
+ rc = 0;
+ break;
+ default:
+ ;
+ }
+
+ return rc;
+}
+
+/**
+ * @ingroup sli
+ * @brief Parse an EQ entry to retrieve the CQ_ID for this event.
+ *
+ * @param sli4 SLI context.
+ * @param buf Pointer to the EQ entry.
+ * @param cq_id CQ_ID for this entry (only valid on success).
+ *
+ * @return
+ * - 0 if success.
+ * - < 0 if error.
+ * - > 0 if firmware detects EQ overflow.
+ */
+int32_t
+sli_eq_parse(sli4_t *sli4, uint8_t *buf, uint16_t *cq_id)
+{
+ sli4_eqe_t *eqe = (void *)buf;
+ int32_t rc = 0;
+
+ if (!sli4 || !buf || !cq_id) {
+ ocs_log_err(NULL, "bad parameters sli4=%p buf=%p cq_id=%p\n",
+ sli4, buf, cq_id);
+ return -1;
+ }
+
+ switch (eqe->major_code) {
+ case SLI4_MAJOR_CODE_STANDARD:
+ *cq_id = eqe->resource_id;
+ break;
+ case SLI4_MAJOR_CODE_SENTINEL:
+ ocs_log_debug(sli4->os, "sentinel EQE\n");
+ rc = 1;
+ break;
+ default:
+ ocs_log_test(sli4->os, "Unsupported EQE: major %x minor %x\n",
+ eqe->major_code, eqe->minor_code);
+ rc = -1;
+ }
+
+ return rc;
+}
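+
+/*
+ * Usage sketch (illustrative): a typical EQ service loop built from the
+ * primitives above. The per-CQ lookup is an assumption supplied by the
+ * caller; EQ entries are 4 bytes per sli_get_queue_entry_size().
+ *
+ *	uint8_t eqe[4];
+ *	uint16_t cq_id;
+ *
+ *	while (sli_queue_read(&sli4, eq, eqe) == 0) {
+ *		if (sli_eq_parse(&sli4, eqe, &cq_id) == 0) {
+ *			// look up the CQ for cq_id and process it
+ *		}
+ *	}
+ *	sli_queue_eq_arm(&sli4, eq, TRUE);
+ */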
+
+/**
+ * @ingroup sli
+ * @brief Parse a CQ entry to retrieve the event type and the associated queue.
+ *
+ * @param sli4 SLI context.
+ * @param cq CQ to process.
+ * @param cqe Pointer to the CQ entry.
+ * @param etype CQ event type.
+ * @param q_id Queue ID associated with this completion message
+ * (that is, MQ_ID, RQ_ID, and so on).
+ *
+ * @return
+ * - 0 if call completed correctly and CQE status is SUCCESS.
+ * - -1 if call failed (no CQE status).
+ * - Other value if call completed correctly and return value is a CQE status value.
+ */
+int32_t
+sli_cq_parse(sli4_t *sli4, sli4_queue_t *cq, uint8_t *cqe, sli4_qentry_e *etype,
+ uint16_t *q_id)
+{
+ int32_t rc = 0;
+
+	if (!sli4 || !cq || !cqe || !etype || !q_id) {
+ ocs_log_err(NULL, "bad parameters sli4=%p cq=%p cqe=%p etype=%p q_id=%p\n",
+ sli4, cq, cqe, etype, q_id);
+ return -1;
+ }
+
+ if (cq->u.flag.is_mq) {
+ sli4_mcqe_t *mcqe = (void *)cqe;
+
+ if (mcqe->ae) {
+ *etype = SLI_QENTRY_ASYNC;
+ } else {
+ *etype = SLI_QENTRY_MQ;
+ rc = sli_cqe_mq(mcqe);
+ }
+ *q_id = -1;
+ } else if (SLI4_PORT_TYPE_FC == sli4->port_type) {
+ rc = sli_fc_cqe_parse(sli4, cq, cqe, etype, q_id);
+ } else {
+ ocs_log_test(sli4->os, "implement CQE parsing type = %#x\n",
+ sli4->port_type);
+ rc = -1;
+ }
+
+ return rc;
+}
+
+/**
+ * @ingroup sli
+ * @brief Cause chip to enter an unrecoverable error state.
+ *
+ * @par Description
+ * Causes the chip to enter an unrecoverable error state. This is
+ * used when unexpected FW behavior is detected, so the FW can be
+ * halted from the driver as soon as the error is detected.
+ *
+ * @param sli4 SLI context.
+ * @param dump Generate dump as part of reset.
+ *
+ * @return Returns 0 if call completed correctly, or -1 if call failed (unsupported chip).
+ */
+int32_t sli_raise_ue(sli4_t *sli4, uint8_t dump)
+{
+#define FDD 2
+ if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(sli4)) {
+ switch(sli_get_asic_type(sli4)) {
+ case SLI4_ASIC_TYPE_BE3: {
+ sli_reg_write(sli4, SLI4_REG_SW_UE_CSR1, 0xffffffff);
+ sli_reg_write(sli4, SLI4_REG_SW_UE_CSR2, 0);
+ break;
+ }
+ case SLI4_ASIC_TYPE_SKYHAWK: {
+ uint32_t value;
+ value = ocs_config_read32(sli4->os, SLI4_SW_UE_REG);
+ ocs_config_write32(sli4->os, SLI4_SW_UE_REG, (value | (1U << 24)));
+ break;
+ }
+ default:
+ ocs_log_test(sli4->os, "invalid asic type %d\n", sli_get_asic_type(sli4));
+ return -1;
+ }
+ } else if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(sli4)) {
+ if (dump == FDD) {
+ sli_reg_write(sli4, SLI4_REG_SLIPORT_CONTROL, SLI4_SLIPORT_CONTROL_FDD | SLI4_SLIPORT_CONTROL_IP);
+ } else {
+ uint32_t value = SLI4_PHYDEV_CONTROL_FRST;
+ if (dump == 1) {
+ value |= SLI4_PHYDEV_CONTROL_DD;
+ }
+ sli_reg_write(sli4, SLI4_REG_PHYSDEV_CONTROL, value);
+ }
+ } else {
+ ocs_log_test(sli4->os, "invalid iftype=%d\n", sli_get_if_type(sli4));
+ return -1;
+ }
+ return 0;
+}
+
+/**
+ * @ingroup sli
+ * @brief Read the SLIPORT_STATUS register to check if a dump is present.
+ *
+ * @param sli4 SLI context.
+ *
+ * @return Returns 1 if a dump is present and ready, 0 if no dump is ready,
+ * 2 if a functional dump (FDP) is present, or -1 on failure (unsupported chip).
+ */
+int32_t sli_dump_is_ready(sli4_t *sli4)
+{
+ int32_t rc = 0;
+ uint32_t port_val;
+ uint32_t bmbx_val;
+ uint32_t uerr_lo;
+ uint32_t uerr_hi;
+ uint32_t uerr_mask_lo;
+ uint32_t uerr_mask_hi;
+
+ if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(sli4)) {
+ /* for iftype=0, dump ready when UE is encountered */
+ uerr_lo = sli_reg_read(sli4, SLI4_REG_UERR_STATUS_LO);
+ uerr_hi = sli_reg_read(sli4, SLI4_REG_UERR_STATUS_HI);
+ uerr_mask_lo = sli_reg_read(sli4, SLI4_REG_UERR_MASK_LO);
+ uerr_mask_hi = sli_reg_read(sli4, SLI4_REG_UERR_MASK_HI);
+ if ((uerr_lo & ~uerr_mask_lo) || (uerr_hi & ~uerr_mask_hi)) {
+ rc = 1;
+ }
+
+ } else if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(sli4)) {
+ /*
+ * Ensure that the port is ready AND the mailbox is
+ * ready before signaling that the dump is ready to go.
+ */
+ port_val = sli_reg_read(sli4, SLI4_REG_SLIPORT_STATUS);
+ bmbx_val = sli_reg_read(sli4, SLI4_REG_BMBX);
+
+ if ((bmbx_val & SLI4_BMBX_RDY) &&
+ SLI4_PORT_STATUS_READY(port_val)) {
+			if (SLI4_PORT_STATUS_DUMP_PRESENT(port_val)) {
+				rc = 1;
+			} else if (SLI4_PORT_STATUS_FDP_PRESENT(port_val)) {
+ rc = 2;
+ }
+ }
+ } else {
+ ocs_log_test(sli4->os, "invalid iftype=%d\n", sli_get_if_type(sli4));
+ return -1;
+ }
+ return rc;
+}
+
+/**
+ * @ingroup sli
+ * @brief Read the SLIPORT_STATUS register to check if a dump is present.
+ *
+ * @param sli4 SLI context.
+ *
+ * @return
+ * - 0 if call completed correctly and no dump is present.
+ * - 1 if call completed and dump is present.
+ * - -1 if call failed (unsupported chip).
+ */
+int32_t sli_dump_is_present(sli4_t *sli4)
+{
+ uint32_t val;
+ uint32_t ready;
+
+ if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(sli4)) {
+		ocs_log_test(sli4->os, "Function only supported for I/F type 2\n");
+ return -1;
+ }
+
+ /* If the chip is not ready, then there cannot be a dump */
+ ready = sli_wait_for_fw_ready(sli4, SLI4_INIT_PORT_DELAY_US);
+ if (!ready) {
+ return 0;
+ }
+
+ val = sli_reg_read(sli4, SLI4_REG_SLIPORT_STATUS);
+ if (UINT32_MAX == val) {
+ ocs_log_err(sli4->os, "error reading SLIPORT_STATUS\n");
+ return -1;
+ } else {
+ return ((val & SLI4_PORT_STATUS_DIP) ? 1 : 0);
+ }
+}
+
+/**
+ * @ingroup sli
+ * @brief Read the SLIPORT_STATUS register to check if the reset required is set.
+ *
+ * @param sli4 SLI context.
+ *
+ * @return
+ * - 0 if call completed correctly and reset is not required.
+ * - 1 if call completed and reset is required.
+ * - -1 if call failed.
+ */
+int32_t sli_reset_required(sli4_t *sli4)
+{
+ uint32_t val;
+
+ if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(sli4)) {
+ ocs_log_test(sli4->os, "reset required N/A for iftype 0\n");
+ return 0;
+ }
+
+ val = sli_reg_read(sli4, SLI4_REG_SLIPORT_STATUS);
+ if (UINT32_MAX == val) {
+ ocs_log_err(sli4->os, "error reading SLIPORT_STATUS\n");
+ return -1;
+ } else {
+ return ((val & SLI4_PORT_STATUS_RN) ? 1 : 0);
+ }
+}
+
+/**
+ * @ingroup sli
+ * @brief Read the SLIPORT_SEMAPHORE and SLIPORT_STATUS registers to check if
+ * the port status indicates that a FW error has occurred.
+ *
+ * @param sli4 SLI context.
+ *
+ * @return
+ * - 0 if call completed correctly and no FW error occurred.
+ * - > 0 which indicates that a FW error has occurred.
+ * - -1 if call failed.
+ */
+int32_t sli_fw_error_status(sli4_t *sli4)
+{
+ uint32_t sliport_semaphore;
+ int32_t rc = 0;
+
+ sliport_semaphore = sli_reg_read(sli4, SLI4_REG_SLIPORT_SEMAPHORE);
+ if (UINT32_MAX == sliport_semaphore) {
+ ocs_log_err(sli4->os, "error reading SLIPORT_SEMAPHORE register\n");
+ return 1;
+ }
+ rc = (SLI4_PORT_SEMAPHORE_IN_ERR(sliport_semaphore) ? 1 : 0);
+
+ if (rc == 0) {
+ if (SLI4_IF_TYPE_BE3_SKH_PF == sli4->if_type ||
+ (SLI4_IF_TYPE_BE3_SKH_VF == sli4->if_type)) {
+ uint32_t uerr_mask_lo, uerr_mask_hi;
+ uint32_t uerr_status_lo, uerr_status_hi;
+
+ uerr_mask_lo = sli_reg_read(sli4, SLI4_REG_UERR_MASK_LO);
+ uerr_mask_hi = sli_reg_read(sli4, SLI4_REG_UERR_MASK_HI);
+ uerr_status_lo = sli_reg_read(sli4, SLI4_REG_UERR_STATUS_LO);
+ uerr_status_hi = sli_reg_read(sli4, SLI4_REG_UERR_STATUS_HI);
+ if ((uerr_mask_lo & uerr_status_lo) != 0 ||
+ (uerr_mask_hi & uerr_status_hi) != 0) {
+ rc = 1;
+ }
+ } else if ((SLI4_IF_TYPE_LANCER_FC_ETH == sli4->if_type)) {
+ uint32_t sliport_status;
+
+ sliport_status = sli_reg_read(sli4, SLI4_REG_SLIPORT_STATUS);
+ rc = (SLI4_PORT_STATUS_ERROR(sliport_status) ? 1 : 0);
+ }
+ }
+ return rc;
+}
+
+/**
+ * @ingroup sli
+ * @brief Determine if the chip FW is in a ready state
+ *
+ * @param sli4 SLI context.
+ *
+ * @return
+ * - 0 if call completed correctly and FW is not ready.
+ * - 1 if call completed correctly and FW is ready.
+ * - -1 if call failed.
+ */
+int32_t
+sli_fw_ready(sli4_t *sli4)
+{
+ uint32_t val;
+ int32_t rc = -1;
+
+ /*
+ * Is firmware ready for operation? Check needed depends on IF_TYPE
+ */
+ if (SLI4_IF_TYPE_BE3_SKH_PF == sli4->if_type ||
+ SLI4_IF_TYPE_BE3_SKH_VF == sli4->if_type) {
+ val = sli_reg_read(sli4, SLI4_REG_SLIPORT_SEMAPHORE);
+ rc = ((SLI4_PORT_SEMAPHORE_STATUS_POST_READY ==
+ SLI4_PORT_SEMAPHORE_PORT(val)) &&
+ (!SLI4_PORT_SEMAPHORE_IN_ERR(val)) ? 1 : 0);
+ } else if (SLI4_IF_TYPE_LANCER_FC_ETH == sli4->if_type) {
+ val = sli_reg_read(sli4, SLI4_REG_SLIPORT_STATUS);
+ rc = (SLI4_PORT_STATUS_READY(val) ? 1 : 0);
+ }
+ return rc;
+}
+
+/**
+ * @ingroup sli
+ * @brief Determine if the link can be configured
+ *
+ * @param sli4 SLI context.
+ *
+ * @return
+ * - 0 if link is not configurable.
+ * - 1 if link is configurable.
+ */
+int32_t sli_link_is_configurable(sli4_t *sli)
+{
+ int32_t rc = 0;
+ /*
+ * Link config works on: Skyhawk and Lancer
+ * Link config does not work on: LancerG6
+ */
+
+ switch (sli_get_asic_type(sli)) {
+ case SLI4_ASIC_TYPE_SKYHAWK:
+ case SLI4_ASIC_TYPE_LANCER:
+ case SLI4_ASIC_TYPE_CORSAIR:
+ rc = 1;
+ break;
+ case SLI4_ASIC_TYPE_LANCERG6:
+ case SLI4_ASIC_TYPE_BE3:
+ default:
+ rc = 0;
+ break;
+ }
+
+ return rc;
+}
+
+/* vim: set noexpandtab textwidth=120: */
+
+extern const char *SLI_QNAME[];
+extern const sli4_reg_t regmap[SLI4_REG_MAX][SLI4_MAX_IF_TYPES];
+
+/**
+ * @ingroup sli_fc
+ * @brief Write an FCOE_WQ_CREATE command.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the command.
+ * @param size Buffer size, in bytes.
+ * @param qmem DMA memory for the queue.
+ * @param cq_id Associated CQ_ID.
+ * @param ulp The ULP to bind
+ *
+ * @note This creates a Version 0 message.
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_fcoe_wq_create(sli4_t *sli4, void *buf, size_t size,
+ ocs_dma_t *qmem, uint16_t cq_id, uint16_t ulp)
+{
+ sli4_req_fcoe_wq_create_t *wq = NULL;
+ uint32_t sli_config_off = 0;
+ uint32_t p;
+ uintptr_t addr;
+
+ if (SLI4_PORT_TYPE_FC == sli4->port_type) {
+ uint32_t payload_size;
+
+ /* Payload length must accommodate both request and response */
+ payload_size = max(sizeof(sli4_req_fcoe_wq_create_t),
+ sizeof(sli4_res_common_create_queue_t));
+
+ sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size,
+ NULL);
+ }
+ wq = (sli4_req_fcoe_wq_create_t *)((uint8_t *)buf + sli_config_off);
+
+ wq->hdr.opcode = SLI4_OPC_FCOE_WQ_CREATE;
+ wq->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
+ wq->hdr.request_length = sizeof(sli4_req_fcoe_wq_create_t) -
+ sizeof(sli4_req_hdr_t);
+ /* valid values for number of pages: 1-4 (sec 4.5.1) */
+ wq->num_pages = sli_page_count(qmem->size, SLI_PAGE_SIZE);
+ if (!wq->num_pages || (wq->num_pages > SLI4_FCOE_WQ_CREATE_V0_MAX_PAGES)) {
+ return 0;
+ }
+
+ wq->cq_id = cq_id;
+
+ if (sli4->config.dual_ulp_capable) {
+ wq->dua = 1;
+ wq->bqu = 1;
+ wq->ulp = ulp;
+ }
+
+ for (p = 0, addr = qmem->phys;
+ p < wq->num_pages;
+ p++, addr += SLI_PAGE_SIZE) {
+ wq->page_physical_address[p].low = ocs_addr32_lo(addr);
+ wq->page_physical_address[p].high = ocs_addr32_hi(addr);
+ }
+
+ return(sli_config_off + sizeof(sli4_req_fcoe_wq_create_t));
+}
+
+/**
+ * @ingroup sli_fc
+ * @brief Write an FCOE_WQ_CREATE_V1 command.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the command.
+ * @param size Buffer size, in bytes.
+ * @param qmem DMA memory for the queue.
+ * @param cq_id Associated CQ_ID.
+ * @param ignored This parameter carries the ULP for the WQ (ignored for V1).
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_fcoe_wq_create_v1(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *qmem,
+ uint16_t cq_id, uint16_t ignored)
+{
+ sli4_req_fcoe_wq_create_v1_t *wq = NULL;
+ uint32_t sli_config_off = 0;
+ uint32_t p;
+ uintptr_t addr;
+ uint32_t page_size = 0;
+ uint32_t page_bytes = 0;
+ uint32_t n_wqe = 0;
+
+ if (SLI4_PORT_TYPE_FC == sli4->port_type) {
+ uint32_t payload_size;
+
+ /* Payload length must accommodate both request and response */
+ payload_size = max(sizeof(sli4_req_fcoe_wq_create_v1_t),
+ sizeof(sli4_res_common_create_queue_t));
+
+ sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size,
+ NULL);
+ }
+ wq = (sli4_req_fcoe_wq_create_v1_t *)((uint8_t *)buf + sli_config_off);
+
+ wq->hdr.opcode = SLI4_OPC_FCOE_WQ_CREATE;
+ wq->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
+ wq->hdr.request_length = sizeof(sli4_req_fcoe_wq_create_v1_t) -
+ sizeof(sli4_req_hdr_t);
+ wq->hdr.version = 1;
+
+ n_wqe = qmem->size / sli4->config.wqe_size;
+
+	/* This heuristic for determining the page size is simplistic,
+	 * but could be made more sophisticated.
+	 */
+ switch (qmem->size) {
+ case 4096:
+ case 8192:
+ case 16384:
+ case 32768:
+ page_size = 1;
+ break;
+ case 65536:
+ page_size = 2;
+ break;
+ case 131072:
+ page_size = 4;
+ break;
+ case 262144:
+ page_size = 8;
+ break;
+ case 524288:
+ page_size = 10;
+ break;
+ default:
+ return 0;
+ }
+ page_bytes = page_size * SLI_PAGE_SIZE;
+
+ /* valid values for number of pages: 1-8 */
+ wq->num_pages = sli_page_count(qmem->size, page_bytes);
+ if (!wq->num_pages || (wq->num_pages > SLI4_FCOE_WQ_CREATE_V1_MAX_PAGES)) {
+ return 0;
+ }
+
+ wq->cq_id = cq_id;
+
+ wq->page_size = page_size;
+
+ if (sli4->config.wqe_size == SLI4_WQE_EXT_BYTES) {
+ wq->wqe_size = SLI4_WQE_EXT_SIZE;
+ } else {
+ wq->wqe_size = SLI4_WQE_SIZE;
+ }
+
+ wq->wqe_count = n_wqe;
+
+ for (p = 0, addr = qmem->phys;
+ p < wq->num_pages;
+ p++, addr += page_bytes) {
+ wq->page_physical_address[p].low = ocs_addr32_lo(addr);
+ wq->page_physical_address[p].high = ocs_addr32_hi(addr);
+ }
+
+ return(sli_config_off + sizeof(sli4_req_fcoe_wq_create_v1_t));
+}
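+
+/*
+ * Worked example of the page-size heuristic above: a 64 KB queue
+ * (qmem->size == 65536) selects page_size = 2, so page_bytes = 2 * 4096 =
+ * 8192 and num_pages = 65536 / 8192 = 8, the maximum for WQ_CREATE_V1.
+ */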
+
+/**
+ * @ingroup sli_fc
+ * @brief Write an FCOE_WQ_DESTROY command.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the command.
+ * @param size Buffer size, in bytes.
+ * @param wq_id WQ_ID.
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_fcoe_wq_destroy(sli4_t *sli4, void *buf, size_t size, uint16_t wq_id)
+{
+ sli4_req_fcoe_wq_destroy_t *wq = NULL;
+ uint32_t sli_config_off = 0;
+
+ if (SLI4_PORT_TYPE_FC == sli4->port_type) {
+ uint32_t payload_size;
+
+ /* Payload length must accommodate both request and response */
+ payload_size = max(sizeof(sli4_req_fcoe_wq_destroy_t),
+ sizeof(sli4_res_hdr_t));
+
+ sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size,
+ NULL);
+ }
+ wq = (sli4_req_fcoe_wq_destroy_t *)((uint8_t *)buf + sli_config_off);
+
+ wq->hdr.opcode = SLI4_OPC_FCOE_WQ_DESTROY;
+ wq->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
+ wq->hdr.request_length = sizeof(sli4_req_fcoe_wq_destroy_t) -
+ sizeof(sli4_req_hdr_t);
+
+ wq->wq_id = wq_id;
+
+ return(sli_config_off + sizeof(sli4_req_fcoe_wq_destroy_t));
+}
+
+/**
+ * @ingroup sli_fc
+ * @brief Write an FCOE_POST_SGL_PAGES command.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the command.
+ * @param size Buffer size, in bytes.
+ * @param xri starting XRI
+ * @param xri_count Number of XRIs to post.
+ * @param page0 First SGL memory page.
+ * @param page1 Second SGL memory page (optional).
+ * @param dma DMA buffer for a non-embedded mailbox command (optional).
+ *
+ * @note If a non-embedded mailbox command is used, the dma buffer must be
+ * at least (32 + xri_count * 16) bytes long.
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_fcoe_post_sgl_pages(sli4_t *sli4, void *buf, size_t size,
+ uint16_t xri, uint32_t xri_count, ocs_dma_t *page0[], ocs_dma_t *page1[], ocs_dma_t *dma)
+{
+ sli4_req_fcoe_post_sgl_pages_t *post = NULL;
+ uint32_t sli_config_off = 0;
+ uint32_t i;
+
+ if (SLI4_PORT_TYPE_FC == sli4->port_type) {
+ uint32_t payload_size;
+
+ /* Payload length must accommodate both request and response */
+ payload_size = max(sizeof(sli4_req_fcoe_post_sgl_pages_t),
+ sizeof(sli4_res_hdr_t));
+
+ sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size,
+ dma);
+ }
+ if (dma) {
+ post = dma->virt;
+ ocs_memset(post, 0, dma->size);
+ } else {
+ post = (sli4_req_fcoe_post_sgl_pages_t *)((uint8_t *)buf + sli_config_off);
+ }
+
+ post->hdr.opcode = SLI4_OPC_FCOE_POST_SGL_PAGES;
+ post->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
+ /* payload size calculation
+ * 4 = xri_start + xri_count
+ * xri_count = # of XRI's registered
+ * sizeof(uint64_t) = physical address size
+ * 2 = # of physical addresses per page set
+ */
+ post->hdr.request_length = 4 + (xri_count * (sizeof(uint64_t) * 2));
+
+ post->xri_start = xri;
+ post->xri_count = xri_count;
+
+ for (i = 0; i < xri_count; i++) {
+ post->page_set[i].page0_low = ocs_addr32_lo(page0[i]->phys);
+ post->page_set[i].page0_high = ocs_addr32_hi(page0[i]->phys);
+ }
+
+ if (page1) {
+ for (i = 0; i < xri_count; i++) {
+ post->page_set[i].page1_low = ocs_addr32_lo(page1[i]->phys);
+ post->page_set[i].page1_high = ocs_addr32_hi(page1[i]->phys);
+ }
+ }
+
+ return dma ? sli_config_off : (sli_config_off + sizeof(sli4_req_fcoe_post_sgl_pages_t));
+}
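+
+/*
+ * Sizing example for the non-embedded form, following the note above: the
+ * caller's dma buffer must hold at least 32 + xri_count * 16 bytes, e.g.
+ * 32 + 32 * 16 = 544 bytes when posting SGLs for 32 XRIs.
+ */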
+
+/**
+ * @ingroup sli_fc
+ * @brief Write an FCOE_RQ_CREATE command.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the command.
+ * @param size Buffer size, in bytes.
+ * @param qmem DMA memory for the queue.
+ * @param cq_id Associated CQ_ID.
+ * @param ulp This parameter carries the ULP for the RQ
+ * @param buffer_size Buffer size pointed to by each RQE.
+ *
+ * @note This creates a Version 0 message.
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_fcoe_rq_create(sli4_t *sli4, void *buf, size_t size,
+ ocs_dma_t *qmem, uint16_t cq_id, uint16_t ulp, uint16_t buffer_size)
+{
+ sli4_req_fcoe_rq_create_t *rq = NULL;
+ uint32_t sli_config_off = 0;
+ uint32_t p;
+ uintptr_t addr;
+
+ if (SLI4_PORT_TYPE_FC == sli4->port_type) {
+ uint32_t payload_size;
+
+ /* Payload length must accommodate both request and response */
+ payload_size = max(sizeof(sli4_req_fcoe_rq_create_t),
+ sizeof(sli4_res_common_create_queue_t));
+
+ sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size,
+ NULL);
+ }
+ rq = (sli4_req_fcoe_rq_create_t *)((uint8_t *)buf + sli_config_off);
+
+ rq->hdr.opcode = SLI4_OPC_FCOE_RQ_CREATE;
+ rq->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
+ rq->hdr.request_length = sizeof(sli4_req_fcoe_rq_create_t) -
+ sizeof(sli4_req_hdr_t);
+ /* valid values for number of pages: 1-8 (sec 4.5.6) */
+ rq->num_pages = sli_page_count(qmem->size, SLI_PAGE_SIZE);
+ if (!rq->num_pages || (rq->num_pages > SLI4_FCOE_RQ_CREATE_V0_MAX_PAGES)) {
+ ocs_log_test(sli4->os, "num_pages %d not valid\n", rq->num_pages);
+ return 0;
+ }
+
+ /*
+ * RQE count is the log base 2 of the total number of entries
+ */
+ rq->rqe_count = ocs_lg2(qmem->size / SLI4_FCOE_RQE_SIZE);
+
+ if ((buffer_size < SLI4_FCOE_RQ_CREATE_V0_MIN_BUF_SIZE) ||
+ (buffer_size > SLI4_FCOE_RQ_CREATE_V0_MAX_BUF_SIZE)) {
+ ocs_log_err(sli4->os, "buffer_size %d out of range (%d-%d)\n",
+ buffer_size,
+ SLI4_FCOE_RQ_CREATE_V0_MIN_BUF_SIZE,
+ SLI4_FCOE_RQ_CREATE_V0_MAX_BUF_SIZE);
+ return -1;
+ }
+ rq->buffer_size = buffer_size;
+
+ rq->cq_id = cq_id;
+
+ if (sli4->config.dual_ulp_capable) {
+ rq->dua = 1;
+ rq->bqu = 1;
+ rq->ulp = ulp;
+ }
+
+ for (p = 0, addr = qmem->phys;
+ p < rq->num_pages;
+ p++, addr += SLI_PAGE_SIZE) {
+ rq->page_physical_address[p].low = ocs_addr32_lo(addr);
+ rq->page_physical_address[p].high = ocs_addr32_hi(addr);
+ }
+
+ return(sli_config_off + sizeof(sli4_req_fcoe_rq_create_t));
+}
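+
+/*
+ * Example of the V0 rqe_count encoding: assuming the usual 8-byte RQE
+ * (SLI4_FCOE_RQE_SIZE == 8 is an assumption here), a 4 KB queue holds
+ * 4096 / 8 = 512 entries, encoded as ocs_lg2(512) = 9.
+ */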
+
+/**
+ * @ingroup sli_fc
+ * @brief Write an FCOE_RQ_CREATE_V1 command.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the command.
+ * @param size Buffer size, in bytes.
+ * @param qmem DMA memory for the queue.
+ * @param cq_id Associated CQ_ID.
+ * @param ulp This parameter carries the ULP for RQ (ignored for V1)
+ * @param buffer_size Buffer size pointed to by each RQE.
+ *
+ * @note This creates a Version 1 message.
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_fcoe_rq_create_v1(sli4_t *sli4, void *buf, size_t size,
+ ocs_dma_t *qmem, uint16_t cq_id, uint16_t ulp,
+ uint16_t buffer_size)
+{
+ sli4_req_fcoe_rq_create_v1_t *rq = NULL;
+ uint32_t sli_config_off = 0;
+ uint32_t p;
+ uintptr_t addr;
+
+ if (SLI4_PORT_TYPE_FC == sli4->port_type) {
+ uint32_t payload_size;
+
+ /* Payload length must accommodate both request and response */
+ payload_size = max(sizeof(sli4_req_fcoe_rq_create_v1_t),
+ sizeof(sli4_res_common_create_queue_t));
+
+ sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size,
+ NULL);
+ }
+ rq = (sli4_req_fcoe_rq_create_v1_t *)((uint8_t *)buf + sli_config_off);
+
+ rq->hdr.opcode = SLI4_OPC_FCOE_RQ_CREATE;
+ rq->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
+ rq->hdr.request_length = sizeof(sli4_req_fcoe_rq_create_v1_t) -
+ sizeof(sli4_req_hdr_t);
+ rq->hdr.version = 1;
+
+ /* Disable "no buffer warnings" to avoid Lancer bug */
+ rq->dnb = TRUE;
+
+ /* valid values for number of pages: 1-8 (sec 4.5.6) */
+ rq->num_pages = sli_page_count(qmem->size, SLI_PAGE_SIZE);
+ if (!rq->num_pages || (rq->num_pages > SLI4_FCOE_RQ_CREATE_V1_MAX_PAGES)) {
+ ocs_log_test(sli4->os, "num_pages %d not valid, max %d\n",
+ rq->num_pages, SLI4_FCOE_RQ_CREATE_V1_MAX_PAGES);
+ return 0;
+ }
+
+ /*
+	 * RQE count is the total number of entries (note: not lg2(# entries)).
+ */
+ rq->rqe_count = qmem->size / SLI4_FCOE_RQE_SIZE;
+
+ rq->rqe_size = SLI4_FCOE_RQE_SIZE_8;
+
+ rq->page_size = SLI4_FCOE_RQ_PAGE_SIZE_4096;
+
+ if ((buffer_size < sli4->config.rq_min_buf_size) ||
+ (buffer_size > sli4->config.rq_max_buf_size)) {
+ ocs_log_err(sli4->os, "buffer_size %d out of range (%d-%d)\n",
+ buffer_size,
+ sli4->config.rq_min_buf_size,
+ sli4->config.rq_max_buf_size);
+ return -1;
+ }
+ rq->buffer_size = buffer_size;
+
+ rq->cq_id = cq_id;
+
+ for (p = 0, addr = qmem->phys;
+ p < rq->num_pages;
+ p++, addr += SLI_PAGE_SIZE) {
+ rq->page_physical_address[p].low = ocs_addr32_lo(addr);
+ rq->page_physical_address[p].high = ocs_addr32_hi(addr);
+ }
+
+ return(sli_config_off + sizeof(sli4_req_fcoe_rq_create_v1_t));
+}
+
+/**
+ * @ingroup sli_fc
+ * @brief Write an FCOE_RQ_DESTROY command.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the command.
+ * @param size Buffer size, in bytes.
+ * @param rq_id RQ_ID.
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_fcoe_rq_destroy(sli4_t *sli4, void *buf, size_t size, uint16_t rq_id)
+{
+ sli4_req_fcoe_rq_destroy_t *rq = NULL;
+ uint32_t sli_config_off = 0;
+
+ if (SLI4_PORT_TYPE_FC == sli4->port_type) {
+ uint32_t payload_size;
+
+ /* Payload length must accommodate both request and response */
+ payload_size = max(sizeof(sli4_req_fcoe_rq_destroy_t),
+ sizeof(sli4_res_hdr_t));
+
+ sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size,
+ NULL);
+ }
+ rq = (sli4_req_fcoe_rq_destroy_t *)((uint8_t *)buf + sli_config_off);
+
+ rq->hdr.opcode = SLI4_OPC_FCOE_RQ_DESTROY;
+ rq->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
+ rq->hdr.request_length = sizeof(sli4_req_fcoe_rq_destroy_t) -
+ sizeof(sli4_req_hdr_t);
+
+ rq->rq_id = rq_id;
+
+ return(sli_config_off + sizeof(sli4_req_fcoe_rq_destroy_t));
+}
+
+/**
+ * @ingroup sli_fc
+ * @brief Write an FCOE_READ_FCF_TABLE command.
+ *
+ * @note
+ * The response of this command exceeds the size of an embedded
+ * command and requires an external buffer with DMA capability to hold the results.
+ * The caller should allocate the ocs_dma_t structure / memory.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the command.
+ * @param size Buffer size, in bytes.
+ * @param dma Pointer to DMA memory structure. This is allocated by the caller.
+ * @param index FCF table index to retrieve.
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_fcoe_read_fcf_table(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *dma, uint16_t index)
+{
+ sli4_req_fcoe_read_fcf_table_t *read_fcf = NULL;
+
+ if (SLI4_PORT_TYPE_FC != sli4->port_type) {
+ ocs_log_test(sli4->os, "FCOE_READ_FCF_TABLE only supported on FC\n");
+ return -1;
+ }
+
+ read_fcf = dma->virt;
+
+ ocs_memset(read_fcf, 0, sizeof(sli4_req_fcoe_read_fcf_table_t));
+
+ read_fcf->hdr.opcode = SLI4_OPC_FCOE_READ_FCF_TABLE;
+ read_fcf->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
+ read_fcf->hdr.request_length = dma->size -
+ sizeof(sli4_req_fcoe_read_fcf_table_t);
+ read_fcf->fcf_index = index;
+
+ return sli_cmd_sli_config(sli4, buf, size, 0, dma);
+}
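+
+/*
+ * Usage sketch, hedged: the caller allocates a dma buffer large enough for
+ * one FCF table entry, issues the command, and walks the table by
+ * re-issuing with the next_index value from each response until the index
+ * reads back as 0xffff.
+ */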
+
+/**
+ * @ingroup sli_fc
+ * @brief Write an FCOE_POST_HDR_TEMPLATES command.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the command.
+ * @param size Buffer size, in bytes.
+ * @param dma Pointer to DMA memory structure. This is allocated by the caller.
+ * @param rpi Starting RPI index for the header templates.
+ * @param payload_dma Pointer to DMA memory used to hold larger descriptor counts.
+ *
+ * @return Returns the number of bytes written.
+ */
+int32_t
+sli_cmd_fcoe_post_hdr_templates(sli4_t *sli4, void *buf, size_t size,
+ ocs_dma_t *dma, uint16_t rpi, ocs_dma_t *payload_dma)
+{
+ sli4_req_fcoe_post_hdr_templates_t *template = NULL;
+ uint32_t sli_config_off = 0;
+ uintptr_t phys = 0;
+ uint32_t i = 0;
+ uint32_t page_count;
+ uint32_t payload_size;
+
+ page_count = sli_page_count(dma->size, SLI_PAGE_SIZE);
+
+ payload_size = sizeof(sli4_req_fcoe_post_hdr_templates_t) +
+ page_count * sizeof(sli4_physical_page_descriptor_t);
+
+ if (page_count > 16) {
+		/* We can't fit more than 16 descriptors into an embedded
+		 * mailbox command; it has to be non-embedded. */
+ if (ocs_dma_alloc(sli4->os, payload_dma, payload_size, 4)) {
+ ocs_log_err(sli4->os, "mailbox payload memory allocation fail\n");
+ return 0;
+ }
+ sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size, payload_dma);
+ template = (sli4_req_fcoe_post_hdr_templates_t *)payload_dma->virt;
+ } else {
+ sli_config_off = sli_cmd_sli_config(sli4, buf, size, payload_size, NULL);
+ template = (sli4_req_fcoe_post_hdr_templates_t *)((uint8_t *)buf + sli_config_off);
+ }
+
+ if (UINT16_MAX == rpi) {
+ rpi = sli4->config.extent[SLI_RSRC_FCOE_RPI].base[0];
+ }
+
+ template->hdr.opcode = SLI4_OPC_FCOE_POST_HDR_TEMPLATES;
+ template->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
+ template->hdr.request_length = sizeof(sli4_req_fcoe_post_hdr_templates_t) -
+ sizeof(sli4_req_hdr_t);
+
+ template->rpi_offset = rpi;
+ template->page_count = page_count;
+ phys = dma->phys;
+ for (i = 0; i < template->page_count; i++) {
+ template->page_descriptor[i].low = ocs_addr32_lo(phys);
+ template->page_descriptor[i].high = ocs_addr32_hi(phys);
+
+ phys += SLI_PAGE_SIZE;
+ }
+
+ return(sli_config_off + payload_size);
+}
+
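+/**
+ * @ingroup sli_fc
+ * @brief Write an FCOE_REDISCOVER_FCF command.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the command.
+ * @param size Buffer size, in bytes.
+ * @param index FCF table index to rediscover; UINT16_MAX sets fcf_count to
+ * zero instead of naming a specific index.
+ *
+ * @return Returns the number of bytes written.
+ */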
+int32_t
+sli_cmd_fcoe_rediscover_fcf(sli4_t *sli4, void *buf, size_t size, uint16_t index)
+{
+ sli4_req_fcoe_rediscover_fcf_t *redisc = NULL;
+ uint32_t sli_config_off = 0;
+
+ sli_config_off = sli_cmd_sli_config(sli4, buf, size,
+ sizeof(sli4_req_fcoe_rediscover_fcf_t),
+ NULL);
+
+ redisc = (sli4_req_fcoe_rediscover_fcf_t *)((uint8_t *)buf + sli_config_off);
+
+ redisc->hdr.opcode = SLI4_OPC_FCOE_REDISCOVER_FCF;
+ redisc->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
+ redisc->hdr.request_length = sizeof(sli4_req_fcoe_rediscover_fcf_t) -
+ sizeof(sli4_req_hdr_t);
+
+ if (index == UINT16_MAX) {
+ redisc->fcf_count = 0;
+ } else {
+ redisc->fcf_count = 1;
+ redisc->fcf_index[0] = index;
+ }
+
+ return(sli_config_off + sizeof(sli4_req_fcoe_rediscover_fcf_t));
+}
+
+/**
+ * @ingroup sli_fc
+ * @brief Write an ABORT_WQE work queue entry.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the WQE.
+ * @param size Buffer size, in bytes.
+ * @param type Abort type: by XRI, abort tag, or request tag.
+ * @param send_abts Boolean to cause the hardware to automatically generate an ABTS.
+ * @param ids ID of IOs to abort.
+ * @param mask Mask applied to the ID values to abort.
+ * @param tag Tag value associated with this abort.
+ * @param cq_id The id of the completion queue where the WQE response is sent.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+int32_t
+sli_abort_wqe(sli4_t *sli4, void *buf, size_t size, sli4_abort_type_e type, uint32_t send_abts,
+ uint32_t ids, uint32_t mask, uint16_t tag, uint16_t cq_id)
+{
+ sli4_abort_wqe_t *abort = buf;
+
+ ocs_memset(buf, 0, size);
+
+ switch (type) {
+ case SLI_ABORT_XRI:
+ abort->criteria = SLI4_ABORT_CRITERIA_XRI_TAG;
+ if (mask) {
+			ocs_log_warn(sli4->os, "warning: non-zero mask %#x when aborting XRI %#x\n", mask, ids);
+ mask = 0;
+ }
+ break;
+ case SLI_ABORT_ABORT_ID:
+ abort->criteria = SLI4_ABORT_CRITERIA_ABORT_TAG;
+ break;
+ case SLI_ABORT_REQUEST_ID:
+ abort->criteria = SLI4_ABORT_CRITERIA_REQUEST_TAG;
+ break;
+ default:
+ ocs_log_test(sli4->os, "unsupported type %#x\n", type);
+ return -1;
+ }
+
+ abort->ia = send_abts ? 0 : 1;
+
+ /* Suppress ABTS retries */
+ abort->ir = 1;
+
+ abort->t_mask = mask;
+ abort->t_tag = ids;
+ abort->command = SLI4_WQE_ABORT;
+ abort->request_tag = tag;
+ abort->qosd = TRUE;
+ abort->cq_id = cq_id;
+ abort->cmd_type = SLI4_CMD_ABORT_WQE;
+
+ return 0;
+}
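+
+/*
+ * Illustrative sketch, hedged: abort one outstanding exchange by XRI and
+ * let the port generate an ABTS.  The io_xri, abort_tag, and cq_id names
+ * are placeholders:
+ *
+ *	sli_abort_wqe(sli4, wqebuf, sli4->config.wqe_size, SLI_ABORT_XRI,
+ *	    TRUE, io_xri, 0, abort_tag, cq_id);
+ *
+ * A zero mask is required when aborting by XRI; a non-zero mask is logged
+ * and cleared by the routine above.
+ */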
+
+/**
+ * @ingroup sli_fc
+ * @brief Write an ELS_REQUEST64_WQE work queue entry.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the WQE.
+ * @param size Buffer size, in bytes.
+ * @param sgl DMA memory for the ELS request.
+ * @param req_type ELS request type.
+ * @param req_len Length of ELS request in bytes.
+ * @param max_rsp_len Max length of ELS response in bytes.
+ * @param timeout Time, in seconds, before an IO times out. Zero means 2 * R_A_TOV.
+ * @param xri XRI for this exchange.
+ * @param tag IO tag value.
+ * @param cq_id The id of the completion queue where the WQE response is sent.
+ * @param rnode Destination of ELS request (that is, the remote node).
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+int32_t
+sli_els_request64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl, uint8_t req_type,
+ uint32_t req_len, uint32_t max_rsp_len, uint8_t timeout,
+ uint16_t xri, uint16_t tag, uint16_t cq_id, ocs_remote_node_t *rnode)
+{
+ sli4_els_request64_wqe_t *els = buf;
+ sli4_sge_t *sge = sgl->virt;
+ uint8_t is_fabric = FALSE;
+
+ ocs_memset(buf, 0, size);
+
+ if (sli4->config.sgl_pre_registered) {
+ els->xbl = FALSE;
+
+ els->dbde = TRUE;
+ els->els_request_payload.bde_type = SLI4_BDE_TYPE_BDE_64;
+
+ els->els_request_payload.buffer_length = req_len;
+ els->els_request_payload.u.data.buffer_address_low = sge[0].buffer_address_low;
+ els->els_request_payload.u.data.buffer_address_high = sge[0].buffer_address_high;
+ } else {
+ els->xbl = TRUE;
+
+ els->els_request_payload.bde_type = SLI4_BDE_TYPE_BLP;
+
+ els->els_request_payload.buffer_length = 2 * sizeof(sli4_sge_t);
+ els->els_request_payload.u.blp.sgl_segment_address_low = ocs_addr32_lo(sgl->phys);
+ els->els_request_payload.u.blp.sgl_segment_address_high = ocs_addr32_hi(sgl->phys);
+ }
+
+ els->els_request_payload_length = req_len;
+ els->max_response_payload_length = max_rsp_len;
+
+ els->xri_tag = xri;
+ els->timer = timeout;
+ els->class = SLI4_ELS_REQUEST64_CLASS_3;
+
+ els->command = SLI4_WQE_ELS_REQUEST64;
+
+ els->request_tag = tag;
+
+ if (rnode->node_group) {
+ els->hlm = TRUE;
+ els->remote_id = rnode->fc_id & 0x00ffffff;
+ }
+
+ els->iod = SLI4_ELS_REQUEST64_DIR_READ;
+
+ els->qosd = TRUE;
+
+ /* figure out the ELS_ID value from the request buffer */
+
+ switch (req_type) {
+ case FC_ELS_CMD_LOGO:
+ els->els_id = SLI4_ELS_REQUEST64_LOGO;
+ if (rnode->attached) {
+ els->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI;
+ els->context_tag = rnode->indicator;
+ } else {
+ els->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI;
+ els->context_tag = rnode->sport->indicator;
+ }
+ if (FC_ADDR_FABRIC == rnode->fc_id) {
+ is_fabric = TRUE;
+ }
+ break;
+ case FC_ELS_CMD_FDISC:
+ if (FC_ADDR_FABRIC == rnode->fc_id) {
+ is_fabric = TRUE;
+ }
+ if (0 == rnode->sport->fc_id) {
+ els->els_id = SLI4_ELS_REQUEST64_FDISC;
+ is_fabric = TRUE;
+ } else {
+ els->els_id = SLI4_ELS_REQUEST64_OTHER;
+ }
+ els->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI;
+ els->context_tag = rnode->sport->indicator;
+ els->sp = TRUE;
+ break;
+ case FC_ELS_CMD_FLOGI:
+ els->els_id = SLI4_ELS_REQUEST64_FLOGIN;
+ is_fabric = TRUE;
+ if (SLI4_IF_TYPE_BE3_SKH_PF == sli4->if_type) {
+ if (!rnode->sport->domain) {
+ ocs_log_test(sli4->os, "invalid domain handle\n");
+ return -1;
+ }
+ /*
+ * IF_TYPE 0 skips INIT_VFI/INIT_VPI and therefore must use the
+ * FCFI here
+ */
+ els->ct = SLI4_ELS_REQUEST64_CONTEXT_FCFI;
+ els->context_tag = rnode->sport->domain->fcf_indicator;
+ els->sp = TRUE;
+ } else {
+ els->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI;
+ els->context_tag = rnode->sport->indicator;
+
+ /*
+ * Set SP here ... we haven't done a REG_VPI yet
+ * TODO: need to maybe not set this when we have
+ * completed VFI/VPI registrations ...
+ *
+ * Use the FC_ID of the SPORT if it has been allocated, otherwise
+ * use an S_ID of zero.
+ */
+ els->sp = TRUE;
+ if (rnode->sport->fc_id != UINT32_MAX) {
+ els->sid = rnode->sport->fc_id;
+ }
+ }
+ break;
+ case FC_ELS_CMD_PLOGI:
+ els->els_id = SLI4_ELS_REQUEST64_PLOGI;
+ els->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI;
+ els->context_tag = rnode->sport->indicator;
+ break;
+ case FC_ELS_CMD_SCR:
+ els->els_id = SLI4_ELS_REQUEST64_OTHER;
+ els->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI;
+ els->context_tag = rnode->sport->indicator;
+ break;
+ default:
+ els->els_id = SLI4_ELS_REQUEST64_OTHER;
+ if (rnode->attached) {
+ els->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI;
+ els->context_tag = rnode->indicator;
+ } else {
+ els->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI;
+ els->context_tag = rnode->sport->indicator;
+ }
+ break;
+ }
+
+ if (is_fabric) {
+ els->cmd_type = SLI4_ELS_REQUEST64_CMD_FABRIC;
+ } else {
+ els->cmd_type = SLI4_ELS_REQUEST64_CMD_NON_FABRIC;
+ }
+
+ els->cq_id = cq_id;
+
+ if (SLI4_ELS_REQUEST64_CONTEXT_RPI != els->ct) {
+ els->remote_id = rnode->fc_id;
+ }
+ if (SLI4_ELS_REQUEST64_CONTEXT_VPI == els->ct) {
+ els->temporary_rpi = rnode->indicator;
+ }
+
+ return 0;
+}
+
+/**
+ * @ingroup sli_fc
+ * @brief Write an FCP_ICMND64_WQE work queue entry.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the WQE.
+ * @param size Buffer size, in bytes.
+ * @param sgl DMA memory for the scatter gather list.
+ * @param xri XRI for this exchange.
+ * @param tag IO tag value.
+ * @param cq_id The id of the completion queue where the WQE response is sent.
+ * @param rpi remote node indicator (RPI)
+ * @param rnode Destination of request (that is, the remote node).
+ * @param timeout Time, in seconds, before an IO times out. Zero means no timeout.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+int32_t
+sli_fcp_icmnd64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl,
+ uint16_t xri, uint16_t tag, uint16_t cq_id,
+ uint32_t rpi, ocs_remote_node_t *rnode, uint8_t timeout)
+{
+ sli4_fcp_icmnd64_wqe_t *icmnd = buf;
+ sli4_sge_t *sge = NULL;
+
+ ocs_memset(buf, 0, size);
+
+ if (!sgl || !sgl->virt) {
+ ocs_log_err(sli4->os, "bad parameter sgl=%p virt=%p\n",
+ sgl, sgl ? sgl->virt : NULL);
+ return -1;
+ }
+ sge = sgl->virt;
+
+ if (sli4->config.sgl_pre_registered) {
+ icmnd->xbl = FALSE;
+
+ icmnd->dbde = TRUE;
+ icmnd->bde.bde_type = SLI4_BDE_TYPE_BDE_64;
+
+ icmnd->bde.buffer_length = sge[0].buffer_length;
+ icmnd->bde.u.data.buffer_address_low = sge[0].buffer_address_low;
+ icmnd->bde.u.data.buffer_address_high = sge[0].buffer_address_high;
+ } else {
+ icmnd->xbl = TRUE;
+
+ icmnd->bde.bde_type = SLI4_BDE_TYPE_BLP;
+
+ icmnd->bde.buffer_length = sgl->size;
+ icmnd->bde.u.blp.sgl_segment_address_low = ocs_addr32_lo(sgl->phys);
+ icmnd->bde.u.blp.sgl_segment_address_high = ocs_addr32_hi(sgl->phys);
+ }
+
+ icmnd->payload_offset_length = sge[0].buffer_length + sge[1].buffer_length;
+ icmnd->xri_tag = xri;
+ icmnd->context_tag = rpi;
+ icmnd->timer = timeout;
+
+ icmnd->pu = 2; /* WQE word 4 contains read transfer length */
+ icmnd->class = SLI4_ELS_REQUEST64_CLASS_3;
+ icmnd->command = SLI4_WQE_FCP_ICMND64;
+ icmnd->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI;
+
+ icmnd->abort_tag = xri;
+
+ icmnd->request_tag = tag;
+ icmnd->len_loc = 3;
+ if (rnode->node_group) {
+ icmnd->hlm = TRUE;
+ icmnd->remote_n_port_id = rnode->fc_id & 0x00ffffff;
+ }
+ if (((ocs_node_t *)rnode->node)->fcp2device) {
+ icmnd->erp = TRUE;
+ }
+ icmnd->cmd_type = SLI4_CMD_FCP_ICMND64_WQE;
+ icmnd->cq_id = cq_id;
+
+ return 0;
+}
+
+/**
+ * @ingroup sli_fc
+ * @brief Write an FCP_IREAD64_WQE work queue entry.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the WQE.
+ * @param size Buffer size, in bytes.
+ * @param sgl DMA memory for the scatter gather list.
+ * @param first_data_sge Index of first data sge (used if perf hints are enabled)
+ * @param xfer_len Data transfer length.
+ * @param xri XRI for this exchange.
+ * @param tag IO tag value.
+ * @param cq_id The id of the completion queue where the WQE response is sent.
+ * @param rpi remote node indicator (RPI)
+ * @param rnode Destination of request (that is, the remote node).
+ * @param dif T10 DIF operation, or 0 to disable.
+ * @param bs T10 DIF block size, or 0 if DIF is disabled.
+ * @param timeout Time, in seconds, before an IO times out. Zero means no timeout.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+int32_t
+sli_fcp_iread64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl, uint32_t first_data_sge,
+ uint32_t xfer_len, uint16_t xri, uint16_t tag, uint16_t cq_id,
+ uint32_t rpi, ocs_remote_node_t *rnode,
+ uint8_t dif, uint8_t bs, uint8_t timeout)
+{
+ sli4_fcp_iread64_wqe_t *iread = buf;
+ sli4_sge_t *sge = NULL;
+
+ ocs_memset(buf, 0, size);
+
+ if (!sgl || !sgl->virt) {
+ ocs_log_err(sli4->os, "bad parameter sgl=%p virt=%p\n",
+ sgl, sgl ? sgl->virt : NULL);
+ return -1;
+ }
+ sge = sgl->virt;
+
+ if (sli4->config.sgl_pre_registered) {
+ iread->xbl = FALSE;
+
+ iread->dbde = TRUE;
+ iread->bde.bde_type = SLI4_BDE_TYPE_BDE_64;
+
+ iread->bde.buffer_length = sge[0].buffer_length;
+ iread->bde.u.data.buffer_address_low = sge[0].buffer_address_low;
+ iread->bde.u.data.buffer_address_high = sge[0].buffer_address_high;
+ } else {
+ iread->xbl = TRUE;
+
+ iread->bde.bde_type = SLI4_BDE_TYPE_BLP;
+
+ iread->bde.buffer_length = sgl->size;
+ iread->bde.u.blp.sgl_segment_address_low = ocs_addr32_lo(sgl->phys);
+ iread->bde.u.blp.sgl_segment_address_high = ocs_addr32_hi(sgl->phys);
+
+ /* fill out fcp_cmnd buffer len and change resp buffer to be of type
+ * "skip" (note: response will still be written to sge[1] if necessary) */
+ iread->fcp_cmd_buffer_length = sge[0].buffer_length;
+ sge[1].sge_type = SLI4_SGE_TYPE_SKIP;
+ }
+
+ iread->payload_offset_length = sge[0].buffer_length + sge[1].buffer_length;
+ iread->total_transfer_length = xfer_len;
+
+ iread->xri_tag = xri;
+ iread->context_tag = rpi;
+
+ iread->timer = timeout;
+
+ iread->pu = 2; /* WQE word 4 contains read transfer length */
+ iread->class = SLI4_ELS_REQUEST64_CLASS_3;
+ iread->command = SLI4_WQE_FCP_IREAD64;
+ iread->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI;
+ iread->dif = dif;
+ iread->bs = bs;
+
+ iread->abort_tag = xri;
+
+ iread->request_tag = tag;
+ iread->len_loc = 3;
+ if (rnode->node_group) {
+ iread->hlm = TRUE;
+ iread->remote_n_port_id = rnode->fc_id & 0x00ffffff;
+ }
+ if (((ocs_node_t *)rnode->node)->fcp2device) {
+ iread->erp = TRUE;
+ }
+ iread->iod = 1;
+ iread->cmd_type = SLI4_CMD_FCP_IREAD64_WQE;
+ iread->cq_id = cq_id;
+
+ if (sli4->config.perf_hint) {
+ iread->first_data_bde.bde_type = SLI4_BDE_TYPE_BDE_64;
+ iread->first_data_bde.buffer_length = sge[first_data_sge].buffer_length;
+ iread->first_data_bde.u.data.buffer_address_low = sge[first_data_sge].buffer_address_low;
+ iread->first_data_bde.u.data.buffer_address_high = sge[first_data_sge].buffer_address_high;
+ }
+
+ return 0;
+}
+
+/**
+ * @ingroup sli_fc
+ * @brief Write an FCP_IWRITE64_WQE work queue entry.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the WQE.
+ * @param size Buffer size, in bytes.
+ * @param sgl DMA memory for the scatter gather list.
+ * @param first_data_sge Index of first data sge (used if perf hints are enabled)
+ * @param xfer_len Data transfer length.
+ * @param first_burst The number of first burst bytes
+ * @param xri XRI for this exchange.
+ * @param tag IO tag value.
+ * @param cq_id The id of the completion queue where the WQE response is sent.
+ * @param rpi remote node indicator (RPI)
+ * @param rnode Destination of request (that is, the remote node).
+ * @param dif T10 DIF operation, or 0 to disable
+ * @param bs T10 DIF block size, or 0 if DIF is disabled
+ * @param timeout Time, in seconds, before an IO times out. Zero means no timeout.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+int32_t
+sli_fcp_iwrite64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl, uint32_t first_data_sge,
+ uint32_t xfer_len, uint32_t first_burst, uint16_t xri, uint16_t tag, uint16_t cq_id,
+ uint32_t rpi, ocs_remote_node_t *rnode,
+ uint8_t dif, uint8_t bs, uint8_t timeout)
+{
+ sli4_fcp_iwrite64_wqe_t *iwrite = buf;
+ sli4_sge_t *sge = NULL;
+
+ ocs_memset(buf, 0, size);
+
+ if (!sgl || !sgl->virt) {
+ ocs_log_err(sli4->os, "bad parameter sgl=%p virt=%p\n",
+ sgl, sgl ? sgl->virt : NULL);
+ return -1;
+ }
+ sge = sgl->virt;
+
+ if (sli4->config.sgl_pre_registered) {
+ iwrite->xbl = FALSE;
+
+ iwrite->dbde = TRUE;
+ iwrite->bde.bde_type = SLI4_BDE_TYPE_BDE_64;
+
+ iwrite->bde.buffer_length = sge[0].buffer_length;
+ iwrite->bde.u.data.buffer_address_low = sge[0].buffer_address_low;
+ iwrite->bde.u.data.buffer_address_high = sge[0].buffer_address_high;
+ } else {
+ iwrite->xbl = TRUE;
+
+ iwrite->bde.bde_type = SLI4_BDE_TYPE_BLP;
+
+ iwrite->bde.buffer_length = sgl->size;
+ iwrite->bde.u.blp.sgl_segment_address_low = ocs_addr32_lo(sgl->phys);
+ iwrite->bde.u.blp.sgl_segment_address_high = ocs_addr32_hi(sgl->phys);
+
+ /* fill out fcp_cmnd buffer len and change resp buffer to be of type
+ * "skip" (note: response will still be written to sge[1] if necessary) */
+ iwrite->fcp_cmd_buffer_length = sge[0].buffer_length;
+ sge[1].sge_type = SLI4_SGE_TYPE_SKIP;
+ }
+
+ iwrite->payload_offset_length = sge[0].buffer_length + sge[1].buffer_length;
+ iwrite->total_transfer_length = xfer_len;
+ iwrite->initial_transfer_length = MIN(xfer_len, first_burst);
+
+ iwrite->xri_tag = xri;
+ iwrite->context_tag = rpi;
+
+ iwrite->timer = timeout;
+
+ iwrite->pu = 2; /* WQE word 4 contains read transfer length */
+ iwrite->class = SLI4_ELS_REQUEST64_CLASS_3;
+ iwrite->command = SLI4_WQE_FCP_IWRITE64;
+ iwrite->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI;
+ iwrite->dif = dif;
+ iwrite->bs = bs;
+
+ iwrite->abort_tag = xri;
+
+ iwrite->request_tag = tag;
+ iwrite->len_loc = 3;
+ if (rnode->node_group) {
+ iwrite->hlm = TRUE;
+ iwrite->remote_n_port_id = rnode->fc_id & 0x00ffffff;
+ }
+ if (((ocs_node_t *)rnode->node)->fcp2device) {
+ iwrite->erp = TRUE;
+ }
+ iwrite->cmd_type = SLI4_CMD_FCP_IWRITE64_WQE;
+ iwrite->cq_id = cq_id;
+
+ if (sli4->config.perf_hint) {
+ iwrite->first_data_bde.bde_type = SLI4_BDE_TYPE_BDE_64;
+ iwrite->first_data_bde.buffer_length = sge[first_data_sge].buffer_length;
+ iwrite->first_data_bde.u.data.buffer_address_low = sge[first_data_sge].buffer_address_low;
+ iwrite->first_data_bde.u.data.buffer_address_high = sge[first_data_sge].buffer_address_high;
+ }
+
+ return 0;
+}
+
+/**
+ * @ingroup sli_fc
+ * @brief Write an FCP_TRECEIVE64_WQE work queue entry.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the WQE.
+ * @param size Buffer size, in bytes.
+ * @param sgl DMA memory for the Scatter-Gather List.
+ * @param first_data_sge Index of first data sge (used if perf hints are enabled)
+ * @param relative_off Relative offset of the IO (if any).
+ * @param xfer_len Data transfer length.
+ * @param xri XRI for this exchange.
+ * @param tag IO tag value.
+ * @param cq_id The id of the completion queue where the WQE response is sent.
+ * @param xid OX_ID for the exchange.
+ * @param rpi remote node indicator (RPI)
+ * @param rnode Destination of request (that is, the remote node).
+ * @param flags Optional attributes, including:
+ * - ACTIVE - IO is already active.
+ * - AUTO RSP - Automatically generate a good FCP_RSP.
+ * @param dif T10 DIF operation, or 0 to disable.
+ * @param bs T10 DIF block size, or 0 if DIF is disabled.
+ * @param csctl value of csctl field.
+ * @param app_id value for VM application header.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+int32_t
+sli_fcp_treceive64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl, uint32_t first_data_sge,
+ uint32_t relative_off, uint32_t xfer_len, uint16_t xri, uint16_t tag, uint16_t cq_id,
+ uint16_t xid, uint32_t rpi, ocs_remote_node_t *rnode, uint32_t flags, uint8_t dif, uint8_t bs,
+ uint8_t csctl, uint32_t app_id)
+{
+ sli4_fcp_treceive64_wqe_t *trecv = buf;
+ sli4_fcp_128byte_wqe_t *trecv_128 = buf;
+ sli4_sge_t *sge = NULL;
+
+ ocs_memset(buf, 0, size);
+
+ if (!sgl || !sgl->virt) {
+ ocs_log_err(sli4->os, "bad parameter sgl=%p virt=%p\n",
+ sgl, sgl ? sgl->virt : NULL);
+ return -1;
+ }
+ sge = sgl->virt;
+
+ if (sli4->config.sgl_pre_registered) {
+ trecv->xbl = FALSE;
+
+ trecv->dbde = TRUE;
+ trecv->bde.bde_type = SLI4_BDE_TYPE_BDE_64;
+
+ trecv->bde.buffer_length = sge[0].buffer_length;
+ trecv->bde.u.data.buffer_address_low = sge[0].buffer_address_low;
+ trecv->bde.u.data.buffer_address_high = sge[0].buffer_address_high;
+
+ trecv->payload_offset_length = sge[0].buffer_length;
+ } else {
+ trecv->xbl = TRUE;
+
+ /* if data is a single physical address, use a BDE */
+ if (!dif && (xfer_len <= sge[2].buffer_length)) {
+ trecv->dbde = TRUE;
+ trecv->bde.bde_type = SLI4_BDE_TYPE_BDE_64;
+
+ trecv->bde.buffer_length = sge[2].buffer_length;
+ trecv->bde.u.data.buffer_address_low = sge[2].buffer_address_low;
+ trecv->bde.u.data.buffer_address_high = sge[2].buffer_address_high;
+ } else {
+ trecv->bde.bde_type = SLI4_BDE_TYPE_BLP;
+ trecv->bde.buffer_length = sgl->size;
+ trecv->bde.u.blp.sgl_segment_address_low = ocs_addr32_lo(sgl->phys);
+ trecv->bde.u.blp.sgl_segment_address_high = ocs_addr32_hi(sgl->phys);
+ }
+ }
+
+ trecv->relative_offset = relative_off;
+
+ if (flags & SLI4_IO_CONTINUATION) {
+ trecv->xc = TRUE;
+ }
+ trecv->xri_tag = xri;
+
+ trecv->context_tag = rpi;
+
+ trecv->pu = TRUE; /* WQE uses relative offset */
+
+ if (flags & SLI4_IO_AUTO_GOOD_RESPONSE) {
+ trecv->ar = TRUE;
+ }
+
+ trecv->command = SLI4_WQE_FCP_TRECEIVE64;
+ trecv->class = SLI4_ELS_REQUEST64_CLASS_3;
+ trecv->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI;
+ trecv->dif = dif;
+ trecv->bs = bs;
+
+ trecv->remote_xid = xid;
+
+ trecv->request_tag = tag;
+
+ trecv->iod = 1;
+
+ trecv->len_loc = 0x2;
+
+ if (rnode->node_group) {
+ trecv->hlm = TRUE;
+ trecv->dword5.dword = rnode->fc_id & 0x00ffffff;
+ }
+
+ trecv->cmd_type = SLI4_CMD_FCP_TRECEIVE64_WQE;
+
+ trecv->cq_id = cq_id;
+
+ trecv->fcp_data_receive_length = xfer_len;
+
+ if (sli4->config.perf_hint) {
+ trecv->first_data_bde.bde_type = SLI4_BDE_TYPE_BDE_64;
+ trecv->first_data_bde.buffer_length = sge[first_data_sge].buffer_length;
+ trecv->first_data_bde.u.data.buffer_address_low = sge[first_data_sge].buffer_address_low;
+ trecv->first_data_bde.u.data.buffer_address_high = sge[first_data_sge].buffer_address_high;
+ }
+
+	/* The upper 7 bits of csctl are the priority */
+ if (csctl & SLI4_MASK_CCP) {
+ trecv->ccpe = 1;
+ trecv->ccp = (csctl & SLI4_MASK_CCP);
+ }
+
+ if (app_id && (sli4->config.wqe_size == SLI4_WQE_EXT_BYTES) && !trecv->eat) {
+ trecv->app_id_valid = 1;
+ trecv->wqes = 1;
+ trecv_128->dw[31] = app_id;
+ }
+ return 0;
+}
+
+/**
+ * @ingroup sli_fc
+ * @brief Write an FCP_CONT_TRECEIVE64_WQE work queue entry.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the WQE.
+ * @param size Buffer size, in bytes.
+ * @param sgl DMA memory for the Scatter-Gather List.
+ * @param first_data_sge Index of first data sge (used if perf hints are enabled)
+ * @param relative_off Relative offset of the IO (if any).
+ * @param xfer_len Data transfer length.
+ * @param xri XRI for this exchange.
+ * @param sec_xri Secondary XRI for this exchange. (BZ 161832 workaround)
+ * @param tag IO tag value.
+ * @param cq_id The id of the completion queue where the WQE response is sent.
+ * @param xid OX_ID for the exchange.
+ * @param rpi remote node indicator (RPI)
+ * @param rnode Destination of request (that is, the remote node).
+ * @param flags Optional attributes, including:
+ * - ACTIVE - IO is already active.
+ * - AUTO RSP - Automatically generate a good FCP_RSP.
+ * @param dif T10 DIF operation, or 0 to disable.
+ * @param bs T10 DIF block size, or 0 if DIF is disabled.
+ * @param csctl value of csctl field.
+ * @param app_id value for VM application header.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+int32_t
+sli_fcp_cont_treceive64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl, uint32_t first_data_sge,
+ uint32_t relative_off, uint32_t xfer_len, uint16_t xri, uint16_t sec_xri, uint16_t tag,
+ uint16_t cq_id, uint16_t xid, uint32_t rpi, ocs_remote_node_t *rnode, uint32_t flags,
+ uint8_t dif, uint8_t bs, uint8_t csctl, uint32_t app_id)
+{
+ int32_t rc;
+
+ rc = sli_fcp_treceive64_wqe(sli4, buf, size, sgl, first_data_sge, relative_off, xfer_len, xri, tag,
+ cq_id, xid, rpi, rnode, flags, dif, bs, csctl, app_id);
+ if (rc == 0) {
+ sli4_fcp_treceive64_wqe_t *trecv = buf;
+
+ trecv->command = SLI4_WQE_FCP_CONT_TRECEIVE64;
+ trecv->dword5.sec_xri_tag = sec_xri;
+ }
+ return rc;
+}
+
+/**
+ * @ingroup sli_fc
+ * @brief Write an FCP_TRSP64_WQE work queue entry.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the WQE.
+ * @param size Buffer size, in bytes.
+ * @param sgl DMA memory for the Scatter-Gather List.
+ * @param rsp_len Response data length.
+ * @param xri XRI for this exchange.
+ * @param tag IO tag value.
+ * @param cq_id The id of the completion queue where the WQE response is sent.
+ * @param xid OX_ID for the exchange.
+ * @param rpi remote node indicator (RPI)
+ * @param rnode Destination of request (that is, the remote node).
+ * @param flags Optional attributes, including:
+ * - ACTIVE - IO is already active
+ * - AUTO RSP - Automatically generate a good FCP_RSP.
+ * @param csctl value of csctl field.
+ * @param port_owned 0/1 to indicate if the XRI is port owned (used to set XBL=0)
+ * @param app_id value for VM application header.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+int32_t
+sli_fcp_trsp64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl, uint32_t rsp_len,
+ uint16_t xri, uint16_t tag, uint16_t cq_id, uint16_t xid, uint32_t rpi, ocs_remote_node_t *rnode,
+ uint32_t flags, uint8_t csctl, uint8_t port_owned, uint32_t app_id)
+{
+ sli4_fcp_trsp64_wqe_t *trsp = buf;
+ sli4_fcp_128byte_wqe_t *trsp_128 = buf;
+
+ ocs_memset(buf, 0, size);
+
+ if (flags & SLI4_IO_AUTO_GOOD_RESPONSE) {
+ trsp->ag = TRUE;
+ /*
+ * The SLI-4 documentation states that the BDE is ignored when
+ * using auto-good response, but, at least for IF_TYPE 0 devices,
+ * this does not appear to be true.
+ */
+ if (SLI4_IF_TYPE_BE3_SKH_PF == sli4->if_type) {
+ trsp->bde.buffer_length = 12; /* byte size of RSP */
+ }
+ } else {
+ sli4_sge_t *sge = sgl->virt;
+
+ if (sli4->config.sgl_pre_registered || port_owned) {
+ trsp->dbde = TRUE;
+ } else {
+ trsp->xbl = TRUE;
+ }
+
+ trsp->bde.bde_type = SLI4_BDE_TYPE_BDE_64;
+ trsp->bde.buffer_length = sge[0].buffer_length;
+ trsp->bde.u.data.buffer_address_low = sge[0].buffer_address_low;
+ trsp->bde.u.data.buffer_address_high = sge[0].buffer_address_high;
+
+ trsp->fcp_response_length = rsp_len;
+ }
+
+ if (flags & SLI4_IO_CONTINUATION) {
+ trsp->xc = TRUE;
+ }
+
+ if (rnode->node_group) {
+ trsp->hlm = TRUE;
+ trsp->dword5 = rnode->fc_id & 0x00ffffff;
+ }
+
+ trsp->xri_tag = xri;
+ trsp->rpi = rpi;
+
+ trsp->command = SLI4_WQE_FCP_TRSP64;
+ trsp->class = SLI4_ELS_REQUEST64_CLASS_3;
+
+ trsp->remote_xid = xid;
+ trsp->request_tag = tag;
+ trsp->dnrx = ((flags & SLI4_IO_DNRX) == 0 ? 0 : 1);
+ trsp->len_loc = 0x1;
+ trsp->cq_id = cq_id;
+ trsp->cmd_type = SLI4_CMD_FCP_TRSP64_WQE;
+
+	/* The upper 7 bits of csctl are the priority */
+ if (csctl & SLI4_MASK_CCP) {
+ trsp->ccpe = 1;
+ trsp->ccp = (csctl & SLI4_MASK_CCP);
+ }
+
+ if (app_id && (sli4->config.wqe_size == SLI4_WQE_EXT_BYTES) && !trsp->eat) {
+ trsp->app_id_valid = 1;
+ trsp->wqes = 1;
+ trsp_128->dw[31] = app_id;
+ }
+ return 0;
+}
+
+/**
+ * @ingroup sli_fc
+ * @brief Write an FCP_TSEND64_WQE work queue entry.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the WQE.
+ * @param size Buffer size, in bytes.
+ * @param sgl DMA memory for the scatter gather list.
+ * @param first_data_sge Index of first data sge (used if perf hints are enabled)
+ * @param relative_off Relative offset of the IO (if any).
+ * @param xfer_len Data transfer length.
+ * @param xri XRI for this exchange.
+ * @param tag IO tag value.
+ * @param cq_id The id of the completion queue where the WQE response is sent.
+ * @param xid OX_ID for the exchange.
+ * @param rpi remote node indicator (RPI)
+ * @param rnode Destination of request (that is, the remote node).
+ * @param flags Optional attributes, including:
+ * - ACTIVE - IO is already active.
+ * - AUTO RSP - Automatically generate a good FCP_RSP.
+ * @param dif T10 DIF operation, or 0 to disable.
+ * @param bs T10 DIF block size, or 0 if DIF is disabled.
+ * @param csctl value of csctl field.
+ * @param app_id value for VM application header.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+int32_t
+sli_fcp_tsend64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl, uint32_t first_data_sge,
+ uint32_t relative_off, uint32_t xfer_len,
+ uint16_t xri, uint16_t tag, uint16_t cq_id, uint16_t xid, uint32_t rpi, ocs_remote_node_t *rnode,
+ uint32_t flags, uint8_t dif, uint8_t bs, uint8_t csctl, uint32_t app_id)
+{
+ sli4_fcp_tsend64_wqe_t *tsend = buf;
+ sli4_fcp_128byte_wqe_t *tsend_128 = buf;
+ sli4_sge_t *sge = NULL;
+
+ ocs_memset(buf, 0, size);
+
+ if (!sgl || !sgl->virt) {
+ ocs_log_err(sli4->os, "bad parameter sgl=%p virt=%p\n",
+ sgl, sgl ? sgl->virt : NULL);
+ return -1;
+ }
+ sge = sgl->virt;
+
+ if (sli4->config.sgl_pre_registered) {
+ tsend->xbl = FALSE;
+
+ tsend->dbde = TRUE;
+ tsend->bde.bde_type = SLI4_BDE_TYPE_BDE_64;
+
+		/* TSEND64_WQE specifies that the first two SGEs are skipped
+		 * (i.e. the 3rd is valid) */
+ tsend->bde.buffer_length = sge[2].buffer_length;
+ tsend->bde.u.data.buffer_address_low = sge[2].buffer_address_low;
+ tsend->bde.u.data.buffer_address_high = sge[2].buffer_address_high;
+ } else {
+ tsend->xbl = TRUE;
+
+ /* if data is a single physical address, use a BDE */
+ if (!dif && (xfer_len <= sge[2].buffer_length)) {
+ tsend->dbde = TRUE;
+ tsend->bde.bde_type = SLI4_BDE_TYPE_BDE_64;
+			/* TSEND64_WQE specifies that the first two SGEs are skipped
+			 * (i.e. the 3rd is valid) */
+ tsend->bde.buffer_length = sge[2].buffer_length;
+ tsend->bde.u.data.buffer_address_low = sge[2].buffer_address_low;
+ tsend->bde.u.data.buffer_address_high = sge[2].buffer_address_high;
+ } else {
+ tsend->bde.bde_type = SLI4_BDE_TYPE_BLP;
+ tsend->bde.buffer_length = sgl->size;
+ tsend->bde.u.blp.sgl_segment_address_low = ocs_addr32_lo(sgl->phys);
+ tsend->bde.u.blp.sgl_segment_address_high = ocs_addr32_hi(sgl->phys);
+ }
+ }
+
+ tsend->relative_offset = relative_off;
+
+ if (flags & SLI4_IO_CONTINUATION) {
+ tsend->xc = TRUE;
+ }
+ tsend->xri_tag = xri;
+
+ tsend->rpi = rpi;
+
+ tsend->pu = TRUE; /* WQE uses relative offset */
+
+ if (flags & SLI4_IO_AUTO_GOOD_RESPONSE) {
+ tsend->ar = TRUE;
+ }
+
+ tsend->command = SLI4_WQE_FCP_TSEND64;
+ tsend->class = SLI4_ELS_REQUEST64_CLASS_3;
+ tsend->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI;
+ tsend->dif = dif;
+ tsend->bs = bs;
+
+ tsend->remote_xid = xid;
+
+ tsend->request_tag = tag;
+
+ tsend->len_loc = 0x2;
+
+ if (rnode->node_group) {
+ tsend->hlm = TRUE;
+ tsend->dword5 = rnode->fc_id & 0x00ffffff;
+ }
+
+ tsend->cq_id = cq_id;
+
+ tsend->cmd_type = SLI4_CMD_FCP_TSEND64_WQE;
+
+ tsend->fcp_data_transmit_length = xfer_len;
+
+ if (sli4->config.perf_hint) {
+ tsend->first_data_bde.bde_type = SLI4_BDE_TYPE_BDE_64;
+ tsend->first_data_bde.buffer_length = sge[first_data_sge].buffer_length;
+ tsend->first_data_bde.u.data.buffer_address_low = sge[first_data_sge].buffer_address_low;
+ tsend->first_data_bde.u.data.buffer_address_high = sge[first_data_sge].buffer_address_high;
+ }
+
+	/* The upper 7 bits of csctl are the priority */
+ if (csctl & SLI4_MASK_CCP) {
+ tsend->ccpe = 1;
+ tsend->ccp = (csctl & SLI4_MASK_CCP);
+ }
+
+ if (app_id && (sli4->config.wqe_size == SLI4_WQE_EXT_BYTES) && !tsend->eat) {
+ tsend->app_id_valid = 1;
+ tsend->wqes = 1;
+ tsend_128->dw[31] = app_id;
+ }
+ return 0;
+}
+
+/**
+ * @ingroup sli_fc
+ * @brief Write a GEN_REQUEST64 work queue entry.
+ *
+ * @note This WQE is only used to send FC-CT commands.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the WQE.
+ * @param size Buffer size, in bytes.
+ * @param sgl DMA memory for the request.
+ * @param req_len Length of request.
+ * @param max_rsp_len Max length of response.
+ * @param timeout Time, in seconds, before an IO times out. Zero means infinite.
+ * @param xri XRI for this exchange.
+ * @param tag IO tag value.
+ * @param cq_id The id of the completion queue where the WQE response is sent.
+ * @param rnode Destination of request (that is, the remote node).
+ * @param r_ctl R_CTL value for sequence.
+ * @param type TYPE value for sequence.
+ * @param df_ctl DF_CTL value for sequence.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+int32_t
+sli_gen_request64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *sgl,
+ uint32_t req_len, uint32_t max_rsp_len, uint8_t timeout,
+ uint16_t xri, uint16_t tag, uint16_t cq_id, ocs_remote_node_t *rnode,
+ uint8_t r_ctl, uint8_t type, uint8_t df_ctl)
+{
+ sli4_gen_request64_wqe_t *gen = buf;
+ sli4_sge_t *sge = NULL;
+
+ ocs_memset(buf, 0, size);
+
+ if (!sgl || !sgl->virt) {
+ ocs_log_err(sli4->os, "bad parameter sgl=%p virt=%p\n",
+ sgl, sgl ? sgl->virt : NULL);
+ return -1;
+ }
+ sge = sgl->virt;
+
+ if (sli4->config.sgl_pre_registered) {
+ gen->xbl = FALSE;
+
+ gen->dbde = TRUE;
+ gen->bde.bde_type = SLI4_BDE_TYPE_BDE_64;
+
+ gen->bde.buffer_length = req_len;
+ gen->bde.u.data.buffer_address_low = sge[0].buffer_address_low;
+ gen->bde.u.data.buffer_address_high = sge[0].buffer_address_high;
+ } else {
+ gen->xbl = TRUE;
+
+ gen->bde.bde_type = SLI4_BDE_TYPE_BLP;
+
+ gen->bde.buffer_length = 2 * sizeof(sli4_sge_t);
+ gen->bde.u.blp.sgl_segment_address_low = ocs_addr32_lo(sgl->phys);
+ gen->bde.u.blp.sgl_segment_address_high = ocs_addr32_hi(sgl->phys);
+ }
+
+ gen->request_payload_length = req_len;
+ gen->max_response_payload_length = max_rsp_len;
+
+ gen->df_ctl = df_ctl;
+ gen->type = type;
+ gen->r_ctl = r_ctl;
+
+ gen->xri_tag = xri;
+
+ gen->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI;
+ gen->context_tag = rnode->indicator;
+
+ gen->class = SLI4_ELS_REQUEST64_CLASS_3;
+
+ gen->command = SLI4_WQE_GEN_REQUEST64;
+
+ gen->timer = timeout;
+
+ gen->request_tag = tag;
+
+ gen->iod = SLI4_ELS_REQUEST64_DIR_READ;
+
+ gen->qosd = TRUE;
+
+ if (rnode->node_group) {
+ gen->hlm = TRUE;
+ gen->remote_n_port_id = rnode->fc_id & 0x00ffffff;
+ }
+
+ gen->cmd_type = SLI4_CMD_GEN_REQUEST64_WQE;
+
+ gen->cq_id = cq_id;
+
+ return 0;
+}
+
+/**
+ * @ingroup sli_fc
+ * @brief Write a SEND_FRAME work queue entry.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the WQE.
+ * @param size Buffer size, in bytes.
+ * @param sof Start of frame value
+ * @param eof End of frame value
+ * @param hdr Pointer to FC header data
+ * @param payload DMA memory for the payload.
+ * @param req_len Length of payload.
+ * @param timeout Time, in seconds, before an IO times out. Zero means infinite.
+ * @param xri XRI for this exchange.
+ * @param req_tag IO tag value.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+int32_t
+sli_send_frame_wqe(sli4_t *sli4, void *buf, size_t size, uint8_t sof, uint8_t eof, uint32_t *hdr,
+ ocs_dma_t *payload, uint32_t req_len, uint8_t timeout,
+ uint16_t xri, uint16_t req_tag)
+{
+ sli4_send_frame_wqe_t *sf = buf;
+
+ ocs_memset(buf, 0, size);
+
+ sf->dbde = TRUE;
+ sf->bde.buffer_length = req_len;
+ sf->bde.u.data.buffer_address_low = ocs_addr32_lo(payload->phys);
+ sf->bde.u.data.buffer_address_high = ocs_addr32_hi(payload->phys);
+
+ /* Copy FC header */
+ sf->fc_header_0_1[0] = hdr[0];
+ sf->fc_header_0_1[1] = hdr[1];
+ sf->fc_header_2_5[0] = hdr[2];
+ sf->fc_header_2_5[1] = hdr[3];
+ sf->fc_header_2_5[2] = hdr[4];
+ sf->fc_header_2_5[3] = hdr[5];
+
+ sf->frame_length = req_len;
+
+ sf->xri_tag = xri;
+ sf->pu = 0;
+ sf->context_tag = 0;
+
+ sf->ct = 0;
+ sf->command = SLI4_WQE_SEND_FRAME;
+ sf->class = SLI4_ELS_REQUEST64_CLASS_3;
+ sf->timer = timeout;
+
+ sf->request_tag = req_tag;
+ sf->eof = eof;
+ sf->sof = sof;
+
+ sf->qosd = 0;
+ sf->lenloc = 1;
+ sf->xc = 0;
+
+ sf->xbl = 1;
+
+ sf->cmd_type = SLI4_CMD_SEND_FRAME_WQE;
+ sf->cq_id = 0xffff;
+
+ return 0;
+}
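+
+/*
+ * Note on the header copy in sli_send_frame_wqe() above: hdr must point to
+ * the full 24-byte (six 32-bit word) FC frame header; words 0-1 and 2-5
+ * are written to separate WQE fields.
+ */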
+
+/**
+ * @ingroup sli_fc
+ * @brief Write an XMIT_SEQUENCE64 work queue entry.
+ *
+ * This WQE is used to send FC-CT response frames.
+ *
+ * @note This API implements a restricted use of this WQE; a TODO would be
+ * to support passing in sequence initiative and full SGLs.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the WQE.
+ * @param size Buffer size, in bytes.
+ * @param payload DMA memory for the request.
+ * @param payload_len Length of request.
+ * @param timeout Time, in seconds, before an IO times out. Zero means infinite.
+ * @param ox_id originator exchange ID
+ * @param xri XRI for this exchange.
+ * @param tag IO tag value.
+ * @param rnode Destination of request (that is, the remote node).
+ * @param r_ctl R_CTL value for sequence.
+ * @param type TYPE value for sequence.
+ * @param df_ctl DF_CTL value for sequence.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+int32_t
+sli_xmit_sequence64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *payload,
+ uint32_t payload_len, uint8_t timeout, uint16_t ox_id,
+ uint16_t xri, uint16_t tag, ocs_remote_node_t *rnode,
+ uint8_t r_ctl, uint8_t type, uint8_t df_ctl)
+{
+ sli4_xmit_sequence64_wqe_t *xmit = buf;
+
+ ocs_memset(buf, 0, size);
+
+ if ((payload == NULL) || (payload->virt == NULL)) {
+ ocs_log_err(sli4->os, "bad parameter sgl=%p virt=%p\n",
+ payload, payload ? payload->virt : NULL);
+ return -1;
+ }
+
+ if (sli4->config.sgl_pre_registered) {
+ xmit->dbde = TRUE;
+ } else {
+ xmit->xbl = TRUE;
+ }
+
+ xmit->bde.bde_type = SLI4_BDE_TYPE_BDE_64;
+ xmit->bde.buffer_length = payload_len;
+ xmit->bde.u.data.buffer_address_low = ocs_addr32_lo(payload->phys);
+ xmit->bde.u.data.buffer_address_high = ocs_addr32_hi(payload->phys);
+ xmit->sequence_payload_len = payload_len;
+
+ xmit->remote_n_port_id = rnode->fc_id & 0x00ffffff;
+
+ xmit->relative_offset = 0;
+
+ xmit->si = 0; /* sequence initiative - this matches what is seen from
+ * FC switches in response to FCGS commands */
+ xmit->ft = 0; /* force transmit */
+ xmit->xo = 0; /* exchange responder */
+	xmit->ls = 1;	/* last in sequence */
+ xmit->df_ctl = df_ctl;
+ xmit->type = type;
+ xmit->r_ctl = r_ctl;
+
+ xmit->xri_tag = xri;
+ xmit->context_tag = rnode->indicator;
+
+ xmit->dif = 0;
+ xmit->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI;
+ xmit->bs = 0;
+
+ xmit->command = SLI4_WQE_XMIT_SEQUENCE64;
+ xmit->class = SLI4_ELS_REQUEST64_CLASS_3;
+ xmit->pu = 0;
+ xmit->timer = timeout;
+
+ xmit->abort_tag = 0;
+ xmit->request_tag = tag;
+ xmit->remote_xid = ox_id;
+
+ xmit->iod = SLI4_ELS_REQUEST64_DIR_READ;
+
+ if (rnode->node_group) {
+ xmit->hlm = TRUE;
+ xmit->remote_n_port_id = rnode->fc_id & 0x00ffffff;
+ }
+
+ xmit->cmd_type = SLI4_CMD_XMIT_SEQUENCE64_WQE;
+
+ xmit->len_loc = 2;
+
+ xmit->cq_id = 0xFFFF;
+
+ return 0;
+}
+
+/**
+ * @ingroup sli_fc
+ * @brief Write a REQUEUE_XRI_WQE work queue entry.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the WQE.
+ * @param size Buffer size, in bytes.
+ * @param xri XRI for this exchange.
+ * @param tag IO tag value.
+ * @param cq_id The id of the completion queue where the WQE response is sent.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+int32_t
+sli_requeue_xri_wqe(sli4_t *sli4, void *buf, size_t size, uint16_t xri, uint16_t tag, uint16_t cq_id)
+{
+ sli4_requeue_xri_wqe_t *requeue = buf;
+
+ ocs_memset(buf, 0, size);
+
+ requeue->command = SLI4_WQE_REQUEUE_XRI;
+ requeue->xri_tag = xri;
+ requeue->request_tag = tag;
+ requeue->xc = 1;
+ requeue->qosd = 1;
+ requeue->cq_id = cq_id;
+ requeue->cmd_type = SLI4_CMD_REQUEUE_XRI_WQE;
+ return 0;
+}
+
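+/**
+ * @ingroup sli_fc
+ * @brief Write an XMIT_BCAST64 work queue entry.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the WQE.
+ * @param size Buffer size, in bytes.
+ * @param payload DMA memory for the broadcast payload.
+ * @param payload_len Length of payload, in bytes.
+ * @param timeout Time, in seconds, before an IO times out. Zero means infinite.
+ * @param xri XRI for this exchange.
+ * @param tag IO tag value.
+ * @param cq_id The id of the completion queue where the WQE response is sent.
+ * @param rnode Destination of the broadcast; must not be attached, since the
+ * command uses a temporary RPI.
+ * @param r_ctl R_CTL value for sequence.
+ * @param type TYPE value for sequence.
+ * @param df_ctl DF_CTL value for sequence.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */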
+int32_t
+sli_xmit_bcast64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *payload,
+ uint32_t payload_len, uint8_t timeout, uint16_t xri, uint16_t tag,
+ uint16_t cq_id, ocs_remote_node_t *rnode,
+ uint8_t r_ctl, uint8_t type, uint8_t df_ctl)
+{
+ sli4_xmit_bcast64_wqe_t *bcast = buf;
+
+ /* Command requires a temporary RPI (i.e. unused remote node) */
+ if (rnode->attached) {
+ ocs_log_test(sli4->os, "remote node %d in use\n", rnode->indicator);
+ return -1;
+ }
+
+ ocs_memset(buf, 0, size);
+
+ bcast->dbde = TRUE;
+ bcast->sequence_payload.bde_type = SLI4_BDE_TYPE_BDE_64;
+ bcast->sequence_payload.buffer_length = payload_len;
+ bcast->sequence_payload.u.data.buffer_address_low = ocs_addr32_lo(payload->phys);
+ bcast->sequence_payload.u.data.buffer_address_high = ocs_addr32_hi(payload->phys);
+
+ bcast->sequence_payload_length = payload_len;
+
+ bcast->df_ctl = df_ctl;
+ bcast->type = type;
+ bcast->r_ctl = r_ctl;
+
+ bcast->xri_tag = xri;
+
+ bcast->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI;
+ bcast->context_tag = rnode->sport->indicator;
+
+ bcast->class = SLI4_ELS_REQUEST64_CLASS_3;
+
+ bcast->command = SLI4_WQE_XMIT_BCAST64;
+
+ bcast->timer = timeout;
+
+ bcast->request_tag = tag;
+
+ bcast->temporary_rpi = rnode->indicator;
+
+ bcast->len_loc = 0x1;
+
+ bcast->iod = SLI4_ELS_REQUEST64_DIR_WRITE;
+
+ bcast->cmd_type = SLI4_CMD_XMIT_BCAST64_WQE;
+
+ bcast->cq_id = cq_id;
+
+ return 0;
+}
+
+/**
+ * @ingroup sli_fc
+ * @brief Write an XMIT_BLS_RSP64_WQE work queue entry.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the WQE.
+ * @param size Buffer size, in bytes.
+ * @param payload Contents of the BLS payload to be sent.
+ * @param xri XRI for this exchange.
+ * @param tag IO tag value.
+ * @param cq_id The id of the completion queue where the WQE response is sent.
+ * @param rnode Destination of request (that is, the remote node).
+ * @param s_id Source ID to use in the response. If UINT32_MAX, use SLI Port's ID.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+int32_t
+sli_xmit_bls_rsp64_wqe(sli4_t *sli4, void *buf, size_t size, sli_bls_payload_t *payload,
+ uint16_t xri, uint16_t tag, uint16_t cq_id, ocs_remote_node_t *rnode, uint32_t s_id)
+{
+ sli4_xmit_bls_rsp_wqe_t *bls = buf;
+
+ /*
+	 * Callers can specify either the RPI or the S_ID, but not both.
+ */
+ if (rnode->attached && (s_id != UINT32_MAX)) {
+ ocs_log_test(sli4->os, "S_ID specified for attached remote node %d\n",
+ rnode->indicator);
+ return -1;
+ }
+
+ ocs_memset(buf, 0, size);
+
+ if (SLI_BLS_ACC == payload->type) {
+ bls->payload_word0 = (payload->u.acc.seq_id_last << 16) |
+ (payload->u.acc.seq_id_validity << 24);
+ bls->high_seq_cnt = payload->u.acc.high_seq_cnt;
+ bls->low_seq_cnt = payload->u.acc.low_seq_cnt;
+ } else if (SLI_BLS_RJT == payload->type) {
+ bls->payload_word0 = *((uint32_t *)&payload->u.rjt);
+ bls->ar = TRUE;
+ } else {
+ ocs_log_test(sli4->os, "bad BLS type %#x\n",
+ payload->type);
+ return -1;
+ }
+
+ bls->ox_id = payload->ox_id;
+ bls->rx_id = payload->rx_id;
+
+ if (rnode->attached) {
+ bls->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI;
+ bls->context_tag = rnode->indicator;
+ } else {
+ bls->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI;
+ bls->context_tag = rnode->sport->indicator;
+
+ if (UINT32_MAX != s_id) {
+ bls->local_n_port_id = s_id & 0x00ffffff;
+ } else {
+ bls->local_n_port_id = rnode->sport->fc_id & 0x00ffffff;
+ }
+ bls->remote_id = rnode->fc_id & 0x00ffffff;
+
+ bls->temporary_rpi = rnode->indicator;
+ }
+
+ bls->xri_tag = xri;
+
+ bls->class = SLI4_ELS_REQUEST64_CLASS_3;
+
+ bls->command = SLI4_WQE_XMIT_BLS_RSP;
+
+ bls->request_tag = tag;
+
+ bls->qosd = TRUE;
+
+ if (rnode->node_group) {
+ bls->hlm = TRUE;
+ bls->remote_id = rnode->fc_id & 0x00ffffff;
+ }
+
+ bls->cq_id = cq_id;
+
+ bls->cmd_type = SLI4_CMD_XMIT_BLS_RSP64_WQE;
+
+ return 0;
+}
+
+/**
+ * @ingroup sli_fc
+ * @brief Write an XMIT_ELS_RSP64_WQE work queue entry.
+ *
+ * @param sli4 SLI context.
+ * @param buf Destination buffer for the WQE.
+ * @param size Buffer size, in bytes.
+ * @param rsp DMA memory for the ELS response.
+ * @param rsp_len Length of ELS response, in bytes.
+ * @param xri XRI for this exchange.
+ * @param tag IO tag value.
+ * @param cq_id The id of the completion queue where the WQE response is sent.
+ * @param ox_id OX_ID of the exchange containing the request.
+ * @param rnode Destination of the ELS response (that is, the remote node).
+ * @param flags Optional attributes, including:
+ * - SLI4_IO_CONTINUATION - IO is already active.
+ * @param s_id S_ID used for special responses.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+int32_t
+sli_xmit_els_rsp64_wqe(sli4_t *sli4, void *buf, size_t size, ocs_dma_t *rsp,
+ uint32_t rsp_len, uint16_t xri, uint16_t tag, uint16_t cq_id,
+ uint16_t ox_id, ocs_remote_node_t *rnode, uint32_t flags, uint32_t s_id)
+{
+ sli4_xmit_els_rsp64_wqe_t *els = buf;
+
+ ocs_memset(buf, 0, size);
+
+ if (sli4->config.sgl_pre_registered) {
+ els->dbde = TRUE;
+ } else {
+ els->xbl = TRUE;
+ }
+
+ els->els_response_payload.bde_type = SLI4_BDE_TYPE_BDE_64;
+ els->els_response_payload.buffer_length = rsp_len;
+ els->els_response_payload.u.data.buffer_address_low = ocs_addr32_lo(rsp->phys);
+ els->els_response_payload.u.data.buffer_address_high = ocs_addr32_hi(rsp->phys);
+
+ els->els_response_payload_length = rsp_len;
+
+ els->xri_tag = xri;
+
+ els->class = SLI4_ELS_REQUEST64_CLASS_3;
+
+ els->command = SLI4_WQE_ELS_RSP64;
+
+ els->request_tag = tag;
+
+ els->ox_id = ox_id;
+
+ els->iod = SLI4_ELS_REQUEST64_DIR_WRITE;
+
+ els->qosd = TRUE;
+
+ if (flags & SLI4_IO_CONTINUATION) {
+ els->xc = TRUE;
+ }
+
+ if (rnode->attached) {
+ els->ct = SLI4_ELS_REQUEST64_CONTEXT_RPI;
+ els->context_tag = rnode->indicator;
+ } else {
+ els->ct = SLI4_ELS_REQUEST64_CONTEXT_VPI;
+ els->context_tag = rnode->sport->indicator;
+ els->remote_id = rnode->fc_id & 0x00ffffff;
+ els->temporary_rpi = rnode->indicator;
+ if (UINT32_MAX != s_id) {
+ els->sp = TRUE;
+ els->s_id = s_id & 0x00ffffff;
+ }
+ }
+
+ if (rnode->node_group) {
+ els->hlm = TRUE;
+ els->remote_id = rnode->fc_id & 0x00ffffff;
+ }
+
+ els->cmd_type = SLI4_ELS_REQUEST64_CMD_GEN;
+
+ els->cq_id = cq_id;
+
+ return 0;
+}
+
+/**
+ * @ingroup sli_fc
+ * @brief Process an asynchronous Link State event entry.
+ *
+ * @par Description
+ * Parses Asynchronous Completion Queue Entry (ACQE),
+ * creates an abstracted event, and calls registered callback functions.
+ *
+ * @param sli4 SLI context.
+ * @param acqe Pointer to the ACQE.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+int32_t
+sli_fc_process_link_state(sli4_t *sli4, void *acqe)
+{
+ sli4_link_state_t *link_state = acqe;
+ sli4_link_event_t event = { 0 };
+ int32_t rc = 0;
+
+ if (!sli4->link) {
+ /* bail if there is no callback */
+ return 0;
+ }
+
+ if (SLI4_LINK_TYPE_ETHERNET == link_state->link_type) {
+ event.topology = SLI_LINK_TOPO_NPORT;
+ event.medium = SLI_LINK_MEDIUM_ETHERNET;
+ } else {
+ /* TODO is this supported for anything other than FCoE? */
+ ocs_log_test(sli4->os, "unsupported link type %#x\n",
+ link_state->link_type);
+ event.topology = SLI_LINK_TOPO_MAX;
+ event.medium = SLI_LINK_MEDIUM_MAX;
+ rc = -1;
+ }
+
+ switch (link_state->port_link_status) {
+ case SLI4_PORT_LINK_STATUS_PHYSICAL_DOWN:
+ case SLI4_PORT_LINK_STATUS_LOGICAL_DOWN:
+ event.status = SLI_LINK_STATUS_DOWN;
+ break;
+ case SLI4_PORT_LINK_STATUS_PHYSICAL_UP:
+ case SLI4_PORT_LINK_STATUS_LOGICAL_UP:
+ event.status = SLI_LINK_STATUS_UP;
+ break;
+ default:
+ ocs_log_test(sli4->os, "unsupported link status %#x\n",
+ link_state->port_link_status);
+ event.status = SLI_LINK_STATUS_MAX;
+ rc = -1;
+ }
+
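+	/* Map the ACQE port_speed code to a link speed in Mb/s */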
+ switch (link_state->port_speed) {
+ case 0:
+ event.speed = 0;
+ break;
+ case 1:
+ event.speed = 10;
+ break;
+ case 2:
+ event.speed = 100;
+ break;
+ case 3:
+ event.speed = 1000;
+ break;
+ case 4:
+ event.speed = 10000;
+ break;
+ case 5:
+ event.speed = 20000;
+ break;
+ case 6:
+ event.speed = 25000;
+ break;
+ case 7:
+ event.speed = 40000;
+ break;
+ case 8:
+ event.speed = 100000;
+ break;
+ default:
+ ocs_log_test(sli4->os, "unsupported port_speed %#x\n",
+ link_state->port_speed);
+ rc = -1;
+ }
+
+ sli4->link(sli4->link_arg, (void *)&event);
+
+ return rc;
+}
+
+/**
+ * @ingroup sli_fc
+ * @brief Process an asynchronous Link Attention event entry.
+ *
+ * @par Description
+ * Parses Asynchronous Completion Queue Entry (ACQE),
+ * creates an abstracted event, and calls the registered callback functions.
+ *
+ * @param sli4 SLI context.
+ * @param acqe Pointer to the ACQE.
+ *
+ * @todo XXX all events return LINK_UP.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+int32_t
+sli_fc_process_link_attention(sli4_t *sli4, void *acqe)
+{
+ sli4_link_attention_t *link_attn = acqe;
+ sli4_link_event_t event = { 0 };
+
+ ocs_log_debug(sli4->os, "link_number=%d attn_type=%#x topology=%#x port_speed=%#x "
+ "port_fault=%#x shared_link_status=%#x logical_link_speed=%#x "
+ "event_tag=%#x\n", link_attn->link_number, link_attn->attn_type,
+ link_attn->topology, link_attn->port_speed, link_attn->port_fault,
+ link_attn->shared_link_status, link_attn->logical_link_speed,
+ link_attn->event_tag);
+
+ if (!sli4->link) {
+ return 0;
+ }
+
+ event.medium = SLI_LINK_MEDIUM_FC;
+
+ switch (link_attn->attn_type) {
+ case SLI4_LINK_ATTN_TYPE_LINK_UP:
+ event.status = SLI_LINK_STATUS_UP;
+ break;
+ case SLI4_LINK_ATTN_TYPE_LINK_DOWN:
+ event.status = SLI_LINK_STATUS_DOWN;
+ break;
+ case SLI4_LINK_ATTN_TYPE_NO_HARD_ALPA:
+ ocs_log_debug(sli4->os, "attn_type: no hard alpa\n");
+ event.status = SLI_LINK_STATUS_NO_ALPA;
+ break;
+ default:
+ ocs_log_test(sli4->os, "attn_type: unknown\n");
+ break;
+ }
+
+ switch (link_attn->event_type) {
+ case SLI4_FC_EVENT_LINK_ATTENTION:
+ break;
+ case SLI4_FC_EVENT_SHARED_LINK_ATTENTION:
+		ocs_log_debug(sli4->os, "event_type: FC shared link event\n");
+ break;
+ default:
+ ocs_log_test(sli4->os, "event_type: unknown\n");
+ break;
+ }
+
+ switch (link_attn->topology) {
+ case SLI4_LINK_ATTN_P2P:
+ event.topology = SLI_LINK_TOPO_NPORT;
+ break;
+ case SLI4_LINK_ATTN_FC_AL:
+ event.topology = SLI_LINK_TOPO_LOOP;
+ break;
+ case SLI4_LINK_ATTN_INTERNAL_LOOPBACK:
+ ocs_log_debug(sli4->os, "topology Internal loopback\n");
+ event.topology = SLI_LINK_TOPO_LOOPBACK_INTERNAL;
+ break;
+ case SLI4_LINK_ATTN_SERDES_LOOPBACK:
+ ocs_log_debug(sli4->os, "topology serdes loopback\n");
+ event.topology = SLI_LINK_TOPO_LOOPBACK_EXTERNAL;
+ break;
+ default:
+ ocs_log_test(sli4->os, "topology: unknown\n");
+ break;
+ }
+
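+	/* The ACQE reports port_speed in Gb/s; convert to Mb/s for the event */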
+ event.speed = link_attn->port_speed * 1000;
+
+ sli4->link(sli4->link_arg, (void *)&event);
+
+ return 0;
+}
+
+/**
+ * @ingroup sli_fc
+ * @brief Parse an FC/FCoE work queue CQ entry.
+ *
+ * @param sli4 SLI context.
+ * @param cq CQ to process.
+ * @param cqe Pointer to the CQ entry.
+ * @param etype CQ event type.
+ * @param r_id Resource ID associated with this completion message (such as the IO tag).
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+int32_t
+sli_fc_cqe_parse(sli4_t *sli4, sli4_queue_t *cq, uint8_t *cqe, sli4_qentry_e *etype,
+ uint16_t *r_id)
+{
+ uint8_t code = cqe[SLI4_CQE_CODE_OFFSET];
+ int32_t rc = -1;
+
+ switch (code) {
+ case SLI4_CQE_CODE_WORK_REQUEST_COMPLETION:
+ {
+ sli4_fc_wcqe_t *wcqe = (void *)cqe;
+
+ *etype = SLI_QENTRY_WQ;
+ *r_id = wcqe->request_tag;
+ rc = wcqe->status;
+
+ /* Flag errors except for FCP_RSP_FAILURE */
+		if (rc && (rc != SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE)) {
+			ocs_log_test(sli4->os, "WCQE: status=%#x hw_status=%#x tag=%#x w1=%#x w2=%#x xb=%d\n",
+ wcqe->status, wcqe->hw_status,
+ wcqe->request_tag, wcqe->wqe_specific_1,
+ wcqe->wqe_specific_2, wcqe->xb);
+ ocs_log_test(sli4->os, " %08X %08X %08X %08X\n", ((uint32_t*) cqe)[0], ((uint32_t*) cqe)[1],
+ ((uint32_t*) cqe)[2], ((uint32_t*) cqe)[3]);
+ }
+
+ /* TODO: need to pass additional status back out of here as well
+ * as status (could overload rc as status/addlstatus are only 8 bits each)
+ */
+
+ break;
+ }
+ case SLI4_CQE_CODE_RQ_ASYNC:
+ {
+ sli4_fc_async_rcqe_t *rcqe = (void *)cqe;
+
+ *etype = SLI_QENTRY_RQ;
+ *r_id = rcqe->rq_id;
+ rc = rcqe->status;
+ break;
+ }
+ case SLI4_CQE_CODE_RQ_ASYNC_V1:
+ {
+ sli4_fc_async_rcqe_v1_t *rcqe = (void *)cqe;
+
+ *etype = SLI_QENTRY_RQ;
+ *r_id = rcqe->rq_id;
+ rc = rcqe->status;
+ break;
+ }
+ case SLI4_CQE_CODE_OPTIMIZED_WRITE_CMD:
+ {
+ sli4_fc_optimized_write_cmd_cqe_t *optcqe = (void *)cqe;
+
+ *etype = SLI_QENTRY_OPT_WRITE_CMD;
+ *r_id = optcqe->rq_id;
+ rc = optcqe->status;
+ break;
+ }
+ case SLI4_CQE_CODE_OPTIMIZED_WRITE_DATA:
+ {
+ sli4_fc_optimized_write_data_cqe_t *dcqe = (void *)cqe;
+
+ *etype = SLI_QENTRY_OPT_WRITE_DATA;
+ *r_id = dcqe->xri;
+ rc = dcqe->status;
+
+ /* Flag errors */
+ if (rc != SLI4_FC_WCQE_STATUS_SUCCESS) {
+ ocs_log_test(sli4->os, "Optimized DATA CQE: status=%#x hw_status=%#x xri=%#x dpl=%#x w3=%#x xb=%d\n",
+ dcqe->status, dcqe->hw_status,
+ dcqe->xri, dcqe->total_data_placed,
+ ((uint32_t*) cqe)[3], dcqe->xb);
+ }
+ break;
+ }
+ case SLI4_CQE_CODE_RQ_COALESCING:
+ {
+ sli4_fc_coalescing_rcqe_t *rcqe = (void *)cqe;
+
+ *etype = SLI_QENTRY_RQ;
+ *r_id = rcqe->rq_id;
+ rc = rcqe->status;
+ break;
+ }
+ case SLI4_CQE_CODE_XRI_ABORTED:
+ {
+ sli4_fc_xri_aborted_cqe_t *xa = (void *)cqe;
+
+ *etype = SLI_QENTRY_XABT;
+ *r_id = xa->xri;
+ rc = 0;
+ break;
+ }
+ case SLI4_CQE_CODE_RELEASE_WQE: {
+ sli4_fc_wqec_t *wqec = (void*) cqe;
+
+ *etype = SLI_QENTRY_WQ_RELEASE;
+ *r_id = wqec->wq_id;
+ rc = 0;
+ break;
+ }
+ default:
+ ocs_log_test(sli4->os, "CQE completion code %d not handled\n", code);
+ *etype = SLI_QENTRY_MAX;
+ *r_id = UINT16_MAX;
+ }
+
+ return rc;
+}
+
+/**
+ * @ingroup sli_fc
+ * @brief Return the ELS/CT response length.
+ *
+ * @param sli4 SLI context.
+ * @param cqe Pointer to the CQ entry.
+ *
+ * @return Returns the length, in bytes.
+ */
+uint32_t
+sli_fc_response_length(sli4_t *sli4, uint8_t *cqe)
+{
+ sli4_fc_wcqe_t *wcqe = (void *)cqe;
+
+ return wcqe->wqe_specific_1;
+}
+
+/**
+ * @ingroup sli_fc
+ * @brief Return the FCP IO length.
+ *
+ * @param sli4 SLI context.
+ * @param cqe Pointer to the CQ entry.
+ *
+ * @return Returns the length, in bytes.
+ */
+uint32_t
+sli_fc_io_length(sli4_t *sli4, uint8_t *cqe)
+{
+ sli4_fc_wcqe_t *wcqe = (void *)cqe;
+
+ return wcqe->wqe_specific_1;
+}
+
+/**
+ * @ingroup sli_fc
+ * @brief Retrieve the D_ID from the completion.
+ *
+ * @param sli4 SLI context.
+ * @param cqe Pointer to the CQ entry.
+ * @param d_id Pointer where the D_ID is written.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+int32_t
+sli_fc_els_did(sli4_t *sli4, uint8_t *cqe, uint32_t *d_id)
+{
+ sli4_fc_wcqe_t *wcqe = (void *)cqe;
+
+ *d_id = 0;
+
+ if (wcqe->status) {
+ return -1;
+ } else {
+ *d_id = wcqe->wqe_specific_2 & 0x00ffffff;
+ return 0;
+ }
+}
+
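+/**
+ * @ingroup sli_fc
+ * @brief Retrieve the extended status from a WCQE.
+ *
+ * @par Description
+ * The number of meaningful bits in the extended status (WQE word 2)
+ * depends on the primary status code, so mask it accordingly.
+ *
+ * @param sli4 SLI context.
+ * @param cqe Pointer to the CQ entry.
+ *
+ * @return Returns the masked extended status value.
+ */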
+uint32_t
+sli_fc_ext_status(sli4_t *sli4, uint8_t *cqe)
+{
+ sli4_fc_wcqe_t *wcqe = (void *)cqe;
+ uint32_t mask;
+
+ switch (wcqe->status) {
+ case SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE:
+ mask = UINT32_MAX;
+ break;
+ case SLI4_FC_WCQE_STATUS_LOCAL_REJECT:
+ case SLI4_FC_WCQE_STATUS_CMD_REJECT:
+ mask = 0xff;
+ break;
+ case SLI4_FC_WCQE_STATUS_NPORT_RJT:
+ case SLI4_FC_WCQE_STATUS_FABRIC_RJT:
+ case SLI4_FC_WCQE_STATUS_NPORT_BSY:
+ case SLI4_FC_WCQE_STATUS_FABRIC_BSY:
+ case SLI4_FC_WCQE_STATUS_LS_RJT:
+ mask = UINT32_MAX;
+ break;
+ case SLI4_FC_WCQE_STATUS_DI_ERROR:
+ mask = UINT32_MAX;
+ break;
+ default:
+ mask = 0;
+ }
+
+ return wcqe->wqe_specific_2 & mask;
+}
+
+/**
+ * @ingroup sli_fc
+ * @brief Retrieve the RQ index from the completion.
+ *
+ * @param sli4 SLI context.
+ * @param cqe Pointer to the CQ entry.
+ * @param rq_id Pointer where the rq_id is written.
+ * @param index Pointer where the index is written.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+int32_t
+sli_fc_rqe_rqid_and_index(sli4_t *sli4, uint8_t *cqe, uint16_t *rq_id, uint32_t *index)
+{
+ sli4_fc_async_rcqe_t *rcqe = (void *)cqe;
+ sli4_fc_async_rcqe_v1_t *rcqe_v1 = (void *)cqe;
+ int32_t rc = -1;
+ uint8_t code = 0;
+
+ *rq_id = 0;
+ *index = UINT32_MAX;
+
+ code = cqe[SLI4_CQE_CODE_OFFSET];
+
+ if (code == SLI4_CQE_CODE_RQ_ASYNC) {
+ *rq_id = rcqe->rq_id;
+ if (SLI4_FC_ASYNC_RQ_SUCCESS == rcqe->status) {
+ *index = rcqe->rq_element_index;
+ rc = 0;
+ } else {
+ *index = rcqe->rq_element_index;
+ rc = rcqe->status;
+ ocs_log_test(sli4->os, "status=%02x (%s) rq_id=%d, index=%x pdpl=%x sof=%02x eof=%02x hdpl=%x\n",
+ rcqe->status, sli_fc_get_status_string(rcqe->status), rcqe->rq_id,
+ rcqe->rq_element_index, rcqe->payload_data_placement_length, rcqe->sof_byte,
+ rcqe->eof_byte, rcqe->header_data_placement_length);
+ }
+ } else if (code == SLI4_CQE_CODE_RQ_ASYNC_V1) {
+ *rq_id = rcqe_v1->rq_id;
+ if (SLI4_FC_ASYNC_RQ_SUCCESS == rcqe_v1->status) {
+ *index = rcqe_v1->rq_element_index;
+ rc = 0;
+ } else {
+ *index = rcqe_v1->rq_element_index;
+ rc = rcqe_v1->status;
+ ocs_log_test(sli4->os, "status=%02x (%s) rq_id=%d, index=%x pdpl=%x sof=%02x eof=%02x hdpl=%x\n",
+ rcqe_v1->status, sli_fc_get_status_string(rcqe_v1->status),
+ rcqe_v1->rq_id, rcqe_v1->rq_element_index,
+ rcqe_v1->payload_data_placement_length, rcqe_v1->sof_byte,
+ rcqe_v1->eof_byte, rcqe_v1->header_data_placement_length);
+ }
+ } else if (code == SLI4_CQE_CODE_OPTIMIZED_WRITE_CMD) {
+ sli4_fc_optimized_write_cmd_cqe_t *optcqe = (void *)cqe;
+
+ *rq_id = optcqe->rq_id;
+ if (SLI4_FC_ASYNC_RQ_SUCCESS == optcqe->status) {
+ *index = optcqe->rq_element_index;
+ rc = 0;
+ } else {
+ *index = optcqe->rq_element_index;
+ rc = optcqe->status;
+ ocs_log_test(sli4->os, "status=%02x (%s) rq_id=%d, index=%x pdpl=%x hdpl=%x oox=%d agxr=%d xri=0x%x rpi=0x%x\n",
+ optcqe->status, sli_fc_get_status_string(optcqe->status), optcqe->rq_id,
+ optcqe->rq_element_index, optcqe->payload_data_placement_length,
+ optcqe->header_data_placement_length, optcqe->oox, optcqe->agxr, optcqe->xri,
+ optcqe->rpi);
+ }
+ } else if (code == SLI4_CQE_CODE_RQ_COALESCING) {
+ sli4_fc_coalescing_rcqe_t *rcqe = (void *)cqe;
+
+ *rq_id = rcqe->rq_id;
+ if (SLI4_FC_COALESCE_RQ_SUCCESS == rcqe->status) {
+ *index = rcqe->rq_element_index;
+ rc = 0;
+ } else {
+ *index = UINT32_MAX;
+ rc = rcqe->status;
+
+ ocs_log_test(sli4->os, "status=%02x (%s) rq_id=%d, index=%x rq_id=%#x sdpl=%x\n",
+ rcqe->status, sli_fc_get_status_string(rcqe->status), rcqe->rq_id,
+ rcqe->rq_element_index, rcqe->rq_id, rcqe->sequence_reporting_placement_length);
+ }
+ } else {
+ *index = UINT32_MAX;
+
+ rc = rcqe->status;
+
+ ocs_log_debug(sli4->os, "status=%02x rq_id=%d, index=%x pdpl=%x sof=%02x eof=%02x hdpl=%x\n",
+ rcqe->status, rcqe->rq_id, rcqe->rq_element_index, rcqe->payload_data_placement_length,
+ rcqe->sof_byte, rcqe->eof_byte, rcqe->header_data_placement_length);
+ }
+
+ return rc;
+}
+
+/**
+ * @ingroup sli_fc
+ * @brief Process an asynchronous FCoE event entry.
+ *
+ * @par Description
+ * Parses Asynchronous Completion Queue Entry (ACQE),
+ * creates an abstracted event, and calls the registered callback functions.
+ *
+ * @param sli4 SLI context.
+ * @param acqe Pointer to the ACQE.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+int32_t
+sli_fc_process_fcoe(sli4_t *sli4, void *acqe)
+{
+ sli4_fcoe_fip_t *fcoe = acqe;
+ sli4_fip_event_t event = { 0 };
+ uint32_t mask = UINT32_MAX;
+
+ ocs_log_debug(sli4->os, "ACQE FCoE FIP type=%02x count=%d tag=%#x\n",
+ fcoe->event_type,
+ fcoe->fcf_count,
+ fcoe->event_tag);
+
+ if (!sli4->fip) {
+ return 0;
+ }
+
+ event.type = fcoe->event_type;
+ event.index = UINT32_MAX;
+
+ switch (fcoe->event_type) {
+ case SLI4_FCOE_FIP_FCF_DISCOVERED:
+ ocs_log_debug(sli4->os, "FCF Discovered index=%d\n", fcoe->event_information);
+ break;
+ case SLI4_FCOE_FIP_FCF_TABLE_FULL:
+ ocs_log_debug(sli4->os, "FCF Table Full\n");
+ mask = 0;
+ break;
+ case SLI4_FCOE_FIP_FCF_DEAD:
+ ocs_log_debug(sli4->os, "FCF Dead/Gone index=%d\n", fcoe->event_information);
+ break;
+ case SLI4_FCOE_FIP_FCF_CLEAR_VLINK:
+ mask = UINT16_MAX;
+ ocs_log_debug(sli4->os, "Clear VLINK Received VPI=%#x\n", fcoe->event_information & mask);
+ break;
+ case SLI4_FCOE_FIP_FCF_MODIFIED:
+ ocs_log_debug(sli4->os, "FCF Modified\n");
+ break;
+ default:
+		ocs_log_test(sli4->os, "bad FCoE type %#x\n", fcoe->event_type);
+ mask = 0;
+ }
+
+ if (mask != 0) {
+ event.index = fcoe->event_information & mask;
+ }
+
+ sli4->fip(sli4->fip_arg, &event);
+
+ return 0;
+}
+
+/**
+ * @ingroup sli_fc
+ * @brief Allocate a receive queue.
+ *
+ * @par Description
+ * Allocates DMA memory and configures the requested queue type.
+ *
+ * @param sli4 SLI context.
+ * @param q Pointer to the queue object.
+ * @param n_entries Number of entries to allocate.
+ * @param buffer_size Buffer size for the queue.
+ * @param cq Associated CQ.
+ * @param ulp The ULP to bind.
+ * @param is_hdr Used to validate the rq_id and set the queue type.
+ *
+ * @return Returns 0 on success, or -1 on failure.
+ */
+int32_t
+sli_fc_rq_alloc(sli4_t *sli4, sli4_queue_t *q,
+ uint32_t n_entries, uint32_t buffer_size,
+ sli4_queue_t *cq, uint16_t ulp, uint8_t is_hdr)
+{
+ int32_t (*rq_create)(sli4_t *, void *, size_t, ocs_dma_t *, uint16_t, uint16_t, uint16_t);
+
+ if ((sli4 == NULL) || (q == NULL)) {
+ void *os = sli4 != NULL ? sli4->os : NULL;
+
+ ocs_log_err(os, "bad parameter sli4=%p q=%p\n", sli4, q);
+ return -1;
+ }
+
+ if (__sli_queue_init(sli4, q, SLI_QTYPE_RQ, SLI4_FCOE_RQE_SIZE,
+ n_entries, SLI_PAGE_SIZE)) {
+ return -1;
+ }
+
+ if (sli4->if_type == SLI4_IF_TYPE_BE3_SKH_PF) {
+ rq_create = sli_cmd_fcoe_rq_create;
+ } else {
+ rq_create = sli_cmd_fcoe_rq_create_v1;
+ }
+
+ if (rq_create(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE, &q->dma,
+ cq->id, ulp, buffer_size)) {
+ if (__sli_create_queue(sli4, q)) {
+ ocs_dma_free(sli4->os, &q->dma);
+ return -1;
+ }
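+		/*
+		 * RQs come in header/data pairs: the port assigns the header
+		 * RQ an even ID and the matching data RQ the odd ID that
+		 * follows it, so a parity mismatch indicates a bad pairing.
+		 */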
+		if (is_hdr && (q->id & 1)) {
+ ocs_log_test(sli4->os, "bad header RQ_ID %d\n", q->id);
+ ocs_dma_free(sli4->os, &q->dma);
+ return -1;
+ } else if (!is_hdr && (q->id & 1) == 0) {
+ ocs_log_test(sli4->os, "bad data RQ_ID %d\n", q->id);
+ ocs_dma_free(sli4->os, &q->dma);
+ return -1;
+ }
+ } else {
+ return -1;
+ }
+ q->u.flag.is_hdr = is_hdr;
+ if (SLI4_IF_TYPE_BE3_SKH_PF == sli4->if_type) {
+ q->u.flag.rq_batch = TRUE;
+ }
+ return 0;
+}
+
+/**
+ * @ingroup sli_fc
+ * @brief Allocate a receive queue set.
+ *
+ * @param sli4 SLI context.
+ * @param num_rq_pairs Number of RQ pairs to create.
+ * @param qs Pointers to the queue objects for both header and data.
+ * The length of this array must be 2 * num_rq_pairs.
+ * @param base_cq_id Base CQ ID; assumes CQs with IDs base_cq_id through
+ * (base_cq_id + num_rq_pairs - 1) have been allotted.
+ * @param n_entries Number of entries in each RQ.
+ * @param header_buffer_size Buffer size for the header RQs.
+ * @param payload_buffer_size Buffer size for the payload RQs.
+ * @param ulp The ULP to bind.
+ *
+ * @return Returns 0 on success, or -1 on failure.
+ */
+int32_t
+sli_fc_rq_set_alloc(sli4_t *sli4, uint32_t num_rq_pairs,
+ sli4_queue_t *qs[], uint32_t base_cq_id,
+ uint32_t n_entries, uint32_t header_buffer_size,
+ uint32_t payload_buffer_size, uint16_t ulp)
+{
+ uint32_t i, p, offset = 0;
+ uint32_t payload_size, total_page_count = 0;
+ uintptr_t addr;
+	ocs_dma_t dma = { 0 };	/* zeroed so the error path can safely test dma.size */
+ sli4_res_common_create_queue_set_t *rsp = NULL;
+ sli4_req_fcoe_rq_create_v2_t *req = NULL;
+
+ for (i = 0; i < (num_rq_pairs * 2); i++) {
+ if (__sli_queue_init(sli4, qs[i], SLI_QTYPE_RQ, SLI4_FCOE_RQE_SIZE,
+ n_entries, SLI_PAGE_SIZE)) {
+ goto error;
+ }
+ }
+
+ total_page_count = sli_page_count(qs[0]->dma.size, SLI_PAGE_SIZE) * num_rq_pairs * 2;
+
+ /* Payload length must accommodate both request and response */
+	payload_size = max((sizeof(sli4_req_fcoe_rq_create_v2_t) + (8 * total_page_count)),
+ sizeof(sli4_res_common_create_queue_set_t));
+
+ if (ocs_dma_alloc(sli4->os, &dma, payload_size, SLI_PAGE_SIZE)) {
+ ocs_log_err(sli4->os, "DMA allocation failed\n");
+ goto error;
+ }
+ ocs_memset(dma.virt, 0, payload_size);
+
+ if (sli_cmd_sli_config(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
+ payload_size, &dma) == -1) {
+ goto error;
+ }
+	req = (sli4_req_fcoe_rq_create_v2_t *)dma.virt;
+
+ /* Fill Header fields */
+ req->hdr.opcode = SLI4_OPC_FCOE_RQ_CREATE;
+ req->hdr.subsystem = SLI4_SUBSYSTEM_FCFCOE;
+ req->hdr.version = 2;
+ req->hdr.request_length = sizeof(sli4_req_fcoe_rq_create_v2_t) - sizeof(sli4_req_hdr_t)
+ + (8 * total_page_count);
+
+ /* Fill Payload fields */
+ req->dnb = TRUE;
+ req->num_pages = sli_page_count(qs[0]->dma.size, SLI_PAGE_SIZE);
+ req->rqe_count = qs[0]->dma.size / SLI4_FCOE_RQE_SIZE;
+ req->rqe_size = SLI4_FCOE_RQE_SIZE_8;
+ req->page_size = SLI4_FCOE_RQ_PAGE_SIZE_4096;
+ req->rq_count = num_rq_pairs * 2;
+ req->base_cq_id = base_cq_id;
+ req->hdr_buffer_size = header_buffer_size;
+ req->payload_buffer_size = payload_buffer_size;
+
+ for (i = 0; i < (num_rq_pairs * 2); i++) {
+ for (p = 0, addr = qs[i]->dma.phys; p < req->num_pages; p++, addr += SLI_PAGE_SIZE) {
+ req->page_physical_address[offset].low = ocs_addr32_lo(addr);
+ req->page_physical_address[offset].high = ocs_addr32_hi(addr);
+ offset++;
+ }
+ }
+
+	if (sli_bmbx_command(sli4)) {
+		ocs_log_crit(sli4->os, "bootstrap mailbox write failed RQSet\n");
+ goto error;
+ }
+
+	rsp = (sli4_res_common_create_queue_set_t *)dma.virt;
+ if (rsp->hdr.status) {
+ ocs_log_err(sli4->os, "bad create RQSet status=%#x addl=%#x\n",
+ rsp->hdr.status, rsp->hdr.additional_status);
+ goto error;
+ } else {
+ for (i = 0; i < (num_rq_pairs * 2); i++) {
+ qs[i]->id = i + rsp->q_id;
+ if ((qs[i]->id & 1) == 0) {
+ qs[i]->u.flag.is_hdr = TRUE;
+ } else {
+ qs[i]->u.flag.is_hdr = FALSE;
+ }
+ qs[i]->doorbell_offset = regmap[SLI4_REG_FCOE_RQ_DOORBELL][sli4->if_type].off;
+ qs[i]->doorbell_rset = regmap[SLI4_REG_FCOE_RQ_DOORBELL][sli4->if_type].rset;
+ }
+ }
+
+ ocs_dma_free(sli4->os, &dma);
+
+ return 0;
+
+error:
+ for (i = 0; i < (num_rq_pairs * 2); i++) {
+ if (qs[i]->dma.size) {
+ ocs_dma_free(sli4->os, &qs[i]->dma);
+ }
+ }
+
+ if (dma.size) {
+ ocs_dma_free(sli4->os, &dma);
+ }
+
+ return -1;
+}
+
+/**
+ * @ingroup sli_fc
+ * @brief Get the RPI resource requirements.
+ *
+ * @param sli4 SLI context.
+ * @param n_rpi Number of RPIs desired.
+ *
+ * @return Returns the number of bytes needed. This value may be zero.
+ */
+uint32_t
+sli_fc_get_rpi_requirements(sli4_t *sli4, uint32_t n_rpi)
+{
+ uint32_t bytes = 0;
+
+ /* Check if header templates needed */
+ if (sli4->config.hdr_template_req) {
+ /* round up to a page */
+ bytes = SLI_ROUND_PAGE(n_rpi * SLI4_FCOE_HDR_TEMPLATE_SIZE);
+ }
+
+ return bytes;
+}
+
+/**
+ * @ingroup sli_fc
+ * @brief Return a text string corresponding to a CQE status value
+ *
+ * @param status Status value
+ *
+ * @return Returns corresponding string, otherwise "unknown"
+ */
+const char *
+sli_fc_get_status_string(uint32_t status)
+{
+ static struct {
+ uint32_t code;
+ const char *label;
+ } lookup[] = {
+ {SLI4_FC_WCQE_STATUS_SUCCESS, "SUCCESS"},
+ {SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE, "FCP_RSP_FAILURE"},
+ {SLI4_FC_WCQE_STATUS_REMOTE_STOP, "REMOTE_STOP"},
+ {SLI4_FC_WCQE_STATUS_LOCAL_REJECT, "LOCAL_REJECT"},
+ {SLI4_FC_WCQE_STATUS_NPORT_RJT, "NPORT_RJT"},
+ {SLI4_FC_WCQE_STATUS_FABRIC_RJT, "FABRIC_RJT"},
+ {SLI4_FC_WCQE_STATUS_NPORT_BSY, "NPORT_BSY"},
+ {SLI4_FC_WCQE_STATUS_FABRIC_BSY, "FABRIC_BSY"},
+ {SLI4_FC_WCQE_STATUS_LS_RJT, "LS_RJT"},
+ {SLI4_FC_WCQE_STATUS_CMD_REJECT, "CMD_REJECT"},
+ {SLI4_FC_WCQE_STATUS_FCP_TGT_LENCHECK, "FCP_TGT_LENCHECK"},
+ {SLI4_FC_WCQE_STATUS_RQ_BUF_LEN_EXCEEDED, "BUF_LEN_EXCEEDED"},
+ {SLI4_FC_WCQE_STATUS_RQ_INSUFF_BUF_NEEDED, "RQ_INSUFF_BUF_NEEDED"},
+ {SLI4_FC_WCQE_STATUS_RQ_INSUFF_FRM_DISC, "RQ_INSUFF_FRM_DESC"},
+ {SLI4_FC_WCQE_STATUS_RQ_DMA_FAILURE, "RQ_DMA_FAILURE"},
+ {SLI4_FC_WCQE_STATUS_FCP_RSP_TRUNCATE, "FCP_RSP_TRUNCATE"},
+ {SLI4_FC_WCQE_STATUS_DI_ERROR, "DI_ERROR"},
+ {SLI4_FC_WCQE_STATUS_BA_RJT, "BA_RJT"},
+ {SLI4_FC_WCQE_STATUS_RQ_INSUFF_XRI_NEEDED, "RQ_INSUFF_XRI_NEEDED"},
+ {SLI4_FC_WCQE_STATUS_RQ_INSUFF_XRI_DISC, "INSUFF_XRI_DISC"},
+ {SLI4_FC_WCQE_STATUS_RX_ERROR_DETECT, "RX_ERROR_DETECT"},
+ {SLI4_FC_WCQE_STATUS_RX_ABORT_REQUEST, "RX_ABORT_REQUEST"},
+ };
+ uint32_t i;
+
+ for (i = 0; i < ARRAY_SIZE(lookup); i++) {
+ if (status == lookup[i].code) {
+ return lookup[i].label;
+ }
+ }
+ return "unknown";
+}
diff --git a/sys/dev/ocs_fc/sli4.h b/sys/dev/ocs_fc/sli4.h
new file mode 100644
index 000000000000..04d50353dd51
--- /dev/null
+++ b/sys/dev/ocs_fc/sli4.h
@@ -0,0 +1,5609 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/**
+ * @file
+ * Define common SLI-4 structures and function prototypes.
+ */
+
+#ifndef _SLI4_H
+#define _SLI4_H
+
+#include "ocs_os.h"
+
+#define SLI_PAGE_SIZE (4096)
+#define SLI_SUB_PAGE_MASK (SLI_PAGE_SIZE - 1)
+#define SLI_PAGE_SHIFT 12
+#define SLI_ROUND_PAGE(b) (((b) + SLI_SUB_PAGE_MASK) & ~SLI_SUB_PAGE_MASK)
+
+#define SLI4_BMBX_TIMEOUT_MSEC 30000
+#define SLI4_FW_READY_TIMEOUT_MSEC 30000
+
+static inline uint32_t
+sli_page_count(size_t bytes, uint32_t page_size)
+{
+ uint32_t mask = page_size - 1;
+ uint32_t shift = 0;
+
+ switch (page_size) {
+ case 4096:
+ shift = 12;
+ break;
+ case 8192:
+ shift = 13;
+ break;
+ case 16384:
+ shift = 14;
+ break;
+ case 32768:
+ shift = 15;
+ break;
+ case 65536:
+ shift = 16;
+ break;
+ default:
+ return 0;
+ }
+
+ return (bytes + mask) >> shift;
+}
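+/*
+ * For example, sli_page_count(6000, 4096) returns 2, since a 6000 byte
+ * region spans two 4 KB pages; an unsupported page_size yields 0.
+ */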
+
+/*************************************************************************
+ * Common PCI configuration space register definitions
+ */
+
+#define SLI4_PCI_CLASS_REVISION 0x0008 /** register offset */
+#define SLI4_PCI_REV_ID_SHIFT 0
+#define SLI4_PCI_REV_ID_MASK 0xff
+#define SLI4_PCI_CLASS_SHIFT 8
+#define SLI4_PCI_CLASS_MASK 0xfff
+
+#define SLI4_PCI_SOFT_RESET_CSR 0x005c /** register offset */
+#define SLI4_PCI_SOFT_RESET_MASK 0x0080
+
+/*************************************************************************
+ * Common SLI-4 register offsets and field definitions
+ */
+
+/**
+ * @brief SLI_INTF - SLI Interface Definition Register
+ */
+#define SLI4_INTF_REG 0x0058 /** register offset */
+#define SLI4_INTF_VALID_SHIFT 29
+#define SLI4_INTF_VALID_MASK 0x7
+#define SLI4_INTF_VALID 0x6
+#define SLI4_INTF_IF_TYPE_SHIFT 12
+#define SLI4_INTF_IF_TYPE_MASK 0xf
+#define SLI4_INTF_SLI_FAMILY_SHIFT 8
+#define SLI4_INTF_SLI_FAMILY_MASK 0xf
+#define SLI4_INTF_SLI_REVISION_SHIFT 4
+#define SLI4_INTF_SLI_REVISION_MASK 0xf
+#define SLI4_FAMILY_CHECK_ASIC_TYPE 0xf
+
+#define SLI4_IF_TYPE_BE3_SKH_PF 0
+#define SLI4_IF_TYPE_BE3_SKH_VF 1
+#define SLI4_IF_TYPE_LANCER_FC_ETH 2
+#define SLI4_IF_TYPE_LANCER_RDMA 3
+#define SLI4_MAX_IF_TYPES 4
+
+/**
+ * @brief ASIC_ID - SLI ASIC Type and Revision Register
+ */
+#define SLI4_ASIC_ID_REG 0x009c /* register offset */
+#define SLI4_ASIC_REV_SHIFT 0
+#define SLI4_ASIC_REV_MASK 0xf
+#define SLI4_ASIC_VER_SHIFT 4
+#define SLI4_ASIC_VER_MASK 0xf
+#define SLI4_ASIC_GEN_SHIFT 8
+#define SLI4_ASIC_GEN_MASK 0xff
+#define SLI4_ASIC_GEN_BE2 0x00
+#define SLI4_ASIC_GEN_BE3 0x03
+#define SLI4_ASIC_GEN_SKYHAWK 0x04
+#define SLI4_ASIC_GEN_CORSAIR 0x05
+#define SLI4_ASIC_GEN_LANCER 0x0b
+
+
+/**
+ * @brief BMBX - Bootstrap Mailbox Register
+ */
+#define SLI4_BMBX_REG 0x0160 /* register offset */
+#define SLI4_BMBX_MASK_HI 0x3
+#define SLI4_BMBX_MASK_LO 0xf
+#define SLI4_BMBX_RDY BIT(0)
+#define SLI4_BMBX_HI BIT(1)
+#define SLI4_BMBX_WRITE_HI(r) ((ocs_addr32_hi(r) & ~SLI4_BMBX_MASK_HI) | \
+ SLI4_BMBX_HI)
+#define SLI4_BMBX_WRITE_LO(r) (((ocs_addr32_hi(r) & SLI4_BMBX_MASK_HI) << 30) | \
+ (((r) & ~SLI4_BMBX_MASK_LO) >> 2))
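+/*
+ * Note: the bootstrap mailbox physical address is programmed with two
+ * register writes. The HI write carries bits 31:2 of the upper address
+ * dword (with SLI4_BMBX_HI set); the LO write carries the remaining two
+ * upper bits plus the lower dword shifted right by two, which is why the
+ * mailbox buffer must be at least 16-byte aligned.
+ */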
+
+#define SLI4_BMBX_SIZE 256
+
+
+/**
+ * @brief EQCQ_DOORBELL - EQ and CQ Doorbell Register
+ */
+#define SLI4_EQCQ_DOORBELL_REG 0x120
+#define SLI4_EQCQ_DOORBELL_CI BIT(9)
+#define SLI4_EQCQ_DOORBELL_QT BIT(10)
+#define SLI4_EQCQ_DOORBELL_ARM BIT(29)
+#define SLI4_EQCQ_DOORBELL_SE BIT(31)
+#define SLI4_EQCQ_NUM_SHIFT 16
+#define SLI4_EQCQ_NUM_MASK 0x01ff
+#define SLI4_EQCQ_EQ_ID_MASK 0x3fff
+#define SLI4_EQCQ_CQ_ID_MASK 0x7fff
+#define SLI4_EQCQ_EQ_ID_MASK_LO 0x01ff
+#define SLI4_EQCQ_CQ_ID_MASK_LO 0x03ff
+#define SLI4_EQCQ_EQCQ_ID_MASK_HI 0xf800
+
+/**
+ * @brief SLIPORT_CONTROL - SLI Port Control Register
+ */
+#define SLI4_SLIPORT_CONTROL_REG 0x0408
+#define SLI4_SLIPORT_CONTROL_END BIT(30)
+#define SLI4_SLIPORT_CONTROL_LITTLE_ENDIAN (0)
+#define SLI4_SLIPORT_CONTROL_BIG_ENDIAN BIT(30)
+#define SLI4_SLIPORT_CONTROL_IP BIT(27)
+#define SLI4_SLIPORT_CONTROL_IDIS BIT(22)
+#define SLI4_SLIPORT_CONTROL_FDD BIT(31)
+
+/**
+ * @brief SLI4_SLIPORT_ERROR1 - SLI Port Error Register
+ */
+#define SLI4_SLIPORT_ERROR1 0x040c
+
+/**
+ * @brief SLI4_SLIPORT_ERROR2 - SLI Port Error Register
+ */
+#define SLI4_SLIPORT_ERROR2 0x0410
+
+/**
+ * @brief User error registers
+ */
+#define SLI4_UERR_STATUS_LOW_REG 0xA0
+#define SLI4_UERR_STATUS_HIGH_REG 0xA4
+#define SLI4_UERR_MASK_LOW_REG 0xA8
+#define SLI4_UERR_MASK_HIGH_REG 0xAC
+
+/**
+ * @brief Registers for generating software UE (BE3)
+ */
+#define SLI4_SW_UE_CSR1 0x138
+#define SLI4_SW_UE_CSR2 0x1FFFC
+
+/**
+ * @brief Registers for generating software UE (Skyhawk)
+ */
+#define SLI4_SW_UE_REG 0x5C /* register offset */
+
+static inline uint32_t sli_eq_doorbell(uint16_t n_popped, uint16_t id, uint8_t arm)
+{
+ uint32_t reg = 0;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ struct {
+ uint32_t eq_id_lo:9,
+ ci:1, /* clear interrupt */
+ qt:1, /* queue type */
+ eq_id_hi:5,
+ number_popped:13,
+ arm:1,
+ :1,
+ se:1;
+ } * eq_doorbell = (void *)&reg;
+#else
+#error big endian version not defined
+#endif
+
+ eq_doorbell->eq_id_lo = id & SLI4_EQCQ_EQ_ID_MASK_LO;
+ eq_doorbell->qt = 1; /* EQ is type 1 (section 2.2.3.3 SLI Arch) */
+ eq_doorbell->eq_id_hi = (id >> 9) & 0x1f;
+ eq_doorbell->number_popped = n_popped;
+ eq_doorbell->arm = arm;
+ eq_doorbell->ci = TRUE;
+
+ return reg;
+}
+
+static inline uint32_t sli_cq_doorbell(uint16_t n_popped, uint16_t id, uint8_t arm)
+{
+ uint32_t reg = 0;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ struct {
+ uint32_t cq_id_lo:10,
+ qt:1, /* queue type */
+ cq_id_hi:5,
+ number_popped:13,
+ arm:1,
+ :1,
+ se:1;
+ } * cq_doorbell = (void *)&reg;
+#else
+#error big endian version not defined
+#endif
+
+ cq_doorbell->cq_id_lo = id & SLI4_EQCQ_CQ_ID_MASK_LO;
+ cq_doorbell->qt = 0; /* CQ is type 0 (section 2.2.3.3 SLI Arch) */
+ cq_doorbell->cq_id_hi = (id >> 10) & 0x1f;
+ cq_doorbell->number_popped = n_popped;
+ cq_doorbell->arm = arm;
+
+ return reg;
+}
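+/*
+ * Illustrative use of the doorbell helpers (a sketch; the actual
+ * register write goes through the OS abstraction layer): to rearm
+ * CQ 0x12 after consuming four entries, compose the value and write
+ * it to the EQ/CQ doorbell register, e.g.
+ *
+ *	uint32_t val = sli_cq_doorbell(4, 0x12, TRUE);
+ *	ocs_reg_write32(os, rset, SLI4_EQCQ_DOORBELL_REG, val);
+ */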
+
+/**
+ * @brief MQ_DOORBELL - MQ Doorbell Register
+ */
+#define SLI4_MQ_DOORBELL_REG 0x0140 /* register offset */
+#define SLI4_MQ_DOORBELL_NUM_SHIFT 16
+#define SLI4_MQ_DOORBELL_NUM_MASK 0x3fff
+#define SLI4_MQ_DOORBELL_ID_MASK 0xffff
+#define SLI4_MQ_DOORBELL(n, i) ((((n) & SLI4_MQ_DOORBELL_NUM_MASK) << SLI4_MQ_DOORBELL_NUM_SHIFT) | \
+ ((i) & SLI4_MQ_DOORBELL_ID_MASK))
+
+/**
+ * @brief RQ_DOORBELL - RQ Doorbell Register
+ */
+#define SLI4_RQ_DOORBELL_REG 0x0a0 /* register offset */
+#define SLI4_RQ_DOORBELL_NUM_SHIFT 16
+#define SLI4_RQ_DOORBELL_NUM_MASK 0x3fff
+#define SLI4_RQ_DOORBELL_ID_MASK 0xffff
+#define SLI4_RQ_DOORBELL(n, i) ((((n) & SLI4_RQ_DOORBELL_NUM_MASK) << SLI4_RQ_DOORBELL_NUM_SHIFT) | \
+ ((i) & SLI4_RQ_DOORBELL_ID_MASK))
+
+/**
+ * @brief WQ_DOORBELL - WQ Doorbell Register
+ */
+#define SLI4_IO_WQ_DOORBELL_REG 0x040 /* register offset */
+#define SLI4_WQ_DOORBELL_IDX_SHIFT 16
+#define SLI4_WQ_DOORBELL_IDX_MASK 0x00ff
+#define SLI4_WQ_DOORBELL_NUM_SHIFT 24
+#define SLI4_WQ_DOORBELL_NUM_MASK 0x00ff
+#define SLI4_WQ_DOORBELL_ID_MASK 0xffff
+#define SLI4_WQ_DOORBELL(n, x, i) ((((n) & SLI4_WQ_DOORBELL_NUM_MASK) << SLI4_WQ_DOORBELL_NUM_SHIFT) | \
+ (((x) & SLI4_WQ_DOORBELL_IDX_MASK) << SLI4_WQ_DOORBELL_IDX_SHIFT) | \
+ ((i) & SLI4_WQ_DOORBELL_ID_MASK))
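+/*
+ * For example, SLI4_WQ_DOORBELL(1, 0, id) composes a doorbell value that
+ * announces one newly posted entry (num=1, index=0) on work queue "id".
+ */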
+
+/**
+ * @brief SLIPORT_SEMAPHORE - SLI Port Host and Port Status Register
+ */
+#define SLI4_PORT_SEMAPHORE_REG_0 0x00ac /** register offset Interface Type 0 + 1 */
+#define SLI4_PORT_SEMAPHORE_REG_1 0x0180 /** register offset Interface Type 0 + 1 */
+#define SLI4_PORT_SEMAPHORE_REG_23 0x0400 /** register offset Interface Type 2 + 3 */
+#define SLI4_PORT_SEMAPHORE_PORT_MASK 0x0000ffff
+#define SLI4_PORT_SEMAPHORE_PORT(r) ((r) & SLI4_PORT_SEMAPHORE_PORT_MASK)
+#define SLI4_PORT_SEMAPHORE_HOST_MASK 0x00ff0000
+#define SLI4_PORT_SEMAPHORE_HOST_SHIFT 16
+#define SLI4_PORT_SEMAPHORE_HOST(r) (((r) & SLI4_PORT_SEMAPHORE_HOST_MASK) >> \
+ SLI4_PORT_SEMAPHORE_HOST_SHIFT)
+#define SLI4_PORT_SEMAPHORE_SCR2 BIT(26) /** scratch area 2 */
+#define SLI4_PORT_SEMAPHORE_SCR1 BIT(27) /** scratch area 1 */
+#define SLI4_PORT_SEMAPHORE_IPC BIT(28) /** IP conflict */
+#define SLI4_PORT_SEMAPHORE_NIP BIT(29) /** no IP address */
+#define SLI4_PORT_SEMAPHORE_SFI BIT(30) /** secondary firmware image used */
+#define SLI4_PORT_SEMAPHORE_PERR BIT(31) /** POST fatal error */
+
+#define SLI4_PORT_SEMAPHORE_STATUS_POST_READY 0xc000
+#define SLI4_PORT_SEMAPHORE_STATUS_UNRECOV_ERR 0xf000
+#define SLI4_PORT_SEMAPHORE_STATUS_ERR_MASK 0xf000
+#define SLI4_PORT_SEMAPHORE_IN_ERR(r) (SLI4_PORT_SEMAPHORE_STATUS_UNRECOV_ERR == ((r) & \
+ SLI4_PORT_SEMAPHORE_STATUS_ERR_MASK))
+
+/**
+ * @brief SLIPORT_STATUS - SLI Port Status Register
+ */
+
+#define SLI4_PORT_STATUS_REG_23 0x0404 /** register offset Interface Type 2 + 3 */
+#define SLI4_PORT_STATUS_FDP BIT(21) /** function specific dump present */
+#define SLI4_PORT_STATUS_RDY BIT(23) /** ready */
+#define SLI4_PORT_STATUS_RN BIT(24) /** reset needed */
+#define SLI4_PORT_STATUS_DIP BIT(25) /** dump present */
+#define SLI4_PORT_STATUS_OTI BIT(29) /** over temp indicator */
+#define SLI4_PORT_STATUS_END BIT(30) /** endianness */
+#define SLI4_PORT_STATUS_ERR BIT(31) /** SLI port error */
+#define SLI4_PORT_STATUS_READY(r) ((r) & SLI4_PORT_STATUS_RDY)
+#define SLI4_PORT_STATUS_ERROR(r) ((r) & SLI4_PORT_STATUS_ERR)
+#define SLI4_PORT_STATUS_DUMP_PRESENT(r) ((r) & SLI4_PORT_STATUS_DIP)
+#define SLI4_PORT_STATUS_FDP_PRESENT(r) ((r) & SLI4_PORT_STATUS_FDP)
+
+
+#define SLI4_PHSDEV_CONTROL_REG_23 0x0414 /** register offset Interface Type 2 + 3 */
+#define SLI4_PHYDEV_CONTROL_DRST BIT(0) /** physical device reset */
+#define SLI4_PHYDEV_CONTROL_FRST BIT(1) /** firmware reset */
+#define SLI4_PHYDEV_CONTROL_DD BIT(2) /** diagnostic dump */
+#define SLI4_PHYDEV_CONTROL_FRL_MASK 0x000000f0
+#define SLI4_PHYDEV_CONTROL_FRL_SHIFT 4
+#define SLI4_PHYDEV_CONTROL_FRL(r) (((r) & SLI4_PHYDEV_CONTROL_FRL_MASK) >> \
+						SLI4_PHYDEV_CONTROL_FRL_SHIFT)
+
+/*************************************************************************
+ * SLI-4 mailbox command formats and definitions
+ */
+
+typedef struct sli4_mbox_command_header_s {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t :8,
+ command:8,
+ status:16; /** Port writes to indicate success / fail */
+#else
+#error big endian version not defined
+#endif
+} sli4_mbox_command_header_t;
+
+#define SLI4_MBOX_COMMAND_CONFIG_LINK 0x07
+#define SLI4_MBOX_COMMAND_DUMP 0x17
+#define SLI4_MBOX_COMMAND_DOWN_LINK 0x06
+#define SLI4_MBOX_COMMAND_INIT_LINK 0x05
+#define SLI4_MBOX_COMMAND_INIT_VFI 0xa3
+#define SLI4_MBOX_COMMAND_INIT_VPI 0xa4
+#define SLI4_MBOX_COMMAND_POST_XRI 0xa7
+#define SLI4_MBOX_COMMAND_RELEASE_XRI 0xac
+#define SLI4_MBOX_COMMAND_READ_CONFIG 0x0b
+#define SLI4_MBOX_COMMAND_READ_STATUS 0x0e
+#define SLI4_MBOX_COMMAND_READ_NVPARMS 0x02
+#define SLI4_MBOX_COMMAND_READ_REV 0x11
+#define SLI4_MBOX_COMMAND_READ_LNK_STAT 0x12
+#define SLI4_MBOX_COMMAND_READ_SPARM64 0x8d
+#define SLI4_MBOX_COMMAND_READ_TOPOLOGY 0x95
+#define SLI4_MBOX_COMMAND_REG_FCFI 0xa0
+#define SLI4_MBOX_COMMAND_REG_FCFI_MRQ 0xaf
+#define SLI4_MBOX_COMMAND_REG_RPI 0x93
+#define SLI4_MBOX_COMMAND_REG_RX_RQ 0xa6
+#define SLI4_MBOX_COMMAND_REG_VFI 0x9f
+#define SLI4_MBOX_COMMAND_REG_VPI 0x96
+#define SLI4_MBOX_COMMAND_REQUEST_FEATURES 0x9d
+#define SLI4_MBOX_COMMAND_SLI_CONFIG 0x9b
+#define SLI4_MBOX_COMMAND_UNREG_FCFI 0xa2
+#define SLI4_MBOX_COMMAND_UNREG_RPI 0x14
+#define SLI4_MBOX_COMMAND_UNREG_VFI 0xa1
+#define SLI4_MBOX_COMMAND_UNREG_VPI 0x97
+#define SLI4_MBOX_COMMAND_WRITE_NVPARMS 0x03
+#define SLI4_MBOX_COMMAND_CONFIG_AUTO_XFER_RDY 0xAD
+#define SLI4_MBOX_COMMAND_CONFIG_AUTO_XFER_RDY_HP 0xAE
+
+#define SLI4_MBOX_STATUS_SUCCESS 0x0000
+#define SLI4_MBOX_STATUS_FAILURE 0x0001
+#define SLI4_MBOX_STATUS_RPI_NOT_REG 0x1400
+
+/**
+ * @brief Buffer Descriptor Entry (BDE)
+ */
+typedef struct sli4_bde_s {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t buffer_length:24,
+ bde_type:8;
+ union {
+ struct {
+ uint32_t buffer_address_low;
+ uint32_t buffer_address_high;
+ } data;
+ struct {
+ uint32_t offset;
+ uint32_t rsvd2;
+ } imm;
+ struct {
+ uint32_t sgl_segment_address_low;
+ uint32_t sgl_segment_address_high;
+ } blp;
+ } u;
+#else
+#error big endian version not defined
+#endif
+} sli4_bde_t;
+
+#define SLI4_BDE_TYPE_BDE_64 0x00 /** Generic 64-bit data */
+#define SLI4_BDE_TYPE_BDE_IMM 0x01 /** Immediate data */
+#define SLI4_BDE_TYPE_BLP 0x40 /** Buffer List Pointer */
+
+/**
+ * @brief Scatter-Gather Entry (SGE)
+ */
+typedef struct sli4_sge_s {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t buffer_address_high;
+ uint32_t buffer_address_low;
+ uint32_t data_offset:27,
+ sge_type:4,
+ last:1;
+ uint32_t buffer_length;
+#else
+#error big endian version not defined
+#endif
+} sli4_sge_t;
+
+/**
+ * @brief T10 DIF Scatter-Gather Entry (SGE)
+ */
+typedef struct sli4_dif_sge_s {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t buffer_address_high;
+ uint32_t buffer_address_low;
+ uint32_t :27,
+ sge_type:4,
+ last:1;
+ uint32_t :32;
+#else
+#error big endian version not defined
+#endif
+} sli4_dif_sge_t;
+
+/**
+ * @brief T10 DIF Seed Scatter-Gather Entry (SGE)
+ */
+typedef struct sli4_diseed_sge_s {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t ref_tag_cmp;
+ uint32_t ref_tag_repl;
+ uint32_t app_tag_repl:16,
+ :2,
+ hs:1,
+ ws:1,
+ ic:1,
+ ics:1,
+ atrt:1,
+ at:1,
+ fwd_app_tag:1,
+ repl_app_tag:1,
+ head_insert:1,
+ sge_type:4,
+ last:1;
+ uint32_t app_tag_cmp:16,
+ dif_blk_size:3,
+ auto_incr_ref_tag:1,
+ check_app_tag:1,
+ check_ref_tag:1,
+ check_crc:1,
+ new_ref_tag:1,
+ dif_op_rx:4,
+ dif_op_tx:4;
+#else
+#error big endian version not defined
+#endif
+} sli4_diseed_sge_t;
+
+/**
+ * @brief List Segment Pointer Scatter-Gather Entry (SGE)
+ */
+typedef struct sli4_lsp_sge_s {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t buffer_address_high;
+ uint32_t buffer_address_low;
+ uint32_t :27,
+ sge_type:4,
+ last:1;
+ uint32_t segment_length:24,
+ :8;
+#else
+#error big endian version not defined
+#endif
+} sli4_lsp_sge_t;
+
+#define SLI4_SGE_MAX_RESERVED 3
+
+#define SLI4_SGE_DIF_OP_IN_NODIF_OUT_CRC 0x00
+#define SLI4_SGE_DIF_OP_IN_CRC_OUT_NODIF 0x01
+#define SLI4_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM 0x02
+#define SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF 0x03
+#define SLI4_SGE_DIF_OP_IN_CRC_OUT_CRC 0x04
+#define SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM 0x05
+#define SLI4_SGE_DIF_OP_IN_CRC_OUT_CHKSUM 0x06
+#define SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CRC 0x07
+#define SLI4_SGE_DIF_OP_IN_RAW_OUT_RAW 0x08
+
+#define SLI4_SGE_TYPE_DATA 0x00
+#define SLI4_SGE_TYPE_CHAIN 0x03 /** Skyhawk only */
+#define SLI4_SGE_TYPE_DIF 0x04 /** Data Integrity Field */
+#define SLI4_SGE_TYPE_LSP 0x05 /** List Segment Pointer */
+#define SLI4_SGE_TYPE_PEDIF 0x06 /** Post Encryption Engine DIF */
+#define SLI4_SGE_TYPE_PESEED 0x07 /** Post Encryption Engine DIF Seed */
+#define SLI4_SGE_TYPE_DISEED 0x08 /** DIF Seed */
+#define SLI4_SGE_TYPE_ENC 0x09 /** Encryption */
+#define SLI4_SGE_TYPE_ATM 0x0a /** DIF Application Tag Mask */
+#define SLI4_SGE_TYPE_SKIP 0x0c /** SKIP */
+
+#define OCS_MAX_SGE_SIZE 0x80000000 /* Maximum data allowed in a SGE */
+
+/**
+ * @brief CONFIG_LINK
+ */
+typedef struct sli4_cmd_config_link_s {
+ sli4_mbox_command_header_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t maxbbc:8, /** Max buffer-to-buffer credit */
+ :24;
+ uint32_t alpa:8,
+ n_port_id:16,
+ :8;
+ uint32_t rsvd3;
+ uint32_t e_d_tov;
+ uint32_t lp_tov;
+ uint32_t r_a_tov;
+ uint32_t r_t_tov;
+ uint32_t al_tov;
+ uint32_t rsvd9;
+ uint32_t :8,
+ bbscn:4, /** buffer-to-buffer state change number */
+ cscn:1, /** configure BBSCN */
+ :19;
+#else
+#error big endian version not defined
+#endif
+} sli4_cmd_config_link_t;
+
+/**
+ * @brief DUMP Type 4
+ */
+#define SLI4_WKI_TAG_SAT_TEM 0x1040
+typedef struct sli4_cmd_dump4_s {
+ sli4_mbox_command_header_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t type:4,
+ :28;
+ uint32_t wki_selection:16,
+ :16;
+ uint32_t resv;
+ uint32_t returned_byte_cnt;
+ uint32_t resp_data[59];
+#else
+#error big endian version not defined
+#endif
+} sli4_cmd_dump4_t;
+
+/**
+ * @brief FW_INITIALIZE - initialize a SLI port
+ *
+ * @note This command uses a different format than all others.
+ */
+
+extern const uint8_t sli4_fw_initialize[8];
+
+/**
+ * @brief FW_DEINITIALIZE - deinitialize a SLI port
+ *
+ * @note This command uses a different format than all others.
+ */
+
+extern const uint8_t sli4_fw_deinitialize[8];
+
+/**
+ * @brief INIT_LINK - initialize the link for a FC/FCoE port
+ */
+typedef struct sli4_cmd_init_link_flags_s {
+ uint32_t loopback:1,
+ topology:2,
+ #define FC_TOPOLOGY_FCAL 0
+ #define FC_TOPOLOGY_P2P 1
+ :3,
+ unfair:1,
+ skip_lirp_lilp:1,
+ gen_loop_validity_check:1,
+ skip_lisa:1,
+ enable_topology_failover:1,
+ fixed_speed:1,
+ :3,
+ select_hightest_al_pa:1,
+ :16; /* pad to 32 bits */
+} sli4_cmd_init_link_flags_t;
+
+#define SLI4_INIT_LINK_F_LOOP_BACK BIT(0)
+#define SLI4_INIT_LINK_F_UNFAIR BIT(6)
+#define SLI4_INIT_LINK_F_NO_LIRP BIT(7)
+#define SLI4_INIT_LINK_F_LOOP_VALID_CHK BIT(8)
+#define SLI4_INIT_LINK_F_NO_LISA BIT(9)
+#define SLI4_INIT_LINK_F_FAIL_OVER BIT(10)
+#define SLI4_INIT_LINK_F_NO_AUTOSPEED BIT(11)
+#define SLI4_INIT_LINK_F_PICK_HI_ALPA BIT(15)
+
+#define SLI4_INIT_LINK_F_P2P_ONLY 1
+#define SLI4_INIT_LINK_F_FCAL_ONLY 2
+
+#define SLI4_INIT_LINK_F_FCAL_FAIL_OVER 0
+#define SLI4_INIT_LINK_F_P2P_FAIL_OVER 1
+
+typedef struct sli4_cmd_init_link_s {
+ sli4_mbox_command_header_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t selective_reset_al_pa:8,
+ :24;
+ sli4_cmd_init_link_flags_t link_flags;
+ uint32_t link_speed_selection_code;
+ #define FC_LINK_SPEED_1G 1
+ #define FC_LINK_SPEED_2G 2
+ #define FC_LINK_SPEED_AUTO_1_2 3
+ #define FC_LINK_SPEED_4G 4
+ #define FC_LINK_SPEED_AUTO_4_1 5
+ #define FC_LINK_SPEED_AUTO_4_2 6
+ #define FC_LINK_SPEED_AUTO_4_2_1 7
+ #define FC_LINK_SPEED_8G 8
+ #define FC_LINK_SPEED_AUTO_8_1 9
+ #define FC_LINK_SPEED_AUTO_8_2 10
+ #define FC_LINK_SPEED_AUTO_8_2_1 11
+ #define FC_LINK_SPEED_AUTO_8_4 12
+ #define FC_LINK_SPEED_AUTO_8_4_1 13
+ #define FC_LINK_SPEED_AUTO_8_4_2 14
+ #define FC_LINK_SPEED_10G 16
+ #define FC_LINK_SPEED_16G 17
+ #define FC_LINK_SPEED_AUTO_16_8_4 18
+ #define FC_LINK_SPEED_AUTO_16_8 19
+ #define FC_LINK_SPEED_32G 20
+ #define FC_LINK_SPEED_AUTO_32_16_8 21
+ #define FC_LINK_SPEED_AUTO_32_16 22
+#else
+#error big endian version not defined
+#endif
+} sli4_cmd_init_link_t;
+
+/**
+ * @brief INIT_VFI - initialize the VFI resource
+ */
+typedef struct sli4_cmd_init_vfi_s {
+ sli4_mbox_command_header_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t vfi:16,
+ :12,
+ vp:1,
+ vf:1,
+ vt:1,
+ vr:1;
+ uint32_t fcfi:16,
+ vpi:16;
+ uint32_t vf_id:13,
+ pri:3,
+ :16;
+ uint32_t :24,
+ hop_count:8;
+#else
+#error big endian version not defined
+#endif
+} sli4_cmd_init_vfi_t;
+
+/**
+ * @brief INIT_VPI - initialize the VPI resource
+ */
+typedef struct sli4_cmd_init_vpi_s {
+ sli4_mbox_command_header_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t vpi:16,
+ vfi:16;
+#else
+#error big endian version not defined
+#endif
+} sli4_cmd_init_vpi_t;
+
+/**
+ * @brief POST_XRI - post XRI resources to the SLI Port
+ */
+typedef struct sli4_cmd_post_xri_s {
+ sli4_mbox_command_header_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t xri_base:16,
+ xri_count:12,
+ enx:1,
+ dl:1,
+ di:1,
+ val:1;
+#else
+#error big endian version not defined
+#endif
+} sli4_cmd_post_xri_t;
+
+/**
+ * @brief RELEASE_XRI - Release XRI resources from the SLI Port
+ */
+typedef struct sli4_cmd_release_xri_s {
+ sli4_mbox_command_header_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t released_xri_count:5,
+ :11,
+ xri_count:5,
+ :11;
+ struct {
+ uint32_t xri_tag0:16,
+ xri_tag1:16;
+ } xri_tbl[62];
+#else
+#error big endian version not defined
+#endif
+} sli4_cmd_release_xri_t;
+
+/**
+ * @brief READ_CONFIG - read SLI port configuration parameters
+ */
+typedef struct sli4_cmd_read_config_s {
+ sli4_mbox_command_header_t hdr;
+} sli4_cmd_read_config_t;
+
+typedef struct sli4_res_read_config_s {
+ sli4_mbox_command_header_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t :31,
+ ext:1; /** Resource Extents */
+ uint32_t :24,
+ topology:8;
+ uint32_t rsvd3;
+ uint32_t e_d_tov:16,
+ :16;
+ uint32_t rsvd5;
+ uint32_t r_a_tov:16,
+ :16;
+ uint32_t rsvd7;
+ uint32_t rsvd8;
+ uint32_t lmt:16, /** Link Module Type */
+ :16;
+ uint32_t rsvd10;
+ uint32_t rsvd11;
+ uint32_t xri_base:16,
+ xri_count:16;
+ uint32_t rpi_base:16,
+ rpi_count:16;
+ uint32_t vpi_base:16,
+ vpi_count:16;
+ uint32_t vfi_base:16,
+ vfi_count:16;
+ uint32_t :16,
+ fcfi_count:16;
+ uint32_t rq_count:16,
+ eq_count:16;
+ uint32_t wq_count:16,
+ cq_count:16;
+ uint32_t pad[45];
+#else
+#error big endian version not defined
+#endif
+} sli4_res_read_config_t;
+
+#define SLI4_READ_CFG_TOPO_FCOE 0x0 /** FCoE topology */
+#define SLI4_READ_CFG_TOPO_FC 0x1 /** FC topology unknown */
+#define SLI4_READ_CFG_TOPO_FC_DA 0x2 /** FC Direct Attach (non FC-AL) topology */
+#define SLI4_READ_CFG_TOPO_FC_AL 0x3 /** FC-AL topology */
+
+/**
+ * @brief READ_NVPARMS - read SLI port configuration parameters
+ */
+typedef struct sli4_cmd_read_nvparms_s {
+ sli4_mbox_command_header_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t rsvd1;
+ uint32_t rsvd2;
+ uint32_t rsvd3;
+ uint32_t rsvd4;
+ uint8_t wwpn[8];
+ uint8_t wwnn[8];
+ uint32_t hard_alpa:8,
+ preferred_d_id:24;
+#else
+#error big endian version not defined
+#endif
+} sli4_cmd_read_nvparms_t;
+
+/**
+ * @brief WRITE_NVPARMS - write SLI port configuration parameters
+ */
+typedef struct sli4_cmd_write_nvparms_s {
+ sli4_mbox_command_header_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t rsvd1;
+ uint32_t rsvd2;
+ uint32_t rsvd3;
+ uint32_t rsvd4;
+ uint8_t wwpn[8];
+ uint8_t wwnn[8];
+ uint32_t hard_alpa:8,
+ preferred_d_id:24;
+#else
+#error big endian version not defined
+#endif
+} sli4_cmd_write_nvparms_t;
+
+/**
+ * @brief READ_REV - read the Port revision levels
+ */
+typedef struct sli4_cmd_read_rev_s {
+ sli4_mbox_command_header_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t :16,
+ sli_level:4,
+ fcoem:1,
+ ceev:2,
+ :6,
+ vpd:1,
+ :2;
+ uint32_t first_hw_revision;
+ uint32_t second_hw_revision;
+ uint32_t rsvd4;
+ uint32_t third_hw_revision;
+ uint32_t fc_ph_low:8,
+ fc_ph_high:8,
+ feature_level_low:8,
+ feature_level_high:8;
+ uint32_t rsvd7;
+ uint32_t first_fw_id;
+ char first_fw_name[16];
+ uint32_t second_fw_id;
+ char second_fw_name[16];
+ uint32_t rsvd18[30];
+ uint32_t available_length:24,
+ :8;
+ uint32_t physical_address_low;
+ uint32_t physical_address_high;
+ uint32_t returned_vpd_length;
+ uint32_t actual_vpd_length;
+#else
+#error big endian version not defined
+#endif
+} sli4_cmd_read_rev_t;
+
+/**
+ * @brief READ_SPARM64 - read the Port service parameters
+ */
+typedef struct sli4_cmd_read_sparm64_s {
+ sli4_mbox_command_header_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t rsvd1;
+ uint32_t rsvd2;
+ sli4_bde_t bde_64;
+ uint32_t vpi:16,
+ :16;
+ uint32_t port_name_start:16,
+ port_name_length:16;
+ uint32_t node_name_start:16,
+ node_name_length:16;
+#else
+#error big endian version not defined
+#endif
+} sli4_cmd_read_sparm64_t;
+
+#define SLI4_READ_SPARM64_VPI_DEFAULT 0
+#define SLI4_READ_SPARM64_VPI_SPECIAL UINT16_MAX
+
+#define SLI4_READ_SPARM64_WWPN_OFFSET (4 * sizeof(uint32_t))
+#define SLI4_READ_SPARM64_WWNN_OFFSET (SLI4_READ_SPARM64_WWPN_OFFSET + sizeof(uint64_t))
+
+typedef struct sli4_port_state_s {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t nx_port_recv_state:2,
+ nx_port_trans_state:2,
+ nx_port_state_machine:4,
+ link_speed:8,
+ :14,
+ tf:1,
+ lu:1;
+#else
+#error big endian version not defined
+#endif
+} sli4_port_state_t;
+
+/**
+ * @brief READ_TOPOLOGY - read the link event information
+ */
+typedef struct sli4_cmd_read_topology_s {
+ sli4_mbox_command_header_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t event_tag;
+ uint32_t attention_type:8,
+ il:1,
+ pb_recvd:1,
+ :22;
+ uint32_t topology:8,
+ lip_type:8,
+ lip_al_ps:8,
+ al_pa_granted:8;
+ sli4_bde_t bde_loop_map;
+ sli4_port_state_t link_down;
+ sli4_port_state_t link_current;
+ uint32_t max_bbc:8,
+ init_bbc:8,
+ bbscn:4,
+ cbbscn:4,
+ :8;
+ uint32_t r_t_tov:9,
+ :3,
+ al_tov:4,
+ lp_tov:16;
+ uint32_t acquired_al_pa:8,
+ :7,
+ pb:1,
+ specified_al_pa:16;
+ uint32_t initial_n_port_id:24,
+ :8;
+#else
+#error big endian version not defined
+#endif
+} sli4_cmd_read_topology_t;
+
+#define SLI4_MIN_LOOP_MAP_BYTES 128
+
+#define SLI4_READ_TOPOLOGY_LINK_UP 0x1
+#define SLI4_READ_TOPOLOGY_LINK_DOWN 0x2
+#define SLI4_READ_TOPOLOGY_LINK_NO_ALPA 0x3
+
+#define SLI4_READ_TOPOLOGY_UNKNOWN 0x0
+#define SLI4_READ_TOPOLOGY_NPORT 0x1
+#define SLI4_READ_TOPOLOGY_FC_AL 0x2
+
+#define SLI4_READ_TOPOLOGY_SPEED_NONE 0x00
+#define SLI4_READ_TOPOLOGY_SPEED_1G 0x04
+#define SLI4_READ_TOPOLOGY_SPEED_2G 0x08
+#define SLI4_READ_TOPOLOGY_SPEED_4G 0x10
+#define SLI4_READ_TOPOLOGY_SPEED_8G 0x20
+#define SLI4_READ_TOPOLOGY_SPEED_10G 0x40
+#define SLI4_READ_TOPOLOGY_SPEED_16G 0x80
+#define SLI4_READ_TOPOLOGY_SPEED_32G 0x90
+
+/**
+ * @brief REG_FCFI - activate a FC Forwarder
+ */
+#define SLI4_CMD_REG_FCFI_NUM_RQ_CFG 4
+typedef struct sli4_cmd_reg_fcfi_s {
+ sli4_mbox_command_header_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t fcf_index:16,
+ fcfi:16;
+ uint32_t rq_id_1:16,
+ rq_id_0:16;
+ uint32_t rq_id_3:16,
+ rq_id_2:16;
+ struct {
+ uint32_t r_ctl_mask:8,
+ r_ctl_match:8,
+ type_mask:8,
+ type_match:8;
+ } rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG];
+ uint32_t vlan_tag:12,
+ vv:1,
+ :19;
+#else
+#error big endian version not defined
+#endif
+} sli4_cmd_reg_fcfi_t;
+
+#define SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG 4
+#define SLI4_CMD_REG_FCFI_MRQ_MAX_NUM_RQ 32
+#define SLI4_CMD_REG_FCFI_SET_FCFI_MODE 0
+#define SLI4_CMD_REG_FCFI_SET_MRQ_MODE 1
+
+typedef struct sli4_cmd_reg_fcfi_mrq_s {
+ sli4_mbox_command_header_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t fcf_index:16,
+ fcfi:16;
+
+ uint32_t rq_id_1:16,
+ rq_id_0:16;
+
+ uint32_t rq_id_3:16,
+ rq_id_2:16;
+
+ struct {
+ uint32_t r_ctl_mask:8,
+ r_ctl_match:8,
+ type_mask:8,
+ type_match:8;
+ } rq_cfg[SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG];
+
+ uint32_t vlan_tag:12,
+ vv:1,
+ mode:1,
+ :18;
+
+ uint32_t num_mrq_pairs:8,
+ mrq_filter_bitmask:4,
+ rq_selection_policy:4,
+ :16;
+#else
+#error big endian version not defined
+#endif
+} sli4_cmd_reg_fcfi_mrq_t;
+
+/**
+ * @brief REG_RPI - register a Remote Port Indicator
+ */
+typedef struct sli4_cmd_reg_rpi_s {
+ sli4_mbox_command_header_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t rpi:16,
+ :16;
+ uint32_t remote_n_port_id:24,
+ upd:1,
+ :2,
+ etow:1,
+ :1,
+ terp:1,
+ :1,
+ ci:1;
+ sli4_bde_t bde_64;
+ uint32_t vpi:16,
+ :16;
+#else
+#error big endian version not defined
+#endif
+} sli4_cmd_reg_rpi_t;
+#define SLI4_REG_RPI_BUF_LEN 0x70
+
+
+/**
+ * @brief REG_VFI - register a Virtual Fabric Indicator
+ */
+typedef struct sli4_cmd_reg_vfi_s {
+ sli4_mbox_command_header_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t vfi:16,
+ :12,
+ vp:1,
+ upd:1,
+ :2;
+ uint32_t fcfi:16,
+ vpi:16; /* vp=TRUE */
+ uint8_t wwpn[8]; /* vp=TRUE */
+ sli4_bde_t sparm; /* either FLOGI or PLOGI */
+ uint32_t e_d_tov;
+ uint32_t r_a_tov;
+ uint32_t local_n_port_id:24, /* vp=TRUE */
+ :8;
+#else
+#error big endian version not defined
+#endif
+} sli4_cmd_reg_vfi_t;
+
+/**
+ * @brief REG_VPI - register a Virtual Port Indicator
+ */
+typedef struct sli4_cmd_reg_vpi_s {
+ sli4_mbox_command_header_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t rsvd1;
+ uint32_t local_n_port_id:24,
+ upd:1,
+ :7;
+ uint8_t wwpn[8];
+ uint32_t rsvd5;
+ uint32_t vpi:16,
+ vfi:16;
+#else
+#error big endian version not defined
+#endif
+} sli4_cmd_reg_vpi_t;
+
+/**
+ * @brief REQUEST_FEATURES - request / query SLI features
+ */
+typedef union {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ struct {
+ uint32_t iaab:1, /** inhibit auto-ABTS originator */
+ npiv:1, /** NPIV support */
+ dif:1, /** DIF/DIX support */
+ vf:1, /** virtual fabric support */
+ fcpi:1, /** FCP initiator support */
+ fcpt:1, /** FCP target support */
+ fcpc:1, /** combined FCP initiator/target */
+ :1,
+ rqd:1, /** recovery qualified delay */
+ iaar:1, /** inhibit auto-ABTS responder */
+ hlm:1, /** High Login Mode */
+ perfh:1, /** performance hints */
+ rxseq:1, /** RX Sequence Coalescing */
+ rxri:1, /** Release XRI variant of Coalescing */
+ dcl2:1, /** Disable Class 2 */
+ rsco:1, /** Receive Sequence Coalescing Optimizations */
+ mrqp:1, /** Multi RQ Pair Mode Support */
+ :15;
+ } flag;
+ uint32_t dword;
+#else
+#error big endian version not defined
+#endif
+} sli4_features_t;
+
+typedef struct sli4_cmd_request_features_s {
+ sli4_mbox_command_header_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t qry:1,
+ :31;
+#else
+#error big endian version not defined
+#endif
+ sli4_features_t command;
+ sli4_features_t response;
+} sli4_cmd_request_features_t;
+
+/**
+ * @brief SLI_CONFIG - submit a configuration command to Port
+ *
+ * Command is either embedded as part of the payload (embed) or located
+ * in a separate memory buffer (mem)
+ */
+
+
+typedef struct sli4_sli_config_pmd_s {
+ uint32_t address_low;
+ uint32_t address_high;
+ uint32_t length:24,
+ :8;
+} sli4_sli_config_pmd_t;
+
+typedef struct sli4_cmd_sli_config_s {
+ sli4_mbox_command_header_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t emb:1,
+ :2,
+ pmd_count:5,
+ :24;
+ uint32_t payload_length;
+ uint32_t rsvd3;
+ uint32_t rsvd4;
+ uint32_t rsvd5;
+ union {
+ uint8_t embed[58 * sizeof(uint32_t)];
+ sli4_sli_config_pmd_t mem;
+ } payload;
+#else
+#error big endian version not defined
+#endif
+} sli4_cmd_sli_config_t;
+
+/**
+ * @brief READ_STATUS - read tx/rx status of a particular port
+ *
+ */
+
+typedef struct sli4_cmd_read_status_s {
+ sli4_mbox_command_header_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t cc:1,
+ :31;
+ uint32_t rsvd2;
+ uint32_t transmit_kbyte_count;
+ uint32_t receive_kbyte_count;
+ uint32_t transmit_frame_count;
+ uint32_t receive_frame_count;
+ uint32_t transmit_sequence_count;
+ uint32_t receive_sequence_count;
+ uint32_t total_exchanges_originator;
+ uint32_t total_exchanges_responder;
+ uint32_t receive_p_bsy_count;
+ uint32_t receive_f_bsy_count;
+ uint32_t dropped_frames_due_to_no_rq_buffer_count;
+ uint32_t empty_rq_timeout_count;
+ uint32_t dropped_frames_due_to_no_xri_count;
+ uint32_t empty_xri_pool_count;
+
+#else
+#error big endian version not defined
+#endif
+} sli4_cmd_read_status_t;
+
+/**
+ * @brief READ_LNK_STAT - read link status of a particular port
+ *
+ */
+
+typedef struct sli4_cmd_read_link_stats_s {
+ sli4_mbox_command_header_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t rec:1,
+ gec:1,
+ w02of:1,
+ w03of:1,
+ w04of:1,
+ w05of:1,
+ w06of:1,
+ w07of:1,
+ w08of:1,
+ w09of:1,
+ w10of:1,
+ w11of:1,
+ w12of:1,
+ w13of:1,
+ w14of:1,
+ w15of:1,
+ w16of:1,
+ w17of:1,
+ w18of:1,
+ w19of:1,
+ w20of:1,
+ w21of:1,
+ resv0:8,
+ clrc:1,
+ clof:1;
+ uint32_t link_failure_error_count;
+ uint32_t loss_of_sync_error_count;
+ uint32_t loss_of_signal_error_count;
+ uint32_t primitive_sequence_error_count;
+ uint32_t invalid_transmission_word_error_count;
+ uint32_t crc_error_count;
+ uint32_t primitive_sequence_event_timeout_count;
+ uint32_t elastic_buffer_overrun_error_count;
+ uint32_t arbitration_fc_al_timeout_count;
+ uint32_t advertised_receive_buffer_to_buffer_credit;
+ uint32_t current_receive_buffer_to_buffer_credit;
+ uint32_t advertised_transmit_buffer_to_buffer_credit;
+ uint32_t current_transmit_buffer_to_buffer_credit;
+ uint32_t received_eofa_count;
+ uint32_t received_eofdti_count;
+ uint32_t received_eofni_count;
+ uint32_t received_soff_count;
+ uint32_t received_dropped_no_aer_count;
+ uint32_t received_dropped_no_available_rpi_resources_count;
+ uint32_t received_dropped_no_available_xri_resources_count;
+
+#else
+#error big endian version not defined
+#endif
+} sli4_cmd_read_link_stats_t;
+
+/**
+ * @brief Format a WQE with WQ_ID Association performance hint
+ *
+ * @par Description
+ * PHWQ works by overwriting part of Word 10 in the WQE with the WQ ID.
+ *
+ * @param entry Pointer to the WQE.
+ * @param q_id Queue ID.
+ *
+ * @return None.
+ */
+static inline void
+sli_set_wq_id_association(void *entry, uint16_t q_id)
+{
+ uint32_t *wqe = entry;
+
+ /*
+ * Set Word 10, bit 0 to zero
+ * Set Word 10, bits 15:1 to the WQ ID
+ */
+#if BYTE_ORDER == LITTLE_ENDIAN
+ wqe[10] &= ~0xffff;
+ wqe[10] |= q_id << 1;
+#else
+#error big endian version not defined
+#endif
+}
+
+/**
+ * @brief UNREG_FCFI - unregister an FCFI
+ */
+typedef struct sli4_cmd_unreg_fcfi_s {
+ sli4_mbox_command_header_t hdr;
+ uint32_t rsvd1;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t fcfi:16,
+ :16;
+#else
+#error big endian version not defined
+#endif
+} sli4_cmd_unreg_fcfi_t;
+
+/**
+ * @brief UNREG_RPI - unregister one or more RPIs
+ */
+typedef struct sli4_cmd_unreg_rpi_s {
+ sli4_mbox_command_header_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t index:16,
+ :13,
+ dp:1,
+ ii:2;
+ uint32_t destination_n_port_id:24,
+ :8;
+#else
+#error big endian version not defined
+#endif
+} sli4_cmd_unreg_rpi_t;
+
+#define SLI4_UNREG_RPI_II_RPI 0x0
+#define SLI4_UNREG_RPI_II_VPI 0x1
+#define SLI4_UNREG_RPI_II_VFI 0x2
+#define SLI4_UNREG_RPI_II_FCFI 0x3
+
+/**
+ * @brief UNREG_VFI - unregister one or more VFIs
+ */
+typedef struct sli4_cmd_unreg_vfi_s {
+ sli4_mbox_command_header_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t rsvd1;
+ uint32_t index:16,
+ :14,
+ ii:2;
+#else
+#error big endian version not defined
+#endif
+} sli4_cmd_unreg_vfi_t;
+
+#define SLI4_UNREG_VFI_II_VFI 0x0
+#define SLI4_UNREG_VFI_II_FCFI 0x3
+
+enum {
+ SLI4_UNREG_TYPE_PORT,
+ SLI4_UNREG_TYPE_DOMAIN,
+ SLI4_UNREG_TYPE_FCF,
+ SLI4_UNREG_TYPE_ALL
+};
+
+/**
+ * @brief UNREG_VPI - unregister one or more VPIs
+ */
+typedef struct sli4_cmd_unreg_vpi_s {
+ sli4_mbox_command_header_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t rsvd1;
+ uint32_t index:16,
+ :14,
+ ii:2;
+#else
+#error big endian version not defined
+#endif
+} sli4_cmd_unreg_vpi_t;
+
+#define SLI4_UNREG_VPI_II_VPI 0x0
+#define SLI4_UNREG_VPI_II_VFI 0x2
+#define SLI4_UNREG_VPI_II_FCFI 0x3
+
+
+/**
+ * @brief AUTO_XFER_RDY - Configure the auto-generate XFER-RDY feature.
+ */
+typedef struct sli4_cmd_config_auto_xfer_rdy_s {
+ sli4_mbox_command_header_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t resv;
+ uint32_t max_burst_len;
+#else
+#error big endian version not defined
+#endif
+} sli4_cmd_config_auto_xfer_rdy_t;
+
+typedef struct sli4_cmd_config_auto_xfer_rdy_hp_s {
+ sli4_mbox_command_header_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t resv;
+ uint32_t max_burst_len;
+ uint32_t esoc:1,
+ :31;
+ uint32_t block_size:16,
+ :16;
+#else
+#error big endian version not defined
+#endif
+} sli4_cmd_config_auto_xfer_rdy_hp_t;
+
+
+/*************************************************************************
+ * SLI-4 common configuration command formats and definitions
+ */
+
+#define SLI4_CFG_STATUS_SUCCESS 0x00
+#define SLI4_CFG_STATUS_FAILED 0x01
+#define SLI4_CFG_STATUS_ILLEGAL_REQUEST 0x02
+#define SLI4_CFG_STATUS_ILLEGAL_FIELD 0x03
+
+#define SLI4_MGMT_STATUS_FLASHROM_READ_FAILED 0xcb
+
+#define SLI4_CFG_ADD_STATUS_NO_STATUS 0x00
+#define SLI4_CFG_ADD_STATUS_INVALID_OPCODE 0x1e
+
+/**
+ * Subsystem values.
+ */
+#define SLI4_SUBSYSTEM_COMMON 0x01
+#define SLI4_SUBSYSTEM_LOWLEVEL 0x0B
+#define SLI4_SUBSYSTEM_FCFCOE 0x0c
+#define SLI4_SUBSYSTEM_DMTF 0x11
+
+#define SLI4_OPC_LOWLEVEL_SET_WATCHDOG 0x36
+
+/**
+ * Common opcode (OPC) values.
+ */
+#define SLI4_OPC_COMMON_FUNCTION_RESET 0x3d
+#define SLI4_OPC_COMMON_CREATE_CQ 0x0c
+#define SLI4_OPC_COMMON_CREATE_CQ_SET 0x1d
+#define SLI4_OPC_COMMON_DESTROY_CQ 0x36
+#define SLI4_OPC_COMMON_MODIFY_EQ_DELAY 0x29
+#define SLI4_OPC_COMMON_CREATE_EQ 0x0d
+#define SLI4_OPC_COMMON_DESTROY_EQ 0x37
+#define SLI4_OPC_COMMON_CREATE_MQ_EXT 0x5a
+#define SLI4_OPC_COMMON_DESTROY_MQ 0x35
+#define SLI4_OPC_COMMON_GET_CNTL_ATTRIBUTES 0x20
+#define SLI4_OPC_COMMON_NOP 0x21
+#define SLI4_OPC_COMMON_GET_RESOURCE_EXTENT_INFO 0x9a
+#define SLI4_OPC_COMMON_GET_SLI4_PARAMETERS 0xb5
+#define SLI4_OPC_COMMON_QUERY_FW_CONFIG 0x3a
+#define SLI4_OPC_COMMON_GET_PORT_NAME 0x4d
+
+#define SLI4_OPC_COMMON_WRITE_FLASHROM 0x07
+#define SLI4_OPC_COMMON_MANAGE_FAT 0x44
+#define SLI4_OPC_COMMON_READ_TRANSCEIVER_DATA 0x49
+#define SLI4_OPC_COMMON_GET_CNTL_ADDL_ATTRIBUTES 0x79
+#define SLI4_OPC_COMMON_GET_EXT_FAT_CAPABILITIES 0x7d
+#define SLI4_OPC_COMMON_SET_EXT_FAT_CAPABILITIES 0x7e
+#define SLI4_OPC_COMMON_EXT_FAT_CONFIGURE_SNAPSHOT 0x7f
+#define SLI4_OPC_COMMON_EXT_FAT_RETRIEVE_SNAPSHOT 0x80
+#define SLI4_OPC_COMMON_EXT_FAT_READ_STRING_TABLE 0x82
+#define SLI4_OPC_COMMON_GET_FUNCTION_CONFIG 0xa0
+#define SLI4_OPC_COMMON_GET_PROFILE_CONFIG 0xa4
+#define SLI4_OPC_COMMON_SET_PROFILE_CONFIG 0xa5
+#define SLI4_OPC_COMMON_GET_PROFILE_LIST 0xa6
+#define SLI4_OPC_COMMON_GET_ACTIVE_PROFILE 0xa7
+#define SLI4_OPC_COMMON_SET_ACTIVE_PROFILE 0xa8
+#define SLI4_OPC_COMMON_READ_OBJECT 0xab
+#define SLI4_OPC_COMMON_WRITE_OBJECT 0xac
+#define SLI4_OPC_COMMON_DELETE_OBJECT 0xae
+#define SLI4_OPC_COMMON_READ_OBJECT_LIST 0xad
+#define SLI4_OPC_COMMON_SET_DUMP_LOCATION 0xb8
+#define SLI4_OPC_COMMON_SET_FEATURES 0xbf
+#define SLI4_OPC_COMMON_GET_RECONFIG_LINK_INFO 0xc9
+#define SLI4_OPC_COMMON_SET_RECONFIG_LINK_ID 0xca
+
+/**
+ * DMTF opcode (OPC) values.
+ */
+#define SLI4_OPC_DMTF_EXEC_CLP_CMD 0x01
+
+/**
+ * @brief Generic Command Request header
+ */
+typedef struct sli4_req_hdr_s {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t opcode:8,
+ subsystem:8,
+ :16;
+ uint32_t timeout;
+ uint32_t request_length;
+ uint32_t version:8,
+ :24;
+#else
+#error big endian version not defined
+#endif
+} sli4_req_hdr_t;
+
+/**
+ * @brief Generic Command Response header
+ */
+typedef struct sli4_res_hdr_s {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t opcode:8,
+ subsystem:8,
+ :16;
+ uint32_t status:8,
+ additional_status:8,
+ :16;
+ uint32_t response_length;
+ uint32_t actual_response_length;
+#else
+#error big endian version not defined
+#endif
+} sli4_res_hdr_t;
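+
+/*
+ * Example (hypothetical helper, for illustration only): a response is
+ * successful when the status byte matches SLI4_CFG_STATUS_SUCCESS;
+ * additional_status carries the SLI4_CFG_ADD_STATUS_* detail otherwise.
+ */
+static inline int32_t
+sli_res_hdr_succeeded(sli4_res_hdr_t *res)
+{
+	return (res->status == SLI4_CFG_STATUS_SUCCESS);
+}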
+
+/**
+ * @brief COMMON_FUNCTION_RESET
+ *
+ * Resets the Port, returning it to a power-on state. This configuration
+ * command does not have a payload and should set/expect the lengths to
+ * be zero.
+ */
+typedef struct sli4_req_common_function_reset_s {
+ sli4_req_hdr_t hdr;
+} sli4_req_common_function_reset_t;
+
+
+typedef struct sli4_res_common_function_reset_s {
+ sli4_res_hdr_t hdr;
+} sli4_res_common_function_reset_t;
+
+/**
+ * @brief COMMON_CREATE_CQ_V0
+ *
+ * Create a Completion Queue.
+ */
+typedef struct sli4_req_common_create_cq_v0_s {
+ sli4_req_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t num_pages:16,
+ :16;
+ uint32_t :12,
+ clswm:2,
+ nodelay:1,
+ :12,
+ cqecnt:2,
+ valid:1,
+ :1,
+ evt:1;
+ uint32_t :22,
+ eq_id:8,
+ :1,
+ arm:1;
+ uint32_t rsvd[2];
+ struct {
+ uint32_t low;
+ uint32_t high;
+ } page_physical_address[0];
+#else
+#error big endian version not defined
+#endif
+} sli4_req_common_create_cq_v0_t;
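+
+/*
+ * Example (sketch): page_physical_address[] is a flexible array, so the
+ * command is sized as the base structure plus one low/high address pair
+ * per page. Illustrative only; num_pages must match the field above.
+ */
+static inline size_t
+sli_create_cq_v0_cmd_size(uint16_t num_pages)
+{
+	return (sizeof(sli4_req_common_create_cq_v0_t) +
+	    num_pages * 2 * sizeof(uint32_t));
+}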
+
+/**
+ * @brief COMMON_CREATE_CQ_V2
+ *
+ * Create a Completion Queue.
+ */
+typedef struct sli4_req_common_create_cq_v2_s {
+ sli4_req_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t num_pages:16,
+ page_size:8,
+ :8;
+ uint32_t :12,
+ clswm:2,
+ nodelay:1,
+ autovalid:1,
+ :11,
+ cqecnt:2,
+ valid:1,
+ :1,
+ evt:1;
+ uint32_t eq_id:16,
+ :15,
+ arm:1;
+ uint32_t cqe_count:16,
+ :16;
+ uint32_t rsvd[1];
+ struct {
+ uint32_t low;
+ uint32_t high;
+ } page_physical_address[0];
+#else
+#error big endian version not defined
+#endif
+} sli4_req_common_create_cq_v2_t;
+
+
+
+/**
+ * @brief COMMON_CREATE_CQ_SET_V0
+ *
+ * Create a set of Completion Queues.
+ */
+typedef struct sli4_req_common_create_cq_set_v0_s {
+ sli4_req_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t num_pages:16,
+ page_size:8,
+ :8;
+ uint32_t :12,
+ clswm:2,
+ nodelay:1,
+ autovalid:1,
+ rsvd:11,
+ cqecnt:2,
+ valid:1,
+ :1,
+ evt:1;
+ uint32_t num_cq_req:16,
+ cqe_count:15,
+ arm:1;
+ uint16_t eq_id[16];
+ struct {
+ uint32_t low;
+ uint32_t high;
+ } page_physical_address[0];
+#else
+#error big endian version not defined
+#endif
+} sli4_req_common_create_cq_set_v0_t;
+
+/**
+ * CQE count.
+ */
+#define SLI4_CQ_CNT_256 0
+#define SLI4_CQ_CNT_512 1
+#define SLI4_CQ_CNT_1024 2
+#define SLI4_CQ_CNT_LARGE 3
+
+#define SLI4_CQE_BYTES (4 * sizeof(uint32_t))
+
+#define SLI4_COMMON_CREATE_CQ_V2_MAX_PAGES 8
+
+/**
+ * @brief Generic Common Create EQ/CQ/MQ/WQ/RQ Queue completion
+ */
+typedef struct sli4_res_common_create_queue_s {
+ sli4_res_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t q_id:16,
+ :8,
+ ulp:8;
+ uint32_t db_offset;
+ uint32_t db_rs:16,
+ db_fmt:16;
+#else
+#error big endian version not defined
+#endif
+} sli4_res_common_create_queue_t;
+
+
+typedef struct sli4_res_common_create_queue_set_s {
+ sli4_res_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t q_id:16,
+ num_q_allocated:16;
+#else
+#error big endian version not defined
+#endif
+} sli4_res_common_create_queue_set_t;
+
+
+/**
+ * @brief Common Destroy CQ
+ */
+typedef struct sli4_req_common_destroy_cq_s {
+ sli4_req_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t cq_id:16,
+ :16;
+#else
+#error big endian version not defined
+#endif
+} sli4_req_common_destroy_cq_t;
+
+/**
+ * @brief COMMON_MODIFY_EQ_DELAY
+ *
+ * Modify the delay multiplier for EQs
+ */
+typedef struct sli4_req_common_modify_eq_delay_s {
+ sli4_req_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t num_eq;
+ struct {
+ uint32_t eq_id;
+ uint32_t phase;
+ uint32_t delay_multiplier;
+ } eq_delay_record[8];
+#else
+#error big endian version not defined
+#endif
+} sli4_req_common_modify_eq_delay_t;
+
+/**
+ * @brief COMMON_CREATE_EQ
+ *
+ * Create an Event Queue.
+ */
+typedef struct sli4_req_common_create_eq_s {
+ sli4_req_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t num_pages:16,
+ :16;
+ uint32_t :29,
+ valid:1,
+ :1,
+ eqesz:1;
+ uint32_t :26,
+ count:3,
+ :2,
+ arm:1;
+ uint32_t :13,
+ delay_multiplier:10,
+ :9;
+ uint32_t rsvd;
+ struct {
+ uint32_t low;
+ uint32_t high;
+ } page_address[8];
+#else
+#error big endian version not defined
+#endif
+} sli4_req_common_create_eq_t;
+
+#define SLI4_EQ_CNT_256 0
+#define SLI4_EQ_CNT_512 1
+#define SLI4_EQ_CNT_1024 2
+#define SLI4_EQ_CNT_2048 3
+#define SLI4_EQ_CNT_4096 4
+
+#define SLI4_EQE_SIZE_4 0
+#define SLI4_EQE_SIZE_16 1
+
+/**
+ * @brief Common Destroy EQ
+ */
+typedef struct sli4_req_common_destroy_eq_s {
+ sli4_req_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t eq_id:16,
+ :16;
+#else
+#error big endian version not defined
+#endif
+} sli4_req_common_destroy_eq_t;
+
+/**
+ * @brief COMMON_CREATE_MQ_EXT
+ *
+ * Create a Mailbox Queue; accommodate v0 and v1 forms.
+ */
+typedef struct sli4_req_common_create_mq_ext_s {
+ sli4_req_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t num_pages:16,
+ cq_id_v1:16;
+ uint32_t async_event_bitmap;
+ uint32_t async_cq_id_v1:16,
+ ring_size:4,
+ :2,
+ cq_id_v0:10;
+ uint32_t :31,
+ val:1;
+ uint32_t acqv:1,
+ async_cq_id_v0:10,
+ :21;
+ uint32_t rsvd9;
+ struct {
+ uint32_t low;
+ uint32_t high;
+ } page_physical_address[8];
+#else
+#error big endian version not defined
+#endif
+} sli4_req_common_create_mq_ext_t;
+
+#define SLI4_MQE_SIZE_16 0x05
+#define SLI4_MQE_SIZE_32 0x06
+#define SLI4_MQE_SIZE_64 0x07
+#define SLI4_MQE_SIZE_128 0x08
+
+#define SLI4_ASYNC_EVT_LINK_STATE BIT(1)
+#define SLI4_ASYNC_EVT_FCOE_FIP BIT(2)
+#define SLI4_ASYNC_EVT_DCBX BIT(3)
+#define SLI4_ASYNC_EVT_ISCSI BIT(4)
+#define SLI4_ASYNC_EVT_GRP5 BIT(5)
+#define SLI4_ASYNC_EVT_FC BIT(16)
+#define SLI4_ASYNC_EVT_SLI_PORT BIT(17)
+#define SLI4_ASYNC_EVT_VF BIT(18)
+#define SLI4_ASYNC_EVT_MR BIT(19)
+
+#define SLI4_ASYNC_EVT_ALL \
+ (SLI4_ASYNC_EVT_LINK_STATE | \
+ SLI4_ASYNC_EVT_FCOE_FIP | \
+ SLI4_ASYNC_EVT_DCBX | \
+ SLI4_ASYNC_EVT_ISCSI | \
+ SLI4_ASYNC_EVT_GRP5 | \
+ SLI4_ASYNC_EVT_FC | \
+ SLI4_ASYNC_EVT_SLI_PORT | \
+ SLI4_ASYNC_EVT_VF | \
+ SLI4_ASYNC_EVT_MR)
+
+#define SLI4_ASYNC_EVT_FC_FCOE \
+ (SLI4_ASYNC_EVT_LINK_STATE | \
+ SLI4_ASYNC_EVT_FCOE_FIP | \
+ SLI4_ASYNC_EVT_GRP5 | \
+ SLI4_ASYNC_EVT_FC | \
+ SLI4_ASYNC_EVT_SLI_PORT)
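+
+/*
+ * Example (illustrative): an FC/FCoE function creating an MQ would
+ * typically request the event group above, e.g.
+ *
+ *	mq->async_event_bitmap = SLI4_ASYNC_EVT_FC_FCOE;
+ */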
+
+/**
+ * @brief Common Destroy MQ
+ */
+typedef struct sli4_req_common_destroy_mq_s {
+ sli4_req_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t mq_id:16,
+ :16;
+#else
+#error big endian version not defined
+#endif
+} sli4_req_common_destroy_mq_t;
+
+/**
+ * @brief COMMON_GET_CNTL_ATTRIBUTES
+ *
+ * Query for information about the SLI Port
+ */
+typedef struct sli4_res_common_get_cntl_attributes_s {
+ sli4_res_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint8_t version_string[32];
+ uint8_t manufacturer_name[32];
+ uint32_t supported_modes;
+ uint32_t eprom_version_lo:8,
+ eprom_version_hi:8,
+ :16;
+ uint32_t mbx_data_structure_version;
+ uint32_t ep_firmware_data_structure_version;
+ uint8_t ncsi_version_string[12];
+ uint32_t default_extended_timeout;
+ uint8_t model_number[32];
+ uint8_t description[64];
+ uint8_t serial_number[32];
+ uint8_t ip_version_string[32];
+ uint8_t fw_version_string[32];
+ uint8_t bios_version_string[32];
+ uint8_t redboot_version_string[32];
+ uint8_t driver_version_string[32];
+ uint8_t fw_on_flash_version_string[32];
+ uint32_t functionalities_supported;
+ uint32_t max_cdb_length:16,
+ asic_revision:8,
+ generational_guid0:8;
+ uint32_t generational_guid1_12[3];
+ uint32_t generational_guid13:24,
+ hba_port_count:8;
+ uint32_t default_link_down_timeout:16,
+ iscsi_version_min_max:8,
+ multifunctional_device:8;
+ uint32_t cache_valid:8,
+ hba_status:8,
+ max_domains_supported:8,
+ port_number:6,
+ port_type:2;
+ uint32_t firmware_post_status;
+ uint32_t hba_mtu;
+ uint32_t iscsi_features:8,
+ rsvd121:24;
+ uint32_t pci_vendor_id:16,
+ pci_device_id:16;
+ uint32_t pci_sub_vendor_id:16,
+ pci_sub_system_id:16;
+ uint32_t pci_bus_number:8,
+ pci_device_number:8,
+ pci_function_number:8,
+ interface_type:8;
+ uint64_t unique_identifier;
+ uint32_t number_of_netfilters:8,
+ rsvd130:24;
+#else
+#error big endian version not defined
+#endif
+} sli4_res_common_get_cntl_attributes_t;
+
+/**
+ * @brief COMMON_GET_CNTL_ATTRIBUTES
+ *
+ * This command queries the controller information from the Flash ROM.
+ */
+typedef struct sli4_req_common_get_cntl_addl_attributes_s {
+ sli4_req_hdr_t hdr;
+} sli4_req_common_get_cntl_addl_attributes_t;
+
+
+typedef struct sli4_res_common_get_cntl_addl_attributes_s {
+ sli4_res_hdr_t hdr;
+ uint16_t ipl_file_number;
+ uint8_t ipl_file_version;
+ uint8_t rsvd0;
+ uint8_t on_die_temperature;
+ uint8_t rsvd1[3];
+ uint32_t driver_advanced_features_supported;
+ uint32_t rsvd2[4];
+ char fcoe_universal_bios_version[32];
+ char fcoe_x86_bios_version[32];
+ char fcoe_efi_bios_version[32];
+ char fcoe_fcode_version[32];
+ char uefi_bios_version[32];
+ char uefi_nic_version[32];
+ char uefi_fcode_version[32];
+ char uefi_iscsi_version[32];
+ char iscsi_x86_bios_version[32];
+ char pxe_x86_bios_version[32];
+ uint8_t fcoe_default_wwpn[8];
+ uint8_t ext_phy_version[32];
+ uint8_t fc_universal_bios_version[32];
+ uint8_t fc_x86_bios_version[32];
+ uint8_t fc_efi_bios_version[32];
+ uint8_t fc_fcode_version[32];
+ uint8_t ext_phy_crc_label[8];
+ uint8_t ipl_file_name[16];
+ uint8_t rsvd3[72];
+} sli4_res_common_get_cntl_addl_attributes_t;
+
+/**
+ * @brief COMMON_NOP
+ *
+ * This command does not do anything; it only returns the payload in the completion.
+ */
+typedef struct sli4_req_common_nop_s {
+ sli4_req_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t context[2];
+#else
+#error big endian version not defined
+#endif
+} sli4_req_common_nop_t;
+
+typedef struct sli4_res_common_nop_s {
+ sli4_res_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t context[2];
+#else
+#error big endian version not defined
+#endif
+} sli4_res_common_nop_t;
+
+/**
+ * @brief COMMON_GET_RESOURCE_EXTENT_INFO
+ */
+typedef struct sli4_req_common_get_resource_extent_info_s {
+ sli4_req_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t resource_type:16,
+ :16;
+#else
+#error big endian version not defined
+#endif
+} sli4_req_common_get_resource_extent_info_t;
+
+#define SLI4_RSC_TYPE_ISCSI_INI_XRI 0x0c
+#define SLI4_RSC_TYPE_FCOE_VFI 0x20
+#define SLI4_RSC_TYPE_FCOE_VPI 0x21
+#define SLI4_RSC_TYPE_FCOE_RPI 0x22
+#define SLI4_RSC_TYPE_FCOE_XRI 0x23
+
+typedef struct sli4_res_common_get_resource_extent_info_s {
+ sli4_res_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t resource_extent_count:16,
+ resource_extent_size:16;
+#else
+#error big endian version not defined
+#endif
+} sli4_res_common_get_resource_extent_info_t;
+
+
+#define SLI4_128BYTE_WQE_SUPPORT 0x02
+/**
+ * @brief COMMON_GET_SLI4_PARAMETERS
+ */
+typedef struct sli4_res_common_get_sli4_parameters_s {
+ sli4_res_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t protocol_type:8,
+ :24;
+ uint32_t ft:1,
+ :3,
+ sli_revision:4,
+ sli_family:4,
+ if_type:4,
+ sli_hint_1:8,
+ sli_hint_2:5,
+ :3;
+ uint32_t eq_page_cnt:4,
+ :4,
+ eqe_sizes:4,
+ :4,
+ eq_page_sizes:8,
+ eqe_count_method:4,
+ :4;
+ uint32_t eqe_count_mask:16,
+ :16;
+ uint32_t cq_page_cnt:4,
+ :4,
+ cqe_sizes:4,
+ :2,
+ cqv:2,
+ cq_page_sizes:8,
+ cqe_count_method:4,
+ :4;
+ uint32_t cqe_count_mask:16,
+ :16;
+ uint32_t mq_page_cnt:4,
+ :10,
+ mqv:2,
+ mq_page_sizes:8,
+ mqe_count_method:4,
+ :4;
+ uint32_t mqe_count_mask:16,
+ :16;
+ uint32_t wq_page_cnt:4,
+ :4,
+ wqe_sizes:4,
+ :2,
+ wqv:2,
+ wq_page_sizes:8,
+ wqe_count_method:4,
+ :4;
+ uint32_t wqe_count_mask:16,
+ :16;
+ uint32_t rq_page_cnt:4,
+ :4,
+ rqe_sizes:4,
+ :2,
+ rqv:2,
+ rq_page_sizes:8,
+ rqe_count_method:4,
+ :4;
+ uint32_t rqe_count_mask:16,
+ :12,
+ rq_db_window:4;
+ uint32_t fcoe:1,
+ ext:1,
+ hdrr:1,
+ sglr:1,
+ fbrr:1,
+ areg:1,
+ tgt:1,
+ terp:1,
+ assi:1,
+ wchn:1,
+ tcca:1,
+ trty:1,
+ trir:1,
+ phoff:1,
+ phon:1,
+ phwq:1, /** Performance Hint WQ_ID Association */
+ boundary_4ga:1,
+ rxc:1,
+ hlm:1,
+ ipr:1,
+ rxri:1,
+ sglc:1,
+ timm:1,
+ tsmm:1,
+ :1,
+ oas:1,
+ lc:1,
+ agxf:1,
+ loopback_scope:4;
+ uint32_t sge_supported_length;
+ uint32_t sgl_page_cnt:4,
+ :4,
+ sgl_page_sizes:8,
+ sgl_pp_align:8,
+ :8;
+ uint32_t min_rq_buffer_size:16,
+ :16;
+ uint32_t max_rq_buffer_size;
+ uint32_t physical_xri_max:16,
+ physical_rpi_max:16;
+ uint32_t physical_vpi_max:16,
+ physical_vfi_max:16;
+ uint32_t rsvd19;
+ uint32_t frag_num_field_offset:16, /* dword 20 */
+ frag_num_field_size:16;
+ uint32_t sgl_index_field_offset:16, /* dword 21 */
+ sgl_index_field_size:16;
+ uint32_t chain_sge_initial_value_lo; /* dword 22 */
+ uint32_t chain_sge_initial_value_hi; /* dword 23 */
+#else
+#error big endian version not defined
+#endif
+} sli4_res_common_get_sli4_parameters_t;
+
+
+/**
+ * @brief COMMON_QUERY_FW_CONFIG
+ *
+ * This command retrieves firmware configuration parameters and adapter
+ * resources available to the driver.
+ */
+typedef struct sli4_req_common_query_fw_config_s {
+ sli4_req_hdr_t hdr;
+} sli4_req_common_query_fw_config_t;
+
+
+#define SLI4_FUNCTION_MODE_FCOE_INI_MODE 0x40
+#define SLI4_FUNCTION_MODE_FCOE_TGT_MODE 0x80
+#define SLI4_FUNCTION_MODE_DUA_MODE 0x800
+
+#define SLI4_ULP_MODE_FCOE_INI 0x40
+#define SLI4_ULP_MODE_FCOE_TGT 0x80
+
+typedef struct sli4_res_common_query_fw_config_s {
+ sli4_res_hdr_t hdr;
+ uint32_t config_number;
+ uint32_t asic_rev;
+ uint32_t physical_port;
+ uint32_t function_mode;
+ uint32_t ulp0_mode;
+ uint32_t ulp0_nic_wqid_base;
+ uint32_t ulp0_nic_wq_total; /* Dword 10 */
+ uint32_t ulp0_toe_wqid_base;
+ uint32_t ulp0_toe_wq_total;
+ uint32_t ulp0_toe_rqid_base;
+ uint32_t ulp0_toe_rq_total;
+ uint32_t ulp0_toe_defrqid_base;
+ uint32_t ulp0_toe_defrq_total;
+ uint32_t ulp0_lro_rqid_base;
+ uint32_t ulp0_lro_rq_total;
+ uint32_t ulp0_iscsi_icd_base;
+ uint32_t ulp0_iscsi_icd_total; /* Dword 20 */
+ uint32_t ulp1_mode;
+ uint32_t ulp1_nic_wqid_base;
+ uint32_t ulp1_nic_wq_total;
+ uint32_t ulp1_toe_wqid_base;
+ uint32_t ulp1_toe_wq_total;
+ uint32_t ulp1_toe_rqid_base;
+ uint32_t ulp1_toe_rq_total;
+ uint32_t ulp1_toe_defrqid_base;
+ uint32_t ulp1_toe_defrq_total;
+ uint32_t ulp1_lro_rqid_base; /* Dword 30 */
+ uint32_t ulp1_lro_rq_total;
+ uint32_t ulp1_iscsi_icd_base;
+ uint32_t ulp1_iscsi_icd_total;
+ uint32_t function_capabilities;
+ uint32_t ulp0_cq_base;
+ uint32_t ulp0_cq_total;
+ uint32_t ulp0_eq_base;
+ uint32_t ulp0_eq_total;
+ uint32_t ulp0_iscsi_chain_icd_base;
+ uint32_t ulp0_iscsi_chain_icd_total; /* Dword 40 */
+ uint32_t ulp1_iscsi_chain_icd_base;
+ uint32_t ulp1_iscsi_chain_icd_total;
+} sli4_res_common_query_fw_config_t;
+
+/**
+ * @brief COMMON_GET_PORT_NAME
+ */
+typedef struct sli4_req_common_get_port_name_s {
+ sli4_req_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t pt:2, /* only COMMON_GET_PORT_NAME_V1 */
+ :30;
+#else
+#error big endian version not defined
+#endif
+} sli4_req_common_get_port_name_t;
+
+typedef struct sli4_res_common_get_port_name_s {
+ sli4_res_hdr_t hdr;
+ char port_name[4];
+} sli4_res_common_get_port_name_t;
+
+/**
+ * @brief COMMON_WRITE_FLASHROM
+ */
+typedef struct sli4_req_common_write_flashrom_s {
+ sli4_req_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t flash_rom_access_opcode;
+ uint32_t flash_rom_access_operation_type;
+ uint32_t data_buffer_size;
+ uint32_t offset;
+ uint8_t data_buffer[4];
+#else
+#error big endian version not defined
+#endif
+} sli4_req_common_write_flashrom_t;
+
+#define SLI4_MGMT_FLASHROM_OPCODE_FLASH 0x01
+#define SLI4_MGMT_FLASHROM_OPCODE_SAVE 0x02
+#define SLI4_MGMT_FLASHROM_OPCODE_CLEAR 0x03
+#define SLI4_MGMT_FLASHROM_OPCODE_REPORT 0x04
+#define SLI4_MGMT_FLASHROM_OPCODE_IMAGE_INFO 0x05
+#define SLI4_MGMT_FLASHROM_OPCODE_IMAGE_CRC 0x06
+#define SLI4_MGMT_FLASHROM_OPCODE_OFFSET_BASED_FLASH 0x07
+#define SLI4_MGMT_FLASHROM_OPCODE_OFFSET_BASED_SAVE 0x08
+#define SLI4_MGMT_PHY_FLASHROM_OPCODE_FLASH 0x09
+#define SLI4_MGMT_PHY_FLASHROM_OPCODE_SAVE 0x0a
+
+#define SLI4_FLASH_ROM_ACCESS_OP_TYPE_ISCSI 0x00
+#define SLI4_FLASH_ROM_ACCESS_OP_TYPE_REDBOOT 0x01
+#define SLI4_FLASH_ROM_ACCESS_OP_TYPE_BIOS 0x02
+#define SLI4_FLASH_ROM_ACCESS_OP_TYPE_PXE_BIOS 0x03
+#define SLI4_FLASH_ROM_ACCESS_OP_TYPE_CODE_CONTROL 0x04
+#define SLI4_FLASH_ROM_ACCESS_OP_TYPE_IPSEC_CFG 0x05
+#define SLI4_FLASH_ROM_ACCESS_OP_TYPE_INIT_DATA 0x06
+#define SLI4_FLASH_ROM_ACCESS_OP_TYPE_ROM_OFFSET 0x07
+#define SLI4_FLASH_ROM_ACCESS_OP_TYPE_FCOE_BIOS 0x08
+#define SLI4_FLASH_ROM_ACCESS_OP_TYPE_ISCSI_BAK 0x09
+#define SLI4_FLASH_ROM_ACCESS_OP_TYPE_FCOE_ACT 0x0a
+#define SLI4_FLASH_ROM_ACCESS_OP_TYPE_FCOE_BAK 0x0b
+#define SLI4_FLASH_ROM_ACCESS_OP_TYPE_CODE_CTRL_P 0x0c
+#define SLI4_FLASH_ROM_ACCESS_OP_TYPE_NCSI 0x0d
+#define SLI4_FLASH_ROM_ACCESS_OP_TYPE_NIC 0x0e
+#define SLI4_FLASH_ROM_ACCESS_OP_TYPE_DCBX 0x0f
+#define SLI4_FLASH_ROM_ACCESS_OP_TYPE_PXE_BIOS_CFG 0x10
+#define SLI4_FLASH_ROM_ACCESS_OP_TYPE_ALL_CFG_DATA 0x11
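+
+/*
+ * Example (hypothetical helper, illustration only): a flash update
+ * request pairs an opcode with an operation type from the lists above,
+ * here a SAVE of the active FCoE region at a caller-supplied offset.
+ */
+static inline void
+sli_write_flashrom_prepare_fcoe_save(sli4_req_common_write_flashrom_t *req,
+    uint32_t offset, uint32_t len)
+{
+	req->flash_rom_access_opcode = SLI4_MGMT_FLASHROM_OPCODE_SAVE;
+	req->flash_rom_access_operation_type =
+	    SLI4_FLASH_ROM_ACCESS_OP_TYPE_FCOE_ACT;
+	req->offset = offset;
+	req->data_buffer_size = len;
+}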
+
+/**
+ * @brief COMMON_MANAGE_FAT
+ */
+typedef struct sli4_req_common_manage_fat_s {
+ sli4_req_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t fat_operation;
+ uint32_t read_log_offset;
+ uint32_t read_log_length;
+ uint32_t data_buffer_size;
+ uint32_t data_buffer; /* response only */
+#else
+#error big endian version not defined
+#endif
+} sli4_req_common_manage_fat_t;
+
+/**
+ * @brief COMMON_GET_EXT_FAT_CAPABILITIES
+ */
+typedef struct sli4_req_common_get_ext_fat_capabilities_s {
+ sli4_req_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t parameter_type;
+#else
+#error big endian version not defined
+#endif
+} sli4_req_common_get_ext_fat_capabilities_t;
+
+/**
+ * @brief COMMON_SET_EXT_FAT_CAPABILITIES
+ */
+typedef struct sli4_req_common_set_ext_fat_capabilities_s {
+ sli4_req_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t maximum_log_entries;
+ uint32_t log_entry_size;
+ uint32_t logging_type:8,
+ maximum_logging_functions:8,
+ maximum_logging_ports:8,
+ :8;
+ uint32_t supported_modes;
+ uint32_t number_modules;
+ uint32_t debug_module[14];
+#else
+#error big endian version not defined
+#endif
+} sli4_req_common_set_ext_fat_capabilities_t;
+
+/**
+ * @brief COMMON_EXT_FAT_CONFIGURE_SNAPSHOT
+ */
+typedef struct sli4_req_common_ext_fat_configure_snapshot_s {
+ sli4_req_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t total_log_entries;
+#else
+#error big endian version not defined
+#endif
+} sli4_req_common_ext_fat_configure_snapshot_t;
+
+/**
+ * @brief COMMON_EXT_FAT_RETRIEVE_SNAPSHOT
+ */
+typedef struct sli4_req_common_ext_fat_retrieve_snapshot_s {
+ sli4_req_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t snapshot_mode;
+ uint32_t start_index;
+ uint32_t number_log_entries;
+#else
+#error big endian version not defined
+#endif
+} sli4_req_common_ext_fat_retrieve_snapshot_t;
+
+typedef struct sli4_res_common_ext_fat_retrieve_snapshot_s {
+ sli4_res_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t number_log_entries;
+ uint32_t version:8,
+ physical_port:8,
+ function_id:16;
+ uint32_t trace_level;
+ uint32_t module_mask[2];
+ uint32_t trace_table_index;
+ uint32_t timestamp;
+ uint8_t string_data[16];
+ uint32_t data[6];
+#else
+#error big endian version not defined
+#endif
+} sli4_res_common_ext_fat_retrieve_snapshot_t;
+
+/**
+ * @brief COMMON_EXT_FAT_READ_STRING_TABLE
+ */
+typedef struct sli4_req_common_ext_fat_read_string_table_s {
+ sli4_req_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t byte_offset;
+ uint32_t number_bytes;
+#else
+#error big endian version not defined
+#endif
+} sli4_req_common_ext_fat_read_string_table_t;
+
+typedef struct sli4_res_common_ext_fat_read_string_table_s {
+ sli4_res_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t number_returned_bytes;
+ uint32_t number_remaining_bytes;
+ uint32_t table_data0:8,
+ :24;
+ uint8_t table_data[0];
+#else
+#error big endian version not defined
+#endif
+} sli4_res_common_ext_fat_read_string_table_t;
+
+/**
+ * @brief COMMON_READ_TRANSCEIVER_DATA
+ *
+ * This command reads SFF transceiver data (the format is defined
+ * by the SFF-8472 specification).
+ */
+typedef struct sli4_req_common_read_transceiver_data_s {
+ sli4_req_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t page_number;
+ uint32_t port;
+#else
+#error big endian version not defined
+#endif
+} sli4_req_common_read_transceiver_data_t;
+
+typedef struct sli4_res_common_read_transceiver_data_s {
+ sli4_res_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t page_number;
+ uint32_t port;
+ uint32_t page_data[32];
+ uint32_t page_data_2[32];
+#else
+#error big endian version not defined
+#endif
+} sli4_res_common_read_transceiver_data_t;
+
+/**
+ * @brief COMMON_READ_OBJECT
+ */
+typedef struct sli4_req_common_read_object_s {
+ sli4_req_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t desired_read_length:24,
+ :8;
+ uint32_t read_offset;
+ uint8_t object_name[104];
+ uint32_t host_buffer_descriptor_count;
+ sli4_bde_t host_buffer_descriptor[0];
+#else
+#error big endian version not defined
+#endif
+} sli4_req_common_read_object_t;
+
+typedef struct sli4_res_common_read_object_s {
+ sli4_res_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t actual_read_length;
+ uint32_t resv:31,
+ eof:1;
+#else
+#error big endian version not defined
+#endif
+} sli4_res_common_read_object_t;
+
+/**
+ * @brief COMMON_WRITE_OBJECT
+ */
+typedef struct sli4_req_common_write_object_s {
+ sli4_req_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t desired_write_length:24,
+ :6,
+ noc:1,
+ eof:1;
+ uint32_t write_offset;
+ uint8_t object_name[104];
+ uint32_t host_buffer_descriptor_count;
+ sli4_bde_t host_buffer_descriptor[0];
+#else
+#error big endian version not defined
+#endif
+} sli4_req_common_write_object_t;
+
+typedef struct sli4_res_common_write_object_s {
+ sli4_res_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t actual_write_length;
+ uint32_t change_status:8,
+ :24;
+#else
+#error big endian version not defined
+#endif
+} sli4_res_common_write_object_t;
+
+/**
+ * @brief COMMON_DELETE_OBJECT
+ */
+typedef struct sli4_req_common_delete_object_s {
+ sli4_req_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t rsvd4;
+ uint32_t rsvd5;
+ uint8_t object_name[104];
+#else
+#error big endian version not defined
+#endif
+} sli4_req_common_delete_object_t;
+
+/**
+ * @brief COMMON_READ_OBJECT_LIST
+ */
+typedef struct sli4_req_common_read_object_list_s {
+ sli4_req_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t desired_read_length:24,
+ :8;
+ uint32_t read_offset;
+ uint8_t object_name[104];
+ uint32_t host_buffer_descriptor_count;
+ sli4_bde_t host_buffer_descriptor[0];
+#else
+#error big endian version not defined
+#endif
+} sli4_req_common_read_object_list_t;
+
+/**
+ * @brief COMMON_SET_DUMP_LOCATION
+ */
+typedef struct sli4_req_common_set_dump_location_s {
+ sli4_req_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t buffer_length:24,
+ :5,
+ fdb:1,
+ blp:1,
+ qry:1;
+ uint32_t buf_addr_low;
+ uint32_t buf_addr_high;
+#else
+#error big endian version not defined
+#endif
+} sli4_req_common_set_dump_location_t;
+
+typedef struct sli4_res_common_set_dump_location_s {
+ sli4_res_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t buffer_length:24,
+ :8;
+#else
+#error big endian version not defined
+#endif
+} sli4_res_common_set_dump_location_t;
+
+/**
+ * @brief COMMON_SET_FEATURES
+ */
+#define SLI4_SET_FEATURES_DIF_SEED 0x01
+#define SLI4_SET_FEATURES_XRI_TIMER 0x03
+#define SLI4_SET_FEATURES_MAX_PCIE_SPEED 0x04
+#define SLI4_SET_FEATURES_FCTL_CHECK 0x05
+#define SLI4_SET_FEATURES_FEC 0x06
+#define SLI4_SET_FEATURES_PCIE_RECV_DETECT 0x07
+#define SLI4_SET_FEATURES_DIF_MEMORY_MODE 0x08
+#define SLI4_SET_FEATURES_DISABLE_SLI_PORT_PAUSE_STATE 0x09
+#define SLI4_SET_FEATURES_ENABLE_PCIE_OPTIONS 0x0A
+#define SLI4_SET_FEATURES_SET_CONFIG_AUTO_XFER_RDY_T10PI 0x0C
+#define SLI4_SET_FEATURES_ENABLE_MULTI_RECEIVE_QUEUE 0x0D
+#define SLI4_SET_FEATURES_SET_FTD_XFER_HINT 0x0F
+#define SLI4_SET_FEATURES_SLI_PORT_HEALTH_CHECK 0x11
+
+typedef struct sli4_req_common_set_features_s {
+ sli4_req_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t feature;
+ uint32_t param_len;
+ uint32_t params[8];
+#else
+#error big endian version not defined
+#endif
+} sli4_req_common_set_features_t;
+
+typedef struct sli4_req_common_set_features_dif_seed_s {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t seed:16,
+ :16;
+#else
+#error big endian version not defined
+#endif
+} sli4_req_common_set_features_dif_seed_t;
+
+typedef struct sli4_req_common_set_features_t10_pi_mem_model_s {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t tmm:1,
+ :31;
+#else
+#error big endian version not defined
+#endif
+} sli4_req_common_set_features_t10_pi_mem_model_t;
+
+typedef struct sli4_req_common_set_features_multirq_s {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t isr:1, /*<< Include Sequence Reporting */
+ agxfe:1, /*<< Auto Generate XFER-RDY Feature Enabled */
+ :30;
+ uint32_t num_rqs:8,
+ rq_select_policy:4,
+ :20;
+#else
+#error big endian version not defined
+#endif
+} sli4_req_common_set_features_multirq_t;
+
+typedef struct sli4_req_common_set_features_xfer_rdy_t10pi_s {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t rtc:1,
+ atv:1,
+ tmm:1,
+ :1,
+ p_type:3,
+ blk_size:3,
+ :22;
+ uint32_t app_tag:16,
+ :16;
+#else
+#error big endian version not defined
+#endif
+} sli4_req_common_set_features_xfer_rdy_t10pi_t;
+
+typedef struct sli4_req_common_set_features_health_check_s {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t hck:1,
+ qry:1,
+ :30;
+#else
+#error big endian version not defined
+#endif
+} sli4_req_common_set_features_health_check_t;
+
+typedef struct sli4_req_common_set_features_set_fdt_xfer_hint_s {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t fdt_xfer_hint;
+#else
+#error big endian version not defined
+#endif
+} sli4_req_common_set_features_set_fdt_xfer_hint_t;
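+
+/*
+ * Example (sketch): the feature-specific structures above are carried
+ * in the params[] area of sli4_req_common_set_features_t. A DIF seed
+ * request might be filled as follows; param_len is assumed to be in
+ * bytes. Illustrative only.
+ */
+static inline void
+sli_set_features_prepare_dif_seed(sli4_req_common_set_features_t *req,
+    uint16_t seed)
+{
+	sli4_req_common_set_features_dif_seed_t *param =
+	    (sli4_req_common_set_features_dif_seed_t *)req->params;
+
+	req->feature = SLI4_SET_FEATURES_DIF_SEED;
+	req->param_len = sizeof(*param);
+	param->seed = seed;
+}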
+
+/**
+ * @brief DMTF_EXEC_CLP_CMD
+ */
+typedef struct sli4_req_dmtf_exec_clp_cmd_s {
+ sli4_req_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t cmd_buf_length;
+ uint32_t resp_buf_length;
+ uint32_t cmd_buf_addr_low;
+ uint32_t cmd_buf_addr_high;
+ uint32_t resp_buf_addr_low;
+ uint32_t resp_buf_addr_high;
+#else
+#error big endian version not defined
+#endif
+} sli4_req_dmtf_exec_clp_cmd_t;
+
+typedef struct sli4_res_dmtf_exec_clp_cmd_s {
+ sli4_res_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t :32;
+ uint32_t resp_length;
+ uint32_t :32;
+ uint32_t :32;
+ uint32_t :32;
+ uint32_t :32;
+ uint32_t clp_status;
+ uint32_t clp_detailed_status;
+#else
+#error big endian version not defined
+#endif
+} sli4_res_dmtf_exec_clp_cmd_t;
+
+/**
+ * @brief Resource descriptor
+ */
+
+#define SLI4_RESOURCE_DESCRIPTOR_TYPE_PCIE 0x50
+#define SLI4_RESOURCE_DESCRIPTOR_TYPE_NIC 0x51
+#define SLI4_RESOURCE_DESCRIPTOR_TYPE_ISCSI 0x52
+#define SLI4_RESOURCE_DESCRIPTOR_TYPE_FCFCOE 0x53
+#define SLI4_RESOURCE_DESCRIPTOR_TYPE_RDMA 0x54
+#define SLI4_RESOURCE_DESCRIPTOR_TYPE_PORT 0x55
+#define SLI4_RESOURCE_DESCRIPTOR_TYPE_ISAP 0x56
+
+#define SLI4_PROTOCOL_NIC_TOE 0x01
+#define SLI4_PROTOCOL_ISCSI 0x02
+#define SLI4_PROTOCOL_FCOE 0x04
+#define SLI4_PROTOCOL_NIC_TOE_RDMA 0x08
+#define SLI4_PROTOCOL_FC 0x10
+#define SLI4_PROTOCOL_DEFAULT 0xff
+
+typedef struct sli4_resource_descriptor_v1_s {
+ uint32_t descriptor_type:8,
+ descriptor_length:8,
+ :16;
+ uint32_t type_specific[0];
+} sli4_resource_descriptor_v1_t;
+
+typedef struct sli4_pcie_resource_descriptor_v1_s {
+ uint32_t descriptor_type:8,
+ descriptor_length:8,
+ :14,
+ imm:1,
+ nosv:1;
+ uint32_t :16,
+ pf_number:10,
+ :6;
+ uint32_t rsvd1;
+ uint32_t sriov_state:8,
+ pf_state:8,
+ pf_type:8,
+ :8;
+ uint32_t number_of_vfs:16,
+ :16;
+ uint32_t mission_roles:8,
+ :19,
+ pchg:1,
+ schg:1,
+ xchg:1,
+ xrom:2;
+ uint32_t rsvd2[16];
+} sli4_pcie_resource_descriptor_v1_t;
+
+typedef struct sli4_isap_resource_descriptor_v1_s {
+ uint32_t descriptor_type:8,
+ descriptor_length:8,
+ :16;
+ uint32_t iscsi_tgt:1,
+ iscsi_ini:1,
+ iscsi_dif:1,
+ :29;
+ uint32_t rsvd1[3];
+ uint32_t fcoe_tgt:1,
+ fcoe_ini:1,
+ fcoe_dif:1,
+ :29;
+ uint32_t rsvd2[7];
+ uint32_t mc_type0:8,
+ mc_type1:8,
+ mc_type2:8,
+ mc_type3:8;
+ uint32_t rsvd3[3];
+} sli4_isap_resource_descriptor_v1_t;
+
+/**
+ * @brief COMMON_GET_FUNCTION_CONFIG
+ */
+typedef struct sli4_req_common_get_function_config_s {
+ sli4_req_hdr_t hdr;
+} sli4_req_common_get_function_config_t;
+
+typedef struct sli4_res_common_get_function_config_s {
+ sli4_res_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t desc_count;
+ uint32_t desc[54];
+#else
+#error big endian version not defined
+#endif
+} sli4_res_common_get_function_config_t;
+
+/**
+ * @brief COMMON_GET_PROFILE_CONFIG
+ */
+typedef struct sli4_req_common_get_profile_config_s {
+ sli4_req_hdr_t hdr;
+ uint32_t profile_id:8,
+ typ:2,
+ :22;
+} sli4_req_common_get_profile_config_t;
+
+typedef struct sli4_res_common_get_profile_config_s {
+ sli4_res_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t desc_count;
+ uint32_t desc[0];
+#else
+#error big endian version not defined
+#endif
+} sli4_res_common_get_profile_config_t;
+
+/**
+ * @brief COMMON_SET_PROFILE_CONFIG
+ */
+typedef struct sli4_req_common_set_profile_config_s {
+ sli4_req_hdr_t hdr;
+ uint32_t profile_id:8,
+ :23,
+ isap:1;
+ uint32_t desc_count;
+ uint32_t desc[0];
+} sli4_req_common_set_profile_config_t;
+
+typedef struct sli4_res_common_set_profile_config_s {
+ sli4_res_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+#else
+#error big endian version not defined
+#endif
+} sli4_res_common_set_profile_config_t;
+
+/**
+ * @brief Profile Descriptor for profile functions
+ */
+typedef struct sli4_profile_descriptor_s {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t profile_id:8,
+ :8,
+ profile_index:8,
+ :8;
+ uint32_t profile_description[128];
+#else
+#error big endian version not defined
+#endif
+} sli4_profile_descriptor_t;
+
+/*
+ * We don't know in advance how many descriptors there are, so we pick
+ * a number that we think will be big enough and ask for that many.
+ */
+
+#define MAX_PRODUCT_DESCRIPTORS 40
+
+/**
+ * @brief COMMON_GET_PROFILE_LIST
+ */
+typedef struct sli4_req_common_get_profile_list_s {
+ sli4_req_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t start_profile_index:8,
+ :24;
+#else
+#error big endian version not defined
+#endif
+} sli4_req_common_get_profile_list_t;
+
+typedef struct sli4_res_common_get_profile_list_s {
+ sli4_res_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t profile_descriptor_count;
+ sli4_profile_descriptor_t profile_descriptor[MAX_PRODUCT_DESCRIPTORS];
+#else
+#error big endian version not defined
+#endif
+} sli4_res_common_get_profile_list_t;
+
+/**
+ * @brief COMMON_GET_ACTIVE_PROFILE
+ */
+typedef struct sli4_req_common_get_active_profile_s {
+ sli4_req_hdr_t hdr;
+} sli4_req_common_get_active_profile_t;
+
+typedef struct sli4_res_common_get_active_profile_s {
+ sli4_res_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t active_profile_id:8,
+ :8,
+ next_profile_id:8,
+ :8;
+#else
+#error big endian version not defined
+#endif
+} sli4_res_common_get_active_profile_t;
+
+/**
+ * @brief COMMON_SET_ACTIVE_PROFILE
+ */
+typedef struct sli4_req_common_set_active_profile_s {
+ sli4_req_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t active_profile_id:8,
+ :23,
+ fd:1;
+#else
+#error big endian version not defined
+#endif
+} sli4_req_common_set_active_profile_t;
+
+typedef struct sli4_res_common_set_active_profile_s {
+ sli4_res_hdr_t hdr;
+} sli4_res_common_set_active_profile_t;
+
+/**
+ * @brief Link Config Descriptor for link config functions
+ */
+typedef struct sli4_link_config_descriptor_s {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t link_config_id:8,
+ :24;
+ uint32_t config_description[8];
+#else
+#error big endian version not defined
+#endif
+} sli4_link_config_descriptor_t;
+
+#define MAX_LINK_CONFIG_DESCRIPTORS 10
+
+/**
+ * @brief COMMON_GET_RECONFIG_LINK_INFO
+ */
+typedef struct sli4_req_common_get_reconfig_link_info_s {
+ sli4_req_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+#else
+#error big endian version not defined
+#endif
+} sli4_req_common_get_reconfig_link_info_t;
+
+typedef struct sli4_res_common_get_reconfig_link_info_s {
+ sli4_res_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t active_link_config_id:8,
+ :8,
+ next_link_config_id:8,
+ :8;
+ uint32_t link_configuration_descriptor_count;
+ sli4_link_config_descriptor_t desc[MAX_LINK_CONFIG_DESCRIPTORS];
+#else
+#error big endian version not defined
+#endif
+} sli4_res_common_get_reconfig_link_info_t;
+
+/**
+ * @brief COMMON_SET_RECONFIG_LINK_ID
+ */
+typedef struct sli4_req_common_set_reconfig_link_id_s {
+ sli4_req_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t next_link_config_id:8,
+ :23,
+ fd:1;
+#else
+#error big endian version not defined
+#endif
+} sli4_req_common_set_reconfig_link_id_t;
+
+typedef struct sli4_res_common_set_reconfig_link_id_s {
+ sli4_res_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+#else
+#error big endian version not defined
+#endif
+} sli4_res_common_set_reconfig_link_id_t;
+
+
+typedef struct sli4_req_lowlevel_set_watchdog_s {
+ sli4_req_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t watchdog_timeout:16,
+ :16;
+#else
+#error big endian version not defined
+#endif
+
+} sli4_req_lowlevel_set_watchdog_t;
+
+
+typedef struct sli4_res_lowlevel_set_watchdog_s {
+ sli4_res_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t rsvd;
+#else
+#error big endian version not defined
+#endif
+} sli4_res_lowlevel_set_watchdog_t;
+
+/**
+ * @brief Event Queue Entry
+ */
+typedef struct sli4_eqe_s {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t vld:1, /** valid */
+ major_code:3,
+ minor_code:12,
+ resource_id:16;
+#else
+#error big endian version not defined
+#endif
+} sli4_eqe_t;
+
+#define SLI4_MAJOR_CODE_STANDARD 0
+#define SLI4_MAJOR_CODE_SENTINEL 1
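+
+/*
+ * Example (sketch): an EQE is consumable when vld is set; entries with
+ * the sentinel major code carry no event. Illustrative only.
+ */
+static inline int32_t
+sli_eqe_is_event(sli4_eqe_t *eqe)
+{
+	return (eqe->vld && (eqe->major_code == SLI4_MAJOR_CODE_STANDARD));
+}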
+
+/**
+ * @brief Mailbox Completion Queue Entry
+ *
+ * A CQE generated on the completion of an MQE from an MQ.
+ */
+typedef struct sli4_mcqe_s {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t completion_status:16, /** values are protocol specific */
+ extended_status:16;
+ uint32_t mqe_tag_low;
+ uint32_t mqe_tag_high;
+ uint32_t :27,
+ con:1, /** consumed - command now being executed */
+ cmp:1, /** completed - command still executing if clear */
+ :1,
+ ae:1, /** async event - this is an ACQE */
+ val:1; /** valid - contents of CQE are valid */
+#else
+#error big endian version not defined
+#endif
+} sli4_mcqe_t;
+
+
+/**
+ * @brief Asynchronous Completion Queue Entry
+ *
+ * A CQE generated asynchronously in response to link state changes or other internal events.
+ */
+typedef struct sli4_acqe_s {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t event_data[3];
+ uint32_t :8,
+ event_code:8,
+ event_type:8, /** values are protocol specific */
+ :6,
+ ae:1, /** async event - this is an ACQE */
+ val:1; /** valid - contents of CQE are valid */
+#else
+#error big endian version not defined
+#endif
+} sli4_acqe_t;
+
+#define SLI4_ACQE_EVENT_CODE_LINK_STATE 0x01
+#define SLI4_ACQE_EVENT_CODE_FCOE_FIP 0x02
+#define SLI4_ACQE_EVENT_CODE_DCBX 0x03
+#define SLI4_ACQE_EVENT_CODE_ISCSI 0x04
+#define SLI4_ACQE_EVENT_CODE_GRP_5 0x05
+#define SLI4_ACQE_EVENT_CODE_FC_LINK_EVENT 0x10
+#define SLI4_ACQE_EVENT_CODE_SLI_PORT_EVENT 0x11
+#define SLI4_ACQE_EVENT_CODE_VF_EVENT 0x12
+#define SLI4_ACQE_EVENT_CODE_MR_EVENT 0x13
+
+/**
+ * @brief Register name enums
+ */
+typedef enum {
+ SLI4_REG_BMBX,
+ SLI4_REG_EQCQ_DOORBELL,
+ SLI4_REG_FCOE_RQ_DOORBELL,
+ SLI4_REG_IO_WQ_DOORBELL,
+ SLI4_REG_MQ_DOORBELL,
+ SLI4_REG_PHYSDEV_CONTROL,
+ SLI4_REG_SLIPORT_CONTROL,
+ SLI4_REG_SLIPORT_ERROR1,
+ SLI4_REG_SLIPORT_ERROR2,
+ SLI4_REG_SLIPORT_SEMAPHORE,
+ SLI4_REG_SLIPORT_STATUS,
+ SLI4_REG_UERR_MASK_HI,
+ SLI4_REG_UERR_MASK_LO,
+ SLI4_REG_UERR_STATUS_HI,
+ SLI4_REG_UERR_STATUS_LO,
+ SLI4_REG_SW_UE_CSR1,
+ SLI4_REG_SW_UE_CSR2,
+ SLI4_REG_MAX /* must be last */
+} sli4_regname_e;
+
+typedef struct sli4_reg_s {
+ uint32_t rset;
+ uint32_t off;
+} sli4_reg_t;
+
+typedef enum {
+ SLI_QTYPE_EQ,
+ SLI_QTYPE_CQ,
+ SLI_QTYPE_MQ,
+ SLI_QTYPE_WQ,
+ SLI_QTYPE_RQ,
+ SLI_QTYPE_MAX, /* must be last */
+} sli4_qtype_e;
+
+#define SLI_USER_MQ_COUNT 1 /** User-specified max mailbox queues */
+#define SLI_MAX_CQ_SET_COUNT 16
+#define SLI_MAX_RQ_SET_COUNT 16
+
+typedef enum {
+ SLI_QENTRY_ASYNC,
+ SLI_QENTRY_MQ,
+ SLI_QENTRY_RQ,
+ SLI_QENTRY_WQ,
+ SLI_QENTRY_WQ_RELEASE,
+ SLI_QENTRY_OPT_WRITE_CMD,
+ SLI_QENTRY_OPT_WRITE_DATA,
+ SLI_QENTRY_XABT,
+ SLI_QENTRY_MAX /* must be last */
+} sli4_qentry_e;
+
+typedef struct sli4_queue_s {
+ /* Common to all queue types */
+ ocs_dma_t dma;
+ ocs_lock_t lock;
+ uint32_t index; /** current host entry index */
+ uint16_t size; /** entry size */
+ uint16_t length; /** number of entries */
+ uint16_t n_posted; /** number entries posted */
+ uint16_t id; /** Port assigned xQ_ID */
+ uint16_t ulp; /** ULP assigned to this queue */
+ uint32_t doorbell_offset;/** The offset for the doorbell */
+ uint16_t doorbell_rset; /** register set for the doorbell */
+ uint8_t type; /** queue type ie EQ, CQ, ... */
+ uint32_t proc_limit; /** limit number of CQE processed per iteration */
+ uint32_t posted_limit; /** number of CQE/EQE to process before ringing doorbell */
+ uint32_t max_num_processed;
+ time_t max_process_time;
+
+ /* Type specific gunk */
+ union {
+ uint32_t r_idx; /** "read" index (MQ only) */
+ struct {
+ uint32_t is_mq:1,/** CQ contains MQ/Async completions */
+ is_hdr:1,/** is a RQ for packet headers */
+ rq_batch:1;/** RQ index incremented by 8 */
+ } flag;
+ } u;
+} sli4_queue_t;
+
+static inline void
+sli_queue_lock(sli4_queue_t *q)
+{
+ ocs_lock(&q->lock);
+}
+
+static inline void
+sli_queue_unlock(sli4_queue_t *q)
+{
+ ocs_unlock(&q->lock);
+}
+
+
+#define SLI4_QUEUE_DEFAULT_CQ UINT16_MAX /** Use the default CQ */
+
+#define SLI4_QUEUE_RQ_BATCH 8
+
+typedef enum {
+ SLI4_CB_LINK,
+ SLI4_CB_FIP,
+ SLI4_CB_MAX /* must be last */
+} sli4_callback_e;
+
+typedef enum {
+ SLI_LINK_STATUS_UP,
+ SLI_LINK_STATUS_DOWN,
+ SLI_LINK_STATUS_NO_ALPA,
+ SLI_LINK_STATUS_MAX,
+} sli4_link_status_e;
+
+typedef enum {
+ SLI_LINK_TOPO_NPORT = 1, /** fabric or point-to-point */
+ SLI_LINK_TOPO_LOOP,
+ SLI_LINK_TOPO_LOOPBACK_INTERNAL,
+ SLI_LINK_TOPO_LOOPBACK_EXTERNAL,
+ SLI_LINK_TOPO_NONE,
+ SLI_LINK_TOPO_MAX,
+} sli4_link_topology_e;
+
+/* TODO do we need both sli4_port_type_e & sli4_link_medium_e */
+typedef enum {
+ SLI_LINK_MEDIUM_ETHERNET,
+ SLI_LINK_MEDIUM_FC,
+ SLI_LINK_MEDIUM_MAX,
+} sli4_link_medium_e;
+
+typedef struct sli4_link_event_s {
+ sli4_link_status_e status; /* link up/down */
+ sli4_link_topology_e topology;
+ sli4_link_medium_e medium; /* Ethernet / FC */
+ uint32_t speed; /* Mbps */
+ uint8_t *loop_map;
+ uint32_t fc_id;
+} sli4_link_event_t;
+
+/**
+ * @brief Fields retrieved from Skyhawk that are used to build chained SGLs
+ */
+typedef struct sli4_sgl_chaining_params_s {
+ uint8_t chaining_capable;
+ uint16_t frag_num_field_offset;
+ uint16_t sgl_index_field_offset;
+ uint64_t frag_num_field_mask;
+ uint64_t sgl_index_field_mask;
+ uint32_t chain_sge_initial_value_lo;
+ uint32_t chain_sge_initial_value_hi;
+} sli4_sgl_chaining_params_t;
+
+typedef struct sli4_fip_event_s {
+ uint32_t type;
+ uint32_t index; /* FCF index or UINT32_MAX if invalid */
+} sli4_fip_event_t;
+
+typedef enum {
+ SLI_RSRC_FCOE_VFI,
+ SLI_RSRC_FCOE_VPI,
+ SLI_RSRC_FCOE_RPI,
+ SLI_RSRC_FCOE_XRI,
+ SLI_RSRC_FCOE_FCFI,
+ SLI_RSRC_MAX /* must be last */
+} sli4_resource_e;
+
+typedef enum {
+ SLI4_PORT_TYPE_FC,
+ SLI4_PORT_TYPE_NIC,
+ SLI4_PORT_TYPE_MAX /* must be last */
+} sli4_port_type_e;
+
+typedef enum {
+ SLI4_ASIC_TYPE_BE3 = 1,
+ SLI4_ASIC_TYPE_SKYHAWK,
+ SLI4_ASIC_TYPE_LANCER,
+ SLI4_ASIC_TYPE_CORSAIR,
+ SLI4_ASIC_TYPE_LANCERG6,
+} sli4_asic_type_e;
+
+typedef enum {
+ SLI4_ASIC_REV_FPGA = 1,
+ SLI4_ASIC_REV_A0,
+ SLI4_ASIC_REV_A1,
+ SLI4_ASIC_REV_A2,
+ SLI4_ASIC_REV_A3,
+ SLI4_ASIC_REV_B0,
+ SLI4_ASIC_REV_B1,
+ SLI4_ASIC_REV_C0,
+ SLI4_ASIC_REV_D0,
+} sli4_asic_rev_e;
+
+typedef struct sli4_s {
+ ocs_os_handle_t os;
+ sli4_port_type_e port_type;
+
+ uint32_t sli_rev; /* SLI revision number */
+ uint32_t sli_family;
+ uint32_t if_type; /* SLI Interface type */
+
+ sli4_asic_type_e asic_type; /*<< ASIC type */
+ sli4_asic_rev_e asic_rev; /*<< ASIC revision */
+ uint32_t physical_port;
+
+ struct {
+ uint16_t e_d_tov;
+ uint16_t r_a_tov;
+ uint16_t max_qcount[SLI_QTYPE_MAX];
+ uint32_t max_qentries[SLI_QTYPE_MAX];
+ uint16_t count_mask[SLI_QTYPE_MAX];
+ uint16_t count_method[SLI_QTYPE_MAX];
+ uint32_t qpage_count[SLI_QTYPE_MAX];
+ uint16_t link_module_type;
+ uint8_t rq_batch;
+ uint16_t rq_min_buf_size;
+ uint32_t rq_max_buf_size;
+ uint8_t topology;
+ uint8_t wwpn[8];
+ uint8_t wwnn[8];
+ uint32_t fw_rev[2];
+ uint8_t fw_name[2][16];
+ char ipl_name[16];
+ uint32_t hw_rev[3];
+ uint8_t port_number;
+ char port_name[2];
+ char bios_version_string[32];
+ uint8_t dual_ulp_capable;
+ uint8_t is_ulp_fc[2];
+ /*
+ * Tracks the port resources using extents metaphor. For
+ * devices that don't implement extents (i.e.
+ * has_extents == FALSE), the code models each resource as
+ * a single large extent.
+ */
+ struct {
+ uint32_t number; /* number of extents */
+ uint32_t size; /* number of elements in each extent */
+ uint32_t n_alloc;/* number of elements allocated */
+ uint32_t *base;
+ ocs_bitmap_t *use_map;/* bitmap showing resources in use */
+ uint32_t map_size;/* number of bits in bitmap */
+ } extent[SLI_RSRC_MAX];
+ sli4_features_t features;
+ uint32_t has_extents:1,
+ auto_reg:1,
+ auto_xfer_rdy:1,
+ hdr_template_req:1,
+ perf_hint:1,
+ perf_wq_id_association:1,
+ cq_create_version:2,
+ mq_create_version:2,
+ high_login_mode:1,
+ sgl_pre_registered:1,
+ sgl_pre_registration_required:1,
+ t10_dif_inline_capable:1,
+ t10_dif_separate_capable:1;
+ uint32_t sge_supported_length;
+ uint32_t sgl_page_sizes;
+ uint32_t max_sgl_pages;
+ sli4_sgl_chaining_params_t sgl_chaining_params;
+ size_t wqe_size;
+ } config;
+
+ /*
+ * Callback functions
+ */
+ int32_t (*link)(void *, void *);
+ void *link_arg;
+ int32_t (*fip)(void *, void *);
+ void *fip_arg;
+
+ ocs_dma_t bmbx;
+#if defined(OCS_INCLUDE_DEBUG)
+ /* Save pointer to physical memory descriptor for non-embedded SLI_CONFIG
+ * commands for BMBX dumping purposes */
+ ocs_dma_t *bmbx_non_emb_pmd;
+#endif
+
+ struct {
+ ocs_dma_t data;
+ uint32_t length;
+ } vpd;
+} sli4_t;
+
+/**
+ * Get / set parameter functions
+ */
+static inline uint32_t
+sli_get_max_rsrc(sli4_t *sli4, sli4_resource_e rsrc)
+{
+ if (rsrc >= SLI_RSRC_MAX) {
+ return 0;
+ }
+
+ return sli4->config.extent[rsrc].size;
+}
+
+static inline uint32_t
+sli_get_max_queue(sli4_t *sli4, sli4_qtype_e qtype)
+{
+ if (qtype >= SLI_QTYPE_MAX) {
+ return 0;
+ }
+ return sli4->config.max_qcount[qtype];
+}
+
+static inline uint32_t
+sli_get_max_qentries(sli4_t *sli4, sli4_qtype_e qtype)
+{
+
+ return sli4->config.max_qentries[qtype];
+}
+
+static inline uint32_t
+sli_get_max_sge(sli4_t *sli4)
+{
+ return sli4->config.sge_supported_length;
+}
+
+static inline uint32_t
+sli_get_max_sgl(sli4_t *sli4)
+{
+
+ if (sli4->config.sgl_page_sizes != 1) {
+ ocs_log_test(sli4->os, "unsupported SGL page sizes %#x\n",
+ sli4->config.sgl_page_sizes);
+ return 0;
+ }
+
+ return ((sli4->config.max_sgl_pages * SLI_PAGE_SIZE) / sizeof(sli4_sge_t));
+}
+
+static inline sli4_link_medium_e
+sli_get_medium(sli4_t *sli4)
+{
+ switch (sli4->config.topology) {
+ case SLI4_READ_CFG_TOPO_FCOE:
+ return SLI_LINK_MEDIUM_ETHERNET;
+ case SLI4_READ_CFG_TOPO_FC:
+ case SLI4_READ_CFG_TOPO_FC_DA:
+ case SLI4_READ_CFG_TOPO_FC_AL:
+ return SLI_LINK_MEDIUM_FC;
+ default:
+ return SLI_LINK_MEDIUM_MAX;
+ }
+}
+
+static inline void
+sli_skh_chain_sge_build(sli4_t *sli4, sli4_sge_t *sge, uint32_t xri_index, uint32_t frag_num, uint32_t offset)
+{
+ sli4_sgl_chaining_params_t *cparms = &sli4->config.sgl_chaining_params;
+
+
+ ocs_memset(sge, 0, sizeof(*sge));
+ sge->sge_type = SLI4_SGE_TYPE_CHAIN;
+ sge->buffer_address_high = (uint32_t)cparms->chain_sge_initial_value_hi;
+ sge->buffer_address_low =
+ (uint32_t)((cparms->chain_sge_initial_value_lo |
+ (((uintptr_t)(xri_index & cparms->sgl_index_field_mask)) <<
+ cparms->sgl_index_field_offset) |
+ (((uintptr_t)(frag_num & cparms->frag_num_field_mask)) <<
+ cparms->frag_num_field_offset) |
+ offset) >> 3);
+}
+
+static inline uint32_t
+sli_get_sli_rev(sli4_t *sli4)
+{
+ return sli4->sli_rev;
+}
+
+static inline uint32_t
+sli_get_sli_family(sli4_t *sli4)
+{
+ return sli4->sli_family;
+}
+
+static inline uint32_t
+sli_get_if_type(sli4_t *sli4)
+{
+ return sli4->if_type;
+}
+
+static inline void *
+sli_get_wwn_port(sli4_t *sli4)
+{
+ return sli4->config.wwpn;
+}
+
+static inline void *
+sli_get_wwn_node(sli4_t *sli4)
+{
+ return sli4->config.wwnn;
+}
+
+static inline void *
+sli_get_vpd(sli4_t *sli4)
+{
+ return sli4->vpd.data.virt;
+}
+
+static inline uint32_t
+sli_get_vpd_len(sli4_t *sli4)
+{
+ return sli4->vpd.length;
+}
+
+static inline uint32_t
+sli_get_fw_revision(sli4_t *sli4, uint32_t which)
+{
+ return sli4->config.fw_rev[which];
+}
+
+static inline void *
+sli_get_fw_name(sli4_t *sli4, uint32_t which)
+{
+ return sli4->config.fw_name[which];
+}
+
+static inline char *
+sli_get_ipl_name(sli4_t *sli4)
+{
+ return sli4->config.ipl_name;
+}
+
+static inline uint32_t
+sli_get_hw_revision(sli4_t *sli4, uint32_t which)
+{
+ return sli4->config.hw_rev[which];
+}
+
+static inline uint32_t
+sli_get_auto_xfer_rdy_capable(sli4_t *sli4)
+{
+ return sli4->config.auto_xfer_rdy;
+}
+
+static inline uint32_t
+sli_get_dif_capable(sli4_t *sli4)
+{
+ return sli4->config.features.flag.dif;
+}
+
+static inline uint32_t
+sli_is_dif_inline_capable(sli4_t *sli4)
+{
+ return sli_get_dif_capable(sli4) && sli4->config.t10_dif_inline_capable;
+}
+
+static inline uint32_t
+sli_is_dif_separate_capable(sli4_t *sli4)
+{
+ return sli_get_dif_capable(sli4) && sli4->config.t10_dif_separate_capable;
+}
+
+static inline uint32_t
+sli_get_is_dual_ulp_capable(sli4_t *sli4)
+{
+ return sli4->config.dual_ulp_capable;
+}
+
+static inline uint32_t
+sli_get_is_sgl_chaining_capable(sli4_t *sli4)
+{
+ return sli4->config.sgl_chaining_params.chaining_capable;
+}
+
+static inline uint32_t
+sli_get_is_ulp_enabled(sli4_t *sli4, uint16_t ulp)
+{
+ return sli4->config.is_ulp_fc[ulp];
+}
+
+static inline uint32_t
+sli_get_hlm_capable(sli4_t *sli4)
+{
+ return sli4->config.features.flag.hlm;
+}
+
+static inline int32_t
+sli_set_hlm(sli4_t *sli4, uint32_t value)
+{
+ if (value && !sli4->config.features.flag.hlm) {
+ ocs_log_test(sli4->os, "HLM not supported\n");
+ return -1;
+ }
+
+ sli4->config.high_login_mode = value != 0 ? TRUE : FALSE;
+
+ return 0;
+}
+
+static inline uint32_t
+sli_get_hlm(sli4_t *sli4)
+{
+ return sli4->config.high_login_mode;
+}
+
+static inline uint32_t
+sli_get_sgl_preregister_required(sli4_t *sli4)
+{
+ return sli4->config.sgl_pre_registration_required;
+}
+
+static inline uint32_t
+sli_get_sgl_preregister(sli4_t *sli4)
+{
+ return sli4->config.sgl_pre_registered;
+}
+
+static inline int32_t
+sli_set_sgl_preregister(sli4_t *sli4, uint32_t value)
+{
+ if ((value == 0) && sli4->config.sgl_pre_registration_required) {
+ ocs_log_test(sli4->os, "SGL pre-registration required\n");
+ return -1;
+ }
+
+ sli4->config.sgl_pre_registered = value != 0 ? TRUE : FALSE;
+
+ return 0;
+}
+
+static inline sli4_asic_type_e
+sli_get_asic_type(sli4_t *sli4)
+{
+ return sli4->asic_type;
+}
+
+static inline sli4_asic_rev_e
+sli_get_asic_rev(sli4_t *sli4)
+{
+ return sli4->asic_rev;
+}
+
+static inline int32_t
+sli_set_topology(sli4_t *sli4, uint32_t value)
+{
+ int32_t rc = 0;
+
+ switch (value) {
+ case SLI4_READ_CFG_TOPO_FCOE:
+ case SLI4_READ_CFG_TOPO_FC:
+ case SLI4_READ_CFG_TOPO_FC_DA:
+ case SLI4_READ_CFG_TOPO_FC_AL:
+ sli4->config.topology = value;
+ break;
+ default:
+ ocs_log_test(sli4->os, "unsupported topology %#x\n", value);
+ rc = -1;
+ }
+
+ return rc;
+}
+
+static inline uint16_t
+sli_get_link_module_type(sli4_t *sli4)
+{
+ return sli4->config.link_module_type;
+}
+
+static inline char *
+sli_get_portnum(sli4_t *sli4)
+{
+ return sli4->config.port_name;
+}
+
+static inline char *
+sli_get_bios_version_string(sli4_t *sli4)
+{
+ return sli4->config.bios_version_string;
+}
+
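+/*
+ * When the count method is non-zero, the Port encodes the entry count
+ * as a power-of-two mask scaled by 16 (e.g. mask 0x100 yields
+ * (1 << 8) * 16 = 4096 entries); otherwise the mask is the count.
+ */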
+static inline uint32_t
+sli_convert_mask_to_count(uint32_t method, uint32_t mask)
+{
+ uint32_t count = 0;
+
+ if (method) {
+ count = 1 << ocs_lg2(mask);
+ count *= 16;
+ } else {
+ count = mask;
+ }
+
+ return count;
+}
+
+/**
+ * @brief Common Create Queue function prototype
+ */
+typedef int32_t (*sli4_create_q_fn_t)(sli4_t *, void *, size_t, ocs_dma_t *, uint16_t, uint16_t);
+
+/**
+ * @brief Common Destroy Queue function prototype
+ */
+typedef int32_t (*sli4_destroy_q_fn_t)(sli4_t *, void *, size_t, uint16_t);
+
+
+/****************************************************************************
+ * Function prototypes
+ */
+extern int32_t sli_cmd_config_auto_xfer_rdy(sli4_t *, void *, size_t, uint32_t);
+extern int32_t sli_cmd_config_auto_xfer_rdy_hp(sli4_t *, void *, size_t, uint32_t, uint32_t, uint32_t);
+extern int32_t sli_cmd_config_link(sli4_t *, void *, size_t);
+extern int32_t sli_cmd_down_link(sli4_t *, void *, size_t);
+extern int32_t sli_cmd_dump_type4(sli4_t *, void *, size_t, uint16_t);
+extern int32_t sli_cmd_common_read_transceiver_data(sli4_t *, void *, size_t, uint32_t, ocs_dma_t *);
+extern int32_t sli_cmd_read_link_stats(sli4_t *, void *, size_t, uint8_t, uint8_t, uint8_t);
+extern int32_t sli_cmd_read_status(sli4_t *sli4, void *buf, size_t size, uint8_t clear_counters);
+extern int32_t sli_cmd_init_link(sli4_t *, void *, size_t, uint32_t, uint8_t);
+extern int32_t sli_cmd_init_vfi(sli4_t *, void *, size_t, uint16_t, uint16_t, uint16_t);
+extern int32_t sli_cmd_init_vpi(sli4_t *, void *, size_t, uint16_t, uint16_t);
+extern int32_t sli_cmd_post_xri(sli4_t *, void *, size_t, uint16_t, uint16_t);
+extern int32_t sli_cmd_release_xri(sli4_t *, void *, size_t, uint8_t);
+extern int32_t sli_cmd_read_sparm64(sli4_t *, void *, size_t, ocs_dma_t *, uint16_t);
+extern int32_t sli_cmd_read_topology(sli4_t *, void *, size_t, ocs_dma_t *);
+extern int32_t sli_cmd_read_nvparms(sli4_t *, void *, size_t);
+extern int32_t sli_cmd_write_nvparms(sli4_t *, void *, size_t, uint8_t *, uint8_t *, uint8_t, uint32_t);
+typedef struct {
+ uint16_t rq_id;
+ uint8_t r_ctl_mask;
+ uint8_t r_ctl_match;
+ uint8_t type_mask;
+ uint8_t type_match;
+} sli4_cmd_rq_cfg_t;
+extern int32_t sli_cmd_reg_fcfi(sli4_t *, void *, size_t, uint16_t,
+ sli4_cmd_rq_cfg_t rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG], uint16_t);
+extern int32_t sli_cmd_reg_fcfi_mrq(sli4_t *, void *, size_t, uint8_t, uint16_t, uint16_t, uint8_t, uint8_t , uint16_t, sli4_cmd_rq_cfg_t *);
+
+extern int32_t sli_cmd_reg_rpi(sli4_t *, void *, size_t, uint32_t, uint16_t, uint16_t, ocs_dma_t *, uint8_t, uint8_t);
+extern int32_t sli_cmd_reg_vfi(sli4_t *, void *, size_t, ocs_domain_t *);
+extern int32_t sli_cmd_reg_vpi(sli4_t *, void *, size_t, ocs_sli_port_t *, uint8_t);
+extern int32_t sli_cmd_sli_config(sli4_t *, void *, size_t, uint32_t, ocs_dma_t *);
+extern int32_t sli_cmd_unreg_fcfi(sli4_t *, void *, size_t, uint16_t);
+extern int32_t sli_cmd_unreg_rpi(sli4_t *, void *, size_t, uint16_t, sli4_resource_e, uint32_t);
+extern int32_t sli_cmd_unreg_vfi(sli4_t *, void *, size_t, ocs_domain_t *, uint32_t);
+extern int32_t sli_cmd_unreg_vpi(sli4_t *, void *, size_t, uint16_t, uint32_t);
+extern int32_t sli_cmd_common_nop(sli4_t *, void *, size_t, uint64_t);
+extern int32_t sli_cmd_common_get_resource_extent_info(sli4_t *, void *, size_t, uint16_t);
+extern int32_t sli_cmd_common_get_sli4_parameters(sli4_t *, void *, size_t);
+extern int32_t sli_cmd_common_write_object(sli4_t *, void *, size_t,
+ uint16_t, uint16_t, uint32_t, uint32_t, char *, ocs_dma_t *);
+extern int32_t sli_cmd_common_delete_object(sli4_t *, void *, size_t, char *);
+extern int32_t sli_cmd_common_read_object(sli4_t *, void *, size_t, uint32_t,
+ uint32_t, char *, ocs_dma_t *);
+extern int32_t sli_cmd_dmtf_exec_clp_cmd(sli4_t *sli4, void *buf, size_t size,
+ ocs_dma_t *cmd,
+ ocs_dma_t *resp);
+extern int32_t sli_cmd_common_set_dump_location(sli4_t *sli4, void *buf, size_t size,
+ uint8_t query, uint8_t is_buffer_list,
+ ocs_dma_t *buffer, uint8_t fdb);
+extern int32_t sli_cmd_common_set_features(sli4_t *, void *, size_t, uint32_t, uint32_t, void*);
+extern int32_t sli_cmd_common_get_profile_list(sli4_t *sli4, void *buf,
+ size_t size, uint32_t start_profile_index, ocs_dma_t *dma);
+extern int32_t sli_cmd_common_get_active_profile(sli4_t *sli4, void *buf,
+ size_t size);
+extern int32_t sli_cmd_common_set_active_profile(sli4_t *sli4, void *buf,
+ size_t size,
+ uint32_t fd,
+ uint32_t active_profile_id);
+extern int32_t sli_cmd_common_get_reconfig_link_info(sli4_t *sli4, void *buf,
+ size_t size, ocs_dma_t *dma);
+extern int32_t sli_cmd_common_set_reconfig_link_id(sli4_t *sli4, void *buf,
+ size_t size, ocs_dma_t *dma,
+ uint32_t fd, uint32_t active_link_config_id);
+extern int32_t sli_cmd_common_get_function_config(sli4_t *sli4, void *buf,
+ size_t size);
+extern int32_t sli_cmd_common_get_profile_config(sli4_t *sli4, void *buf,
+ size_t size, ocs_dma_t *dma);
+extern int32_t sli_cmd_common_set_profile_config(sli4_t *sli4, void *buf,
+ size_t size, ocs_dma_t *dma,
+ uint8_t profile_id, uint32_t descriptor_count,
+ uint8_t isap);
+
+extern int32_t sli_cqe_mq(void *);
+extern int32_t sli_cqe_async(sli4_t *, void *);
+
+extern int32_t sli_setup(sli4_t *, ocs_os_handle_t, sli4_port_type_e);
+extern void sli_calc_max_qentries(sli4_t *sli4);
+extern int32_t sli_init(sli4_t *);
+extern int32_t sli_reset(sli4_t *);
+extern int32_t sli_fw_reset(sli4_t *);
+extern int32_t sli_teardown(sli4_t *);
+extern int32_t sli_callback(sli4_t *, sli4_callback_e, void *, void *);
+extern int32_t sli_bmbx_command(sli4_t *);
+extern int32_t __sli_queue_init(sli4_t *, sli4_queue_t *, uint32_t, size_t, uint32_t, uint32_t);
+extern int32_t __sli_create_queue(sli4_t *, sli4_queue_t *);
+extern int32_t sli_eq_modify_delay(sli4_t *sli4, sli4_queue_t *eq, uint32_t num_eq, uint32_t shift, uint32_t delay_mult);
+extern int32_t sli_queue_alloc(sli4_t *, uint32_t, sli4_queue_t *, uint32_t, sli4_queue_t *, uint16_t);
+extern int32_t sli_cq_alloc_set(sli4_t *, sli4_queue_t *qs[], uint32_t, uint32_t, sli4_queue_t *eqs[]);
+extern int32_t sli_get_queue_entry_size(sli4_t *, uint32_t);
+extern int32_t sli_queue_free(sli4_t *, sli4_queue_t *, uint32_t, uint32_t);
+extern int32_t sli_queue_reset(sli4_t *, sli4_queue_t *);
+extern int32_t sli_queue_is_empty(sli4_t *, sli4_queue_t *);
+extern int32_t sli_queue_eq_arm(sli4_t *, sli4_queue_t *, uint8_t);
+extern int32_t sli_queue_arm(sli4_t *, sli4_queue_t *, uint8_t);
+extern int32_t _sli_queue_write(sli4_t *, sli4_queue_t *, uint8_t *);
+extern int32_t sli_queue_write(sli4_t *, sli4_queue_t *, uint8_t *);
+extern int32_t sli_queue_read(sli4_t *, sli4_queue_t *, uint8_t *);
+extern int32_t sli_queue_index(sli4_t *, sli4_queue_t *);
+extern int32_t _sli_queue_poke(sli4_t *, sli4_queue_t *, uint32_t, uint8_t *);
+extern int32_t sli_queue_poke(sli4_t *, sli4_queue_t *, uint32_t, uint8_t *);
+extern int32_t sli_resource_alloc(sli4_t *, sli4_resource_e, uint32_t *, uint32_t *);
+extern int32_t sli_resource_free(sli4_t *, sli4_resource_e, uint32_t);
+extern int32_t sli_resource_reset(sli4_t *, sli4_resource_e);
+extern int32_t sli_eq_parse(sli4_t *, uint8_t *, uint16_t *);
+extern int32_t sli_cq_parse(sli4_t *, sli4_queue_t *, uint8_t *, sli4_qentry_e *, uint16_t *);
+
+extern int32_t sli_raise_ue(sli4_t *, uint8_t);
+extern int32_t sli_dump_is_ready(sli4_t *);
+extern int32_t sli_dump_is_present(sli4_t *);
+extern int32_t sli_reset_required(sli4_t *);
+extern int32_t sli_fw_error_status(sli4_t *);
+extern int32_t sli_fw_ready(sli4_t *);
+extern uint32_t sli_reg_read(sli4_t *, sli4_regname_e);
+extern void sli_reg_write(sli4_t *, sli4_regname_e, uint32_t);
+extern int32_t sli_link_is_configurable(sli4_t *);
+
+#include "ocs_fcp.h"
+
+/**
+ * @brief Maximum value for a FCFI
+ *
+ * Note that although most commands provide a 16 bit field for the FCFI,
+ * the FC/FCoE Asynchronous Received CQE format only provides 6 bits for
+ * the returned FCFI. Effectively, then, the FCFI cannot be larger than
+ * 1 << 6, or 64.
+ */
+#define SLI4_MAX_FCFI 64
+
+/**
+ * @brief Maximum value for FCF index
+ *
+ * The SLI-4 specification uses a 16 bit field in most places for the FCF
+ * index, but practically, this value will be much smaller. Arbitrarily
+ * limit the max FCF index to match the max FCFI value.
+ */
+#define SLI4_MAX_FCF_INDEX SLI4_MAX_FCFI
+
+/*************************************************************************
+ * SLI-4 FC/FCoE mailbox command formats and definitions.
+ */
+
+/**
+ * FC/FCoE opcode (OPC) values.
+ */
+#define SLI4_OPC_FCOE_WQ_CREATE 0x1
+#define SLI4_OPC_FCOE_WQ_DESTROY 0x2
+#define SLI4_OPC_FCOE_POST_SGL_PAGES 0x3
+#define SLI4_OPC_FCOE_RQ_CREATE 0x5
+#define SLI4_OPC_FCOE_RQ_DESTROY 0x6
+#define SLI4_OPC_FCOE_READ_FCF_TABLE 0x8
+#define SLI4_OPC_FCOE_POST_HDR_TEMPLATES 0xb
+#define SLI4_OPC_FCOE_REDISCOVER_FCF 0x10
+
+/* Use the default CQ associated with the WQ */
+#define SLI4_CQ_DEFAULT 0xffff
+
+typedef struct sli4_physical_page_descriptor_s {
+ uint32_t low;
+ uint32_t high;
+} sli4_physical_page_descriptor_t;
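
Each descriptor carries one 64-bit bus address split across two 32-bit words. A minimal sketch of filling one in (the helper name is illustrative only, not part of the driver):

static void
set_page_descriptor(sli4_physical_page_descriptor_t *desc, uint64_t phys)
{
	desc->low  = (uint32_t)(phys & 0xffffffff);	/* address bits 31:0  */
	desc->high = (uint32_t)(phys >> 32);		/* address bits 63:32 */
}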
+
+/**
+ * @brief FCOE_WQ_CREATE
+ *
+ * Create a Work Queue for FC/FCoE use.
+ */
+#define SLI4_FCOE_WQ_CREATE_V0_MAX_PAGES 4
+
+typedef struct sli4_req_fcoe_wq_create_s {
+ sli4_req_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t num_pages:8,
+ dua:1,
+ :7,
+ cq_id:16;
+ sli4_physical_page_descriptor_t page_physical_address[SLI4_FCOE_WQ_CREATE_V0_MAX_PAGES];
+ uint32_t bqu:1,
+ :7,
+ ulp:8,
+ :16;
+#else
+#error big endian version not defined
+#endif
+} sli4_req_fcoe_wq_create_t;
+
+/**
+ * @brief FCOE_WQ_CREATE_V1
+ *
+ * Create a version 1 Work Queue for FC/FCoE use.
+ */
+typedef struct sli4_req_fcoe_wq_create_v1_s {
+ sli4_req_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t num_pages:16,
+ cq_id:16;
+ uint32_t page_size:8,
+ wqe_size:4,
+ :4,
+ wqe_count:16;
+ uint32_t rsvd6;
+ sli4_physical_page_descriptor_t page_physical_address[8];
+#else
+#error big endian version not defined
+#endif
+} sli4_req_fcoe_wq_create_v1_t;
+
+#define SLI4_FCOE_WQ_CREATE_V1_MAX_PAGES 8
+
+/**
+ * @brief FCOE_WQ_DESTROY
+ *
+ * Destroy an FC/FCoE Work Queue.
+ */
+typedef struct sli4_req_fcoe_wq_destroy_s {
+ sli4_req_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t wq_id:16,
+ :16;
+#else
+#error big endian version not defined
+#endif
+} sli4_req_fcoe_wq_destroy_t;
+
+/**
+ * @brief FCOE_POST_SGL_PAGES
+ *
+ * Register the scatter gather list (SGL) memory and associate it with an XRI.
+ */
+typedef struct sli4_req_fcoe_post_sgl_pages_s {
+ sli4_req_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t xri_start:16,
+ xri_count:16;
+ struct {
+ uint32_t page0_low;
+ uint32_t page0_high;
+ uint32_t page1_low;
+ uint32_t page1_high;
+ } page_set[10];
+#else
+#error big endian version not defined
+#endif
+} sli4_req_fcoe_post_sgl_pages_t;
+
+/**
+ * @brief FCOE_RQ_CREATE
+ *
+ * Create a Receive Queue for FC/FCoE use.
+ */
+typedef struct sli4_req_fcoe_rq_create_s {
+ sli4_req_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t num_pages:16,
+ dua:1,
+ bqu:1,
+ :6,
+ ulp:8;
+ uint32_t :16,
+ rqe_count:4,
+ :12;
+ uint32_t rsvd6;
+ uint32_t buffer_size:16,
+ cq_id:16;
+ uint32_t rsvd8;
+ sli4_physical_page_descriptor_t page_physical_address[8];
+#else
+#error big endian version not defined
+#endif
+} sli4_req_fcoe_rq_create_t;
+
+#define SLI4_FCOE_RQ_CREATE_V0_MAX_PAGES 8
+#define SLI4_FCOE_RQ_CREATE_V0_MIN_BUF_SIZE 128
+#define SLI4_FCOE_RQ_CREATE_V0_MAX_BUF_SIZE 2048
+
+/**
+ * @brief FCOE_RQ_CREATE_V1
+ *
+ * Create a version 1 Receive Queue for FC/FCoE use.
+ */
+typedef struct sli4_req_fcoe_rq_create_v1_s {
+ sli4_req_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t num_pages:16,
+ :13,
+ dim:1,
+ dfd:1,
+ dnb:1;
+ uint32_t page_size:8,
+ rqe_size:4,
+ :4,
+ rqe_count:16;
+ uint32_t rsvd6;
+ uint32_t :16,
+ cq_id:16;
+ uint32_t buffer_size;
+ sli4_physical_page_descriptor_t page_physical_address[8];
+#else
+#error big endian version not defined
+#endif
+} sli4_req_fcoe_rq_create_v1_t;
+
+
+/**
+ * @brief FCOE_RQ_CREATE_V2
+ *
+ * Create a version 2 Receive Queue for FC/FCoE use.
+ */
+typedef struct sli4_req_fcoe_rq_create_v2_s {
+ sli4_req_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t num_pages:16,
+ rq_count:8,
+ :5,
+ dim:1,
+ dfd:1,
+ dnb:1;
+ uint32_t page_size:8,
+ rqe_size:4,
+ :4,
+ rqe_count:16;
+ uint32_t hdr_buffer_size:16,
+ payload_buffer_size:16;
+ uint32_t base_cq_id:16,
+ :16;
+ uint32_t rsvd;
+ sli4_physical_page_descriptor_t page_physical_address[0];
+#else
+#error big endian version not defined
+#endif
+} sli4_req_fcoe_rq_create_v2_t;
+
+
+#define SLI4_FCOE_RQ_CREATE_V1_MAX_PAGES 8
+#define SLI4_FCOE_RQ_CREATE_V1_MIN_BUF_SIZE 64
+#define SLI4_FCOE_RQ_CREATE_V1_MAX_BUF_SIZE 2048
+
+#define SLI4_FCOE_RQE_SIZE_8 0x2
+#define SLI4_FCOE_RQE_SIZE_16 0x3
+#define SLI4_FCOE_RQE_SIZE_32 0x4
+#define SLI4_FCOE_RQE_SIZE_64 0x5
+#define SLI4_FCOE_RQE_SIZE_128 0x6
+
+#define SLI4_FCOE_RQ_PAGE_SIZE_4096 0x1
+#define SLI4_FCOE_RQ_PAGE_SIZE_8192 0x2
+#define SLI4_FCOE_RQ_PAGE_SIZE_16384 0x4
+#define SLI4_FCOE_RQ_PAGE_SIZE_32768 0x8
+#define SLI4_FCOE_RQ_PAGE_SIZE_64536 0x10
+
+#define SLI4_FCOE_RQE_SIZE 8
+
+/**
+ * @brief FCOE_RQ_DESTROY
+ *
+ * Destroy an FC/FCoE Receive Queue.
+ */
+typedef struct sli4_req_fcoe_rq_destroy_s {
+ sli4_req_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t rq_id:16,
+ :16;
+#else
+#error big endian version not defined
+#endif
+} sli4_req_fcoe_rq_destroy_t;
+
+/**
+ * @brief FCOE_READ_FCF_TABLE
+ *
+ * Retrieve an FCF database (also known as a table) entry created by the SLI Port
+ * during FIP discovery.
+ */
+typedef struct sli4_req_fcoe_read_fcf_table_s {
+ sli4_req_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t fcf_index:16,
+ :16;
+#else
+#error big endian version not defined
+#endif
+} sli4_req_fcoe_read_fcf_table_t;
+
+/* An FCF index of -1 on the request means return the first valid entry */
+#define SLI4_FCOE_FCF_TABLE_FIRST (UINT16_MAX)
+
+/**
+ * @brief FCF table entry
+ *
+ * This is the information returned by the FCOE_READ_FCF_TABLE command.
+ */
+typedef struct sli4_fcf_entry_s {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t max_receive_size;
+ uint32_t fip_keep_alive;
+ uint32_t fip_priority;
+ uint8_t fcf_mac_address[6];
+ uint8_t fcf_available;
+ uint8_t mac_address_provider;
+ uint8_t fabric_name_id[8];
+ uint8_t fc_map[3];
+ uint8_t val:1,
+ fc:1,
+ :5,
+ sol:1;
+ uint32_t fcf_index:16,
+ fcf_state:16;
+ uint8_t vlan_bitmap[512];
+ uint8_t switch_name[8];
+#else
+#error big endian version not defined
+#endif
+} sli4_fcf_entry_t;
+
+/**
+ * @brief FCOE_READ_FCF_TABLE response.
+ */
+typedef struct sli4_res_fcoe_read_fcf_table_s {
+ sli4_res_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t event_tag;
+ uint32_t next_index:16,
+ :16;
+ sli4_fcf_entry_t fcf_entry;
+#else
+#error big endian version not defined
+#endif
+} sli4_res_fcoe_read_fcf_table_t;
+
+/* A next FCF index of -1 in the response means this is the last valid entry */
+#define SLI4_FCOE_FCF_TABLE_LAST (UINT16_MAX)
+
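Taken together, the FIRST and LAST sentinels let a caller walk the entire table with sli_cmd_fcoe_read_fcf_table() (declared later in this header). A hedged sketch, assuming sli4, a mailbox buffer buf of size buf_size, and an ocs_dma_t dma with a .virt mapping are in scope, and that issue_mbox() is a hypothetical synchronous mailbox helper:

uint16_t index = SLI4_FCOE_FCF_TABLE_FIRST;

do {
	sli4_res_fcoe_read_fcf_table_t *res;

	sli_cmd_fcoe_read_fcf_table(sli4, buf, buf_size, &dma, index);
	issue_mbox(sli4, buf);		/* hypothetical helper */

	res = dma.virt;			/* assumed ocs_dma_t layout */
	/* ... consume res->fcf_entry here ... */
	index = res->next_index;
} while (index != SLI4_FCOE_FCF_TABLE_LAST);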
+
+/**
+ * @brief FCOE_POST_HDR_TEMPLATES
+ */
+typedef struct sli4_req_fcoe_post_hdr_templates_s {
+ sli4_req_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t rpi_offset:16,
+ page_count:16;
+ sli4_physical_page_descriptor_t page_descriptor[0];
+#else
+#error big endian version not defined
+#endif
+} sli4_req_fcoe_post_hdr_templates_t;
+
+#define SLI4_FCOE_HDR_TEMPLATE_SIZE 64
+
+/**
+ * @brief FCOE_REDISCOVER_FCF
+ */
+typedef struct sli4_req_fcoe_rediscover_fcf_s {
+ sli4_req_hdr_t hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t fcf_count:16,
+ :16;
+ uint32_t rsvd5;
+ uint16_t fcf_index[16];
+#else
+#error big endian version not defined
+#endif
+} sli4_req_fcoe_rediscover_fcf_t;
+
+
+/**
+ * Work Queue Entry (WQE) types.
+ */
+#define SLI4_WQE_ABORT 0x0f
+#define SLI4_WQE_ELS_REQUEST64 0x8a
+#define SLI4_WQE_FCP_IBIDIR64 0xac
+#define SLI4_WQE_FCP_IREAD64 0x9a
+#define SLI4_WQE_FCP_IWRITE64 0x98
+#define SLI4_WQE_FCP_ICMND64 0x9c
+#define SLI4_WQE_FCP_TRECEIVE64 0xa1
+#define SLI4_WQE_FCP_CONT_TRECEIVE64 0xe5
+#define SLI4_WQE_FCP_TRSP64 0xa3
+#define SLI4_WQE_FCP_TSEND64 0x9f
+#define SLI4_WQE_GEN_REQUEST64 0xc2
+#define SLI4_WQE_SEND_FRAME 0xe1
+#define SLI4_WQE_XMIT_BCAST64 0x84
+#define SLI4_WQE_XMIT_BLS_RSP 0x97
+#define SLI4_WQE_ELS_RSP64 0x95
+#define SLI4_WQE_XMIT_SEQUENCE64 0x82
+#define SLI4_WQE_REQUEUE_XRI 0x93
+
+/**
+ * WQE command types.
+ */
+#define SLI4_CMD_FCP_IREAD64_WQE 0x00
+#define SLI4_CMD_FCP_ICMND64_WQE 0x00
+#define SLI4_CMD_FCP_IWRITE64_WQE 0x01
+#define SLI4_CMD_FCP_TRECEIVE64_WQE 0x02
+#define SLI4_CMD_FCP_TRSP64_WQE 0x03
+#define SLI4_CMD_FCP_TSEND64_WQE 0x07
+#define SLI4_CMD_GEN_REQUEST64_WQE 0x08
+#define SLI4_CMD_XMIT_BCAST64_WQE 0x08
+#define SLI4_CMD_XMIT_BLS_RSP64_WQE 0x08
+#define SLI4_CMD_ABORT_WQE 0x08
+#define SLI4_CMD_XMIT_SEQUENCE64_WQE 0x08
+#define SLI4_CMD_REQUEUE_XRI_WQE 0x0a
+#define SLI4_CMD_SEND_FRAME_WQE 0x0a
+
+#define SLI4_WQE_SIZE 0x05
+#define SLI4_WQE_EXT_SIZE 0x06
+
+#define SLI4_WQE_BYTES (16 * sizeof(uint32_t))
+#define SLI4_WQE_EXT_BYTES (32 * sizeof(uint32_t))
+
+/* Mask for ccp (CS_CTL) */
+#define SLI4_MASK_CCP 0xfe /* Upper 7 bits of CS_CTL are the priority */
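
Since the priority occupies the upper seven bits, recovering it from a CS_CTL byte is a mask-and-shift; a one-line sketch (cs_ctl assumed in scope):

uint8_t pri = (cs_ctl & SLI4_MASK_CCP) >> 1;	/* 7-bit frame priority */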
+
+/**
+ * @brief Generic WQE
+ */
+typedef struct sli4_generic_wqe_s {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t cmd_spec0_5[6];
+ uint32_t xri_tag:16,
+ context_tag:16;
+ uint32_t :2,
+ ct:2,
+ :4,
+ command:8,
+ class:3,
+ :1,
+ pu:2,
+ :2,
+ timer:8;
+ uint32_t abort_tag;
+ uint32_t request_tag:16,
+ :16;
+ uint32_t ebde_cnt:4,
+ :3,
+ len_loc:2,
+ qosd:1,
+ :1,
+ xbl:1,
+ hlm:1,
+ iod:1,
+ dbde:1,
+ wqes:1,
+ pri:3,
+ pv:1,
+ eat:1,
+ xc:1,
+ :1,
+ ccpe:1,
+ ccp:8;
+ uint32_t cmd_type:4,
+ :3,
+ wqec:1,
+ :8,
+ cq_id:16;
+#else
+#error big endian version not defined
+#endif
+} sli4_generic_wqe_t;
+
+/**
+ * @brief WQE used to abort exchanges.
+ */
+typedef struct sli4_abort_wqe_s {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t rsvd0;
+ uint32_t rsvd1;
+ uint32_t ext_t_tag;
+ uint32_t ia:1,
+ ir:1,
+ :6,
+ criteria:8,
+ :16;
+ uint32_t ext_t_mask;
+ uint32_t t_mask;
+ uint32_t xri_tag:16,
+ context_tag:16;
+ uint32_t :2,
+ ct:2,
+ :4,
+ command:8,
+ class:3,
+ :1,
+ pu:2,
+ :2,
+ timer:8;
+ uint32_t t_tag;
+ uint32_t request_tag:16,
+ :16;
+ uint32_t ebde_cnt:4,
+ :3,
+ len_loc:2,
+ qosd:1,
+ :1,
+ xbl:1,
+ :1,
+ iod:1,
+ dbde:1,
+ wqes:1,
+ pri:3,
+ pv:1,
+ eat:1,
+ xc:1,
+ :1,
+ ccpe:1,
+ ccp:8;
+ uint32_t cmd_type:4,
+ :3,
+ wqec:1,
+ :8,
+ cq_id:16;
+#else
+#error big endian version not defined
+#endif
+} sli4_abort_wqe_t;
+
+#define SLI4_ABORT_CRITERIA_XRI_TAG 0x01
+#define SLI4_ABORT_CRITERIA_ABORT_TAG 0x02
+#define SLI4_ABORT_CRITERIA_REQUEST_TAG 0x03
+#define SLI4_ABORT_CRITERIA_EXT_ABORT_TAG 0x04
+
+typedef enum {
+ SLI_ABORT_XRI,
+ SLI_ABORT_ABORT_ID,
+ SLI_ABORT_REQUEST_ID,
+ SLI_ABORT_MAX, /* must be last */
+} sli4_abort_type_e;
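
sli_abort_wqe() (declared later in this header) accepts one of these enum values and encodes a corresponding criteria code in the WQE. A plausible translation, sketched here as an illustration rather than the driver's exact logic:

static uint8_t
abort_type_to_criteria(sli4_abort_type_e type)
{
	switch (type) {
	case SLI_ABORT_XRI:
		return SLI4_ABORT_CRITERIA_XRI_TAG;
	case SLI_ABORT_ABORT_ID:
		return SLI4_ABORT_CRITERIA_ABORT_TAG;
	case SLI_ABORT_REQUEST_ID:
		return SLI4_ABORT_CRITERIA_REQUEST_TAG;
	default:
		return 0;	/* invalid abort type */
	}
}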
+
+/**
+ * @brief WQE used to create an ELS request.
+ */
+typedef struct sli4_els_request64_wqe_s {
+ sli4_bde_t els_request_payload;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t els_request_payload_length;
+ uint32_t sid:24,
+ sp:1,
+ :7;
+ uint32_t remote_id:24,
+ :8;
+ uint32_t xri_tag:16,
+ context_tag:16;
+ uint32_t :2,
+ ct:2,
+ :4,
+ command:8,
+ class:3,
+ ar:1,
+ pu:2,
+ :2,
+ timer:8;
+ uint32_t abort_tag;
+ uint32_t request_tag:16,
+ temporary_rpi:16;
+ uint32_t ebde_cnt:4,
+ :3,
+ len_loc:2,
+ qosd:1,
+ :1,
+ xbl:1,
+ hlm:1,
+ iod:1,
+ dbde:1,
+ wqes:1,
+ pri:3,
+ pv:1,
+ eat:1,
+ xc:1,
+ :1,
+ ccpe:1,
+ ccp:8;
+ uint32_t cmd_type:4,
+ els_id:3,
+ wqec:1,
+ :8,
+ cq_id:16;
+ sli4_bde_t els_response_payload_bde;
+ uint32_t max_response_payload_length;
+#else
+#error big endian version not defined
+#endif
+} sli4_els_request64_wqe_t;
+
+#define SLI4_ELS_REQUEST64_CONTEXT_RPI 0x0
+#define SLI4_ELS_REQUEST64_CONTEXT_VPI 0x1
+#define SLI4_ELS_REQUEST64_CONTEXT_VFI 0x2
+#define SLI4_ELS_REQUEST64_CONTEXT_FCFI 0x3
+
+#define SLI4_ELS_REQUEST64_CLASS_2 0x1
+#define SLI4_ELS_REQUEST64_CLASS_3 0x2
+
+#define SLI4_ELS_REQUEST64_DIR_WRITE 0x0
+#define SLI4_ELS_REQUEST64_DIR_READ 0x1
+
+#define SLI4_ELS_REQUEST64_OTHER 0x0
+#define SLI4_ELS_REQUEST64_LOGO 0x1
+#define SLI4_ELS_REQUEST64_FDISC 0x2
+#define SLI4_ELS_REQUEST64_FLOGIN 0x3
+#define SLI4_ELS_REQUEST64_PLOGI 0x4
+
+#define SLI4_ELS_REQUEST64_CMD_GEN 0x08
+#define SLI4_ELS_REQUEST64_CMD_NON_FABRIC 0x0c
+#define SLI4_ELS_REQUEST64_CMD_FABRIC 0x0d
+
+/**
+ * @brief WQE used to create an FCP initiator no data command.
+ */
+typedef struct sli4_fcp_icmnd64_wqe_s {
+ sli4_bde_t bde;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t payload_offset_length:16,
+ fcp_cmd_buffer_length:16;
+ uint32_t rsvd4;
+ uint32_t remote_n_port_id:24,
+ :8;
+ uint32_t xri_tag:16,
+ context_tag:16;
+ uint32_t dif:2,
+ ct:2,
+ bs:3,
+ :1,
+ command:8,
+ class:3,
+ :1,
+ pu:2,
+ erp:1,
+ lnk:1,
+ timer:8;
+ uint32_t abort_tag;
+ uint32_t request_tag:16,
+ :16;
+ uint32_t ebde_cnt:4,
+ :3,
+ len_loc:2,
+ qosd:1,
+ :1,
+ xbl:1,
+ hlm:1,
+ iod:1,
+ dbde:1,
+ wqes:1,
+ pri:3,
+ pv:1,
+ eat:1,
+ xc:1,
+ :1,
+ ccpe:1,
+ ccp:8;
+ uint32_t cmd_type:4,
+ :3,
+ wqec:1,
+ :8,
+ cq_id:16;
+ uint32_t rsvd12;
+ uint32_t rsvd13;
+ uint32_t rsvd14;
+ uint32_t rsvd15;
+#else
+#error big endian version not defined
+#endif
+} sli4_fcp_icmnd64_wqe_t;
+
+/**
+ * @brief WQE used to create an FCP initiator read.
+ */
+typedef struct sli4_fcp_iread64_wqe_s {
+ sli4_bde_t bde;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t payload_offset_length:16,
+ fcp_cmd_buffer_length:16;
+ uint32_t total_transfer_length;
+ uint32_t remote_n_port_id:24,
+ :8;
+ uint32_t xri_tag:16,
+ context_tag:16;
+ uint32_t dif:2,
+ ct:2,
+ bs:3,
+ :1,
+ command:8,
+ class:3,
+ :1,
+ pu:2,
+ erp:1,
+ lnk:1,
+ timer:8;
+ uint32_t abort_tag;
+ uint32_t request_tag:16,
+ :16;
+ uint32_t ebde_cnt:4,
+ :3,
+ len_loc:2,
+ qosd:1,
+ :1,
+ xbl:1,
+ hlm:1,
+ iod:1,
+ dbde:1,
+ wqes:1,
+ pri:3,
+ pv:1,
+ eat:1,
+ xc:1,
+ :1,
+ ccpe:1,
+ ccp:8;
+ uint32_t cmd_type:4,
+ :3,
+ wqec:1,
+ :8,
+ cq_id:16;
+ uint32_t rsvd12;
+#else
+#error big endian version not defined
+#endif
+ sli4_bde_t first_data_bde; /* reserved if performance hints disabled */
+} sli4_fcp_iread64_wqe_t;
+
+/**
+ * @brief WQE used to create an FCP initiator write.
+ */
+typedef struct sli4_fcp_iwrite64_wqe_s {
+ sli4_bde_t bde;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t payload_offset_length:16,
+ fcp_cmd_buffer_length:16;
+ uint32_t total_transfer_length;
+ uint32_t initial_transfer_length;
+ uint32_t xri_tag:16,
+ context_tag:16;
+ uint32_t dif:2,
+ ct:2,
+ bs:3,
+ :1,
+ command:8,
+ class:3,
+ :1,
+ pu:2,
+ erp:1,
+ lnk:1,
+ timer:8;
+ uint32_t abort_tag;
+ uint32_t request_tag:16,
+ :16;
+ uint32_t ebde_cnt:4,
+ :3,
+ len_loc:2,
+ qosd:1,
+ :1,
+ xbl:1,
+ hlm:1,
+ iod:1,
+ dbde:1,
+ wqes:1,
+ pri:3,
+ pv:1,
+ eat:1,
+ xc:1,
+ :1,
+ ccpe:1,
+ ccp:8;
+ uint32_t cmd_type:4,
+ :3,
+ wqec:1,
+ :8,
+ cq_id:16;
+ uint32_t remote_n_port_id:24,
+ :8;
+#else
+#error big endian version not defined
+#endif
+ sli4_bde_t first_data_bde;
+} sli4_fcp_iwrite64_wqe_t;
+
+
+typedef struct sli4_fcp_128byte_wqe_s {
+ uint32_t dw[32];
+} sli4_fcp_128byte_wqe_t;
+
+/**
+ * @brief WQE used to create an FCP target receive, and FCP target
+ * receive continue.
+ */
+typedef struct sli4_fcp_treceive64_wqe_s {
+ sli4_bde_t bde;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t payload_offset_length;
+ uint32_t relative_offset;
+ /**
+ * DWord 5 can either be the task retry identifier (HLM=0) or
+ * the remote N_Port ID (HLM=1), or if implementing the Skyhawk
+ * T10-PI workaround, the secondary xri tag
+ */
+ union {
+ uint32_t sec_xri_tag:16,
+ :16;
+ uint32_t dword;
+ } dword5;
+ uint32_t xri_tag:16,
+ context_tag:16;
+ uint32_t dif:2,
+ ct:2,
+ bs:3,
+ :1,
+ command:8,
+ class:3,
+ ar:1,
+ pu:2,
+ conf:1,
+ lnk:1,
+ timer:8;
+ uint32_t abort_tag;
+ uint32_t request_tag:16,
+ remote_xid:16;
+ uint32_t ebde_cnt:4,
+ :1,
+ app_id_valid:1,
+ :1,
+ len_loc:2,
+ qosd:1,
+ wchn:1,
+ xbl:1,
+ hlm:1,
+ iod:1,
+ dbde:1,
+ wqes:1,
+ pri:3,
+ pv:1,
+ eat:1,
+ xc:1,
+ sr:1,
+ ccpe:1,
+ ccp:8;
+ uint32_t cmd_type:4,
+ :3,
+ wqec:1,
+ :8,
+ cq_id:16;
+ uint32_t fcp_data_receive_length;
+
+#else
+#error big endian version not defined
+#endif
+ sli4_bde_t first_data_bde; /* For performance hints */
+
+} sli4_fcp_treceive64_wqe_t;
+
+/**
+ * @brief WQE used to create an FCP target response.
+ */
+typedef struct sli4_fcp_trsp64_wqe_s {
+ sli4_bde_t bde;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t fcp_response_length;
+ uint32_t rsvd4;
+ /**
+ * DWord 5 can either be the task retry identifier (HLM=0) or
+ * the remote N_Port ID (HLM=1)
+ */
+ uint32_t dword5;
+ uint32_t xri_tag:16,
+ rpi:16;
+ uint32_t :2,
+ ct:2,
+ dnrx:1,
+ :3,
+ command:8,
+ class:3,
+ ag:1,
+ pu:2,
+ conf:1,
+ lnk:1,
+ timer:8;
+ uint32_t abort_tag;
+ uint32_t request_tag:16,
+ remote_xid:16;
+ uint32_t ebde_cnt:4,
+ :1,
+ app_id_valid:1,
+ :1,
+ len_loc:2,
+ qosd:1,
+ wchn:1,
+ xbl:1,
+ hlm:1,
+ iod:1,
+ dbde:1,
+ wqes:1,
+ pri:3,
+ pv:1,
+ eat:1,
+ xc:1,
+ sr:1,
+ ccpe:1,
+ ccp:8;
+ uint32_t cmd_type:4,
+ :3,
+ wqec:1,
+ :8,
+ cq_id:16;
+ uint32_t rsvd12;
+ uint32_t rsvd13;
+ uint32_t rsvd14;
+ uint32_t rsvd15;
+#else
+#error big endian version not defined
+#endif
+} sli4_fcp_trsp64_wqe_t;
+
+/**
+ * @brief WQE used to create an FCP target send (DATA IN).
+ */
+typedef struct sli4_fcp_tsend64_wqe_s {
+ sli4_bde_t bde;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t payload_offset_length;
+ uint32_t relative_offset;
+ /**
+ * DWord 5 can either be the task retry identifier (HLM=0) or
+ * the remote N_Port ID (HLM=1)
+ */
+ uint32_t dword5;
+ uint32_t xri_tag:16,
+ rpi:16;
+ uint32_t dif:2,
+ ct:2,
+ bs:3,
+ :1,
+ command:8,
+ class:3,
+ ar:1,
+ pu:2,
+ conf:1,
+ lnk:1,
+ timer:8;
+ uint32_t abort_tag;
+ uint32_t request_tag:16,
+ remote_xid:16;
+ uint32_t ebde_cnt:4,
+ :1,
+ app_id_valid:1,
+ :1,
+ len_loc:2,
+ qosd:1,
+ wchn:1,
+ xbl:1,
+ hlm:1,
+ iod:1,
+ dbde:1,
+ wqes:1,
+ pri:3,
+ pv:1,
+ eat:1,
+ xc:1,
+ sr:1,
+ ccpe:1,
+ ccp:8;
+ uint32_t cmd_type:4,
+ :3,
+ wqec:1,
+ :8,
+ cq_id:16;
+ uint32_t fcp_data_transmit_length;
+
+#else
+#error big endian version not defined
+#endif
+ sli4_bde_t first_data_bde; /* For performance hints */
+} sli4_fcp_tsend64_wqe_t;
+
+#define SLI4_IO_CONTINUATION BIT(0) /** The XRI associated with this IO is already active */
+#define SLI4_IO_AUTO_GOOD_RESPONSE BIT(1) /** Automatically generate a good RSP frame */
+#define SLI4_IO_NO_ABORT BIT(2)
+#define SLI4_IO_DNRX BIT(3) /** Set the DNRX bit because no auto xfer rdy buffer is posted */
+
+/* WQE DIF field contents */
+#define SLI4_DIF_DISABLED 0
+#define SLI4_DIF_PASS_THROUGH 1
+#define SLI4_DIF_STRIP 2
+#define SLI4_DIF_INSERT 3
+
+/**
+ * @brief WQE used to create a general request.
+ */
+typedef struct sli4_gen_request64_wqe_s {
+ sli4_bde_t bde;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t request_payload_length;
+ uint32_t relative_offset;
+ uint32_t :8,
+ df_ctl:8,
+ type:8,
+ r_ctl:8;
+ uint32_t xri_tag:16,
+ context_tag:16;
+ uint32_t :2,
+ ct:2,
+ :4,
+ command:8,
+ class:3,
+ :1,
+ pu:2,
+ :2,
+ timer:8;
+ uint32_t abort_tag;
+ uint32_t request_tag:16,
+ :16;
+ uint32_t ebde_cnt:4,
+ :3,
+ len_loc:2,
+ qosd:1,
+ :1,
+ xbl:1,
+ hlm:1,
+ iod:1,
+ dbde:1,
+ wqes:1,
+ pri:3,
+ pv:1,
+ eat:1,
+ xc:1,
+ :1,
+ ccpe:1,
+ ccp:8;
+ uint32_t cmd_type:4,
+ :3,
+ wqec:1,
+ :8,
+ cq_id:16;
+ uint32_t remote_n_port_id:24,
+ :8;
+ uint32_t rsvd13;
+ uint32_t rsvd14;
+ uint32_t max_response_payload_length;
+#else
+#error big endian version not defined
+#endif
+} sli4_gen_request64_wqe_t;
+
+/**
+ * @brief WQE used to create a send frame request.
+ */
+typedef struct sli4_send_frame_wqe_s {
+ sli4_bde_t bde;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t frame_length;
+ uint32_t fc_header_0_1[2];
+ uint32_t xri_tag:16,
+ context_tag:16;
+ uint32_t :2,
+ ct:2,
+ :4,
+ command:8,
+ class:3,
+ :1,
+ pu:2,
+ :2,
+ timer:8;
+ uint32_t abort_tag;
+ uint32_t request_tag:16,
+ eof:8,
+ sof:8;
+ uint32_t ebde_cnt:4,
+ :3,
+ lenloc:2,
+ qosd:1,
+ wchn:1,
+ xbl:1,
+ hlm:1,
+ iod:1,
+ dbde:1,
+ wqes:1,
+ pri:3,
+ pv:1,
+ eat:1,
+ xc:1,
+ :1,
+ ccpe:1,
+ ccp:8;
+ uint32_t cmd_type:4,
+ :3,
+ wqec:1,
+ :8,
+ cq_id:16;
+ uint32_t fc_header_2_5[4];
+#else
+#error big endian version not defined
+#endif
+} sli4_send_frame_wqe_t;
+
+/**
+ * @brief WQE used to create a transmit sequence.
+ */
+typedef struct sli4_xmit_sequence64_wqe_s {
+ sli4_bde_t bde;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t remote_n_port_id:24,
+ :8;
+ uint32_t relative_offset;
+ uint32_t :2,
+ si:1,
+ ft:1,
+ :2,
+ xo:1,
+ ls:1,
+ df_ctl:8,
+ type:8,
+ r_ctl:8;
+ uint32_t xri_tag:16,
+ context_tag:16;
+ uint32_t dif:2,
+ ct:2,
+ bs:3,
+ :1,
+ command:8,
+ class:3,
+ :1,
+ pu:2,
+ :2,
+ timer:8;
+ uint32_t abort_tag;
+ uint32_t request_tag:16,
+ remote_xid:16;
+ uint32_t ebde_cnt:4,
+ :3,
+ len_loc:2,
+ qosd:1,
+ :1,
+ xbl:1,
+ hlm:1,
+ iod:1,
+ dbde:1,
+ wqes:1,
+ pri:3,
+ pv:1,
+ eat:1,
+ xc:1,
+ sr:1,
+ ccpe:1,
+ ccp:8;
+ uint32_t cmd_type:4,
+ :3,
+ wqec:1,
+ :8,
+ cq_id:16;
+ uint32_t sequence_payload_len;
+ uint32_t rsvd13;
+ uint32_t rsvd14;
+ uint32_t rsvd15;
+#else
+#error big endian version not defined
+#endif
+} sli4_xmit_sequence64_wqe_t;
+
+/**
+ * @brief WQE used to unblock the specified XRI and release it to the SLI Port's free pool.
+ */
+typedef struct sli4_requeue_xri_wqe_s {
+ uint32_t rsvd0;
+ uint32_t rsvd1;
+ uint32_t rsvd2;
+ uint32_t rsvd3;
+ uint32_t rsvd4;
+ uint32_t rsvd5;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t xri_tag:16,
+ context_tag:16;
+ uint32_t :2,
+ ct:2,
+ :4,
+ command:8,
+ class:3,
+ :1,
+ pu:2,
+ :2,
+ timer:8;
+ uint32_t rsvd8;
+ uint32_t request_tag:16,
+ :16;
+ uint32_t ebde_cnt:4,
+ :3,
+ len_loc:2,
+ qosd:1,
+ wchn:1,
+ xbl:1,
+ hlm:1,
+ iod:1,
+ dbde:1,
+ wqes:1,
+ pri:3,
+ pv:1,
+ eat:1,
+ xc:1,
+ :1,
+ ccpe:1,
+ ccp:8;
+ uint32_t cmd_type:4,
+ :3,
+ wqec:1,
+ :8,
+ cq_id:16;
+ uint32_t rsvd12;
+ uint32_t rsvd13;
+ uint32_t rsvd14;
+ uint32_t rsvd15;
+#else
+#error big endian version not defined
+#endif
+} sli4_requeue_xri_wqe_t;
+
+/**
+ * @brief WQE used to send a single frame sequence to broadcast address
+ */
+typedef struct sli4_xmit_bcast64_wqe_s {
+ sli4_bde_t sequence_payload;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t sequence_payload_length;
+ uint32_t rsvd4;
+ uint32_t :8,
+ df_ctl:8,
+ type:8,
+ r_ctl:8;
+ uint32_t xri_tag:16,
+ context_tag:16;
+ uint32_t :2,
+ ct:2,
+ :4,
+ command:8,
+ class:3,
+ :1,
+ pu:2,
+ :2,
+ timer:8;
+ uint32_t abort_tag;
+ uint32_t request_tag:16,
+ temporary_rpi:16;
+ uint32_t ebde_cnt:4,
+ :3,
+ len_loc:2,
+ qosd:1,
+ :1,
+ xbl:1,
+ hlm:1,
+ iod:1,
+ dbde:1,
+ wqes:1,
+ pri:3,
+ pv:1,
+ eat:1,
+ xc:1,
+ :1,
+ ccpe:1,
+ ccp:8;
+ uint32_t cmd_type:4,
+ :3,
+ wqec:1,
+ :8,
+ cq_id:16;
+ uint32_t rsvd12;
+ uint32_t rsvd13;
+ uint32_t rsvd14;
+ uint32_t rsvd15;
+#else
+#error big endian version not defined
+#endif
+} sli4_xmit_bcast64_wqe_t;
+
+/**
+ * @brief WQE used to create a BLS response.
+ */
+typedef struct sli4_xmit_bls_rsp_wqe_s {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t payload_word0;
+ uint32_t rx_id:16,
+ ox_id:16;
+ uint32_t high_seq_cnt:16,
+ low_seq_cnt:16;
+ uint32_t rsvd3;
+ uint32_t local_n_port_id:24,
+ :8;
+ uint32_t remote_id:24,
+ :6,
+ ar:1,
+ xo:1;
+ uint32_t xri_tag:16,
+ context_tag:16;
+ uint32_t :2,
+ ct:2,
+ :4,
+ command:8,
+ class:3,
+ :1,
+ pu:2,
+ :2,
+ timer:8;
+ uint32_t abort_tag;
+ uint32_t request_tag:16,
+ :16;
+ uint32_t ebde_cnt:4,
+ :3,
+ len_loc:2,
+ qosd:1,
+ :1,
+ xbl:1,
+ hlm:1,
+ iod:1,
+ dbde:1,
+ wqes:1,
+ pri:3,
+ pv:1,
+ eat:1,
+ xc:1,
+ :1,
+ ccpe:1,
+ ccp:8;
+ uint32_t cmd_type:4,
+ :3,
+ wqec:1,
+ :8,
+ cq_id:16;
+ uint32_t temporary_rpi:16,
+ :16;
+ uint32_t rsvd13;
+ uint32_t rsvd14;
+ uint32_t rsvd15;
+#else
+#error big endian version not defined
+#endif
+} sli4_xmit_bls_rsp_wqe_t;
+
+typedef enum {
+ SLI_BLS_ACC,
+ SLI_BLS_RJT,
+ SLI_BLS_MAX
+} sli_bls_type_e;
+
+typedef struct sli_bls_payload_s {
+ sli_bls_type_e type;
+ uint16_t ox_id;
+ uint16_t rx_id;
+ union {
+ struct {
+ uint32_t seq_id_validity:8,
+ seq_id_last:8,
+ :16;
+ uint16_t ox_id;
+ uint16_t rx_id;
+ uint16_t low_seq_cnt;
+ uint16_t high_seq_cnt;
+ } acc;
+ struct {
+ uint32_t vendor_unique:8,
+ reason_explanation:8,
+ reason_code:8,
+ :8;
+ } rjt;
+ } u;
+} sli_bls_payload_t;
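
A minimal sketch of preparing a BLS accept for sli_xmit_bls_rsp64_wqe() (declared later in this header); the zero-initialization and full-range SEQ_CNT values are assumptions about typical BA_ACC usage, not requirements stated by this patch:

sli_bls_payload_t bls = { 0 };

bls.type = SLI_BLS_ACC;
bls.ox_id = ox_id;			/* exchange being answered */
bls.rx_id = rx_id;
bls.u.acc.ox_id = ox_id;
bls.u.acc.rx_id = rx_id;
bls.u.acc.low_seq_cnt = UINT16_MAX;	/* assumed: accept the full SEQ_CNT range */
bls.u.acc.high_seq_cnt = UINT16_MAX;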
+
+/**
+ * @brief WQE used to create an ELS response.
+ */
+typedef struct sli4_xmit_els_rsp64_wqe_s {
+ sli4_bde_t els_response_payload;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t els_response_payload_length;
+ uint32_t s_id:24,
+ sp:1,
+ :7;
+ uint32_t remote_id:24,
+ :8;
+ uint32_t xri_tag:16,
+ context_tag:16;
+ uint32_t :2,
+ ct:2,
+ :4,
+ command:8,
+ class:3,
+ :1,
+ pu:2,
+ :2,
+ timer:8;
+ uint32_t abort_tag;
+ uint32_t request_tag:16,
+ ox_id:16;
+ uint32_t ebde_cnt:4,
+ :3,
+ len_loc:2,
+ qosd:1,
+ :1,
+ xbl:1,
+ hlm:1,
+ iod:1,
+ dbde:1,
+ wqes:1,
+ pri:3,
+ pv:1,
+ eat:1,
+ xc:1,
+ :1,
+ ccpe:1,
+ ccp:8;
+ uint32_t cmd_type:4,
+ :3,
+ wqec:1,
+ :8,
+ cq_id:16;
+ uint32_t temporary_rpi:16,
+ :16;
+ uint32_t rsvd13;
+ uint32_t rsvd14;
+ uint32_t rsvd15;
+#else
+#error big endian version not defined
+#endif
+} sli4_xmit_els_rsp64_wqe_t;
+
+/**
+ * @brief Asynchronous Event: Link State ACQE.
+ */
+typedef struct sli4_link_state_s {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t link_number:6,
+ link_type:2,
+ port_link_status:8,
+ port_duplex:8,
+ port_speed:8;
+ uint32_t port_fault:8,
+ :8,
+ logical_link_speed:16;
+ uint32_t event_tag;
+ uint32_t :8,
+ event_code:8,
+ event_type:8, /** values are protocol specific */
+ :6,
+ ae:1, /** async event - this is an ACQE */
+ val:1; /** valid - contents of CQE are valid */
+#else
+#error big endian version not defined
+#endif
+} sli4_link_state_t;
+
+
+#define SLI4_LINK_ATTN_TYPE_LINK_UP 0x01
+#define SLI4_LINK_ATTN_TYPE_LINK_DOWN 0x02
+#define SLI4_LINK_ATTN_TYPE_NO_HARD_ALPA 0x03
+
+#define SLI4_LINK_ATTN_P2P 0x01
+#define SLI4_LINK_ATTN_FC_AL 0x02
+#define SLI4_LINK_ATTN_INTERNAL_LOOPBACK 0x03
+#define SLI4_LINK_ATTN_SERDES_LOOPBACK 0x04
+
+#define SLI4_LINK_ATTN_1G 0x01
+#define SLI4_LINK_ATTN_2G 0x02
+#define SLI4_LINK_ATTN_4G 0x04
+#define SLI4_LINK_ATTN_8G 0x08
+#define SLI4_LINK_ATTN_10G 0x0a
+#define SLI4_LINK_ATTN_16G 0x10
+
+#define SLI4_LINK_TYPE_ETHERNET 0x0
+#define SLI4_LINK_TYPE_FC 0x1
+
+/**
+ * @brief Asynchronous Event: FC Link Attention Event.
+ */
+typedef struct sli4_link_attention_s {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t link_number:8,
+ attn_type:8,
+ topology:8,
+ port_speed:8;
+ uint32_t port_fault:8,
+ shared_link_status:8,
+ logical_link_speed:16;
+ uint32_t event_tag;
+ uint32_t :8,
+ event_code:8,
+ event_type:8, /** values are protocol specific */
+ :6,
+ ae:1, /** async event - this is an ACQE */
+ val:1; /** valid - contents of CQE are valid */
+#else
+#error big endian version not defined
+#endif
+} sli4_link_attention_t;
+
+/**
+ * @brief FC/FCoE event types.
+ */
+#define SLI4_LINK_STATE_PHYSICAL 0x00
+#define SLI4_LINK_STATE_LOGICAL 0x01
+
+#define SLI4_FCOE_FIP_FCF_DISCOVERED 0x01
+#define SLI4_FCOE_FIP_FCF_TABLE_FULL 0x02
+#define SLI4_FCOE_FIP_FCF_DEAD 0x03
+#define SLI4_FCOE_FIP_FCF_CLEAR_VLINK 0x04
+#define SLI4_FCOE_FIP_FCF_MODIFIED 0x05
+
+#define SLI4_GRP5_QOS_SPEED 0x01
+
+#define SLI4_FC_EVENT_LINK_ATTENTION 0x01
+#define SLI4_FC_EVENT_SHARED_LINK_ATTENTION 0x02
+
+#define SLI4_PORT_SPEED_NO_LINK 0x0
+#define SLI4_PORT_SPEED_10_MBPS 0x1
+#define SLI4_PORT_SPEED_100_MBPS 0x2
+#define SLI4_PORT_SPEED_1_GBPS 0x3
+#define SLI4_PORT_SPEED_10_GBPS 0x4
+
+#define SLI4_PORT_DUPLEX_NONE 0x0
+#define SLI4_PORT_DUPLEX_HWF 0x1
+#define SLI4_PORT_DUPLEX_FULL 0x2
+
+#define SLI4_PORT_LINK_STATUS_PHYSICAL_DOWN 0x0
+#define SLI4_PORT_LINK_STATUS_PHYSICAL_UP 0x1
+#define SLI4_PORT_LINK_STATUS_LOGICAL_DOWN 0x2
+#define SLI4_PORT_LINK_STATUS_LOGICAL_UP 0x3
+
+/**
+ * @brief Asynchronous Event: FCoE/FIP ACQE.
+ */
+typedef struct sli4_fcoe_fip_s {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t event_information;
+ uint32_t fcf_count:16,
+ fcoe_event_type:16;
+ uint32_t event_tag;
+ uint32_t :8,
+ event_code:8,
+ event_type:8, /** values are protocol specific */
+ :6,
+ ae:1, /** async event - this is an ACQE */
+ val:1; /** valid - contents of CQE are valid */
+#else
+#error big endian version not defined
+#endif
+} sli4_fcoe_fip_t;
+
+/**
+ * @brief FC/FCoE WQ completion queue entry.
+ */
+typedef struct sli4_fc_wcqe_s {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t hw_status:8,
+ status:8,
+ request_tag:16;
+ uint32_t wqe_specific_1;
+ uint32_t wqe_specific_2;
+ uint32_t :15,
+ qx:1,
+ code:8,
+ pri:3,
+ pv:1,
+ xb:1,
+ :2,
+ vld:1;
+#else
+#error big endian version not defined
+#endif
+} sli4_fc_wcqe_t;
+
+/**
+ * @brief FC/FCoE WQ consumed CQ queue entry.
+ */
+typedef struct sli4_fc_wqec_s {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t :32;
+ uint32_t :32;
+ uint32_t wqe_index:16,
+ wq_id:16;
+ uint32_t :16,
+ code:8,
+ :7,
+ vld:1;
+#else
+#error big endian version not defined
+#endif
+} sli4_fc_wqec_t;
+
+/**
+ * @brief FC/FCoE Completion Status Codes.
+ */
+#define SLI4_FC_WCQE_STATUS_SUCCESS 0x00
+#define SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE 0x01
+#define SLI4_FC_WCQE_STATUS_REMOTE_STOP 0x02
+#define SLI4_FC_WCQE_STATUS_LOCAL_REJECT 0x03
+#define SLI4_FC_WCQE_STATUS_NPORT_RJT 0x04
+#define SLI4_FC_WCQE_STATUS_FABRIC_RJT 0x05
+#define SLI4_FC_WCQE_STATUS_NPORT_BSY 0x06
+#define SLI4_FC_WCQE_STATUS_FABRIC_BSY 0x07
+#define SLI4_FC_WCQE_STATUS_LS_RJT 0x09
+#define SLI4_FC_WCQE_STATUS_CMD_REJECT 0x0b
+#define SLI4_FC_WCQE_STATUS_FCP_TGT_LENCHECK 0x0c
+#define SLI4_FC_WCQE_STATUS_RQ_BUF_LEN_EXCEEDED 0x11
+#define SLI4_FC_WCQE_STATUS_RQ_INSUFF_BUF_NEEDED 0x12
+#define SLI4_FC_WCQE_STATUS_RQ_INSUFF_FRM_DISC 0x13
+#define SLI4_FC_WCQE_STATUS_RQ_DMA_FAILURE 0x14
+#define SLI4_FC_WCQE_STATUS_FCP_RSP_TRUNCATE 0x15
+#define SLI4_FC_WCQE_STATUS_DI_ERROR 0x16
+#define SLI4_FC_WCQE_STATUS_BA_RJT 0x17
+#define SLI4_FC_WCQE_STATUS_RQ_INSUFF_XRI_NEEDED 0x18
+#define SLI4_FC_WCQE_STATUS_RQ_INSUFF_XRI_DISC 0x19
+#define SLI4_FC_WCQE_STATUS_RX_ERROR_DETECT 0x1a
+#define SLI4_FC_WCQE_STATUS_RX_ABORT_REQUEST 0x1b
+
+/* Driver-generated status codes; these must not overlap with the chip's status codes. */
+#define SLI4_FC_WCQE_STATUS_TARGET_WQE_TIMEOUT 0xff
+#define SLI4_FC_WCQE_STATUS_SHUTDOWN 0xfe
+#define SLI4_FC_WCQE_STATUS_DISPATCH_ERROR 0xfd
+
+/**
+ * @brief DI_ERROR Extended Status
+ */
+#define SLI4_FC_DI_ERROR_GE (1 << 0) /* Guard Error */
+#define SLI4_FC_DI_ERROR_AE (1 << 1) /* Application Tag Error */
+#define SLI4_FC_DI_ERROR_RE (1 << 2) /* Reference Tag Error */
+#define SLI4_FC_DI_ERROR_TDPV (1 << 3) /* Total Data Placed Valid */
+#define SLI4_FC_DI_ERROR_UDB (1 << 4) /* Uninitialized DIF Block */
+#define SLI4_FC_DI_ERROR_EDIR (1 << 5) /* Error direction */
+
+/**
+ * @brief Local Reject Reason Codes.
+ */
+#define SLI4_FC_LOCAL_REJECT_MISSING_CONTINUE 0x01
+#define SLI4_FC_LOCAL_REJECT_SEQUENCE_TIMEOUT 0x02
+#define SLI4_FC_LOCAL_REJECT_INTERNAL_ERROR 0x03
+#define SLI4_FC_LOCAL_REJECT_INVALID_RPI 0x04
+#define SLI4_FC_LOCAL_REJECT_NO_XRI 0x05
+#define SLI4_FC_LOCAL_REJECT_ILLEGAL_COMMAND 0x06
+#define SLI4_FC_LOCAL_REJECT_XCHG_DROPPED 0x07
+#define SLI4_FC_LOCAL_REJECT_ILLEGAL_FIELD 0x08
+#define SLI4_FC_LOCAL_REJECT_NO_ABORT_MATCH 0x0c
+#define SLI4_FC_LOCAL_REJECT_TX_DMA_FAILED 0x0d
+#define SLI4_FC_LOCAL_REJECT_RX_DMA_FAILED 0x0e
+#define SLI4_FC_LOCAL_REJECT_ILLEGAL_FRAME 0x0f
+#define SLI4_FC_LOCAL_REJECT_NO_RESOURCES 0x11
+#define SLI4_FC_LOCAL_REJECT_FCP_CONF_FAILURE 0x12
+#define SLI4_FC_LOCAL_REJECT_ILLEGAL_LENGTH 0x13
+#define SLI4_FC_LOCAL_REJECT_UNSUPPORTED_FEATURE 0x14
+#define SLI4_FC_LOCAL_REJECT_ABORT_IN_PROGRESS 0x15
+#define SLI4_FC_LOCAL_REJECT_ABORT_REQUESTED 0x16
+#define SLI4_FC_LOCAL_REJECT_RCV_BUFFER_TIMEOUT 0x17
+#define SLI4_FC_LOCAL_REJECT_LOOP_OPEN_FAILURE 0x18
+#define SLI4_FC_LOCAL_REJECT_LINK_DOWN 0x1a
+#define SLI4_FC_LOCAL_REJECT_CORRUPTED_DATA 0x1b
+#define SLI4_FC_LOCAL_REJECT_CORRUPTED_RPI 0x1c
+#define SLI4_FC_LOCAL_REJECT_OUT_OF_ORDER_DATA 0x1d
+#define SLI4_FC_LOCAL_REJECT_OUT_OF_ORDER_ACK 0x1e
+#define SLI4_FC_LOCAL_REJECT_DUP_FRAME 0x1f
+#define SLI4_FC_LOCAL_REJECT_LINK_CONTROL_FRAME 0x20
+#define SLI4_FC_LOCAL_REJECT_BAD_HOST_ADDRESS 0x21
+#define SLI4_FC_LOCAL_REJECT_MISSING_HDR_BUFFER 0x23
+#define SLI4_FC_LOCAL_REJECT_MSEQ_CHAIN_CORRUPTED 0x24
+#define SLI4_FC_LOCAL_REJECT_ABORTMULT_REQUESTED 0x25
+#define SLI4_FC_LOCAL_REJECT_BUFFER_SHORTAGE 0x28
+#define SLI4_FC_LOCAL_REJECT_RCV_XRIBUF_WAITING 0x29
+#define SLI4_FC_LOCAL_REJECT_INVALID_VPI 0x2e
+#define SLI4_FC_LOCAL_REJECT_MISSING_XRIBUF 0x30
+#define SLI4_FC_LOCAL_REJECT_INVALID_RELOFFSET 0x40
+#define SLI4_FC_LOCAL_REJECT_MISSING_RELOFFSET 0x41
+#define SLI4_FC_LOCAL_REJECT_INSUFF_BUFFERSPACE 0x42
+#define SLI4_FC_LOCAL_REJECT_MISSING_SI 0x43
+#define SLI4_FC_LOCAL_REJECT_MISSING_ES 0x44
+#define SLI4_FC_LOCAL_REJECT_INCOMPLETE_XFER 0x45
+#define SLI4_FC_LOCAL_REJECT_SLER_FAILURE 0x46
+#define SLI4_FC_LOCAL_REJECT_SLER_CMD_RCV_FAILURE 0x47
+#define SLI4_FC_LOCAL_REJECT_SLER_REC_RJT_ERR 0x48
+#define SLI4_FC_LOCAL_REJECT_SLER_REC_SRR_RETRY_ERR 0x49
+#define SLI4_FC_LOCAL_REJECT_SLER_SRR_RJT_ERR 0x4a
+#define SLI4_FC_LOCAL_REJECT_SLER_RRQ_RJT_ERR 0x4c
+#define SLI4_FC_LOCAL_REJECT_SLER_RRQ_RETRY_ERR 0x4d
+#define SLI4_FC_LOCAL_REJECT_SLER_ABTS_ERR 0x4e
+
+typedef struct sli4_fc_async_rcqe_s {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t :8,
+ status:8,
+ rq_element_index:12,
+ :4;
+ uint32_t rsvd1;
+ uint32_t fcfi:6,
+ rq_id:10,
+ payload_data_placement_length:16;
+ uint32_t sof_byte:8,
+ eof_byte:8,
+ code:8,
+ header_data_placement_length:6,
+ :1,
+ vld:1;
+#else
+#error big endian version not defined
+#endif
+} sli4_fc_async_rcqe_t;
+
+typedef struct sli4_fc_async_rcqe_v1_s {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t :8,
+ status:8,
+ rq_element_index:12,
+ :4;
+ uint32_t fcfi:6,
+ :26;
+ uint32_t rq_id:16,
+ payload_data_placement_length:16;
+ uint32_t sof_byte:8,
+ eof_byte:8,
+ code:8,
+ header_data_placement_length:6,
+ :1,
+ vld:1;
+#else
+#error big endian version not defined
+#endif
+} sli4_fc_async_rcqe_v1_t;
+
+#define SLI4_FC_ASYNC_RQ_SUCCESS 0x10
+#define SLI4_FC_ASYNC_RQ_BUF_LEN_EXCEEDED 0x11
+#define SLI4_FC_ASYNC_RQ_INSUFF_BUF_NEEDED 0x12
+#define SLI4_FC_ASYNC_RQ_INSUFF_BUF_FRM_DISC 0x13
+#define SLI4_FC_ASYNC_RQ_DMA_FAILURE 0x14
+
+typedef struct sli4_fc_coalescing_rcqe_s {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t :8,
+ status:8,
+ rq_element_index:12,
+ :4;
+ uint32_t rsvd1;
+ uint32_t rq_id:16,
+ sequence_reporting_placement_length:16;
+ uint32_t :16,
+ code:8,
+ :7,
+ vld:1;
+#else
+#error big endian version not defined
+#endif
+} sli4_fc_coalescing_rcqe_t;
+
+#define SLI4_FC_COALESCE_RQ_SUCCESS 0x10
+#define SLI4_FC_COALESCE_RQ_INSUFF_XRI_NEEDED 0x18
+
+typedef struct sli4_fc_optimized_write_cmd_cqe_s {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t :8,
+ status:8,
+ rq_element_index:15,
+ iv:1;
+ uint32_t fcfi:6,
+ :8,
+ oox:1,
+ agxr:1,
+ xri:16;
+ uint32_t rq_id:16,
+ payload_data_placement_length:16;
+ uint32_t rpi:16,
+ code:8,
+ header_data_placement_length:6,
+ :1,
+ vld:1;
+#else
+#error big endian version not defined
+#endif
+} sli4_fc_optimized_write_cmd_cqe_t;
+
+typedef struct sli4_fc_optimized_write_data_cqe_s {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t hw_status:8,
+ status:8,
+ xri:16;
+ uint32_t total_data_placed;
+ uint32_t extended_status;
+ uint32_t :16,
+ code:8,
+ pri:3,
+ pv:1,
+ xb:1,
+ rha:1,
+ :1,
+ vld:1;
+#else
+#error big endian version not defined
+#endif
+} sli4_fc_optimized_write_data_cqe_t;
+
+typedef struct sli4_fc_xri_aborted_cqe_s {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint32_t :8,
+ status:8,
+ :16;
+ uint32_t extended_status;
+ uint32_t xri:16,
+ remote_xid:16;
+ uint32_t :16,
+ code:8,
+ xr:1,
+ :3,
+ eo:1,
+ br:1,
+ ia:1,
+ vld:1;
+#else
+#error big endian version not defined
+#endif
+} sli4_fc_xri_aborted_cqe_t;
+
+/**
+ * Code definitions applicable to all FC/FCoE CQE types.
+ */
+#define SLI4_CQE_CODE_OFFSET 14
+
+#define SLI4_CQE_CODE_WORK_REQUEST_COMPLETION 0x01
+#define SLI4_CQE_CODE_RELEASE_WQE 0x02
+#define SLI4_CQE_CODE_RQ_ASYNC 0x04
+#define SLI4_CQE_CODE_XRI_ABORTED 0x05
+#define SLI4_CQE_CODE_RQ_COALESCING 0x06
+#define SLI4_CQE_CODE_RQ_CONSUMPTION 0x07
+#define SLI4_CQE_CODE_MEASUREMENT_REPORTING 0x08
+#define SLI4_CQE_CODE_RQ_ASYNC_V1 0x09
+#define SLI4_CQE_CODE_OPTIMIZED_WRITE_CMD 0x0B
+#define SLI4_CQE_CODE_OPTIMIZED_WRITE_DATA 0x0C
+
+extern int32_t sli_fc_process_link_state(sli4_t *, void *);
+extern int32_t sli_fc_process_link_attention(sli4_t *, void *);
+extern int32_t sli_fc_cqe_parse(sli4_t *, sli4_queue_t *, uint8_t *, sli4_qentry_e *, uint16_t *);
+extern uint32_t sli_fc_response_length(sli4_t *, uint8_t *);
+extern uint32_t sli_fc_io_length(sli4_t *, uint8_t *);
+extern int32_t sli_fc_els_did(sli4_t *, uint8_t *, uint32_t *);
+extern uint32_t sli_fc_ext_status(sli4_t *, uint8_t *);
+extern int32_t sli_fc_rqe_rqid_and_index(sli4_t *, uint8_t *, uint16_t *, uint32_t *);
+extern int32_t sli_fc_process_fcoe(sli4_t *, void *);
+extern int32_t sli_cmd_fcoe_wq_create(sli4_t *, void *, size_t, ocs_dma_t *, uint16_t, uint16_t);
+extern int32_t sli_cmd_fcoe_wq_create_v1(sli4_t *, void *, size_t, ocs_dma_t *, uint16_t, uint16_t);
+extern int32_t sli_cmd_fcoe_wq_destroy(sli4_t *, void *, size_t, uint16_t);
+extern int32_t sli_cmd_fcoe_post_sgl_pages(sli4_t *, void *, size_t, uint16_t, uint32_t, ocs_dma_t **, ocs_dma_t **,
+ ocs_dma_t *);
+extern int32_t sli_cmd_fcoe_rq_create(sli4_t *, void *, size_t, ocs_dma_t *, uint16_t, uint16_t, uint16_t);
+extern int32_t sli_cmd_fcoe_rq_create_v1(sli4_t *, void *, size_t, ocs_dma_t *, uint16_t, uint16_t, uint16_t);
+extern int32_t sli_cmd_fcoe_rq_destroy(sli4_t *, void *, size_t, uint16_t);
+extern int32_t sli_cmd_fcoe_read_fcf_table(sli4_t *, void *, size_t, ocs_dma_t *, uint16_t);
+extern int32_t sli_cmd_fcoe_post_hdr_templates(sli4_t *, void *, size_t, ocs_dma_t *, uint16_t, ocs_dma_t *);
+extern int32_t sli_cmd_fcoe_rediscover_fcf(sli4_t *, void *, size_t, uint16_t);
+extern int32_t sli_fc_rq_alloc(sli4_t *, sli4_queue_t *, uint32_t, uint32_t, sli4_queue_t *, uint16_t, uint8_t);
+extern int32_t sli_fc_rq_set_alloc(sli4_t *, uint32_t, sli4_queue_t *[], uint32_t, uint32_t, uint32_t, uint32_t, uint16_t);
+extern uint32_t sli_fc_get_rpi_requirements(sli4_t *, uint32_t);
+extern int32_t sli_abort_wqe(sli4_t *, void *, size_t, sli4_abort_type_e, uint32_t, uint32_t, uint32_t, uint16_t, uint16_t);
+
+extern int32_t sli_els_request64_wqe(sli4_t *, void *, size_t, ocs_dma_t *, uint8_t, uint32_t, uint32_t, uint8_t, uint16_t, uint16_t, uint16_t, ocs_remote_node_t *);
+extern int32_t sli_fcp_iread64_wqe(sli4_t *, void *, size_t, ocs_dma_t *, uint32_t, uint32_t, uint16_t, uint16_t, uint16_t, uint32_t, ocs_remote_node_t *, uint8_t, uint8_t, uint8_t);
+extern int32_t sli_fcp_iwrite64_wqe(sli4_t *, void *, size_t, ocs_dma_t *, uint32_t, uint32_t, uint32_t, uint16_t, uint16_t, uint16_t, uint32_t, ocs_remote_node_t *, uint8_t, uint8_t, uint8_t);
+extern int32_t sli_fcp_icmnd64_wqe(sli4_t *, void *, size_t, ocs_dma_t *, uint16_t, uint16_t, uint16_t, uint32_t, ocs_remote_node_t *, uint8_t);
+
+extern int32_t sli_fcp_treceive64_wqe(sli4_t *, void *, size_t, ocs_dma_t *, uint32_t, uint32_t, uint32_t, uint16_t, uint16_t, uint16_t, uint16_t, uint32_t, ocs_remote_node_t *, uint32_t, uint8_t, uint8_t, uint8_t, uint32_t);
+extern int32_t sli_fcp_trsp64_wqe(sli4_t *, void *, size_t, ocs_dma_t *, uint32_t, uint16_t, uint16_t, uint16_t, uint16_t, uint32_t, ocs_remote_node_t *, uint32_t, uint8_t, uint8_t, uint32_t);
+extern int32_t sli_fcp_tsend64_wqe(sli4_t *, void *, size_t, ocs_dma_t *, uint32_t, uint32_t, uint32_t, uint16_t, uint16_t, uint16_t, uint16_t, uint32_t, ocs_remote_node_t *, uint32_t, uint8_t, uint8_t, uint8_t, uint32_t);
+extern int32_t sli_fcp_cont_treceive64_wqe(sli4_t *, void*, size_t, ocs_dma_t *, uint32_t, uint32_t, uint32_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint32_t, ocs_remote_node_t *, uint32_t, uint8_t, uint8_t, uint8_t, uint32_t);
+extern int32_t sli_gen_request64_wqe(sli4_t *, void *, size_t, ocs_dma_t *, uint32_t, uint32_t, uint8_t, uint16_t, uint16_t, uint16_t, ocs_remote_node_t *, uint8_t, uint8_t, uint8_t);
+extern int32_t sli_send_frame_wqe(sli4_t *sli4, void *buf, size_t size, uint8_t sof, uint8_t eof, uint32_t *hdr,
+ ocs_dma_t *payload, uint32_t req_len, uint8_t timeout,
+ uint16_t xri, uint16_t req_tag);
+extern int32_t sli_xmit_sequence64_wqe(sli4_t *, void *, size_t, ocs_dma_t *, uint32_t, uint8_t, uint16_t, uint16_t, uint16_t, ocs_remote_node_t *, uint8_t, uint8_t, uint8_t);
+extern int32_t sli_xmit_bcast64_wqe(sli4_t *, void *, size_t, ocs_dma_t *, uint32_t, uint8_t, uint16_t, uint16_t, uint16_t, ocs_remote_node_t *, uint8_t, uint8_t, uint8_t);
+extern int32_t sli_xmit_bls_rsp64_wqe(sli4_t *, void *, size_t, sli_bls_payload_t *, uint16_t, uint16_t, uint16_t, ocs_remote_node_t *, uint32_t);
+extern int32_t sli_xmit_els_rsp64_wqe(sli4_t *, void *, size_t, ocs_dma_t *, uint32_t, uint16_t, uint16_t, uint16_t, uint16_t, ocs_remote_node_t *, uint32_t, uint32_t);
+extern int32_t sli_requeue_xri_wqe(sli4_t *, void *, size_t, uint16_t, uint16_t, uint16_t);
+extern void sli4_cmd_lowlevel_set_watchdog(sli4_t *sli4, void *buf, size_t size, uint16_t timeout);
+
+/**
+ * @ingroup sli_fc
+ * @brief Retrieve the received header and payload length.
+ *
+ * @param sli4 SLI context.
+ * @param cqe Pointer to the CQ entry.
+ * @param len_hdr Pointer where the header length is written.
+ * @param len_data Pointer where the payload length is written.
+ *
+ * @return Returns 0 on success, or a non-zero value on failure.
+ */
+static inline int32_t
+sli_fc_rqe_length(sli4_t *sli4, void *cqe, uint32_t *len_hdr, uint32_t *len_data)
+{
+ sli4_fc_async_rcqe_t *rcqe = cqe;
+
+ *len_hdr = *len_data = 0;
+
+ if (SLI4_FC_ASYNC_RQ_SUCCESS == rcqe->status) {
+ *len_hdr = rcqe->header_data_placement_length;
+ *len_data = rcqe->payload_data_placement_length;
+ return 0;
+ } else {
+ return -1;
+ }
+}
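
Typical use once the CQE code identifies an RQ completion; a short sketch (process_frame() is a hypothetical consumer):

uint32_t len_hdr, len_data;

if (sli_fc_rqe_length(sli4, cqe, &len_hdr, &len_data) == 0) {
	/* success: the SLI Port placed len_hdr header bytes and
	 * len_data payload bytes into the RQ buffers */
	process_frame(cqe, len_hdr, len_data);	/* hypothetical */
} else {
	/* status was not SLI4_FC_ASYNC_RQ_SUCCESS: drop or recover */
}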
+
+/**
+ * @ingroup sli_fc
+ * @brief Retrieve the received FCFI.
+ *
+ * @param sli4 SLI context.
+ * @param cqe Pointer to the CQ entry.
+ *
+ * @return Returns the FCFI in the CQE, or UINT8_MAX if the CQE code is invalid.
+ */
+static inline uint8_t
+sli_fc_rqe_fcfi(sli4_t *sli4, void *cqe)
+{
+ uint8_t code = ((uint8_t*)cqe)[SLI4_CQE_CODE_OFFSET];
+ uint8_t fcfi = UINT8_MAX;
+
+ switch(code) {
+ case SLI4_CQE_CODE_RQ_ASYNC: {
+ sli4_fc_async_rcqe_t *rcqe = cqe;
+ fcfi = rcqe->fcfi;
+ break;
+ }
+ case SLI4_CQE_CODE_RQ_ASYNC_V1: {
+ sli4_fc_async_rcqe_v1_t *rcqev1 = cqe;
+ fcfi = rcqev1->fcfi;
+ break;
+ }
+ case SLI4_CQE_CODE_OPTIMIZED_WRITE_CMD: {
+ sli4_fc_optimized_write_cmd_cqe_t *opt_wr = cqe;
+ fcfi = opt_wr->fcfi;
+ break;
+ }
+ }
+
+ return fcfi;
+}
+
+extern const char *sli_fc_get_status_string(uint32_t status);
+
+#endif /* !_SLI4_H */
+
diff --git a/sys/dev/ocs_fc/version.h b/sys/dev/ocs_fc/version.h
new file mode 100644
index 000000000000..0f5d90d67b26
--- /dev/null
+++ b/sys/dev/ocs_fc/version.h
@@ -0,0 +1,84 @@
+/*-
+ * Copyright (c) 2017 Broadcom. All rights reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#define STR_BE_BRANCH "0"
+#define STR_BE_BUILD "9999"
+#define STR_BE_DOT "0"
+#define STR_BE_MINOR "0"
+#define STR_BE_MAJOR "0"
+
+#define BE_BRANCH 0
+#define BE_BUILD 9999
+#define BE_DOT 0
+#define BE_MINOR 0
+#define BE_MAJOR 0
+
+#define MGMT_BRANCH 0
+#define MGMT_BUILDNUM 476
+#define MGMT_MINOR 100
+#define MGMT_MAJOR 2
+
+#define BE_REDBOOT_VERSION "2.0.5.0"
+
+//start-auto
+#define BUILD_MONTH "6"
+#define BUILD_MONTH_NAME "June"
+#define BUILD_DAY "12"
+#define BUILD_YEAR "2009"
+#define BUILD_24HOUR "5"
+#define BUILD_12HOUR "5"
+#define BUILD_AM_PM "AM"
+#define BUILD_MIN "37"
+#define BUILD_SEC "17"
+#define BUILD_MONTH_NUMBER 6
+#define BUILD_DAY_NUMBER 12
+#define BUILD_YEAR_NUMBER 2009
+#define BUILD_24HOUR_NUMBER 5
+#define BUILD_12HOUR_NUMBER 5
+#define BUILD_MIN_NUMBER 37
+#define BUILD_SEC_NUMBER 17
+#undef MAJOR_BUILD
+#undef MINOR_BUILD
+#undef DOT_BUILD
+#undef NUMBERED_BUILD
+#undef BRANCH_BUILD
+//end-auto
+
+#define ELX_FCOE_XROM_BIOS_VER "7.03a1"
+#define ELX_FCoE_X86_VER "4.02a1"
+#define ELX_FCoE_EFI_VER "5.01a1"
+#define ELX_FCoE_FCODE_VER "4.01a0"
+#define ELX_PXE_BIOS_VER "3.00a5"
+#define ELX_UEFI_NIC_VER "2.10A10"
+#define ELX_UEFI_FCODE_VER "1.10A1"
+#define ELX_ISCSI_BIOS_VER "1.00A8"
diff --git a/sys/modules/Makefile b/sys/modules/Makefile
index 2648443bf82b..28d3c7ebee82 100644
--- a/sys/modules/Makefile
+++ b/sys/modules/Makefile
@@ -296,6 +296,7 @@ SUBDIR= \
${_nvram} \
${_nxge} \
oce \
+ ${_ocs_fc} \
otus \
${_otusfw} \
ow \
@@ -609,6 +610,7 @@ _lio= lio
.endif
_nctgpio= nctgpio
_ndis= ndis
+_ocs_fc= ocs_fc
_pccard= pccard
.if ${MK_OFED} != "no" || defined(ALL_MODULES)
_rdma= rdma
diff --git a/sys/modules/ocs_fc/Makefile b/sys/modules/ocs_fc/Makefile
new file mode 100644
index 000000000000..ffd730eb08c4
--- /dev/null
+++ b/sys/modules/ocs_fc/Makefile
@@ -0,0 +1,45 @@
+# $FreeBSD$
+
+.PATH: ${SRCTOP}/sys/dev/ocs_fc
+KMOD = ocs_fc
+
+SRCS = \
+ device_if.h \
+ bus_if.h \
+ pci_if.h \
+ opt_scsi.h \
+ opt_cam.h
+
+# OS
+SRCS += ocs_pci.c ocs_ioctl.c ocs_os.c ocs_utils.c
+
+# hw
+SRCS += ocs_hw.c ocs_hw_queues.c
+
+# SLI
+SRCS += sli4.c ocs_sm.c
+
+# Transport
+SRCS += \
+ ocs_device.c \
+ ocs_xport.c \
+ ocs_domain.c \
+ ocs_sport.c \
+ ocs_els.c \
+ ocs_fabric.c \
+ ocs_io.c \
+ ocs_node.c \
+ ocs_scsi.c \
+ ocs_unsol.c \
+ ocs_ddump.c \
+ ocs_mgmt.c
+
+
+# CAM initiator/target
+SRCS += ocs_cam.c
+
+CINCS = -I.
+
+CLEANFILES += ${PROG}.debug ${PROG}.symbols cscope.* .depend.*
+
+.include <bsd.kmod.mk>