diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.15p3/drivers/scsi/Config.in linux.gamma/drivers/scsi/Config.in
--- linux.15p3/drivers/scsi/Config.in	Mon Nov 12 11:59:25 2001
+++ linux.gamma/drivers/scsi/Config.in	Mon Nov 19 11:42:23 2001
@@ -50,6 +50,7 @@
 dep_tristate 'Adaptec AHA152X/2825 support' CONFIG_SCSI_AHA152X $CONFIG_SCSI
 dep_tristate 'Adaptec AHA1542 support' CONFIG_SCSI_AHA1542 $CONFIG_SCSI
 dep_tristate 'Adaptec AHA1740 support' CONFIG_SCSI_AHA1740 $CONFIG_SCSI
+dep_tristate 'Adaptec AACRAID support' CONFIG_SCSI_AACRAID $CONFIG_SCSI
 source drivers/scsi/aic7xxx/Config.in
 if [ "$CONFIG_SCSI_AIC7XXX" != "y" ]; then
    dep_tristate 'Old Adaptec AIC7xxx support' CONFIG_SCSI_AIC7XXX_OLD $CONFIG_SCSI
diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.15p3/drivers/scsi/Makefile linux.gamma/drivers/scsi/Makefile
--- linux.15p3/drivers/scsi/Makefile	Mon Nov 12 11:59:25 2001
+++ linux.gamma/drivers/scsi/Makefile	Wed Nov 28 19:09:49 2001
@@ -24,11 +24,10 @@
 export-objs	:= scsi_syms.o 53c700.o
 mod-subdirs	:= pcmcia ../acorn/scsi
 
-
+subdir-$(CONFIG_SCSI_AACRAID)	+= aacraid
 subdir-$(CONFIG_SCSI_AIC7XXX)	+= aic7xxx
 subdir-$(CONFIG_PCMCIA)		+= pcmcia
 
-
 obj-$(CONFIG_SCSI)		+= scsi_mod.o
 
 obj-$(CONFIG_A4000T_SCSI)	+= amiga7xx.o	53c7xx.o
@@ -64,6 +63,9 @@
 obj-$(CONFIG_SCSI_AHA152X)	+= aha152x.o
 obj-$(CONFIG_SCSI_AHA1542)	+= aha1542.o
 obj-$(CONFIG_SCSI_AHA1740)	+= aha1740.o
+ifeq ($(CONFIG_SCSI_AACRAID),y)
+  obj-$(CONFIG_SCSI_AACRAID)	+= aacraid/aacraid.o
+endif
 ifeq ($(CONFIG_SCSI_AIC7XXX),y)
 obj-$(CONFIG_SCSI_AIC7XXX)	+= aic7xxx/aic7xxx_drv.o
 endif
@@ -203,3 +205,6 @@
 	mv script.h 53c700_d.h
 
 53c700.o: 53c700_d.h
+
+aacraid.o:
+	$(MAKE) -C aacraid
diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.15p3/drivers/scsi/aacraid/Makefile linux.gamma/drivers/scsi/aacraid/Makefile
--- linux.15p3/drivers/scsi/aacraid/Makefile	Thu Jan  1 01:00:00 1970
+++ linux.gamma/drivers/scsi/aacraid/Makefile	Thu Nov 29 20:14:24 2001
@@ -0,0 +1,15 @@
+list-multi := aacraid.o
+
+aacraid-objs := linit.o aachba.o commctrl.o comminit.o commsup.o dpcsup.o rx.o sap1sup.o
+
+EXTRA_CFLAGS += -I./include -I..
+
+aacraid.o: $(aacraid-objs)
+	$(LD) -r -o $@ $(aacraid-objs)
+
+obj-$(CONFIG_SCSI_AACRAID) += aacraid.o
+
+include $(TOPDIR)/Rules.make
+
+clean:
+	rm -f *.o
diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.15p3/drivers/scsi/aacraid/README linux.gamma/drivers/scsi/aacraid/README
--- linux.15p3/drivers/scsi/aacraid/README	Thu Jan  1 01:00:00 1970
+++ linux.gamma/drivers/scsi/aacraid/README	Fri Nov 30 11:00:05 2001
@@ -0,0 +1,40 @@
+AACRAID Driver for Linux (take two)
+
+Introduction
+-------------------------
+The aacraid driver adds support for Adaptec (http://www.adaptec.com)
+OEM-based RAID controllers. This is a major rewrite of the original
+Adaptec-supplied driver. Both the code and the running binary size
+have been significantly cleaned up (the module is less than half the
+size of the original).
+
+This isn't a finished project, but the structural work is now done.
+This driver is experimental.
+
+Supported Cards/Chipsets
+-------------------------
+	Dell Computer Corporation PERC 2 Quad Channel
+	Dell Computer Corporation PERC 2/Si
+	Dell Computer Corporation PERC 3/Si
+	Dell Computer Corporation PERC 3/Di
+	HP NetRAID-4M
+
+Probably Supported Devices
+-------------------------
+	Any and all Adaptec-branded AAC964/5400 series RAID controllers.
+
+People
+-------------------------
+Alan Cox <Alan@redhat.com>
+
+Original Driver
+-------------------------
+Adaptec Unix OEM Product Group
+
+Mailing List
+-------------------------
+None currently. Also note that this is very different from Brian's original
+driver, so don't expect him to support it.
+
+Original by Brian Boerner February 2001
+Rewritten by Alan Cox, November 2001
diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.15p3/drivers/scsi/aacraid/TODO linux.gamma/drivers/scsi/aacraid/TODO
--- linux.15p3/drivers/scsi/aacraid/TODO	Thu Jan  1 01:00:00 1970
+++ linux.gamma/drivers/scsi/aacraid/TODO	Fri Nov 30 14:08:25 2001
@@ -0,0 +1,3 @@
+o	Finish auditing for 32/64-bit cleanliness
+o	Use the PCI DMA mapping API for the SCSI scatter-gather entries handed to us (sketched below)
+o	Make the driver work on big-endian systems
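The second TODO item amounts to replacing the virt_to_bus() scatter-gather loops in aac_read()/aac_write() with the PCI DMA mapping API. A minimal sketch of what that could look like for the read path, assuming dev->pdev and the sgmap layout used in aachba.c below; the helper name and struct sgmap are assumptions, and the completion callback would need a matching pci_unmap_sg():

/*
 * Sketch only, not part of the patch: build the firmware SG list with
 * pci_map_sg() instead of virt_to_bus().  aac_build_read_sg and the
 * struct sgmap layout are assumptions based on the code in aachba.c,
 * which already includes <linux/pci.h> and "aacraid.h".
 */
static unsigned long aac_build_read_sg(struct aac_dev *dev, Scsi_Cmnd *cmd,
				       struct sgmap *sg)
{
	struct scatterlist *sl = (struct scatterlist *) cmd->request_buffer;
	unsigned long byte_count = 0;
	int nseg, i;

	/* PCI_DMA_TODEVICE would be used on the write path */
	nseg = pci_map_sg(dev->pdev, sl, cmd->use_sg, PCI_DMA_FROMDEVICE);
	for (i = 0; i < nseg; i++) {
		sg->sg[i].addr = (u32) sg_dma_address(&sl[i]);
		sg->sg[i].count = sg_dma_len(&sl[i]);
		byte_count += sg_dma_len(&sl[i]);
	}
	sg->count = nseg;
	return byte_count;	/* caller still compares this with readcmd->count */
}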
diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.15p3/drivers/scsi/aacraid/aachba.c linux.gamma/drivers/scsi/aacraid/aachba.c
--- linux.15p3/drivers/scsi/aacraid/aachba.c	Thu Jan  1 01:00:00 1970
+++ linux.gamma/drivers/scsi/aacraid/aachba.c	Fri Nov 30 13:57:06 2001
@@ -0,0 +1,1117 @@
+/*
+ *	Adaptec AAC series RAID controller driver
+ *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/completion.h>
+#include <asm/semaphore.h>
+#include <asm/uaccess.h>
+#define MAJOR_NR SCSI_DISK0_MAJOR	/* For DEVICE_NR() */
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "sd.h"
+
+#include "aacraid.h"
+
+/*	SCSI Commands */
+#define	SS_TEST			0x00	/* Test unit ready */
+#define SS_REZERO		0x01	/* Rezero unit */
+#define	SS_REQSEN		0x03	/* Request Sense */
+#define SS_REASGN		0x07	/* Reassign blocks */
+#define	SS_READ			0x08	/* Read 6   */
+#define	SS_WRITE		0x0A	/* Write 6  */
+#define	SS_INQUIR		0x12	/* inquiry */
+#define	SS_ST_SP		0x1B	/* Start/Stop unit */
+#define	SS_LOCK			0x1E	/* prevent/allow medium removal */
+#define SS_RESERV		0x16	/* Reserve */
+#define SS_RELES		0x17	/* Release */
+#define SS_MODESEN		0x1A	/* Mode Sense 6 */
+#define	SS_RDCAP		0x25	/* Read Capacity */
+#define	SM_READ			0x28	/* Read 10  */
+#define	SM_WRITE		0x2A	/* Write 10 */
+#define SS_SEEK			0x2B	/* Seek */
+
+/* values for inqd_pdt: Peripheral device type in plain English */
+#define	INQD_PDT_DA	0x00	/* Direct-access (DISK) device */
+#define	INQD_PDT_PROC	0x03	/* Processor device */
+#define	INQD_PDT_CHNGR	0x08	/* Changer (jukebox, scsi2) */
+#define	INQD_PDT_COMM	0x09	/* Communication device (scsi2) */
+#define	INQD_PDT_NOLUN2 0x1f	/* Unknown Device (scsi2) */
+#define	INQD_PDT_NOLUN	0x7f	/* Logical Unit Not Present */
+
+#define	INQD_PDT_DMASK	0x1F	/* Peripheral Device Type Mask */
+#define	INQD_PDT_QMASK	0xE0	/* Peripheral Device Qualifier Mask */
+
+#define	TARGET_LUN_TO_CONTAINER(target, lun)	(((lun) << 4) | (target))
+#define CONTAINER_TO_TARGET(cont)		((cont) & 0xf)
+#define CONTAINER_TO_LUN(cont)			((cont) >> 4)
+
+#define MAX_FIB_DATA (sizeof(struct hw_fib) - sizeof(FIB_HEADER))
+
+#define MAX_DRIVER_SG_SEGMENT_COUNT 17
+
+/*
+ *	Sense keys
+ */
+#define SENKEY_NO_SENSE      0x00	
+#define SENKEY_UNDEFINED     0x01	
+#define SENKEY_NOT_READY     0x02	
+#define SENKEY_MEDIUM_ERR    0x03	
+#define SENKEY_HW_ERR        0x04	
+#define SENKEY_ILLEGAL       0x05	
+#define SENKEY_ATTENTION     0x06	
+#define SENKEY_PROTECTED     0x07	
+#define SENKEY_BLANK         0x08	
+#define SENKEY_V_UNIQUE      0x09	
+#define SENKEY_CPY_ABORT     0x0A	
+#define SENKEY_ABORT         0x0B	
+#define SENKEY_EQUAL         0x0C	
+#define SENKEY_VOL_OVERFLOW  0x0D	
+#define SENKEY_MISCOMP       0x0E	
+#define SENKEY_RESERVED      0x0F	
+
+/*
+ *	Sense codes
+ */
+ 
+#define SENCODE_NO_SENSE                        0x00
+#define SENCODE_END_OF_DATA                     0x00
+#define SENCODE_BECOMING_READY                  0x04
+#define SENCODE_INIT_CMD_REQUIRED               0x04
+#define SENCODE_PARAM_LIST_LENGTH_ERROR         0x1A
+#define SENCODE_INVALID_COMMAND                 0x20
+#define SENCODE_LBA_OUT_OF_RANGE                0x21
+#define SENCODE_INVALID_CDB_FIELD               0x24
+#define SENCODE_LUN_NOT_SUPPORTED               0x25
+#define SENCODE_INVALID_PARAM_FIELD             0x26
+#define SENCODE_PARAM_NOT_SUPPORTED             0x26
+#define SENCODE_PARAM_VALUE_INVALID             0x26
+#define SENCODE_RESET_OCCURRED                  0x29
+#define SENCODE_LUN_NOT_SELF_CONFIGURED_YET     0x3E
+#define SENCODE_INQUIRY_DATA_CHANGED            0x3F
+#define SENCODE_SAVING_PARAMS_NOT_SUPPORTED     0x39
+#define SENCODE_DIAGNOSTIC_FAILURE              0x40
+#define SENCODE_INTERNAL_TARGET_FAILURE         0x44
+#define SENCODE_INVALID_MESSAGE_ERROR           0x49
+#define SENCODE_LUN_FAILED_SELF_CONFIG          0x4c
+#define SENCODE_OVERLAPPED_COMMAND              0x4E
+
+/*
+ *	Additional sense codes
+ */
+ 
+#define ASENCODE_NO_SENSE                       0x00
+#define ASENCODE_END_OF_DATA                    0x05
+#define ASENCODE_BECOMING_READY                 0x01
+#define ASENCODE_INIT_CMD_REQUIRED              0x02
+#define ASENCODE_PARAM_LIST_LENGTH_ERROR        0x00
+#define ASENCODE_INVALID_COMMAND                0x00
+#define ASENCODE_LBA_OUT_OF_RANGE               0x00
+#define ASENCODE_INVALID_CDB_FIELD              0x00
+#define ASENCODE_LUN_NOT_SUPPORTED              0x00
+#define ASENCODE_INVALID_PARAM_FIELD            0x00
+#define ASENCODE_PARAM_NOT_SUPPORTED            0x01
+#define ASENCODE_PARAM_VALUE_INVALID            0x02
+#define ASENCODE_RESET_OCCURRED                 0x00
+#define ASENCODE_LUN_NOT_SELF_CONFIGURED_YET    0x00
+#define ASENCODE_INQUIRY_DATA_CHANGED           0x03
+#define ASENCODE_SAVING_PARAMS_NOT_SUPPORTED    0x00
+#define ASENCODE_DIAGNOSTIC_FAILURE             0x80
+#define ASENCODE_INTERNAL_TARGET_FAILURE        0x00
+#define ASENCODE_INVALID_MESSAGE_ERROR          0x00
+#define ASENCODE_LUN_FAILED_SELF_CONFIG         0x00
+#define ASENCODE_OVERLAPPED_COMMAND             0x00
+
+#define BYTE0(x) (unsigned char)(x)
+#define BYTE1(x) (unsigned char)((x) >> 8)
+#define BYTE2(x) (unsigned char)((x) >> 16)
+#define BYTE3(x) (unsigned char)((x) >> 24)
+
+/*------------------------------------------------------------------------------
+ *              S T R U C T S / T Y P E D E F S
+ *----------------------------------------------------------------------------*/
+/* SCSI inquiry data */
+struct inquiry_data {
+	u8 inqd_pdt;	/* Peripheral qualifier | Peripheral Device Type  */
+	u8 inqd_dtq;	/* RMB | Device Type Qualifier  */
+	u8 inqd_ver;	/* ISO version | ECMA version | ANSI-approved version */
+	u8 inqd_rdf;	/* AENC | TrmIOP | Response data format */
+	u8 inqd_len;	/* Additional length (n-4) */
+	u8 inqd_pad1[2];/* Reserved - must be zero */
+	u8 inqd_pad2;	/* RelAdr | WBus32 | WBus16 |  Sync  | Linked |Reserved| CmdQue | SftRe */
+	u8 inqd_vid[8];	/* Vendor ID */
+	u8 inqd_pid[16];/* Product ID */
+	u8 inqd_prl[4];	/* Product Revision Level */
+};
+
+struct sense_data {
+	u8 error_code;		/* 70h (current errors), 71h(deferred errors) */
+	u8 valid:1;		/* A valid bit of one indicates that the information  */
+	/* field contains valid information as defined in the
+	 * SCSI-2 Standard.
+	 */
+	u8 segment_number;	/* Only used for COPY, COMPARE, or COPY AND VERIFY Commands */
+	u8 sense_key:4;		/* Sense Key */
+	u8 reserved:1;
+	u8 ILI:1;		/* Incorrect Length Indicator */
+	u8 EOM:1;		/* End Of Medium - reserved for random access devices */
+	u8 filemark:1;		/* Filemark - reserved for random access devices */
+
+	u8 information[4];	/* for direct-access devices, contains the unsigned 
+				 * logical block address or residue associated with 
+				 * the sense key 
+				 */
+	u8 add_sense_len;	/* number of additional sense bytes to follow this field */
+	u8 cmnd_info[4];	/* not used */
+	u8 ASC;			/* Additional Sense Code */
+	u8 ASCQ;		/* Additional Sense Code Qualifier */
+	u8 FRUC;		/* Field Replaceable Unit Code - not used */
+	u8 bit_ptr:3;		/* indicates which byte of the CDB or parameter data
+				 * was in error
+				 */
+	u8 BPV:1;		/* bit pointer valid (BPV): 1- indicates that 
+				 * the bit_ptr field has valid value
+				 */
+	u8 reserved2:2;
+	u8 CD:1;		/* command data bit: 1- illegal parameter in CDB.
+				 * 0- illegal parameter in data.
+				 */
+	u8 SKSV:1;
+	u8 field_ptr[2];	/* byte of the CDB or parameter data in error */
+};
+
+/*
+ *              M O D U L E   G L O B A L S
+ */
+ 
+static struct fsa_scsi_hba *fsa_dev[MAXIMUM_NUM_ADAPTERS];	/*  SCSI Device Instance Pointers */
+static struct sense_data sense_data[MAXIMUM_NUM_CONTAINERS];
+static void get_sd_devname(int disknum, char *buffer);
+
+/**
+ *	aac_get_containers	-	list containers
+ *	@dev: adapter to probe
+ *
+ *	Make a list of all containers on this controller
+ */
+int aac_get_containers(struct aac_dev *dev)
+{
+	struct fsa_scsi_hba *fsa_dev_ptr;
+	int index, status;
+	struct aac_query_mount *dinfo;
+	struct aac_mount *dresp;
+	struct fib * fibptr;
+	unsigned instance;
+
+	fsa_dev_ptr = &(dev->fsa_dev);
+	instance = dev->scsi_host_ptr->unique_id;
+
+	if (!(fibptr = fib_alloc(dev)))
+		return -ENOMEM;
+
+	for (index = 0; index < MAXIMUM_NUM_CONTAINERS; index++) {
+		fib_init(fibptr);
+		dinfo = (struct aac_query_mount *) fib_data(fibptr);
+
+		dinfo->command = VM_NameServe;
+		dinfo->count = index;
+		dinfo->type = FT_FILESYS;
+
+		status = fib_send(ContainerCommand,
+				    fibptr,
+				    sizeof (struct aac_query_mount),
+				    FsaNormal,
+				    1, 1,
+				    NULL, NULL);
+		if (status < 0 ) {
+			printk(KERN_WARNING "ProbeContainers: SendFIB failed.\n");
+			break;
+		}
+		dresp = (struct aac_mount *)fib_data(fibptr);
+
+		if ((dresp->status == ST_OK) &&
+		    (dresp->mnt[0].vol != CT_NONE)) {
+			fsa_dev_ptr->valid[index] = 1;
+			fsa_dev_ptr->type[index] = dresp->mnt[0].vol;
+			fsa_dev_ptr->size[index] = dresp->mnt[0].capacity;
+			if (dresp->mnt[0].state & FSCS_READONLY)
+				    fsa_dev_ptr->ro[index] = 1;
+		}
+		fib_complete(fibptr);
+		/*
+		 *	If there are no more containers, then stop asking.
+		 */
+		if ((index + 1) >= dresp->count)
+			break;
+	}
+	fib_free(fibptr);
+	fsa_dev[instance] = fsa_dev_ptr;
+	return status;
+}
+
+/**
+ *	probe_container		-	query a logical volume
+ *	@dev: device to query
+ *	@cid: container identifier
+ *
+ *	Queries the controller about the given volume. The volume information
+ *	is updated in the struct fsa_scsi_hba structure rather than returned.
+ */
+ 
+static int probe_container(struct aac_dev *dev, int cid)
+{
+	struct fsa_scsi_hba *fsa_dev_ptr;
+	int status;
+	struct aac_query_mount *dinfo;
+	struct aac_mount *dresp;
+	struct fib * fibptr;
+	unsigned instance;
+
+	fsa_dev_ptr = &(dev->fsa_dev);
+	instance = dev->scsi_host_ptr->unique_id;
+
+	if (!(fibptr = fib_alloc(dev)))
+		return -ENOMEM;
+
+	fib_init(fibptr);
+
+	dinfo = (struct aac_query_mount *)fib_data(fibptr);
+
+	dinfo->command = VM_NameServe;
+	dinfo->count = cid;
+	dinfo->type = FT_FILESYS;
+
+	status = fib_send(ContainerCommand,
+			    fibptr,
+			    sizeof(struct aac_query_mount),
+			    FsaNormal,
+			    1, 1,
+			    NULL, NULL);
+	if (status < 0) {
+		printk(KERN_WARNING "aacraid: probe_containers query failed.\n");
+		goto error;
+	}
+
+	dresp = (struct aac_mount *) fib_data(fibptr);
+
+	if ((dresp->status == ST_OK) &&
+	    (dresp->mnt[0].vol != CT_NONE)) {
+		fsa_dev_ptr->valid[cid] = 1;
+		fsa_dev_ptr->type[cid] = dresp->mnt[0].vol;
+		fsa_dev_ptr->size[cid] = dresp->mnt[0].capacity;
+		if (dresp->mnt[0].state & FSCS_READONLY)
+			fsa_dev_ptr->ro[cid] = 1;
+	}
+
+error:
+	fib_complete(fibptr);
+	fib_free(fibptr);
+
+	return status;
+}
+
+/* Local Structure to set SCSI inquiry data strings */
+struct scsi_inq {
+	char vid[8];         /* Vendor ID */
+	char pid[16];        /* Product ID */
+	char prl[4];         /* Product Revision Level */
+};
+
+/**
+ *	inqstrcpy	-	string copy
+ *	@a:	string to copy from
+ *	@b:	string to copy to
+ *
+ * 	Copy a string from one location to another
+ *	without copying the trailing \0
+ */
+
+static void inqstrcpy(char *a, char *b)
+{
+
+	while(*a != (char)0) 
+		*b++ = *a++;
+}
+
+static char *container_types[] = {
+        "None",
+        "Volume",
+        "Mirror",
+        "Stripe",
+        "RAID5",
+        "SSRW",
+        "SSRO",
+        "Morph",
+        "Legacy",
+        "RAID4",
+        "RAID10",             
+        "RAID00",             
+        "V-MIRRORS",          
+        "PSEUDO R4",          
+	"RAID50",
+        "Unknown"
+};
+
+
+
+/* Function: setinqstr
+ *
+ * Arguments: [1] int devtype [2] pointer to void [3] int tindex
+ *
+ * Purpose: Sets SCSI inquiry data strings for vendor, product
+ * and revision level. Allows strings to be set in platform-dependent
+ * files instead of in OS-dependent driver source.
+ */
+
+static void setinqstr(int devtype, void *data, int tindex)
+{
+	struct scsi_inq *str;
+	char *findit;
+	struct aac_driver_ident *mp;
+	extern struct aac_driver_ident aac_drivers[];	/* HACK FIXME */
+
+	mp = &aac_drivers[devtype];
+   
+	str = (struct scsi_inq *)(data); /* cast data to scsi inq block */
+
+	inqstrcpy (mp->vname, str->vid); 
+	inqstrcpy (mp->model, str->pid); /* last six chars reserved for vol type */
+
+	findit = str->pid;
+
+	for ( ; *findit != ' '; findit++);	/* walk till we find a space... */
+	findit++;				/* ...then step past it */
+	
+	if (tindex < (sizeof(container_types)/sizeof(char *))){
+		inqstrcpy (container_types[tindex], findit);
+	}
+	inqstrcpy ("0001", str->prl);
+}
+
+void set_sense(char *sense_buf, u8 sense_key, u8 sense_code,
+		    u8 a_sense_code, u8 incorrect_length,
+		    u8 bit_pointer, unsigned field_pointer,
+		    unsigned long residue)
+{
+	sense_buf[0] = 0xF0;	/* Sense data valid, err code 70h (current error) */
+	sense_buf[1] = 0;	/* Segment number, always zero */
+
+	if (incorrect_length) {
+		sense_buf[2] = sense_key | 0x20;	/* Set the ILI bit | sense key */
+		sense_buf[3] = BYTE3(residue);
+		sense_buf[4] = BYTE2(residue);
+		sense_buf[5] = BYTE1(residue);
+		sense_buf[6] = BYTE0(residue);
+	} else
+		sense_buf[2] = sense_key;	/* Sense key */
+
+	if (sense_key == SENKEY_ILLEGAL)
+		sense_buf[7] = 10;	/* Additional sense length */
+	else
+		sense_buf[7] = 6;	/* Additional sense length */
+
+	sense_buf[12] = sense_code;	/* Additional sense code */
+	sense_buf[13] = a_sense_code;	/* Additional sense code qualifier */
+	if (sense_key == SENKEY_ILLEGAL) {
+		sense_buf[15] = 0;
+
+		if (sense_code == SENCODE_INVALID_PARAM_FIELD)
+			sense_buf[15] = 0x80;	/* Std sense key specific field */
+		/* Illegal parameter is in the parameter block */
+
+		if (sense_code == SENCODE_INVALID_CDB_FIELD)
+			sense_buf[15] = 0xc0;	/* Std sense key specific field */
+		/* Illegal parameter is in the CDB block */
+		sense_buf[15] |= bit_pointer;
+		sense_buf[16] = field_pointer >> 8;	/* MSB */
+		sense_buf[17] = field_pointer;		/* LSB */
+	}
+}
+
+static void aac_io_done(Scsi_Cmnd * scsi_cmnd_ptr)
+{
+	unsigned long cpu_flags;
+	spin_lock_irqsave(&io_request_lock, cpu_flags);
+	scsi_cmnd_ptr->scsi_done(scsi_cmnd_ptr);
+	spin_unlock_irqrestore(&io_request_lock, cpu_flags);
+}
+
+static void __aac_io_done(Scsi_Cmnd * scsi_cmnd_ptr)
+{
+	scsi_cmnd_ptr->scsi_done(scsi_cmnd_ptr);
+}
+
+static void read_callback(void *context, struct fib * fibptr)
+{
+	struct aac_dev *dev;
+	struct aac_read_reply *readreply;
+	Scsi_Cmnd *scsi_cmnd_ptr;
+	unsigned long lba;
+	int cid;
+
+	scsi_cmnd_ptr = (Scsi_Cmnd *) context;
+
+	dev = (struct aac_dev *)scsi_cmnd_ptr->host->hostdata;
+	cid =TARGET_LUN_TO_CONTAINER(scsi_cmnd_ptr->target, scsi_cmnd_ptr->lun);
+
+	lba = ((scsi_cmnd_ptr->cmnd[1] & 0x1F) << 16) | (scsi_cmnd_ptr->cmnd[2] << 8) | scsi_cmnd_ptr->cmnd[3];
+	dprintk((KERN_DEBUG "read_callback[cpu %d]: lba = %ld, t = %ld.\n", smp_processor_id(), lba, jiffies));
+
+	if (fibptr == NULL)
+		BUG();
+	readreply = (struct aac_read_reply *) fib_data(fibptr);
+	if (readreply->status == ST_OK)
+		scsi_cmnd_ptr->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
+	else {
+		printk(KERN_WARNING "read_callback: read failed, status = %d\n", readreply->status);
+		scsi_cmnd_ptr->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | CHECK_CONDITION;
+		set_sense((char *) &sense_data[cid],
+				    SENKEY_HW_ERR,
+				    SENCODE_INTERNAL_TARGET_FAILURE,
+				    ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
+				    0, 0);
+	}
+	fib_complete(fibptr);
+	fib_free(fibptr);
+
+	aac_io_done(scsi_cmnd_ptr);
+}
+
+static void write_callback(void *context, struct fib * fibptr)
+{
+	struct aac_dev *dev;
+	struct aac_write_reply *writereply;
+	Scsi_Cmnd *scsi_cmnd_ptr;
+	unsigned long lba;
+	int cid;
+
+	scsi_cmnd_ptr = (Scsi_Cmnd *) context;
+	dev = (struct aac_dev *)scsi_cmnd_ptr->host->hostdata;
+	cid = TARGET_LUN_TO_CONTAINER(scsi_cmnd_ptr->target, scsi_cmnd_ptr->lun);
+
+	lba = ((scsi_cmnd_ptr->cmnd[1] & 0x1F) << 16) | (scsi_cmnd_ptr->cmnd[2] << 8) | scsi_cmnd_ptr->cmnd[3];
+	dprintk((KERN_DEBUG "write_callback[cpu %d]: lba = %ld, t = %ld.\n", smp_processor_id(), lba, jiffies));
+	if (fibptr == NULL)
+		BUG();
+
+	writereply = (struct aac_write_reply *) fib_data(fibptr);
+	if (writereply->status == ST_OK)
+		scsi_cmnd_ptr->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
+	else {
+		printk(KERN_WARNING "write_callback: write failed, status = %d\n", writereply->status);
+		scsi_cmnd_ptr->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | CHECK_CONDITION;
+		set_sense((char *) &sense_data[cid],
+				    SENKEY_HW_ERR,
+				    SENCODE_INTERNAL_TARGET_FAILURE,
+				    ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
+				    0, 0);
+	}
+
+	fib_complete(fibptr);
+	fib_free(fibptr);
+	aac_io_done(scsi_cmnd_ptr);
+}
+
+int aac_read(Scsi_Cmnd * scsi_cmnd_ptr, int cid)
+{
+	unsigned long lba;
+	unsigned long count;
+	unsigned long byte_count;
+	int status;
+
+	struct aac_read *readcmd;
+	u16 fibsize;
+	struct aac_dev *dev;
+	struct fib * cmd_fibcontext;
+
+	dev = (struct aac_dev *)scsi_cmnd_ptr->host->hostdata;
+	/*
+	 *	Get block address and transfer length
+	 */
+	if (scsi_cmnd_ptr->cmnd[0] == SS_READ)	/* 6 byte command */
+	{
+		dprintk((KERN_DEBUG "aachba: received a read(6) command on target %d.\n", cid));
+
+		lba = ((scsi_cmnd_ptr->cmnd[1] & 0x1F) << 16) | (scsi_cmnd_ptr->cmnd[2] << 8) | scsi_cmnd_ptr->cmnd[3];
+		count = scsi_cmnd_ptr->cmnd[4];
+
+		if (count == 0)
+			count = 256;
+	} else {
+		dprintk((KERN_DEBUG "aachba: received a read(10) command on target %d.\n", cid));
+
+		lba = (scsi_cmnd_ptr->cmnd[2] << 24) | (scsi_cmnd_ptr->cmnd[3] << 16) | (scsi_cmnd_ptr->cmnd[4] << 8) | scsi_cmnd_ptr->cmnd[5];
+		count = (scsi_cmnd_ptr->cmnd[7] << 8) | scsi_cmnd_ptr->cmnd[8];
+	}
+	dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %lu, t = %ld.\n", smp_processor_id(), lba, jiffies));
+	/*
+	 *	Allocate and initialize a Fib
+	 */
+	if (!(cmd_fibcontext = fib_alloc(dev))) {
+		scsi_cmnd_ptr->result = DID_ERROR << 16;
+		aac_io_done(scsi_cmnd_ptr);
+		return (-1);
+	}
+
+	fib_init(cmd_fibcontext);
+
+	readcmd = (struct aac_read *) fib_data(cmd_fibcontext);
+	readcmd->command = VM_CtBlockRead;
+	readcmd->cid = cid;
+	readcmd->block = lba;
+	readcmd->count = count * 512;
+	readcmd->sg.count = 1;
+
+	if (readcmd->count > (64 * 1024))
+		BUG();
+	/*
+	 *	Build Scatter/Gather list
+	 */
+	if (scsi_cmnd_ptr->use_sg)	/* use scatter/gather list */
+	{
+		struct scatterlist *scatterlist_ptr;
+		int segment;
+
+		scatterlist_ptr = (struct scatterlist *) scsi_cmnd_ptr->request_buffer;
+		byte_count = 0;
+		for (segment = 0; segment < scsi_cmnd_ptr->use_sg; segment++) {
+			readcmd->sg.sg[segment].addr = (u32)virt_to_bus(scatterlist_ptr[segment].address);
+			readcmd->sg.sg[segment].count = scatterlist_ptr[segment].length;
+			byte_count += scatterlist_ptr[segment].length;
+
+			if (readcmd->sg.sg[segment].count > (64 * 1024))
+				BUG();
+		}
+		readcmd->sg.count = scsi_cmnd_ptr->use_sg;
+
+		if (readcmd->sg.count > MAX_DRIVER_SG_SEGMENT_COUNT) {
+			BUG();
+		}
+	}
+	else			/* One piece of contiguous phys mem */
+	{
+		readcmd->sg.sg[0].addr = (u32) virt_to_bus(scsi_cmnd_ptr->request_buffer);
+		readcmd->sg.sg[0].count = scsi_cmnd_ptr->request_bufflen;
+
+		byte_count = scsi_cmnd_ptr->request_bufflen;
+
+		if (readcmd->sg.sg[0].count > (64 * 1024))
+			BUG();
+	}
+	if (byte_count != readcmd->count)
+		BUG();
+	/*
+	 *	Now send the Fib to the adapter
+	 */
+	fibsize = sizeof(struct aac_read) + ((readcmd->sg.count - 1) * sizeof (struct sgentry));
+	status = fib_send(ContainerCommand, 
+			  cmd_fibcontext, 
+			  fibsize, 
+			  FsaNormal, 
+			  0, 1, 
+			  (fib_callback) read_callback, 
+			  (void *) scsi_cmnd_ptr);
+	/*
+	 *	Check that the command queued to the controller
+	 */
+	if (status == -EINPROGRESS) 
+		return 0;
+		
+	printk(KERN_WARNING "aac_read: fib_send failed with status: %d.\n", status);
+	/*
+	 *	For some reason, the Fib didn't queue, return QUEUE_FULL
+	 */
+	scsi_cmnd_ptr->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | QUEUE_FULL;
+	aac_io_done(scsi_cmnd_ptr);
+	fib_complete(cmd_fibcontext);
+	fib_free(cmd_fibcontext);
+	return -1;
+}
+
+static int aac_write(Scsi_Cmnd * scsi_cmnd_ptr, int cid)
+{
+	unsigned long lba;
+	unsigned long count;
+	unsigned long byte_count;
+	int status;
+	struct aac_write *writecmd;
+	u16 fibsize;
+	struct aac_dev *dev;
+	struct fib * cmd_fibcontext;
+
+	dev = (struct aac_dev *)scsi_cmnd_ptr->host->hostdata;
+	/*
+	 *	Get block address and transfer length
+	 */
+	if (scsi_cmnd_ptr->cmnd[0] == SS_WRITE)	/* 6 byte command */
+	{
+		lba = ((scsi_cmnd_ptr->cmnd[1] & 0x1F) << 16) | (scsi_cmnd_ptr->cmnd[2] << 8) | scsi_cmnd_ptr->cmnd[3];
+		count = scsi_cmnd_ptr->cmnd[4];
+		if (count == 0)
+			count = 256;
+	} else {
+		dprintk((KERN_DEBUG "aachba: received a write(10) command on target %d.\n", cid));
+		lba = (scsi_cmnd_ptr->cmnd[2] << 24) | (scsi_cmnd_ptr->cmnd[3] << 16) | (scsi_cmnd_ptr->cmnd[4] << 8) | scsi_cmnd_ptr->cmnd[5];
+		count = (scsi_cmnd_ptr->cmnd[7] << 8) | scsi_cmnd_ptr->cmnd[8];
+	}
+	dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %lu, t = %ld.\n", smp_processor_id(), lba, jiffies));
+	/*
+	 *	Allocate and initialize a Fib then setup a BlockWrite command
+	 */
+	if (!(cmd_fibcontext = fib_alloc(dev))) {
+		scsi_cmnd_ptr->result = DID_ERROR << 16;
+		aac_io_done(scsi_cmnd_ptr);
+		return -1;
+	}
+	fib_init(cmd_fibcontext);
+
+	writecmd = (struct aac_write *) fib_data(cmd_fibcontext);
+	writecmd->command = VM_CtBlockWrite;
+	writecmd->cid = cid;
+	writecmd->block = lba;
+	writecmd->count = count * 512;
+	writecmd->sg.count = 1;
+	/* FIXME: why isn't ->stable set up? */
+
+	if (writecmd->count > (64 * 1024)) {
+		BUG();
+	}
+	/*
+	 * Build Scatter/Gather list
+	 */
+	if (scsi_cmnd_ptr->use_sg)
+	{
+		struct scatterlist *scatterlist_ptr;
+		int segment;
+
+		scatterlist_ptr = (struct scatterlist *) scsi_cmnd_ptr->request_buffer;
+		byte_count = 0;
+		for (segment = 0; segment < scsi_cmnd_ptr->use_sg; segment++) {
+			writecmd->sg.sg[segment].addr = (u32) virt_to_bus(scatterlist_ptr[segment].address);
+			writecmd->sg.sg[segment].count = scatterlist_ptr[segment].length;
+			byte_count += scatterlist_ptr[segment].length;
+
+			if (writecmd->sg.sg[segment].count > (64 * 1024)) {
+				printk(KERN_WARNING "aac_write: Segment byte count is larger than 64K.\n");
+				BUG();
+			}
+		}
+		writecmd->sg.count = scsi_cmnd_ptr->use_sg;
+
+		if (writecmd->sg.count > MAX_DRIVER_SG_SEGMENT_COUNT) {
+			BUG();
+		}
+	}
+	else
+	{
+		writecmd->sg.sg[0].addr = (u32) virt_to_bus(scsi_cmnd_ptr->request_buffer);
+		writecmd->sg.sg[0].count =  scsi_cmnd_ptr->request_bufflen;
+		byte_count = scsi_cmnd_ptr->request_bufflen;
+
+		if (writecmd->sg.sg[0].count > (64 * 1024)) {
+			BUG();
+		}
+	}
+	if (byte_count != writecmd->count)
+		BUG();
+	/*
+	 *	Now send the Fib to the adapter
+	 */
+	fibsize = sizeof (struct aac_write) + ((writecmd->sg.count - 1) * sizeof (struct sgentry));
+
+	status = fib_send(ContainerCommand, 
+			  cmd_fibcontext,
+			  fibsize, FsaNormal,
+			  0, 1,
+			  (fib_callback) write_callback,
+			  (void *) scsi_cmnd_ptr);
+	/*
+	 *	Check that the command queued to the controller
+	 */
+	if (status == -EINPROGRESS)
+		return 0;
+
+	printk(KERN_WARNING "aac_write: fib_send failed with status: %d\n", status);
+	/*
+	 *	For some reason, the Fib didn't queue, return QUEUE_FULL
+	 */
+	scsi_cmnd_ptr->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | QUEUE_FULL;
+	aac_io_done(scsi_cmnd_ptr);
+
+	fib_complete(cmd_fibcontext);
+	fib_free(cmd_fibcontext);
+	return -1;
+}
+
+
+/**
+ *	aac_scsi_cmd		-	Process SCSI command
+ *	@scsi_cmnd_ptr:		SCSI command block
+ *
+ *	Emulate a SCSI command locally where possible, and queue the
+ *	required request to the aacraid firmware for anything that must
+ *	go out to the adapter.
+ */
+ 
+int aac_scsi_cmd(Scsi_Cmnd * scsi_cmnd_ptr)
+{
+	int cid = 0;
+	struct fsa_scsi_hba *fsa_dev_ptr;
+	int cardtype;
+	int ret;
+	struct aac_dev *dev = (struct aac_dev *)scsi_cmnd_ptr->host->hostdata;
+	
+	cardtype = dev->cardtype;
+
+	fsa_dev_ptr = fsa_dev[scsi_cmnd_ptr->host->unique_id];
+
+	/*
+	 *	If the bus, target or lun is out of range, return fail
+	 *	Test does not apply to ID 16, the pseudo id for the controller
+	 *	itself.
+	 */
+	if (scsi_cmnd_ptr->target != scsi_cmnd_ptr->host->this_id) {
+		if ((scsi_cmnd_ptr->channel > 0) ||(scsi_cmnd_ptr->target > 15) || (scsi_cmnd_ptr->lun > 7)) 
+		{
+			dprintk((KERN_DEBUG "The bus, target or lun is out of range = %d, %d, %d.\n", 
+				scsi_cmnd_ptr->channel, scsi_cmnd_ptr->target, scsi_cmnd_ptr->lun));
+			scsi_cmnd_ptr->result = DID_BAD_TARGET << 16;
+			__aac_io_done(scsi_cmnd_ptr);
+			return -1;
+		}
+		cid = TARGET_LUN_TO_CONTAINER(scsi_cmnd_ptr->target, scsi_cmnd_ptr->lun);
+		/*
+		 *	If the target container doesn't exist, it may have
+		 *	been newly created
+		 */
+		if (fsa_dev_ptr->valid[cid] == 0) {
+			switch (scsi_cmnd_ptr->cmnd[0]) {
+			case SS_INQUIR:
+			case SS_RDCAP:
+			case SS_TEST:
+				spin_unlock_irq(&io_request_lock);
+				probe_container(dev, cid);
+				spin_lock_irq(&io_request_lock);
+			default:
+				break;
+			}
+		}
+		/*
+		 *	If the target container still doesn't exist, 
+		 *	return failure
+		 */
+		if (fsa_dev_ptr->valid[cid] == 0) {
+			scsi_cmnd_ptr->result = DID_BAD_TARGET << 16;
+			__aac_io_done(scsi_cmnd_ptr);
+			return -1;
+		}
+	} 
+	else if ((scsi_cmnd_ptr->cmnd[0] != SS_INQUIR) &&	/* only INQUIRY & TUR cmnd supported for controller */
+	    (scsi_cmnd_ptr->cmnd[0] != SS_TEST)) 
+	{
+		/*
+		 *	Command aimed at the controller
+		 */
+		dprintk((KERN_WARNING "Only INQUIRY & TUR command supported for controller, rcvd = 0x%x.\n", scsi_cmnd_ptr->cmnd[0]));
+		scsi_cmnd_ptr->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | CHECK_CONDITION;
+		set_sense((char *) &sense_data[cid],
+			    SENKEY_ILLEGAL,
+			    SENCODE_INVALID_COMMAND,
+			    ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
+		__aac_io_done(scsi_cmnd_ptr);
+		return -1;
+	}
+	/* Handle commands here that don't really require going out to the adapter */
+	switch (scsi_cmnd_ptr->cmnd[0]) 
+	{
+		case SS_INQUIR:
+		{
+			struct inquiry_data *inq_data_ptr;
+
+			dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", scsi_cmnd_ptr->target));
+			inq_data_ptr = (struct inquiry_data *)scsi_cmnd_ptr->request_buffer;
+			memset(inq_data_ptr, 0, sizeof (struct inquiry_data));
+
+			inq_data_ptr->inqd_ver = 2;	/* claim compliance to SCSI-2 */
+			inq_data_ptr->inqd_dtq = 0x80;	/* set RMB bit to one indicating that the medium is removable */
+			inq_data_ptr->inqd_rdf = 2;	/* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */
+			inq_data_ptr->inqd_len = 31;
+
+			/*
+			 *	Set the Vendor, Product, and Revision Level
+			 *	see: <vendor>.c i.e. aac.c
+			 */
+			setinqstr(cardtype, (void *) (inq_data_ptr->inqd_vid), fsa_dev_ptr->type[cid]);
+			if (scsi_cmnd_ptr->target == scsi_cmnd_ptr->host->this_id)
+			    	inq_data_ptr->inqd_pdt = INQD_PDT_PROC;	/* Processor device */
+			else
+				inq_data_ptr->inqd_pdt = INQD_PDT_DA;	/* Direct/random access device */
+			scsi_cmnd_ptr->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
+			__aac_io_done(scsi_cmnd_ptr);
+			return 0;
+		}
+		case SS_RDCAP:
+		{
+			int capacity;
+			char *cp;
+
+			dprintk((KERN_DEBUG "READ CAPACITY command.\n"));
+			capacity = fsa_dev_ptr->size[cid] - 1;
+			cp = scsi_cmnd_ptr->request_buffer;
+			cp[0] = (capacity >> 24) & 0xff;
+			cp[1] = (capacity >> 16) & 0xff;
+			cp[2] = (capacity >> 8) & 0xff;
+			cp[3] = (capacity >> 0) & 0xff;
+			cp[4] = 0;
+			cp[5] = 0;
+			cp[6] = 2;
+			cp[7] = 0;
+
+			scsi_cmnd_ptr->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
+			__aac_io_done(scsi_cmnd_ptr);
+
+			return 0;
+		}
+
+		case SS_MODESEN:
+		{
+			char *mode_buf;
+
+			dprintk((KERN_DEBUG "MODE SENSE command.\n"));
+			mode_buf = scsi_cmnd_ptr->request_buffer;
+			mode_buf[0] = 0;	/* Mode data length (MSB) */
+			mode_buf[1] = 6;	/* Mode data length (LSB) */
+			mode_buf[2] = 0;	/* Medium type - default */
+			mode_buf[3] = 0;	/* Device-specific param, bit 8: 0/1 = write enabled/protected */
+			mode_buf[4] = 0;	/* reserved */
+			mode_buf[5] = 0;	/* reserved */
+			mode_buf[6] = 0;	/* Block descriptor length (MSB) */
+			mode_buf[7] = 0;	/* Block descriptor length (LSB) */
+
+			scsi_cmnd_ptr->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
+			__aac_io_done(scsi_cmnd_ptr);
+
+			return 0;
+		}
+		case SS_REQSEN:
+			dprintk((KERN_DEBUG "REQUEST SENSE command.\n"));
+			memcpy(scsi_cmnd_ptr->sense_buffer, &sense_data[cid], sizeof (struct sense_data));
+			memset(&sense_data[cid], 0, sizeof (struct sense_data));
+			scsi_cmnd_ptr->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
+			__aac_io_done(scsi_cmnd_ptr);
+			return (0);
+
+		case SS_LOCK:
+			dprintk((KERN_DEBUG "LOCK command.\n"));
+			if (scsi_cmnd_ptr->cmnd[4])
+				fsa_dev_ptr->locked[cid] = 1;
+			else
+				fsa_dev_ptr->locked[cid] = 0;
+
+			scsi_cmnd_ptr->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
+			__aac_io_done(scsi_cmnd_ptr);
+			return 0;
+		/*
+		 *	These commands are all No-Ops
+		 */
+		case SS_TEST:
+		case SS_RESERV:
+		case SS_RELES:
+		case SS_REZERO:
+		case SS_REASGN:
+		case SS_SEEK:
+		case SS_ST_SP:
+			scsi_cmnd_ptr->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
+			__aac_io_done(scsi_cmnd_ptr);
+			return (0);
+	}
+
+	switch (scsi_cmnd_ptr->cmnd[0]) 
+	{
+		case SS_READ:
+		case SM_READ:
+			/*
+			 *	Hack to keep track of ordinal number of the device that
+			 *	corresponds to a container. Needed to convert
+			 *	containers to /dev/sd device names
+			 */
+			 
+			spin_unlock_irq(&io_request_lock);
+			fsa_dev_ptr->devno[cid] = DEVICE_NR(scsi_cmnd_ptr->request.rq_dev);
+			ret = aac_read(scsi_cmnd_ptr, cid);
+			spin_lock_irq(&io_request_lock);
+			return ret;
+
+		case SS_WRITE:
+		case SM_WRITE:
+			spin_unlock_irq(&io_request_lock);
+			ret = aac_write(scsi_cmnd_ptr, cid);
+			spin_lock_irq(&io_request_lock);
+			return ret;
+		default:
+			/*
+			 *	Unhandled commands
+			 */
+			printk(KERN_WARNING "Unhandled SCSI Command: 0x%x.\n", scsi_cmnd_ptr->cmnd[0]);
+			scsi_cmnd_ptr->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | CHECK_CONDITION;
+			set_sense((char *) &sense_data[cid],
+				SENKEY_ILLEGAL, SENCODE_INVALID_COMMAND,
+			ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
+			__aac_io_done(scsi_cmnd_ptr);
+			return -1;
+	}
+}
+
+static int query_disk(struct aac_dev *dev, void *arg)
+{
+	struct aac_query_disk qd;
+	struct fsa_scsi_hba *fsa_dev_ptr;
+
+	fsa_dev_ptr = &(dev->fsa_dev);
+	if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk)))
+		return -EFAULT;
+	if (qd.cnum == -1)
+		qd.cnum = TARGET_LUN_TO_CONTAINER(qd.target, qd.lun);
+	else if ((qd.bus == -1) && (qd.target == -1) && (qd.lun == -1)) 
+	{
+		if (qd.cnum < 0 || qd.cnum >= MAXIMUM_NUM_CONTAINERS)
+			return -EINVAL;
+		qd.instance = dev->scsi_host_ptr->host_no;
+		qd.bus = 0;
+		qd.target = CONTAINER_TO_TARGET(qd.cnum);
+		qd.lun = CONTAINER_TO_LUN(qd.cnum);
+	}
+	else return -EINVAL;
+
+	qd.valid = fsa_dev_ptr->valid[qd.cnum];
+	qd.locked = fsa_dev_ptr->locked[qd.cnum];
+	qd.deleted = fsa_dev_ptr->deleted[qd.cnum];
+
+	if (fsa_dev_ptr->devno[qd.cnum] == -1)
+		qd.unmapped = 1;
+	else
+		qd.unmapped = 0;
+
+	get_sd_devname(fsa_dev_ptr->devno[qd.cnum], qd.name);
+
+	if (copy_to_user(arg, &qd, sizeof (struct aac_query_disk)))
+		return -EFAULT;
+	return 0;
+}
+
+static void get_sd_devname(int disknum, char *buffer)
+{
+	if (disknum < 0) {
+		sprintf(buffer, "%s", "");
+		return;
+	}
+
+	if (disknum < 26)
+		sprintf(buffer, "sd%c", 'a' + disknum);
+	else {
+		unsigned int min1;
+		unsigned int min2;
+		/*
+		 * For larger numbers of disks, we need to go to a new
+		 * naming scheme.
+		 */
+		min1 = disknum / 26;
+		min2 = disknum % 26;
+		sprintf(buffer, "sd%c%c", 'a' + min1 - 1, 'a' + min2);
+	}
+}
+
+static int force_delete_disk(struct aac_dev *dev, void *arg)
+{
+	struct aac_delete_disk dd;
+	struct fsa_scsi_hba *fsa_dev_ptr;
+
+	fsa_dev_ptr = &(dev->fsa_dev);
+
+	if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
+		return -EFAULT;
+
+	if (dd.cnum >= MAXIMUM_NUM_CONTAINERS)
+		return -EINVAL;
+	/*
+	 *	Mark this container as being deleted.
+	 */
+	fsa_dev_ptr->deleted[dd.cnum] = 1;
+	/*
+	 *	Mark the container as no longer valid
+	 */
+	fsa_dev_ptr->valid[dd.cnum] = 0;
+	return 0;
+}
+
+static int delete_disk(struct aac_dev *dev, void *arg)
+{
+	struct aac_delete_disk dd;
+	struct fsa_scsi_hba *fsa_dev_ptr;
+
+	fsa_dev_ptr = &(dev->fsa_dev);
+
+	if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
+		return -EFAULT;
+
+	if (dd.cnum >= MAXIMUM_NUM_CONTAINERS)
+		return -EINVAL;
+	/*
+	 *	If the container is locked, it can not be deleted by the API.
+	 */
+	if (fsa_dev_ptr->locked[dd.cnum])
+		return -EBUSY;
+	else {
+		/*
+		 *	Mark the container as no longer being valid.
+		 */
+		fsa_dev_ptr->valid[dd.cnum] = 0;
+		fsa_dev_ptr->devno[dd.cnum] = -1;
+		return 0;
+	}
+}
+
+int aac_dev_ioctl(struct aac_dev *dev, int cmd, void *arg)
+{
+	switch (cmd) {
+	case FSACTL_QUERY_DISK:
+		return query_disk(dev, arg);
+	case FSACTL_DELETE_DISK:
+		return delete_disk(dev, arg);
+	case FSACTL_FORCE_DELETE_DISK:
+		return force_delete_disk(dev, arg);
+	case 2131:
+		return aac_get_containers(dev);
+	default:
+		return -ENOTTY;
+	}
+}
+
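Most of the error paths above finish by calling set_sense() and completing the command with CHECK_CONDITION; the saved buffer is then handed back verbatim on a later REQUEST SENSE. For reference, a small sketch that pulls the interesting fields back out of a buffer written by set_sense(); the struct and function names here are illustrative, not from aacraid.h:

/*
 * Sketch: decode the fixed-format sense data that set_sense() writes.
 * Byte 2 low nibble = sense key, byte 12 = ASC, byte 13 = ASCQ and,
 * for SENKEY_ILLEGAL, bytes 16-17 = the offending CDB/parameter offset.
 */
struct aac_decoded_sense {		/* illustrative helper type */
	unsigned char key;
	unsigned char asc;
	unsigned char ascq;
	unsigned int field_ptr;
};

static void aac_decode_sense(const unsigned char *sense_buf,
			     struct aac_decoded_sense *out)
{
	out->key = sense_buf[2] & 0x0f;		/* strip the ILI bit */
	out->asc = sense_buf[12];
	out->ascq = sense_buf[13];
	out->field_ptr = (sense_buf[16] << 8) | sense_buf[17];
}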
diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.15p3/drivers/scsi/aacraid/commctrl.c linux.gamma/drivers/scsi/aacraid/commctrl.c
--- linux.15p3/drivers/scsi/aacraid/commctrl.c	Thu Jan  1 01:00:00 1970
+++ linux.gamma/drivers/scsi/aacraid/commctrl.c	Fri Nov 30 13:57:16 2001
@@ -0,0 +1,386 @@
+/*
+ *	Adaptec AAC series RAID controller driver
+ *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ *  commctrl.c
+ *
+ * Abstract: Contains all routines for control of the AFA comm layer
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/completion.h>
+#include <linux/blk.h>
+#include <asm/semaphore.h>
+#include <asm/uaccess.h>
+#include "scsi.h"
+#include "hosts.h"
+
+#include "aacraid.h"
+
+/**
+ *	ioctl_send_fib	-	send a FIB from userspace
+ *	@dev:	adapter being processed
+ *	@arg:	arguments to the ioctl call
+ *	
+ *	This routine sends a fib to the adapter on behalf of a user level
+ *	program.
+ */
+ 
+static int ioctl_send_fib(struct aac_dev * dev, void *arg)
+{
+	struct hw_fib * kfib;
+	struct fib *fibptr;
+
+	fibptr = fib_alloc(dev);
+	if(fibptr == NULL)
+		return -ENOMEM;
+		
+	kfib = fibptr->fib;
+	/*
+	 *	First copy in the header so that we can check the size field.
+	 */
+	if (copy_from_user((void *)kfib, arg, sizeof(struct aac_fibhdr))) {
+		fib_free(fibptr);
+		return -EFAULT;
+	}
+	/*
+	 *	Since we copy based on the fib header size, make sure that we
+	 *	will not overrun the buffer when we copy the memory. Return
+	 *	an error if we would.
+	 */
+	if(kfib->header.Size > sizeof(struct hw_fib) - sizeof(struct aac_fibhdr)) {
+		fib_free(fibptr);
+		return -EINVAL;
+	}
+
+	if (copy_from_user((void *) kfib, arg, kfib->header.Size + sizeof(struct aac_fibhdr))) {
+		fib_free(fibptr);
+		return -EFAULT;
+	}
+
+	if (kfib->header.Command == TakeABreakPt) {
+		aac_adapter_interrupt(dev);
+		/*
+		 * Since we didn't really send a fib, zero out the state to allow 
+		 * cleanup code not to assert.
+		 */
+		kfib->header.XferState = 0;
+	} else {
+		if (fib_send(kfib->header.Command, fibptr, kfib->header.Size , FsaNormal,
+			1, 1, NULL, NULL) != 0) 
+		{
+			fib_free(fibptr);
+			return -EINVAL;
+		}
+		if (fib_complete(fibptr) != 0) {
+			fib_free(fibptr);
+			return -EINVAL;
+		}
+	}
+	/*
+	 *	Make sure that the size returned by the adapter (which includes
+	 *	the header) is less than or equal to the size of a fib, so we
+	 *	don't corrupt application data. Then copy that size to the user
+	 *	buffer. (Don't try to add the header information again, since it
+	 *	was already included by the adapter.)
+	 */
+
+	if (copy_to_user(arg, (void *)kfib, kfib->header.Size)) {
+		fib_free(fibptr);
+		return -EFAULT;
+	}
+	fib_free(fibptr);
+	return 0;
+}
+
+/**
+ *	open_getadapter_fib	-	create an adapter fib context
+ *	@dev: adapter to use
+ *	@arg: ioctl argument; receives a pointer to the new context on
+ *	which the caller can later wait for adapter-initiated fibs.
+ */
+
+static int open_getadapter_fib(struct aac_dev * dev, void *arg)
+{
+	struct aac_fib_context * fibctx;
+	int status;
+	unsigned long flags;
+
+	fibctx = kmalloc(sizeof(struct aac_fib_context), GFP_KERNEL);
+	if (fibctx == NULL) {
+		status = -ENOMEM;
+	} else {
+		fibctx->type = FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT;
+		fibctx->size = sizeof(struct aac_fib_context);
+		/*
+		 *	Initialize the mutex used to wait for the next AIF.
+		 */
+		init_MUTEX_LOCKED(&fibctx->wait_sem);
+		fibctx->wait = 0;
+		/*
+		 *	Initialize the fibs and set the count of fibs on
+		 *	the list to 0.
+		 */
+		fibctx->count = 0;
+		INIT_LIST_HEAD(&fibctx->fibs);
+		fibctx->jiffies = jiffies/HZ;
+		/*
+		 *	Now add this context onto the adapter's 
+		 *	AdapterFibContext list.
+		 */
+		spin_lock_irqsave(&dev->fib_lock, flags);
+		list_add_tail(&fibctx->next, &dev->fib_list);
+		spin_unlock_irqrestore(&dev->fib_lock, flags);
+		if (copy_to_user(arg,  &fibctx, sizeof(struct aac_fib_context *))) {
+			status = -EFAULT;
+		} else {
+			status = 0;
+		}	
+	}
+	return status;
+}
+
+/**
+ *	next_getadapter_fib	-	get the next fib
+ *	@dev: adapter to use
+ *	@arg: ioctl argument
+ *	
+ * 	This routine will get the next Fib, if available, from the AdapterFibContext
+ *	passed in from the user.
+ */
+
+static int next_getadapter_fib(struct aac_dev * dev, void *arg)
+{
+	struct fib_ioctl f;
+	struct aac_fib_context *fibctx, *aifcp;
+	struct hw_fib * fib;
+	int status;
+	struct list_head * entry;
+	int found;
+	unsigned long flags;
+	
+	if(copy_from_user((void *)&f, arg, sizeof(struct fib_ioctl)))
+		return -EFAULT;
+	/*
+	 *	Extract the AdapterFibContext from the Input parameters.
+	 */
+	fibctx = (struct aac_fib_context *) f.fibctx;
+
+	/*
+	 *	Verify that the HANDLE passed in was a valid AdapterFibContext
+	 *
+	 *	Search the list of AdapterFibContext addresses on the adapter
+	 *	to be sure this is a valid address
+	 */
+	found = 0;
+	entry = dev->fib_list.next;
+
+	while(entry != &dev->fib_list) {
+		aifcp = CONTAINING_RECORD(entry, struct aac_fib_context, next);
+		if(fibctx == aifcp) {   /* We found a winner */
+			found = 1;
+			break;
+		}
+		entry = entry->next;
+	}
+	if (found == 0)
+		return -EINVAL;
+
+	if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
+		 (fibctx->size != sizeof(struct aac_fib_context)))
+		return -EINVAL;
+	status = 0;
+	spin_lock_irqsave(&dev->fib_lock, flags);
+	/*
+	 *	If there are no fibs to send back, then either wait or return
+	 *	-EAGAIN
+	 */
+return_fib:
+	if (!list_empty(&fibctx->fibs)) {
+		struct list_head * entry;
+		/*
+		 *	Pull the next fib from the fibs
+		 */
+		entry = fibctx->fibs.next;
+		list_del(entry);
+		
+		fib = CONTAINING_RECORD(entry, struct hw_fib, header.FibLinks);
+		fibctx->count--;
+		spin_unlock_irqrestore(&dev->fib_lock, flags);
+		if (copy_to_user(f.fib, fib, sizeof(struct hw_fib))) {
+			kfree(fib);
+			return -EFAULT;
+		}	
+		/*
+		 *	Free the space occupied by this copy of the fib.
+		 */
+		kfree(fib);
+		status = 0;
+		fibctx->jiffies = jiffies/HZ;
+	} else {
+		spin_unlock_irqrestore(&dev->fib_lock, flags);
+		if (f.wait) {
+			if(down_interruptible(&fibctx->wait_sem) != 0) {
+				status = -EINTR;
+			} else {
+				/* Lock again and retry */
+				spin_lock_irqsave(&dev->fib_lock, flags);
+				goto return_fib;
+			}
+		} else {
+			status = -EAGAIN;
+		}	
+	}
+	return status;
+}
+
+int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx)
+{
+	struct hw_fib *fib;
+
+	/*
+	 *	First free any FIBs that have not been consumed.
+	 */
+	while (!list_empty(&fibctx->fibs)) {
+		struct list_head * entry;
+		/*
+		 *	Pull the next fib from the fibs
+		 */
+		entry = fibctx->fibs.next;
+		list_del(entry);
+		fib = CONTAINING_RECORD( entry, struct hw_fib, header.FibLinks );
+		fibctx->count--;
+		/*
+		 *	Free the space occupied by this copy of the fib.
+		 */
+		kfree(fib);
+	}
+	/*
+	 *	Remove the Context from the AdapterFibContext List
+	 */
+	list_del(&fibctx->next);
+	/*
+	 *	Invalidate context
+	 */
+	fibctx->type = 0;
+	/*
+	 *	Free the space occupied by the Context
+	 */
+	kfree(fibctx);
+	return 0;
+}
+
+/**
+ *	close_getadapter_fib	-	close down user fib context
+ *	@dev: adapter
+ *	@arg: ioctl arguments
+ *
+ *	This routine will close down the fibctx passed in from the user.
+ */
+ 
+static int close_getadapter_fib(struct aac_dev * dev, void *arg)
+{
+	struct aac_fib_context *fibctx, *aifcp;
+	int status;
+	unsigned long flags;
+	struct list_head * entry;
+	int found;
+
+	/*
+	 *	Extract the fibctx from the input parameters
+	 */
+	fibctx = arg;
+
+	/*
+	 *	Verify that the HANDLE passed in was a valid AdapterFibContext
+	 *
+	 *	Search the list of AdapterFibContext addresses on the adapter
+	 *	to be sure this is a valid address
+	 */
+
+	found = 0;
+	entry = dev->fib_list.next;
+
+	while(entry != &dev->fib_list) {
+		aifcp = CONTAINING_RECORD(entry, struct aac_fib_context, next);
+		if(fibctx == aifcp) {   /* We found a winner */
+			found = 1;
+			break;
+		}
+		entry = entry->next;
+	}
+
+	if(found == 0)
+		return 0; /* Already gone */
+
+	if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
+		 (fibctx->size != sizeof(struct aac_fib_context))) {
+		return -EINVAL;
+	}
+	spin_lock_irqsave(&dev->fib_lock, flags);
+	status = aac_close_fib_context(dev, fibctx);
+	spin_unlock_irqrestore(&dev->fib_lock, flags);
+	return status;
+}
+
+
+int aac_do_ioctl(struct aac_dev * dev, int cmd, void *arg)
+{
+	int status;
+	
+	/*
+	 *	HBA gets first crack
+	 */
+	 
+	status = aac_dev_ioctl(dev, cmd, arg);
+	if(status != -ENOTTY)
+		return status;
+
+	switch (cmd) {
+	case FSACTL_SENDFIB:
+		status = ioctl_send_fib(dev, arg);
+		break;
+	case FSACTL_OPEN_GET_ADAPTER_FIB:
+		status = open_getadapter_fib(dev, arg);
+		break;
+	case FSACTL_GET_NEXT_ADAPTER_FIB:
+		status = next_getadapter_fib(dev, arg);
+		break;
+	case FSACTL_CLOSE_GET_ADAPTER_FIB:
+		status = close_getadapter_fib(dev, arg);
+		break;
+	default:
+		status = -ENOTTY;
+	  	break;	
+	}
+	return status;
+}
+
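Both next_getadapter_fib() and close_getadapter_fib() validate the handle from userspace by walking dev->fib_list with CONTAINING_RECORD(), which is the same pattern as the kernel's list_entry(): recover the enclosing aac_fib_context from its embedded list_head. A minimal sketch of that check factored out (hypothetical helper; the caller is assumed to hold dev->fib_lock):

/*
 * Sketch: is this user-supplied context really on the adapter's list?
 * list_entry() is equivalent to the CONTAINING_RECORD() macro used above.
 */
static int aac_fibctx_valid(struct aac_dev *dev, struct aac_fib_context *fibctx)
{
	struct list_head *entry;

	for (entry = dev->fib_list.next; entry != &dev->fib_list;
	     entry = entry->next) {
		if (list_entry(entry, struct aac_fib_context, next) != fibctx)
			continue;
		/* Found it: now check that it still looks like a context */
		return (fibctx->type == FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) &&
		       (fibctx->size == sizeof(struct aac_fib_context));
	}
	return 0;	/* unknown handle: reject it */
}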
diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.15p3/drivers/scsi/aacraid/comminit.c linux.gamma/drivers/scsi/aacraid/comminit.c
--- linux.15p3/drivers/scsi/aacraid/comminit.c	Thu Jan  1 01:00:00 1970
+++ linux.gamma/drivers/scsi/aacraid/comminit.c	Fri Nov 30 13:59:28 2001
@@ -0,0 +1,332 @@
+/*
+ *	Adaptec AAC series RAID controller driver
+ *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ *  comminit.c
+ *
+ * Abstract: This supports the initialization of the host adapter communication interface.
+ *    This is a platform-dependent module for the PCI Cyclone board.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/blk.h>
+#include <asm/semaphore.h>
+#include "scsi.h"
+#include "hosts.h"
+
+#include "aacraid.h"
+
+struct aac_common aac_config;
+
+static struct aac_dev *devices;
+
+static int aac_alloc_comm(struct aac_dev *dev, void **CommHeaderAddress, unsigned long CommAreaSize, unsigned long CommAreaAlignment)
+{
+	unsigned char *base;
+	unsigned long size, BytesToAlign;
+	unsigned long fibsize = 4096;
+	unsigned long printfbufsiz = 256;
+	struct aac_init *init;
+	dma_addr_t phys;
+
+	size = fibsize + sizeof(struct aac_init) + CommAreaSize + CommAreaAlignment + printfbufsiz;
+
+	base = pci_alloc_consistent(dev->pdev, size, &phys);
+	if(base == NULL)
+	{
+		printk(KERN_ERR "aacraid: unable to create mapping.\n");
+		return 0;
+	}
+	dev->comm_addr  = (void *)base;
+	dev->comm_phys = phys;
+	dev->comm_size     = size;
+
+	dev->init = (struct aac_init *)(base + fibsize);
+	dev->init_pa = (struct aac_init *)(phys + fibsize);
+
+	init = dev->init;
+
+	init->InitStructRevision = ADAPTER_INIT_STRUCT_REVISION;
+	init->MiniPortRevision = Sa_MINIPORT_REVISION;
+	init->fsrev = dev->fsrev;
+
+	/*
+	 *	Adapter Fibs are the first thing allocated so that they
+	 *	start page aligned
+	 */
+	init->AdapterFibsVirtualAddress = base;
+	init->AdapterFibsPhysicalAddress = (void *) phys;
+	init->AdapterFibsSize = fibsize;
+	init->AdapterFibAlign = sizeof(struct hw_fib);
+
+	/*
+	 * Increment the base address by the amount already used
+	 */
+	base = base + fibsize + sizeof(struct aac_init);
+	phys = phys + fibsize + sizeof(struct aac_init);
+	/*
+	 *	Align the beginning of Headers to CommAreaAlignment
+	 */
+	BytesToAlign = (CommAreaAlignment - ((unsigned long)(base) & (CommAreaAlignment - 1)));
+	base = base + BytesToAlign;
+	phys = phys + BytesToAlign;
+	/*
+	 *	Fill in addresses of the Comm Area Headers and Queues
+	 */
+	*CommHeaderAddress = (unsigned long *)base;
+	init->CommHeaderAddress = (void *)phys;
+	/*
+	 *	Increment the base address by the size of the CommArea
+	 */
+	base = base + CommAreaSize;
+	phys = phys + CommAreaSize;
+	/*
+	 *	 Place the Printf buffer area after the Fast I/O comm area.
+	 */
+	dev->printfbuf = (void *)base;
+	init->printfbuf = (void *)phys;
+	init->printfbufsiz = printfbufsiz;
+	memset(base, 0, printfbufsiz);
+	return 1;
+}
+    
+static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem, int qsize)
+{
+	q->numpending = 0;
+	q->dev = dev;
+	INIT_LIST_HEAD(&q->pendingq);
+	init_waitqueue_head(&q->cmdready);
+	INIT_LIST_HEAD(&q->cmdq);
+	init_waitqueue_head(&q->qfull);
+	spin_lock_init(&q->lockdata);
+	q->lock = &q->lockdata;
+	q->headers.producer = mem;
+	q->headers.consumer = mem+1;
+	*q->headers.producer = qsize;
+	*q->headers.consumer = qsize;
+	q->entries = qsize;
+}
+
+/**
+ *	aac_send_shutdown		-	shutdown an adapter
+ *	@dev: Adapter to shutdown
+ *
+ *	This routine will send a VM_CloseAll (shutdown) request to the adapter.
+ */
+
+static int aac_send_shutdown(struct aac_dev * dev)
+{
+	struct fib * fibctx;
+	struct aac_close *cmd;
+	int status;
+
+	fibctx = fib_alloc(dev);
+	fib_init(fibctx);
+
+	cmd = (struct aac_close *) fib_data(fibctx);
+
+	cmd->command = VM_CloseAll;
+	cmd->cid = 0xffffffff;
+
+	status = fib_send(ContainerCommand,
+			  fibctx,
+			  sizeof(struct aac_close),
+			  FsaNormal,
+			  1, 1,
+			  NULL, NULL);
+
+	if (status == 0)
+		fib_complete(fibctx);
+	fib_free(fibctx);
+	return status;
+}
+
+/**
+ *	aac_detach	-	detach adapter
+ *	@detach: adapter to disconnect
+ *
+ *	Disconnect and shutdown an AAC based adapter, freeing resources
+ *	as we go.
+ */
+
+int aac_detach(struct aac_dev *detach)
+{
+	struct aac_dev **dev = &devices;
+	
+	while(*dev)
+	{
+		if(*dev == detach)
+		{
+			*dev = detach->next;
+			aac_send_shutdown(detach);
+			fib_map_free(detach);
+			pci_free_consistent(detach->pdev, detach->comm_size, detach->comm_addr, detach->comm_phys);
+			kfree(detach->queues);
+			return 1;
+		}
+		dev=&((*dev)->next);
+	}
+	BUG();
+	return 0;
+}
+
+/**
+ *	aac_comm_init	-	Initialise FSA data structures
+ *	@dev:	Adapter to initialise
+ *
+ *	Initializes the data structures that are required for the FSA
+ *	communication interface to operate.
+ *	Returns
+ *		0 - if the communication interface was initialised.
+ *		-ENOMEM - if the allocation failed. This is a fatal error.
+ */
+ 
+int aac_comm_init(struct aac_dev * dev)
+{
+	unsigned long hdrsize = (sizeof(u32) * NUMBER_OF_COMM_QUEUES) * 2;
+	unsigned long queuesize = sizeof(struct aac_entry) * TOTAL_QUEUE_ENTRIES;
+	u32 *headers;
+	struct aac_entry * queues;
+	unsigned long size;
+	struct aac_queue_block * comm = dev->queues;
+
+	/*
+	 *	Initialize the spinlock used to protect the zone that
+	 *	serves as our pool of FIB context records. The zone
+	 *	itself is set up later; only its lock is initialized
+	 *	here.
+	 */
+	spin_lock_init(&dev->FibContextZoneSpinLock);
+
+	/*
+	 *	Allocate the physically contiguous space for the communication
+	 *	queue headers.
+	 */
+
+	size = hdrsize + queuesize;
+
+	if (!aac_alloc_comm(dev, (void * *)&headers, size, QUEUE_ALIGNMENT))
+		return -ENOMEM;
+
+	queues = (struct aac_entry *)((unsigned char *)headers + hdrsize);
+
+	/* Adapter to Host normal priority Command queue */ 
+	comm->queue[HostNormCmdQueue].base = queues;
+	aac_queue_init(dev, &comm->queue[HostNormCmdQueue], headers, HOST_NORM_CMD_ENTRIES);
+	queues += HOST_NORM_CMD_ENTRIES;
+	headers += 2;
+
+	/* Adapter to Host high priority command queue */
+	comm->queue[HostHighCmdQueue].base = queues;
+	aac_queue_init(dev, &comm->queue[HostHighCmdQueue], headers, HOST_HIGH_CMD_ENTRIES);
+    
+	queues += HOST_HIGH_CMD_ENTRIES;
+	headers +=2;
+
+	/* Host to adapter normal priority command queue */
+	comm->queue[AdapNormCmdQueue].base = queues;
+	aac_queue_init(dev, &comm->queue[AdapNormCmdQueue], headers, ADAP_NORM_CMD_ENTRIES);
+    
+	queues += ADAP_NORM_CMD_ENTRIES;
+	headers += 2;
+
+	/* host to adapter high priority command queue */
+	comm->queue[AdapHighCmdQueue].base = queues;
+	aac_queue_init(dev, &comm->queue[AdapHighCmdQueue], headers, ADAP_HIGH_CMD_ENTRIES);
+    
+	queues += ADAP_HIGH_CMD_ENTRIES;
+	headers += 2;
+
+	/* adapter to host normal priority response queue */
+	comm->queue[HostNormRespQueue].base = queues;
+	aac_queue_init(dev, &comm->queue[HostNormRespQueue], headers, HOST_NORM_RESP_ENTRIES);
+    
+	queues += HOST_NORM_RESP_ENTRIES;
+	headers += 2;
+
+	/* adapter to host high priority response queue */
+	comm->queue[HostHighRespQueue].base = queues;
+	aac_queue_init(dev, &comm->queue[HostHighRespQueue], headers, HOST_HIGH_RESP_ENTRIES);
+   
+	queues += HOST_HIGH_RESP_ENTRIES;
+	headers += 2;
+
+	/* host to adapter normal priority response queue */
+	comm->queue[AdapNormRespQueue].base = queues;
+	aac_queue_init(dev, &comm->queue[AdapNormRespQueue], headers, ADAP_NORM_RESP_ENTRIES);
+
+	queues += ADAP_NORM_RESP_ENTRIES;
+	headers += 2;
+	
+	/* host to adapter high priority response queue */ 
+	comm->queue[AdapHighRespQueue].base = queues;
+	aac_queue_init(dev, &comm->queue[AdapHighRespQueue], headers, ADAP_HIGH_RESP_ENTRIES);
+
+	comm->queue[AdapNormCmdQueue].lock = comm->queue[HostNormRespQueue].lock;
+	comm->queue[AdapHighCmdQueue].lock = comm->queue[HostHighRespQueue].lock;
+	comm->queue[AdapNormRespQueue].lock = comm->queue[HostNormCmdQueue].lock;
+	comm->queue[AdapHighRespQueue].lock = comm->queue[HostHighCmdQueue].lock;
+
+	return 0;
+}
+
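+/*
+ *	For illustration, the communication area set up above is laid out
+ *	roughly as follows (one producer/consumer u32 pair per queue,
+ *	followed by the queue entries themselves):
+ *
+ *		headers: [P0][C0][P1][C1] ... [P7][C7]		(hdrsize bytes)
+ *		queues:  [HostNormCmd x8][HostHighCmd x4] ...	(queuesize bytes)
+ *
+ *	which is why the code above advances headers by 2 and queues by the
+ *	entry count after initialising each queue.
+ */
+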
+struct aac_dev *aac_init_adapter(struct aac_dev *dev)
+{
+	/*
+	 *	Ok now init the communication subsystem
+	 */
+	dev->queues = (struct aac_queue_block *) kmalloc(sizeof(struct aac_queue_block), GFP_KERNEL);
+	if (dev->queues == NULL) {
+		printk(KERN_ERR "aacraid: could not allocate comm region.\n");
+		return NULL;
+	}
+	memset(dev->queues, 0, sizeof(struct aac_queue_block));
+
+	if (aac_comm_init(dev)<0)
+		return NULL;
+	/*
+	 *	Initialize the list of fibs
+	 */
+	if(fib_setup(dev)<0)
+		return NULL;
+		
+	INIT_LIST_HEAD(&dev->fib_list);
+	spin_lock_init(&dev->fib_lock);
+	init_completion(&dev->aif_completion);
+	/*
+	 *	Add this adapter in to our dev List.
+	 */
+	dev->next = devices;
+	devices = dev;
+	return dev;
+}
+
+    
diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.15p3/drivers/scsi/aacraid/commsup.c linux.gamma/drivers/scsi/aacraid/commsup.c
--- linux.15p3/drivers/scsi/aacraid/commsup.c	Thu Jan  1 01:00:00 1970
+++ linux.gamma/drivers/scsi/aacraid/commsup.c	Fri Nov 30 13:56:28 2001
@@ -0,0 +1,945 @@
+/*
+ *	Adaptec AAC series RAID controller driver
+ *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
+ *
+ * based on the old aacraid driver that is..
+
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ *  commsup.c
+ *
+ * Abstract: Contains all routines that are required for FSA host/adapter
+ *    communication.
+ *
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/completion.h>
+#include <asm/semaphore.h>
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+
+#include "aacraid.h"
+
+/**
+ *	fib_map_alloc		-	allocate the fib objects
+ *	@dev: Adapter to allocate for
+ *
+ *	Allocate and map the shared PCI space for the FIB blocks used to
+ *	talk to the Adaptec firmware.
+ */
+ 
+static int fib_map_alloc(struct aac_dev *dev)
+{
+	if((dev->hw_fib_va = pci_alloc_consistent(dev->pdev, sizeof(struct hw_fib) * AAC_NUM_FIB, &dev->hw_fib_pa))==NULL)
+		return -ENOMEM;
+	return 0;
+}
+
+/**
+ *	fib_map_free		-	free the fib objects
+ *	@dev: Adapter to free
+ *
+ *	Free the PCI mappings and the memory allocated for FIB blocks
+ *	on this adapter.
+ */
+
+void fib_map_free(struct aac_dev *dev)
+{
+	pci_free_consistent(dev->pdev, sizeof(struct hw_fib) * AAC_NUM_FIB, dev->hw_fib_va, dev->hw_fib_pa);
+}
+
+/**
+ *	fib_setup	-	setup the fibs
+ *	@dev: Adapter to set up
+ *
+ *	Allocate the PCI space for the fibs, map it and then initialise the
+ *	fib area, the unmapped fib data and also the free list
+ */
+
+int fib_setup(struct aac_dev * dev)
+{
+	struct fib *fibptr;
+	struct hw_fib *fib;
+	dma_addr_t fibpa;
+	int i;
+	
+	if(fib_map_alloc(dev)<0)
+		return -ENOMEM;
+		
+	fib = dev->hw_fib_va;
+	fibpa = dev->hw_fib_pa;
+	memset(fib, 0, sizeof(struct hw_fib) * AAC_NUM_FIB);
+	/*
+	 *	Initialise the fibs
+	 */
+	for (i = 0, fibptr = &dev->fibs[i]; i < AAC_NUM_FIB; i++, fibptr++) 
+	{
+		fibptr->dev = dev;
+		fibptr->fib = fib;
+		fibptr->data = (void *) fibptr->fib->data;
+		fibptr->next = fibptr+1;	/* Forward chain the fibs */
+		init_MUTEX_LOCKED(&fibptr->event_wait);
+		spin_lock_init(&fibptr->event_lock);
+		fib->header.XferState = 0xffffffff;
+		fib->header.SenderSize = sizeof(struct hw_fib);
+		fibptr->logicaladdr = (unsigned long) fibpa;
+		fib = (struct hw_fib *)((unsigned char *)fib + sizeof(struct hw_fib));
+		fibpa = fibpa + sizeof(struct hw_fib);
+	}
+	/*
+	 *	Add the fib chain to the free list
+	 */
+	dev->fibs[AAC_NUM_FIB-1].next = NULL;
+	/*
+	 *	Enable this to debug out of queue space
+	 */
+	dev->free_fib = &dev->fibs[0];
+	return 0;
+}
+
+/**
+ *	fib_alloc	-	allocate a fib
+ *	@dev: Adapter to allocate the fib for
+ *
+ *	Allocate a fib from the adapter fib pool. If the pool is empty we
+ *	wait for fibs to become free.
+ */
+ 
+struct fib * fib_alloc(struct aac_dev *dev)
+{
+	struct fib * fibptr;
+	unsigned long flags;
+	
+	spin_lock_irqsave(&dev->fib_lock, flags);
+	fibptr = dev->free_fib;	
+	if(!fibptr)
+		BUG();
+	dev->free_fib = fibptr->next;
+	spin_unlock_irqrestore(&dev->fib_lock, flags);
+	/*
+	 *	Set the proper node type code and node byte size
+	 */
+	fibptr->type = FSAFS_NTC_FIB_CONTEXT;
+	fibptr->size = sizeof(struct fib);
+	/*
+	 *	Null out fields that depend on being zero at the start of
+	 *	each I/O
+	 */
+	fibptr->fib->header.XferState = 0;
+	fibptr->callback = NULL;
+	fibptr->callback_data = NULL;
+
+	return fibptr;
+}
+
+/**
+ *	fib_free	-	free a fib
+ *	@fibptr: fib to free up
+ *
+ *	Frees up a fib and places it on the appropriate queue
+ *	(either free or timed out)
+ */
+ 
+void fib_free(struct fib * fibptr)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
+
+	if (fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT) {
+		aac_config.fib_timeouts++;
+		fibptr->next = fibptr->dev->timeout_fib;
+		fibptr->dev->timeout_fib = fibptr;
+	} else {
+		if (fibptr->fib->header.XferState != 0) {
+			printk(KERN_WARNING "fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n", 
+					 fibptr, fibptr->fib->header.XferState);
+		}
+		fibptr->next = fibptr->dev->free_fib;
+		fibptr->dev->free_fib = fibptr;
+	}	
+	spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
+}
+
+/**
+ *	fib_init	-	initialise a fib
+ *	@fibptr: The fib to initialize
+ *	
+ *	Set up the generic fib fields ready for use
+ */
+ 
+void fib_init(struct fib *fibptr)
+{
+	struct hw_fib *fib = fibptr->fib;
+
+	fib->header.StructType = FIB_MAGIC;
+	fib->header.Size = sizeof(struct hw_fib);
+	fib->header.XferState = HostOwned | FibInitialized | FibEmpty | FastResponseCapable;
+	fib->header.SenderFibAddress = 0;
+	fib->header.ReceiverFibAddress = 0;
+	fib->header.SenderSize = sizeof(struct hw_fib);
+}
+
+/**
+ *	fib_dealloc		-	deallocate a fib
+ *	@fibptr: fib to deallocate
+ *
+ *	Will deallocate and return to the free pool the FIB pointed to by the
+ *	caller.
+ */
+ 
+void fib_dealloc(struct fib * fibptr)
+{
+	struct hw_fib *fib = fibptr->fib;
+	if(fib->header.StructType != FIB_MAGIC) 
+		BUG();
+	fib->header.XferState = 0;        
+}
+
+/*
+ *	Communication primitives define and support the queuing method we use to
+ *	support host to adapter communication. All queue accesses happen through
+ *	these routines, which are the only routines that have knowledge of how
+ *	these queues are implemented.
+ */
+ 
+/**
+ *	aac_get_entry		-	get a queue entry
+ *	@dev: Adapter
+ *	@qid: Queue Number
+ *	@entry: Entry return
+ *	@index: Index return
+ *	@nonotify: notification control
+ *
+ *	With a priority the routine returns a queue entry if the queue has free entries. If the queue
+ *	is full (no free entries) then no entry is returned and the function returns 0, otherwise 1 is
+ *	returned.
+ */
+ 
+static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
+{
+	struct aac_queue * q;
+
+	/*
+	 *	All of the queues wrap when they reach the end, so we check
+	 *	to see if they have reached the end and if they have we just
+	 *	set the index back to zero. This is a wrap. You could OR off
+	 *	the high bits in all updates but this is a bit faster I think.
+	 */
+
+	q = &dev->queues->queue[qid];
+	
+	*index = *(q->headers.producer);
+	if (*index - 2 == *(q->headers.consumer))
+		*nonotify = 1;
+
+	if (qid == AdapHighCmdQueue) {
+		if (*index >= ADAP_HIGH_CMD_ENTRIES)
+			*index = 0;
+		/* SCALING ?? */
+	} else if (qid == AdapNormCmdQueue) {
+		if (*index >= ADAP_NORM_CMD_ENTRIES)
+			*index = 0; /* Wrap to front of the Producer Queue. */
+	} else if (qid == AdapHighRespQueue) {
+		if (*index >= ADAP_HIGH_RESP_ENTRIES)
+			*index = 0;
+	} else if (qid == AdapNormRespQueue) {
+		if (*index >= ADAP_NORM_RESP_ENTRIES)
+			*index = 0; /* Wrap to front of the Producer Queue. */
+	} else {
+		BUG();
+	}
+
+	if (*index + 1 == *(q->headers.consumer)) { /* Queue is full */
+		printk(KERN_WARNING "Queue %d full, %ld outstanding.\n",
+				qid, q->numpending);
+		return 0;
+	} else {
+		*entry = q->base + *index;
+		return 1;
+	}
+}
+
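+/*
+ *	A quick illustration of the arithmetic above, using the adapter
+ *	normal priority command queue (ADAP_NORM_CMD_ENTRIES == 512) as an
+ *	example: a producer index of 512 wraps back to 0, and the queue is
+ *	treated as full when advancing the producer would land on the
+ *	consumer, i.e. when (*index + 1) == *(q->headers.consumer).
+ */
+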
+/**
+ *	aac_queue_get		-	get the next free QE
+ *	@dev: Adapter
+ *	@index: Returned index
+ *	@qid: Queue number
+ *	@fib: Fib to associate with the queue entry
+ *	@wait: Wait if queue full
+ *	@fibptr: Driver fib object to go with fib
+ *	@nonotify: Don't notify the adapter
+ *
+ *	Gets the next free QE off the requested priority adapter command
+ *	queue and associates the Fib with the QE. The QE represented by
+ *	index is ready to insert on the queue when this routine returns
+ *	success.
+ */
+
+static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * fib, int wait, struct fib * fibptr, unsigned long *nonotify)
+{
+	struct aac_entry * entry = NULL;
+	int map = 0;
+	struct aac_queue * q = &dev->queues->queue[qid];
+		
+	spin_lock_irqsave(q->lock, q->SavedIrql);
+	    
+	if (qid == AdapHighCmdQueue || qid == AdapNormCmdQueue) {
+		/* if there are no entries wait for some if the caller wants to */
+		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
+			printk(KERN_ERR "GetEntries failed\n");
+		}
+		/*
+		 *	Setup queue entry with a command, status and fib mapped
+		 */
+		entry->size = fib->header.Size;
+		map = 1;
+	} else if (qid == AdapHighRespQueue || qid == AdapNormRespQueue) {
+		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
+			/* if there are no entries wait for some if the caller wants to */
+		}
+		/*
+		 *	Setup queue entry with command, status and fib mapped
+		 */
+		entry->size = fib->header.Size;
+		entry->addr = fib->header.SenderFibAddress;			/* Restore the adapter's pointer to the FIB */
+		fib->header.ReceiverFibAddress = fib->header.SenderFibAddress;	/* Let the adapter know where to find its data */
+		map = 0;
+	}
+	/*
+	 *	If map is set then we need to map the Fib and put pointers
+	 *	in the queue entry.
+	 */
+	if (map)
+		entry->addr = (unsigned long)(fibptr->logicaladdr);
+	return 0;
+}
+
+
+/**
+ *	aac_insert_entry	-	insert a queue entry
+ *	@dev: Adapter
+ *	@index: Index of entry to insert
+ *	@qid: Queue number
+ *	@nonotify: Suppress adapter notification
+ *
+ *	Places the queue entry at @index onto the requested adapter queue
+ *	by advancing the producer index, and notifies the adapter that a
+ *	new entry is ready unless notification has been suppressed.
+ */
+ 
+static int aac_insert_entry(struct aac_dev * dev, u32 index, u32 qid, unsigned long nonotify) 
+{
+	struct aac_queue * q = &dev->queues->queue[qid];
+
+	if(q == NULL)
+		BUG();
+	*(q->headers.producer) = index + 1;
+	spin_unlock_irqrestore(q->lock, q->SavedIrql);
+
+	if (qid == AdapHighCmdQueue ||
+	    qid == AdapNormCmdQueue ||
+	    qid == AdapHighRespQueue ||
+	    qid == AdapNormRespQueue)
+	{
+		if (!nonotify)
+			aac_adapter_notify(dev, qid);
+	}
+	else
+		printk(KERN_WARNING "Surprise insert!\n");
+	return 0;
+}
+
+/*
+ *	Define the highest level of host to adapter communication routines. 
+ *	These routines will support host to adapter FS communication. These
+ *	routines have no knowledge of the communication method used. This level
+ *	sends and receives FIBs. This level has no knowledge of how these FIBs
+ *	get passed back and forth.
+ */
+
+/**
+ *	fib_send	-	send a fib to the adapter
+ *	@command: Command to send
+ *	@fibptr: The fib
+ *	@size: Size of fib data area
+ *	@priority: Priority of Fib
+ *	@wait: Async/sync select
+ *	@reply: True if a reply is wanted
+ *	@callback: Called with reply
+ *	@callback_data: Passed to callback
+ *
+ *	Sends the requested FIB to the adapter and optionally will wait for a
+ *	response FIB. If the caller does not wish to wait for a response then
+ *	a callback must be supplied, which will be invoked when the response
+ *	FIB is received from the adapter.
+ */
+ 
+int fib_send(u16 command, struct fib * fibptr, unsigned long size,  int priority, int wait, int reply, fib_callback callback, void * callback_data)
+{
+	u32 index;
+	u32 qid;
+	struct aac_dev * dev = fibptr->dev;
+	unsigned long nointr = 0;
+	struct hw_fib * fib = fibptr->fib;
+	struct aac_queue * q;
+	unsigned long flags;
+
+	if (!(fib->header.XferState & HostOwned))
+		return -EBUSY;
+	/*
+	 *	There are 5 cases with the wait and response requested flags.
+	 *	The only invalid cases are if the caller requests to wait and
+	 *	does not request a response and if the caller does not want a
+	 *	response and the Fib is not allocated from pool. If a response
+	 *	is not requested the Fib will just be deallocated by the DPC
+	 *	routine when the response comes back from the adapter. No
+	 *	further processing will be done besides deleting the Fib. We 
+	 *	will have a debug mode where the adapter can notify the host
+	 *	it had a problem and the host can log that fact.
+	 */
+	if (wait && !reply) {
+		return -EINVAL;
+	} else if (!wait && reply) {
+		fib->header.XferState |= (Async | ResponseExpected);
+		FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
+	} else if (!wait && !reply) {
+		fib->header.XferState |= NoResponseExpected;
+		FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
+	} else if (wait && reply) {
+		fib->header.XferState |= ResponseExpected;
+		FIB_COUNTER_INCREMENT(aac_config.NormalSent);
+	} 
+	fib->header.SenderData = (unsigned long)fibptr;	/* for callback */
+	/*
+	 *	Set FIB state to indicate where it came from and if we want a
+	 *	response from the adapter. Also load the command from the
+	 *	caller.
+	 */
+	fib->header.SenderFibAddress = (unsigned long)fib;
+	fib->header.Command = command;
+	fib->header.XferState |= SentFromHost;
+	fibptr->fib->header.Flags = 0;				/* Zero the flags field - its internal only...	 */
+	/*
+	 *	Set the size of the Fib we want to send to the adapter
+	 */
+	fib->header.Size = sizeof(struct aac_fibhdr) + size;
+	if (fib->header.Size > fib->header.SenderSize) {
+		return -EMSGSIZE;
+	}                
+	/*
+	 *	Get a queue entry, connect the FIB to it and send a notify
+	 *	to the adapter that a command is ready.
+	 */
+	if (priority == FsaHigh) {
+		fib->header.XferState |= HighPriority;
+		qid = AdapHighCmdQueue;
+	} else {
+		fib->header.XferState |= NormalPriority;
+		qid = AdapNormCmdQueue;
+	}
+	q = &dev->queues->queue[qid];
+
+	if(wait)
+		spin_lock_irqsave(&fibptr->event_lock, flags);
+	if(aac_queue_get( dev, &index, qid, fib, 1, fibptr, &nointr)<0)
+		return -EWOULDBLOCK;
+	dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index));
+	dprintk((KERN_DEBUG "Fib contents:.\n"));
+	dprintk((KERN_DEBUG "  Command =               %d.\n", fib->header.Command));
+	dprintk((KERN_DEBUG "  XferState  =            %x.\n", fib->header.XferState));
+	/*
+	 *	Fill in the Callback and CallbackContext if we are not
+	 *	going to wait.
+	 */
+	if (!wait) {
+		fibptr->callback = callback;
+		fibptr->callback_data = callback_data;
+	}
+	FIB_COUNTER_INCREMENT(aac_config.FibsSent);
+	list_add_tail(&fibptr->queue, &q->pendingq);
+	q->numpending++;
+
+	fibptr->done = 0;
+
+	if(aac_insert_entry(dev, index, qid, (nointr & aac_config.irq_mod)) < 0)
+		return -EWOULDBLOCK;
+	/*
+	 *	If the caller wanted us to wait for response wait now. 
+	 */
+    
+	if (wait) {
+		spin_unlock_irqrestore(&fibptr->event_lock, flags);
+		down(&fibptr->event_wait);
+		if(fibptr->done == 0)
+			BUG();
+			
+		if((fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
+			return -ETIMEDOUT;
+		else
+			return 0;
+	}
+	/*
+	 *	If the user does not want a response then return success, otherwise
+	 *	return pending
+	 */
+	if (reply)
+		return -EINPROGRESS;
+	else
+		return 0;
+}
+
+/** 
+ *	aac_consumer_get	-	get the top of the queue
+ *	@dev: Adapter
+ *	@q: Queue
+ *	@entry: Return entry
+ *
+ *	Will return a pointer to the entry on the top of the requested queue that
+ *	we are a consumer of. Returns 1 and fills in @entry if an entry is
+ *	available, or 0 if the queue is empty. It does not change the state of
+ *	the queue.
+ */
+
+int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
+{
+	u32 index;
+	int status;
+
+	if (*q->headers.producer == *q->headers.consumer) {
+		status = 0;
+	} else {
+		/*
+		 *	The consumer index must be wrapped if we have reached
+		 *	the end of the queue, else we just use the entry
+		 *	pointed to by the header index
+		 */
+		if (*q->headers.consumer >= q->entries) 
+			index = 0;		
+		else
+		        index = *q->headers.consumer;
+		*entry = q->base + index;
+		status = 1;
+	}
+	return(status);
+}
+
+int aac_consumer_avail(struct aac_dev *dev, struct aac_queue * q)
+{
+	return (*q->headers.producer != *q->headers.consumer);
+}
+
+
+/**
+ *	aac_consumer_free	-	free consumer entry
+ *	@dev: Adapter
+ *	@q: Queue
+ *	@qid: Queue ident
+ *
+ *	Frees up the current top of the queue we are a consumer of. If the
+ *	queue was full notify the producer that the queue is no longer full.
+ */
+
+void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
+{
+	int wasfull = 0;
+	u32 notify;
+
+	if (*q->headers.producer+1 == *q->headers.consumer)
+		wasfull = 1;
+        
+	if (*q->headers.consumer >= q->entries)
+		*q->headers.consumer = 1;
+	else
+		*q->headers.consumer += 1;
+        
+	if (wasfull) {
+		switch (qid) {
+
+		case HostNormCmdQueue:
+			notify = HostNormCmdNotFull;
+			break;
+		case HostHighCmdQueue:
+			notify = HostHighCmdNotFull;
+			break;
+		case HostNormRespQueue:
+			notify = HostNormRespNotFull;
+			break;
+		case HostHighRespQueue:
+			notify = HostHighRespNotFull;
+			break;
+		default:
+			BUG();
+			return;
+		}
+		aac_adapter_notify(dev, notify);
+	}
+}        
+
+/**
+ *	fib_adapter_complete	-	complete adapter issued fib
+ *	@fibptr: fib to complete
+ *	@size: size of fib
+ *
+ *	Will do all necessary work to complete a FIB that was sent from
+ *	the adapter.
+ */
+
+int fib_adapter_complete(struct fib * fibptr, unsigned short size)
+{
+	struct hw_fib * fib = fibptr->fib;
+	struct aac_dev * dev = fibptr->dev;
+	unsigned long nointr = 0;
+
+	if (fib->header.XferState == 0)
+        	return 0;
+	/*
+	 *	If we plan to do anything check the structure type first.
+	 */ 
+	if ( fib->header.StructType != FIB_MAGIC ) {
+        	return -EINVAL;
+	}
+	/*
+	 *	This block handles the case where the adapter had sent us a
+	 *	command and we have finished processing the command. We
+	 *	call this routine when we are done processing the command
+	 *	and want to send a response back to the adapter. This will
+	 *	send the completed cdb to the adapter.
+	 */
+	if (fib->header.XferState & SentFromAdapter) {
+		fib->header.XferState |= HostProcessed;
+		if (fib->header.XferState & HighPriority) {
+			u32 index;
+			if (size) {
+				size += sizeof(struct aac_fibhdr);
+				if (size > fib->header.SenderSize)
+					return -EMSGSIZE;
+				fib->header.Size = size;
+			}
+			if(aac_queue_get(dev, &index, AdapHighRespQueue, fib, 1, NULL, &nointr) < 0) {
+				return -EWOULDBLOCK;
+			}
+			if (aac_insert_entry(dev, index, AdapHighRespQueue,  (nointr & (int)aac_config.irq_mod)) != 0) {
+			}
+		}
+		else if (fib->header.XferState & NormalPriority) 
+		{
+			u32 index;
+
+			if (size) {
+				size += sizeof(struct aac_fibhdr);
+				if (size > fib->header.SenderSize) 
+					return -EMSGSIZE;
+				fib->header.Size = size;
+			}
+			if (aac_queue_get(dev, &index, AdapNormRespQueue, fib, 1, NULL, &nointr) < 0) 
+				return -EWOULDBLOCK;
+			if (aac_insert_entry(dev, index, AdapNormRespQueue, 
+				(nointr & (int)aac_config.irq_mod)) != 0) 
+			{
+			}
+		}
+	} else {
+		printk(KERN_WARNING "fib_adapter_complete: Unknown xferstate detected.\n");
+		BUG();
+	}
+	return 0;
+}
+
+/**
+ *	fib_complete	-	fib completion handler
+ *	@fibptr: FIB to complete
+ *
+ *	Will do all necessary work to complete a FIB.
+ */
+ 
+int fib_complete(struct fib * fibptr)
+{
+	struct hw_fib * fib = fibptr->fib;
+
+	/*
+	 *	Check for a fib which has already been completed
+	 */
+
+	if (fib->header.XferState == 0)
+        	return 0;
+	/*
+	 *	If we plan to do anything check the structure type first.
+	 */ 
+
+	if (fib->header.StructType != FIB_MAGIC)
+	        return -EINVAL;
+	/*
+	 *	This block completes a cdb which originated on the host and we
+	 *	just need to deallocate the cdb or reinit it. At this point the
+	 *	command is complete that we had sent to the adapter and this
+	 *	cdb could be reused.
+	 */
+	if((fib->header.XferState & SentFromHost) &&
+		(fib->header.XferState & AdapterProcessed))
+	{
+		fib_dealloc(fibptr);
+	}
+	else if(fib->header.XferState & SentFromHost) 
+	{
+		/*
+		 *	This handles the case when the host has aborted the I/O
+		 *	to the adapter because the adapter is not responding
+		 */
+		fib_dealloc(fibptr);
+	} else if(fib->header.XferState & HostOwned) {
+		fib_dealloc(fibptr);
+	} else {
+		BUG();
+	}   
+	return 0;
+}
+
+/**
+ *	aac_printf	-	handle printf from firmware
+ *	@dev: Adapter
+ *	@val: Message info
+ *
+ *	Print a message passed to us by the controller firmware on the
+ *	Adaptec board
+ */
+
+void aac_printf(struct aac_dev *dev, u32 val)
+{
+	int length = val & 0xffff;
+	int level = (val >> 16) & 0xffff;
+	char *cp = dev->printfbuf;
+	
+	/*
+	 *	The size of the printfbuf is set in port.c
+	 *	There is no variable or define for it
+	 */
+	if (length > 255)
+		length = 255;
+	if (cp[length] != 0)
+		cp[length] = 0;
+	if (level == LOG_HIGH_ERROR)
+		printk(KERN_WARNING "aacraid:%s.\n", cp);
+	else
+		printk(KERN_INFO "aacraid:%s.\n", cp);
+	memset(cp, 0,  256);
+}
+
+
+/**
+ *	aac_handle_aif		-	Handle a message from the firmware
+ *	@dev: Which adapter this fib is from
+ *	@fibptr: Pointer to fibptr from adapter
+ *
+ *	This routine handles a driver notify fib from the adapter and
+ *	dispatches it to the appropriate routine for handling.
+ */
+
+static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
+{
+	struct hw_fib * fib = fibptr->fib;
+	/*
+	 * Set the status of this FIB to be Invalid parameter.
+	 *
+	 *	*(u32 *)fib->data = ST_INVAL;
+	 */
+	*(u32 *)fib->data = ST_OK;
+	fib_adapter_complete(fibptr, sizeof(u32));
+}
+
+/**
+ *	aac_command_thread	-	command processing thread
+ *	@dev: Adapter to monitor
+ *
+ *	Waits on the commandready event in its queue. When the event gets set
+ *	it will pull FIBs off its queue. It will continue to pull FIBs off
+ *	until the queue is empty. When the queue is empty it will wait for
+ *	more FIBs.
+ */
+ 
+int aac_command_thread(struct aac_dev * dev)
+{
+	struct hw_fib *fib, *newfib;
+	struct fib fibptr; /* for error logging */
+	struct aac_queue_block *queues = dev->queues;
+	struct aac_fib_context *fibctx;
+	unsigned long flags;
+	DECLARE_WAITQUEUE(wait, current);
+
+	/*
+	 *	We can only have one thread per adapter for AIFs.
+	 */
+	if (dev->aif_thread)
+		return -EINVAL;
+	/*
+	 *	Set up the name that will appear in 'ps'
+	 *	stored in  task_struct.comm[16].
+	 */
+	sprintf(current->comm, "AIFd");
+	daemonize();
+	/*
+	 *	Let the DPC know it has a place to send the AIFs to.
+	 */
+	dev->aif_thread = 1;
+	memset(&fibptr, 0, sizeof(struct fib));
+	add_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
+	set_current_state(TASK_INTERRUPTIBLE);
+	while(1) 
+	{
+		spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
+		while(!list_empty(&(queues->queue[HostNormCmdQueue].cmdq))) {
+			struct list_head *entry;
+			struct aac_aifcmd * aifcmd;
+
+			set_current_state(TASK_RUNNING);
+		
+			entry = queues->queue[HostNormCmdQueue].cmdq.next;
+			list_del(entry);
+			
+			spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags);
+			fib = CONTAINING_RECORD( entry, struct hw_fib, header.FibLinks );
+			/*
+			 *	We will process the FIB here or pass it to a 
+			 *	worker thread that is TBD. We Really can't 
+			 *	do anything at this point since we don't have
+			 *	anything defined for this thread to do.
+			 */
+			memset(&fibptr, 0, sizeof(struct fib));
+			fibptr.type = FSAFS_NTC_FIB_CONTEXT;
+			fibptr.size = sizeof( struct fib );
+			fibptr.fib = fib;
+			fibptr.data = fib->data;
+			fibptr.dev = dev;
+			/*
+			 *	We only handle AifRequest fibs from the adapter.
+			 */
+			aifcmd = (struct aac_aifcmd *) fib->data;
+			if (aifcmd->command == AifCmdDriverNotify) {
+				aac_handle_aif(dev, &fibptr);
+			} else {
+				/* The u32 here is important and intended. We are using
+				   32bit wrapping time to fit the adapter field */
+				   
+				u32 time_now, time_last;
+				unsigned long flagv;
+				
+				time_now = jiffies/HZ;
+
+				spin_lock_irqsave(&dev->fib_lock, flagv);
+				entry = dev->fib_list.next;
+				/*
+				 * For each Context that is on the 
+				 * fibctxList, make a copy of the
+				 * fib, and then set the event to wake up the
+				 * thread that is waiting for it.
+				 */
+				while (entry != &dev->fib_list) {
+					/*
+					 * Extract the fibctx
+					 */
+					fibctx = CONTAINING_RECORD(entry, struct aac_fib_context, next);
+					/*
+					 * Check if the queue is getting
+					 * backlogged
+					 */
+					if (fibctx->count > 20)
+					{
+						time_last = fibctx->jiffies;
+						/*
+						 * Has it been > 2 minutes 
+						 * since the last read off
+						 * the queue?
+						 */
+						if ((time_now - time_last) > 120) {
+							entry = entry->next;
+							aac_close_fib_context(dev, fibctx);
+							continue;
+						}
+					}
+					/*
+					 * Warning: no sleep allowed while
+					 * holding spinlock
+					 */
+					newfib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC);
+					if (newfib) {
+						/*
+						 * Make the copy of the FIB
+						 */
+						memcpy(newfib, fib, sizeof(struct hw_fib));
+						/*
+						 * Put the FIB onto the
+						 * fibctx's fibs
+						 */
+						list_add_tail(&newfib->header.FibLinks, &fibctx->fibs);
+						fibctx->count++;
+						/* 
+						 * Set the event to wake up the
+						 * thread that is waiting.
+						 */
+						up(&fibctx->wait_sem);
+					} else {
+						printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
+					}
+					entry = entry->next;
+				}
+				/*
+				 *	Set the status of this FIB
+				 */
+				*(u32 *)fib->data = ST_OK;
+				fib_adapter_complete(&fibptr, sizeof(u32));
+				spin_unlock_irqrestore(&dev->fib_lock, flagv);
+			}
+			spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
+		}
+		/*
+		 *	There are no more AIFs
+		 */
+		spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags);
+		schedule();
+
+		if(signal_pending(current))
+			break;
+		set_current_state(TASK_INTERRUPTIBLE);
+	}
+	remove_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
+	dev->aif_thread = 0;
+	complete_and_exit(&dev->aif_completion, 0);
+}
diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.15p3/drivers/scsi/aacraid/dpcsup.c linux.gamma/drivers/scsi/aacraid/dpcsup.c
--- linux.15p3/drivers/scsi/aacraid/dpcsup.c	Thu Jan  1 01:00:00 1970
+++ linux.gamma/drivers/scsi/aacraid/dpcsup.c	Fri Nov 30 13:57:43 2001
@@ -0,0 +1,201 @@
+/*
+ *	Adaptec AAC series RAID controller driver
+ *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ *  dpcsup.c
+ *
+ * Abstract: All DPC processing routines for the cyclone board occur here.
+ *
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/completion.h>
+#include <linux/blk.h>
+#include <asm/semaphore.h>
+#include "scsi.h"
+#include "hosts.h"
+
+#include "aacraid.h"
+
+/**
+ *	aac_response_normal	-	Handle command replies
+ *	@q: Queue to read from
+ *
+ *	This DPC routine will be run when the adapter interrupts us to let us
+ *	know there is a response on our normal priority queue. We will pull off
+ *	all the QEs there are and wake up all the waiters before exiting. We will
+ *	take a spinlock out on the queue before operating on it.
+ */
+
+unsigned int aac_response_normal(struct aac_queue * q)
+{
+	struct aac_dev * dev = q->dev;
+	struct aac_entry *entry;
+	struct hw_fib * fib;
+	struct fib * fibctx;
+	int consumed = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(q->lock, flags);	
+
+	/*
+	 *	Keep pulling response QEs off the response queue and waking
+	 *	up the waiters until there are no more QEs. We then return
+	 *	back to the system. If no response was requested we just
+	 *	deallocate the Fib here and continue.
+	 */
+	while(aac_consumer_get(dev, q, &entry))
+	{
+		int fast;
+
+		fast = (int) (entry->addr & 0x01);
+		fib = (struct hw_fib *) (entry->addr & ~0x01);
+		aac_consumer_free(dev, q, HostNormRespQueue);
+		fibctx = (struct fib *)fib->header.SenderData;
+		/*
+		 *	Remove this fibctx from the Outstanding I/O queue.
+		 *	But only if it has not already been timed out.
+		 *
+		 *	If the fib has been timed out already, then just 
+		 *	continue. The caller has already been notified that
+		 *	the fib timed out.
+		 */
+		if (!(fibctx->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
+			list_del(&fibctx->queue);
+			dev->queues->queue[AdapNormCmdQueue].numpending--;
+		} else {
+			printk(KERN_WARNING "aacraid: FIB timeout.\n");
+			continue;
+		}
+		spin_unlock_irqrestore(q->lock, flags);
+
+		if (fast) {
+			/*
+			 *	Doctor the fib
+			 */
+			*(u32 *)fib->data = ST_OK;
+			fib->header.XferState |= AdapterProcessed;
+		}
+
+		FIB_COUNTER_INCREMENT(aac_config.FibRecved);
+
+		if (fib->header.Command == NuFileSystem) 
+		{
+			u32 *pstatus = (u32 *)fib->data;
+			if (*pstatus & 0xffff0000)
+				*pstatus = ST_OK;
+		}
+		if (fib->header.XferState & (NoResponseExpected | Async) ) 
+		{
+	        	if (fib->header.XferState & NoResponseExpected)
+				FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved);
+			else 
+				FIB_COUNTER_INCREMENT(aac_config.AsyncRecved);
+			/*
+			 *	NOTE:  we cannot touch the fibctx after this
+			 *	    call, because it may have been deallocated.
+			 */
+			fibctx->callback(fibctx->callback_data, fibctx);
+		} else {
+			unsigned long flagv;
+			spin_lock_irqsave(&fibctx->event_lock, flagv);
+			fibctx->done = 1;
+			up(&fibctx->event_wait);
+			spin_unlock_irqrestore(&fibctx->event_lock, flagv);
+			FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
+		}
+		consumed++;
+		spin_lock_irqsave(q->lock, flags);
+	}
+
+	if (consumed > aac_config.peak_fibs)
+		aac_config.peak_fibs = consumed;
+	if (consumed == 0) 
+		aac_config.zero_fibs++;
+
+	spin_unlock_irqrestore(q->lock, flags);
+	return 0;
+}
+
+
+/**
+ *	aac_command_normal	-	handle commands
+ *	@q: queue to process
+ *
+ *	This DPC routine will be queued when the adapter interrupts us to 
+ *	let us know there is a command on our normal priority queue. We will 
+ *	pull off all the QEs there are and wake up all the waiters before exiting.
+ *	We will take a spinlock out on the queue before operating on it.
+ */
+ 
+unsigned int aac_command_normal(struct aac_queue *q)
+{
+	struct aac_dev * dev = q->dev;
+	struct aac_entry *entry;
+	unsigned long flags;
+
+	spin_lock_irqsave(q->lock, flags);
+
+	/*
+	 *	Keep pulling command QEs off the command queue and handling
+	 *	them until there are no more QEs. We then return
+	 *	back to the system.
+	 */
+	while(aac_consumer_get(dev, q, &entry))
+	{
+		struct hw_fib * fib;
+		fib = (struct hw_fib *)entry->addr;
+
+		if (dev->aif_thread) {
+		        list_add_tail(&fib->header.FibLinks, &q->cmdq);
+	 	        aac_consumer_free(dev, q, HostNormCmdQueue);
+		        wake_up_interruptible(&q->cmdready);
+		} else {
+			struct fib fibctx;
+	 	        aac_consumer_free(dev, q, HostNormCmdQueue);
+			spin_unlock_irqrestore(q->lock, flags);
+			memset(&fibctx, 0, sizeof(struct fib));
+			fibctx.type = FSAFS_NTC_FIB_CONTEXT;
+			fibctx.size = sizeof(struct fib);
+			fibctx.fib = fib;
+			fibctx.data = fib->data;
+			fibctx.dev = dev;
+			/*
+			 *	Set the status of this FIB
+			 */
+			*(u32 *)fib->data = ST_OK;
+			fib_adapter_complete(&fibctx, sizeof(u32));
+			spin_lock_irqsave(q->lock, flags);
+		}		
+	}
+	spin_unlock_irqrestore(q->lock, flags);
+	return 0;
+}
diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.15p3/drivers/scsi/aacraid/include/aacraid.h linux.gamma/drivers/scsi/aacraid/include/aacraid.h
--- linux.15p3/drivers/scsi/aacraid/include/aacraid.h	Thu Jan  1 01:00:00 1970
+++ linux.gamma/drivers/scsi/aacraid/include/aacraid.h	Fri Nov 30 13:55:32 2001
@@ -0,0 +1,1242 @@
+#define dprintk(x)
+
+#define FAILURE 1
+#define	AAC_MAX_ADAPTERS 64
+#define AAC_NUM_FIB	128
+#define AAC_NUM_IO_FIB	116
+
+#define PAGE_SIZE	4096
+
+/*------------------------------------------------------------------------------
+ *              D E F I N E S
+ *----------------------------------------------------------------------------*/
+/* Define the AAC SCSI Host Template structure. */
+#define AAC_HOST_TEMPLATE_ENTRY	\
+{ \
+	name:           	"AAC",			/* Driver Name                */ \
+	proc_info:      	aac_procinfo,	/* ProcFS Info Func           */ \
+	detect:         	aac_detect,		/* Detect Host Adapter        */ \
+	release:        	aac_release,		/* Release Host Adapter       */ \
+	info:           	aac_driverinfo,		/* Driver Info Function       */ \
+	ioctl:          	aac_ioctl,		/* ioctl Interface            */ \
+	command:        	aac_command,		/* unqueued command           */ \
+	queuecommand:   	aac_queuecommand,	/* Queue Command Function     */ \
+	abort:          	aac_abortcommand,	/* Abort Command Function     */ \
+	reset:          	aac_resetcommand,	/* Reset Command Function     */ \
+	bios_param:     	aac_biosparm,		/* BIOS Disk Parameters       */ \
+	can_queue:      	AAC_NUM_IO_FIB,		/* Default initial value      */ \
+	this_id:        	16,			/* Default initial value      */ \
+	sg_tablesize:   	16,			/* Default initial value      */ \
+	max_sectors:    	128,			/* max xfer size of 64k       */ \
+	cmd_per_lun:    	1,			/* Default initial value      */ \
+	present:        	0,			/* Default initial value      */ \
+	unchecked_isa_dma:	0,			/* Default Initial Value      */ \
+	use_new_eh_code:	0,			/* Default initial value      */ \
+	eh_abort_handler:       aac_abortcommand,	/* New Abort Command func     */ \
+	eh_strategy_handler:	NULL,			/* New Strategy Error Handler */ \
+	eh_device_reset_handler:NULL,			/* New Device Reset Handler   */ \
+	eh_bus_reset_handler:	NULL,			/* New Bus Reset Handler      */ \
+	eh_host_reset_handler:	NULL,			/* New Host reset Handler     */ \
+	use_clustering:		ENABLE_CLUSTERING	/* Enable Clustering          */ \
+}
+
+struct diskparm
+{
+	int heads;
+	int sectors;
+	int cylinders;
+};
+
+
+//
+//  Singly linked list structure. Can be used as either a list head, or
+//  as link words.
+//
+
+struct single_list
+{
+	struct single_list *next;
+};
+
+//
+// Calculate the address of the base of the structure given its type, and an
+// address of a field within the structure.
+//
+
+#define CONTAINING_RECORD(address, type, field) ((type *)( \
+                                                  (char *)(address) - \
+                                                  (char *)(&((type *)0)->field)))
+
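+/*
+ *	For example, the AIF thread in commsup.c uses
+ *
+ *		fib = CONTAINING_RECORD(entry, struct hw_fib, header.FibLinks);
+ *
+ *	to recover the enclosing hw_fib from an embedded list_head.
+ */
+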
+/*
+ *	DON'T CHANGE THE ORDER, this is set by the firmware
+ */
+ 
+#define		CT_NONE			0
+#define		CT_VOLUME		1
+#define		CT_MIRROR		2
+#define		CT_STRIPE		3
+#define		CT_RAID5		4
+#define		CT_SSRW			5
+#define		CT_SSRO			6
+#define		CT_MORPH		7
+#define		CT_PASSTHRU		8
+#define		CT_RAID4		9
+#define		CT_RAID10		10	/* stripe of mirror */
+#define		CT_RAID00		11	/* stripe of stripe */
+#define		CT_VOLUME_OF_MIRRORS	12	/* volume of mirror */
+#define		CT_PSEUDO_RAID		13	/* really raid4 */
+#define		CT_LAST_VOLUME_TYPE	14
+
+/*
+ *	Types of objects addressable in some fashion by the client.
+ *	This is a superset of those objects handled just by the filesystem
+ *	and includes "raw" objects that an administrator would use to
+ *	configure containers and filesystems.
+ */
+
+#define		FT_REG		1	/* regular file */
+#define		FT_DIR		2	/* directory */
+#define		FT_BLK		3	/* "block" device - reserved */
+#define		FT_CHR		4	/* "character special" device - reserved */
+#define		FT_LNK		5	/* symbolic link */
+#define		FT_SOCK		6	/* socket */
+#define		FT_FIFO		7	/* fifo */
+#define		FT_FILESYS	8	/* ADAPTEC's "FSA"(tm) filesystem */
+#define		FT_DRIVE	9	/* physical disk - addressable in scsi by bus/target/lun */
+#define		FT_SLICE	10	/* virtual disk - raw volume - slice */
+#define		FT_PARTITION	11	/* FSA partition - carved out of a slice - building block for containers */
+#define		FT_VOLUME	12	/* Container - Volume Set */
+#define		FT_STRIPE	13	/* Container - Stripe Set */
+#define		FT_MIRROR	14	/* Container - Mirror Set */
+#define		FT_RAID5	15	/* Container - Raid 5 Set */
+#define		FT_DATABASE	16	/* Storage object with "foreign" content manager */
+
+/*
+ *	Host side memory scatter gather list
+ *	Used by the adapter for read, write, and readdirplus operations
+ */
+
+struct sgentry {
+	u32	addr;	/* 32-bit Base address. */
+	u32	count;	/* Length. */
+};
+
+/*
+ *	SGMAP
+ *
+ *	This is the SGMAP structure for all commands that use
+ *	32-bit addressing.
+ *
+ *	Note that the upper 16 bits of SgCount are used as flags.
+ *	Only the lower 16 bits of SgCount are actually used as the
+ *	SG element count.
+ */
+
+struct sgmap {
+	u32		count;
+	struct sgentry	sg[1];
+};
+
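+/*
+ *	So, for example, the real scatter/gather element count of a 32-bit
+ *	SGMAP is (count & 0xffff); the upper 16 bits carry the flags.
+ */
+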
+struct creation_info
+{
+	u8 		buildnum;		/* e.g., 588 */
+	u8 		usec;			/* e.g., 588 */
+	u8	 	via;			/* e.g., 1 = FSU,
+						 * 	 2 = API
+						 */
+	u8	 	year;		 	/* e.g., 1997 = 97 */
+	u32		date;			/*
+						 * unsigned 	Month		:4;	// 1 - 12
+						 * unsigned 	Day		:6;	// 1 - 32
+						 * unsigned 	Hour		:6;	// 0 - 23
+						 * unsigned 	Minute		:6;	// 0 - 60
+						 * unsigned 	Second		:6;	// 0 - 60
+						 */
+	u64		serial;			/* e.g., 0x1DEADB0BFAFAF001 */
+};
+
+
+/*
+ *	Define all the constants needed for the communication interface
+ */
+
+/*
+ *	Define how many queue entries each queue will have and the total
+ *	number of entries for the entire communication interface. Also define
+ *	how many queues we support.
+ *
+ *	This has to match the controller
+ */
+
+#define NUMBER_OF_COMM_QUEUES  8   // 4 command; 4 response
+#define HOST_HIGH_CMD_ENTRIES  4
+#define HOST_NORM_CMD_ENTRIES  8
+#define ADAP_HIGH_CMD_ENTRIES  4
+#define ADAP_NORM_CMD_ENTRIES  512
+#define HOST_HIGH_RESP_ENTRIES 4
+#define HOST_NORM_RESP_ENTRIES 512
+#define ADAP_HIGH_RESP_ENTRIES 4
+#define ADAP_NORM_RESP_ENTRIES 8
+
+#define TOTAL_QUEUE_ENTRIES  \
+    (HOST_NORM_CMD_ENTRIES + HOST_HIGH_CMD_ENTRIES + ADAP_NORM_CMD_ENTRIES + ADAP_HIGH_CMD_ENTRIES + \
+	    HOST_NORM_RESP_ENTRIES + HOST_HIGH_RESP_ENTRIES + ADAP_NORM_RESP_ENTRIES + ADAP_HIGH_RESP_ENTRIES)
+
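+/*
+ *	With the entry counts above this works out to
+ *	8 + 4 + 512 + 4 + 512 + 4 + 8 + 4 = 1056 queue entries shared
+ *	between the host and the adapter.
+ */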
+
+/*
+ *	Set the queues on a 16 byte alignment
+ */
+ 
+#define QUEUE_ALIGNMENT		16
+
+/*
+ *	The queue headers define the Communication Region queues. These
+ *	are physically contiguous and accessible by both the adapter and the
+ *	host. Even though all queue headers are in the same contiguous block
+ *	they will be represented as individual units in the data structures.
+ */
+
+struct aac_entry {
+	u32 size;       /* Size in bytes of the Fib which this QE points to */
+	u32 addr;	/* Receiver addressable address of the FIB (low 32 address bits) */
+};
+
+/*
+ *	The adapter assumes the ProducerIndex and ConsumerIndex are grouped
+ *	adjacently and in that order.
+ */
+ 
+struct aac_qhdr {
+	u64 header_addr;		/* Address to hand the adapter to access to this queue head */
+	u32 *producer;			/* The producer index for this queue (host address) */
+	u32 *consumer;			/* The consumer index for this queue (host address) */
+};
+
+/*
+ *	Define all the events which the adapter would like to notify
+ *	the host of.
+ */
+ 
+#define		HostNormCmdQue		1	/* Change in host normal priority command queue */
+#define		HostHighCmdQue		2	/* Change in host high priority command queue */
+#define		HostNormRespQue		3	/* Change in host normal priority response queue */
+#define		HostHighRespQue		4	/* Change in host high priority response queue */
+#define		AdapNormRespNotFull	5
+#define		AdapHighRespNotFull	6
+#define		AdapNormCmdNotFull	7
+#define		AdapHighCmdNotFull	8
+#define		SynchCommandComplete	9
+#define		AdapInternalError	0xfe    /* The adapter detected an internal error shutting down */
+
+/*
+ *	Define all the events the host wishes to notify the
+ *	adapter of. The first four values must match the Qid of the
+ *	corresponding queue.
+ */
+
+#define		AdapNormCmdQue		2
+#define		AdapHighCmdQue		3
+#define		AdapNormRespQue		6
+#define		AdapHighRespQue		7
+#define		HostShutdown		8
+#define		HostPowerFail		9
+#define		FatalCommError		10
+#define		HostNormRespNotFull	11
+#define		HostHighRespNotFull	12
+#define		HostNormCmdNotFull	13
+#define		HostHighCmdNotFull	14
+#define		FastIo			15
+#define		AdapPrintfDone		16
+
+/*
+ *	Define all the queues that the adapter and host use to communicate
+ *	Number them to match the physical queue layout.
+ */
+
+enum aac_queue_types {
+        HostNormCmdQueue = 0,	/* Adapter to host normal priority command traffic */
+        HostHighCmdQueue,	/* Adapter to host high priority command traffic */
+        AdapNormCmdQueue,	/* Host to adapter normal priority command traffic */
+        AdapHighCmdQueue,	/* Host to adapter high priority command traffic */
+        HostNormRespQueue,	/* Adapter to host normal priority response traffic */
+        HostHighRespQueue,	/* Adapter to host high priority response traffic */
+        AdapNormRespQueue,	/* Host to adapter normal priority response traffic */
+        AdapHighRespQueue	/* Host to adapter high priority response traffic */
+};
+
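+/*
+ *	Note that the AdapNormCmdQue/AdapHighCmdQue/AdapNormRespQue/
+ *	AdapHighRespQue notify values (2, 3, 6 and 7) line up with the
+ *	corresponding queue numbers in the enum above, which is what lets
+ *	aac_insert_entry() hand a qid straight to aac_adapter_notify().
+ */
+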
+/*
+ *	Assign type values to the FSA communication data structures
+ */
+
+#define		FIB_MAGIC	0x0001
+
+/*
+ *	Define the priority levels the FSA communication routines support.
+ */
+
+#define		FsaNormal	1
+#define		FsaHigh		2
+
+//
+// Define the FIB. The FIB is where all the requested data and
+// command information are put for the application on the FSA adapter.
+//
+
+struct aac_fibhdr {
+	u32 XferState;			// Current transfer state for this CCB
+	u16 Command;			// Routing information for the destination
+	u8 StructType;			// Type FIB
+	u8 Flags;			// Flags for FIB
+	u16 Size;			// Size of this FIB in bytes
+	u16 SenderSize;			// Size of the FIB in the sender (for response sizing)
+	u32 SenderFibAddress;		// Host defined data in the FIB
+	u32 ReceiverFibAddress;		// Logical address of this FIB for the adapter
+	u32 SenderData;			// Place holder for the sender to store data
+	union {
+		struct {
+		    u32 _ReceiverTimeStart; 	// Timestamp for receipt of fib
+		    u32 _ReceiverTimeDone;	// Timestamp for completion of fib
+		} _s;
+		struct list_head _FibLinks;	// Used to link Adapter Initiated Fibs on the host
+	} _u;
+};
+
+#define FibLinks			_u._FibLinks
+
+#define FIB_DATA_SIZE_IN_BYTES (512 - sizeof(struct aac_fibhdr))
+
+
+struct hw_fib {
+	struct aac_fibhdr header;
+	u8 data[FIB_DATA_SIZE_IN_BYTES];		// Command specific data
+};
+
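+/*
+ *	Each hw_fib is therefore 512 bytes in total (header plus data), so
+ *	the AAC_NUM_FIB (128) shared FIBs set up by fib_map_alloc() occupy
+ *	128 * 512 bytes = 64K of PCI consistent memory.
+ */
+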
+/*
+ *	FIB commands
+ */
+
+#define 	TestCommandResponse		1
+#define		TestAdapterCommand		2
+/*
+ *	Lowlevel and comm commands
+ */
+#define		LastTestCommand			100
+#define		ReinitHostNormCommandQueue	101
+#define		ReinitHostHighCommandQueue	102
+#define		ReinitHostHighRespQueue		103
+#define		ReinitHostNormRespQueue		104
+#define		ReinitAdapNormCommandQueue	105
+#define		ReinitAdapHighCommandQueue	107
+#define		ReinitAdapHighRespQueue		108
+#define		ReinitAdapNormRespQueue		109
+#define		InterfaceShutdown		110
+#define		DmaCommandFib			120
+#define		StartProfile			121
+#define		TermProfile			122
+#define		SpeedTest			123
+#define		TakeABreakPt			124
+#define		RequestPerfData			125
+#define		SetInterruptDefTimer		126
+#define		SetInterruptDefCount		127
+#define		GetInterruptDefStatus		128
+#define		LastCommCommand			129
+/*
+ *	Filesystem commands
+ */
+#define		NuFileSystem			300
+#define		UFS				301
+#define		HostFileSystem			302
+#define		LastFileSystemCommand		303
+/*
+ *	Container Commands
+ */
+#define		ContainerCommand		500
+#define		ContainerCommand64		501
+/*
+ *	Cluster Commands
+ */
+#define		ClusterCommand	 		550
+/*
+ *	Scsi Port commands (scsi passthrough)
+ */
+#define		ScsiPortCommand			600
+/*
+ *	Misc house keeping and generic adapter initiated commands
+ */
+#define		AifRequest			700
+#define		CheckRevision			701
+#define		FsaHostShutdown			702
+#define		RequestAdapterInfo		703
+#define		IsAdapterPaused			704
+#define		SendHostTime			705
+#define		LastMiscCommand			706
+
+//
+// Commands that will target the failover level on the FSA adapter
+//
+
+enum fib_xfer_state {
+	HostOwned 			= (1<<0),
+	AdapterOwned 			= (1<<1),
+	FibInitialized 			= (1<<2),
+	FibEmpty 			= (1<<3),
+	AllocatedFromPool 		= (1<<4),
+	SentFromHost 			= (1<<5),
+	SentFromAdapter 		= (1<<6),
+	ResponseExpected 		= (1<<7),
+	NoResponseExpected 		= (1<<8),
+	AdapterProcessed 		= (1<<9),
+	HostProcessed 			= (1<<10),
+	HighPriority 			= (1<<11),
+	NormalPriority 			= (1<<12),
+	Async				= (1<<13),
+	AsyncIo				= (1<<13),	// rpbfix: remove with new regime
+	PageFileIo			= (1<<14),	// rpbfix: remove with new regime
+	ShutdownRequest			= (1<<15),
+	LazyWrite			= (1<<16),	// rpbfix: remove with new regime
+	AdapterMicroFib			= (1<<17),
+	BIOSFibPath			= (1<<18),
+	FastResponseCapable		= (1<<19),
+	ApiFib				= (1<<20)	// Its an API Fib.
+};
+
+/*
+ *	The following define needs to be updated any time there is an
+ *	incompatible change made to the aac_init structure.
+ */
+
+#define ADAPTER_INIT_STRUCT_REVISION		3
+
+struct aac_init
+{
+	u32	InitStructRevision;
+	u32	MiniPortRevision;
+	u32	fsrev;
+	void *	CommHeaderAddress;
+	void *	FastIoCommAreaAddress;
+	void *	AdapterFibsPhysicalAddress;
+	void *	AdapterFibsVirtualAddress;
+	u32	AdapterFibsSize;
+	u32	AdapterFibAlign;
+	void *	printfbuf;
+	u32	printfbufsiz;
+	u32	HostPhysMemPages;		// number of 4k pages of host physical memory
+	u32	HostElapsedSeconds;		// number of seconds since 1970.
+};
+
+enum aac_log_level {
+	LOG_INIT			= 10,
+	LOG_INFORMATIONAL		= 20,
+	LOG_WARNING			= 30,
+	LOG_LOW_ERROR			= 40,
+	LOG_MEDIUM_ERROR		= 50,
+	LOG_HIGH_ERROR			= 60,
+	LOG_PANIC			= 70,
+	LOG_DEBUG			= 80,
+	LOG_WINDBG_PRINT		= 90
+};
+
+#define FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT	0x030b
+#define FSAFS_NTC_FIB_CONTEXT			0x030c
+
+struct aac_dev;
+
+struct adapter_ops
+{
+	void (*adapter_interrupt)(struct aac_dev *dev);
+	void (*adapter_notify)(struct aac_dev *dev, u32 event);
+	void (*adapter_enable_int)(struct aac_dev *dev, u32 event);
+	void (*adapter_disable_int)(struct aac_dev *dev, u32 event);
+};
+
+/*
+ *	Define which interrupt handler needs to be installed
+ */
+
+struct aac_driver_ident
+{
+	u16	vendor;
+	u16	device;
+	u16	subsystem_vendor;
+	u16	subsystem_device;
+	int 	(*init)(struct aac_dev *dev, unsigned long num);
+	char *	name;
+	char *	vname;
+	char *	model;
+};
+
+/*
+ *	The adapter interface specs all queues to be located in the same
+ *	physically contiguous block. The host structure that defines the
+ *	communication queues will assume they are each a separate physically
+ *	contiguous memory region that will support them all being one big
+ *	contiguous block.
+ *	There is a command and response queue for each level and direction of
+ *	communication. These regions are accessed by both the host and adapter.
+ */
+ 
+struct aac_queue {
+	u64		 	logical;		/* This is the address we give the adapter */
+	struct aac_entry	*base;		   	/* This is the system virtual address */
+	struct aac_qhdr 	headers;       		/* A pointer to the producer and consumer queue headers for this queue */
+	u32	 		entries;	   	/* Number of queue entries on this queue */
+	wait_queue_head_t	qfull;		      	/* Event to wait on if the queue is full */
+	wait_queue_head_t	cmdready;	  	/* Indicates there is a Command ready from the adapter on this queue. */
+                                        		/* This is only valid for adapter to host command queues. */                      
+	spinlock_t	 	*lock;		     	/* Spinlock for this queue - must take this lock before accessing the queue */
+	spinlock_t		lockdata;		/* Actual lock (used only on one side of the lock) */
+	unsigned long		SavedIrql;      	/* Previous IRQL when the spin lock is taken */
+	u32			padding;		/* Padding - FIXME - can remove I believe */
+	struct list_head 	cmdq;		   	/* A queue of FIBs which need to be processed by the FS thread. This is */
+                                		        /* only valid for command queues which receive entries from the adapter. */
+	struct list_head	pendingq;		/* A queue of outstanding fib's to the adapter. */
+	unsigned long		numpending;		/* Number of entries on outstanding queue. */
+	struct aac_dev *	dev;			/* Back pointer to adapter structure */
+};
+
+/*
+ *	Message queues. The order here is important, see also the 
+ *	queue type ordering
+ */
+
+struct aac_queue_block
+{
+	struct aac_queue queue[8];
+};
+
+/*
+ *	SaP1 Message Unit Registers
+ */
+ 
+struct sa_drawbridge_CSR {
+						//	 Offset |	Name
+	u32	reserved[10];			//	00h-27h |   Reserved
+	u8	LUT_Offset;			//		28h	|	Lookup Table Offset
+	u8	reserved1[3];			// 	29h-2bh	|	Reserved
+	u32	LUT_Data;			//		2ch	|	Lookup Table Data
+	u32	reserved2[26];			//	30h-97h	|	Reserved
+	u16	PRICLEARIRQ;			//		98h	|	Primary Clear Irq
+	u16	SECCLEARIRQ;			//		9ah	|	Secondary Clear Irq
+	u16	PRISETIRQ;			//		9ch	|	Primary Set Irq
+	u16	SECSETIRQ;			//		9eh	|	Secondary Set Irq
+	u16	PRICLEARIRQMASK;		//		a0h	|	Primary Clear Irq Mask
+	u16	SECCLEARIRQMASK;		//		a2h	|	Secondary Clear Irq Mask
+	u16	PRISETIRQMASK;			//		a4h	|	Primary Set Irq Mask
+	u16	SECSETIRQMASK;			//		a6h	|	Secondary Set Irq Mask
+	u32	MAILBOX0;			//		a8h	|	Scratchpad 0
+	u32	MAILBOX1;			//		ach	|	Scratchpad 1
+	u32	MAILBOX2;			//		b0h	|	Scratchpad 2
+	u32	MAILBOX3;			//		b4h	|	Scratchpad 3
+	u32	MAILBOX4;			//		b8h	|	Scratchpad 4
+	u32	MAILBOX5;			//		bch	|	Scratchpad 5
+	u32	MAILBOX6;			//		c0h	|	Scratchpad 6
+	u32	MAILBOX7;			//		c4h	|	Scratchpad 7
+
+	u32	ROM_Setup_Data;			//		c8h | 	Rom Setup and Data
+	u32	ROM_Control_Addr;		//		cch | 	Rom Control and Address
+
+	u32	reserved3[12];			//	d0h-ffh	| 	reserved
+	u32	LUT[64];			// 100h-1ffh|	Lookup Table Entries
+
+	//
+	//  TO DO
+	//	need to add DMA, I2O, UART, etc registers from 80h to 364h
+	//
+
+};
+
+#define Mailbox0	SaDbCSR.MAILBOX0
+#define Mailbox1	SaDbCSR.MAILBOX1
+#define Mailbox2	SaDbCSR.MAILBOX2
+#define Mailbox3	SaDbCSR.MAILBOX3
+#define Mailbox4	SaDbCSR.MAILBOX4
+#define Mailbox5	SaDbCSR.MAILBOX5
+#define Mailbox7	SaDbCSR.MAILBOX7
+	
+#define DoorbellReg_p SaDbCSR.PRISETIRQ
+#define DoorbellReg_s SaDbCSR.SECSETIRQ
+#define DoorbellClrReg_p SaDbCSR.PRICLEARIRQ
+
+
+#define	DOORBELL_0	0x00000001
+#define DOORBELL_1	0x00000002
+#define DOORBELL_2	0x00000004
+#define DOORBELL_3	0x00000008
+#define DOORBELL_4	0x00000010
+#define DOORBELL_5	0x00000020
+#define DOORBELL_6	0x00000040
+
+	
+#define PrintfReady			DOORBELL_5
+#define PrintfDone			DOORBELL_5
+	
+struct sa_registers {
+	struct sa_drawbridge_CSR	SaDbCSR;			/* 98h - c4h */
+};
+	
+
+#define Sa_MINIPORT_REVISION			1
+
+#define sa_readw(AEP, CSR)		readl(&((AEP)->regs.sa->CSR))
+#define sa_readl(AEP,  CSR)		readl(&((AEP)->regs.sa->CSR))
+#define sa_writew(AEP, CSR, value)	writew(value, &((AEP)->regs.sa->CSR))
+#define sa_writel(AEP, CSR, value)	writel(value, &((AEP)->regs.sa->CSR))
+
+/*
+ *	Rx Message Unit Registers
+ */
+
+struct rx_mu_registers {
+						//	 Local	|   PCI*	|	Name
+						//			|		|
+	u32	ARSR;				//	1300h	|	00h	|	APIC Register Select Register
+	u32	reserved0;			//	1304h	|	04h	|	Reserved
+	u32	AWR;				//	1308h	|	08h	|	APIC Window Register
+	u32	reserved1;			//	130Ch	|	0Ch	|	Reserved
+	u32	IMRx[2];			//	1310h	|	10h	|	Inbound Message Registers
+	u32	OMRx[2];			//	1318h	|	18h	|	Outbound Message Registers
+	u32	IDR;				//	1320h	|	20h	|	Inbound Doorbell Register
+	u32	IISR;				//	1324h	|	24h	|	Inbound Interrupt Status Register
+	u32	IIMR;				//	1328h	|	28h	|	Inbound Interrupt Mask Register
+	u32	ODR;				//	132Ch	|	2Ch	|	Outbound Doorbell Register
+	u32	OISR;				//	1330h	|	30h	|	Outbound Interrupt Status Register
+	u32	OIMR;				//	1334h	|	34h	|	Outbound Interrupt Mask Register
+						// * Must access through ATU Inbound Translation Window
+};
+
+struct rx_inbound {
+	u32	Mailbox[8];
+};
+
+#define	InboundMailbox0		IndexRegs.Mailbox[0]
+#define	InboundMailbox1		IndexRegs.Mailbox[1]
+#define	InboundMailbox2		IndexRegs.Mailbox[2]
+#define	InboundMailbox3		IndexRegs.Mailbox[3]
+#define	InboundMailbox4		IndexRegs.Mailbox[4]
+
+#define	INBOUNDDOORBELL_0	0x00000001
+#define INBOUNDDOORBELL_1	0x00000002
+#define INBOUNDDOORBELL_2	0x00000004
+#define INBOUNDDOORBELL_3	0x00000008
+#define INBOUNDDOORBELL_4	0x00000010
+#define INBOUNDDOORBELL_5	0x00000020
+#define INBOUNDDOORBELL_6	0x00000040
+
+#define	OUTBOUNDDOORBELL_0	0x00000001
+#define OUTBOUNDDOORBELL_1	0x00000002
+#define OUTBOUNDDOORBELL_2	0x00000004
+#define OUTBOUNDDOORBELL_3	0x00000008
+#define OUTBOUNDDOORBELL_4	0x00000010
+
+#define InboundDoorbellReg	MUnit.IDR
+#define OutboundDoorbellReg	MUnit.ODR
+
+struct rx_registers {
+	struct rx_mu_registers		MUnit;		// 1300h - 1334h
+	u32				reserved1[6];	// 1338h - 134ch
+	struct rx_inbound		IndexRegs;
+};
+
+#define rx_readb(AEP, CSR)		readb(&((AEP)->regs.rx->CSR))
+#define rx_readl(AEP, CSR)		readl(&((AEP)->regs.rx->CSR))
+#define rx_writeb(AEP, CSR, value)	writeb(value, &((AEP)->regs.rx->CSR))
+#define rx_writel(AEP, CSR, value)	writel(value, &((AEP)->regs.rx->CSR))
+
+struct fib;
+
+typedef void (*fib_callback)(void *ctxt, struct fib *fibctx);
+
+struct aac_fib_context {
+	s16	 		type;		// used for verification of structure	
+	s16	 		size;
+	u32			jiffies;	// used for cleanup
+	struct list_head	next;		// used to link context's into a linked list
+	struct semaphore 	wait_sem;	// this is used to wait for the next fib to arrive.
+	int			wait;		// Set to true when thread is in WaitForSingleObject
+	unsigned long		count;		// total number of FIBs on FibList
+	struct list_head	fibs;
+};
+
+#define MAXIMUM_NUM_CONTAINERS	64		// 4 Luns * 16 Targets
+#define MAXIMUM_NUM_ADAPTERS	8
+
+struct fsa_scsi_hba {
+	unsigned long		size[MAXIMUM_NUM_CONTAINERS];
+	unsigned long		type[MAXIMUM_NUM_CONTAINERS];
+	unsigned char		valid[MAXIMUM_NUM_CONTAINERS];
+	unsigned char		ro[MAXIMUM_NUM_CONTAINERS];
+	unsigned char		locked[MAXIMUM_NUM_CONTAINERS];
+	unsigned char		deleted[MAXIMUM_NUM_CONTAINERS];
+	long			devno[MAXIMUM_NUM_CONTAINERS];
+};
+
+struct fib {
+	void			*next;	/* this is used by the allocator */
+	s16			type;
+	s16			size;
+	/*
+	 *	The Adapter that this I/O is destined for.
+	 */
+	struct aac_dev 		*dev;
+	u64			logicaladdr;	/* 64 bit */
+	/*
+	 *	This is the event the sendfib routine will wait on if the
+	 *	caller did not pass one and this is synch io.
+	 */
+	struct semaphore 	event_wait;
+	spinlock_t		event_lock;
+
+	unsigned long		done;	/* gets set to 1 when fib is complete */
+	fib_callback 		callback;
+	void 			*callback_data;
+	unsigned long		flags;
+	/*
+	 *	The following is used to put this fib context onto the 
+	 *	Outstanding I/O queue.
+	 */
+	struct list_head	queue;
+
+	void 			*data;
+	struct hw_fib		*fib;		/* Actual shared object */
+};
+
+struct aac_dev
+{
+	struct aac_dev		*next;
+	const char		*name;
+	int			id;
+
+	u16			irq_mask;
+	/*
+	 *	Map for 128 fib objects (64k)
+	 */	
+	dma_addr_t		hw_fib_pa;
+	struct hw_fib		*hw_fib_va;
+	/*
+	 *	Fib Headers
+	 */
+	struct fib		fibs[AAC_NUM_FIB];
+	struct fib		*free_fib;
+	struct fib		*timeout_fib;
+	spinlock_t		fib_lock;
+	
+	struct aac_queue_block *queues;
+	/*
+	 *	The user API will use an IOCTL to register itself to receive
+	 *	FIBs from the adapter.  The following list is used to keep
+	 *	track of all the threads that have requested these FIBs.  The
+	 *	mutex is used to synchronize access to all data associated 
+	 *	with the adapter fibs.
+	 */
+	struct list_head	fib_list;
+
+	struct adapter_ops	a_ops;
+	unsigned long		fsrev;		/* Main driver's revision number */
+	
+	struct aac_init		*init;		/* Holds initialization info to communicate with adapter */
+	void *			init_pa; 	/* Holds physical address of the init struct */
+	
+	struct pci_dev		*pdev;		/* Our PCI interface */
+	void *			printfbuf;	/* pointer to buffer used for printf's from the adapter */
+	void *			comm_addr;	/* Base address of Comm area */
+	dma_addr_t		comm_phys;	/* Physical Address of Comm area */
+	size_t			comm_size;
+
+	struct Scsi_Host	*scsi_host_ptr;
+	struct fsa_scsi_hba	fsa_dev;
+	int			thread_pid;
+	int			cardtype;
+	
+	/*
+	 *	The following is the device specific extension.
+	 */
+	union
+	{
+		struct sa_registers *sa;
+		struct rx_registers *rx;
+	} regs;
+	/*
+	 *	The following is the number of the individual adapter
+	 */
+	long			devnum;
+	int			aif_thread;
+	struct completion	aif_completion;
+};
+
+#define AllocateAndMapFibSpace(dev, MapFibContext) \
+	dev->a_ops.AllocateAndMapFibSpace(dev, MapFibContext)
+
+#define UnmapAndFreeFibSpace(dev, MapFibContext) \
+	dev->a_ops.UnmapAndFreeFibSpace(dev, MapFibContext)
+
+#define aac_adapter_interrupt(dev) \
+	dev->a_ops.adapter_interrupt(dev)
+
+#define aac_adapter_notify(dev, event) \
+	dev->a_ops.adapter_notify(dev, event)
+
+#define aac_adapter_enable_int(dev, event) \
+	dev->a_ops.adapter_enable_int(dev, event)
+
+#define aac_adapter_disable_int(dev, event) \
+	dev->a_ops.adapter_disable_int(dev, event)
+
+
+
+#define FIB_CONTEXT_FLAG_TIMED_OUT		(0x00000001)
+
+/*
+ *	Define the command values
+ */
+ 
+#define		Null			0
+#define 	GetAttributes		1
+#define 	SetAttributes		2
+#define 	Lookup			3
+#define 	ReadLink		4
+#define 	Read			5
+#define 	Write			6
+#define		Create			7
+#define		MakeDirectory		8
+#define		SymbolicLink		9
+#define		MakeNode		10
+#define		Removex			11
+#define		RemoveDirectoryx	12
+#define		Rename			13
+#define		Link			14
+#define		ReadDirectory		15
+#define		ReadDirectoryPlus	16
+#define		FileSystemStatus	17
+#define		FileSystemInfo		18
+#define		PathConfigure		19
+#define		Commit			20
+#define		Mount			21
+#define		UnMount			22
+#define		Newfs			23
+#define		FsCheck			24
+#define		FsSync			25
+#define		SimReadWrite		26
+#define		SetFileSystemStatus	27
+#define		BlockRead		28
+#define		BlockWrite		29
+#define		NvramIoctl		30
+#define		FsSyncWait		31
+#define		ClearArchiveBit		32
+#define		SetAcl			33
+#define		GetAcl			34
+#define		AssignAcl		35
+#define		FaultInsertion		36	/* Fault Insertion Command */
+#define		CrazyCache		37	/* Crazycache */
+
+#define		MAX_FSACOMMAND_NUM	38
+
+
+/*
+ *	Define the status returns. These are very Unix-like, although
+ *	most are not in fact used.
+ */
+
+#define		ST_OK		0
+#define		ST_PERM		1
+#define		ST_NOENT	2
+#define		ST_IO		5
+#define		ST_NXIO		6
+#define		ST_E2BIG	7
+#define		ST_ACCES	13
+#define		ST_EXIST	17
+#define		ST_XDEV		18
+#define		ST_NODEV	19
+#define		ST_NOTDIR	20
+#define		ST_ISDIR	21
+#define		ST_INVAL	22
+#define		ST_FBIG		27
+#define		ST_NOSPC	28
+#define		ST_ROFS		30
+#define		ST_MLINK	31
+#define		ST_WOULDBLOCK	35
+#define		ST_NAMETOOLONG	63
+#define		ST_NOTEMPTY	66
+#define		ST_DQUOT	69
+#define		ST_STALE	70
+#define		ST_REMOTE	71
+#define		ST_BADHANDLE	10001
+#define		ST_NOT_SYNC	10002
+#define		ST_BAD_COOKIE	10003
+#define		ST_NOTSUPP	10004
+#define		ST_TOOSMALL	10005
+#define		ST_SERVERFAULT	10006
+#define		ST_BADTYPE	10007
+#define		ST_JUKEBOX	10008
+#define		ST_NOTMOUNTED	10009
+#define		ST_MAINTMODE	10010
+#define		ST_STALEACL	10011
+
+/*
+ *	On writes how does the client want the data written.
+ */
+
+#define	CACHE_CSTABLE		1
+#define CACHE_UNSTABLE		2
+
+/*
+ *	Lets the client know at which level the data was committed on
+ *	a write request.
+ */
+
+#define	CMFILE_SYNCH_NVRAM	1
+#define	CMDATA_SYNCH_NVRAM	2
+#define	CMFILE_SYNCH		3
+#define CMDATA_SYNCH		4
+#define CMUNSTABLE		5
+
+struct aac_read
+{
+	u32	 	command;
+	u32 		cid;
+	u32 		block;
+	u32 		count;
+	struct sgmap	sg;	// Must be last in struct because it is variable
+};
+
+struct aac_read_reply
+{
+	u32	 	status;
+	u32 		count;
+};
+
+struct aac_write
+{
+	u32		command;
+	u32 		cid;
+	u32 		block;
+	u32 		count;
+	u32	 	stable;
+	struct sgmap	sg;	// Must be last in struct because it is variable
+};
+
+struct aac_write_reply
+{
+	u32		status;
+	u32 		count;
+	u32		committed;
+};
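+
+/*
+ *	A note on the fields above (a sketch, not normative): the stable field
+ *	of aac_write carries one of the CACHE_* values defined earlier, and
+ *	the committed field of aac_write_reply reports one of the CM* commit
+ *	levels, also defined earlier.
+ */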
+
+
+/*
+ * Object-Server / Volume-Manager Dispatch Classes
+ */
+
+#define		VM_Null			0
+#define		VM_NameServe		1
+#define		VM_ContainerConfig	2
+#define		VM_Ioctl		3
+#define		VM_FilesystemIoctl	4
+#define		VM_CloseAll		5
+#define		VM_CtBlockRead		6
+#define		VM_CtBlockWrite		7
+#define		VM_SliceBlockRead	8	/* raw access to configured "storage objects" */
+#define		VM_SliceBlockWrite	9
+#define		VM_DriveBlockRead	10	/* raw access to physical devices */
+#define		VM_DriveBlockWrite	11
+#define		VM_EnclosureMgt		12	/* enclosure management */
+#define		VM_Unused		13	/* used to be diskset management */
+#define		VM_CtBlockVerify	14
+#define		VM_CtPerf		15	/* performance test */
+#define		VM_CtBlockRead64	16
+#define		VM_CtBlockWrite64	17
+#define		VM_CtBlockVerify64	18
+
+#define		MAX_VMCOMMAND_NUM	19	/* used for sizing stats array - leave last */
+
+/*
+ *	Descriptive information (eg, vital stats)
+ *	that a content manager might report.  The
+ *	FileArray filesystem component is one example
+ *	of a content manager.  Raw mode might be
+ *	another.
+ */
+
+struct aac_fsinfo {
+	u32  fsTotalSize;	/* Consumed by fs, incl. metadata */
+	u32  fsBlockSize;
+	u32  fsFragSize;
+	u32  fsMaxExtendSize;
+	u32  fsSpaceUnits;
+	u32  fsMaxNumFiles;
+	u32  fsNumFreeFiles;
+	u32  fsInodeDensity;
+};	/* valid iff ObjType == FT_FILESYS && !(ContentState & FSCS_NOTCLEAN) */
+
+union aac_contentinfo {
+	struct aac_fsinfo filesys;	/* valid iff ObjType == FT_FILESYS && !(ContentState & FSCS_NOTCLEAN) */
+};
+
+/*
+ *	Query for "mountable" objects, ie, objects that are typically
+ *	associated with a drive letter on the client (host) side.
+ */
+
+struct aac_mntent {
+	u32    			oid;
+	char			name[16];	// if applicable
+	struct creation_info	create_info;	// if applicable
+	u32			capacity;
+	u32			vol;    	// substrate structure
+	u32			obj;	        // FT_FILESYS, FT_DATABASE, etc.
+	u32			state;		// unready for mounting, readonly, etc.
+	union aac_contentinfo	fileinfo;	// Info specific to content manager (eg, filesystem)
+	u32			altoid;		// != oid <==> snapshot or broken mirror exists
+};
+
+#define FSCS_READONLY	0x0002	/*	possible result of broken mirror */
+
+struct aac_query_mount {
+	u32		command;
+	u32		type;
+	u32		count;
+};
+
+struct aac_mount {
+	u32		status;
+	u32	   	type;           /* should be same as that requested */
+	u32		count;
+	struct aac_mntent mnt[1];
+};
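+
+/*
+ *	Note (assumption based on the count field): mnt[1] is the usual
+ *	variable length trailer idiom - the reply carries count aac_mntent
+ *	entries laid out back to back starting at mnt[0].
+ */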
+
+/*
+ * The following command is sent to shut down each container.
+ */
+
+struct aac_close {
+	u32	command;
+	u32	cid;
+};
+
+struct aac_query_disk
+{
+	s32	cnum;
+	s32	bus;
+	s32	target;
+	s32	lun;
+	u32	valid;
+	u32	locked;
+	u32	deleted;
+	s32	instance;
+	s8	name[10];
+	u32	unmapped;
+};
+
+struct aac_delete_disk {
+	u32	disknum;
+	u32	cnum;
+};
+
+struct fib_ioctl
+{
+	char	*fibctx;
+	int	wait;
+	char	*fib;
+};
+
+/*
+ * 	Ugly - non Linux like ioctl coding for back compat.
+ */
+
+#define CTL_CODE(function, method) (                 \
+    (4<< 16) | ((function) << 2) | (method) \
+)
+
+/*
+ *	Define the method codes for how buffers are passed for I/O and FS 
+ *	controls
+ */
+
+#define METHOD_BUFFERED                 0
+#define METHOD_NEITHER                  3
+
+/*
+ *	Filesystem ioctls
+ */
+
+#define FSACTL_SENDFIB                  	CTL_CODE(2050, METHOD_BUFFERED)
+#define FSACTL_DELETE_DISK			0x163
+#define FSACTL_QUERY_DISK			0x173
+#define FSACTL_OPEN_GET_ADAPTER_FIB		CTL_CODE(2100, METHOD_BUFFERED)
+#define FSACTL_GET_NEXT_ADAPTER_FIB		CTL_CODE(2101, METHOD_BUFFERED)
+#define FSACTL_CLOSE_GET_ADAPTER_FIB		CTL_CODE(2102, METHOD_BUFFERED)
+#define FSACTL_FORCE_DELETE_DISK		CTL_CODE(2120, METHOD_NEITHER)
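+
+/*
+ *	Worked example (for illustration only): CTL_CODE(2050, METHOD_BUFFERED)
+ *	is (4 << 16) | (2050 << 2) | 0 = 0x00042008, so FSACTL_SENDFIB is
+ *	0x00042008 and FSACTL_FORCE_DELETE_DISK is (4 << 16) | (2120 << 2) | 3
+ *	= 0x00042123.  FSACTL_DELETE_DISK and FSACTL_QUERY_DISK are plain
+ *	numbers that do not go through CTL_CODE().
+ */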
+
+struct aac_common
+{
+	/*
+	 *	If this value is set to 1 then interrupt moderation will occur 
+	 *	in the base communication support.
+	 */
+	unsigned long irq_mod;
+	int peak_fibs;
+	int zero_fibs;
+	unsigned long fib_timeouts;
+	/*
+	 *	Statistical counters in debug mode
+	 */
+#ifdef DBG
+	unsigned long FibsSent;
+	unsigned long FibRecved;
+	unsigned long NoResponseSent;
+	unsigned long NoResponseRecved;
+	unsigned long AsyncSent;
+	unsigned long AsyncRecved;
+	unsigned long NormalSent;
+	unsigned long NormalRecved;
+#endif
+};
+
+extern struct aac_common aac_config;
+
+
+/*
+ *	The following macro is used when sending and receiving FIBs. It is
+ *	only used for debugging.
+ */
+ 
+#ifdef DBG
+#define	FIB_COUNTER_INCREMENT(counter)		(counter)++
+#else
+#define	FIB_COUNTER_INCREMENT(counter)		
+#endif
+
+/*
+ *	Adapter direct commands
+ */
+
+#define	BREAKPOINT_REQUEST		0x00000004
+#define	INIT_STRUCT_BASE_ADDRESS	0x00000005
+#define	SEND_SYNCHRONOUS_FIB		0x0000000c
+
+/*
+ *	Adapter Status Register
+ *
+ *  Phase Status mailbox is 32 bits:
+ *	<31:16> = Phase Status
+ *	<15:0>  = Phase
+ *
+ *	The adapter reports its present state through the phase.  Only
+ *	a single phase should ever be set.  Each phase can have multiple
+ *	phase status bits to provide more detailed information about the
+ *	state of the board.  Care should be taken to ensure that any phase
+ *	status bits that are set when changing the phase are also valid
+ *	for the new phase or are cleared out.  Adapter software (monitor,
+ *	iflash, kernel) is responsible for properly maintaining the phase
+ *	status mailbox while it is running.
+ *
+ *	MONKER_API Phases
+ *
+ *	Phases are bit oriented.  It is NOT valid to have multiple bits set.
+ */
+
+#define	SELF_TEST_FAILED		0x00000004
+#define	KERNEL_UP_AND_RUNNING		0x00000080
+#define	KERNEL_PANIC			0x00000100
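+
+/*
+ *	Example (illustrative): the init code reads Mailbox 7 and treats the
+ *	low 16 bits as the phase and the high 16 bits as the phase status,
+ *	so a value of 0x00030100 would mean phase KERNEL_PANIC with phase
+ *	status 0x0003; see the (mailbox >> 16) extraction in aac_rx_init()
+ *	and aac_sa_init().
+ */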
+
+/*
+ *	Doorbell bit defines
+ */
+
+#define DoorBellPrintfDone		(1<<5)	// Host -> Adapter
+#define DoorBellAdapterNormCmdReady	(1<<1)	// Adapter -> Host
+#define DoorBellAdapterNormRespReady	(1<<2)	// Adapter -> Host
+#define DoorBellAdapterNormCmdNotFull	(1<<3)	// Adapter -> Host
+#define DoorBellAdapterNormRespNotFull	(1<<4)	// Adapter -> Host
+#define DoorBellPrintfReady		(1<<5)	// Adapter -> Host
+
+/*
+ *	For FIB communication, we need all of the following things
+ *	to send back to the user.
+ */
+ 
+#define 	AifCmdEventNotify	1	/* Notify of event */
+#define		AifCmdJobProgress	2	/* Progress report */
+#define		AifCmdAPIReport		3	/* Report from other user of API */
+#define		AifCmdDriverNotify	4	/* Notify host driver of event */
+#define		AifReqJobList		100	/* Gets back complete job list */
+#define		AifReqJobsForCtr	101	/* Gets back jobs for specific container */
+#define		AifReqJobsForScsi	102	/* Gets back jobs for specific SCSI device */ 
+#define		AifReqJobReport		103	/* Gets back a specific job report or list of them */ 
+#define		AifReqTerminateJob	104	/* Terminates job */
+#define		AifReqSuspendJob	105	/* Suspends a job */
+#define		AifReqResumeJob		106	/* Resumes a job */ 
+#define		AifReqSendAPIReport	107	/* API generic report requests */
+#define		AifReqAPIJobStart	108	/* Start a job from the API */
+#define		AifReqAPIJobUpdate	109	/* Update a job report from the API */
+#define		AifReqAPIJobFinish	110	/* Finish a job from the API */
+
+/*
+ *	Adapter Initiated FIB command structures. Start with the adapter
+ *	initiated FIBs that really come from the adapter, and get responded
+ *	to by the host.
+ */
+
+struct aac_aifcmd {
+	u32 command;		/* Tell host what type of notify this is */
+	u32 seqnum;		/* To allow ordering of reports (if necessary) */
+	u8 data[1];		/* Undefined length (from kernel viewpoint) */
+};
+
+/*
+ *	Adapter Information Block
+ *
+ *	This is returned by the RequestAdapterInfo block
+ */
+ 
+struct aac_adapter_info
+{
+	u32	platform;
+	u32	cpu;
+	u32	subcpu;
+	u32	clock;
+	u32	execmem;
+	u32	buffermem;
+	u32	totalmem;
+	u32	kernelrev;
+	u32	kernelbuild;
+	u32	monitorrev;
+	u32	monitorbuild;
+	u32	hwrev;
+	u32	hwbuild;
+	u32	biosrev;
+	u32	biosbuild;
+	u32	clustering;
+	u32	clustermask;
+	u64	serial;
+	u32	battery;
+	u32	options;
+	u32	OEM;
+};
+	
+const char *aac_driverinfo(struct Scsi_Host *);
+struct fib *fib_alloc(struct aac_dev *dev);
+int fib_setup(struct aac_dev *dev);
+void fib_map_free(struct aac_dev *dev);
+void fib_free(struct fib * context);
+void fib_init(struct fib * context);
+void fib_dealloc(struct fib * context);
+void aac_printf(struct aac_dev *dev, u32 val);
+int fib_send(u16 command, struct fib * context, unsigned long size, int priority, int wait, int reply, fib_callback callback, void *ctxt);
+int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry);
+int aac_consumer_avail(struct aac_dev * dev, struct aac_queue * q);
+void aac_consumer_free(struct aac_dev * dev, struct aac_queue * q, u32 qnum);
+int fib_complete(struct fib * context);
+#define fib_data(fibctx) ((void *)(fibctx)->fib->data)
+int aac_detach(struct aac_dev *dev);
+struct aac_dev *aac_init_adapter(struct aac_dev *dev);
+int aac_get_containers(struct aac_dev *dev);
+int aac_scsi_cmd(Scsi_Cmnd *scsi_cmnd_ptr);
+int aac_dev_ioctl(struct aac_dev *dev, int cmd, void *arg);
+int aac_do_ioctl(struct aac_dev * dev, int cmd, void *arg);
+int aac_rx_init(struct aac_dev *dev, unsigned long devNumber);
+int aac_sa_init(struct aac_dev *dev, unsigned long devNumber);
+unsigned int aac_response_normal(struct aac_queue * q);
+unsigned int aac_command_normal(struct aac_queue * q);
+int aac_command_thread(struct aac_dev * dev);
+int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context *fibctx);
+int fib_adapter_complete(struct fib * fibptr, unsigned short size);
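+
+/*
+ *	Typical synchronous use of the FIB interface above (a sketch only;
+ *	the command value, size and flag arguments are placeholders):
+ *
+ *		struct fib *fibptr = fib_alloc(dev);
+ *		fib_init(fibptr);
+ *		... build the request in fib_data(fibptr) ...
+ *		fib_send(command, fibptr, size, priority, 1, 1, NULL, NULL);
+ *		... read the reply from fib_data(fibptr) ...
+ *		fib_complete(fibptr);
+ *		fib_free(fibptr);
+ */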
diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.15p3/drivers/scsi/aacraid/linit.c linux.gamma/drivers/scsi/aacraid/linit.c
--- linux.15p3/drivers/scsi/aacraid/linit.c	Thu Jan  1 01:00:00 1970
+++ linux.gamma/drivers/scsi/aacraid/linit.c	Fri Nov 30 13:56:49 2001
@@ -0,0 +1,677 @@
+/*
+ *	Adaptec AAC series RAID controller driver
+ *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ *   linit.c
+ *
+ * Abstract: Linux Driver entry module for Adaptec RAID Array Controller
+ *				
+ *	Provides the following driver entry points:
+ *		aac_detect()
+ *		aac_release()
+ *		aac_queuecommand()
+ *		aac_resetcommand()
+ *		aac_biosparm()
+ *	
+ */
+
+#define AAC_DRIVER_VERSION		"0.9.6ac1"
+#define AAC_DRIVER_BUILD_DATE		__DATE__
+
+#include <linux/module.h>
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/completion.h>
+#include <asm/semaphore.h>
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+
+#include "aacraid.h"
+#include "sd.h"
+
+#define AAC_DRIVERNAME	"aacraid"
+
+MODULE_AUTHOR("Red Hat Inc and Adaptec OEM RAID Solutions");
+MODULE_DESCRIPTION("Supports Dell PERC2, 2/Si, 3/Si, 3/Di, and HP NetRAID-4M devices.  http://domsch.com/linux/");
+MODULE_LICENSE("GPL");
+
+struct aac_dev *aac_devices[MAXIMUM_NUM_ADAPTERS];
+
+static unsigned aac_count = 0;
+static int aac_cfg_major = -1;
+static int single_command_done = 0;
+
+/*
+ * Because of the way Linux names scsi devices, the order in this table has
+ * become important.  Check for on-board Raid first, add-in cards second.
+ */
+
+/* FIXME static */struct aac_driver_ident aac_drivers[] = {
+	{ 0x0000, 0x0000, 0x0000, 0x0000, aac_rx_init, "percraid", "DELL    ", "PERCRAID        " }, /* Dell unknown - uses perc_pciid */
+	{ 0x0000, 0x0000, 0x0000, 0x0000, aac_rx_init, "aacraid", "ADAPTEC  ", "AACRAID         " }, /* unknown - uses rx_pciid */
+	{ 0x0000, 0x0000, 0x0000, 0x0000, aac_sa_init, "aacraid", "ADAPTEC  ", "AACRAID         " }, /* unknown - uses sa_pciid */
+	{ 0x1028, 0x0001, 0x1028, 0x0001, aac_rx_init, "percraid", "DELL    ", "PERCRAID        " }, /* PERC 2/Si */
+	{ 0x1028, 0x0002, 0x1028, 0x0002, aac_rx_init, "percraid", "DELL    ", "PERCRAID        " }, /* PERC 3/Di */
+	{ 0x1028, 0x0003, 0x1028, 0x0003, aac_rx_init, "percraid", "DELL    ", "PERCRAID        " }, /* PERC 3/Si */
+	{ 0x1028, 0x0004, 0x1028, 0x00d0, aac_rx_init, "percraid", "DELL    ", "PERCRAID        " }, /* PERC 3/Si */
+	{ 0x1028, 0x0002, 0x1028, 0x00d1, aac_rx_init, "percraid", "DELL    ", "PERCRAID        " }, /* PERC 3/Di */
+	{ 0x1028, 0x0002, 0x1028, 0x00d9, aac_rx_init, "percraid", "DELL    ", "PERCRAID        " }, /* PERC 3/Di */
+	{ 0x1028, 0x000a, 0x1028, 0x0106, aac_rx_init, "percraid", "DELL    ", "PERCRAID        " }, /* PERC 3/Di */
+	{ 0x1028, 0x000a, 0x1028, 0x011b, aac_rx_init, "percraid", "DELL    ", "PERCRAID        " }, /* PERC 3/Di */
+	{ 0x1028, 0x000a, 0x1028, 0x0121, aac_rx_init, "percraid", "DELL    ", "PERCRAID        " }, /* PERC 3/Di */
+	{ 0x1011, 0x0046, 0x9005, 0x1364, aac_sa_init, "percraid", "DELL    ", "PERCRAID        " }, /* Dell PERC2 "Quad Channel" */
+	{ 0x1011, 0x0046, 0x9005, 0x0365, aac_sa_init, "aacraid",  "ADAPTEC ", "Adaptec 5400S   " }, /* Adaptec 5400S */
+	{ 0x1011, 0x0046, 0x103c, 0x10c2, aac_sa_init, "hpnraid",  "HP      ", "NetRAID-4M      " }  /* HP NetRAID-4M */
+};
+
+#define NUM_AACTYPES	(sizeof(aac_drivers) / sizeof(struct aac_driver_ident))
+static int num_aacdrivers = NUM_AACTYPES;
+
+static int aac_cfg_ioctl(struct inode * inode, struct file * file, unsigned int cmd, unsigned long arg);
+static int aac_cfg_open(struct inode * inode, struct file * file);
+static int aac_cfg_release(struct inode * inode,struct file * file);
+
+static struct file_operations aac_cfg_fops = {
+	owner: THIS_MODULE,
+	ioctl: aac_cfg_ioctl,
+	open: aac_cfg_open,
+	release: aac_cfg_release
+};
+
+static int aac_detect(Scsi_Host_Template *);
+static int aac_release(struct Scsi_Host *);
+static int aac_queuecommand(Scsi_Cmnd *, void (*CompletionRoutine)(Scsi_Cmnd *));
+static int aac_command(Scsi_Cmnd *);
+static int aac_abortcommand(Scsi_Cmnd *scsi_cmnd_ptr);
+static int aac_resetcommand(Scsi_Cmnd *, unsigned int);
+static int aac_biosparm(Scsi_Disk *, kdev_t, int *);
+static int aac_procinfo(char *, char **, off_t, int, int, int);
+static int aac_ioctl(Scsi_Device *, int, void *);
+
+static void aac_queuedepth(struct Scsi_Host *, Scsi_Device *);
+
+/**
+ *	aac_detect	-	Probe for aacraid cards
+ *	@template: SCSI driver template
+ *
+ *	Probe for AAC Host Adapters; initialize, register, and report the
+ *	configuration of each AAC Host Adapter found.
+ *	Returns the number of adapters successfully initialized and 
+ *	registered.
+ *	Initializes all data necessary for this particular SCSI driver.
+ *	Notes:
+ *	The detect routine must not call any of the mid level functions 
+ *	to queue commands because things are not guaranteed to be set 
+ *	up yet. The detect routine can send commands to the host adapter 
+ *	as long as the program control will not be passed to scsi.c in 
+ *	the processing of the command. Note especially that 
+ *	scsi_malloc/scsi_free must not be called.
+ *
+ */
+ 
+static int aac_detect(Scsi_Host_Template *template)
+{
+	int index;
+	int container;
+	u16 vendor_id, device_id, sub_vendor_id = 0, sub_system_id = 0;
+	struct Scsi_Host *host_ptr;
+	struct pci_dev *dev = NULL;
+	struct aac_dev *aac;
+	struct fsa_scsi_hba *fsa_dev_ptr;
+	char *name = NULL;
+	
+	printk(KERN_INFO "Red Hat/Adaptec aacraid driver, %s\n", AAC_DRIVER_BUILD_DATE);
+
+	/* setting up the proc directory structure */
+	template->proc_name = "aacraid";
+
+	for( index = 0; index != num_aacdrivers; index++ )
+	{
+		device_id = aac_drivers[index].device;
+		vendor_id = aac_drivers[index].vendor;
+		name = aac_drivers[index].name;
+		dprintk((KERN_DEBUG "Checking %s %x/%x/%x/%x.\n", 
+			name, vendor_id, device_id,
+			aac_drivers[index].subsystem_vendor,
+			aac_drivers[index].subsystem_device));
+
+		/* If vendor and device ID are 0, this is an unused entry, so skip! */
+		if ( vendor_id == 0 && device_id == 0 ) 
+			continue;
+		dev = NULL;
+		while((dev = pci_find_device(vendor_id, device_id, dev)))
+		{
+			if (pci_enable_device(dev))
+				continue;
+			pci_set_master(dev);
+			pci_set_dma_mask(dev, 0xFFFFFFFFULL);
+
+			if((dev->subsystem_vendor != aac_drivers[index].subsystem_vendor) || 
+			   (dev->subsystem_device != aac_drivers[index].subsystem_device))
+					continue;
+
+			dprintk((KERN_DEBUG "%s device detected.\n", name));
+			dprintk((KERN_DEBUG "%x/%x/%x/%x.\n", vendor_id, device_id, sub_vendor_id, sub_system_id));
+			/* Increment the host adapter count */
+			aac_count++;
+			/*
+			 * scsi_register() allocates memory for a Scsi_Hosts structure and
+			 * links it into the linked list of host adapters. This linked list
+			 * contains the data for all possible <supported> scsi hosts.
+			 * This is similar to the Scsi_Host_Template, except that we have
+			 * one entry for each actual physical host adapter on the system,
+			 * stored as a linked list. If there are two AAC boards, then we
+			 * will need to make two Scsi_Host entries, but there will be only
+			 * one Scsi_Host_Template entry. The second argument to scsi_register()
+			 * specifies the size of the extra memory we want to hold any device 
+			 * specific information.
+			 */
+			host_ptr = scsi_register( template, sizeof(struct aac_dev) );
+			/* 
+			 * These three parameters can be used to allow for wide SCSI 
+			 * and for host adapters that support multiple buses.
+			 */
+			host_ptr->max_id = 17;
+			host_ptr->max_lun = 8;
+			host_ptr->max_channel = 1;
+			host_ptr->irq = dev->irq;		/* Adapter IRQ number */
+			/* host_ptr->base = ( char * )(dev->resource[0].start & ~0xff); */
+			host_ptr->base = dev->resource[0].start;
+			scsi_set_pci_device(host_ptr, dev);
+			dprintk((KERN_DEBUG "Device base address = 0x%lx [0x%lx].\n", host_ptr->base, dev->resource[0].start));
+			dprintk((KERN_DEBUG "Device irq = 0x%x.\n", dev->irq));
+			/*
+			 * The unique_id field is a unique identifier that must
+			 * be assigned so that we have some way of identifying
+			 * each host adapter properly and uniquely. For hosts 
+			 * that do not support more than one card in the
+			 * system, this does not need to be set. It is
+			 * initialized to zero in scsi_register(). This is the 
+			 * value returned as aac->id.
+			 */
+			host_ptr->unique_id = aac_count - 1;
+			/*
+			 *	This function is called after the device list has
+			 *	been built to find the tagged queueing depth 
+			 *	supported for each device.
+			 */
+			host_ptr->select_queue_depths = aac_queuedepth;
+			aac = (struct aac_dev *)host_ptr->hostdata;
+			/* attach a pointer back to Scsi_Host */
+			aac->scsi_host_ptr = host_ptr;	
+			aac->pdev = dev;
+			aac->cardtype =  index;
+			aac->name = aac->scsi_host_ptr->hostt->name;
+			aac->id = aac->scsi_host_ptr->unique_id;
+			/* Initialize the ordinal number of the device to -1 */
+			fsa_dev_ptr = &(aac->fsa_dev);
+			for( container = 0; container < MAXIMUM_NUM_CONTAINERS; container++ )
+				fsa_dev_ptr->devno[container] = -1;
+
+			dprintk((KERN_DEBUG "Initializing Hardware...\n"));
+			if((*aac_drivers[index].init)(aac , host_ptr->unique_id) != 0)
+			{
+				/* device initialization failed */
+				printk(KERN_WARNING "aacraid: device initialization failed.\n");
+				scsi_unregister(host_ptr);
+				aac_count--;
+			} 
+			else
+			{
+				dprintk((KERN_DEBUG "%s:%d device initialization successful.\n", name, host_ptr->unique_id));
+				aac_get_containers(aac);
+				aac_devices[aac_count-1] = aac;
+			}
+		}
+	}
+
+	if( aac_count ){
+		if((aac_cfg_major = register_chrdev( 0, "aac", &aac_cfg_fops))<0)
+			printk(KERN_WARNING "aacraid: unable to register \"aac\" device.\n");
+	}
+
+	template->present = aac_count; /* # of cards of this type found */
+	return aac_count;
+}
+
+/**
+ *	aac_release	-	release SCSI host resources
+ *	@host_ptr: SCSI host to clean up
+ *
+ *	Release all resources previously acquired to support a specific Host 
+ *	Adapter and unregister the AAC Host Adapter.
+ *
+ *	BUGS: Does not wait for the thread it kills to die.
+ */
+
+static int aac_release(struct Scsi_Host *host_ptr)
+{
+	struct aac_dev *dev;
+	dprintk((KERN_DEBUG "aac_release.\n"));
+	dev = (struct aac_dev *)host_ptr->hostdata;
+	/*
+	 *	kill any threads we started
+	 */
+	kill_proc(dev->thread_pid, SIGKILL, 0);
+	wait_for_completion(&dev->aif_completion);
+	/*
+	 *	Call the comm layer to detach from this adapter
+	 */
+	aac_detach(dev);
+	/* Check free orderings... */
+	/* remove interrupt binding */
+	free_irq(host_ptr->irq, dev);
+	iounmap((void * )dev->regs.sa);
+	/* unregister adapter */
+	scsi_unregister(host_ptr);
+	/*
+	 *	FIXME: This assumes no hot plugging is going on...
+	 */
+	if( aac_cfg_major >= 0 )
+	{
+		unregister_chrdev(aac_cfg_major, "aac");
+		aac_cfg_major = -1;
+	}
+	return 0;
+}
+
+/**
+ *	aac_queuecommand	-	queue a SCSI command
+ *	@scsi_cmnd_ptr:	SCSI command to queue
+ *	@CompletionRoutine: Function to call on command completion
+ *
+ *	Queues a command for execution by the associated Host Adapter.
+ */ 
+
+static int aac_queuecommand(Scsi_Cmnd *scsi_cmnd_ptr, void (*CompletionRoutine)(Scsi_Cmnd *))
+{
+	int ret;
+
+	scsi_cmnd_ptr->scsi_done = CompletionRoutine;
+	/*
+	 *	aac_scsi_cmd() handles command processing, setting the 
+	 *	result code and calling completion routine. 
+	 */
+	if((ret = aac_scsi_cmd(scsi_cmnd_ptr)) != 0)
+		dprintk((KERN_DEBUG "aac_scsi_cmd failed.\n"));
+	return ret;
+} 
+
+
+/**
+ *	aac_done	-	Callback function for a non-queued command.
+ *	@scsi_cmnd_ptr:	SCSI command block to wait for
+ *
+ *	Sets single_command done to 1. This lets aac_command complete. 
+ *	This function is obsolete.
+ *
+ *	Bugs: Doesn't actually work properly with multiple controllers
+ */
+ 
+static void aac_done(Scsi_Cmnd * scsi_cmnd_ptr) 
+{
+	single_command_done = 1;
+}
+
+/**
+ *	aac_command	-	synchronous SCSI command execution
+ *	@scsi_cmnd_ptr:	SCSI command to issue
+ *
+ *	Accepts a single command for execution by the associated Host Adapter.
+ *	Waits until it completes and then returns an int where:
+ *		Byte 0 = SCSI status code
+ *		Byte 1 = SCSI 1 byte message
+ *		Byte 2 = host error return
+ *		Byte 3 = mid level error return
+ */
+ 
+static int aac_command(Scsi_Cmnd *scsi_cmnd_ptr )
+{
+	scsi_cmnd_ptr->scsi_done = aac_done;
+	dprintk((KERN_DEBUG "aac_command.\n"));
+
+	/*
+	 *	aac_scsi_cmd() handles command processing, setting the 
+	 *	result code and calling completion routine.
+	 */
+	single_command_done = 0;
+	aac_scsi_cmd(scsi_cmnd_ptr);
+	while(!single_command_done)
+		rmb();
+	return scsi_cmnd_ptr->result;
+} 
+
+/**
+ *	aac_abortcommand	-	Abort command if possible.
+ *	@scsi_cmnd_ptr:	SCSI command block to abort
+ *
+ *	Called when the midlayer wishes to abort a command. We don't support
+ *	this facility, and our firmware looks after life for us. We just
+ *	report the command as busy. 
+ */
+ 
+static int aac_abortcommand(Scsi_Cmnd *scsi_cmnd_ptr )
+{
+	return SCSI_ABORT_BUSY;
+}
+
+/**
+ *	aac_resetcommand	-	Reset command handling
+ *	@scsi_cmnd_ptr:	SCSI command block causing the reset
+ *	@reset_flags: Reset hints from the midlayer code
+ *
+ *	Issue a reset of a SCSI command. We are ourselves not truly a SCSI
+ *	controller and our firmware will do the work for us anyway. Thus this
+ *	is a no-op. We just return SCSI_RESET_PUNT.
+ */
+ 
+static int aac_resetcommand(Scsi_Cmnd *scsi_cmnd_ptr, unsigned int reset_flags )
+{
+	return SCSI_RESET_PUNT;
+}
+
+/**
+ *	aac_driverinfo		-	Returns the host adapter name
+ *	@host_ptr:	Scsi host to report on
+ *
+ *	Returns a static string describing the device in question
+ */
+
+const char *aac_driverinfo(struct Scsi_Host *host_ptr)
+{
+	struct aac_dev *dev = (struct aac_dev *)host_ptr->hostdata;
+	return aac_drivers[dev->cardtype].name;
+}
+
+/**
+ *	aac_biosparm	-	return BIOS parameters for disk
+ *	@disk: SCSI disk object to process
+ *	@device: kdev_t of the disk in question
+ *	@geom: geometry block to fill in
+ *
+ *	Return the Heads/Sectors/Cylinders BIOS Disk Parameters for Disk.  
+ *	The default disk geometry is 64 heads, 32 sectors, and the appropriate 
+ *	number of cylinders so as not to exceed drive capacity.  In order for 
+ *	disks equal to or larger than 1 GB to be addressable by the BIOS
+ *	without exceeding the BIOS limitation of 1024 cylinders, Extended 
+ *	Translation should be enabled.   With Extended Translation enabled, 
+ *	drives between 1 GB inclusive and 2 GB exclusive are given a disk 
+ *	geometry of 128 heads and 32 sectors, and drives above 2 GB inclusive 
+ *	are given a disk geometry of 255 heads and 63 sectors.  However, if 
+ *	the BIOS detects that the Extended Translation setting does not match 
+ *	the geometry in the partition table, then the translation inferred 
+ *	from the partition table will be used by the BIOS, and a warning may 
+ *	be displayed.
+ */
+ 
+static int aac_biosparm(Scsi_Disk *disk, kdev_t device, int *geom )
+{
+	struct diskparm *param = (struct diskparm *)geom;
+	struct buffer_head * buf;
+
+	dprintk((KERN_DEBUG "aac_biosparm.\n"));
+
+	/*
+	 *	Assuming extended translation is enabled - #REVISIT#
+	 */
+	if( disk->capacity >= 2 * 1024 * 1024 ) /* 1 GB in 512 byte sectors */
+	{
+		if( disk->capacity >= 4 * 1024 * 1024 ) /* 2 GB in 512 byte sectors */
+		{
+			param->heads = 255;
+			param->sectors = 63;
+		}
+		else
+		{
+			param->heads = 128;
+			param->sectors = 32;
+		}
+	}
+	else
+	{
+		param->heads = 64;
+		param->sectors = 32;
+	}
+
+	param->cylinders = disk->capacity/(param->heads * param->sectors);
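+
+	/*
+	 *	Worked example (illustration only): an 8388608 sector (4GB)
+	 *	container is >= 2GB, so it is given 255 heads and 63 sectors,
+	 *	and 8388608 / (255 * 63) = 522 cylinders.
+	 */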
+
+	/*
+	 *	Read the first 1024 bytes from the disk device
+	 */
+	
+#warning "Block size assumption is wrong to start with"
+	buf = bread(MKDEV(MAJOR(device), MINOR(device)&~0x0F), 0, 1024);
+	if(buf == NULL)
+		return 0;
+	/* 
+	 *	If the boot sector partition table is valid, search for a partition 
+	 *	table entry whose end_head matches one of the standard geometry 
+	 *	translations ( 64/32, 128/32, 255/63 ).
+	 */
+	 
+	if(*(unsigned short *)(buf->b_data + 0x1fe) == cpu_to_le16(0xaa55))
+	{
+		struct partition *first = (struct partition * )(buf->b_data + 0x1be);
+		struct partition *entry = first;
+		int saved_cylinders = param->cylinders;
+		int num;
+		unsigned char end_head, end_sec;
+
+		for(num = 0; num < 4; num++)
+		{
+			end_head = entry->end_head;
+			end_sec = entry->end_sector & 0x3f;
+
+			if(end_head == 63)
+			{
+				param->heads = 64;
+				param->sectors = 32;
+				break;
+			}
+			else if(end_head == 127)
+			{
+				param->heads = 128;
+				param->sectors = 32;
+				break;
+			}
+			else if(end_head == 254) 
+			{
+				param->heads = 255;
+				param->sectors = 63;
+				break;
+			}
+			entry++;
+		}
+
+		if(num == 4)
+		{
+			end_head = first->end_head;
+			end_sec = first->end_sector & 0x3f;
+		}
+
+		param->cylinders = disk->capacity / (param->heads * param->sectors);
+
+		if(num < 4 && end_sec == param->sectors)
+		{
+			if(param->cylinders != saved_cylinders)
+				dprintk((KERN_DEBUG "Adopting geometry: heads=%d, sectors=%d from partition table %d.\n",
+					param->heads, param->sectors, num));
+		}
+		else if(end_head > 0 || end_sec > 0)
+		{
+			dprintk((KERN_DEBUG "Strange geometry: heads=%d, sectors=%d in partition table %d.\n",
+				end_head + 1, end_sec, num));
+			dprintk((KERN_DEBUG "Using geometry: heads=%d, sectors=%d.\n",
+					param->heads, param->sectors));
+		}
+	}
+	brelse(buf);
+	return 0;
+}
+
+/**
+ *	aac_queuedepth		-	compute queue depths
+ *	@host:	SCSI host in question
+ *	@dev:	SCSI device we are considering
+ *
+ *	Selects queue depths for each target device based on the host adapter's
+ *	total capacity and the queue depth supported by the target device.
+ *	A queue depth of one automatically disables tagged queueing.
+ */
+
+static void aac_queuedepth(struct Scsi_Host * host, Scsi_Device * dev )
+{
+	Scsi_Device * dptr;
+
+	dprintk((KERN_DEBUG "aac_queuedepth.\n"));
+	dprintk((KERN_DEBUG "Device #   Q Depth   Online\n"));
+	dprintk((KERN_DEBUG "---------------------------\n"));
+	for(dptr = dev; dptr != NULL; dptr = dptr->next)
+	{
+		if(dptr->host == host)
+		{
+			dptr->queue_depth = 10;		
+			dprintk((KERN_DEBUG "  %2d         %d        %d\n", 
+				dptr->id, dptr->queue_depth, dptr->online));
+		}
+	}
+}
+
+/*------------------------------------------------------------------------------
+	aac_ioctl()
+
+		Handle SCSI ioctls
+ *----------------------------------------------------------------------------*/
+static int aac_ioctl(Scsi_Device * scsi_dev_ptr, int cmd, void * arg)
+/*----------------------------------------------------------------------------*/
+{
+	struct aac_dev *dev;
+	dprintk((KERN_DEBUG "aac_ioctl.\n"));
+	dev = (struct aac_dev *)scsi_dev_ptr->host->hostdata;
+	return aac_do_ioctl(dev, cmd, arg);
+}
+
+/**
+ *	aac_cfg_open		-	open a configuration file
+ *	@inode: inode being opened
+ *	@file: file handle attached
+ *
+ *	Called when the configuration device is opened. Does the needed
+ *	set up on the handle and then returns.
+ *
+ *	Bugs: This needs extending to check that a given adapter is present
+ *	so we can support hot plugging, and to ref count adapters.
+ */
+
+static int aac_cfg_open(struct inode * inode, struct file * file )
+{
+	unsigned minor_number = MINOR(inode->i_rdev);
+	if(minor_number >= aac_count)
+		return -ENODEV;
+	return 0;
+}
+
+/**
+ *	aac_cfg_release		-	close down an AAC config device
+ *	@inode: inode of configuration file
+ *	@file: file handle of configuration file
+ *	
+ *	Called when the last close of the configuration file handle
+ *	is performed.
+ */
+ 
+static int aac_cfg_release(struct inode * inode, struct file * file )
+{
+	return 0;
+}
+
+/**
+ *	aac_cfg_ioctl		-	AAC configuration request
+ *	@inode: inode of device
+ *	@file: file handle
+ *	@cmd: ioctl command code
+ *	@arg: argument
+ *
+ *	Handles a configuration ioctl. Currently this involves wrapping it
+ *	up and feeding it into the nasty windowsalike glue layer.
+ *
+ *	Bugs: Needs locking against parallel ioctls lower down
+ *	Bugs: Needs to handle hot plugging
+ */
+ 
+static int aac_cfg_ioctl(struct inode * inode,  struct file * file, unsigned int cmd, unsigned long arg )
+{
+	struct aac_dev *dev = aac_devices[MINOR(inode->i_rdev)];
+	return aac_do_ioctl(dev, cmd, (void *)arg);
+}
+
+/*
+ *	To use the low level SCSI driver support via the Linux kernel loadable
+ *	module interface we initialize the global variable driver_template
+ *	(of type Scsi_Host_Template) and then include the file scsi_module.c.
+ */
+ 
+static Scsi_Host_Template driver_template = AAC_HOST_TEMPLATE_ENTRY;
+
+#include "scsi_module.c"
+
+/**
+ *	aac_procinfo	-	Implement /proc/scsi/<drivername>/<n>
+ *	@proc_buffer: memory buffer for I/O
+ *	@start_ptr: pointer to first valid data
+ *	@offset: offset into file
+ *	@bytes_available: space left
+ *	@host_no: scsi host ident
+ *	@write: direction of I/O
+ *
+ *	Used to export driver statistics and other information to the world outside
+ *	the kernel using the proc file system. Also provides an interface to
+ *	feed the driver with information.
+ *
+ *		For reads
+ *			- if offset > 0 return 0
+ *			- if offset == 0 write data to proc_buffer and set the start_ptr to
+ *			beginning of proc_buffer, return the number of characters written.
+ *		For writes
+ *			- writes currently not supported, return 0
+ *
+ *	Bugs:	Only offset zero is handled
+ */
+
+static int aac_procinfo(char *proc_buffer, char **start_ptr,off_t offset,
+			int bytes_available, int host_no, int write)
+{
+	if(write || offset > 0)
+		return 0;
+	*start_ptr = proc_buffer;
+	return sprintf(proc_buffer, "%s  %d\n", "Raid Controller, scsi hba number", host_no);
+}
+
+EXPORT_NO_SYMBOLS;
diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.15p3/drivers/scsi/aacraid/rx.c linux.gamma/drivers/scsi/aacraid/rx.c
--- linux.15p3/drivers/scsi/aacraid/rx.c	Thu Jan  1 01:00:00 1970
+++ linux.gamma/drivers/scsi/aacraid/rx.c	Fri Nov 30 13:57:53 2001
@@ -0,0 +1,415 @@
+/*
+ *	Adaptec AAC series RAID controller driver
+ *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ *  rx.c
+ *
+ * Abstract: Hardware miniport for Drawbridge specific hardware functions.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/blk.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <asm/semaphore.h>
+#include "scsi.h"
+#include "hosts.h"
+
+#include "aacraid.h"
+
+static void aac_rx_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct aac_dev *dev = dev_id;
+	unsigned long bellbits;
+	u8 intstat, mask;
+
+	intstat = rx_readb(dev, MUnit.OISR);
+	/*
+	 *	Read mask and invert because drawbridge is reversed.
+	 *	This allows us to only service interrupts that have 
+	 *	been enabled.
+	 */
+	mask = ~(rx_readb(dev, MUnit.OIMR));
+	/* Check to see if this is our interrupt.  If it isn't just return */
+	if (intstat & mask) 
+	{
+		bellbits = rx_readl(dev, OutboundDoorbellReg);
+		if (bellbits & DoorBellPrintfReady)
+		{
+			aac_printf(dev, rx_readl (dev, IndexRegs.Mailbox[5]));
+			rx_writel(dev, MUnit.ODR,DoorBellPrintfReady);
+			rx_writel(dev, InboundDoorbellReg,DoorBellPrintfDone);
+		}
+		else if (bellbits & DoorBellAdapterNormCmdReady)
+		{
+			aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
+			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
+		}
+		else if (bellbits & DoorBellAdapterNormRespReady) 
+		{
+			aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
+			rx_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady);
+		}
+		else if (bellbits & DoorBellAdapterNormCmdNotFull)
+		{
+			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
+		}
+		else if (bellbits & DoorBellAdapterNormRespNotFull)
+		{
+			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
+		}
+	}
+}
+
+/**
+ *	aac_rx_enable_interrupt	-	Enable event reporting
+ *	@dev: Adapter
+ *	@event: Event to enable
+ *
+ *	Enable event reporting from the i960 for a given event.
+ */
+ 
+static void aac_rx_enable_interrupt(struct aac_dev * dev, u32 event)
+{
+	switch (event) {
+
+	case HostNormCmdQue:
+		dev->irq_mask &= ~(OUTBOUNDDOORBELL_1);
+		break;
+
+	case HostNormRespQue:
+		dev->irq_mask &= ~(OUTBOUNDDOORBELL_2);
+		break;
+
+	case AdapNormCmdNotFull:
+		dev->irq_mask &= ~(OUTBOUNDDOORBELL_3);
+		break;
+
+	case AdapNormRespNotFull:
+		dev->irq_mask &= ~(OUTBOUNDDOORBELL_4);
+		break;
+	}
+}
+
+/**
+ *	aac_rx_disable_interrupt	-	Disable event reporting
+ *	@dev: Adapter
+ *	@event: Event to disable
+ *
+ *	Disable event reporting from the i960 for a given event.
+ */
+
+static void aac_rx_disable_interrupt(struct aac_dev *dev, u32 event)
+{
+	switch (event) {
+
+	case HostNormCmdQue:
+		dev->irq_mask |= (OUTBOUNDDOORBELL_1);
+		break;
+
+	case HostNormRespQue:
+		dev->irq_mask |= (OUTBOUNDDOORBELL_2);
+		break;
+
+	case AdapNormCmdNotFull:
+		dev->irq_mask |= (OUTBOUNDDOORBELL_3);
+		break;
+
+	case AdapNormRespNotFull:
+		dev->irq_mask |= (OUTBOUNDDOORBELL_4);
+		break;
+	}
+}
+
+/**
+ *	rx_sync_cmd	-	send a command and wait
+ *	@dev: Adapter
+ *	@command: Command to execute
+ *	@p1: first parameter
+ *	@p2: second parameter
+ *	@p3: third parameter
+ *	@p4: fourth parameter
+ *	@ret: adapter status
+ *
+ *	This routine will send a synchronous command to the adapter and wait
+ *	for its	completion.
+ */
+
+static int rx_sync_cmd(struct aac_dev *dev, unsigned long command, unsigned long p1, unsigned long p2, unsigned long p3, unsigned long p4, unsigned long *status)
+{
+	unsigned long start;
+	int ok;
+	/*
+	 *	Write the command into Mailbox 0
+	 */
+	rx_writel(dev, InboundMailbox0, command);
+	/*
+	 *	Write the parameters into Mailboxes 1 - 4
+	 */
+	rx_writel(dev, InboundMailbox1, p1);
+	rx_writel(dev, InboundMailbox2, p2);
+	rx_writel(dev, InboundMailbox3, p3);
+	rx_writel(dev, InboundMailbox4, p4);
+	/*
+	 *	Clear the synch command doorbell to start on a clean slate.
+	 */
+	rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
+	/*
+	 *	Disable doorbell interrupts
+	 */
+	rx_writeb(dev, MUnit.OIMR, rx_readb(dev, MUnit.OIMR) | 0x04);
+	/*
+	 *	Force the completion of the mask register write before issuing
+	 *	the interrupt.
+	 */
+	rx_readb (dev, MUnit.OIMR);
+	/*
+	 *	Signal that there is a new synch command
+	 */
+	rx_writel(dev, InboundDoorbellReg, INBOUNDDOORBELL_0);
+
+	ok = 0;
+	start = jiffies;
+
+	/*
+	 *	Wait up to 30 seconds
+	 */
+	while (time_before(jiffies, start+30*HZ))
+	{
+		udelay(5);	/* Delay 5 microseconds to let Mon960 get info. */
+		/*
+		 *	Mon960 will set doorbell0 bit when it has completed the command.
+		 */
+		if (rx_readl(dev, OutboundDoorbellReg) & OUTBOUNDDOORBELL_0) {
+			/*
+			 *	Clear the doorbell.
+			 */
+			rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
+			ok = 1;
+			break;
+		}
+		/*
+		 *	Yield the processor in case we are slow 
+		 */
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(1);
+	}
+	if (ok != 1) {
+		/*
+		 *	Restore interrupt mask even though we timed out
+		 */
+		rx_writeb(dev, MUnit.OIMR, rx_readb(dev, MUnit.OIMR) & 0xfb);
+		return -ETIMEDOUT;
+	}
+	/*
+	 *	Pull the synch status from Mailbox 0.
+	 */
+	*status = rx_readl(dev, IndexRegs.Mailbox[0]);
+	/*
+	 *	Clear the synch command doorbell.
+	 */
+	rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
+	/*
+	 *	Restore interrupt mask
+	 */
+	rx_writeb(dev, MUnit.OIMR, rx_readb(dev, MUnit.OIMR) & 0xfb);
+	return 0;
+
+}
+
+/**
+ *	aac_rx_interrupt_adapter	-	interrupt adapter
+ *	@dev: Adapter
+ *
+ *	Send an interrupt to the i960 and breakpoint it.
+ */
+
+static void aac_rx_interrupt_adapter(struct aac_dev *dev)
+{
+	unsigned long ret;
+	rx_sync_cmd(dev, BREAKPOINT_REQUEST, 0, 0, 0, 0, &ret);
+}
+
+/**
+ *	aac_rx_notify_adapter		-	send an event to the adapter
+ *	@dev: Adapter
+ *	@event: Event to send
+ *
+ *	Notify the i960 that something it probably cares about has
+ *	happened.
+ */
+
+static void aac_rx_notify_adapter(struct aac_dev *dev, u32 event)
+{
+	switch (event) {
+
+	case AdapNormCmdQue:
+		rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_1);
+		break;
+	case HostNormRespNotFull:
+		rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_4);
+		break;
+	case AdapNormRespQue:
+		rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_2);
+		break;
+	case HostNormCmdNotFull:
+		rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_3);
+		break;
+	case HostShutdown:
+//		rx_sync_cmd(dev, HOST_CRASHING, 0, 0, 0, 0, &ret);
+		break;
+	case FastIo:
+		rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_6);
+		break;
+	case AdapPrintfDone:
+		rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_5);
+		break;
+	default:
+		BUG();
+		break;
+	}
+}
+
+/**
+ *	aac_rx_start_adapter		-	activate adapter
+ *	@dev:	Adapter
+ *
+ *	Start up processing on an i960 based AAC adapter
+ */
+
+static void aac_rx_start_adapter(struct aac_dev *dev)
+{
+	unsigned long status;
+	struct aac_init *init;
+
+	init = dev->init;
+	init->HostElapsedSeconds = jiffies/HZ;
+	/*
+	 *	Tell the adapter we are back and up and running so it will scan
+	 *	its command queues and enable our interrupts
+	 */
+	dev->irq_mask = (DoorBellPrintfReady | OUTBOUNDDOORBELL_1 | OUTBOUNDDOORBELL_2 | OUTBOUNDDOORBELL_3 | OUTBOUNDDOORBELL_4);
+	/*
+	 *	First clear out all interrupts.  Then enable the ones that we
+	 *	can handle.
+	 */
+	rx_writeb(dev, MUnit.OIMR, 0xff);
+	rx_writel(dev, MUnit.ODR, 0xffffffff);
+//	rx_writeb(dev, MUnit.OIMR, ~(u8)OUTBOUND_DOORBELL_INTERRUPT_MASK);
+	rx_writeb(dev, MUnit.OIMR, 0xfb);
+
+	rx_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (unsigned long) dev->init_pa, 0, 0, 0, &status);
+}
+
+/**
+ *	aac_rx_init	-	initialize an i960 based AAC card
+ *	@dev: device to configure
+ *	@devnum: adapter number
+ *
+ *	Allocate and set up resources for the i960 based AAC variants. The 
+ *	device_interface in the commregion will be allocated and linked 
+ *	to the comm region.
+ */
+
+int aac_rx_init(struct aac_dev *dev, unsigned long num)
+{
+	unsigned long start, end;
+	unsigned long status;
+	int instance;
+	const char * name;
+
+	dev->devnum = num;
+
+	instance = dev->id;
+	name     = dev->name;
+
+	/*
+	 *	Map in the registers from the adapter.
+	 */
+	if((dev->regs.rx = (struct rx_registers *)ioremap((unsigned long)dev->scsi_host_ptr->base, 8192))==NULL)
+	{	
+		printk(KERN_WARNING "aacraid: unable to map i960.\n" );
+		return -1;
+	}
+	/*
+	 *	Check to see if the board failed any self tests.
+	 */
+	if (rx_readl(dev, IndexRegs.Mailbox[7])&SELF_TEST_FAILED) {
+		printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
+		return -1;
+	}
+	/*
+	 *	Check to see if the board panic'd while booting.
+	 */
+	if (rx_readl(dev, IndexRegs.Mailbox[7]) & KERNEL_PANIC) {
+		printk(KERN_ERR "%s%d: adapter kernel panic'd.\n", dev->name, instance);
+		return -1;
+	}
+	start = jiffies;
+	/*
+	 *	Wait for the adapter to be up and running. Wait up to 3 minutes.
+	 */
+	while (!(rx_readl(dev, IndexRegs.Mailbox[7]) & KERNEL_UP_AND_RUNNING)) 
+	{
+		end = jiffies;
+		if(time_after(end, start+180*HZ))
+		{
+			status = rx_readl(dev, IndexRegs.Mailbox[7]) >> 16;
+			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %ld.\n", dev->name, instance, status);
+			return -1;
+		}
+	}
+	if (request_irq(dev->scsi_host_ptr->irq, aac_rx_intr, SA_SHIRQ|SA_INTERRUPT, "aacraid", (void *)dev)<0) 
+	{
+		printk(KERN_ERR "%s%d: Interrupt unavailable.\n", name, instance);
+		return -1;
+	}
+	/*
+	 *	Fill in the function dispatch table.
+	 */
+	dev->a_ops.adapter_interrupt = aac_rx_interrupt_adapter;
+	dev->a_ops.adapter_enable_int = aac_rx_enable_interrupt;
+	dev->a_ops.adapter_disable_int = aac_rx_disable_interrupt;
+	dev->a_ops.adapter_notify = aac_rx_notify_adapter;
+
+	if (aac_init_adapter(dev) == NULL)
+		return -1;
+	/*
+	 *	Start any kernel threads needed
+	 */
+	dev->thread_pid = kernel_thread((int (*)(void *))aac_command_thread, dev, 0);
+	/*
+	 *	Tell the adapter that all is configured, and it can start
+	 *	accepting requests
+	 */
+	aac_rx_start_adapter(dev);
+	return 0;
+}
diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.15p3/drivers/scsi/aacraid/sap1sup.c linux.gamma/drivers/scsi/aacraid/sap1sup.c
--- linux.15p3/drivers/scsi/aacraid/sap1sup.c	Thu Jan  1 01:00:00 1970
+++ linux.gamma/drivers/scsi/aacraid/sap1sup.c	Fri Nov 30 13:58:03 2001
@@ -0,0 +1,397 @@
+/*
+ *	Adaptec AAC series RAID controller driver
+ *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ *  sap1sup.c
+ *
+ * Abstract: Drawbridge specific support functions
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/blk.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <asm/semaphore.h>
+#include "scsi.h"
+#include "hosts.h"
+
+#include "aacraid.h"
+
+static void aac_sa_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct aac_dev *dev = dev_id;
+	unsigned short intstat, mask;
+
+	intstat = sa_readw(dev, DoorbellReg_p);
+	/*
+	 *	Read mask and invert because drawbridge is reversed.
+	 *	This allows us to only service interrupts that have been enabled.
+	 */
+	mask = ~(sa_readw(dev, SaDbCSR.PRISETIRQMASK));
+
+	/* Check to see if this is our interrupt.  If it isn't just return */
+
+	if (intstat & mask) {
+		if (intstat & PrintfReady) {
+			aac_printf(dev, sa_readl(dev, Mailbox5));
+			sa_writew(dev, DoorbellClrReg_p, PrintfReady); /* clear PrintfReady */
+			sa_writew(dev, DoorbellReg_s, PrintfDone);
+		} else if (intstat & DOORBELL_1) {	// dev -> Host Normal Command Ready
+			aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
+			sa_writew(dev, DoorbellClrReg_p, DOORBELL_1);
+		} else if (intstat & DOORBELL_2) {	// dev -> Host Normal Response Ready
+			aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
+			sa_writew(dev, DoorbellClrReg_p, DOORBELL_2);
+		} else if (intstat & DOORBELL_3) {	// dev -> Host Normal Command Not Full
+			sa_writew(dev, DoorbellClrReg_p, DOORBELL_3);
+		} else if (intstat & DOORBELL_4) {	// dev -> Host Normal Response Not Full
+			sa_writew(dev, DoorbellClrReg_p, DOORBELL_4);
+		}
+	}
+}
+
+/**
+ *	aac_sa_enable_interrupt	-	enable an interrupt event
+ *	@dev: Which adapter to enable.
+ *	@event: Which adapter event.
+ *
+ *	This routine will enable the corresponding adapter event to cause an interrupt on 
+ * 	the host.
+ */
+ 
+void aac_sa_enable_interrupt(struct aac_dev *dev, u32 event)
+{
+	switch (event) {
+
+	case HostNormCmdQue:
+		sa_writew(dev, SaDbCSR.PRICLEARIRQMASK, DOORBELL_1);
+		break;
+
+	case HostNormRespQue:
+		sa_writew(dev, SaDbCSR.PRICLEARIRQMASK, DOORBELL_2);
+		break;
+
+	case AdapNormCmdNotFull:
+		sa_writew(dev, SaDbCSR.PRICLEARIRQMASK, DOORBELL_3);
+		break;
+
+	case AdapNormRespNotFull:
+		sa_writew(dev, SaDbCSR.PRICLEARIRQMASK, DOORBELL_4);
+		break;
+	}
+}
+
+/**
+ *	aac_sa_disable_interrupt	-	disable an interrupt event
+ *	@dev: Which adapter to disable.
+ *	@event: Which adapter event.
+ *
+ *	This routine will disable the corresponding adapter event from causing an
+ * 	interrupt on the host.
+ */
+
+void aac_sa_disable_interrupt (struct aac_dev *dev, u32 event)
+{
+	switch (event) {
+
+	case HostNormCmdQue:
+		sa_writew(dev, SaDbCSR.PRISETIRQMASK, DOORBELL_1);
+		break;
+
+	case HostNormRespQue:
+		sa_writew(dev, SaDbCSR.PRISETIRQMASK, DOORBELL_2);
+		break;
+
+	case AdapNormCmdNotFull:
+		sa_writew(dev, SaDbCSR.PRISETIRQMASK, DOORBELL_3);
+		break;
+
+	case AdapNormRespNotFull:
+		sa_writew(dev, SaDbCSR.PRISETIRQMASK, DOORBELL_4);
+		break;
+	}
+}
+
+/**
+ *	aac_sa_notify_adapter		-	handle adapter notification
+ *	@dev:	Adapter that notification is for
+ *	@event:	Event to notify
+ *
+ *	Notify the adapter of an event
+ */
+ 
+void aac_sa_notify_adapter(struct aac_dev *dev, u32 event)
+{
+	switch (event) {
+
+	case AdapNormCmdQue:
+		sa_writew(dev, DoorbellReg_s,DOORBELL_1);
+		break;
+	case HostNormRespNotFull:
+		sa_writew(dev, DoorbellReg_s,DOORBELL_4);
+		break;
+	case AdapNormRespQue:
+		sa_writew(dev, DoorbellReg_s,DOORBELL_2);
+		break;
+	case HostNormCmdNotFull:
+		sa_writew(dev, DoorbellReg_s,DOORBELL_3);
+		break;
+	case HostShutdown:
+		//sa_sync_cmd(dev, HOST_CRASHING, 0, 0, 0, 0, &ret);
+		break;
+	case FastIo:
+		sa_writew(dev, DoorbellReg_s,DOORBELL_6);
+		break;
+	case AdapPrintfDone:
+		sa_writew(dev, DoorbellReg_s,DOORBELL_5);
+		break;
+	default:
+		BUG();
+		break;
+	}
+}
+
+
+/**
+ *	sa_sync_cmd	-	send a command and wait
+ *	@dev: Adapter
+ *	@command: Command to execute
+ *	@p1: first parameter
+ *	@p2: second parameter
+ *	@p3: third parameter
+ *	@p4: fourth parameter
+ *	@ret: adapter status
+ *
+ *	This routine will send a synchronous command to the adapter and wait
+ *	for its	completion.
+ */
+
+static int sa_sync_cmd(struct aac_dev *dev, unsigned long command, unsigned long p1,unsigned long p2, unsigned long p3,unsigned long p4, unsigned long *ret)
+{
+	unsigned long start;
+ 	int ok;
+	/*
+	 *	Write the Command into Mailbox 0
+	 */
+	sa_writel(dev, Mailbox0, command);
+	/*
+	 *	Write the parameters into Mailboxes 1 - 4
+	 */
+	sa_writel(dev, Mailbox1, p1);
+	sa_writel(dev, Mailbox2, p2);
+	sa_writel(dev, Mailbox3, p3);
+	sa_writel(dev, Mailbox4, p4);
+	/*
+	 *	Clear the synch command doorbell to start on a clean slate.
+	 */
+	sa_writew(dev, DoorbellClrReg_p, DOORBELL_0);
+	/*
+	 *	Signal that there is a new synch command
+	 */
+	sa_writew(dev, DoorbellReg_s, DOORBELL_0);
+
+	ok = 0;
+	start = jiffies;
+
+	while(time_before(jiffies, start+30*HZ))
+	{
+		/*
+		 *	Delay 5uS so that the monitor gets access
+		 */
+		udelay(5);
+		/*
+		 *	Mon110 will set doorbell0 bit when it has 
+		 *	completed the command.
+		 */
+		if(sa_readw(dev, DoorbellReg_p) & DOORBELL_0)  {
+			ok = 1;
+			break;
+		}
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(1);
+	}
+
+	if (ok != 1)
+		return -ETIMEDOUT;
+	/*
+	 *	Clear the synch command doorbell.
+	 */
+	sa_writew( dev, DoorbellClrReg_p, DOORBELL_0);
+	/*
+	 *	Pull the synch status from Mailbox 0.
+	 */
+	*ret = sa_readl(dev, Mailbox0);
+	return 0;
+}
+
+/**
+ *	aac_sa_interrupt_adapter	-	interrupt an adapter
+ *	@dev: Which adapter to interrupt.
+ *
+ *	Send a breakpoint request to the adapter.
+ */
+ 
+static void aac_sa_interrupt_adapter (struct aac_dev *dev)
+{
+	unsigned long ret;
+	sa_sync_cmd(dev, BREAKPOINT_REQUEST, 0, 0, 0, 0, &ret);
+}
+
+/**
+ *	aac_sa_start_adapter		-	activate adapter
+ *	@dev:	Adapter
+ *
+ *	Start up processing on an ARM based AAC adapter
+ */
+
+static void aac_sa_start_adapter(struct aac_dev *dev)
+{
+	unsigned long ret;
+	struct aac_init *init;
+	/*
+	 * Fill in the remaining pieces of the init.
+	 */
+	init = dev->init;
+	init->HostElapsedSeconds = jiffies/HZ;
+
+	dprintk(("INIT\n"));
+	/*
+	 * Tell the adapter we are back and up and running so it will scan its command
+	 * queues and enable our interrupts
+	 */
+	dev->irq_mask =	(PrintfReady | DOORBELL_1 | DOORBELL_2 | DOORBELL_3 | DOORBELL_4);
+	/*
+	 *	First clear out all interrupts.  Then enable the ones that
+	 *	we can handle.
+	 */
+	dprintk(("MASK\n"));
+	sa_writew(dev, SaDbCSR.PRISETIRQMASK, (unsigned short) 0xffff);
+	sa_writew(dev, SaDbCSR.PRICLEARIRQMASK, (PrintfReady | DOORBELL_1 | DOORBELL_2 | DOORBELL_3 | DOORBELL_4));
+	dprintk(("SYNCCMD\n"));
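+	/*
+	 *	Hand the adapter the physical address of the init structure;
+	 *	it reads its setup (comm area layout and the like) from there.
+	 */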
+	sa_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (unsigned long) dev->init_pa, 0, 0, 0, &ret);
+}
+
+/**
+ *	aac_sa_init	-	initialize an ARM based AAC card
+ *	@dev: device to configure
+ *	@devnum: adapter number
+ *
+ *	Allocate and set up resources for the ARM based AAC variants. The 
+ *	device_interface in the commregion will be allocated and linked 
+ *	to the comm region.
+ */
+
+int aac_sa_init(struct aac_dev *dev, unsigned long devnum)
+{
+	unsigned long start;
+	unsigned long status;
+	int instance;
+	const char *name;
+
+	dev->devnum = devnum;
+
+	dprintk(("PREINST\n"));
+	instance = dev->id;
+	name     = dev->name;
+
+	/*
+	 *	Map in the registers from the adapter.
+	 */
+	dprintk(("PREMAP\n"));
+
+	if ((dev->regs.sa = (struct sa_registers *)ioremap((unsigned long)dev->scsi_host_ptr->base, 8192)) == NULL) {
+		printk(KERN_WARNING "aacraid: unable to map ARM.\n");
+		return -1;
+	}
+	/*
+	 *	Check to see if the board failed any self tests.
+	 */
+	if (sa_readl(dev, Mailbox7) & SELF_TEST_FAILED) {
+		printk(KERN_WARNING "%s%d: adapter self-test failed.\n", name, instance);
+		return -1;
+	}
+	/*
+	 *	Check to see if the board panic'd while booting.
+	 */
+	if (sa_readl(dev, Mailbox7) & KERNEL_PANIC) {
+		printk(KERN_WARNING "%s%d: adapter kernel panic'd.\n", name, instance);
+		return -1;
+	}
+	start = jiffies;
+	/*
+	 *	Wait for the adapter to be up and running. Wait up to 3 minutes.
+	 */
+	while (!(sa_readl(dev, Mailbox7) & KERNEL_UP_AND_RUNNING)) {
+		if (time_after(jiffies, start+180*HZ)) {
+			status = sa_readl(dev, Mailbox7) >> 16;
+			printk(KERN_WARNING "%s%d: adapter kernel failed to start, init status = %lu.\n", name, instance, status);
+			return -1;
+		}
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(1);
+	}
+
+	dprintk(("ATIRQ\n"));
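+	/*
+	 *	SA_SHIRQ: the interrupt line may be shared with other devices.
+	 *	SA_INTERRUPT: the handler runs with interrupts disabled.
+	 */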
+	if (request_irq(dev->scsi_host_ptr->irq, aac_sa_intr, SA_SHIRQ|SA_INTERRUPT, "aacraid", (void *)dev ) < 0) {
+		printk(KERN_WARNING "%s%d: Interrupt unavailable.\n", name, instance);
+		return -1;
+	}
+
+	/*
+	 *	Fill in the function dispatch table.
+	 */
+
+	dev->a_ops.adapter_interrupt = aac_sa_interrupt_adapter;
+	dev->a_ops.adapter_enable_int = aac_sa_enable_interrupt;
+	dev->a_ops.adapter_disable_int = aac_sa_disable_interrupt;
+	dev->a_ops.adapter_notify = aac_sa_notify_adapter;
+
+	dprintk(("FUNCDONE\n"));
+
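+	/*
+	 *	aac_init_adapter() allocates the comm region described above
+	 *	and links it to this device; bail out if that fails.
+	 */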
+	if (aac_init_adapter(dev) == NULL)
+		return -1;
+
+	dprintk(("NEWADAPTDONE\n"));
+	/*
+	 *	Start any kernel threads needed
+	 */
+	dev->thread_pid = kernel_thread((int (*)(void *))aac_command_thread, dev, 0);
+	/*
+	 *	Tell the adapter that all is configured, and it can start
+	 *	accepting requests.
+	 */
+	dprintk(("STARTING\n"));
+	aac_sa_start_adapter(dev);
+	dprintk(("STARTED\n"));
+	return 0;
+}
+
diff -u --new-file --recursive --exclude-from /usr/src/exclude linux.15p3/drivers/scsi/scsi_scan.c linux.gamma/drivers/scsi/scsi_scan.c
--- linux.15p3/drivers/scsi/scsi_scan.c	Mon Nov 12 09:11:52 2001
+++ linux.gamma/drivers/scsi/scsi_scan.c	Mon Nov 19 11:42:23 2001
@@ -160,6 +160,8 @@
 	{"SONY", "TSL",       "*", BLIST_FORCELUN},  // DDS3 & DDS4 autoloaders
 	{"DELL", "PERCRAID", "*", BLIST_FORCELUN},
 	{"HP", "NetRAID-4M", "*", BLIST_FORCELUN},
+	{"ADAPTEC", "AACRAID", "*", BLIST_FORCELUN},
+	{"ADAPTEC", "Adaptec 5400S", "*", BLIST_FORCELUN},
 
 	/*
 	 * Must be at end of list...
