GIT 04c573e1d1625b48b3c90f988579d7835f4c55f3 master.kernel.org:/pub/scm/linux/kernel/git/aegl/linux-2.6.git#test

---

Signed-off-by: Andrew Morton <akpm@osdl.org>
---

 arch/ia64/Kconfig                          |    6 
 arch/ia64/hp/sim/boot/boot_head.S          |   31 
 arch/ia64/hp/sim/boot/fw-emu.c             |   11 
 arch/ia64/ia32/ia32_signal.c               |    1 
 arch/ia64/kernel/Makefile                  |    1 
 arch/ia64/kernel/asm-offsets.c             |   40 
 arch/ia64/kernel/cpufreq/Kconfig           |   29 
 arch/ia64/kernel/cpufreq/Makefile          |    1 
 arch/ia64/kernel/cpufreq/acpi-cpufreq.c    |  499 +++++++++++
 arch/ia64/kernel/mca.c                     |  808 ++++++++++--------
 arch/ia64/kernel/mca_asm.S                 | 1281 ++++++++++++++---------------
 arch/ia64/kernel/mca_drv.c                 |   37 
 arch/ia64/kernel/minstate.h                |   88 -
 arch/ia64/kernel/palinfo.c                 |  123 +-
 arch/ia64/kernel/salinfo.c                 |   62 -
 arch/ia64/kernel/sys_ia64.c                |    2 
 arch/ia64/kernel/uncached.c                |    4 
 arch/ia64/kernel/unwind.c                  |   22 
 arch/ia64/kernel/vmlinux.lds.S             |    1 
 arch/ia64/lib/Makefile                     |    2 
 arch/ia64/lib/swiotlb.c                    |    5 
 arch/ia64/mm/hugetlbpage.c                 |    8 
 arch/ia64/mm/init.c                        |   15 
 arch/ia64/pci/pci.c                        |    1 
 arch/ia64/sn/include/tio.h                 |    6 
 arch/ia64/sn/include/xtalk/hubdev.h        |   11 
 arch/ia64/sn/kernel/bte.c                  |   83 +
 arch/ia64/sn/kernel/huberror.c             |    2 
 arch/ia64/sn/kernel/io_init.c              |   35 
 arch/ia64/sn/kernel/irq.c                  |   75 -
 arch/ia64/sn/kernel/setup.c                |   37 
 arch/ia64/sn/kernel/sn2/ptc_deadlock.S     |   13 
 arch/ia64/sn/kernel/sn2/sn2_smp.c          |  256 +++++
 arch/ia64/sn/kernel/sn2/sn_hwperf.c        |  315 +++++--
 arch/ia64/sn/kernel/sn2/sn_proc_fs.c       |    4 
 arch/ia64/sn/kernel/sn2/timer_interrupt.c  |   22 
 arch/ia64/sn/pci/Makefile                  |    2 
 arch/ia64/sn/pci/pcibr/pcibr_dma.c         |   60 -
 arch/ia64/sn/pci/pcibr/pcibr_provider.c    |   40 
 arch/ia64/sn/pci/tioca_provider.c          |    7 
 arch/ia64/sn/pci/tioce_provider.c          |  771 +++++++++++++++++
 drivers/char/snsc_event.c                  |   11 
 include/asm-ia64/acpi.h                    |    5 
 include/asm-ia64/fcntl.h                   |    3 
 include/asm-ia64/io.h                      |    4 
 include/asm-ia64/mca.h                     |  102 +-
 include/asm-ia64/mca_asm.h                 |  131 --
 include/asm-ia64/mmu.h                     |    8 
 include/asm-ia64/mmu_context.h             |   59 -
 include/asm-ia64/page.h                    |   27 
 include/asm-ia64/pal.h                     |   21 
 include/asm-ia64/pgtable.h                 |   13 
 include/asm-ia64/ptrace.h                  |    2 
 include/asm-ia64/rwsem.h                   |   35 
 include/asm-ia64/sn/addrs.h                |  112 +-
 include/asm-ia64/sn/geo.h                  |    3 
 include/asm-ia64/sn/intr.h                 |    3 
 include/asm-ia64/sn/nodepda.h              |    3 
 include/asm-ia64/sn/pcibus_provider_defs.h |    8 
 include/asm-ia64/sn/pda.h                  |    1 
 include/asm-ia64/sn/sn2/sn_hwperf.h        |   10 
 include/asm-ia64/sn/sn_feature_sets.h      |   57 +
 include/asm-ia64/sn/sn_sal.h               |   96 --
 include/asm-ia64/sn/tioce.h                |  740 ++++++++++++++++
 include/asm-ia64/sn/tioce_provider.h       |   66 +
 include/asm-ia64/spinlock.h                |   33 
 include/asm-ia64/system.h                  |    5 
 include/asm-ia64/thread_info.h             |    2 
 include/asm-ia64/unwind.h                  |    7 
 include/linux/sched.h                      |    2 
 kernel/sched.c                             |   28 
 71 files changed, 4655 insertions(+), 1759 deletions(-)

diff -puN arch/ia64/hp/sim/boot/boot_head.S~git-ia64 arch/ia64/hp/sim/boot/boot_head.S
--- 25/arch/ia64/hp/sim/boot/boot_head.S~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/arch/ia64/hp/sim/boot/boot_head.S	2005-09-01 05:34:24.000000000 -0600
@@ -4,6 +4,7 @@
  */
 
 #include <asm/asmmacro.h>
+#include <asm/pal.h>
 
 	.bss
 	.align 16
@@ -49,7 +50,11 @@ GLOBAL_ENTRY(jmp_to_kernel)
 	br.sptk.few b7
 END(jmp_to_kernel)
 
-
+/*
+ * r28 contains the index of the PAL function,
+ * r29--31 contain the args.
+ * Return values are in ret0--3 (r8--11).
+ */
 GLOBAL_ENTRY(pal_emulator_static)
 	mov r8=-1
 	mov r9=256
@@ -62,7 +67,7 @@ GLOBAL_ENTRY(pal_emulator_static)
 	cmp.gtu p6,p7=r9,r28
 (p6)	br.cond.sptk.few stacked
 	;;
-static:	cmp.eq p6,p7=6,r28		/* PAL_PTCE_INFO */
+static:	cmp.eq p6,p7=PAL_PTCE_INFO,r28
 (p7)	br.cond.sptk.few 1f
 	;;
 	mov r8=0			/* status = 0 */
@@ -70,21 +75,21 @@ static:	cmp.eq p6,p7=6,r28		/* PAL_PTCE_
 	movl r10=0x0000000200000003	/* count[0], count[1] */
 	movl r11=0x1000000000002000	/* stride[0], stride[1] */
 	br.cond.sptk.few rp
-1:	cmp.eq p6,p7=14,r28		/* PAL_FREQ_RATIOS */
+1:	cmp.eq p6,p7=PAL_FREQ_RATIOS,r28
 (p7)	br.cond.sptk.few 1f
 	mov r8=0			/* status = 0 */
 	movl r9 =0x100000064		/* proc_ratio (1/100) */
 	movl r10=0x100000100		/* bus_ratio<<32 (1/256) */
 	movl r11=0x100000064		/* itc_ratio<<32 (1/100) */
 	;;
-1:	cmp.eq p6,p7=19,r28		/* PAL_RSE_INFO */
+1:	cmp.eq p6,p7=PAL_RSE_INFO,r28
 (p7)	br.cond.sptk.few 1f
 	mov r8=0			/* status = 0 */
 	mov r9=96			/* num phys stacked */
 	mov r10=0			/* hints */
 	mov r11=0
 	br.cond.sptk.few rp
-1:	cmp.eq p6,p7=1,r28		/* PAL_CACHE_FLUSH */
+1:	cmp.eq p6,p7=PAL_CACHE_FLUSH,r28
 (p7)	br.cond.sptk.few 1f
 	mov r9=ar.lc
 	movl r8=524288			/* flush 512k million cache lines (16MB) */
@@ -102,7 +107,7 @@ static:	cmp.eq p6,p7=6,r28		/* PAL_PTCE_
 	mov ar.lc=r9
 	mov r8=r0
 	;;
-1:	cmp.eq p6,p7=15,r28		/* PAL_PERF_MON_INFO */
+1:	cmp.eq p6,p7=PAL_PERF_MON_INFO,r28
 (p7)	br.cond.sptk.few 1f
 	mov r8=0			/* status = 0 */
 	movl r9 =0x08122f04		/* generic=4 width=47 retired=8 cycles=18 */
@@ -138,6 +143,20 @@ static:	cmp.eq p6,p7=6,r28		/* PAL_PTCE_
 	st8 [r29]=r0,16			/* clear remaining bits  */
 	st8 [r18]=r0,16			/* clear remaining bits  */
 	;;
+1:	cmp.eq p6,p7=PAL_VM_SUMMARY,r28
+(p7)	br.cond.sptk.few 1f
+	mov	r8=0			/* status = 0  */
+	movl	r9=0x2044040020F1865	/* num_tc_levels=2, num_unique_tcs=4 */
+					/* max_itr_entry=64, max_dtr_entry=64 */
+					/* hash_tag_id=2, max_pkr=15 */
+					/* key_size=24, phys_add_size=50, vw=1 */
+	movl	r10=0x183C		/* rid_size=24, impl_va_msb=60 */
+	;;
+1:	cmp.eq p6,p7=PAL_MEM_ATTRIB,r28
+(p7)	br.cond.sptk.few 1f
+	mov	r8=0			/* status = 0 */
+	mov	r9=0x80|0x01		/* NatPage|WB */
+	;;
 1:	br.cond.sptk.few rp
 stacked:
 	br.ret.sptk.few rp
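
For reference, the static PAL dispatch above follows the convention in the
new header comment: function index in r28, args in r29--31, status and
values returned in r8--11.  A minimal C sketch of the equivalent dispatch
(not part of the patch; struct pal_ret is illustrative, and the literal
index values 6 and 14 are taken from the comments the patch replaces with
names from <asm/pal.h>):

	/* Sketch only: status lands in r8, values in r9--11. */
	struct pal_ret { long status; unsigned long v0, v1, v2; };

	static struct pal_ret pal_emulator(unsigned long index)
	{
		struct pal_ret r = { -1, 0, 0, 0 };	/* default: not implemented */

		switch (index) {
		case 6:		/* PAL_PTCE_INFO */
			r.status = 0;
			r.v1 = 0x0000000200000003UL;	/* count[0], count[1] */
			r.v2 = 0x1000000000002000UL;	/* stride[0], stride[1] */
			break;
		case 14:	/* PAL_FREQ_RATIOS */
			r.status = 0;
			r.v0 = 0x100000064UL;	/* proc_ratio (1/100) */
			r.v1 = 0x100000100UL;	/* bus_ratio<<32 (1/256) */
			r.v2 = 0x100000064UL;	/* itc_ratio<<32 (1/100) */
			break;
		}
		return r;
	}
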
diff -puN arch/ia64/hp/sim/boot/fw-emu.c~git-ia64 arch/ia64/hp/sim/boot/fw-emu.c
--- 25/arch/ia64/hp/sim/boot/fw-emu.c~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/arch/ia64/hp/sim/boot/fw-emu.c	2005-09-01 05:34:24.000000000 -0600
@@ -237,17 +237,6 @@ sal_emulator (long index, unsigned long 
 	return ((struct sal_ret_values) {status, r9, r10, r11});
 }
 
-
-/*
- * This is here to work around a bug in egcs-1.1.1b that causes the
- * compiler to crash (seems like a bug in the new alias analysis code.
- */
-void *
-id (long addr)
-{
-	return (void *) addr;
-}
-
 struct ia64_boot_param *
 sys_fw_init (const char *args, int arglen)
 {
diff -puN arch/ia64/ia32/ia32_signal.c~git-ia64 arch/ia64/ia32/ia32_signal.c
--- 25/arch/ia64/ia32/ia32_signal.c~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/arch/ia64/ia32/ia32_signal.c	2005-09-01 05:34:24.000000000 -0600
@@ -29,7 +29,6 @@
 #include <asm/uaccess.h>
 #include <asm/rse.h>
 #include <asm/sigcontext.h>
-#include <asm/segment.h>
 
 #include "ia32priv.h"
 
diff -puN arch/ia64/Kconfig~git-ia64 arch/ia64/Kconfig
--- 25/arch/ia64/Kconfig~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/arch/ia64/Kconfig	2005-09-01 05:34:24.000000000 -0600
@@ -379,6 +379,12 @@ source "drivers/pci/hotplug/Kconfig"
 
 source "drivers/pcmcia/Kconfig"
 
+if PM
+
+source "arch/ia64/kernel/cpufreq/Kconfig"
+
+endif
+
 endmenu
 
 endif
diff -puN arch/ia64/kernel/asm-offsets.c~git-ia64 arch/ia64/kernel/asm-offsets.c
--- 25/arch/ia64/kernel/asm-offsets.c~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/arch/ia64/kernel/asm-offsets.c	2005-09-01 05:34:24.000000000 -0600
@@ -211,17 +211,41 @@ void foo(void)
 #endif
 
 	BLANK();
-	DEFINE(IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET,
-	       offsetof (struct ia64_mca_cpu, proc_state_dump));
-	DEFINE(IA64_MCA_CPU_STACK_OFFSET,
-	       offsetof (struct ia64_mca_cpu, stack));
-	DEFINE(IA64_MCA_CPU_STACKFRAME_OFFSET,
-	       offsetof (struct ia64_mca_cpu, stackframe));
-	DEFINE(IA64_MCA_CPU_RBSTORE_OFFSET,
-	       offsetof (struct ia64_mca_cpu, rbstore));
+	DEFINE(IA64_MCA_CPU_MCA_STACK_OFFSET,
+	       offsetof (struct ia64_mca_cpu, mca_stack));
 	DEFINE(IA64_MCA_CPU_INIT_STACK_OFFSET,
 	       offsetof (struct ia64_mca_cpu, init_stack));
 	BLANK();
+	DEFINE(IA64_SAL_OS_STATE_COMMON_OFFSET,
+	       offsetof (struct ia64_sal_os_state, sal_ra));
+	DEFINE(IA64_SAL_OS_STATE_OS_GP_OFFSET,
+	       offsetof (struct ia64_sal_os_state, os_gp));
+	DEFINE(IA64_SAL_OS_STATE_PAL_MIN_STATE_OFFSET,
+	       offsetof (struct ia64_sal_os_state, pal_min_state));
+	DEFINE(IA64_SAL_OS_STATE_PROC_STATE_PARAM_OFFSET,
+	       offsetof (struct ia64_sal_os_state, proc_state_param));
+	DEFINE(IA64_SAL_OS_STATE_SIZE,
+	       sizeof (struct ia64_sal_os_state));
+	DEFINE(IA64_PMSA_GR_OFFSET,
+	       offsetof (struct pal_min_state_area_s, pmsa_gr));
+	DEFINE(IA64_PMSA_BANK1_GR_OFFSET,
+	       offsetof (struct pal_min_state_area_s, pmsa_bank1_gr));
+	DEFINE(IA64_PMSA_PR_OFFSET,
+	       offsetof (struct pal_min_state_area_s, pmsa_pr));
+	DEFINE(IA64_PMSA_BR0_OFFSET,
+	       offsetof (struct pal_min_state_area_s, pmsa_br0));
+	DEFINE(IA64_PMSA_RSC_OFFSET,
+	       offsetof (struct pal_min_state_area_s, pmsa_rsc));
+	DEFINE(IA64_PMSA_IIP_OFFSET,
+	       offsetof (struct pal_min_state_area_s, pmsa_iip));
+	DEFINE(IA64_PMSA_IPSR_OFFSET,
+	       offsetof (struct pal_min_state_area_s, pmsa_ipsr));
+	DEFINE(IA64_PMSA_IFS_OFFSET,
+	       offsetof (struct pal_min_state_area_s, pmsa_ifs));
+	DEFINE(IA64_PMSA_XIP_OFFSET,
+	       offsetof (struct pal_min_state_area_s, pmsa_xip));
+	BLANK();
+
 	/* used by fsys_gettimeofday in arch/ia64/kernel/fsys.S */
 	DEFINE(IA64_TIME_INTERPOLATOR_ADDRESS_OFFSET, offsetof (struct time_interpolator, addr));
 	DEFINE(IA64_TIME_INTERPOLATOR_SOURCE_OFFSET, offsetof (struct time_interpolator, source));
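
The asm-offsets.c additions above exist so the assembly in mca_asm.S can
address fields of struct ia64_sal_os_state and struct pal_min_state_area_s
by name.  A minimal sketch of the kernel's asm-offsets pattern (the
stand-in struct here is illustrative; the generated constants are scraped
into asm-offsets.h by kbuild at compile time):

	#include <stddef.h>

	/* Emit "->SYM value" markers in the compiler's asm output, which
	 * the kbuild sed script turns into "#define SYM value" lines. */
	#define DEFINE(sym, val) \
		asm volatile("\n->" #sym " %0 " #val : : "i" (val))

	struct sos_example { unsigned long sal_ra, os_gp; };	/* stand-in */

	void foo(void)
	{
		DEFINE(IA64_SAL_OS_STATE_OS_GP_OFFSET,
		       offsetof(struct sos_example, os_gp));
	}

Assembly can then use the constant directly, e.g.
"add r3=IA64_SAL_OS_STATE_OS_GP_OFFSET, r2", without hard-coding the
structure layout.
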
diff -puN /dev/null arch/ia64/kernel/cpufreq/acpi-cpufreq.c
--- /dev/null	2004-08-10 19:55:00.000000000 -0600
+++ 25-akpm/arch/ia64/kernel/cpufreq/acpi-cpufreq.c	2005-09-01 05:34:24.000000000 -0600
@@ -0,0 +1,499 @@
+/*
+ * arch/ia64/kernel/cpufreq/acpi-cpufreq.c
+ * This file provides ACPI-based P-state support. The
+ * module works with the generic cpufreq infrastructure. Most of
+ * the code is based on the i386 version
+ * (arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c).
+ *
+ * Copyright (C) 2005 Intel Corp
+ *      Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/pal.h>
+
+#include <linux/acpi.h>
+#include <acpi/processor.h>
+
+#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "acpi-cpufreq", msg)
+
+MODULE_AUTHOR("Venkatesh Pallipadi");
+MODULE_DESCRIPTION("ACPI Processor P-States Driver");
+MODULE_LICENSE("GPL");
+
+
+struct cpufreq_acpi_io {
+	struct acpi_processor_performance	acpi_data;
+	struct cpufreq_frequency_table		*freq_table;
+	unsigned int				resume;
+};
+
+static struct cpufreq_acpi_io	*acpi_io_data[NR_CPUS];
+
+static struct cpufreq_driver acpi_cpufreq_driver;
+
+
+static int
+processor_set_pstate (
+	u32	value)
+{
+	s64 retval;
+
+	dprintk("processor_set_pstate\n");
+
+	retval = ia64_pal_set_pstate((u64)value);
+
+	if (retval) {
+		dprintk("Failed to set freq to 0x%x, with error 0x%x\n",
+		        value, retval);
+		return -ENODEV;
+	}
+	return (int)retval;
+}
+
+
+static int
+processor_get_pstate (
+	u32	*value)
+{
+	u64	pstate_index = 0;
+	s64 	retval;
+
+	dprintk("processor_get_pstate\n");
+
+	retval = ia64_pal_get_pstate(&pstate_index);
+	*value = (u32) pstate_index;
+
+	if (retval)
+		dprintk("Failed to get current freq with "
+		        "error 0x%x, idx 0x%x\n", retval, *value);
+
+	return (int)retval;
+}
+
+
+/* To be used only after data->acpi_data is initialized */
+static unsigned
+extract_clock (
+	struct cpufreq_acpi_io *data,
+	unsigned value,
+	unsigned int cpu)
+{
+	unsigned long i;
+
+	dprintk("extract_clock\n");
+
+	for (i = 0; i < data->acpi_data.state_count; i++) {
+		if (value >= data->acpi_data.states[i].control)
+			return data->acpi_data.states[i].core_frequency;
+	}
+	return data->acpi_data.states[i-1].core_frequency;
+}
+
+
+static unsigned int
+processor_get_freq (
+	struct cpufreq_acpi_io	*data,
+	unsigned int		cpu)
+{
+	int			ret = 0;
+	u32			value = 0;
+	cpumask_t		saved_mask;
+	unsigned long 		clock_freq;
+
+	dprintk("processor_get_freq\n");
+
+	saved_mask = current->cpus_allowed;
+	set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	if (smp_processor_id() != cpu) {
+		ret = -EAGAIN;
+		goto migrate_end;
+	}
+
+	/*
+	 * processor_get_pstate returns the average frequency since the
+	 * last call, so call it twice and use the second result.
+	 */
+	ret = processor_get_pstate(&value);
+	ret = processor_get_pstate(&value);
+
+	if (ret) {
+		set_cpus_allowed(current, saved_mask);
+		printk(KERN_WARNING "get performance failed with error %d\n",
+		       ret);
+		ret = -EAGAIN;
+		goto migrate_end;
+	}
+	clock_freq = extract_clock(data, value, cpu);
+	ret = (clock_freq*1000);
+
+migrate_end:
+	set_cpus_allowed(current, saved_mask);
+	return ret;
+}
+
+
+static int
+processor_set_freq (
+	struct cpufreq_acpi_io	*data,
+	unsigned int		cpu,
+	int			state)
+{
+	int			ret = 0;
+	u32			value = 0;
+	struct cpufreq_freqs    cpufreq_freqs;
+	cpumask_t		saved_mask;
+	int			retval;
+
+	dprintk("processor_set_freq\n");
+
+	saved_mask = current->cpus_allowed;
+	set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	if (smp_processor_id() != cpu) {
+		retval = -EAGAIN;
+		goto migrate_end;
+	}
+
+	if (state == data->acpi_data.state) {
+		if (unlikely(data->resume)) {
+			dprintk("Called after resume, resetting to P%d\n", state);
+			data->resume = 0;
+		} else {
+			dprintk("Already at target state (P%d)\n", state);
+			retval = 0;
+			goto migrate_end;
+		}
+	}
+
+	dprintk("Transitioning from P%d to P%d\n",
+		data->acpi_data.state, state);
+
+	/* cpufreq frequency struct */
+	cpufreq_freqs.cpu = cpu;
+	cpufreq_freqs.old = data->freq_table[data->acpi_data.state].frequency;
+	cpufreq_freqs.new = data->freq_table[state].frequency;
+
+	/* notify cpufreq */
+	cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_PRECHANGE);
+
+	/*
+	 * First we write the target state's 'control' value to the
+	 * control_register.
+	 */
+
+	value = (u32) data->acpi_data.states[state].control;
+
+	dprintk("Transitioning to state: 0x%08x\n", value);
+
+	ret = processor_set_pstate(value);
+	if (ret) {
+		unsigned int tmp = cpufreq_freqs.new;
+		cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE);
+		cpufreq_freqs.new = cpufreq_freqs.old;
+		cpufreq_freqs.old = tmp;
+		cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_PRECHANGE);
+		cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE);
+		printk(KERN_WARNING "Transition failed with error %d\n", ret);
+		retval = -ENODEV;
+		goto migrate_end;
+	}
+
+	cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE);
+
+	data->acpi_data.state = state;
+
+	retval = 0;
+
+migrate_end:
+	set_cpus_allowed(current, saved_mask);
+	return (retval);
+}
+
+
+static unsigned int
+acpi_cpufreq_get (
+	unsigned int		cpu)
+{
+	struct cpufreq_acpi_io *data = acpi_io_data[cpu];
+
+	dprintk("acpi_cpufreq_get\n");
+
+	return processor_get_freq(data, cpu);
+}
+
+
+static int
+acpi_cpufreq_target (
+	struct cpufreq_policy   *policy,
+	unsigned int target_freq,
+	unsigned int relation)
+{
+	struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
+	unsigned int next_state = 0;
+	unsigned int result = 0;
+
+	dprintk("acpi_cpufreq_setpolicy\n");
+
+	result = cpufreq_frequency_table_target(policy,
+			data->freq_table, target_freq, relation, &next_state);
+	if (result)
+		return (result);
+
+	result = processor_set_freq(data, policy->cpu, next_state);
+
+	return (result);
+}
+
+
+static int
+acpi_cpufreq_verify (
+	struct cpufreq_policy   *policy)
+{
+	unsigned int result = 0;
+	struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
+
+	dprintk("acpi_cpufreq_verify\n");
+
+	result = cpufreq_frequency_table_verify(policy,
+			data->freq_table);
+
+	return (result);
+}
+
+
+/*
+ * processor_init_pdc - let BIOS know about the SMP capabilities
+ * of this driver
+ * @perf: processor-specific acpi_io_data struct
+ * @cpu: CPU being initialized
+ *
+ * To avoid issues with legacy OSes, some BIOSes require that they be informed
+ * of the SMP capabilities of the OS P-state driver. Here we set the bits in
+ * _PDC accordingly. The actual call to _PDC is made in drivers/acpi/processor.c
+ */
+static void
+processor_init_pdc (
+		struct acpi_processor_performance *perf,
+		unsigned int cpu,
+		struct acpi_object_list *obj_list
+		)
+{
+	union acpi_object *obj;
+	u32 *buf;
+
+	dprintk("processor_init_pdc\n");
+
+	perf->pdc = NULL;
+	/* Initialize pdc. It will be used later. */
+	if (!obj_list)
+		return;
+
+	if (!(obj_list->count && obj_list->pointer))
+		return;
+
+	obj = obj_list->pointer;
+	if ((obj->buffer.length == 12) && obj->buffer.pointer) {
+		buf = (u32 *)obj->buffer.pointer;
+		buf[0] = ACPI_PDC_REVISION_ID;
+		buf[1] = 1;
+		buf[2] = ACPI_PDC_EST_CAPABILITY_SMP;
+		perf->pdc = obj_list;
+	}
+	return;
+}
+
+
+static int
+acpi_cpufreq_cpu_init (
+	struct cpufreq_policy   *policy)
+{
+	unsigned int		i;
+	unsigned int		cpu = policy->cpu;
+	struct cpufreq_acpi_io	*data;
+	unsigned int		result = 0;
+
+	union acpi_object		arg0 = {ACPI_TYPE_BUFFER};
+	u32				arg0_buf[3];
+	struct acpi_object_list 	arg_list = {1, &arg0};
+
+	dprintk("acpi_cpufreq_cpu_init\n");
+	/* setup arg_list for _PDC settings */
+	arg0.buffer.length = 12;
+	arg0.buffer.pointer = (u8 *) arg0_buf;
+
+	data = kmalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
+	if (!data)
+		return (-ENOMEM);
+
+	memset(data, 0, sizeof(struct cpufreq_acpi_io));
+
+	acpi_io_data[cpu] = data;
+
+	processor_init_pdc(&data->acpi_data, cpu, &arg_list);
+	result = acpi_processor_register_performance(&data->acpi_data, cpu);
+	data->acpi_data.pdc = NULL;
+
+	if (result)
+		goto err_free;
+
+	/* capability check */
+	if (data->acpi_data.state_count <= 1) {
+		dprintk("No P-States\n");
+		result = -ENODEV;
+		goto err_unreg;
+	}
+
+	if ((data->acpi_data.control_register.space_id !=
+					ACPI_ADR_SPACE_FIXED_HARDWARE) ||
+	    (data->acpi_data.status_register.space_id !=
+					ACPI_ADR_SPACE_FIXED_HARDWARE)) {
+		dprintk("Unsupported address space [%d, %d]\n",
+			(u32) (data->acpi_data.control_register.space_id),
+			(u32) (data->acpi_data.status_register.space_id));
+		result = -ENODEV;
+		goto err_unreg;
+	}
+
+	/* alloc freq_table */
+	data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) *
+	                           (data->acpi_data.state_count + 1),
+	                           GFP_KERNEL);
+	if (!data->freq_table) {
+		result = -ENOMEM;
+		goto err_unreg;
+	}
+
+	/* detect transition latency */
+	policy->cpuinfo.transition_latency = 0;
+	for (i=0; i<data->acpi_data.state_count; i++) {
+		if ((data->acpi_data.states[i].transition_latency * 1000) >
+		    policy->cpuinfo.transition_latency) {
+			policy->cpuinfo.transition_latency =
+			    data->acpi_data.states[i].transition_latency * 1000;
+		}
+	}
+	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
+
+	policy->cur = processor_get_freq(data, policy->cpu);
+
+	/* table init */
+	for (i = 0; i <= data->acpi_data.state_count; i++)
+	{
+		data->freq_table[i].index = i;
+		if (i < data->acpi_data.state_count) {
+			data->freq_table[i].frequency =
+			      data->acpi_data.states[i].core_frequency * 1000;
+		} else {
+			data->freq_table[i].frequency = CPUFREQ_TABLE_END;
+		}
+	}
+
+	result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
+	if (result) {
+		goto err_freqfree;
+	}
+
+	/* notify BIOS that we exist */
+	acpi_processor_notify_smm(THIS_MODULE);
+
+	printk(KERN_INFO "acpi-cpufreq: CPU%u - ACPI performance management "
+	       "activated.\n", cpu);
+
+	for (i = 0; i < data->acpi_data.state_count; i++)
+		dprintk("     %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n",
+			(i == data->acpi_data.state?'*':' '), i,
+			(u32) data->acpi_data.states[i].core_frequency,
+			(u32) data->acpi_data.states[i].power,
+			(u32) data->acpi_data.states[i].transition_latency,
+			(u32) data->acpi_data.states[i].bus_master_latency,
+			(u32) data->acpi_data.states[i].status,
+			(u32) data->acpi_data.states[i].control);
+
+	cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
+
+	/* the first call to ->target() should result in us actually
+	 * writing something to the appropriate registers. */
+	data->resume = 1;
+
+	return (result);
+
+ err_freqfree:
+	kfree(data->freq_table);
+ err_unreg:
+	acpi_processor_unregister_performance(&data->acpi_data, cpu);
+ err_free:
+	kfree(data);
+	acpi_io_data[cpu] = NULL;
+
+	return (result);
+}
+
+
+static int
+acpi_cpufreq_cpu_exit (
+	struct cpufreq_policy   *policy)
+{
+	struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
+
+	dprintk("acpi_cpufreq_cpu_exit\n");
+
+	if (data) {
+		cpufreq_frequency_table_put_attr(policy->cpu);
+		acpi_io_data[policy->cpu] = NULL;
+		acpi_processor_unregister_performance(&data->acpi_data,
+		                                      policy->cpu);
+		kfree(data);
+	}
+
+	return (0);
+}
+
+
+static struct freq_attr* acpi_cpufreq_attr[] = {
+	&cpufreq_freq_attr_scaling_available_freqs,
+	NULL,
+};
+
+
+static struct cpufreq_driver acpi_cpufreq_driver = {
+	.verify 	= acpi_cpufreq_verify,
+	.target 	= acpi_cpufreq_target,
+	.get 		= acpi_cpufreq_get,
+	.init		= acpi_cpufreq_cpu_init,
+	.exit		= acpi_cpufreq_cpu_exit,
+	.name		= "acpi-cpufreq",
+	.owner		= THIS_MODULE,
+	.attr           = acpi_cpufreq_attr,
+};
+
+
+static int __init
+acpi_cpufreq_init (void)
+{
+	dprintk("acpi_cpufreq_init\n");
+
+	return cpufreq_register_driver(&acpi_cpufreq_driver);
+}
+
+
+static void __exit
+acpi_cpufreq_exit (void)
+{
+	dprintk("acpi_cpufreq_exit\n");
+
+	cpufreq_unregister_driver(&acpi_cpufreq_driver);
+	return;
+}
+
+
+late_initcall(acpi_cpufreq_init);
+module_exit(acpi_cpufreq_exit);
+
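
acpi_cpufreq_cpu_init() above builds a cpufreq_frequency_table with one
entry per ACPI P-state, in kHz, terminated by a CPUFREQ_TABLE_END sentinel.
A standalone sketch of that table construction (not part of the patch; the
entry struct and sentinel value are stand-ins for the definitions in
<linux/cpufreq.h>):

	#include <stdlib.h>

	#define TABLE_END	(~1U)	/* stand-in for CPUFREQ_TABLE_END */

	struct freq_entry { unsigned int index; unsigned int frequency; };

	/* Build n_states + 1 entries: one per P-state plus the terminator. */
	static struct freq_entry *build_freq_table(unsigned int n_states,
						   const unsigned int *mhz)
	{
		struct freq_entry *t;
		unsigned int i;

		t = malloc((n_states + 1) * sizeof(*t));
		if (!t)
			return NULL;
		for (i = 0; i < n_states; i++) {
			t[i].index = i;
			t[i].frequency = mhz[i] * 1000;	/* core MHz -> kHz */
		}
		t[n_states].index = n_states;
		t[n_states].frequency = TABLE_END;
		return t;
	}
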
diff -puN /dev/null arch/ia64/kernel/cpufreq/Kconfig
--- /dev/null	2004-08-10 19:55:00.000000000 -0600
+++ 25-akpm/arch/ia64/kernel/cpufreq/Kconfig	2005-09-01 05:34:24.000000000 -0600
@@ -0,0 +1,29 @@
+
+#
+# CPU Frequency scaling
+#
+
+menu "CPU Frequency scaling"
+
+source "drivers/cpufreq/Kconfig"
+
+if CPU_FREQ
+
+comment "CPUFreq processor drivers"
+
+config IA64_ACPI_CPUFREQ
+	tristate "ACPI Processor P-States driver"
+	select CPU_FREQ_TABLE
+	depends on ACPI_PROCESSOR
+	help
+	This driver adds a CPUFreq driver which utilizes the ACPI
+	Processor Performance States.
+
+	For details, take a look at <file:Documentation/cpu-freq/>.
+
+	If in doubt, say N.
+
+endif   # CPU_FREQ
+
+endmenu
+
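With the above, the driver is only offered when CPU_FREQ is enabled and
ACPI_PROCESSOR is available (and, per the arch/ia64/Kconfig hunk earlier in
this patch, only when PM is set).  An illustrative .config fragment for
building it as a module (CPU_FREQ_TABLE is pulled in automatically by the
"select" above):

	CONFIG_PM=y
	CONFIG_CPU_FREQ=y
	CONFIG_CPU_FREQ_TABLE=y
	CONFIG_ACPI_PROCESSOR=y
	CONFIG_IA64_ACPI_CPUFREQ=m
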
diff -puN /dev/null arch/ia64/kernel/cpufreq/Makefile
--- /dev/null	2004-08-10 19:55:00.000000000 -0600
+++ 25-akpm/arch/ia64/kernel/cpufreq/Makefile	2005-09-01 05:34:24.000000000 -0600
@@ -0,0 +1 @@
+obj-$(CONFIG_IA64_ACPI_CPUFREQ)		+= acpi-cpufreq.o
diff -puN arch/ia64/kernel/Makefile~git-ia64 arch/ia64/kernel/Makefile
--- 25/arch/ia64/kernel/Makefile~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/arch/ia64/kernel/Makefile	2005-09-01 05:34:24.000000000 -0600
@@ -20,6 +20,7 @@ obj-$(CONFIG_SMP)		+= smp.o smpboot.o do
 obj-$(CONFIG_NUMA)		+= numa.o
 obj-$(CONFIG_PERFMON)		+= perfmon_default_smpl.o
 obj-$(CONFIG_IA64_CYCLONE)	+= cyclone.o
+obj-$(CONFIG_CPU_FREQ)		+= cpufreq/
 obj-$(CONFIG_IA64_MCA_RECOVERY)	+= mca_recovery.o
 obj-$(CONFIG_KPROBES)		+= kprobes.o jprobes.o
 obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR)	+= uncached.o
diff -puN arch/ia64/kernel/mca_asm.S~git-ia64 arch/ia64/kernel/mca_asm.S
--- 25/arch/ia64/kernel/mca_asm.S~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/arch/ia64/kernel/mca_asm.S	2005-09-01 05:34:24.000000000 -0600
@@ -16,6 +16,9 @@
 // 04/11/12 Russ Anderson <rja@sgi.com>
 //		   Added per cpu MCA/INIT stack save areas.
 //
+// 12/08/05 Keith Owens <kaos@sgi.com>
+//		   Use per cpu MCA/INIT stacks for all data.
+//
 #include <linux/config.h>
 #include <linux/threads.h>
 
@@ -25,96 +28,23 @@
 #include <asm/mca_asm.h>
 #include <asm/mca.h>
 
-/*
- * When we get a machine check, the kernel stack pointer is no longer
- * valid, so we need to set a new stack pointer.
- */
-#define	MINSTATE_PHYS	/* Make sure stack access is physical for MINSTATE */
-
-/*
- * Needed for return context to SAL
- */
-#define IA64_MCA_SAME_CONTEXT	0
-#define IA64_MCA_COLD_BOOT	-2
-
-#include "minstate.h"
-
-/*
- * SAL_TO_OS_MCA_HANDOFF_STATE (SAL 3.0 spec)
- *		1. GR1 = OS GP
- *		2. GR8 = PAL_PROC physical address
- *		3. GR9 = SAL_PROC physical address
- *		4. GR10 = SAL GP (physical)
- *		5. GR11 = Rendez state
- *		6. GR12 = Return address to location within SAL_CHECK
- */
-#define SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(_tmp)		\
-	LOAD_PHYSICAL(p0, _tmp, ia64_sal_to_os_handoff_state);; \
-	st8	[_tmp]=r1,0x08;;			\
-	st8	[_tmp]=r8,0x08;;			\
-	st8	[_tmp]=r9,0x08;;			\
-	st8	[_tmp]=r10,0x08;;			\
-	st8	[_tmp]=r11,0x08;;			\
-	st8	[_tmp]=r12,0x08;;			\
-	st8	[_tmp]=r17,0x08;;			\
-	st8	[_tmp]=r18,0x08
-
-/*
- * OS_MCA_TO_SAL_HANDOFF_STATE (SAL 3.0 spec)
- * (p6) is executed if we never entered virtual mode (TLB error)
- * (p7) is executed if we entered virtual mode as expected (normal case)
- *	1. GR8 = OS_MCA return status
- *	2. GR9 = SAL GP (physical)
- *	3. GR10 = 0/1 returning same/new context
- *	4. GR22 = New min state save area pointer
- *	returns ptr to SAL rtn save loc in _tmp
- */
-#define OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(_tmp)	\
-	movl	_tmp=ia64_os_to_sal_handoff_state;;	\
-	DATA_VA_TO_PA(_tmp);;				\
-	ld8	r8=[_tmp],0x08;;			\
-	ld8	r9=[_tmp],0x08;;			\
-	ld8	r10=[_tmp],0x08;;			\
-	ld8	r22=[_tmp],0x08;;
-	// now _tmp is pointing to SAL rtn save location
-
-/*
- * COLD_BOOT_HANDOFF_STATE() sets ia64_mca_os_to_sal_state
- *	imots_os_status=IA64_MCA_COLD_BOOT
- *	imots_sal_gp=SAL GP
- *	imots_context=IA64_MCA_SAME_CONTEXT
- *	imots_new_min_state=Min state save area pointer
- *	imots_sal_check_ra=Return address to location within SAL_CHECK
- *
- */
-#define COLD_BOOT_HANDOFF_STATE(sal_to_os_handoff,os_to_sal_handoff,tmp)\
-	movl	tmp=IA64_MCA_COLD_BOOT;					\
-	movl	sal_to_os_handoff=__pa(ia64_sal_to_os_handoff_state);	\
-	movl	os_to_sal_handoff=__pa(ia64_os_to_sal_handoff_state);;	\
-	st8	[os_to_sal_handoff]=tmp,8;;				\
-	ld8	tmp=[sal_to_os_handoff],48;;				\
-	st8	[os_to_sal_handoff]=tmp,8;;				\
-	movl	tmp=IA64_MCA_SAME_CONTEXT;;				\
-	st8	[os_to_sal_handoff]=tmp,8;;				\
-	ld8	tmp=[sal_to_os_handoff],-8;;				\
-	st8     [os_to_sal_handoff]=tmp,8;;				\
-	ld8	tmp=[sal_to_os_handoff];;				\
-	st8     [os_to_sal_handoff]=tmp;;
+#include "entry.h"
 
 #define GET_IA64_MCA_DATA(reg)						\
 	GET_THIS_PADDR(reg, ia64_mca_data)				\
 	;;								\
 	ld8 reg=[reg]
 
-	.global ia64_os_mca_dispatch
-	.global ia64_os_mca_dispatch_end
-	.global ia64_sal_to_os_handoff_state
-	.global	ia64_os_to_sal_handoff_state
 	.global ia64_do_tlb_purge
+	.global ia64_os_mca_dispatch
+	.global ia64_os_init_dispatch_monarch
+	.global ia64_os_init_dispatch_slave
 
 	.text
 	.align 16
 
+//StartMain////////////////////////////////////////////////////////////////////
+
 /*
  * Just the TLB purge part is moved to a separate function
  * so we can re-use the code for cpu hotplug code as well
@@ -207,34 +137,31 @@ ia64_do_tlb_purge:
 	br.sptk.many b1
 	;;
 
-ia64_os_mca_dispatch:
+//EndMain//////////////////////////////////////////////////////////////////////
+
+//StartMain////////////////////////////////////////////////////////////////////
 
+ia64_os_mca_dispatch:
 	// Serialize all MCA processing
 	mov	r3=1;;
 	LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);;
 ia64_os_mca_spin:
-	xchg8	r4=[r2],r3;;
+	xchg4	r4=[r2],r3;;
 	cmp.ne	p6,p0=r4,r0
 (p6)	br ia64_os_mca_spin
 
-	// Save the SAL to OS MCA handoff state as defined
-	// by SAL SPEC 3.0
-	// NOTE : The order in which the state gets saved
-	//	  is dependent on the way the C-structure
-	//	  for ia64_mca_sal_to_os_state_t has been
-	//	  defined in include/asm/mca.h
-	SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
-	;;
-
-	// LOG PROCESSOR STATE INFO FROM HERE ON..
-begin_os_mca_dump:
-	br	ia64_os_mca_proc_state_dump;;
-
-ia64_os_mca_done_dump:
+	mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET	// use the MCA stack
+	LOAD_PHYSICAL(p0,r2,1f)			// return address
+	mov r19=1				// All MCA events are treated as monarch (for now)
+	br.sptk ia64_state_save			// save the state that is not in minstate
+1:
 
-	LOAD_PHYSICAL(p0,r16,ia64_sal_to_os_handoff_state+56)
+	GET_IA64_MCA_DATA(r2)
+	// Using the MCA stack, locate struct ia64_sal_os_state's proc_state_param
+	;;
+	add r3=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SOS_OFFSET+IA64_SAL_OS_STATE_PROC_STATE_PARAM_OFFSET, r2
 	;;
-	ld8 r18=[r16]		// Get processor state parameter on existing PALE_CHECK.
+	ld8 r18=[r3]				// Get processor state parameter on existing PALE_CHECK.
 	;;
 	tbit.nz p6,p7=r18,60
 (p7)	br.spnt done_tlb_purge_and_reload
@@ -323,624 +250,710 @@ ia64_reload_tr:
 	itr.d dtr[r20]=r16
 	;;
 	srlz.d
-	;;
-	br.sptk.many done_tlb_purge_and_reload
-err:
-	COLD_BOOT_HANDOFF_STATE(r20,r21,r22)
-	br.sptk.many ia64_os_mca_done_restore
 
 done_tlb_purge_and_reload:
 
-	// Setup new stack frame for OS_MCA handling
-	GET_IA64_MCA_DATA(r2)
-	;;
-	add r3 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2
-	add r2 = IA64_MCA_CPU_RBSTORE_OFFSET, r2
-	;;
-	rse_switch_context(r6,r3,r2);;	// RSC management in this new context
+	// switch to per cpu MCA stack
+	mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET	// use the MCA stack
+	LOAD_PHYSICAL(p0,r2,1f)			// return address
+	br.sptk ia64_new_stack
+1:
+
+	// everything saved, now we can set the kernel registers
+	mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET	// use the MCA stack
+	LOAD_PHYSICAL(p0,r2,1f)			// return address
+	br.sptk ia64_set_kernel_registers
+1:
 
+	// This must be done in physical mode
 	GET_IA64_MCA_DATA(r2)
 	;;
-	add r2 = IA64_MCA_CPU_STACK_OFFSET+IA64_MCA_STACK_SIZE-16, r2
-	;;
-	mov r12=r2		// establish new stack-pointer
+	mov r7=r2
 
         // Enter virtual mode from physical mode
 	VIRTUAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_begin, r4)
-ia64_os_mca_virtual_begin:
+
+	// This code returns to SAL via SOS r2, in general SAL has no unwind
+	// data.  To get a clean termination when backtracing the C MCA/INIT
+	// handler, create a dummy return address of 0 in this routine.  That
+	// requires that ia64_os_mca_virtual_begin be a global function.
+ENTRY(ia64_os_mca_virtual_begin)
+	.prologue ASM_UNW_PRLG_RP, ASM_UNW_PRLG_GRSAVE(0)
+
+	mov ar.rsc=3				// set eager mode for C handler
+	mov r2=r7				// see GET_IA64_MCA_DATA above
+	;;
 
 	// Call virtual mode handler
-	movl		r2=ia64_mca_ucmc_handler;;
-	mov		b6=r2;;
-	br.call.sptk.many    b0=b6;;
-.ret0:
+	alloc r14=ar.pfs,0,1,3,0
+	mov loc0=r0				// dummy rp of 0 to terminate backtrace
+	.body
+	;;
+	DATA_PA_TO_VA(r2,r7)
+	;;
+	add out0=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_PT_REGS_OFFSET, r2
+	add out1=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SWITCH_STACK_OFFSET, r2
+	add out2=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SOS_OFFSET, r2
+	br.call.sptk.many    b0=ia64_mca_handler
+
 	// Revert back to physical mode before going back to SAL
 	PHYSICAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_end, r4)
 ia64_os_mca_virtual_end:
 
-	// restore the original stack frame here
-	GET_IA64_MCA_DATA(r2)
+END(ia64_os_mca_virtual_begin)
+
+	// switch back to previous stack
+	alloc r14=ar.pfs,0,0,0,0		// remove the MCA handler frame
+	mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET	// use the MCA stack
+	LOAD_PHYSICAL(p0,r2,1f)			// return address
+	br.sptk ia64_old_stack
+1:
+
+	mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET	// use the MCA stack
+	LOAD_PHYSICAL(p0,r2,1f)			// return address
+	br.sptk ia64_state_restore		// restore the SAL state
+1:
+
+	mov		b0=r12			// SAL_CHECK return address
+
+	// release lock
+	LOAD_PHYSICAL(p0,r3,ia64_mca_serialize);;
+	st4.rel		[r3]=r0
 	;;
-	add r2 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2
+	mov r31=-1
+	LOAD_PHYSICAL(p0,r3,ia64_mca_init_leave);;
 	;;
-	movl    r4=IA64_PSR_MC
+	st4.rel		[r3]=r31
+
+	br		b0
+
+//EndMain//////////////////////////////////////////////////////////////////////
+
+//StartMain////////////////////////////////////////////////////////////////////
+
+//
+// SAL to OS entry point for INIT on all processors.  This has been defined for
+// registration purposes with SAL as a part of ia64_mca_init.  Monarch and
+// slave INIT have identical processing, except for the value of the
+// sos->monarch flag in r19.
+//
+
+ia64_os_init_dispatch_monarch:
+	mov r19=1				// Bow, bow, ye lower middle classes!
+	br.sptk ia64_os_init_dispatch
+
+ia64_os_init_dispatch_slave:
+	mov r19=0				// <igor>yeth, mathter</igor>
+
+ia64_os_init_dispatch:
+
+	mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET	// use the INIT stack
+	LOAD_PHYSICAL(p0,r2,1f)			// return address
+	br.sptk ia64_state_save			// save the state that is not in minstate
+1:
+
+	// switch to per cpu INIT stack
+	mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET	// use the INIT stack
+	LOAD_PHYSICAL(p0,r2,1f)			// return address
+	br.sptk ia64_new_stack
+1:
+
+	// everything saved, now we can set the kernel registers
+	mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET	// use the INIT stack
+	LOAD_PHYSICAL(p0,r2,1f)			// return address
+	br.sptk ia64_set_kernel_registers
+1:
+
+	// This must be done in physical mode
+	GET_IA64_MCA_DATA(r2)
 	;;
-	rse_return_context(r4,r3,r2)	// switch from interrupt context for RSE
+	mov r7=r2
+
+        // Enter virtual mode from physical mode
+	VIRTUAL_MODE_ENTER(r2, r3, ia64_os_init_virtual_begin, r4)
+
+	// This code returns to SAL via SOS r2, in general SAL has no unwind
+	// data.  To get a clean termination when backtracing the C MCA/INIT
+	// handler, create a dummy return address of 0 in this routine.  That
+	// requires that ia64_os_init_virtual_begin be a global function.
+ENTRY(ia64_os_init_virtual_begin)
+	.prologue ASM_UNW_PRLG_RP, ASM_UNW_PRLG_GRSAVE(0)
 
-	// let us restore all the registers from our PSI structure
-	mov	r8=gp
+	mov ar.rsc=3				// set eager mode for C handler
+	mov r2=r7				// see GET_IA64_MCA_DATA above
 	;;
-begin_os_mca_restore:
-	br	ia64_os_mca_proc_state_restore;;
 
-ia64_os_mca_done_restore:
-	OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(r2);;
-	// branch back to SALE_CHECK
-	ld8		r3=[r2];;
-	mov		b0=r3;;		// SAL_CHECK return address
+	// Call virtual mode handler
+	alloc r14=ar.pfs,0,1,3,0
+	mov loc0=r0				// dummy rp of 0 to terminate backtrace
+	.body
+	;;
+	DATA_PA_TO_VA(r2,r7)
+	;;
+	add out0=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_PT_REGS_OFFSET, r2
+	add out1=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_SWITCH_STACK_OFFSET, r2
+	add out2=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_SOS_OFFSET, r2
+	br.call.sptk.many    b0=ia64_init_handler
 
-	// release lock
-	movl		r3=ia64_mca_serialize;;
-	DATA_VA_TO_PA(r3);;
-	st8.rel		[r3]=r0
+	// Revert back to physical mode before going back to SAL
+	PHYSICAL_MODE_ENTER(r2, r3, ia64_os_init_virtual_end, r4)
+ia64_os_init_virtual_end:
 
-	br		b0
+END(ia64_os_init_virtual_begin)
+
+	mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET	// use the INIT stack
+	LOAD_PHYSICAL(p0,r2,1f)			// return address
+	br.sptk ia64_state_restore		// restore the SAL state
+1:
+
+	// switch back to previous stack
+	alloc r14=ar.pfs,0,0,0,0		// remove the INIT handler frame
+	mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET	// use the INIT stack
+	LOAD_PHYSICAL(p0,r2,1f)			// return address
+	br.sptk ia64_old_stack
+1:
+
+	mov		b0=r12			// SAL_CHECK return address
+	;;
+	mov r31=-1
+	LOAD_PHYSICAL(p0,r3,ia64_mca_init_leave);;
 	;;
-ia64_os_mca_dispatch_end:
+	st4.rel		[r3]=r31
+	br		b0
+
 //EndMain//////////////////////////////////////////////////////////////////////
 
+// common defines for the stubs
+#define	ms		r4
+#define	regs		r5
+#define	temp1		r2	/* careful, it overlaps with input registers */
+#define	temp2		r3	/* careful, it overlaps with input registers */
+#define	temp3		r7
+#define	temp4		r14
+
 
 //++
 // Name:
-//      ia64_os_mca_proc_state_dump()
+//	ia64_state_save()
 //
 // Stub Description:
 //
-//       This stub dumps the processor state during MCHK to a data area
+//	Save the state that is not in minstate.  This is sensitive to the layout of
+//	struct ia64_sal_os_state in mca.h.
+//
+//	r2 contains the return address, r3 contains either
+//	IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
+//
+//	The OS to SAL section of struct ia64_sal_os_state is set to a default
+//	value of cold boot (MCA) or warm boot (INIT) and return to the same
+//	context.  ia64_sal_os_state is also used to hold some registers that
+//	need to be saved and restored across the stack switches.
+//
+//	Most input registers to this stub come from PAL/SAL
+//	r1  os gp, physical
+//	r8  pal_proc entry point
+//	r9  sal_proc entry point
+//	r10 sal gp
+//	r11 MCA - rendezvous state, INIT - reason code
+//	r12 sal return address
+//	r17 pal min_state
+//	r18 processor state parameter
+//	r19 monarch flag, set by the caller of this routine
+//
+//	In addition to the SAL to OS state, this routine saves all the
+//	registers that appear in struct pt_regs and struct switch_stack,
+//	excluding those that are already in the PAL minstate area.  This
+//	results in a partial pt_regs and switch_stack, the C code copies the
+//	remaining registers from PAL minstate to pt_regs and switch_stack.  The
+//	resulting structures contain all the state of the original process when
+//	MCA/INIT occurred.
 //
 //--
 
-ia64_os_mca_proc_state_dump:
-// Save bank 1 GRs 16-31 which will be used by c-language code when we switch
-//  to virtual addressing mode.
-	GET_IA64_MCA_DATA(r2)
+ia64_state_save:
+	add regs=MCA_SOS_OFFSET, r3
+	add ms=MCA_SOS_OFFSET+8, r3
+	mov b0=r2		// save return address
+	cmp.eq p1,p2=IA64_MCA_CPU_MCA_STACK_OFFSET, r3
+	;;
+	GET_IA64_MCA_DATA(temp2)
+	;;
+	add temp1=temp2, regs	// struct ia64_sal_os_state on MCA or INIT stack
+	add temp2=temp2, ms	// struct ia64_sal_os_state+8 on MCA or INIT stack
+	;;
+	mov regs=temp1		// save the start of sos
+	st8 [temp1]=r1,16	// os_gp
+	st8 [temp2]=r8,16	// pal_proc
+	;;
+	st8 [temp1]=r9,16	// sal_proc
+	st8 [temp2]=r11,16	// rv_rc
+	mov r11=cr.iipa
+	;;
+	st8 [temp1]=r18,16	// proc_state_param
+	st8 [temp2]=r19,16	// monarch
+	mov r6=IA64_KR(CURRENT)
+	;;
+	st8 [temp1]=r12,16	// sal_ra
+	st8 [temp2]=r10,16	// sal_gp
+	mov r12=cr.isr
+	;;
+	st8 [temp1]=r17,16	// pal_min_state
+	st8 [temp2]=r6,16	// prev_IA64_KR_CURRENT
+	mov r6=cr.ifa
+	;;
+	st8 [temp1]=r0,16	// prev_task, starts off as NULL
+	st8 [temp2]=r12,16	// cr.isr
+	mov r12=cr.itir
+	;;
+	st8 [temp1]=r6,16	// cr.ifa
+	st8 [temp2]=r12,16	// cr.itir
+	mov r12=cr.iim
+	;;
+	st8 [temp1]=r11,16	// cr.iipa
+	st8 [temp2]=r12,16	// cr.iim
+	mov r6=cr.iha
+(p1)	mov r12=IA64_MCA_COLD_BOOT
+(p2)	mov r12=IA64_INIT_WARM_BOOT
+	;;
+	st8 [temp1]=r6,16	// cr.iha
+	st8 [temp2]=r12		// os_status, default is cold boot
+	mov r6=IA64_MCA_SAME_CONTEXT
+	;;
+	st8 [temp1]=r6		// context, default is same context
+
+	// Save the pt_regs data that is not in minstate.  The previous code
+	// left regs at sos.
+	add regs=MCA_PT_REGS_OFFSET-MCA_SOS_OFFSET, regs
+	;;
+	add temp1=PT(B6), regs
+	mov temp3=b6
+	mov temp4=b7
+	add temp2=PT(B7), regs
+	;;
+	st8 [temp1]=temp3,PT(AR_CSD)-PT(B6)		// save b6
+	st8 [temp2]=temp4,PT(AR_SSD)-PT(B7)		// save b7
+	mov temp3=ar.csd
+	mov temp4=ar.ssd
+	cover						// must be last in group
+	;;
+	st8 [temp1]=temp3,PT(AR_UNAT)-PT(AR_CSD)	// save ar.csd
+	st8 [temp2]=temp4,PT(AR_PFS)-PT(AR_SSD)		// save ar.ssd
+	mov temp3=ar.unat
+	mov temp4=ar.pfs
+	;;
+	st8 [temp1]=temp3,PT(AR_RNAT)-PT(AR_UNAT)	// save ar.unat
+	st8 [temp2]=temp4,PT(AR_BSPSTORE)-PT(AR_PFS)	// save ar.pfs
+	mov temp3=ar.rnat
+	mov temp4=ar.bspstore
+	;;
+	st8 [temp1]=temp3,PT(LOADRS)-PT(AR_RNAT)	// save ar.rnat
+	st8 [temp2]=temp4,PT(AR_FPSR)-PT(AR_BSPSTORE)	// save ar.bspstore
+	mov temp3=ar.bsp
+	;;
+	sub temp3=temp3, temp4	// ar.bsp - ar.bspstore
+	mov temp4=ar.fpsr
+	;;
+	shl temp3=temp3,16	// compute ar.rsc to be used for "loadrs"
 	;;
-	add r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2
+	st8 [temp1]=temp3,PT(AR_CCV)-PT(LOADRS)		// save loadrs
+	st8 [temp2]=temp4,PT(F6)-PT(AR_FPSR)		// save ar.fpsr
+	mov temp3=ar.ccv
 	;;
-// save ar.NaT
-	mov		r5=ar.unat                  // ar.unat
+	st8 [temp1]=temp3,PT(F7)-PT(AR_CCV)		// save ar.ccv
+	stf.spill [temp2]=f6,PT(F8)-PT(F6)
+	;;
+	stf.spill [temp1]=f7,PT(F9)-PT(F7)
+	stf.spill [temp2]=f8,PT(F10)-PT(F8)
+	;;
+	stf.spill [temp1]=f9,PT(F11)-PT(F9)
+	stf.spill [temp2]=f10
+	;;
+	stf.spill [temp1]=f11
 
-// save banked GRs 16-31 along with NaT bits
-	bsw.1;;
-	st8.spill	[r2]=r16,8;;
-	st8.spill	[r2]=r17,8;;
-	st8.spill	[r2]=r18,8;;
-	st8.spill	[r2]=r19,8;;
-	st8.spill	[r2]=r20,8;;
-	st8.spill	[r2]=r21,8;;
-	st8.spill	[r2]=r22,8;;
-	st8.spill	[r2]=r23,8;;
-	st8.spill	[r2]=r24,8;;
-	st8.spill	[r2]=r25,8;;
-	st8.spill	[r2]=r26,8;;
-	st8.spill	[r2]=r27,8;;
-	st8.spill	[r2]=r28,8;;
-	st8.spill	[r2]=r29,8;;
-	st8.spill	[r2]=r30,8;;
-	st8.spill	[r2]=r31,8;;
-
-	mov		r4=ar.unat;;
-	st8		[r2]=r4,8                // save User NaT bits for r16-r31
-	mov		ar.unat=r5                  // restore original unat
-	bsw.0;;
-
-//save BRs
-	add		r4=8,r2                  // duplicate r2 in r4
-	add		r6=2*8,r2                // duplicate r2 in r4
-
-	mov		r3=b0
-	mov		r5=b1
-	mov		r7=b2;;
-	st8		[r2]=r3,3*8
-	st8		[r4]=r5,3*8
-	st8		[r6]=r7,3*8;;
-
-	mov		r3=b3
-	mov		r5=b4
-	mov		r7=b5;;
-	st8		[r2]=r3,3*8
-	st8		[r4]=r5,3*8
-	st8		[r6]=r7,3*8;;
-
-	mov		r3=b6
-	mov		r5=b7;;
-	st8		[r2]=r3,2*8
-	st8		[r4]=r5,2*8;;
-
-cSaveCRs:
-// save CRs
-	add		r4=8,r2                  // duplicate r2 in r4
-	add		r6=2*8,r2                // duplicate r2 in r4
-
-	mov		r3=cr.dcr
-	mov		r5=cr.itm
-	mov		r7=cr.iva;;
-
-	st8		[r2]=r3,8*8
-	st8		[r4]=r5,3*8
-	st8		[r6]=r7,3*8;;            // 48 byte rements
-
-	mov		r3=cr.pta;;
-	st8		[r2]=r3,8*8;;            // 64 byte rements
-
-// if PSR.ic=0, reading interruption registers causes an illegal operation fault
-	mov		r3=psr;;
-	tbit.nz.unc	p6,p0=r3,PSR_IC;;           // PSI Valid Log bit pos. test
-(p6)    st8     [r2]=r0,9*8+160             // increment by 232 byte inc.
-begin_skip_intr_regs:
-(p6)	br		SkipIntrRegs;;
-
-	add		r4=8,r2                  // duplicate r2 in r4
-	add		r6=2*8,r2                // duplicate r2 in r6
-
-	mov		r3=cr.ipsr
-	mov		r5=cr.isr
-	mov		r7=r0;;
-	st8		[r2]=r3,3*8
-	st8		[r4]=r5,3*8
-	st8		[r6]=r7,3*8;;
-
-	mov		r3=cr.iip
-	mov		r5=cr.ifa
-	mov		r7=cr.itir;;
-	st8		[r2]=r3,3*8
-	st8		[r4]=r5,3*8
-	st8		[r6]=r7,3*8;;
-
-	mov		r3=cr.iipa
-	mov		r5=cr.ifs
-	mov		r7=cr.iim;;
-	st8		[r2]=r3,3*8
-	st8		[r4]=r5,3*8
-	st8		[r6]=r7,3*8;;
-
-	mov		r3=cr25;;                   // cr.iha
-	st8		[r2]=r3,160;;               // 160 byte rement
-
-SkipIntrRegs:
-	st8		[r2]=r0,152;;               // another 152 byte .
-
-	add		r4=8,r2                     // duplicate r2 in r4
-	add		r6=2*8,r2                   // duplicate r2 in r6
-
-	mov		r3=cr.lid
-//	mov		r5=cr.ivr                     // cr.ivr, don't read it
-	mov		r7=cr.tpr;;
-	st8		[r2]=r3,3*8
-	st8		[r4]=r5,3*8
-	st8		[r6]=r7,3*8;;
-
-	mov		r3=r0                       // cr.eoi => cr67
-	mov		r5=r0                       // cr.irr0 => cr68
-	mov		r7=r0;;                     // cr.irr1 => cr69
-	st8		[r2]=r3,3*8
-	st8		[r4]=r5,3*8
-	st8		[r6]=r7,3*8;;
-
-	mov		r3=r0                       // cr.irr2 => cr70
-	mov		r5=r0                       // cr.irr3 => cr71
-	mov		r7=cr.itv;;
-	st8		[r2]=r3,3*8
-	st8		[r4]=r5,3*8
-	st8		[r6]=r7,3*8;;
-
-	mov		r3=cr.pmv
-	mov		r5=cr.cmcv;;
-	st8		[r2]=r3,7*8
-	st8		[r4]=r5,7*8;;
-
-	mov		r3=r0                       // cr.lrr0 => cr80
-	mov		r5=r0;;                     // cr.lrr1 => cr81
-	st8		[r2]=r3,23*8
-	st8		[r4]=r5,23*8;;
-
-	adds		r2=25*8,r2;;
-
-cSaveARs:
-// save ARs
-	add		r4=8,r2                  // duplicate r2 in r4
-	add		r6=2*8,r2                // duplicate r2 in r6
-
-	mov		r3=ar.k0
-	mov		r5=ar.k1
-	mov		r7=ar.k2;;
-	st8		[r2]=r3,3*8
-	st8		[r4]=r5,3*8
-	st8		[r6]=r7,3*8;;
-
-	mov		r3=ar.k3
-	mov		r5=ar.k4
-	mov		r7=ar.k5;;
-	st8		[r2]=r3,3*8
-	st8		[r4]=r5,3*8
-	st8		[r6]=r7,3*8;;
-
-	mov		r3=ar.k6
-	mov		r5=ar.k7
-	mov		r7=r0;;                     // ar.kr8
-	st8		[r2]=r3,10*8
-	st8		[r4]=r5,10*8
-	st8		[r6]=r7,10*8;;           // rement by 72 bytes
-
-	mov		r3=ar.rsc
-	mov		ar.rsc=r0			    // put RSE in enforced lazy mode
-	mov		r5=ar.bsp
-	;;
-	mov		r7=ar.bspstore;;
-	st8		[r2]=r3,3*8
-	st8		[r4]=r5,3*8
-	st8		[r6]=r7,3*8;;
-
-	mov		r3=ar.rnat;;
-	st8		[r2]=r3,8*13             // increment by 13x8 bytes
-
-	mov		r3=ar.ccv;;
-	st8		[r2]=r3,8*4
-
-	mov		r3=ar.unat;;
-	st8		[r2]=r3,8*4
-
-	mov		r3=ar.fpsr;;
-	st8		[r2]=r3,8*4
-
-	mov		r3=ar.itc;;
-	st8		[r2]=r3,160                 // 160
-
-	mov		r3=ar.pfs;;
-	st8		[r2]=r3,8
-
-	mov		r3=ar.lc;;
-	st8		[r2]=r3,8
-
-	mov		r3=ar.ec;;
-	st8		[r2]=r3
-	add		r2=8*62,r2               //padding
-
-// save RRs
-	mov		ar.lc=0x08-1
-	movl		r4=0x00;;
-
-cStRR:
-	dep.z		r5=r4,61,3;;
-	mov		r3=rr[r5];;
-	st8		[r2]=r3,8
-	add		r4=1,r4
-	br.cloop.sptk.few	cStRR
+	// Save the switch_stack data that is not in minstate nor pt_regs.  The
+	// previous code left regs at pt_regs.
+	add regs=MCA_SWITCH_STACK_OFFSET-MCA_PT_REGS_OFFSET, regs
+	;;
+	add temp1=SW(F2), regs
+	add temp2=SW(F3), regs
+	;;
+	stf.spill [temp1]=f2,32
+	stf.spill [temp2]=f3,32
+	;;
+	stf.spill [temp1]=f4,32
+	stf.spill [temp2]=f5,32
+	;;
+	stf.spill [temp1]=f12,32
+	stf.spill [temp2]=f13,32
+	;;
+	stf.spill [temp1]=f14,32
+	stf.spill [temp2]=f15,32
+	;;
+	stf.spill [temp1]=f16,32
+	stf.spill [temp2]=f17,32
+	;;
+	stf.spill [temp1]=f18,32
+	stf.spill [temp2]=f19,32
+	;;
+	stf.spill [temp1]=f20,32
+	stf.spill [temp2]=f21,32
+	;;
+	stf.spill [temp1]=f22,32
+	stf.spill [temp2]=f23,32
+	;;
+	stf.spill [temp1]=f24,32
+	stf.spill [temp2]=f25,32
 	;;
-end_os_mca_dump:
-	br	ia64_os_mca_done_dump;;
+	stf.spill [temp1]=f26,32
+	stf.spill [temp2]=f27,32
+	;;
+	stf.spill [temp1]=f28,32
+	stf.spill [temp2]=f29,32
+	;;
+	stf.spill [temp1]=f30,SW(B2)-SW(F30)
+	stf.spill [temp2]=f31,SW(B3)-SW(F31)
+	mov temp3=b2
+	mov temp4=b3
+	;;
+	st8 [temp1]=temp3,16	// save b2
+	st8 [temp2]=temp4,16	// save b3
+	mov temp3=b4
+	mov temp4=b5
+	;;
+	st8 [temp1]=temp3,SW(AR_LC)-SW(B4)	// save b4
+	st8 [temp2]=temp4	// save b5
+	mov temp3=ar.lc
+	;;
+	st8 [temp1]=temp3	// save ar.lc
+
+	br.sptk b0
 
 //EndStub//////////////////////////////////////////////////////////////////////
 
 
 //++
 // Name:
-//       ia64_os_mca_proc_state_restore()
+//	ia64_state_restore()
 //
 // Stub Description:
 //
-//       This is a stub to restore the saved processor state during MCHK
+//	Restore the SAL/OS state.  This is sensitive to the layout of struct
+//	ia64_sal_os_state in mca.h.
+//
+//	r2 contains the return address, r3 contains either
+//	IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
+//
+//	In addition to the SAL to OS state, this routine restores all the
+//	registers that appear in struct pt_regs and struct switch_stack,
+//	excluding those in the PAL minstate area.
 //
 //--
 
-ia64_os_mca_proc_state_restore:
+ia64_state_restore:
+	// Restore the switch_stack data that is not in minstate nor pt_regs.
+	add regs=MCA_SWITCH_STACK_OFFSET, r3
+	mov b0=r2		// save return address
+	;;
+	GET_IA64_MCA_DATA(temp2)
+	;;
+	add regs=temp2, regs
+	;;
+	add temp1=SW(F2), regs
+	add temp2=SW(F3), regs
+	;;
+	ldf.fill f2=[temp1],32
+	ldf.fill f3=[temp2],32
+	;;
+	ldf.fill f4=[temp1],32
+	ldf.fill f5=[temp2],32
+	;;
+	ldf.fill f12=[temp1],32
+	ldf.fill f13=[temp2],32
+	;;
+	ldf.fill f14=[temp1],32
+	ldf.fill f15=[temp2],32
+	;;
+	ldf.fill f16=[temp1],32
+	ldf.fill f17=[temp2],32
+	;;
+	ldf.fill f18=[temp1],32
+	ldf.fill f19=[temp2],32
+	;;
+	ldf.fill f20=[temp1],32
+	ldf.fill f21=[temp2],32
+	;;
+	ldf.fill f22=[temp1],32
+	ldf.fill f23=[temp2],32
+	;;
+	ldf.fill f24=[temp1],32
+	ldf.fill f25=[temp2],32
+	;;
+	ldf.fill f26=[temp1],32
+	ldf.fill f27=[temp2],32
+	;;
+	ldf.fill f28=[temp1],32
+	ldf.fill f29=[temp2],32
+	;;
+	ldf.fill f30=[temp1],SW(B2)-SW(F30)
+	ldf.fill f31=[temp2],SW(B3)-SW(F31)
+	;;
+	ld8 temp3=[temp1],16	// restore b2
+	ld8 temp4=[temp2],16	// restore b3
+	;;
+	mov b2=temp3
+	mov b3=temp4
+	ld8 temp3=[temp1],SW(AR_LC)-SW(B4)	// restore b4
+	ld8 temp4=[temp2]	// restore b5
+	;;
+	mov b4=temp3
+	mov b5=temp4
+	ld8 temp3=[temp1]	// restore ar.lc
+	;;
+	mov ar.lc=temp3
 
-// Restore bank1 GR16-31
-	GET_IA64_MCA_DATA(r2)
+	// Restore the pt_regs data that is not in minstate.  The previous code
+	// left regs at switch_stack.
+	add regs=MCA_PT_REGS_OFFSET-MCA_SWITCH_STACK_OFFSET, regs
+	;;
+	add temp1=PT(B6), regs
+	add temp2=PT(B7), regs
+	;;
+	ld8 temp3=[temp1],PT(AR_CSD)-PT(B6)		// restore b6
+	ld8 temp4=[temp2],PT(AR_SSD)-PT(B7)		// restore b7
+	;;
+	mov b6=temp3
+	mov b7=temp4
+	ld8 temp3=[temp1],PT(AR_UNAT)-PT(AR_CSD)	// restore ar.csd
+	ld8 temp4=[temp2],PT(AR_PFS)-PT(AR_SSD)		// restore ar.ssd
+	;;
+	mov ar.csd=temp3
+	mov ar.ssd=temp4
+	ld8 temp3=[temp1]				// restore ar.unat
+	add temp1=PT(AR_CCV)-PT(AR_UNAT), temp1
+	ld8 temp4=[temp2],PT(AR_FPSR)-PT(AR_PFS)	// restore ar.pfs
 	;;
-	add r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2
+	mov ar.unat=temp3
+	mov ar.pfs=temp4
+	// ar.rnat, ar.bspstore, loadrs are restored in ia64_old_stack.
+	ld8 temp3=[temp1],PT(F6)-PT(AR_CCV)		// restore ar.ccv
+	ld8 temp4=[temp2],PT(F7)-PT(AR_FPSR)		// restore ar.fpsr
+	;;
+	mov ar.ccv=temp3
+	mov ar.fpsr=temp4
+	ldf.fill f6=[temp1],PT(F8)-PT(F6)
+	ldf.fill f7=[temp2],PT(F9)-PT(F7)
+	;;
+	ldf.fill f8=[temp1],PT(F10)-PT(F8)
+	ldf.fill f9=[temp2],PT(F11)-PT(F9)
+	;;
+	ldf.fill f10=[temp1]
+	ldf.fill f11=[temp2]
 
-restore_GRs:                                    // restore bank-1 GRs 16-31
-	bsw.1;;
-	add		r3=16*8,r2;;                // to get to NaT of GR 16-31
-	ld8		r3=[r3];;
-	mov		ar.unat=r3;;                // first restore NaT
-
-	ld8.fill	r16=[r2],8;;
-	ld8.fill	r17=[r2],8;;
-	ld8.fill	r18=[r2],8;;
-	ld8.fill	r19=[r2],8;;
-	ld8.fill	r20=[r2],8;;
-	ld8.fill	r21=[r2],8;;
-	ld8.fill	r22=[r2],8;;
-	ld8.fill	r23=[r2],8;;
-	ld8.fill	r24=[r2],8;;
-	ld8.fill	r25=[r2],8;;
-	ld8.fill	r26=[r2],8;;
-	ld8.fill	r27=[r2],8;;
-	ld8.fill	r28=[r2],8;;
-	ld8.fill	r29=[r2],8;;
-	ld8.fill	r30=[r2],8;;
-	ld8.fill	r31=[r2],8;;
-
-	ld8		r3=[r2],8;;              // increment to skip NaT
-	bsw.0;;
-
-restore_BRs:
-	add		r4=8,r2                  // duplicate r2 in r4
-	add		r6=2*8,r2;;              // duplicate r2 in r4
-
-	ld8		r3=[r2],3*8
-	ld8		r5=[r4],3*8
-	ld8		r7=[r6],3*8;;
-	mov		b0=r3
-	mov		b1=r5
-	mov		b2=r7;;
-
-	ld8		r3=[r2],3*8
-	ld8		r5=[r4],3*8
-	ld8		r7=[r6],3*8;;
-	mov		b3=r3
-	mov		b4=r5
-	mov		b5=r7;;
-
-	ld8		r3=[r2],2*8
-	ld8		r5=[r4],2*8;;
-	mov		b6=r3
-	mov		b7=r5;;
-
-restore_CRs:
-	add		r4=8,r2                  // duplicate r2 in r4
-	add		r6=2*8,r2;;              // duplicate r2 in r4
-
-	ld8		r3=[r2],8*8
-	ld8		r5=[r4],3*8
-	ld8		r7=[r6],3*8;;            // 48 byte increments
-	mov		cr.dcr=r3
-	mov		cr.itm=r5
-	mov		cr.iva=r7;;
-
-	ld8		r3=[r2],8*8;;            // 64 byte increments
-//      mov		cr.pta=r3
-
-
-// if PSR.ic=1, reading interruption registers causes an illegal operation fault
-	mov		r3=psr;;
-	tbit.nz.unc	p6,p0=r3,PSR_IC;;           // PSI Valid Log bit pos. test
-(p6)    st8     [r2]=r0,9*8+160             // increment by 232 byte inc.
-
-begin_rskip_intr_regs:
-(p6)	br		rSkipIntrRegs;;
-
-	add		r4=8,r2                  // duplicate r2 in r4
-	add		r6=2*8,r2;;              // duplicate r2 in r4
-
-	ld8		r3=[r2],3*8
-	ld8		r5=[r4],3*8
-	ld8		r7=[r6],3*8;;
-	mov		cr.ipsr=r3
-//	mov		cr.isr=r5                   // cr.isr is read only
-
-	ld8		r3=[r2],3*8
-	ld8		r5=[r4],3*8
-	ld8		r7=[r6],3*8;;
-	mov		cr.iip=r3
-	mov		cr.ifa=r5
-	mov		cr.itir=r7;;
-
-	ld8		r3=[r2],3*8
-	ld8		r5=[r4],3*8
-	ld8		r7=[r6],3*8;;
-	mov		cr.iipa=r3
-	mov		cr.ifs=r5
-	mov		cr.iim=r7
-
-	ld8		r3=[r2],160;;               // 160 byte increment
-	mov		cr.iha=r3
-
-rSkipIntrRegs:
-	ld8		r3=[r2],152;;               // another 152 byte inc.
-
-	add		r4=8,r2                     // duplicate r2 in r4
-	add		r6=2*8,r2;;                 // duplicate r2 in r6
-
-	ld8		r3=[r2],8*3
-	ld8		r5=[r4],8*3
-	ld8		r7=[r6],8*3;;
-	mov		cr.lid=r3
-//	mov		cr.ivr=r5                   // cr.ivr is read only
-	mov		cr.tpr=r7;;
-
-	ld8		r3=[r2],8*3
-	ld8		r5=[r4],8*3
-	ld8		r7=[r6],8*3;;
-//	mov		cr.eoi=r3
-//	mov		cr.irr0=r5                  // cr.irr0 is read only
-//	mov		cr.irr1=r7;;                // cr.irr1 is read only
-
-	ld8		r3=[r2],8*3
-	ld8		r5=[r4],8*3
-	ld8		r7=[r6],8*3;;
-//	mov		cr.irr2=r3                  // cr.irr2 is read only
-//	mov		cr.irr3=r5                  // cr.irr3 is read only
-	mov		cr.itv=r7;;
-
-	ld8		r3=[r2],8*7
-	ld8		r5=[r4],8*7;;
-	mov		cr.pmv=r3
-	mov		cr.cmcv=r5;;
-
-	ld8		r3=[r2],8*23
-	ld8		r5=[r4],8*23;;
-	adds		r2=8*23,r2
-	adds		r4=8*23,r4;;
-//	mov		cr.lrr0=r3
-//	mov		cr.lrr1=r5
-
-	adds		r2=8*2,r2;;
-
-restore_ARs:
-	add		r4=8,r2                  // duplicate r2 in r4
-	add		r6=2*8,r2;;              // duplicate r2 in r4
-
-	ld8		r3=[r2],3*8
-	ld8		r5=[r4],3*8
-	ld8		r7=[r6],3*8;;
-	mov		ar.k0=r3
-	mov		ar.k1=r5
-	mov		ar.k2=r7;;
-
-	ld8		r3=[r2],3*8
-	ld8		r5=[r4],3*8
-	ld8		r7=[r6],3*8;;
-	mov		ar.k3=r3
-	mov		ar.k4=r5
-	mov		ar.k5=r7;;
-
-	ld8		r3=[r2],10*8
-	ld8		r5=[r4],10*8
-	ld8		r7=[r6],10*8;;
-	mov		ar.k6=r3
-	mov		ar.k7=r5
-	;;
-
-	ld8		r3=[r2],3*8
-	ld8		r5=[r4],3*8
-	ld8		r7=[r6],3*8;;
-//	mov		ar.rsc=r3
-//	mov		ar.bsp=r5                   // ar.bsp is read only
-	mov		ar.rsc=r0			    // make sure that RSE is in enforced lazy mode
-	;;
-	mov		ar.bspstore=r7;;
-
-	ld8		r9=[r2],8*13;;
-	mov		ar.rnat=r9
-
-	mov		ar.rsc=r3
-	ld8		r3=[r2],8*4;;
-	mov		ar.ccv=r3
-
-	ld8		r3=[r2],8*4;;
-	mov		ar.unat=r3
-
-	ld8		r3=[r2],8*4;;
-	mov		ar.fpsr=r3
-
-	ld8		r3=[r2],160;;               // 160
-//      mov		ar.itc=r3
-
-	ld8		r3=[r2],8;;
-	mov		ar.pfs=r3
-
-	ld8		r3=[r2],8;;
-	mov		ar.lc=r3
-
-	ld8		r3=[r2];;
-	mov		ar.ec=r3
-	add		r2=8*62,r2;;             // padding
-
-restore_RRs:
-	mov		r5=ar.lc
-	mov		ar.lc=0x08-1
-	movl		r4=0x00;;
-cStRRr:
-	dep.z		r7=r4,61,3
-	ld8		r3=[r2],8;;
-	mov		rr[r7]=r3                   // what are its access previledges?
-	add		r4=1,r4
-	br.cloop.sptk.few	cStRRr
+	// Restore the SAL to OS state. The previous code left regs at pt_regs.
+	add regs=MCA_SOS_OFFSET-MCA_PT_REGS_OFFSET, regs
+	;;
+	add temp1=IA64_SAL_OS_STATE_COMMON_OFFSET, regs
+	add temp2=IA64_SAL_OS_STATE_COMMON_OFFSET+8, regs
+	;;
+	ld8 r12=[temp1],16	// sal_ra
+	ld8 r9=[temp2],16	// sal_gp
+	;;
+	ld8 r22=[temp1],24	// pal_min_state, virtual.  skip prev_task
+	ld8 r21=[temp2],16	// prev_IA64_KR_CURRENT
 	;;
-	mov		ar.lc=r5
+	ld8 temp3=[temp1],16	// cr.isr
+	ld8 temp4=[temp2],16	// cr.ifa
 	;;
-end_os_mca_restore:
-	br	ia64_os_mca_done_restore;;
+	mov cr.isr=temp3
+	mov cr.ifa=temp4
+	ld8 temp3=[temp1],16	// cr.itir
+	ld8 temp4=[temp2],16	// cr.iipa
+	;;
+	mov cr.itir=temp3
+	mov cr.iipa=temp4
+	ld8 temp3=[temp1],16	// cr.iim
+	ld8 temp4=[temp2],16	// cr.iha
+	;;
+	mov cr.iim=temp3
+	mov cr.iha=temp4
+	dep r22=0,r22,62,2	// pal_min_state, physical, uncached
+	mov IA64_KR(CURRENT)=r21
+	ld8 r8=[temp1]		// os_status
+	ld8 r10=[temp2]		// context
+
+	br.sptk b0
 
 //EndStub//////////////////////////////////////////////////////////////////////
 
 
-// ok, the issue here is that we need to save state information so
-// it can be useable by the kernel debugger and show regs routines.
-// In order to do this, our best bet is save the current state (plus
-// the state information obtain from the MIN_STATE_AREA) into a pt_regs
-// format.  This way we can pass it on in a useable format.
+//++
+// Name:
+//	ia64_new_stack()
 //
-
+// Stub Description:
 //
-// SAL to OS entry point for INIT on the monarch processor
-// This has been defined for registration purposes with SAL
-// as a part of ia64_mca_init.
+//	Switch to the MCA/INIT stack.
 //
-// When we get here, the following registers have been
-// set by the SAL for our use
+//	r2 contains the return address, r3 contains either
+//	IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
 //
-//		1. GR1 = OS INIT GP
-//		2. GR8 = PAL_PROC physical address
-//		3. GR9 = SAL_PROC physical address
-//		4. GR10 = SAL GP (physical)
-//		5. GR11 = Init Reason
-//			0 = Received INIT for event other than crash dump switch
-//			1 = Received wakeup at the end of an OS_MCA corrected machine check
-//			2 = Received INIT dude to CrashDump switch assertion
+//	On entry RBS is still on the original stack; this routine switches RBS
+//	to use the MCA/INIT stack.
 //
-//		6. GR12 = Return address to location within SAL_INIT procedure
-
+//	On entry, sos->pal_min_state is physical, on exit it is virtual.
+//
+//--
 
-GLOBAL_ENTRY(ia64_monarch_init_handler)
-	.prologue
-	// stash the information the SAL passed to os
-	SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
+ia64_new_stack:
+	add regs=MCA_PT_REGS_OFFSET, r3
+	add temp2=MCA_SOS_OFFSET+IA64_SAL_OS_STATE_PAL_MIN_STATE_OFFSET, r3
+	mov b0=r2			// save return address
+	GET_IA64_MCA_DATA(temp1)
+	invala
 	;;
-	SAVE_MIN_WITH_COVER
+	add temp2=temp2, temp1		// struct ia64_sal_os_state.pal_min_state on MCA or INIT stack
+	add regs=regs, temp1		// struct pt_regs on MCA or INIT stack
 	;;
-	mov r8=cr.ifa
-	mov r9=cr.isr
-	adds r3=8,r2				// set up second base pointer
+	// Address of minstate area provided by PAL is physical, uncacheable.
+	// Convert to Linux virtual address in region 6 for C code.
+	ld8 ms=[temp2]			// pal_min_state, physical
+	;;
+	dep temp1=-1,ms,62,2		// set region 6
+	mov temp3=IA64_RBS_OFFSET-MCA_PT_REGS_OFFSET
+	;;
+	st8 [temp2]=temp1		// pal_min_state, virtual
+
+	add temp4=temp3, regs		// start of bspstore on new stack
 	;;
-	SAVE_REST
+	mov ar.bspstore=temp4		// switch RBS to MCA/INIT stack
+	;;
+	flushrs				// must be first in group
+	br.sptk b0
+
+//EndStub//////////////////////////////////////////////////////////////////////
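The "dep temp1=-1,ms,62,2" above is the physical-to-virtual conversion the comment describes. A hedged C equivalent, assuming the usual ia64 layout where bits 63:61 select the region and bit 61 of the PAL-provided physical address is clear:

	/* illustrative only: force bits 63:62 on -> region 6, uncached */
	static inline u64 min_state_phys_to_region6(u64 phys)
	{
		return phys | (0x3UL << 62);
	}

The state-restore stub above performs the inverse ("dep r22=0,r22,62,2") before handing pal_min_state back to SAL.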
+
+
+//++
+// Name:
+//	ia64_old_stack()
+//
+// Stub Description:
+//
+//	Switch to the old stack.
+//
+//	r2 contains the return address, r3 contains either
+//	IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
+//
+//	On entry, pal_min_state is virtual, on exit it is physical.
+//
+//	On entry RBS is on the MCA/INIT stack; this routine switches RBS
+//	back to the previous stack.
+//
+//	The psr is set to all zeroes.  SAL return requires either all zeroes or
+//	just psr.mc set.  Leaving psr.mc off allows INIT to be issued if this
+//	code does not perform correctly.
+//
+//	The dirty registers at the time of the event were flushed to the
+//	MCA/INIT stack in ia64_pt_regs_save().  Restore the dirty registers
+//	before reverting to the previous bspstore.
+//--
 
-// ok, enough should be saved at this point to be dangerous, and supply
-// information for a dump
-// We need to switch to Virtual mode before hitting the C functions.
+ia64_old_stack:
+	add regs=MCA_PT_REGS_OFFSET, r3
+	mov b0=r2			// save return address
+	GET_IA64_MCA_DATA(temp2)
+	LOAD_PHYSICAL(p0,temp1,1f)
+	;;
+	mov cr.ipsr=r0
+	mov cr.ifs=r0
+	mov cr.iip=temp1
+	;;
+	invala
+	rfi
+1:
 
-	movl	r2=IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN
-	mov	r3=psr	// get the current psr, minimum enabled at this point
+	add regs=regs, temp2		// struct pt_regs on MCA or INIT stack
 	;;
-	or	r2=r2,r3
+	add temp1=PT(LOADRS), regs
 	;;
-	movl	r3=IVirtual_Switch
+	ld8 temp2=[temp1],PT(AR_BSPSTORE)-PT(LOADRS)	// restore loadrs
 	;;
-	mov	cr.iip=r3	// short return to set the appropriate bits
-	mov	cr.ipsr=r2	// need to do an rfi to set appropriate bits
+	ld8 temp3=[temp1],PT(AR_RNAT)-PT(AR_BSPSTORE)	// restore ar.bspstore
+	mov ar.rsc=temp2
 	;;
-	rfi
+	loadrs
+	ld8 temp4=[temp1]		// restore ar.rnat
 	;;
-IVirtual_Switch:
-	//
-	// We should now be running virtual
-	//
-	// Let's call the C handler to get the rest of the state info
-	//
-	alloc r14=ar.pfs,0,0,2,0		// now it's safe (must be first in insn group!)
+	mov ar.bspstore=temp3		// back to old stack
 	;;
-	adds out0=16,sp				// out0 = pointer to pt_regs
+	mov ar.rnat=temp4
 	;;
-	DO_SAVE_SWITCH_STACK
-	.body
-	adds out1=16,sp				// out0 = pointer to switch_stack
 
-	br.call.sptk.many rp=ia64_init_handler
-.ret1:
+	br.sptk b0
+
+//EndStub//////////////////////////////////////////////////////////////////////
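For reference, the loadrs value consumed by the "loadrs" above encodes the dirty-partition size in bytes, shifted into the loadrs field of ar.rsc (bits 16 and up). ia64_mca_modify_original_stack() in mca.c computes it for the new RBS exactly that way:

	/* slots * 8 = bytes of dirty registers, positioned for ar.rsc */
	regs->loadrs = (new_bsp - new_bspstore) * 8 << 16;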
 
-return_from_init:
-	br.sptk return_from_init
-END(ia64_monarch_init_handler)
 
+//++
+// Name:
+//	ia64_set_kernel_registers()
 //
-// SAL to OS entry point for INIT on the slave processor
-// This has been defined for registration purposes with SAL
-// as a part of ia64_mca_init.
+// Stub Description:
 //
+//	Set the registers that are required by the C code in order to run on an
+//	MCA/INIT stack.
+//
+//	r2 contains the return address, r3 contains either
+//	IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
+//
+//--
+
+ia64_set_kernel_registers:
+	add temp3=MCA_SP_OFFSET, r3
+	add temp4=MCA_SOS_OFFSET+IA64_SAL_OS_STATE_OS_GP_OFFSET, r3
+	mov b0=r2		// save return address
+	GET_IA64_MCA_DATA(temp1)
+	;;
+	add temp4=temp4, temp1	// &struct ia64_sal_os_state.os_gp
+	add r12=temp1, temp3	// kernel stack pointer on MCA/INIT stack
+	add r13=temp1, r3	// set current to start of MCA/INIT stack
+	;;
+	ld8 r1=[temp4]		// OS GP from SAL OS state
+	;;
+	DATA_PA_TO_VA(r1,temp1)
+	DATA_PA_TO_VA(r12,temp2)
+	DATA_PA_TO_VA(r13,temp3)
+	;;
+	mov IA64_KR(CURRENT)=r13
+
+	// FIXME: do I need to wire IA64_KR_CURRENT_STACK and IA64_TR_CURRENT_STACK?
+
+	br.sptk b0
+
+//EndStub//////////////////////////////////////////////////////////////////////
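Taken together, these stubs assume a per-cpu MCA/INIT stack formatted like a normal kernel stack. A rough map, inferred from the offsets used here and from format_mca_init_stack() in mca.c; the exact ordering of the upper entries is an assumption:

	/*
	 * + 0			struct task_struct (pseudo task, never runs)
	 * + IA64_TASK_SIZE	struct thread_info
	 * + IA64_RBS_OFFSET	register backing store
	 * + MCA_PT_REGS_OFFSET	struct pt_regs saved by the event
	 * + MCA_SOS_OFFSET	struct ia64_sal_os_state
	 * + MCA_SP_OFFSET	initial C stack pointer (becomes r12)
	 */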
 
-GLOBAL_ENTRY(ia64_slave_init_handler)
-1:	br.sptk 1b
-END(ia64_slave_init_handler)
+#undef	ms
+#undef	regs
+#undef	temp1
+#undef	temp2
+#undef	temp3
+#undef	temp4
+
+
+// Support function for mca.c; it is here to avoid using inline asm.  Given the
+// address of an rnat slot, if that address is below the current ar.bspstore
+// then return the contents of that slot, otherwise return the contents of
+// ar.rnat.
+GLOBAL_ENTRY(ia64_get_rnat)
+	alloc r14=ar.pfs,1,0,0,0
+	mov ar.rsc=0
+	;;
+	mov r14=ar.bspstore
+	;;
+	cmp.lt p6,p7=in0,r14
+	;;
+(p6)	ld8 r8=[in0]
+(p7)	mov r8=ar.rnat
+	mov ar.rsc=3
+	br.ret.sptk.many rp
+END(ia64_get_rnat)
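In C, ia64_get_rnat() reduces to a comparison against ar.bspstore: slots already spilled below bspstore carry their NaT collection in memory, everything else is still accumulating in ar.rnat. A sketch, with cur_bspstore and ar_rnat standing in for the registers:

	u64 get_rnat(u64 *slot, u64 *cur_bspstore, u64 ar_rnat)	/* sketch */
	{
		return slot < cur_bspstore ? *slot : ar_rnat;
	}

Every 64th RBS slot (slot number 0x3f) is such a NaT collection, which is why the register copy loop in ia64_mca_modify_original_stack() below tests ia64_rse_is_rnat_slot() on both the old and the new streams.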
diff -puN arch/ia64/kernel/mca.c~git-ia64 arch/ia64/kernel/mca.c
--- 25/arch/ia64/kernel/mca.c~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/arch/ia64/kernel/mca.c	2005-09-01 05:34:24.000000000 -0600
@@ -48,6 +48,9 @@
  *            Delete dead variables and functions.
  *            Reorder to remove the need for forward declarations and to consolidate
  *            related code.
+ *
+ * 2005-08-12 Keith Owens <kaos@sgi.com>
+ *	      Convert MCA/INIT handlers to use per event stacks and SAL/OS state.
  */
 #include <linux/config.h>
 #include <linux/types.h>
@@ -77,6 +80,8 @@
 #include <asm/irq.h>
 #include <asm/hw_irq.h>
 
+#include "entry.h"
+
 #if defined(IA64_MCA_DEBUG_INFO)
 # define IA64_MCA_DEBUG(fmt...)	printk(fmt)
 #else
@@ -84,9 +89,8 @@
 #endif
 
 /* Used by mca_asm.S */
-ia64_mca_sal_to_os_state_t	ia64_sal_to_os_handoff_state;
-ia64_mca_os_to_sal_state_t	ia64_os_to_sal_handoff_state;
-u64				ia64_mca_serialize;
+u32				ia64_mca_serialize;
+s32				ia64_mca_init_leave = -1;
 DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */
 DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */
 DEFINE_PER_CPU(u64, ia64_mca_pal_pte);	    /* PTE to map PAL code */
@@ -95,8 +99,10 @@ DEFINE_PER_CPU(u64, ia64_mca_pal_base); 
 unsigned long __per_cpu_mca[NR_CPUS];
 
 /* In mca_asm.S */
-extern void			ia64_monarch_init_handler (void);
-extern void			ia64_slave_init_handler (void);
+extern void			ia64_os_init_dispatch_monarch (void);
+extern void			ia64_os_init_dispatch_slave (void);
+
+static int monarch_cpu = -1;
 
 static ia64_mc_info_t		ia64_mc_info;
 
@@ -234,7 +240,8 @@ ia64_log_get(int sal_info_type, u8 **buf
  *  This function retrieves a specified error record type from SAL
  *  and wakes up any processes waiting for error records.
  *
- *  Inputs  :   sal_info_type   (Type of error record MCA/CMC/CPE/INIT)
+ *  Inputs  :   sal_info_type   (Type of error record MCA/CMC/CPE)
+ *              FIXME: remove MCA and irq_safe.
  */
 static void
 ia64_mca_log_sal_error_record(int sal_info_type)
@@ -242,7 +249,7 @@ ia64_mca_log_sal_error_record(int sal_in
 	u8 *buffer;
 	sal_log_record_header_t *rh;
 	u64 size;
-	int irq_safe = sal_info_type != SAL_INFO_TYPE_MCA && sal_info_type != SAL_INFO_TYPE_INIT;
+	int irq_safe = sal_info_type != SAL_INFO_TYPE_MCA;
 #ifdef IA64_MCA_DEBUG_INFO
 	static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" };
 #endif
@@ -330,191 +337,6 @@ ia64_mca_cpe_int_handler (int cpe_irq, v
 
 #endif /* CONFIG_ACPI */
 
-static void
-show_min_state (pal_min_state_area_t *minstate)
-{
-	u64 iip = minstate->pmsa_iip + ((struct ia64_psr *)(&minstate->pmsa_ipsr))->ri;
-	u64 xip = minstate->pmsa_xip + ((struct ia64_psr *)(&minstate->pmsa_xpsr))->ri;
-
-	printk("NaT bits\t%016lx\n", minstate->pmsa_nat_bits);
-	printk("pr\t\t%016lx\n", minstate->pmsa_pr);
-	printk("b0\t\t%016lx ", minstate->pmsa_br0); print_symbol("%s\n", minstate->pmsa_br0);
-	printk("ar.rsc\t\t%016lx\n", minstate->pmsa_rsc);
-	printk("cr.iip\t\t%016lx ", iip); print_symbol("%s\n", iip);
-	printk("cr.ipsr\t\t%016lx\n", minstate->pmsa_ipsr);
-	printk("cr.ifs\t\t%016lx\n", minstate->pmsa_ifs);
-	printk("xip\t\t%016lx ", xip); print_symbol("%s\n", xip);
-	printk("xpsr\t\t%016lx\n", minstate->pmsa_xpsr);
-	printk("xfs\t\t%016lx\n", minstate->pmsa_xfs);
-	printk("b1\t\t%016lx ", minstate->pmsa_br1);
-	print_symbol("%s\n", minstate->pmsa_br1);
-
-	printk("\nstatic registers r0-r15:\n");
-	printk(" r0- 3 %016lx %016lx %016lx %016lx\n",
-	       0UL, minstate->pmsa_gr[0], minstate->pmsa_gr[1], minstate->pmsa_gr[2]);
-	printk(" r4- 7 %016lx %016lx %016lx %016lx\n",
-	       minstate->pmsa_gr[3], minstate->pmsa_gr[4],
-	       minstate->pmsa_gr[5], minstate->pmsa_gr[6]);
-	printk(" r8-11 %016lx %016lx %016lx %016lx\n",
-	       minstate->pmsa_gr[7], minstate->pmsa_gr[8],
-	       minstate->pmsa_gr[9], minstate->pmsa_gr[10]);
-	printk("r12-15 %016lx %016lx %016lx %016lx\n",
-	       minstate->pmsa_gr[11], minstate->pmsa_gr[12],
-	       minstate->pmsa_gr[13], minstate->pmsa_gr[14]);
-
-	printk("\nbank 0:\n");
-	printk("r16-19 %016lx %016lx %016lx %016lx\n",
-	       minstate->pmsa_bank0_gr[0], minstate->pmsa_bank0_gr[1],
-	       minstate->pmsa_bank0_gr[2], minstate->pmsa_bank0_gr[3]);
-	printk("r20-23 %016lx %016lx %016lx %016lx\n",
-	       minstate->pmsa_bank0_gr[4], minstate->pmsa_bank0_gr[5],
-	       minstate->pmsa_bank0_gr[6], minstate->pmsa_bank0_gr[7]);
-	printk("r24-27 %016lx %016lx %016lx %016lx\n",
-	       minstate->pmsa_bank0_gr[8], minstate->pmsa_bank0_gr[9],
-	       minstate->pmsa_bank0_gr[10], minstate->pmsa_bank0_gr[11]);
-	printk("r28-31 %016lx %016lx %016lx %016lx\n",
-	       minstate->pmsa_bank0_gr[12], minstate->pmsa_bank0_gr[13],
-	       minstate->pmsa_bank0_gr[14], minstate->pmsa_bank0_gr[15]);
-
-	printk("\nbank 1:\n");
-	printk("r16-19 %016lx %016lx %016lx %016lx\n",
-	       minstate->pmsa_bank1_gr[0], minstate->pmsa_bank1_gr[1],
-	       minstate->pmsa_bank1_gr[2], minstate->pmsa_bank1_gr[3]);
-	printk("r20-23 %016lx %016lx %016lx %016lx\n",
-	       minstate->pmsa_bank1_gr[4], minstate->pmsa_bank1_gr[5],
-	       minstate->pmsa_bank1_gr[6], minstate->pmsa_bank1_gr[7]);
-	printk("r24-27 %016lx %016lx %016lx %016lx\n",
-	       minstate->pmsa_bank1_gr[8], minstate->pmsa_bank1_gr[9],
-	       minstate->pmsa_bank1_gr[10], minstate->pmsa_bank1_gr[11]);
-	printk("r28-31 %016lx %016lx %016lx %016lx\n",
-	       minstate->pmsa_bank1_gr[12], minstate->pmsa_bank1_gr[13],
-	       minstate->pmsa_bank1_gr[14], minstate->pmsa_bank1_gr[15]);
-}
-
-static void
-fetch_min_state (pal_min_state_area_t *ms, struct pt_regs *pt, struct switch_stack *sw)
-{
-	u64 *dst_banked, *src_banked, bit, shift, nat_bits;
-	int i;
-
-	/*
-	 * First, update the pt-regs and switch-stack structures with the contents stored
-	 * in the min-state area:
-	 */
-	if (((struct ia64_psr *) &ms->pmsa_ipsr)->ic == 0) {
-		pt->cr_ipsr = ms->pmsa_xpsr;
-		pt->cr_iip = ms->pmsa_xip;
-		pt->cr_ifs = ms->pmsa_xfs;
-	} else {
-		pt->cr_ipsr = ms->pmsa_ipsr;
-		pt->cr_iip = ms->pmsa_iip;
-		pt->cr_ifs = ms->pmsa_ifs;
-	}
-	pt->ar_rsc = ms->pmsa_rsc;
-	pt->pr = ms->pmsa_pr;
-	pt->r1 = ms->pmsa_gr[0];
-	pt->r2 = ms->pmsa_gr[1];
-	pt->r3 = ms->pmsa_gr[2];
-	sw->r4 = ms->pmsa_gr[3];
-	sw->r5 = ms->pmsa_gr[4];
-	sw->r6 = ms->pmsa_gr[5];
-	sw->r7 = ms->pmsa_gr[6];
-	pt->r8 = ms->pmsa_gr[7];
-	pt->r9 = ms->pmsa_gr[8];
-	pt->r10 = ms->pmsa_gr[9];
-	pt->r11 = ms->pmsa_gr[10];
-	pt->r12 = ms->pmsa_gr[11];
-	pt->r13 = ms->pmsa_gr[12];
-	pt->r14 = ms->pmsa_gr[13];
-	pt->r15 = ms->pmsa_gr[14];
-	dst_banked = &pt->r16;		/* r16-r31 are contiguous in struct pt_regs */
-	src_banked = ms->pmsa_bank1_gr;
-	for (i = 0; i < 16; ++i)
-		dst_banked[i] = src_banked[i];
-	pt->b0 = ms->pmsa_br0;
-	sw->b1 = ms->pmsa_br1;
-
-	/* construct the NaT bits for the pt-regs structure: */
-#	define PUT_NAT_BIT(dst, addr)					\
-	do {								\
-		bit = nat_bits & 1; nat_bits >>= 1;			\
-		shift = ((unsigned long) addr >> 3) & 0x3f;		\
-		dst = ((dst) & ~(1UL << shift)) | (bit << shift);	\
-	} while (0)
-
-	/* Rotate the saved NaT bits such that bit 0 corresponds to pmsa_gr[0]: */
-	shift = ((unsigned long) &ms->pmsa_gr[0] >> 3) & 0x3f;
-	nat_bits = (ms->pmsa_nat_bits >> shift) | (ms->pmsa_nat_bits << (64 - shift));
-
-	PUT_NAT_BIT(sw->caller_unat, &pt->r1);
-	PUT_NAT_BIT(sw->caller_unat, &pt->r2);
-	PUT_NAT_BIT(sw->caller_unat, &pt->r3);
-	PUT_NAT_BIT(sw->ar_unat, &sw->r4);
-	PUT_NAT_BIT(sw->ar_unat, &sw->r5);
-	PUT_NAT_BIT(sw->ar_unat, &sw->r6);
-	PUT_NAT_BIT(sw->ar_unat, &sw->r7);
-	PUT_NAT_BIT(sw->caller_unat, &pt->r8);	PUT_NAT_BIT(sw->caller_unat, &pt->r9);
-	PUT_NAT_BIT(sw->caller_unat, &pt->r10);	PUT_NAT_BIT(sw->caller_unat, &pt->r11);
-	PUT_NAT_BIT(sw->caller_unat, &pt->r12);	PUT_NAT_BIT(sw->caller_unat, &pt->r13);
-	PUT_NAT_BIT(sw->caller_unat, &pt->r14);	PUT_NAT_BIT(sw->caller_unat, &pt->r15);
-	nat_bits >>= 16;	/* skip over bank0 NaT bits */
-	PUT_NAT_BIT(sw->caller_unat, &pt->r16);	PUT_NAT_BIT(sw->caller_unat, &pt->r17);
-	PUT_NAT_BIT(sw->caller_unat, &pt->r18);	PUT_NAT_BIT(sw->caller_unat, &pt->r19);
-	PUT_NAT_BIT(sw->caller_unat, &pt->r20);	PUT_NAT_BIT(sw->caller_unat, &pt->r21);
-	PUT_NAT_BIT(sw->caller_unat, &pt->r22);	PUT_NAT_BIT(sw->caller_unat, &pt->r23);
-	PUT_NAT_BIT(sw->caller_unat, &pt->r24);	PUT_NAT_BIT(sw->caller_unat, &pt->r25);
-	PUT_NAT_BIT(sw->caller_unat, &pt->r26);	PUT_NAT_BIT(sw->caller_unat, &pt->r27);
-	PUT_NAT_BIT(sw->caller_unat, &pt->r28);	PUT_NAT_BIT(sw->caller_unat, &pt->r29);
-	PUT_NAT_BIT(sw->caller_unat, &pt->r30);	PUT_NAT_BIT(sw->caller_unat, &pt->r31);
-}
-
-static void
-init_handler_platform (pal_min_state_area_t *ms,
-		       struct pt_regs *pt, struct switch_stack *sw)
-{
-	struct unw_frame_info info;
-
-	/* if a kernel debugger is available call it here else just dump the registers */
-
-	/*
-	 * Wait for a bit.  On some machines (e.g., HP's zx2000 and zx6000, INIT can be
-	 * generated via the BMC's command-line interface, but since the console is on the
-	 * same serial line, the user will need some time to switch out of the BMC before
-	 * the dump begins.
-	 */
-	printk("Delaying for 5 seconds...\n");
-	udelay(5*1000000);
-	show_min_state(ms);
-
-	printk("Backtrace of current task (pid %d, %s)\n", current->pid, current->comm);
-	fetch_min_state(ms, pt, sw);
-	unw_init_from_interruption(&info, current, pt, sw);
-	ia64_do_show_stack(&info, NULL);
-
-#ifdef CONFIG_SMP
-	/* read_trylock() would be handy... */
-	if (!tasklist_lock.write_lock)
-		read_lock(&tasklist_lock);
-#endif
-	{
-		struct task_struct *g, *t;
-		do_each_thread (g, t) {
-			if (t == current)
-				continue;
-
-			printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm);
-			show_stack(t, NULL);
-		} while_each_thread (g, t);
-	}
-#ifdef CONFIG_SMP
-	if (!tasklist_lock.write_lock)
-		read_unlock(&tasklist_lock);
-#endif
-
-	printk("\nINIT dump complete.  Please reboot now.\n");
-	while (1);			/* hang city if no debugger */
-}
-
 #ifdef CONFIG_ACPI
 /*
  * ia64_mca_register_cpev
@@ -657,42 +479,6 @@ ia64_mca_cmc_vector_enable_keventd(void 
 }
 
 /*
- * ia64_mca_wakeup_ipi_wait
- *
- *	Wait for the inter-cpu interrupt to be sent by the
- *	monarch processor once it is done with handling the
- *	MCA.
- *
- *  Inputs  :   None
- *  Outputs :   None
- */
-static void
-ia64_mca_wakeup_ipi_wait(void)
-{
-	int	irr_num = (IA64_MCA_WAKEUP_VECTOR >> 6);
-	int	irr_bit = (IA64_MCA_WAKEUP_VECTOR & 0x3f);
-	u64	irr = 0;
-
-	do {
-		switch(irr_num) {
-		      case 0:
-			irr = ia64_getreg(_IA64_REG_CR_IRR0);
-			break;
-		      case 1:
-			irr = ia64_getreg(_IA64_REG_CR_IRR1);
-			break;
-		      case 2:
-			irr = ia64_getreg(_IA64_REG_CR_IRR2);
-			break;
-		      case 3:
-			irr = ia64_getreg(_IA64_REG_CR_IRR3);
-			break;
-		}
-		cpu_relax();
-	} while (!(irr & (1UL << irr_bit))) ;
-}
-
-/*
  * ia64_mca_wakeup
  *
  *	Send an inter-cpu interrupt to wake-up a particular cpu
@@ -757,11 +543,9 @@ ia64_mca_rendez_int_handler(int rendez_i
 	 */
 	ia64_sal_mc_rendez();
 
-	/* Wait for the wakeup IPI from the monarch
-	 * This waiting is done by polling on the wakeup-interrupt
-	 * vector bit in the processor's IRRs
-	 */
-	ia64_mca_wakeup_ipi_wait();
+	/* Wait for the monarch cpu to exit. */
+	while (monarch_cpu != -1)
+	       cpu_relax();	/* spin until monarch leaves */
 
 	/* Enable all interrupts */
 	local_irq_restore(flags);
@@ -789,53 +573,13 @@ ia64_mca_wakeup_int_handler(int wakeup_i
 	return IRQ_HANDLED;
 }
 
-/*
- * ia64_return_to_sal_check
- *
- *	This is function called before going back from the OS_MCA handler
- *	to the OS_MCA dispatch code which finally takes the control back
- *	to the SAL.
- *	The main purpose of this routine is to setup the OS_MCA to SAL
- *	return state which can be used by the OS_MCA dispatch code
- *	just before going back to SAL.
- *
- *  Inputs  :   None
- *  Outputs :   None
- */
-
-static void
-ia64_return_to_sal_check(int recover)
-{
-
-	/* Copy over some relevant stuff from the sal_to_os_mca_handoff
-	 * so that it can be used at the time of os_mca_to_sal_handoff
-	 */
-	ia64_os_to_sal_handoff_state.imots_sal_gp =
-		ia64_sal_to_os_handoff_state.imsto_sal_gp;
-
-	ia64_os_to_sal_handoff_state.imots_sal_check_ra =
-		ia64_sal_to_os_handoff_state.imsto_sal_check_ra;
-
-	if (recover)
-		ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_CORRECTED;
-	else
-		ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_COLD_BOOT;
-
-	/* Default = tell SAL to return to same context */
-	ia64_os_to_sal_handoff_state.imots_context = IA64_MCA_SAME_CONTEXT;
-
-	ia64_os_to_sal_handoff_state.imots_new_min_state =
-		(u64 *)ia64_sal_to_os_handoff_state.pal_min_state;
-
-}
-
 /* Function pointer for extra MCA recovery */
 int (*ia64_mca_ucmc_extension)
-	(void*,ia64_mca_sal_to_os_state_t*,ia64_mca_os_to_sal_state_t*)
+	(void*,struct ia64_sal_os_state*)
 	= NULL;
 
 int
-ia64_reg_MCA_extension(void *fn)
+ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *))
 {
 	if (ia64_mca_ucmc_extension)
 		return 1;
@@ -854,8 +598,324 @@ ia64_unreg_MCA_extension(void)
 EXPORT_SYMBOL(ia64_reg_MCA_extension);
 EXPORT_SYMBOL(ia64_unreg_MCA_extension);
 
+
+static inline void
+copy_reg(const u64 *fr, u64 fnat, u64 *tr, u64 *tnat)
+{
+	u64 fslot, tslot, nat;
+	*tr = *fr;
+	fslot = ((unsigned long)fr >> 3) & 63;
+	tslot = ((unsigned long)tr >> 3) & 63;
+	*tnat &= ~(1UL << tslot);
+	nat = (fnat >> fslot) & 1;
+	*tnat |= (nat << tslot);
+}
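copy_reg() leans on the UNaT convention: a register spilled at address A keeps its NaT bit at bit ((A >> 3) & 0x3f) of the corresponding NaT collection word, so the bit position follows the low bits of the spill address. Illustratively:

	/* hypothetical helper, not part of the patch */
	static inline unsigned ia64_spill_slot(const u64 *addr)
	{
		return ((unsigned long)addr >> 3) & 0x3f;
	}

fslot and tslot above are exactly this computation for the source and target addresses.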
+
+/* On entry to this routine, we are running on the per cpu stack, see
+ * mca_asm.h.  The original stack has not been touched by this event.  Some of
+ * the original stack's registers will be in the RBS on this stack.  This stack
+ * also contains a partial pt_regs and switch_stack; the rest of the data is in
+ * PAL minstate.
+ *
+ * The first thing to do is modify the original stack to look like a blocked
+ * task so we can run backtrace on the original task.  Also mark the per cpu
+ * stack as current to ensure that we use the correct task state; it also means
+ * that we can do backtrace on the MCA/INIT handler code itself.
+ */
+
+static task_t *
+ia64_mca_modify_original_stack(struct pt_regs *regs,
+		const struct switch_stack *sw,
+		struct ia64_sal_os_state *sos,
+		const char *type)
+{
+	char *p, comm[sizeof(current->comm)];
+	ia64_va va;
+	extern char ia64_leave_kernel[];	/* Need asm address, not function descriptor */
+	const pal_min_state_area_t *ms = sos->pal_min_state;
+	task_t *previous_current;
+	struct pt_regs *old_regs;
+	struct switch_stack *old_sw;
+	unsigned size = sizeof(struct pt_regs) +
+			sizeof(struct switch_stack) + 16;
+	u64 *old_bspstore, *old_bsp;
+	u64 *new_bspstore, *new_bsp;
+	u64 old_unat, old_rnat, new_rnat, nat;
+	u64 slots, loadrs = regs->loadrs;
+	u64 r12 = ms->pmsa_gr[12-1], r13 = ms->pmsa_gr[13-1];
+	u64 ar_bspstore = regs->ar_bspstore;
+	u64 ar_bsp = regs->ar_bspstore + (loadrs >> 16);
+	const u64 *bank;
+	const char *msg;
+	int cpu = smp_processor_id();
+
+	previous_current = curr_task(cpu);
+	set_curr_task(cpu, current);
+	if ((p = strchr(current->comm, ' ')))
+		*p = '\0';
+
+	/* Best effort attempt to cope with MCA/INIT delivered while in
+	 * physical mode.
+	 */
+	regs->cr_ipsr = ms->pmsa_ipsr;
+	if (ia64_psr(regs)->dt == 0) {
+		va.l = r12;
+		if (va.f.reg == 0) {
+			va.f.reg = 7;
+			r12 = va.l;
+		}
+		va.l = r13;
+		if (va.f.reg == 0) {
+			va.f.reg = 7;
+			r13 = va.l;
+		}
+	}
+	if (ia64_psr(regs)->rt == 0) {
+		va.l = ar_bspstore;
+		if (va.f.reg == 0) {
+			va.f.reg = 7;
+			ar_bspstore = va.l;
+		}
+		va.l = ar_bsp;
+		if (va.f.reg == 0) {
+			va.f.reg = 7;
+			ar_bsp = va.l;
+		}
+	}
+
+	/* mca_asm.S ia64_old_stack() cannot assume that the dirty registers
+	 * have been copied to the old stack, the old stack may fail the
+	 * validation tests below.  So ia64_old_stack() must restore the dirty
+	 * registers from the new stack.  The old and new bspstore probably
+	 * have different alignments, so loadrs calculated on the old bsp
+	 * cannot be used to restore from the new bsp.  Calculate a suitable
+	 * loadrs for the new stack and save it in the new pt_regs, where
+	 * ia64_old_stack() can get it.
+	 */
+	old_bspstore = (u64 *)ar_bspstore;
+	old_bsp = (u64 *)ar_bsp;
+	slots = ia64_rse_num_regs(old_bspstore, old_bsp);
+	new_bspstore = (u64 *)((u64)current + IA64_RBS_OFFSET);
+	new_bsp = ia64_rse_skip_regs(new_bspstore, slots);
+	regs->loadrs = (new_bsp - new_bspstore) * 8 << 16;
+
+	/* Verify the previous stack state before we change it */
+	if (user_mode(regs)) {
+		msg = "occurred in user space";
+		goto no_mod;
+	}
+	if ((r12 & -KERNEL_STACK_SIZE) != r13) {
+		msg = "inconsistent r12 and r13";
+		goto no_mod;
+	}
+	if ((ar_bspstore & -KERNEL_STACK_SIZE) != r13) {
+		msg = "inconsistent ar.bspstore and r13";
+		goto no_mod;
+	}
+	va.p = old_bspstore;
+	if (va.f.reg < 5) {
+		msg = "old_bspstore is in the wrong region";
+		goto no_mod;
+	}
+	if ((ar_bsp & -KERNEL_STACK_SIZE) != r13) {
+		msg = "inconsistent ar.bsp and r13";
+		goto no_mod;
+	}
+	size += (ia64_rse_skip_regs(old_bspstore, slots) - old_bspstore) * 8;
+	if (ar_bspstore + size > r12) {
+		msg = "no room for blocked state";
+		goto no_mod;
+	}
+
+	/* Change the comm field on the MCA/INIT task to include the pid that
+	 * was interrupted; it makes for easier debugging.  If that pid was 0
+	 * (swapper or nested MCA/INIT) then use the start of the previous comm
+	 * field suffixed with its cpu.
+	 */
+	if (previous_current->pid)
+		snprintf(comm, sizeof(comm), "%s %d",
+			current->comm, previous_current->pid);
+	else {
+		int l;
+		if ((p = strchr(previous_current->comm, ' ')))
+			l = p - previous_current->comm;
+		else
+			l = strlen(previous_current->comm);
+		snprintf(comm, sizeof(comm), "%s %*s %d",
+			current->comm, l, previous_current->comm,
+			previous_current->thread_info->cpu);
+	}
+	memcpy(current->comm, comm, sizeof(current->comm));
+
+	/* Make the original task look blocked.  First stack a struct pt_regs,
+	 * describing the state at the time of interrupt.  mca_asm.S built a
+	 * partial pt_regs, copy it and fill in the blanks using minstate.
+	 */
+	p = (char *)r12 - sizeof(*regs);
+	old_regs = (struct pt_regs *)p;
+	memcpy(old_regs, regs, sizeof(*regs));
+	/* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use
+	 * pmsa_{xip,xpsr,xfs}
+	 */
+	if (ia64_psr(regs)->ic) {
+		old_regs->cr_iip = ms->pmsa_iip;
+		old_regs->cr_ipsr = ms->pmsa_ipsr;
+		old_regs->cr_ifs = ms->pmsa_ifs;
+	} else {
+		old_regs->cr_iip = ms->pmsa_xip;
+		old_regs->cr_ipsr = ms->pmsa_xpsr;
+		old_regs->cr_ifs = ms->pmsa_xfs;
+	}
+	old_regs->pr = ms->pmsa_pr;
+	old_regs->b0 = ms->pmsa_br0;
+	old_regs->loadrs = loadrs;
+	old_regs->ar_rsc = ms->pmsa_rsc;
+	old_unat = old_regs->ar_unat;
+	copy_reg(&ms->pmsa_gr[1-1], ms->pmsa_nat_bits, &old_regs->r1, &old_unat);
+	copy_reg(&ms->pmsa_gr[2-1], ms->pmsa_nat_bits, &old_regs->r2, &old_unat);
+	copy_reg(&ms->pmsa_gr[3-1], ms->pmsa_nat_bits, &old_regs->r3, &old_unat);
+	copy_reg(&ms->pmsa_gr[8-1], ms->pmsa_nat_bits, &old_regs->r8, &old_unat);
+	copy_reg(&ms->pmsa_gr[9-1], ms->pmsa_nat_bits, &old_regs->r9, &old_unat);
+	copy_reg(&ms->pmsa_gr[10-1], ms->pmsa_nat_bits, &old_regs->r10, &old_unat);
+	copy_reg(&ms->pmsa_gr[11-1], ms->pmsa_nat_bits, &old_regs->r11, &old_unat);
+	copy_reg(&ms->pmsa_gr[12-1], ms->pmsa_nat_bits, &old_regs->r12, &old_unat);
+	copy_reg(&ms->pmsa_gr[13-1], ms->pmsa_nat_bits, &old_regs->r13, &old_unat);
+	copy_reg(&ms->pmsa_gr[14-1], ms->pmsa_nat_bits, &old_regs->r14, &old_unat);
+	copy_reg(&ms->pmsa_gr[15-1], ms->pmsa_nat_bits, &old_regs->r15, &old_unat);
+	if (ia64_psr(old_regs)->bn)
+		bank = ms->pmsa_bank1_gr;
+	else
+		bank = ms->pmsa_bank0_gr;
+	copy_reg(&bank[16-16], ms->pmsa_nat_bits, &old_regs->r16, &old_unat);
+	copy_reg(&bank[17-16], ms->pmsa_nat_bits, &old_regs->r17, &old_unat);
+	copy_reg(&bank[18-16], ms->pmsa_nat_bits, &old_regs->r18, &old_unat);
+	copy_reg(&bank[19-16], ms->pmsa_nat_bits, &old_regs->r19, &old_unat);
+	copy_reg(&bank[20-16], ms->pmsa_nat_bits, &old_regs->r20, &old_unat);
+	copy_reg(&bank[21-16], ms->pmsa_nat_bits, &old_regs->r21, &old_unat);
+	copy_reg(&bank[22-16], ms->pmsa_nat_bits, &old_regs->r22, &old_unat);
+	copy_reg(&bank[23-16], ms->pmsa_nat_bits, &old_regs->r23, &old_unat);
+	copy_reg(&bank[24-16], ms->pmsa_nat_bits, &old_regs->r24, &old_unat);
+	copy_reg(&bank[25-16], ms->pmsa_nat_bits, &old_regs->r25, &old_unat);
+	copy_reg(&bank[26-16], ms->pmsa_nat_bits, &old_regs->r26, &old_unat);
+	copy_reg(&bank[27-16], ms->pmsa_nat_bits, &old_regs->r27, &old_unat);
+	copy_reg(&bank[28-16], ms->pmsa_nat_bits, &old_regs->r28, &old_unat);
+	copy_reg(&bank[29-16], ms->pmsa_nat_bits, &old_regs->r29, &old_unat);
+	copy_reg(&bank[30-16], ms->pmsa_nat_bits, &old_regs->r30, &old_unat);
+	copy_reg(&bank[31-16], ms->pmsa_nat_bits, &old_regs->r31, &old_unat);
+
+	/* Next stack a struct switch_stack.  mca_asm.S built a partial
+	 * switch_stack, copy it and fill in the blanks using pt_regs and
+	 * minstate.
+	 *
+	 * In the synthesized switch_stack, b0 points to ia64_leave_kernel,
+	 * ar.pfs is set to 0.
+	 *
+	 * unwind.c::unw_unwind() does special processing for interrupt frames.
+	 * It checks if the PRED_NON_SYSCALL predicate is set, if the predicate
+	 * is clear then unw_unwind() does _not_ adjust bsp over pt_regs.  Not
+	 * that this is documented, of course.  Set PRED_NON_SYSCALL in the
+	 * switch_stack on the original stack so it will unwind correctly when
+	 * unwind.c reads pt_regs.
+	 *
+	 * thread.ksp is updated to point to the synthesized switch_stack.
+	 */
+	p -= sizeof(struct switch_stack);
+	old_sw = (struct switch_stack *)p;
+	memcpy(old_sw, sw, sizeof(*sw));
+	old_sw->caller_unat = old_unat;
+	old_sw->ar_fpsr = old_regs->ar_fpsr;
+	copy_reg(&ms->pmsa_gr[4-1], ms->pmsa_nat_bits, &old_sw->r4, &old_unat);
+	copy_reg(&ms->pmsa_gr[5-1], ms->pmsa_nat_bits, &old_sw->r5, &old_unat);
+	copy_reg(&ms->pmsa_gr[6-1], ms->pmsa_nat_bits, &old_sw->r6, &old_unat);
+	copy_reg(&ms->pmsa_gr[7-1], ms->pmsa_nat_bits, &old_sw->r7, &old_unat);
+	old_sw->b0 = (u64)ia64_leave_kernel;
+	old_sw->b1 = ms->pmsa_br1;
+	old_sw->ar_pfs = 0;
+	old_sw->ar_unat = old_unat;
+	old_sw->pr = old_regs->pr | (1UL << PRED_NON_SYSCALL);
+	previous_current->thread.ksp = (u64)p - 16;
+
+	/* Finally copy the original stack's registers back to its RBS.
+	 * Registers from ar.bspstore through ar.bsp at the time of the event
+	 * are in the current RBS, copy them back to the original stack.  The
+	 * copy must be done register by register because the original bspstore
+	 * and the current one have different alignments, so the saved RNAT
+	 * data occurs at different places.
+	 *
+	 * mca_asm does cover, so the old_bsp already includes all registers at
+	 * the time of MCA/INIT.  It also does flushrs, so all registers before
+	 * this function have been written to backing store on the MCA/INIT
+	 * stack.
+	 */
+	new_rnat = ia64_get_rnat(ia64_rse_rnat_addr(new_bspstore));
+	old_rnat = regs->ar_rnat;
+	while (slots--) {
+		if (ia64_rse_is_rnat_slot(new_bspstore)) {
+			new_rnat = ia64_get_rnat(new_bspstore++);
+		}
+		if (ia64_rse_is_rnat_slot(old_bspstore)) {
+			*old_bspstore++ = old_rnat;
+			old_rnat = 0;
+		}
+		nat = (new_rnat >> ia64_rse_slot_num(new_bspstore)) & 1UL;
+		old_rnat &= ~(1UL << ia64_rse_slot_num(old_bspstore));
+		old_rnat |= (nat << ia64_rse_slot_num(old_bspstore));
+		*old_bspstore++ = *new_bspstore++;
+	}
+	old_sw->ar_bspstore = (unsigned long)old_bspstore;
+	old_sw->ar_rnat = old_rnat;
+
+	sos->prev_task = previous_current;
+	return previous_current;
+
+no_mod:
+	printk(KERN_INFO "cpu %d, %s %s, original stack not modified\n",
+			smp_processor_id(), type, msg);
+	return previous_current;
+}
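After a successful modification, the interrupted task's memory stack ends up shaped the way the unwinder expects for a blocked task; descending from the interrupted r12:

	/*
	 * r12 at the time of the event
	 * struct pt_regs		interruption state from minstate
	 * struct switch_stack		b0 = ia64_leave_kernel, ar.pfs = 0
	 * 16 bytes of scratch		previous_current->thread.ksp
	 */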
+
+/* The monarch/slave interaction is based on monarch_cpu and requires that all
+ * slaves have entered rendezvous before the monarch leaves.  If any cpu has
+ * not entered rendezvous yet then wait a bit.  The assumption is that any
+ * slave that has not rendezvoused after a reasonable time is never going to do
+ * so.  In this context, slave includes cpus that respond to the MCA rendezvous
+ * interrupt, as well as cpus that receive the INIT slave event.
+ */
+
+static void
+ia64_wait_for_slaves(int monarch)
+{
+	int c, wait = 0;
+	for_each_online_cpu(c) {
+		if (c == monarch)
+			continue;
+		if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE) {
+			udelay(1000);		/* short wait first */
+			wait = 1;
+			break;
+		}
+	}
+	if (!wait)
+		return;
+	for_each_online_cpu(c) {
+		if (c == monarch)
+			continue;
+		if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE) {
+			udelay(5*1000000);	/* wait 5 seconds for slaves (arbitrary) */
+			break;
+		}
+	}
+}
+
+static void
+mca_init_leave(int cpu)
+{
+	 while (cmpxchg_acq(&ia64_mca_init_leave, -1, cpu) != -1)
+		 cpu_relax();
+}
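cmpxchg_acq() here acts as a one-at-a-time turnstile. Its effect, stated as non-atomic C (the real primitive performs this as a single atomic operation with acquire ordering):

	s32 cmpxchg_acq_sketch(s32 *p, s32 old, s32 new)	/* sketch */
	{
		s32 prev = *p;
		if (prev == old)
			*p = new;
		return prev;
	}

Each cpu spins until it can swing ia64_mca_init_leave from -1 to its own id; the matching store of -1 back is outside this hunk.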
+
 /*
- * ia64_mca_ucmc_handler
+ * ia64_mca_handler
  *
  *	This is uncorrectable machine check handler called from OS_MCA
  *	dispatch code which is in turn called from SAL_CHECK().
@@ -866,16 +926,28 @@ EXPORT_SYMBOL(ia64_unreg_MCA_extension);
  *	further MCA logging is enabled by clearing logs.
  *	Monarch also has the duty of sending wakeup-IPIs to pull the
  *	slave processors out of rendezvous spinloop.
- *
- *  Inputs  :   None
- *  Outputs :   None
  */
 void
-ia64_mca_ucmc_handler(void)
+ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
+		 struct ia64_sal_os_state *sos)
 {
 	pal_processor_state_info_t *psp = (pal_processor_state_info_t *)
-		&ia64_sal_to_os_handoff_state.proc_state_param;
-	int recover; 
+		&sos->proc_state_param;
+	int recover, cpu = smp_processor_id();
+	task_t *previous_current;
+
+	oops_in_progress = 1;	/* FIXME: make printk NMI/MCA/INIT safe */
+	previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA");
+	monarch_cpu = cpu;
+	ia64_wait_for_slaves(cpu);
+
+	/* Wakeup all the processors which are spinning in the rendezvous loop.
+	 * They will leave SAL, then spin in the OS with interrupts disabled
+	 * until this monarch cpu leaves the MCA handler.  That gets control
+	 * back to the OS so we can backtrace the other cpus, backtrace when
+	 * spinning in SAL does not work.
+	 */
+	ia64_mca_wakeup_all();
 
 	/* Get the MCA error record and log it */
 	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);
@@ -883,25 +955,21 @@ ia64_mca_ucmc_handler(void)
 	/* Recover if only a TLB error exists in this SAL error record */
 	recover = (psp->tc && !(psp->cc || psp->bc || psp->rc || psp->uc))
 	/* other error recovery */
-	   || (ia64_mca_ucmc_extension 
+	   || (ia64_mca_ucmc_extension
 		&& ia64_mca_ucmc_extension(
 			IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA),
-			&ia64_sal_to_os_handoff_state,
-			&ia64_os_to_sal_handoff_state)); 
+			sos));
 
 	if (recover) {
 		sal_log_record_header_t *rh = IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA);
 		rh->severity = sal_log_severity_corrected;
 		ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
+		sos->os_status = IA64_MCA_CORRECTED;
 	}
-	/*
-	 *  Wakeup all the processors which are spinning in the rendezvous
-	 *  loop.
-	 */
-	ia64_mca_wakeup_all();
 
-	/* Return to SAL */
-	ia64_return_to_sal_check(recover);
+	set_curr_task(cpu, previous_current);
+	mca_init_leave(cpu);
+	monarch_cpu = -1;
 }
 
 static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL);
@@ -1125,34 +1193,87 @@ ia64_mca_cpe_poll (unsigned long dummy)
 /*
  * C portion of the OS INIT handler
  *
- * Called from ia64_monarch_init_handler
- *
- * Inputs: pointer to pt_regs where processor info was saved.
- *
- * Returns:
- *   0 if SAL must warm boot the System
- *   1 if SAL must return to interrupted context using PAL_MC_RESUME
+ * Called from ia64_os_init_dispatch
  *
+ * Inputs: pointer to pt_regs where processor info was saved.  SAL/OS state for
+ * this event.  This code is used for both monarch and slave INIT events, see
+ * sos->monarch.
+ *
+ * All INIT events switch to the INIT stack and change the previous process to
+ * blocked status.  If one of the INIT events is the monarch then we are
+ * probably processing the nmi button/command.  Use the monarch cpu to dump all
+ * the processes.  The slave INIT events all spin until the monarch cpu
+ * returns.  We can also get INIT slave events for MCA, in which case the MCA
+ * process is the monarch.
  */
+
 void
-ia64_init_handler (struct pt_regs *pt, struct switch_stack *sw)
+ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
+		  struct ia64_sal_os_state *sos)
 {
-	pal_min_state_area_t *ms;
+	task_t *previous_current;
+	int cpu = smp_processor_id(), c;
+	struct task_struct *g, *t;
 
-	oops_in_progress = 1;	/* avoid deadlock in printk, but it makes recovery dodgy */
+	oops_in_progress = 1;	/* FIXME: make printk NMI/MCA/INIT safe */
 	console_loglevel = 15;	/* make sure printks make it to console */
 
 	printk(KERN_INFO "Entered OS INIT handler. PSP=%lx\n",
-		ia64_sal_to_os_handoff_state.proc_state_param);
+		sos->proc_state_param);
+	salinfo_log_wakeup(SAL_INFO_TYPE_INIT, NULL, 0, 0);
+
+	previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "INIT");
+	sos->os_status = IA64_INIT_RESUME;
+	if (!sos->monarch) {
+		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT;
+		while (monarch_cpu == -1)
+		       cpu_relax();	/* spin until monarch enters */
+		while (monarch_cpu != -1)
+		       cpu_relax();	/* spin until monarch leaves */
+		printk("slave returning %d\n", cpu);
+		set_curr_task(cpu, previous_current);
+		mca_init_leave(cpu);
+		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
+		return;
+	}
+
+	monarch_cpu = cpu;
 
 	/*
-	 * Address of minstate area provided by PAL is physical,
-	 * uncacheable (bit 63 set). Convert to Linux virtual
-	 * address in region 6.
+	 * Wait for a bit.  On some machines (e.g., HP's zx2000 and zx6000), INIT can be
+	 * generated via the BMC's command-line interface, but since the console is on the
+	 * same serial line, the user will need some time to switch out of the BMC before
+	 * the dump begins.
 	 */
-	ms = (pal_min_state_area_t *)(ia64_sal_to_os_handoff_state.pal_min_state | (6ul<<61));
-
-	init_handler_platform(ms, pt, sw);	/* call platform specific routines */
+	printk("Delaying for 5 seconds...\n");
+	udelay(5*1000000);
+	ia64_wait_for_slaves(cpu);
+	printk(KERN_ERR "Processes interrupted by INIT -");
+	for_each_online_cpu(c) {
+		struct ia64_sal_os_state *s;
+		t = __va(__per_cpu_mca[c] + IA64_MCA_CPU_INIT_STACK_OFFSET);
+		s = (struct ia64_sal_os_state *)((char *)t + MCA_SOS_OFFSET);
+		g = s->prev_task;
+		if (g) {
+			if (g->pid)
+				printk(" %d", g->pid);
+			else
+				printk(" %d (cpu %d task 0x%p)", g->pid, task_cpu(g), g);
+		}
+	}
+	printk("\n\n");
+	if (read_trylock(&tasklist_lock)) {
+		do_each_thread (g, t) {
+			printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm);
+			show_stack(t, NULL);
+		} while_each_thread (g, t);
+		read_unlock(&tasklist_lock);
+	}
+	printk("\nINIT dump complete.  Monarch on cpu %d returning to normal service.\n", cpu);
+	set_curr_task(cpu, previous_current);
+	mca_init_leave(cpu);
+	monarch_cpu = -1;
+	return;
 }
 
 static int __init
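The monarch/slave handshake that both handlers follow reduces to one shared variable. A condensed sketch of the two sides as they appear above:

	/* slave */
	while (monarch_cpu == -1)
		cpu_relax();		/* wait for the monarch to arrive */
	while (monarch_cpu != -1)
		cpu_relax();		/* wait for the monarch to leave */

	/* monarch */
	monarch_cpu = cpu;
	ia64_wait_for_slaves(cpu);
	/* log, backtrace, recover */
	monarch_cpu = -1;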
@@ -1202,6 +1323,34 @@ static struct irqaction mca_cpep_irqacti
 };
 #endif /* CONFIG_ACPI */
 
+/* Minimal format of the MCA/INIT stacks.  The pseudo processes that run on
+ * these stacks can never sleep, cannot return from the kernel to user
+ * space, and do not appear in a normal ps listing.  So there is no need to
+ * format most of the fields.
+ */
+
+static void
+format_mca_init_stack(void *mca_data, unsigned long offset,
+		const char *type, int cpu)
+{
+	struct task_struct *p = (struct task_struct *)((char *)mca_data + offset);
+	struct thread_info *ti;
+	memset(p, 0, KERNEL_STACK_SIZE);
+	ti = (struct thread_info *)((char *)p + IA64_TASK_SIZE);
+	ti->flags = _TIF_MCA_INIT;
+	ti->preempt_count = 1;
+	ti->task = p;
+	ti->cpu = cpu;
+	p->thread_info = ti;
+	p->state = TASK_UNINTERRUPTIBLE;
+	__set_bit(cpu, &p->cpus_allowed);
+	INIT_LIST_HEAD(&p->tasks);
+	p->parent = p->real_parent = p->group_leader = p;
+	INIT_LIST_HEAD(&p->children);
+	INIT_LIST_HEAD(&p->sibling);
+	strncpy(p->comm, type, sizeof(p->comm)-1);
+}
+
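The KERNEL_STACK_SIZE alignment applied to mca_data just below is what makes the r12/r13 sanity checks in ia64_mca_modify_original_stack() possible: with aligned stacks, current can be recovered from any kernel stack pointer by masking. Illustratively:

	/* hypothetical helper */
	static inline struct task_struct *task_from_sp(unsigned long sp)
	{
		return (struct task_struct *)(sp & ~(KERNEL_STACK_SIZE - 1));
	}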
 /* Do per-CPU MCA-related initialization.  */
 
 void __devinit
@@ -1214,19 +1363,28 @@ ia64_mca_cpu_init(void *cpu_data)
 		int cpu;
 
 		mca_data = alloc_bootmem(sizeof(struct ia64_mca_cpu)
-					 * NR_CPUS);
+					 * NR_CPUS + KERNEL_STACK_SIZE);
+		mca_data = (void *)(((unsigned long)mca_data +
+					KERNEL_STACK_SIZE - 1) &
+				(-KERNEL_STACK_SIZE));
 		for (cpu = 0; cpu < NR_CPUS; cpu++) {
+			format_mca_init_stack(mca_data,
+					offsetof(struct ia64_mca_cpu, mca_stack),
+					"MCA", cpu);
+			format_mca_init_stack(mca_data,
+					offsetof(struct ia64_mca_cpu, init_stack),
+					"INIT", cpu);
 			__per_cpu_mca[cpu] = __pa(mca_data);
 			mca_data += sizeof(struct ia64_mca_cpu);
 		}
 	}
 
-        /*
-         * The MCA info structure was allocated earlier and its
-         * physical address saved in __per_cpu_mca[cpu].  Copy that
-         * address * to ia64_mca_data so we can access it as a per-CPU
-         * variable.
-         */
+	/*
+	 * The MCA info structure was allocated earlier and its
+	 * physical address saved in __per_cpu_mca[cpu].  Copy that
+	 * address to ia64_mca_data so we can access it as a per-CPU
+	 * variable.
+	 */
 	__get_cpu_var(ia64_mca_data) = __per_cpu_mca[smp_processor_id()];
 
 	/*
@@ -1236,11 +1394,11 @@ ia64_mca_cpu_init(void *cpu_data)
 	__get_cpu_var(ia64_mca_per_cpu_pte) =
 		pte_val(mk_pte_phys(__pa(cpu_data), PAGE_KERNEL));
 
-        /*
-         * Also, stash away a copy of the PAL address and the PTE
-         * needed to map it.
-         */
-        pal_vaddr = efi_get_pal_addr();
+	/*
+	 * Also, stash away a copy of the PAL address and the PTE
+	 * needed to map it.
+	 */
+	pal_vaddr = efi_get_pal_addr();
 	if (!pal_vaddr)
 		return;
 	__get_cpu_var(ia64_mca_pal_base) =
@@ -1272,8 +1430,8 @@ ia64_mca_cpu_init(void *cpu_data)
 void __init
 ia64_mca_init(void)
 {
-	ia64_fptr_t *mon_init_ptr = (ia64_fptr_t *)ia64_monarch_init_handler;
-	ia64_fptr_t *slave_init_ptr = (ia64_fptr_t *)ia64_slave_init_handler;
+	ia64_fptr_t *init_hldlr_ptr_monarch = (ia64_fptr_t *)ia64_os_init_dispatch_monarch;
+	ia64_fptr_t *init_hldlr_ptr_slave = (ia64_fptr_t *)ia64_os_init_dispatch_slave;
 	ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch;
 	int i;
 	s64 rc;
@@ -1351,9 +1509,9 @@ ia64_mca_init(void)
 	 * XXX - disable SAL checksum by setting size to 0, should be
 	 * size of the actual init handler in mca_asm.S.
 	 */
-	ia64_mc_info.imi_monarch_init_handler		= ia64_tpa(mon_init_ptr->fp);
+	ia64_mc_info.imi_monarch_init_handler		= ia64_tpa(init_hldlr_ptr_monarch->fp);
 	ia64_mc_info.imi_monarch_init_handler_size	= 0;
-	ia64_mc_info.imi_slave_init_handler		= ia64_tpa(slave_init_ptr->fp);
+	ia64_mc_info.imi_slave_init_handler		= ia64_tpa(init_hldlr_ptr_slave->fp);
 	ia64_mc_info.imi_slave_init_handler_size	= 0;
 
 	IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __FUNCTION__,
diff -puN arch/ia64/kernel/mca_drv.c~git-ia64 arch/ia64/kernel/mca_drv.c
--- 25/arch/ia64/kernel/mca_drv.c~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/arch/ia64/kernel/mca_drv.c	2005-09-01 05:34:24.000000000 -0600
@@ -4,6 +4,8 @@
  *
  * Copyright (C) 2004 FUJITSU LIMITED
  * Copyright (C) Hidetoshi Seto (seto.hidetoshi@jp.fujitsu.com)
+ * Copyright (C) 2005 Silicon Graphics, Inc
+ * Copyright (C) 2005 Keith Owens <kaos@sgi.com>
  */
 #include <linux/config.h>
 #include <linux/types.h>
@@ -38,10 +40,6 @@
 /* max size of SAL error record (default) */
 static int sal_rec_max = 10000;
 
-/* from mca.c */
-static ia64_mca_sal_to_os_state_t *sal_to_os_handoff_state;
-static ia64_mca_os_to_sal_state_t *os_to_sal_handoff_state;
-
 /* from mca_drv_asm.S */
 extern void *mca_handler_bhhook(void);
 
@@ -316,7 +314,8 @@ init_record_index_pools(void)
  */
 
 static mca_type_t
-is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci)
+is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci,
+	      struct ia64_sal_os_state *sos)
 {
 	pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx);
 
@@ -327,7 +326,7 @@ is_mca_global(peidx_table_t *peidx, pal_
 	 * Therefore it is local MCA when rendezvous has not been requested.
 	 * Failed to rendezvous, the system must be down.
 	 */
-	switch (sal_to_os_handoff_state->imsto_rendez_state) {
+	switch (sos->rv_rc) {
 		case -1: /* SAL rendezvous unsuccessful */
 			return MCA_IS_GLOBAL;
 		case  0: /* SAL rendezvous not required */
@@ -388,7 +387,8 @@ is_mca_global(peidx_table_t *peidx, pal_
  */
 
 static int
-recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci)
+recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci,
+			struct ia64_sal_os_state *sos)
 {
 	sal_log_mod_error_info_t *smei;
 	pal_min_state_area_t *pmsa;
@@ -426,7 +426,7 @@ recover_from_read_error(slidx_table_t *s
 			 *  setup for resume to bottom half of MCA,
 			 * "mca_handler_bhhook"
 			 */
-			pmsa = (pal_min_state_area_t *)(sal_to_os_handoff_state->pal_min_state | (6ul<<61));
+			pmsa = sos->pal_min_state;
 			/* pass to bhhook as 1st argument (gr8) */
 			pmsa->pmsa_gr[8-1] = smei->target_identifier;
 			/* set interrupted return address (but no use) */
@@ -459,7 +459,8 @@ recover_from_read_error(slidx_table_t *s
  */
 
 static int
-recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci)
+recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci,
+			    struct ia64_sal_os_state *sos)
 {
 	int status = 0;
 	pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx);
@@ -469,7 +470,7 @@ recover_from_platform_error(slidx_table_
 		case 1: /* partial read */
 		case 3: /* full line(cpu) read */
 		case 9: /* I/O space read */
-			status = recover_from_read_error(slidx, peidx, pbci);
+			status = recover_from_read_error(slidx, peidx, pbci, sos);
 			break;
 		case 0: /* unknown */
 		case 2: /* partial write */
@@ -508,7 +509,8 @@ recover_from_platform_error(slidx_table_
  */
 
 static int
-recover_from_processor_error(int platform, slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci)
+recover_from_processor_error(int platform, slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci,
+			     struct ia64_sal_os_state *sos)
 {
 	pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx);
 
@@ -545,7 +547,7 @@ recover_from_processor_error(int platfor
 	 * This means "there are some platform errors".
 	 */
 	if (platform) 
-		return recover_from_platform_error(slidx, peidx, pbci);
+		return recover_from_platform_error(slidx, peidx, pbci, sos);
 	/* 
 	 * On account of strange SAL error record, we cannot recover. 
 	 */
@@ -562,8 +564,7 @@ recover_from_processor_error(int platfor
 
 static int
 mca_try_to_recover(void *rec, 
-	ia64_mca_sal_to_os_state_t *sal_to_os_state,
-	ia64_mca_os_to_sal_state_t *os_to_sal_state)
+	struct ia64_sal_os_state *sos)
 {
 	int platform_err;
 	int n_proc_err;
@@ -571,10 +572,6 @@ mca_try_to_recover(void *rec, 
 	peidx_table_t peidx;
 	pal_bus_check_info_t pbci;
 
-	/* handoff state from/to mca.c */
-	sal_to_os_handoff_state = sal_to_os_state;
-	os_to_sal_handoff_state = os_to_sal_state;
-
 	/* Make index of SAL error record */
 	platform_err = mca_make_slidx(rec, &slidx);
 
@@ -597,11 +594,11 @@ mca_try_to_recover(void *rec, 
 	*((u64*)&pbci) = peidx_check_info(&peidx, bus_check, 0);
 
 	/* Check whether MCA is global or not */
-	if (is_mca_global(&peidx, &pbci))
+	if (is_mca_global(&peidx, &pbci, sos))
 		return 0;
 	
 	/* Try to recover a processor error */
-	return recover_from_processor_error(platform_err, &slidx, &peidx, &pbci);
+	return recover_from_processor_error(platform_err, &slidx, &peidx, &pbci, sos);
 }
 
 /*
diff -puN arch/ia64/kernel/minstate.h~git-ia64 arch/ia64/kernel/minstate.h
--- 25/arch/ia64/kernel/minstate.h~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/arch/ia64/kernel/minstate.h	2005-09-01 05:34:24.000000000 -0600
@@ -5,73 +5,6 @@
 #include "entry.h"
 
 /*
- * For ivt.s we want to access the stack virtually so we don't have to disable translation
- * on interrupts.
- *
- *  On entry:
- *	r1:	pointer to current task (ar.k6)
- */
-#define MINSTATE_START_SAVE_MIN_VIRT								\
-(pUStk)	mov ar.rsc=0;		/* set enforced lazy mode, pl 0, little-endian, loadrs=0 */	\
-	;;											\
-(pUStk)	mov.m r24=ar.rnat;									\
-(pUStk)	addl r22=IA64_RBS_OFFSET,r1;			/* compute base of RBS */		\
-(pKStk) mov r1=sp;					/* get sp  */				\
-	;;											\
-(pUStk) lfetch.fault.excl.nt1 [r22];								\
-(pUStk)	addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1;	/* compute base of memory stack */	\
-(pUStk)	mov r23=ar.bspstore;				/* save ar.bspstore */			\
-	;;											\
-(pUStk)	mov ar.bspstore=r22;				/* switch to kernel RBS */		\
-(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1;			/* if in kernel mode, use sp (r12) */	\
-	;;											\
-(pUStk)	mov r18=ar.bsp;										\
-(pUStk)	mov ar.rsc=0x3;		/* set eager mode, pl 0, little-endian, loadrs=0 */
-
-#define MINSTATE_END_SAVE_MIN_VIRT								\
-	bsw.1;			/* switch back to bank 1 (must be last in insn group) */	\
-	;;
-
-/*
- * For mca_asm.S we want to access the stack physically since the state is saved before we
- * go virtual and don't want to destroy the iip or ipsr.
- */
-#define MINSTATE_START_SAVE_MIN_PHYS								\
-(pKStk) mov r3=IA64_KR(PER_CPU_DATA);;								\
-(pKStk) addl r3=THIS_CPU(ia64_mca_data),r3;;							\
-(pKStk) ld8 r3 = [r3];;										\
-(pKStk) addl r3=IA64_MCA_CPU_INIT_STACK_OFFSET,r3;;						\
-(pKStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r3;						\
-(pUStk)	mov ar.rsc=0;		/* set enforced lazy mode, pl 0, little-endian, loadrs=0 */	\
-(pUStk)	addl r22=IA64_RBS_OFFSET,r1;		/* compute base of register backing store */	\
-	;;											\
-(pUStk)	mov r24=ar.rnat;									\
-(pUStk)	addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1;	/* compute base of memory stack */	\
-(pUStk)	mov r23=ar.bspstore;				/* save ar.bspstore */			\
-(pUStk)	dep r22=-1,r22,61,3;			/* compute kernel virtual addr of RBS */	\
-	;;											\
-(pUStk)	mov ar.bspstore=r22;			/* switch to kernel RBS */			\
-	;;											\
-(pUStk)	mov r18=ar.bsp;										\
-(pUStk)	mov ar.rsc=0x3;		/* set eager mode, pl 0, little-endian, loadrs=0 */		\
-
-#define MINSTATE_END_SAVE_MIN_PHYS								\
-	dep r12=-1,r12,61,3;		/* make sp a kernel virtual address */			\
-	;;
-
-#ifdef MINSTATE_VIRT
-# define MINSTATE_GET_CURRENT(reg)	mov reg=IA64_KR(CURRENT)
-# define MINSTATE_START_SAVE_MIN	MINSTATE_START_SAVE_MIN_VIRT
-# define MINSTATE_END_SAVE_MIN		MINSTATE_END_SAVE_MIN_VIRT
-#endif
-
-#ifdef MINSTATE_PHYS
-# define MINSTATE_GET_CURRENT(reg)	mov reg=IA64_KR(CURRENT);; tpa reg=reg
-# define MINSTATE_START_SAVE_MIN	MINSTATE_START_SAVE_MIN_PHYS
-# define MINSTATE_END_SAVE_MIN		MINSTATE_END_SAVE_MIN_PHYS
-#endif
-
-/*
  * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
  * the minimum state necessary that allows us to turn psr.ic back
  * on.
@@ -97,7 +30,7 @@
  * we can pass interruption state as arguments to a handler.
  */
 #define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA)							\
-	MINSTATE_GET_CURRENT(r16);	/* M (or M;;I) */					\
+	mov r16=IA64_KR(CURRENT);	/* M */							\
 	mov r27=ar.rsc;			/* M */							\
 	mov r20=r1;			/* A */							\
 	mov r25=ar.unat;		/* M */							\
@@ -118,7 +51,21 @@
 	SAVE_IFS;										\
 	cmp.eq pKStk,pUStk=r0,r17;		/* are we in kernel mode already? */		\
 	;;											\
-	MINSTATE_START_SAVE_MIN									\
+(pUStk)	mov ar.rsc=0;		/* set enforced lazy mode, pl 0, little-endian, loadrs=0 */	\
+	;;											\
+(pUStk)	mov.m r24=ar.rnat;									\
+(pUStk)	addl r22=IA64_RBS_OFFSET,r1;			/* compute base of RBS */		\
+(pKStk) mov r1=sp;					/* get sp  */				\
+	;;											\
+(pUStk) lfetch.fault.excl.nt1 [r22];								\
+(pUStk)	addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1;	/* compute base of memory stack */	\
+(pUStk)	mov r23=ar.bspstore;				/* save ar.bspstore */			\
+	;;											\
+(pUStk)	mov ar.bspstore=r22;				/* switch to kernel RBS */		\
+(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1;			/* if in kernel mode, use sp (r12) */	\
+	;;											\
+(pUStk)	mov r18=ar.bsp;										\
+(pUStk)	mov ar.rsc=0x3;		/* set eager mode, pl 0, little-endian, loadrs=0 */		\
 	adds r17=2*L1_CACHE_BYTES,r1;		/* really: biggest cache-line size */		\
 	adds r16=PT(CR_IPSR),r1;								\
 	;;											\
@@ -181,7 +128,8 @@
 	EXTRA;											\
 	movl r1=__gp;		/* establish kernel global pointer */				\
 	;;											\
-	MINSTATE_END_SAVE_MIN
+	bsw.1;			/* switch back to bank 1 (must be last in insn group) */	\
+	;;
 
 /*
  * SAVE_REST saves the remainder of pt_regs (with psr.ic on).
diff -puN arch/ia64/kernel/palinfo.c~git-ia64 arch/ia64/kernel/palinfo.c
--- 25/arch/ia64/kernel/palinfo.c~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/arch/ia64/kernel/palinfo.c	2005-09-01 05:34:24.000000000 -0600
@@ -307,11 +307,9 @@ vm_info(char *page)
 
 	if ((status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) !=0) {
 		printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
-		return 0;
-	}
+	} else {
 
-
-	p += sprintf(p,
+		p += sprintf(p,
 		     "Physical Address Space         : %d bits\n"
 		     "Virtual Address Space          : %d bits\n"
 		     "Protection Key Registers(PKR)  : %d\n"
@@ -319,92 +317,99 @@ vm_info(char *page)
 		     "Hash Tag ID                    : 0x%x\n"
 		     "Size of RR.rid                 : %d\n",
 		     vm_info_1.pal_vm_info_1_s.phys_add_size,
-		     vm_info_2.pal_vm_info_2_s.impl_va_msb+1, vm_info_1.pal_vm_info_1_s.max_pkr+1,
-		     vm_info_1.pal_vm_info_1_s.key_size, vm_info_1.pal_vm_info_1_s.hash_tag_id,
+		     vm_info_2.pal_vm_info_2_s.impl_va_msb+1,
+		     vm_info_1.pal_vm_info_1_s.max_pkr+1,
+		     vm_info_1.pal_vm_info_1_s.key_size,
+		     vm_info_1.pal_vm_info_1_s.hash_tag_id,
 		     vm_info_2.pal_vm_info_2_s.rid_size);
+	}
 
-	if (ia64_pal_mem_attrib(&attrib) != 0)
-		return 0;
-
-	p += sprintf(p, "Supported memory attributes    : ");
-	sep = "";
-	for (i = 0; i < 8; i++) {
-		if (attrib & (1 << i)) {
-			p += sprintf(p, "%s%s", sep, mem_attrib[i]);
-			sep = ", ";
+	if (ia64_pal_mem_attrib(&attrib) == 0) {
+		p += sprintf(p, "Supported memory attributes    : ");
+		sep = "";
+		for (i = 0; i < 8; i++) {
+			if (attrib & (1 << i)) {
+				p += sprintf(p, "%s%s", sep, mem_attrib[i]);
+				sep = ", ";
+			}
 		}
+		p += sprintf(p, "\n");
 	}
-	p += sprintf(p, "\n");
 
 	if ((status = ia64_pal_vm_page_size(&tr_pages, &vw_pages)) !=0) {
 		printk(KERN_ERR "ia64_pal_vm_page_size=%ld\n", status);
-		return 0;
-	}
-
-	p += sprintf(p,
-		     "\nTLB walker                     : %simplemented\n"
-		     "Number of DTR                  : %d\n"
-		     "Number of ITR                  : %d\n"
-		     "TLB insertable page sizes      : ",
-		     vm_info_1.pal_vm_info_1_s.vw ? "" : "not ",
-		     vm_info_1.pal_vm_info_1_s.max_dtr_entry+1,
-		     vm_info_1.pal_vm_info_1_s.max_itr_entry+1);
+	} else {
 
+		p += sprintf(p,
+			     "\nTLB walker                     : %simplemented\n"
+			     "Number of DTR                  : %d\n"
+			     "Number of ITR                  : %d\n"
+			     "TLB insertable page sizes      : ",
+			     vm_info_1.pal_vm_info_1_s.vw ? "" : "not ",
+			     vm_info_1.pal_vm_info_1_s.max_dtr_entry+1,
+			     vm_info_1.pal_vm_info_1_s.max_itr_entry+1);
 
-	p = bitvector_process(p, tr_pages);
 
-	p += sprintf(p, "\nTLB purgeable page sizes       : ");
+		p = bitvector_process(p, tr_pages);
 
-	p = bitvector_process(p, vw_pages);
+		p += sprintf(p, "\nTLB purgeable page sizes       : ");
 
+		p = bitvector_process(p, vw_pages);
+	}
 	if ((status=ia64_get_ptce(&ptce)) != 0) {
 		printk(KERN_ERR "ia64_get_ptce=%ld\n", status);
-		return 0;
-	}
-
-	p += sprintf(p,
+	} else {
+		p += sprintf(p,
 		     "\nPurge base address             : 0x%016lx\n"
 		     "Purge outer loop count         : %d\n"
 		     "Purge inner loop count         : %d\n"
 		     "Purge outer loop stride        : %d\n"
 		     "Purge inner loop stride        : %d\n",
-		     ptce.base, ptce.count[0], ptce.count[1], ptce.stride[0], ptce.stride[1]);
+		     ptce.base, ptce.count[0], ptce.count[1],
+		     ptce.stride[0], ptce.stride[1]);
 
-	p += sprintf(p,
+		p += sprintf(p,
 		     "TC Levels                      : %d\n"
 		     "Unique TC(s)                   : %d\n",
 		     vm_info_1.pal_vm_info_1_s.num_tc_levels,
 		     vm_info_1.pal_vm_info_1_s.max_unique_tcs);
 
-	for(i=0; i < vm_info_1.pal_vm_info_1_s.num_tc_levels; i++) {
-		for (j=2; j>0 ; j--) {
-			tc_pages = 0; /* just in case */
+		for(i=0; i < vm_info_1.pal_vm_info_1_s.num_tc_levels; i++) {
+			for (j=2; j>0 ; j--) {
+				tc_pages = 0; /* just in case */
 
 
-			/* even without unification, some levels may not be present */
-			if ((status=ia64_pal_vm_info(i,j, &tc_info, &tc_pages)) != 0) {
-				continue;
-			}
+				/* even without unification, some levels may not be present */
+				if ((status=ia64_pal_vm_info(i,j, &tc_info, &tc_pages)) != 0) {
+					continue;
+				}
 
-			p += sprintf(p,
+				p += sprintf(p,
 				     "\n%s Translation Cache Level %d:\n"
 				     "\tHash sets           : %d\n"
 				     "\tAssociativity       : %d\n"
 				     "\tNumber of entries   : %d\n"
 				     "\tFlags               : ",
-				     cache_types[j+tc_info.tc_unified], i+1, tc_info.tc_num_sets,
-				     tc_info.tc_associativity, tc_info.tc_num_entries);
-
-			if (tc_info.tc_pf) p += sprintf(p, "PreferredPageSizeOptimized ");
-			if (tc_info.tc_unified) p += sprintf(p, "Unified ");
-			if (tc_info.tc_reduce_tr) p += sprintf(p, "TCReduction");
-
-			p += sprintf(p, "\n\tSupported page sizes: ");
-
-			p = bitvector_process(p, tc_pages);
-
-			/* when unified date (j=2) is enough */
-			if (tc_info.tc_unified) break;
+				     cache_types[j+tc_info.tc_unified], i+1,
+				     tc_info.tc_num_sets,
+				     tc_info.tc_associativity,
+				     tc_info.tc_num_entries);
+
+				if (tc_info.tc_pf)
+					p += sprintf(p, "PreferredPageSizeOptimized ");
+				if (tc_info.tc_unified)
+					p += sprintf(p, "Unified ");
+				if (tc_info.tc_reduce_tr)
+					p += sprintf(p, "TCReduction");
+
+				p += sprintf(p, "\n\tSupported page sizes: ");
+
+				p = bitvector_process(p, tc_pages);
+
+				/* when unified, data (j=2) is enough */
+				if (tc_info.tc_unified)
+					break;
+			}
 		}
 	}
 	p += sprintf(p, "\n");
@@ -440,14 +445,14 @@ register_info(char *page)
 		p += sprintf(p, "\n");
 	}
 
-	if (ia64_pal_rse_info(&phys_stacked, &hints) != 0) return 0;
+	if (ia64_pal_rse_info(&phys_stacked, &hints) == 0) {
 
-	p += sprintf(p,
-		     "RSE stacked physical registers   : %ld\n"
-		     "RSE load/store hints             : %ld (%s)\n",
-		     phys_stacked, hints.ph_data,
-		     hints.ph_data < RSE_HINTS_COUNT ? rse_hints[hints.ph_data]: "(??)");
-
+		p += sprintf(p,
+			     "RSE stacked physical registers   : %ld\n"
+			     "RSE load/store hints             : %ld (%s)\n",
+			     phys_stacked, hints.ph_data,
+			     hints.ph_data < RSE_HINTS_COUNT ?
+			     rse_hints[hints.ph_data] : "(??)");
+	}
 	if (ia64_pal_debug_info(&iregs, &dregs))
 		return 0;
 
diff -puN arch/ia64/kernel/salinfo.c~git-ia64 arch/ia64/kernel/salinfo.c
--- 25/arch/ia64/kernel/salinfo.c~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/arch/ia64/kernel/salinfo.c	2005-09-01 05:34:24.000000000 -0600
@@ -22,6 +22,11 @@
  *
  * Dec  5 2004	kaos@sgi.com
  *   Standardize which records are cleared automatically.
+ *
+ * Aug 18 2005	kaos@sgi.com
+ *   mca.c may not pass a buffer; a NULL buffer just indicates that a new
+ *   record is available in SAL.
+ *   Replace some NR_CPUS loops with online-cpu iteration, for cpu hotplug.
  */
 
 #include <linux/types.h>
@@ -193,7 +198,7 @@ shift1_data_saved (struct salinfo_data *
  * The buffer passed from mca.c points to the output from ia64_log_get. This is
  * a persistent buffer but its contents can change between the interrupt and
  * when user space processes the record.  Save the record id to identify
- * changes.
+ * changes.  If the buffer is NULL then just update the bitmap.
  */
 void
 salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe)
@@ -206,27 +211,29 @@ salinfo_log_wakeup(int type, u8 *buffer,
 
 	BUG_ON(type >= ARRAY_SIZE(salinfo_log_name));
 
-	if (irqsafe)
-		spin_lock_irqsave(&data_saved_lock, flags);
-	for (i = 0, data_saved = data->data_saved; i < saved_size; ++i, ++data_saved) {
-		if (!data_saved->buffer)
-			break;
-	}
-	if (i == saved_size) {
-		if (!data->saved_num) {
-			shift1_data_saved(data, 0);
-			data_saved = data->data_saved + saved_size - 1;
-		} else
-			data_saved = NULL;
-	}
-	if (data_saved) {
-		data_saved->cpu = smp_processor_id();
-		data_saved->id = ((sal_log_record_header_t *)buffer)->id;
-		data_saved->size = size;
-		data_saved->buffer = buffer;
+	if (buffer) {
+		if (irqsafe)
+			spin_lock_irqsave(&data_saved_lock, flags);
+		for (i = 0, data_saved = data->data_saved; i < saved_size; ++i, ++data_saved) {
+			if (!data_saved->buffer)
+				break;
+		}
+		if (i == saved_size) {
+			if (!data->saved_num) {
+				shift1_data_saved(data, 0);
+				data_saved = data->data_saved + saved_size - 1;
+			} else
+				data_saved = NULL;
+		}
+		if (data_saved) {
+			data_saved->cpu = smp_processor_id();
+			data_saved->id = ((sal_log_record_header_t *)buffer)->id;
+			data_saved->size = size;
+			data_saved->buffer = buffer;
+		}
+		if (irqsafe)
+			spin_unlock_irqrestore(&data_saved_lock, flags);
 	}
-	if (irqsafe)
-		spin_unlock_irqrestore(&data_saved_lock, flags);
 
 	if (!test_and_set_bit(smp_processor_id(), &data->cpu_event)) {
 		if (irqsafe)
@@ -244,7 +251,7 @@ salinfo_timeout_check(struct salinfo_dat
 	int i;
 	if (!data->open)
 		return;
-	for (i = 0; i < NR_CPUS; ++i) {
+	for_each_online_cpu(i) {
 		if (test_bit(i, &data->cpu_event)) {
 			/* double up() is not a problem, user space will see no
 			 * records for the additional "events".
@@ -291,7 +298,7 @@ retry:
 
 	n = data->cpu_check;
 	for (i = 0; i < NR_CPUS; i++) {
-		if (test_bit(n, &data->cpu_event)) {
+		if (test_bit(n, &data->cpu_event) && cpu_online(n)) {
 			cpu = n;
 			break;
 		}
@@ -585,11 +592,10 @@ salinfo_init(void)
 
 		/* we missed any events before now */
 		online = 0;
-		for (j = 0; j < NR_CPUS; j++)
-			if (cpu_online(j)) {
-				set_bit(j, &data->cpu_event);
-				++online;
-			}
+		for_each_online_cpu(j) {
+			set_bit(j, &data->cpu_event);
+			++online;
+		}
 		sema_init(&data->sem, online);
 
 		*sdir++ = dir;
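
For illustration, a minimal sketch of the new NULL-buffer convention
introduced above (the call site itself is hypothetical; SAL_INFO_TYPE_MCA
is the usual record-type constant):

	/* Flag "a new record is available in SAL" without passing data:
	 * with a NULL buffer, salinfo_log_wakeup() skips the data_saved
	 * bookkeeping and only sets the per-cpu event bit before waking
	 * any reader.
	 */
	salinfo_log_wakeup(SAL_INFO_TYPE_MCA, NULL, 0, 0);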
diff -puN arch/ia64/kernel/sys_ia64.c~git-ia64 arch/ia64/kernel/sys_ia64.c
--- 25/arch/ia64/kernel/sys_ia64.c~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/arch/ia64/kernel/sys_ia64.c	2005-09-01 05:34:24.000000000 -0600
@@ -35,7 +35,7 @@ arch_get_unmapped_area (struct file *fil
 		return -ENOMEM;
 
 #ifdef CONFIG_HUGETLB_PAGE
-	if (REGION_NUMBER(addr) == REGION_HPAGE)
+	if (REGION_NUMBER(addr) == RGN_HPAGE)
 		addr = 0;
 #endif
 	if (!addr)
diff -puN arch/ia64/kernel/uncached.c~git-ia64 arch/ia64/kernel/uncached.c
--- 25/arch/ia64/kernel/uncached.c~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/arch/ia64/kernel/uncached.c	2005-09-01 05:34:24.000000000 -0600
@@ -184,7 +184,7 @@ uncached_free_page(unsigned long maddr)
 {
 	int node;
 
-	node = nasid_to_cnodeid(NASID_GET(maddr));
+	node = paddr_to_nid(maddr - __IA64_UNCACHED_OFFSET);
 
 	dprintk(KERN_DEBUG "uncached_free_page(%lx) on node %i\n", maddr, node);
 
@@ -217,7 +217,7 @@ uncached_build_memmap(unsigned long star
 
 	memset((char *)vstart, 0, length);
 
-	node = nasid_to_cnodeid(NASID_GET(start));
+	node = paddr_to_nid(start);
 
 	for (; vstart < vend ; vstart += PAGE_SIZE) {
 		dprintk(KERN_INFO "sticking %lx into the pool!\n", vstart);
diff -puN arch/ia64/kernel/unwind.c~git-ia64 arch/ia64/kernel/unwind.c
--- 25/arch/ia64/kernel/unwind.c~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/arch/ia64/kernel/unwind.c	2005-09-01 05:34:24.000000000 -0600
@@ -2020,28 +2020,6 @@ init_frame_info (struct unw_frame_info *
 }
 
 void
-unw_init_from_interruption (struct unw_frame_info *info, struct task_struct *t,
-			    struct pt_regs *pt, struct switch_stack *sw)
-{
-	unsigned long sof;
-
-	init_frame_info(info, t, sw, pt->r12);
-	info->cfm_loc = &pt->cr_ifs;
-	info->unat_loc = &pt->ar_unat;
-	info->pfs_loc = &pt->ar_pfs;
-	sof = *info->cfm_loc & 0x7f;
-	info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sof);
-	info->ip = pt->cr_iip + ia64_psr(pt)->ri;
-	info->pt = (unsigned long) pt;
-	UNW_DPRINT(3, "unwind.%s:\n"
-		   "  bsp    0x%lx\n"
-		   "  sof    0x%lx\n"
-		   "  ip     0x%lx\n",
-		   __FUNCTION__, info->bsp, sof, info->ip);
-	find_save_locs(info);
-}
-
-void
 unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw)
 {
 	unsigned long sol;
diff -puN arch/ia64/kernel/vmlinux.lds.S~git-ia64 arch/ia64/kernel/vmlinux.lds.S
--- 25/arch/ia64/kernel/vmlinux.lds.S~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/arch/ia64/kernel/vmlinux.lds.S	2005-09-01 05:34:24.000000000 -0600
@@ -165,6 +165,7 @@ SECTIONS
   __init_end = .;
 
   /* The initial task and kernel stack */
+  . = ALIGN(KERNEL_STACK_SIZE);
   .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET)
 	{ *(.data.init_task) }
 
diff -puN arch/ia64/lib/Makefile~git-ia64 arch/ia64/lib/Makefile
--- 25/arch/ia64/lib/Makefile~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/arch/ia64/lib/Makefile	2005-09-01 05:34:24.000000000 -0600
@@ -6,7 +6,7 @@ obj-y := io.o
 
 lib-y := __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o			\
 	__divdi3.o __udivdi3.o __moddi3.o __umoddi3.o			\
-	bitop.o checksum.o clear_page.o csum_partial_copy.o copy_page.o	\
+	bitop.o checksum.o clear_page.o csum_partial_copy.o		\
 	clear_user.o strncpy_from_user.o strlen_user.o strnlen_user.o	\
 	flush.o ip_fast_csum.o do_csum.o				\
 	memset.o strlen.o swiotlb.o
diff -puN arch/ia64/lib/swiotlb.c~git-ia64 arch/ia64/lib/swiotlb.c
--- 25/arch/ia64/lib/swiotlb.c~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/arch/ia64/lib/swiotlb.c	2005-09-01 05:34:24.000000000 -0600
@@ -93,8 +93,7 @@ static int __init
 setup_io_tlb_npages(char *str)
 {
 	if (isdigit(*str)) {
-		io_tlb_nslabs = simple_strtoul(str, &str, 0) <<
-			(PAGE_SHIFT - IO_TLB_SHIFT);
+		io_tlb_nslabs = simple_strtoul(str, &str, 0);
 		/* avoid tail segment of size < IO_TLB_SEGSIZE */
 		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
 	}
@@ -117,7 +116,7 @@ swiotlb_init_with_default_size (size_t d
 	unsigned long i;
 
 	if (!io_tlb_nslabs) {
-		io_tlb_nslabs = (default_size >> PAGE_SHIFT);
+		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
 		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
 	}
 
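A worked example of what the two swiotlb hunks above change (assuming
this era's IO_TLB_SHIFT of 11, i.e. 2 KB slabs, and ia64's 16 KB pages):
with a 64 MB default, the pool is now 64 MB >> 11 = 32768 slabs covering
the full 64 MB, where "default_size >> PAGE_SHIFT" yielded only 4096
slabs (8 MB). Likewise a "swiotlb=<n>" boot value is now taken directly
as a slab count instead of being scaled up by PAGE_SHIFT - IO_TLB_SHIFT.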
diff -puN arch/ia64/mm/hugetlbpage.c~git-ia64 arch/ia64/mm/hugetlbpage.c
--- 25/arch/ia64/mm/hugetlbpage.c~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/arch/ia64/mm/hugetlbpage.c	2005-09-01 05:34:24.000000000 -0600
@@ -76,7 +76,7 @@ int is_aligned_hugepage_range(unsigned l
 		return -EINVAL;
 	if (addr & ~HPAGE_MASK)
 		return -EINVAL;
-	if (REGION_NUMBER(addr) != REGION_HPAGE)
+	if (REGION_NUMBER(addr) != RGN_HPAGE)
 		return -EINVAL;
 
 	return 0;
@@ -87,7 +87,7 @@ struct page *follow_huge_addr(struct mm_
 	struct page *page;
 	pte_t *ptep;
 
-	if (REGION_NUMBER(addr) != REGION_HPAGE)
+	if (REGION_NUMBER(addr) != RGN_HPAGE)
 		return ERR_PTR(-EINVAL);
 
 	ptep = huge_pte_offset(mm, addr);
@@ -142,8 +142,8 @@ unsigned long hugetlb_get_unmapped_area(
 		return -ENOMEM;
 	if (len & ~HPAGE_MASK)
 		return -EINVAL;
-	/* This code assumes that REGION_HPAGE != 0. */
-	if ((REGION_NUMBER(addr) != REGION_HPAGE) || (addr & (HPAGE_SIZE - 1)))
+	/* This code assumes that RGN_HPAGE != 0. */
+	if ((REGION_NUMBER(addr) != RGN_HPAGE) || (addr & (HPAGE_SIZE - 1)))
 		addr = HPAGE_REGION_BASE;
 	else
 		addr = ALIGN(addr, HPAGE_SIZE);
diff -puN arch/ia64/mm/init.c~git-ia64 arch/ia64/mm/init.c
--- 25/arch/ia64/mm/init.c~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/arch/ia64/mm/init.c	2005-09-01 05:34:24.000000000 -0600
@@ -382,13 +382,22 @@ ia64_mmu_init (void *my_cpu_data)
 
 	if (impl_va_bits < 51 || impl_va_bits > 61)
 		panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);
+	/*
+	 * mapped_space_bits - PAGE_SHIFT is the total number of ptes we need,
+	 * which must fit into "vmlpt_bits - pte_bits" slots. Second half of
+	 * the test makes sure that our mapped space doesn't overlap the
+	 * unimplemented hole in the middle of the region.
+	 */
+	if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) ||
+	    (mapped_space_bits > impl_va_bits - 1))
+		panic("Cannot build a big enough virtual-linear page table"
+		      " to cover mapped address space.\n"
+		      " Try using a smaller page size.\n");
+
 
 	/* place the VMLPT at the end of each page-table mapped region: */
 	pta = POW2(61) - POW2(vmlpt_bits);
 
-	if (POW2(mapped_space_bits) >= pta)
-		panic("mm/init: overlap between virtually mapped linear page table and "
-		      "mapped kernel space!");
 	/*
 	 * Set the (virtually mapped linear) page table address.  Bit
 	 * 8 selects between the short and long format, bits 2-7 the
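
To make the new VMLPT sanity check concrete (a sketch, assuming the
file's usual definitions: pte_bits = 3, mapped_space_bits =
3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT, and vmlpt_bits = impl_va_bits -
PAGE_SHIFT + pte_bits): with 16 KB pages (PAGE_SHIFT = 14) and
impl_va_bits = 51, mapped_space_bits = 3*11 + 14 = 47, so 2^(47-14) =
2^33 ptes are needed while 2^(40-3) = 2^37 slots are available, and
47 <= 50 keeps the mapped space clear of the unimplemented hole, so
neither clause of the panic test fires.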
diff -puN arch/ia64/pci/pci.c~git-ia64 arch/ia64/pci/pci.c
--- 25/arch/ia64/pci/pci.c~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/arch/ia64/pci/pci.c	2005-09-01 05:34:24.000000000 -0600
@@ -24,7 +24,6 @@
 
 #include <asm/machvec.h>
 #include <asm/page.h>
-#include <asm/segment.h>
 #include <asm/system.h>
 #include <asm/io.h>
 #include <asm/sal.h>
diff -puN arch/ia64/sn/include/tio.h~git-ia64 arch/ia64/sn/include/tio.h
--- 25/arch/ia64/sn/include/tio.h~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/arch/ia64/sn/include/tio.h	2005-09-01 05:34:24.000000000 -0600
@@ -3,7 +3,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved.
  */
 
 #ifndef _ASM_IA64_SN_TIO_H
@@ -26,6 +26,10 @@
 #define TIO_ITTE_VALID_MASK	0x1
 #define TIO_ITTE_VALID_SHIFT	16
 
+#define TIO_ITTE_WIDGET(itte) \
+	(((itte) >> TIO_ITTE_WIDGET_SHIFT) & TIO_ITTE_WIDGET_MASK)
+#define TIO_ITTE_VALID(itte) \
+	(((itte) >> TIO_ITTE_VALID_SHIFT) & TIO_ITTE_VALID_MASK)
 
 #define TIO_ITTE_PUT(nasid, bigwin, widget, addr, valid) \
         REMOTE_HUB_S((nasid), TIO_ITTE(bigwin), \
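
A quick decode example using only the constants defined above: for
itte = 0x10000, TIO_ITTE_VALID(itte) = (0x10000 >> 16) & 0x1 = 1, i.e.
the mapping is valid; TIO_ITTE_WIDGET() pulls the widget number out of
the same word with its own shift and mask.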
diff -puN arch/ia64/sn/include/xtalk/hubdev.h~git-ia64 arch/ia64/sn/include/xtalk/hubdev.h
--- 25/arch/ia64/sn/include/xtalk/hubdev.h~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/arch/ia64/sn/include/xtalk/hubdev.h	2005-09-01 05:34:24.000000000 -0600
@@ -3,7 +3,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
  */
 #ifndef _ASM_IA64_SN_XTALK_HUBDEV_H
 #define _ASM_IA64_SN_XTALK_HUBDEV_H
@@ -16,6 +16,9 @@
 #define IIO_ITTE_WIDGET_MASK    ((1<<IIO_ITTE_WIDGET_BITS)-1)
 #define IIO_ITTE_WIDGET_SHIFT   8
 
+#define IIO_ITTE_WIDGET(itte)	\
+	(((itte) >> IIO_ITTE_WIDGET_SHIFT) & IIO_ITTE_WIDGET_MASK)
+
 /*
  * Use the top big window as a surrogate for the first small window
  */
@@ -34,7 +37,8 @@ struct sn_flush_device_list {
 	unsigned long sfdl_force_int_addr;
 	unsigned long sfdl_flush_value;
 	volatile unsigned long *sfdl_flush_addr;
-	uint64_t sfdl_persistent_busnum;
+	uint32_t sfdl_persistent_busnum;
+	uint32_t sfdl_persistent_segment;
 	struct pcibus_info *sfdl_pcibus_info;
 	spinlock_t sfdl_flush_lock;
 };
@@ -58,7 +62,8 @@ struct hubdev_info {
 
 	void				*hdi_nodepda;
 	void				*hdi_node_vertex;
-	void				*hdi_xtalk_vertex;
+	uint32_t			max_segment_number;
+	uint32_t			max_pcibus_number;
 };
 
 extern void hubdev_init_node(nodepda_t *, cnodeid_t);
diff -puN arch/ia64/sn/kernel/bte.c~git-ia64 arch/ia64/sn/kernel/bte.c
--- 25/arch/ia64/sn/kernel/bte.c~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/arch/ia64/sn/kernel/bte.c	2005-09-01 05:34:24.000000000 -0600
@@ -29,16 +29,30 @@
 
 /* two interfaces on two btes */
 #define MAX_INTERFACES_TO_TRY		4
+#define MAX_NODES_TO_TRY		2
 
 static struct bteinfo_s *bte_if_on_node(nasid_t nasid, int interface)
 {
 	nodepda_t *tmp_nodepda;
 
+	if (nasid_to_cnodeid(nasid) == -1)
+		return (struct bteinfo_s *)NULL;
+
 	tmp_nodepda = NODEPDA(nasid_to_cnodeid(nasid));
 	return &tmp_nodepda->bte_if[interface];
 
 }
 
+static inline void bte_start_transfer(struct bteinfo_s *bte, u64 len, u64 mode)
+{
+	if (is_shub2()) {
+		BTE_CTRL_STORE(bte, (IBLS_BUSY | ((len) | (mode) << 24)));
+	} else {
+		BTE_LNSTAT_STORE(bte, len);
+		BTE_CTRL_STORE(bte, mode);
+	}
+}
+
 /************************************************************************
  * Block Transfer Engine copy related functions.
  *
@@ -67,13 +81,15 @@ bte_result_t bte_copy(u64 src, u64 dest,
 {
 	u64 transfer_size;
 	u64 transfer_stat;
+	u64 notif_phys_addr;
 	struct bteinfo_s *bte;
 	bte_result_t bte_status;
 	unsigned long irq_flags;
 	unsigned long itc_end = 0;
-	struct bteinfo_s *btes_to_try[MAX_INTERFACES_TO_TRY];
-	int bte_if_index;
-	int bte_pri, bte_sec;
+	int nasid_to_try[MAX_NODES_TO_TRY];
+	int my_nasid = get_nasid();
+	int bte_if_index, nasid_index;
+	int bte_first, btes_per_node = BTES_PER_NODE;
 
 	BTE_PRINTK(("bte_copy(0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%p)\n",
 		    src, dest, len, mode, notification));
@@ -86,36 +102,26 @@ bte_result_t bte_copy(u64 src, u64 dest,
 		 (src & L1_CACHE_MASK) || (dest & L1_CACHE_MASK));
 	BUG_ON(!(len < ((BTE_LEN_MASK + 1) << L1_CACHE_SHIFT)));
 
-	/* CPU 0 (per node) tries bte0 first, CPU 1 try bte1 first */
-	if (cpuid_to_subnode(smp_processor_id()) == 0) {
-		bte_pri = 0;
-		bte_sec = 1;
-	} else {
-		bte_pri = 1;
-		bte_sec = 0;
-	}
+	/*
+	 * Start with interface corresponding to cpu number
+	 */
+	bte_first = raw_smp_processor_id() % btes_per_node;
 
 	if (mode & BTE_USE_DEST) {
 		/* try remote then local */
-		btes_to_try[0] = bte_if_on_node(NASID_GET(dest), bte_pri);
-		btes_to_try[1] = bte_if_on_node(NASID_GET(dest), bte_sec);
+		nasid_to_try[0] = NASID_GET(dest);
 		if (mode & BTE_USE_ANY) {
-			btes_to_try[2] = bte_if_on_node(get_nasid(), bte_pri);
-			btes_to_try[3] = bte_if_on_node(get_nasid(), bte_sec);
+			nasid_to_try[1] = my_nasid;
 		} else {
-			btes_to_try[2] = NULL;
-			btes_to_try[3] = NULL;
+			nasid_to_try[1] = 0;
 		}
 	} else {
 		/* try local then remote */
-		btes_to_try[0] = bte_if_on_node(get_nasid(), bte_pri);
-		btes_to_try[1] = bte_if_on_node(get_nasid(), bte_sec);
+		nasid_to_try[0] = my_nasid;
 		if (mode & BTE_USE_ANY) {
-			btes_to_try[2] = bte_if_on_node(NASID_GET(dest), bte_pri);
-			btes_to_try[3] = bte_if_on_node(NASID_GET(dest), bte_sec);
+			nasid_to_try[1] = NASID_GET(dest);
 		} else {
-			btes_to_try[2] = NULL;
-			btes_to_try[3] = NULL;
+			nasid_to_try[1] = 0;
 		}
 	}
 
@@ -123,11 +129,12 @@ retry_bteop:
 	do {
 		local_irq_save(irq_flags);
 
-		bte_if_index = 0;
+		bte_if_index = bte_first;
+		nasid_index = 0;
 
 		/* Attempt to lock one of the BTE interfaces. */
-		while (bte_if_index < MAX_INTERFACES_TO_TRY) {
-			bte = btes_to_try[bte_if_index++];
+		while (nasid_index < MAX_NODES_TO_TRY) {
+			bte = bte_if_on_node(nasid_to_try[nasid_index],bte_if_index);
 
 			if (bte == NULL) {
+				nasid_index++;
 				continue;
 			}
 					break;
 				}
 			}
+
+			bte_if_index = (bte_if_index + 1) % btes_per_node; /* Next interface */
+			if (bte_if_index == bte_first) {
+				/*
+				 * We've tried all interfaces on this node
+				 */
+				nasid_index++;
+			}
+
 			bte = NULL;
 		}
 
@@ -169,7 +185,13 @@ retry_bteop:
 
 	/* Initialize the notification to a known value. */
 	*bte->most_rcnt_na = BTE_WORD_BUSY;
+	notif_phys_addr = TO_PHYS(ia64_tpa((unsigned long)bte->most_rcnt_na));
 
+	if (is_shub2()) {
+		src = SH2_TIO_PHYS_TO_DMA(src);
+		dest = SH2_TIO_PHYS_TO_DMA(dest);
+		notif_phys_addr = SH2_TIO_PHYS_TO_DMA(notif_phys_addr);
+	}
 	/* Set the source and destination registers */
 	BTE_PRINTKV(("IBSA = 0x%lx)\n", (TO_PHYS(src))));
 	BTE_SRC_STORE(bte, TO_PHYS(src));
@@ -177,14 +199,12 @@ retry_bteop:
 	BTE_DEST_STORE(bte, TO_PHYS(dest));
 
 	/* Set the notification register */
-	BTE_PRINTKV(("IBNA = 0x%lx)\n",
-		     TO_PHYS(ia64_tpa((unsigned long)bte->most_rcnt_na))));
-	BTE_NOTIF_STORE(bte,
-			TO_PHYS(ia64_tpa((unsigned long)bte->most_rcnt_na)));
+	BTE_PRINTKV(("IBNA = 0x%lx)\n", notif_phys_addr));
+	BTE_NOTIF_STORE(bte, notif_phys_addr);
 
 	/* Initiate the transfer */
 	BTE_PRINTK(("IBCT = 0x%lx)\n", BTE_VALID_MODE(mode)));
-	BTE_START_TRANSFER(bte, transfer_size, BTE_VALID_MODE(mode));
+	bte_start_transfer(bte, transfer_size, BTE_VALID_MODE(mode));
 
 	itc_end = ia64_get_itc() + (40000000 * local_cpu_data->cyc_per_usec);
 
@@ -195,6 +215,7 @@ retry_bteop:
 	}
 
 	while ((transfer_stat = *bte->most_rcnt_na) == BTE_WORD_BUSY) {
+		cpu_relax();
 		if (ia64_get_itc() > itc_end) {
 			BTE_PRINTK(("BTE timeout nasid 0x%x bte%d IBLS = 0x%lx na 0x%lx\n",
 				NASID_GET(bte->bte_base_addr), bte->bte_num,
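
For reference, a hedged sketch of a bte_copy() call exercising the new
node-selection logic (src_phys, dst_phys and nbytes are hypothetical;
lengths must be L1-cache-line multiples per the BUG_ON checks above):

	/* Try the destination node's BTEs first, falling back to any
	 * node; a NULL notification pointer makes the copy synchronous.
	 */
	bte_result_t rc = bte_copy(src_phys, dst_phys, nbytes,
				   BTE_USE_DEST | BTE_USE_ANY, NULL);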
diff -puN arch/ia64/sn/kernel/huberror.c~git-ia64 arch/ia64/sn/kernel/huberror.c
--- 25/arch/ia64/sn/kernel/huberror.c~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/arch/ia64/sn/kernel/huberror.c	2005-09-01 05:34:24.000000000 -0600
@@ -76,7 +76,7 @@ void hubiio_crb_free(struct hubdev_info 
 	 */
 	REMOTE_HUB_S(hubdev_info->hdi_nasid, IIO_ICDR, (IIO_ICDR_PND | crbnum));
 	while (REMOTE_HUB_L(hubdev_info->hdi_nasid, IIO_ICDR) & IIO_ICDR_PND)
-		udelay(1);
+		cpu_relax();
 
 }
 
diff -puN arch/ia64/sn/kernel/io_init.c~git-ia64 arch/ia64/sn/kernel/io_init.c
--- 25/arch/ia64/sn/kernel/io_init.c~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/arch/ia64/sn/kernel/io_init.c	2005-09-01 05:34:24.000000000 -0600
@@ -18,6 +18,7 @@
 #include <asm/sn/simulator.h>
 #include <asm/sn/sn_sal.h>
 #include <asm/sn/tioca_provider.h>
+#include <asm/sn/tioce_provider.h>
 #include "xtalk/hubdev.h"
 #include "xtalk/xwidgetdev.h"
 
@@ -44,6 +45,9 @@ int sn_ioif_inited = 0;		/* SN I/O infra
 
 struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES];	/* indexed by asic type */
 
+static int max_segment_number = 0; /* Default highest segment number */
+static int max_pcibus_number = 255; /* Default highest pci bus number */
+
 /*
  * Hooks and struct for unsupported pci providers
  */
@@ -157,13 +161,28 @@ static void sn_fixup_ionodes(void)
 	uint64_t nasid;
 	int i, widget;
 
+	/*
+	 * Get SGI Specific HUB chipset information.
+	 * Inform Prom that this kernel can support domain bus numbering.
+	 */
 	for (i = 0; i < numionodes; i++) {
 		hubdev = (struct hubdev_info *)(NODEPDA(i)->pdinfo);
 		nasid = cnodeid_to_nasid(i);
+		hubdev->max_segment_number = 0xffffffff;
+		hubdev->max_pcibus_number = 0xff;
 		status = sal_get_hubdev_info(nasid, (uint64_t) __pa(hubdev));
 		if (status)
 			continue;
 
+		/* Save the largest Domain and pcibus numbers found. */
+		if (hubdev->max_segment_number) {
+			/*
+			 * Dealing with a Prom that supports segments.
+			 */
+			max_segment_number = hubdev->max_segment_number;
+			max_pcibus_number = hubdev->max_pcibus_number;
+		}
+
 		/* Attach the error interrupt handlers */
 		if (nasid & 1)
 			ice_error_init(hubdev);
@@ -230,7 +249,7 @@ void sn_pci_unfixup_slot(struct pci_dev 
 void sn_pci_fixup_slot(struct pci_dev *dev)
 {
 	int idx;
-	int segment = 0;
+	int segment = pci_domain_nr(dev->bus);
 	int status = 0;
 	struct pcibus_bussoft *bs;
  	struct pci_bus *host_pci_bus;
@@ -283,9 +302,9 @@ void sn_pci_fixup_slot(struct pci_dev *d
  	 * PCI host_pci_dev struct and set up host bus linkages
  	 */
 
- 	bus_no = SN_PCIDEV_INFO(dev)->pdi_slot_host_handle >> 32;
+ 	bus_no = (SN_PCIDEV_INFO(dev)->pdi_slot_host_handle >> 32) & 0xff;
  	devfn = SN_PCIDEV_INFO(dev)->pdi_slot_host_handle & 0xffffffff;
- 	host_pci_bus = pci_find_bus(pci_domain_nr(dev->bus), bus_no);
+ 	host_pci_bus = pci_find_bus(segment, bus_no);
  	host_pci_dev = pci_get_slot(host_pci_bus, devfn);
 
 	SN_PCIDEV_INFO(dev)->host_pci_dev = host_pci_dev;
@@ -333,6 +352,7 @@ void sn_pci_controller_fixup(int segment
 	prom_bussoft_ptr = __va(prom_bussoft_ptr);
 
  	controller = kcalloc(1,sizeof(struct pci_controller), GFP_KERNEL);
  	if (!controller)
  		BUG();
+	controller->segment = segment;
 
@@ -390,7 +410,7 @@ void sn_pci_controller_fixup(int segment
 	if (controller->node >= num_online_nodes()) {
 		struct pcibus_bussoft *b = SN_PCIBUS_BUSSOFT(bus);
 
-		printk(KERN_WARNING "Device ASIC=%u XID=%u PBUSNUM=%lu"
+		printk(KERN_WARNING "Device ASIC=%u XID=%u PBUSNUM=%u"
 				    "L_IO=%lx L_MEM=%lx BASE=%lx\n",
 			b->bs_asic_type, b->bs_xid, b->bs_persist_busnum,
 			b->bs_legacy_io, b->bs_legacy_mem, b->bs_base);
@@ -445,6 +465,7 @@ sn_sysdata_free_start:
 static int __init sn_pci_init(void)
 {
 	int i = 0;
+	int j = 0;
 	struct pci_dev *pci_dev = NULL;
 	extern void sn_init_cpei_timer(void);
 #ifdef CONFIG_PROC_FS
@@ -464,6 +485,7 @@ static int __init sn_pci_init(void)
 
 	pcibr_init_provider();
 	tioca_init_provider();
+	tioce_init_provider();
 
 	/*
 	 * This is needed to avoid bounce limit checks in the blk layer
@@ -479,8 +501,9 @@ static int __init sn_pci_init(void)
 #endif
 
 	/* busses are not known yet ... */
-	for (i = 0; i < PCI_BUSES_TO_SCAN; i++)
-		sn_pci_controller_fixup(0, i, NULL);
+	for (i = 0; i <= max_segment_number; i++)
+		for (j = 0; j <= max_pcibus_number; j++)
+			sn_pci_controller_fixup(i, j, NULL);
 
 	/*
 	 * Generic Linux PCI Layer has created the pci_bus and pci_dev 
diff -puN arch/ia64/sn/kernel/irq.c~git-ia64 arch/ia64/sn/kernel/irq.c
--- 25/arch/ia64/sn/kernel/irq.c~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/arch/ia64/sn/kernel/irq.c	2005-09-01 05:34:24.000000000 -0600
@@ -5,7 +5,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc.  All Rights Reserved.
  */
 
 #include <linux/irq.h>
@@ -76,16 +76,14 @@ static void sn_enable_irq(unsigned int i
 
 static void sn_ack_irq(unsigned int irq)
 {
-	uint64_t event_occurred, mask = 0;
-	int nasid;
+	u64 event_occurred, mask = 0;
 
 	irq = irq & 0xff;
-	nasid = get_nasid();
 	event_occurred =
-	    HUB_L((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED));
+	    HUB_L((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED));
 	mask = event_occurred & SH_ALL_INT_MASK;
-	HUB_S((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED_ALIAS),
-		 mask);
+	HUB_S((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS),
+	      mask);
 	__set_bit(irq, (volatile void *)pda->sn_in_service_ivecs);
 
 	move_irq(irq);
@@ -93,15 +91,12 @@ static void sn_ack_irq(unsigned int irq)
 
 static void sn_end_irq(unsigned int irq)
 {
-	int nasid;
 	int ivec;
-	uint64_t event_occurred;
+	u64 event_occurred;
 
 	ivec = irq & 0xff;
 	if (ivec == SGI_UART_VECTOR) {
-		nasid = get_nasid();
-		event_occurred = HUB_L((uint64_t *) GLOBAL_MMR_ADDR
-				       (nasid, SH_EVENT_OCCURRED));
+		event_occurred = HUB_L((u64*)LOCAL_MMR_ADDR (SH_EVENT_OCCURRED));
 		/* If the UART bit is set here, we may have received an
 		 * interrupt from the UART that the driver missed.  To
 		 * make sure, we IPI ourselves to force us to look again.
@@ -132,6 +127,7 @@ static void sn_set_affinity_irq(unsigned
 		int local_widget, status;
 		nasid_t local_nasid;
 		struct sn_irq_info *new_irq_info;
+		struct sn_pcibus_provider *pci_provider;
 
 		new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC);
 		if (new_irq_info == NULL)
@@ -171,8 +167,9 @@ static void sn_set_affinity_irq(unsigned
 		new_irq_info->irq_cpuid = cpuid;
 		register_intr_pda(new_irq_info);
 
-		if (IS_PCI_BRIDGE_ASIC(new_irq_info->irq_bridge_type))
-			pcibr_change_devices_irq(new_irq_info);
+		pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type];
+		if (pci_provider && pci_provider->target_interrupt)
+			(pci_provider->target_interrupt)(new_irq_info);
 
 		spin_lock(&sn_irq_info_lock);
 		list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
@@ -317,6 +314,16 @@ void sn_irq_unfixup(struct pci_dev *pci_
 	pci_dev_put(pci_dev);
 }
 
+static inline void
+sn_call_force_intr_provider(struct sn_irq_info *sn_irq_info)
+{
+	struct sn_pcibus_provider *pci_provider;
+
+	pci_provider = sn_pci_provider[sn_irq_info->irq_bridge_type];
+	if (pci_provider && pci_provider->force_interrupt)
+		(*pci_provider->force_interrupt)(sn_irq_info);
+}
+
 static void force_interrupt(int irq)
 {
 	struct sn_irq_info *sn_irq_info;
@@ -325,11 +332,9 @@ static void force_interrupt(int irq)
 		return;
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[irq], list) {
-		if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type) &&
-		    (sn_irq_info->irq_bridge != NULL))
-			pcibr_force_interrupt(sn_irq_info);
-	}
+	list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[irq], list)
+		sn_call_force_intr_provider(sn_irq_info);
+
 	rcu_read_unlock();
 }
 
@@ -351,6 +356,14 @@ static void sn_check_intr(int irq, struc
 	struct pcidev_info *pcidev_info;
 	struct pcibus_info *pcibus_info;
 
+	/*
+	 * Bridge types attached to TIO (anything but PIC) do not need this WAR
+	 * since they do not target Shub II interrupt registers.  If that
+	 * ever changes, this check needs to accommodate it.
+	 */
+	if (sn_irq_info->irq_bridge_type != PCIIO_ASIC_TYPE_PIC)
+		return;
+
 	pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
 	if (!pcidev_info)
 		return;
@@ -377,16 +390,12 @@ static void sn_check_intr(int irq, struc
 		break;
 	}
 	if (!test_bit(irr_bit, &irr_reg)) {
-		if (!test_bit(irq, pda->sn_soft_irr)) {
-			if (!test_bit(irq, pda->sn_in_service_ivecs)) {
-				regval &= 0xff;
-				if (sn_irq_info->irq_int_bit & regval &
-				    sn_irq_info->irq_last_intr) {
-					regval &=
-					    ~(sn_irq_info->
-					      irq_int_bit & regval);
-					pcibr_force_interrupt(sn_irq_info);
-				}
+		if (!test_bit(irq, pda->sn_in_service_ivecs)) {
+			regval &= 0xff;
+			if (sn_irq_info->irq_int_bit & regval &
+			    sn_irq_info->irq_last_intr) {
+				regval &= ~(sn_irq_info->irq_int_bit & regval);
+				sn_call_force_intr_provider(sn_irq_info);
 			}
 		}
 	}
@@ -404,13 +413,7 @@ void sn_lb_int_war_check(void)
 	rcu_read_lock();
 	for (i = pda->sn_first_irq; i <= pda->sn_last_irq; i++) {
 		list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[i], list) {
-			/*
-			 * Only call for PCI bridges that are fully
-			 * initialized.
-			 */
-			if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type) &&
-			    (sn_irq_info->irq_bridge != NULL))
-				sn_check_intr(i, sn_irq_info);
+			sn_check_intr(i, sn_irq_info);
 		}
 	}
 	rcu_read_unlock();
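
The two helpers above replace hard-wired PIC calls with a provider
dispatch; a sketch of what a bus provider now supplies (the initializer
and function names are illustrative, field names per sn_pcibus_provider
as used above):

	static struct sn_pcibus_provider my_asic_provider = {
		/* ... dma setup ops ... */
		.force_interrupt  = my_force_interrupt,  /* replay a lost interrupt */
		.target_interrupt = my_target_interrupt, /* retarget to a new cpu */
	};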
diff -puN arch/ia64/sn/kernel/setup.c~git-ia64 arch/ia64/sn/kernel/setup.c
--- 25/arch/ia64/sn/kernel/setup.c~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/arch/ia64/sn/kernel/setup.c	2005-09-01 05:34:24.000000000 -0600
@@ -49,6 +49,7 @@
 #include <asm/sn/clksupport.h>
 #include <asm/sn/sn_sal.h>
 #include <asm/sn/geo.h>
+#include <asm/sn/sn_feature_sets.h>
 #include "xtalk/xwidgetdev.h"
 #include "xtalk/hubdev.h"
 #include <asm/sn/klconfig.h>
@@ -80,8 +81,6 @@ EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_na
 DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda);
 EXPORT_PER_CPU_SYMBOL(__sn_nodepda);
 
-partid_t sn_partid = -1;
-EXPORT_SYMBOL(sn_partid);
 char sn_system_serial_number_string[128];
 EXPORT_SYMBOL(sn_system_serial_number_string);
 u64 sn_partition_serial_number;
@@ -99,6 +98,7 @@ EXPORT_SYMBOL(sn_region_size);
 int sn_prom_type;	/* 0=hardware, 1=medusa/realprom, 2=medusa/fakeprom */
 
 short physical_node_map[MAX_PHYSNODE_ID];
+static unsigned long sn_prom_features[MAX_PROM_FEATURE_SETS];
 
 EXPORT_SYMBOL(physical_node_map);
 
@@ -273,7 +273,10 @@ void __init sn_setup(char **cmdline_p)
 	u32 version = sn_sal_rev();
 	extern void sn_cpu_init(void);
 
-	ia64_sn_plat_set_error_handling_features();
+	ia64_sn_plat_set_error_handling_features();	/* obsolete */
+	ia64_sn_set_os_feature(OSF_MCA_SLV_TO_OS_INIT_SLV);
+	ia64_sn_set_os_feature(OSF_FEAT_LOG_SBES);
+
 
 #if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
 	/*
@@ -316,16 +319,6 @@ void __init sn_setup(char **cmdline_p)
 
 	printk("SGI SAL version %x.%02x\n", version >> 8, version & 0x00FF);
 
-	/*
-	 * Confirm the SAL we're running on is recent enough...
-	 */
-	if (version < SN_SAL_MIN_VERSION) {
-		printk(KERN_ERR "This kernel needs SGI SAL version >= "
-		       "%x.%02x\n", SN_SAL_MIN_VERSION >> 8,
-		        SN_SAL_MIN_VERSION & 0x00FF);
-		panic("PROM version too old\n");
-	}
-
 	master_nasid = boot_get_nasid();
 
 	status =
@@ -403,6 +396,7 @@ static void __init sn_init_pdas(char **c
 		memset(nodepdaindr[cnode], 0, sizeof(nodepda_t));
 		memset(nodepdaindr[cnode]->phys_cpuid, -1,
 		    sizeof(nodepdaindr[cnode]->phys_cpuid));
+		spin_lock_init(&nodepdaindr[cnode]->ptc_lock);
 	}
 
 	/*
@@ -481,6 +475,10 @@ void __init sn_cpu_init(void)
 	if (nodepdaindr[0] == NULL)
 		return;
 
+	for (i = 0; i < MAX_PROM_FEATURE_SETS; i++)
+		if (ia64_sn_get_prom_feature_set(i, &sn_prom_features[i]) != 0)
+			break;
+
 	cpuid = smp_processor_id();
 	cpuphyid = get_sapicid();
 
@@ -532,8 +530,8 @@ void __init sn_cpu_init(void)
 	 */
 	{
 		u64 pio1[] = {SH1_PIO_WRITE_STATUS_0, 0, SH1_PIO_WRITE_STATUS_1, 0};
-		u64 pio2[] = {SH2_PIO_WRITE_STATUS_0, SH2_PIO_WRITE_STATUS_1,
-			SH2_PIO_WRITE_STATUS_2, SH2_PIO_WRITE_STATUS_3};
+		u64 pio2[] = {SH2_PIO_WRITE_STATUS_0, SH2_PIO_WRITE_STATUS_2,
+			SH2_PIO_WRITE_STATUS_1, SH2_PIO_WRITE_STATUS_3};
 		u64 *pio;
 		pio = is_shub1() ? pio1 : pio2;
 		pda->pio_write_status_addr = (volatile unsigned long *) LOCAL_MMR_ADDR(pio[slice]);
@@ -652,3 +650,12 @@ nasid_slice_to_cpuid(int nasid, int slic
 
 	return -1;
 }
+
+int sn_prom_feature_available(int id)
+{
+	if (id >= BITS_PER_LONG * MAX_PROM_FEATURE_SETS)
+		return 0;
+	return test_bit(id, sn_prom_features);
+}
+EXPORT_SYMBOL(sn_prom_feature_available);
+
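
A hedged usage sketch for the new export (the feature id below is
illustrative; real ids live in <asm/sn/sn_feature_sets.h>):

	/* Take a PROM-dependent path only when the PROM advertises it. */
	if (sn_prom_feature_available(PRF_EXAMPLE_FEATURE))
		use_new_prom_interface();	/* hypothetical */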
diff -puN arch/ia64/sn/kernel/sn2/ptc_deadlock.S~git-ia64 arch/ia64/sn/kernel/sn2/ptc_deadlock.S
--- 25/arch/ia64/sn/kernel/sn2/ptc_deadlock.S~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/arch/ia64/sn/kernel/sn2/ptc_deadlock.S	2005-09-01 05:34:24.000000000 -0600
@@ -3,7 +3,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved.
  */
 
 #include <asm/types.h>
@@ -11,7 +11,7 @@
 
 #define DEADLOCKBIT	SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_SHFT
 #define WRITECOUNTMASK	SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK
-#define ALIAS_OFFSET	(SH1_PIO_WRITE_STATUS_0_ALIAS-SH1_PIO_WRITE_STATUS_0)
+#define ALIAS_OFFSET	8
 
 
 	.global	sn2_ptc_deadlock_recovery_core
@@ -36,13 +36,15 @@ sn2_ptc_deadlock_recovery_core:
 	extr.u	piowcphy=piowc,0,61;;	// Convert piowc to uncached physical address
 	dep	piowcphy=-1,piowcphy,63,1
 	movl	mask=WRITECOUNTMASK
+	mov	r8=r0
 
 1:
 	add	scr2=ALIAS_OFFSET,piowc	// Address of WRITE_STATUS alias register 
-	mov	scr1=7;;		// Clear DEADLOCK, WRITE_ERROR, MULTI_WRITE_ERROR
-	st8.rel	[scr2]=scr1;;
+	;;
+	ld8.acq	scr1=[scr2];;
 
 5:	ld8.acq	scr1=[piowc];;		// Wait for PIOs to complete.
+	hint	@pause
 	and	scr2=scr1,mask;;	// mask of writecount bits
 	cmp.ne	p6,p0=zeroval,scr2
 (p6)	br.cond.sptk 5b
@@ -57,6 +59,7 @@ sn2_ptc_deadlock_recovery_core:
 	st8.rel [ptc0]=data0		// Write PTC0 & wait for completion.
 
 5:	ld8.acq	scr1=[piowcphy];;	// Wait for PIOs to complete.
+	hint	@pause
 	and	scr2=scr1,mask;;	// mask of writecount bits
 	cmp.ne	p6,p0=zeroval,scr2
 (p6)	br.cond.sptk 5b;;
@@ -67,6 +70,7 @@ sn2_ptc_deadlock_recovery_core:
 (p7)	st8.rel [ptc1]=data1;;		// Now write PTC1.
 
 5:	ld8.acq	scr1=[piowcphy];;	// Wait for PIOs to complete.
+	hint	@pause
 	and	scr2=scr1,mask;;	// mask of writecount bits
 	cmp.ne	p6,p0=zeroval,scr2
 (p6)	br.cond.sptk 5b
@@ -77,6 +81,7 @@ sn2_ptc_deadlock_recovery_core:
 	srlz.i;;
 	////////////// END   PHYSICAL MODE ////////////////////
 
+(p8)	add	r8=1,r8
 (p8)	br.cond.spnt 1b;;		// Repeat if DEADLOCK occurred.
 
 	br.ret.sptk	rp
diff -puN arch/ia64/sn/kernel/sn2/sn2_smp.c~git-ia64 arch/ia64/sn/kernel/sn2/sn2_smp.c
--- 25/arch/ia64/sn/kernel/sn2/sn2_smp.c~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/arch/ia64/sn/kernel/sn2/sn2_smp.c	2005-09-01 05:34:24.000000000 -0600
@@ -5,7 +5,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved.
  */
 
 #include <linux/init.h>
@@ -20,6 +20,8 @@
 #include <linux/module.h>
 #include <linux/bitops.h>
 #include <linux/nodemask.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
 
 #include <asm/processor.h>
 #include <asm/irq.h>
@@ -39,12 +41,120 @@
 #include <asm/sn/nodepda.h>
 #include <asm/sn/rw_mmr.h>
 
-void sn2_ptc_deadlock_recovery(volatile unsigned long *, unsigned long data0, 
-	volatile unsigned long *, unsigned long data1);
+DEFINE_PER_CPU(struct ptc_stats, ptcstats);
+DECLARE_PER_CPU(struct ptc_stats, ptcstats);
 
 static  __cacheline_aligned DEFINE_SPINLOCK(sn2_global_ptc_lock);
 
-static unsigned long sn2_ptc_deadlock_count;
+void sn2_ptc_deadlock_recovery(short *, short, int, volatile unsigned long *, unsigned long data0,
+	volatile unsigned long *, unsigned long data1);
+
+#ifdef DEBUG_PTC
+/*
+ * ptctest:
+ *
+ * 	xyz - 3 digit hex number:
+ * 		x - Force PTC purges to use shub:
+ * 			0 - no force
+ * 			1 - force
+ * 		y - interrupt enable
+ * 			0 - disable interrupts
+ * 			1 - leave interrupts enabled
+ * 		z - type of lock:
+ * 			0 - global lock
+ * 			1 - node local lock
+ * 			2 - no lock
+ *
+ *   	Note: on shub1, only ptctest == 0 is supported. Don't try other values!
+ */
+
+static unsigned int sn2_ptctest = 0;
+
+static int __init ptc_test(char *str)
+{
+	get_option(&str, &sn2_ptctest);
+	return 1;
+}
+__setup("ptctest=", ptc_test);
+
+static inline int ptc_lock(unsigned long *flagp)
+{
+	unsigned long opt = sn2_ptctest & 255;
+
+	switch (opt) {
+	case 0x00:
+		spin_lock_irqsave(&sn2_global_ptc_lock, *flagp);
+		break;
+	case 0x01:
+		spin_lock_irqsave(&sn_nodepda->ptc_lock, *flagp);
+		break;
+	case 0x02:
+		local_irq_save(*flagp);
+		break;
+	case 0x10:
+		spin_lock(&sn2_global_ptc_lock);
+		break;
+	case 0x11:
+		spin_lock(&sn_nodepda->ptc_lock);
+		break;
+	case 0x12:
+		break;
+	default:
+		BUG();
+	}
+	return opt;
+}
+
+static inline void ptc_unlock(unsigned long flags, int opt)
+{
+	switch (opt) {
+	case 0x00:
+		spin_unlock_irqrestore(&sn2_global_ptc_lock, flags);
+		break;
+	case 0x01:
+		spin_unlock_irqrestore(&sn_nodepda->ptc_lock, flags);
+		break;
+	case 0x02:
+		local_irq_restore(flags);
+		break;
+	case 0x10:
+		spin_unlock(&sn2_global_ptc_lock);
+		break;
+	case 0x11:
+		spin_unlock(&sn_nodepda->ptc_lock);
+		break;
+	case 0x12:
+		break;
+	default:
+		BUG();
+	}
+}
+#else
+
+#define sn2_ptctest	0
+
+static inline int ptc_lock(unsigned long *flagp)
+{
+	spin_lock_irqsave(&sn2_global_ptc_lock, *flagp);
+	return 0;
+}
+
+static inline void ptc_unlock(unsigned long flags, int opt)
+{
+	spin_unlock_irqrestore(&sn2_global_ptc_lock, flags);
+}
+#endif
+
+struct ptc_stats {
+	unsigned long ptc_l;
+	unsigned long change_rid;
+	unsigned long shub_ptc_flushes;
+	unsigned long nodes_flushed;
+	unsigned long deadlocks;
+	unsigned long lock_itc_clocks;
+	unsigned long shub_itc_clocks;
+	unsigned long shub_itc_clocks_max;
+};
 
 static inline unsigned long wait_piowc(void)
 {
@@ -89,9 +199,9 @@ void
 sn2_global_tlb_purge(unsigned long start, unsigned long end,
 		     unsigned long nbits)
 {
-	int i, shub1, cnode, mynasid, cpu, lcpu = 0, nasid, flushed = 0;
+	int i, opt, shub1, cnode, mynasid, cpu, lcpu = 0, nasid, flushed = 0;
 	volatile unsigned long *ptc0, *ptc1;
-	unsigned long flags = 0, data0 = 0, data1 = 0;
+	unsigned long itc, itc2, flags, data0 = 0, data1 = 0;
 	struct mm_struct *mm = current->active_mm;
 	short nasids[MAX_NUMNODES], nix;
 	nodemask_t nodes_flushed;
@@ -114,16 +224,19 @@ sn2_global_tlb_purge(unsigned long start
 			start += (1UL << nbits);
 		} while (start < end);
 		ia64_srlz_i();
+		__get_cpu_var(ptcstats).ptc_l++;
 		preempt_enable();
 		return;
 	}
 
 	if (atomic_read(&mm->mm_users) == 1) {
 		flush_tlb_mm(mm);
+		__get_cpu_var(ptcstats).change_rid++;
 		preempt_enable();
 		return;
 	}
 
+	itc = ia64_get_itc();
 	nix = 0;
 	for_each_node_mask(cnode, nodes_flushed)
 		nasids[nix++] = cnodeid_to_nasid(cnode);
@@ -148,7 +261,12 @@ sn2_global_tlb_purge(unsigned long start
 
 	mynasid = get_nasid();
 
-	spin_lock_irqsave(&sn2_global_ptc_lock, flags);
+	itc = ia64_get_itc();
+	opt = ptc_lock(&flags);
+	itc2 = ia64_get_itc();
+	__get_cpu_var(ptcstats).lock_itc_clocks += itc2 - itc;
+	__get_cpu_var(ptcstats).shub_ptc_flushes++;
+	__get_cpu_var(ptcstats).nodes_flushed += nix;
 
 	do {
 		if (shub1)
@@ -157,7 +275,7 @@ sn2_global_tlb_purge(unsigned long start
 			data0 = (data0 & ~SH2_PTC_ADDR_MASK) | (start & SH2_PTC_ADDR_MASK);
 		for (i = 0; i < nix; i++) {
 			nasid = nasids[i];
-			if (unlikely(nasid == mynasid)) {
+			if ((!(sn2_ptctest & 3)) && unlikely(nasid == mynasid)) {
 				ia64_ptcga(start, nbits << 2);
 				ia64_srlz_i();
 			} else {
@@ -169,18 +287,22 @@ sn2_global_tlb_purge(unsigned long start
 				flushed = 1;
 			}
 		}
-
 		if (flushed
 		    && (wait_piowc() &
-			SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_MASK)) {
-			sn2_ptc_deadlock_recovery(ptc0, data0, ptc1, data1);
+				(SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_MASK))) {
+			sn2_ptc_deadlock_recovery(nasids, nix, mynasid, ptc0, data0, ptc1, data1);
 		}
 
 		start += (1UL << nbits);
 
 	} while (start < end);
 
-	spin_unlock_irqrestore(&sn2_global_ptc_lock, flags);
+	itc2 = ia64_get_itc() - itc2;
+	__get_cpu_var(ptcstats).shub_itc_clocks += itc2;
+	if (itc2 > __get_cpu_var(ptcstats).shub_itc_clocks_max)
+		__get_cpu_var(ptcstats).shub_itc_clocks_max = itc2;
+
+	ptc_unlock(flags, opt);
 
 	preempt_enable();
 }
@@ -192,31 +314,29 @@ sn2_global_tlb_purge(unsigned long start
  * TLB flush transaction.  The recovery sequence is somewhat tricky & is
  * coded in assembly language.
  */
-void sn2_ptc_deadlock_recovery(volatile unsigned long *ptc0, unsigned long data0,
+void sn2_ptc_deadlock_recovery(short *nasids, short nix, int mynasid, volatile unsigned long *ptc0, unsigned long data0,
 	volatile unsigned long *ptc1, unsigned long data1)
 {
 	extern void sn2_ptc_deadlock_recovery_core(volatile unsigned long *, unsigned long,
 	        volatile unsigned long *, unsigned long, volatile unsigned long *, unsigned long);
-	int cnode, mycnode, nasid;
-	volatile unsigned long *piows;
-	volatile unsigned long zeroval;
+	short nasid, i;
+	unsigned long *piows, zeroval;
 
-	sn2_ptc_deadlock_count++;
+	__get_cpu_var(ptcstats).deadlocks++;
 
-	piows = pda->pio_write_status_addr;
+	piows = (unsigned long *) pda->pio_write_status_addr;
 	zeroval = pda->pio_write_status_val;
 
-	mycnode = numa_node_id();
-
-	for_each_online_node(cnode) {
-		if (is_headless_node(cnode) || cnode == mycnode)
+	for (i=0; i < nix; i++) {
+		nasid = nasids[i];
+		if (!(sn2_ptctest & 3) && nasid == mynasid)
 			continue;
-		nasid = cnodeid_to_nasid(cnode);
 		ptc0 = CHANGE_NASID(nasid, ptc0);
 		if (ptc1)
 			ptc1 = CHANGE_NASID(nasid, ptc1);
 		sn2_ptc_deadlock_recovery_core(ptc0, data0, ptc1, data1, piows, zeroval);
 	}
+
 }
 
 /**
@@ -293,3 +413,93 @@ void sn2_send_IPI(int cpuid, int vector,
 
 	sn_send_IPI_phys(nasid, physid, vector, delivery_mode);
 }
+
+#ifdef CONFIG_PROC_FS
+
+#define PTC_BASENAME	"sgi_sn/ptc_statistics"
+
+static void *sn2_ptc_seq_start(struct seq_file *file, loff_t * offset)
+{
+	if (*offset < NR_CPUS)
+		return offset;
+	return NULL;
+}
+
+static void *sn2_ptc_seq_next(struct seq_file *file, void *data, loff_t * offset)
+{
+	(*offset)++;
+	if (*offset < NR_CPUS)
+		return offset;
+	return NULL;
+}
+
+static void sn2_ptc_seq_stop(struct seq_file *file, void *data)
+{
+}
+
+static int sn2_ptc_seq_show(struct seq_file *file, void *data)
+{
+	struct ptc_stats *stat;
+	int cpu;
+
+	cpu = *(loff_t *) data;
+
+	if (!cpu) {
+		seq_printf(file, "# ptc_l change_rid shub_ptc_flushes shub_nodes_flushed deadlocks lock_nsec shub_nsec shub_nsec_max\n");
+		seq_printf(file, "# ptctest %d\n", sn2_ptctest);
+	}
+
+	if (cpu < NR_CPUS && cpu_online(cpu)) {
+		stat = &per_cpu(ptcstats, cpu);
+		seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l,
+				stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed,
+				stat->deadlocks,
+				1000 * stat->lock_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
+				1000 * stat->shub_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
+				1000 * stat->shub_itc_clocks_max / per_cpu(cpu_info, cpu).cyc_per_usec);
+	}
+
+	return 0;
+}
+
+static struct seq_operations sn2_ptc_seq_ops = {
+	.start = sn2_ptc_seq_start,
+	.next = sn2_ptc_seq_next,
+	.stop = sn2_ptc_seq_stop,
+	.show = sn2_ptc_seq_show
+};
+
+int sn2_ptc_proc_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &sn2_ptc_seq_ops);
+}
+
+static struct file_operations proc_sn2_ptc_operations = {
+	.open = sn2_ptc_proc_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+static struct proc_dir_entry *proc_sn2_ptc;
+
+static int __init sn2_ptc_init(void)
+{
+	if (!(proc_sn2_ptc = create_proc_entry(PTC_BASENAME, 0444, NULL))) {
+		printk(KERN_ERR "unable to create %s proc entry", PTC_BASENAME);
+		return -EINVAL;
+	}
+	proc_sn2_ptc->proc_fops = &proc_sn2_ptc_operations;
+	spin_lock_init(&sn2_global_ptc_lock);
+	return 0;
+}
+
+static void __exit sn2_ptc_exit(void)
+{
+	remove_proc_entry(PTC_BASENAME, NULL);
+}
+
+module_init(sn2_ptc_init);
+module_exit(sn2_ptc_exit);
+#endif /* CONFIG_PROC_FS */
+
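Usage notes for the instrumentation above: the per-cpu counters are read
with "cat /proc/sgi_sn/ptc_statistics" (PTC_BASENAME under /proc), and
the ITC deltas are reported as nanoseconds via 1000 * clocks /
cyc_per_usec. On a DEBUG_PTC build, booting with e.g. "ptctest=0x11"
selects the node-local ptc_lock taken with plain spin_lock, interrupts
left enabled, per the x/y/z encoding documented in the comment block.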
diff -puN arch/ia64/sn/kernel/sn2/sn_hwperf.c~git-ia64 arch/ia64/sn/kernel/sn2/sn_hwperf.c
--- 25/arch/ia64/sn/kernel/sn2/sn_hwperf.c~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/arch/ia64/sn/kernel/sn2/sn_hwperf.c	2005-09-01 05:34:24.000000000 -0600
@@ -36,7 +36,6 @@
 #include <asm/topology.h>
 #include <asm/smp.h>
 #include <asm/semaphore.h>
-#include <asm/segment.h>
 #include <asm/uaccess.h>
 #include <asm/sal.h>
 #include <asm/sn/io.h>
@@ -59,7 +58,7 @@ static int sn_hwperf_enum_objects(int *n
 	struct sn_hwperf_object_info *objbuf = NULL;
 
 	if ((e = sn_hwperf_init()) < 0) {
-		printk("sn_hwperf_init failed: err %d\n", e);
+		printk(KERN_ERR "sn_hwperf_init failed: err %d\n", e);
 		goto out;
 	}
 
@@ -111,7 +110,7 @@ static int sn_hwperf_geoid_to_cnode(char
 	if (sn_hwperf_location_to_bpos(location, &rack, &bay, &slot, &slab))
 		return -1;
 
-	for (cnode = 0; cnode < numionodes; cnode++) {
+	for_each_node(cnode) {
 		geoid = cnodeid_get_geoid(cnode);
 		module_id = geo_module(geoid);
 		this_rack = MODULE_GET_RACK(module_id);
@@ -124,11 +123,13 @@ static int sn_hwperf_geoid_to_cnode(char
 		}
 	}
 
-	return cnode < numionodes ? cnode : -1;
+	return node_possible(cnode) ? cnode : -1;
 }
 
 static int sn_hwperf_obj_to_cnode(struct sn_hwperf_object_info * obj)
 {
+	if (!SN_HWPERF_IS_NODE(obj) && !SN_HWPERF_IS_IONODE(obj))
+		BUG();
 	if (!obj->sn_hwp_this_part)
 		return -1;
 	return sn_hwperf_geoid_to_cnode(obj->location);
@@ -174,31 +175,199 @@ static const char *sn_hwperf_get_slabnam
 	return slabname;
 }
 
-static void print_pci_topology(struct seq_file *s,
-	struct sn_hwperf_object_info *obj, int *ordinal,
-	u64 rack, u64 bay, u64 slot, u64 slab)
-{
-	char *p1;
-	char *p2;
-	char *pg;
-
-	if (!(pg = (char *)get_zeroed_page(GFP_KERNEL)))
-		return; /* ignore */
-	if (ia64_sn_ioif_get_pci_topology(rack, bay, slot, slab,
-		__pa(pg), PAGE_SIZE) == SN_HWPERF_OP_OK) {
-		for (p1=pg; *p1 && p1 < pg + PAGE_SIZE;) {
-			if (!(p2 = strchr(p1, '\n')))
+static void print_pci_topology(struct seq_file *s)
+{
+	char *p;
+	size_t sz;
+	int e;
+
+	for (sz = PAGE_SIZE; sz < 16 * PAGE_SIZE; sz += PAGE_SIZE) {
+		if (!(p = (char *)kmalloc(sz, GFP_KERNEL)))
+			break;
+		e = ia64_sn_ioif_get_pci_topology(__pa(p), sz);
+		if (e == SALRET_OK)
+			seq_puts(s, p);
+		kfree(p);
+		if (e == SALRET_OK || e == SALRET_NOT_IMPLEMENTED)
+			break;
+	}
+}
+
+static inline int sn_hwperf_has_cpus(cnodeid_t node)
+{
+	return node_online(node) && nr_cpus_node(node);
+}
+
+static inline int sn_hwperf_has_mem(cnodeid_t node)
+{
+	return node_online(node) && NODE_DATA(node)->node_present_pages;
+}
+
+static struct sn_hwperf_object_info *
+sn_hwperf_findobj_id(struct sn_hwperf_object_info *objbuf,
+	int nobj, int id)
+{
+	int i;
+	struct sn_hwperf_object_info *p = objbuf;
+
+	for (i=0; i < nobj; i++, p++) {
+		if (p->id == id)
+			return p;
+	}
+
+	return NULL;
+
+}
+
+static int sn_hwperf_get_nearest_node_objdata(struct sn_hwperf_object_info *objbuf,
+	int nobj, cnodeid_t node, cnodeid_t *near_mem_node, cnodeid_t *near_cpu_node)
+{
+	int e;
+	struct sn_hwperf_object_info *nodeobj = NULL;
+	struct sn_hwperf_object_info *op;
+	struct sn_hwperf_object_info *dest;
+	struct sn_hwperf_object_info *router;
+	struct sn_hwperf_port_info ptdata[16];
+	int sz, i, j;
+	cnodeid_t c;
+	int found_mem = 0;
+	int found_cpu = 0;
+
+	if (!node_possible(node))
+		return -EINVAL;
+
+	if (sn_hwperf_has_cpus(node)) {
+		if (near_cpu_node)
+			*near_cpu_node = node;
+		found_cpu++;
+	}
+
+	if (sn_hwperf_has_mem(node)) {
+		if (near_mem_node)
+			*near_mem_node = node;
+		found_mem++;
+	}
+
+	if (found_cpu && found_mem)
+		return 0; /* trivially successful */
+
+	/* find the argument node object */
+	for (i=0, op=objbuf; i < nobj; i++, op++) {
+		if (!SN_HWPERF_IS_NODE(op) && !SN_HWPERF_IS_IONODE(op))
+			continue;
+		if (node == sn_hwperf_obj_to_cnode(op)) {
+			nodeobj = op;
+			break;
+		}
+	}
+	if (!nodeobj) {
+		e = -ENOENT;
+		goto err;
+	}
+
+	/* get its interconnect topology */
+	sz = op->ports * sizeof(struct sn_hwperf_port_info);
+	if (sz > sizeof(ptdata))
+		BUG();
+	e = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
+			      SN_HWPERF_ENUM_PORTS, nodeobj->id, sz,
+			      (u64)&ptdata, 0, 0, NULL);
+	if (e != SN_HWPERF_OP_OK) {
+		e = -EINVAL;
+		goto err;
+	}
+
+	/* find nearest node with cpus and nearest memory */
+	for (router=NULL, j=0; j < op->ports; j++) {
+		dest = sn_hwperf_findobj_id(objbuf, nobj, ptdata[j].conn_id);
+		if (!dest || SN_HWPERF_FOREIGN(dest) ||
+		    !SN_HWPERF_IS_NODE(dest) || SN_HWPERF_IS_IONODE(dest)) {
+			continue;
+		}
+		c = sn_hwperf_obj_to_cnode(dest);
+		if (!found_cpu && sn_hwperf_has_cpus(c)) {
+			if (near_cpu_node)
+				*near_cpu_node = c;
+			found_cpu++;
+		}
+		if (!found_mem && sn_hwperf_has_mem(c)) {
+			if (near_mem_node)
+				*near_mem_node = c;
+			found_mem++;
+		}
+		if (SN_HWPERF_IS_ROUTER(dest))
+			router = dest;
+	}
+
+	if (router && (!found_cpu || !found_mem)) {
+		/* search for a node connected to the same router */
+		sz = router->ports * sizeof(struct sn_hwperf_port_info);
+		if (sz > sizeof(ptdata))
+			BUG();
+		e = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
+				      SN_HWPERF_ENUM_PORTS, router->id, sz,
+				      (u64)&ptdata, 0, 0, NULL);
+		if (e != SN_HWPERF_OP_OK) {
+			e = -EINVAL;
+			goto err;
+		}
+		for (j=0; j < router->ports; j++) {
+			dest = sn_hwperf_findobj_id(objbuf, nobj,
+				ptdata[j].conn_id);
+			if (!dest || dest->id == node ||
+			    SN_HWPERF_FOREIGN(dest) ||
+			    !SN_HWPERF_IS_NODE(dest) ||
+			    SN_HWPERF_IS_IONODE(dest)) {
+				continue;
+			}
+			c = sn_hwperf_obj_to_cnode(dest);
+			if (!found_cpu && sn_hwperf_has_cpus(c)) {
+				if (near_cpu_node)
+					*near_cpu_node = c;
+				found_cpu++;
+			}
+			if (!found_mem && sn_hwperf_has_mem(c)) {
+				if (near_mem_node)
+					*near_mem_node = c;
+				found_mem++;
+			}
+			if (found_cpu && found_mem)
 				break;
-			*p2 = '\0';
-			seq_printf(s, "pcibus %d %s-%s\n",
-				*ordinal, obj->location, p1);
-			(*ordinal)++;
-			p1 = p2 + 1;
 		}
 	}
-	free_page((unsigned long)pg);
+
+	if (!found_cpu || !found_mem) {
+		/* resort to _any_ node with CPUs and memory */
+		for (i=0, op=objbuf; i < nobj; i++, op++) {
+			if (SN_HWPERF_FOREIGN(op) ||
+			    SN_HWPERF_IS_IONODE(op) ||
+			    !SN_HWPERF_IS_NODE(op)) {
+				continue;
+			}
+			c = sn_hwperf_obj_to_cnode(op);
+			if (!found_cpu && sn_hwperf_has_cpus(c)) {
+				if (near_cpu_node)
+					*near_cpu_node = c;
+				found_cpu++;
+			}
+			if (!found_mem && sn_hwperf_has_mem(c)) {
+				if (near_mem_node)
+					*near_mem_node = c;
+				found_mem++;
+			}
+			if (found_cpu && found_mem)
+				break;
+		}
+	}
+
+	if (!found_cpu || !found_mem)
+		e = -ENODATA;
+
+err:
+	return e;
 }
 
+
 static int sn_topology_show(struct seq_file *s, void *d)
 {
 	int sz;
@@ -215,7 +384,6 @@ static int sn_topology_show(struct seq_f
 	struct sn_hwperf_object_info *p;
 	struct sn_hwperf_object_info *obj = d;	/* this object */
 	struct sn_hwperf_object_info *objs = s->private; /* all objects */
-	int rack, bay, slot, slab;
 	u8 shubtype;
 	u8 system_size;
 	u8 sharing_size;
@@ -225,7 +393,6 @@ static int sn_topology_show(struct seq_f
 	u8 region_size;
 	u16 nasid_mask;
 	int nasid_msb;
-	int pci_bus_ordinal = 0;
 
 	if (obj == objs) {
 		seq_printf(s, "# sn_topology version 2\n");
@@ -253,6 +420,8 @@ static int sn_topology_show(struct seq_f
 			shubtype ? "shub2" : "shub1", 
 			(u64)nasid_mask << nasid_shift, nasid_msb, nasid_shift,
 			system_size, sharing_size, coher, region_size);
+
+		print_pci_topology(s);
 	}
 
 	if (SN_HWPERF_FOREIGN(obj)) {
@@ -272,11 +441,24 @@ static int sn_topology_show(struct seq_f
 	if (!SN_HWPERF_IS_NODE(obj) && !SN_HWPERF_IS_IONODE(obj))
 		seq_putc(s, '\n');
 	else {
+		cnodeid_t near_mem = -1;
+		cnodeid_t near_cpu = -1;
+
 		seq_printf(s, ", nasid 0x%x", cnodeid_to_nasid(ordinal));
-		for (i=0; i < numionodes; i++) {
-			seq_printf(s, i ? ":%d" : ", dist %d",
-				node_distance(ordinal, i));
+
+		if (sn_hwperf_get_nearest_node_objdata(objs, sn_hwperf_obj_cnt,
+			ordinal, &near_mem, &near_cpu) == 0) {
+			seq_printf(s, ", near_mem_nodeid %d, near_cpu_nodeid %d",
+				near_mem, near_cpu);
 		}
+
+		if (!SN_HWPERF_IS_IONODE(obj)) {
+			for_each_online_node(i) {
+				seq_printf(s, i ? ":%d" : ", dist %d",
+					node_distance(ordinal, i));
+			}
+		}
+
 		seq_putc(s, '\n');
 
 		/*
@@ -300,17 +482,6 @@ static int sn_topology_show(struct seq_f
 				seq_putc(s, '\n');
 			}
 		}
-
-		/*
-		 * PCI busses attached to this node, if any
-		 */
-		if (sn_hwperf_location_to_bpos(obj->location,
-			&rack, &bay, &slot, &slab)) {
-			/* export pci bus info */
-			print_pci_topology(s, obj, &pci_bus_ordinal,
-				rack, bay, slot, slab);
-
-		}
 	}
 
 	if (obj->ports) {
@@ -572,6 +743,8 @@ sn_hwperf_ioctl(struct inode *in, struct
 		if ((r = sn_hwperf_enum_objects(&nobj, &objs)) == 0) {
 			memset(p, 0, a.sz);
 			for (i = 0; i < nobj; i++) {
+				if (!SN_HWPERF_IS_NODE(objs + i))
+					continue;
 				node = sn_hwperf_obj_to_cnode(objs + i);
 				for_each_online_cpu(j) {
 					if (node != cpu_to_node(j))
@@ -598,7 +771,7 @@ sn_hwperf_ioctl(struct inode *in, struct
 
 	case SN_HWPERF_GET_NODE_NASID:
 		if (a.sz != sizeof(u64) ||
-		   (node = a.arg) < 0 || node >= numionodes) {
+		   (node = a.arg) < 0 || !node_possible(node)) {
 			r = -EINVAL;
 			goto error;
 		}
@@ -627,6 +800,14 @@ sn_hwperf_ioctl(struct inode *in, struct
 				vfree(objs);
 				goto error;
 			}
+
+			if (!SN_HWPERF_IS_NODE(objs + i) &&
+			    !SN_HWPERF_IS_IONODE(objs + i)) {
+				r = -ENOENT;
+				vfree(objs);
+				goto error;
+			}
+
 			*(u64 *)p = (u64)sn_hwperf_obj_to_cnode(objs + i);
 			vfree(objs);
 		}
@@ -692,6 +873,7 @@ static int sn_hwperf_init(void)
 
 	/* single threaded, once-only initialization */
 	down(&sn_hwperf_init_mutex);
+
 	if (sn_hwperf_salheap) {
 		up(&sn_hwperf_init_mutex);
 		return e;
@@ -742,19 +924,6 @@ out:
 		sn_hwperf_salheap = NULL;
 		sn_hwperf_obj_cnt = 0;
 	}
-
-	if (!e) {
-		/*
-		 * Register a dynamic misc device for ioctl. Platforms
-		 * supporting hotplug will create /dev/sn_hwperf, else
-		 * user can to look up the minor number in /proc/misc.
-		 */
-		if ((e = misc_register(&sn_hwperf_dev)) != 0) {
-			printk(KERN_ERR "sn_hwperf_init: misc register "
-			       "for \"sn_hwperf\" failed, err %d\n", e);
-		}
-	}
-
 	up(&sn_hwperf_init_mutex);
 	return e;
 }
@@ -782,3 +951,41 @@ int sn_topology_release(struct inode *in
 	vfree(seq->private);
 	return seq_release(inode, file);
 }
+
+int sn_hwperf_get_nearest_node(cnodeid_t node,
+	cnodeid_t *near_mem_node, cnodeid_t *near_cpu_node)
+{
+	int e;
+	int nobj;
+	struct sn_hwperf_object_info *objbuf;
+
+	if ((e = sn_hwperf_enum_objects(&nobj, &objbuf)) == 0) {
+		e = sn_hwperf_get_nearest_node_objdata(objbuf, nobj,
+			node, near_mem_node, near_cpu_node);
+		vfree(objbuf);
+	}
+
+	return e;
+}
+
+static int __devinit sn_hwperf_misc_register_init(void)
+{
+	int e;
+
+	sn_hwperf_init();
+
+	/*
+	 * Register a dynamic misc device for hwperf ioctls. Platforms
+	 * supporting hotplug will create /dev/sn_hwperf, else user
+	 * can look up the minor number in /proc/misc.
+	 */
+	if ((e = misc_register(&sn_hwperf_dev)) != 0) {
+		printk(KERN_ERR "sn_hwperf_misc_register_init: failed to "
+		"register misc device for \"%s\"\n", sn_hwperf_dev.name);
+	}
+
+	return e;
+}
+
+device_initcall(sn_hwperf_misc_register_init); /* after misc_init() */
+EXPORT_SYMBOL(sn_hwperf_get_nearest_node);
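
A sketch of the newly exported interface (signature per the definition
above; "node" is a hypothetical caller-supplied cnodeid_t):

	cnodeid_t near_mem = -1, near_cpu = -1;

	/* Find the closest nodes with memory and with cpus for a node
	 * that may itself be headless or memoryless; 0 on success.
	 */
	if (sn_hwperf_get_nearest_node(node, &near_mem, &near_cpu) == 0)
		printk(KERN_DEBUG "node %d: near_mem %d near_cpu %d\n",
		       node, near_mem, near_cpu);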
diff -puN arch/ia64/sn/kernel/sn2/sn_proc_fs.c~git-ia64 arch/ia64/sn/kernel/sn2/sn_proc_fs.c
--- 25/arch/ia64/sn/kernel/sn2/sn_proc_fs.c~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/arch/ia64/sn/kernel/sn2/sn_proc_fs.c	2005-09-01 05:34:24.000000000 -0600
@@ -3,7 +3,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved.
  */
 #include <linux/config.h>
 #include <asm/uaccess.h>
@@ -15,7 +15,7 @@
 
 static int partition_id_show(struct seq_file *s, void *p)
 {
-	seq_printf(s, "%d\n", sn_local_partid());
+	seq_printf(s, "%d\n", sn_partition_id);
 	return 0;
 }
 
diff -puN arch/ia64/sn/kernel/sn2/timer_interrupt.c~git-ia64 arch/ia64/sn/kernel/sn2/timer_interrupt.c
--- 25/arch/ia64/sn/kernel/sn2/timer_interrupt.c~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/arch/ia64/sn/kernel/sn2/timer_interrupt.c	2005-09-01 05:34:24.000000000 -0600
@@ -1,7 +1,7 @@
 /*
  *
  *
- * Copyright (c) 2003 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2005 Silicon Graphics, Inc.  All Rights Reserved.
  * 
  * This program is free software; you can redistribute it and/or modify it 
  * under the terms of version 2 of the GNU General Public License 
@@ -50,14 +50,16 @@ void sn_timer_interrupt(int irq, void *d
 			     LED_CPU_HEARTBEAT, LED_CPU_HEARTBEAT);
 	}
 
-	if (enable_shub_wars_1_1()) {
-		/* Bugfix code for SHUB 1.1 */
-		if (pda->pio_shub_war_cam_addr)
-			*pda->pio_shub_war_cam_addr = 0x8000000000000010UL;
+	if (is_shub1()) {
+		if (enable_shub_wars_1_1()) {
+			/* Bugfix code for SHUB 1.1 */
+			if (pda->pio_shub_war_cam_addr)
+				*pda->pio_shub_war_cam_addr = 0x8000000000000010UL;
+		}
+		if (pda->sn_lb_int_war_ticks == 0)
+			sn_lb_int_war_check();
+		pda->sn_lb_int_war_ticks++;
+		if (pda->sn_lb_int_war_ticks >= SN_LB_INT_WAR_INTERVAL)
+			pda->sn_lb_int_war_ticks = 0;
 	}
-	if (pda->sn_lb_int_war_ticks == 0)
-		sn_lb_int_war_check();
-	pda->sn_lb_int_war_ticks++;
-	if (pda->sn_lb_int_war_ticks >= SN_LB_INT_WAR_INTERVAL)
-		pda->sn_lb_int_war_ticks = 0;
 }
diff -puN arch/ia64/sn/pci/Makefile~git-ia64 arch/ia64/sn/pci/Makefile
--- 25/arch/ia64/sn/pci/Makefile~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/arch/ia64/sn/pci/Makefile	2005-09-01 05:34:24.000000000 -0600
@@ -7,4 +7,4 @@
 #
 # Makefile for the sn pci general routines.
 
-obj-y := pci_dma.o tioca_provider.o pcibr/ 
+obj-y := pci_dma.o tioca_provider.o tioce_provider.o pcibr/
diff -puN arch/ia64/sn/pci/pcibr/pcibr_dma.c~git-ia64 arch/ia64/sn/pci/pcibr/pcibr_dma.c
--- 25/arch/ia64/sn/pci/pcibr/pcibr_dma.c~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/arch/ia64/sn/pci/pcibr/pcibr_dma.c	2005-09-01 05:34:24.000000000 -0600
@@ -3,7 +3,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2001-2004 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2001-2005 Silicon Graphics, Inc. All rights reserved.
  */
 
 #include <linux/types.h>
@@ -215,8 +215,8 @@ void sn_dma_flush(uint64_t addr)
 	int is_tio;
 	int wid_num;
 	int i, j;
-	int bwin;
 	uint64_t flags;
+	uint64_t itte;
 	struct hubdev_info *hubinfo;
 	volatile struct sn_flush_device_list *p;
 	struct sn_flush_nasid_entry *flush_nasid_list;
@@ -233,31 +233,36 @@ void sn_dma_flush(uint64_t addr)
 	if (!hubinfo) {
 		BUG();
 	}
-	is_tio = (nasid & 1);
-	if (is_tio) {
-		wid_num = TIO_SWIN_WIDGETNUM(addr);
-		bwin = TIO_BWIN_WINDOWNUM(addr);
-	} else {
-		wid_num = SWIN_WIDGETNUM(addr);
-		bwin = BWIN_WINDOWNUM(addr);
-	}
 
 	flush_nasid_list = &hubinfo->hdi_flush_nasid_list;
 	if (flush_nasid_list->widget_p == NULL)
 		return;
-	if (bwin > 0) {
-		uint64_t itte = flush_nasid_list->iio_itte[bwin];
 
-		if (is_tio) {
-			wid_num = (itte >> TIO_ITTE_WIDGET_SHIFT) &
-			    TIO_ITTE_WIDGET_MASK;
-		} else {
-			wid_num = (itte >> IIO_ITTE_WIDGET_SHIFT) &
-			    IIO_ITTE_WIDGET_MASK;
-		}
+	is_tio = (nasid & 1);
+	if (is_tio) {
+		int itte_index;
+
+		if (TIO_HWIN(addr))
+			itte_index = 0;
+		else if (TIO_BWIN_WINDOWNUM(addr))
+			itte_index = TIO_BWIN_WINDOWNUM(addr);
+		else
+			itte_index = -1;
+
+		if (itte_index >= 0) {
+			itte = flush_nasid_list->iio_itte[itte_index];
+			if (!TIO_ITTE_VALID(itte))
+				return;
+			wid_num = TIO_ITTE_WIDGET(itte);
+		} else
+			wid_num = TIO_SWIN_WIDGETNUM(addr);
+	} else {
+		if (BWIN_WINDOWNUM(addr)) {
+			itte = flush_nasid_list->iio_itte[BWIN_WINDOWNUM(addr)];
+			wid_num = IIO_ITTE_WIDGET(itte);
+		} else
+			wid_num = SWIN_WIDGETNUM(addr);
 	}
-	if (flush_nasid_list->widget_p == NULL)
-		return;
 	if (flush_nasid_list->widget_p[wid_num] == NULL)
 		return;
 	p = &flush_nasid_list->widget_p[wid_num][0];
@@ -283,10 +288,16 @@ void sn_dma_flush(uint64_t addr)
 	/*
 	 * For TIOCP use the Device(x) Write Request Buffer Flush Bridge
 	 * register since it ensures the data has entered the coherence
-	 * domain, unlike PIC
+	 * domain, unlike PIC.
 	 */
 	if (is_tio) {
-		uint32_t tio_id = REMOTE_HUB_L(nasid, TIO_NODE_ID);
+		/*
+		 * Note:  devices behind TIOCE should never be matched in the
+		 * above code, and so the following code is PIC/CP centric.
+		 * If CE ever needs the sn_dma_flush mechanism, we will have
+		 * to account for that here and in tioce_bus_fixup().
+		 */
+		uint32_t tio_id = HUB_L(TIO_IOSPACE_ADDR(nasid, TIO_NODE_ID));
 		uint32_t revnum = XWIDGET_PART_REV_NUM(tio_id);
 
 		/* TIOCP BRINGUP WAR (PV907516): Don't write buffer flush reg */
@@ -306,7 +317,8 @@ void sn_dma_flush(uint64_t addr)
 		*(volatile uint32_t *)(p->sfdl_force_int_addr) = 1;
 
 		/* wait for the interrupt to come back. */
-		while (*(p->sfdl_flush_addr) != 0x10f) ;
+		while (*(p->sfdl_flush_addr) != 0x10f)
+			cpu_relax();
 
 		/* okay, everything is synched up. */
 		spin_unlock_irqrestore((spinlock_t *)&p->sfdl_flush_lock, flags);
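
One more change in this hunk worth noting: the flush interrupt wait loop now calls cpu_relax() instead of spinning with an empty body.  A minimal sketch of the idiom, with a hypothetical flag standing in for *p->sfdl_flush_addr:

#include <asm/processor.h>	/* cpu_relax() */

/* hypothetical word written to 0x10f by the flush interrupt */
static volatile unsigned long flush_word;

static void wait_for_flush(void)
{
	/*
	 * Can't sleep here (the flush lock is held), so busy-wait;
	 * cpu_relax() hints to the CPU that this is a spin loop and
	 * acts as a compiler barrier while we wait.
	 */
	while (flush_word != 0x10f)
		cpu_relax();
}
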
diff -puN arch/ia64/sn/pci/pcibr/pcibr_provider.c~git-ia64 arch/ia64/sn/pci/pcibr/pcibr_provider.c
--- 25/arch/ia64/sn/pci/pcibr/pcibr_provider.c~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/arch/ia64/sn/pci/pcibr/pcibr_provider.c	2005-09-01 05:34:24.000000000 -0600
@@ -15,6 +15,7 @@
 #include <asm/sn/pcibus_provider_defs.h>
 #include <asm/sn/pcidev.h>
 #include <asm/sn/sn_sal.h>
+#include <asm/sn/sn2/sn_hwperf.h>
 #include "xtalk/xwidgetdev.h"
 #include "xtalk/hubdev.h"
 
@@ -60,7 +61,7 @@ static int sal_pcibr_error_interrupt(str
 	ret_stuff.status = 0;
 	ret_stuff.v0 = 0;
 
-	segment = 0;
+	segment = soft->pbi_buscommon.bs_persist_segment;
 	busnum = soft->pbi_buscommon.bs_persist_busnum;
 	SAL_CALL_NOLOCK(ret_stuff,
 			(u64) SN_SAL_IOIF_ERROR_INTERRUPT,
@@ -88,6 +89,7 @@ void *
 pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller)
 {
 	int nasid, cnode, j;
+	cnodeid_t near_cnode;
 	struct hubdev_info *hubdev_info;
 	struct pcibus_info *soft;
 	struct sn_flush_device_list *sn_flush_device_list;
@@ -115,7 +117,7 @@ pcibr_bus_fixup(struct pcibus_bussoft *p
 	/*
 	 * register the bridge's error interrupt handler
 	 */
-	if (request_irq(SGI_PCIBR_ERROR, (void *)pcibr_error_intr_handler,
+	if (request_irq(SGI_PCIASIC_ERROR, (void *)pcibr_error_intr_handler,
 			SA_SHIRQ, "PCIBR error", (void *)(soft))) {
 		printk(KERN_WARNING
 		       "pcibr cannot allocate interrupt for error handler\n");
@@ -142,9 +144,12 @@ pcibr_bus_fixup(struct pcibus_bussoft *p
 			     j++, sn_flush_device_list++) {
 				if (sn_flush_device_list->sfdl_slot == -1)
 					continue;
-				if (sn_flush_device_list->
-				    sfdl_persistent_busnum ==
-				    soft->pbi_buscommon.bs_persist_busnum)
+				if ((sn_flush_device_list->
+				     sfdl_persistent_segment ==
+				     soft->pbi_buscommon.bs_persist_segment) &&
+				     (sn_flush_device_list->
+				     sfdl_persistent_busnum ==
+				     soft->pbi_buscommon.bs_persist_busnum))
 					sn_flush_device_list->sfdl_pcibus_info =
 					    soft;
 			}
@@ -158,12 +163,18 @@ pcibr_bus_fixup(struct pcibus_bussoft *p
 	memset(soft->pbi_int_ate_resource.ate, 0,
  	       (soft->pbi_int_ate_size * sizeof(uint64_t)));
 
-	if (prom_bussoft->bs_asic_type == PCIIO_ASIC_TYPE_TIOCP)
-		/*
-		 * TIO PCI Bridge with no closest node information.
-		 * FIXME: Find another way to determine the closest node
-		 */
-		controller->node = -1;
+	if (prom_bussoft->bs_asic_type == PCIIO_ASIC_TYPE_TIOCP) {
+		/* TIO PCI Bridge: find nearest node with CPUs */
+		int e = sn_hwperf_get_nearest_node(cnode, NULL, &near_cnode);
+
+		if (e < 0) {
+			near_cnode = (cnodeid_t)-1; /* use any node */
+			printk(KERN_WARNING "pcibr_bus_fixup: failed to find "
+				"near node with CPUs to TIO node %d, err=%d\n",
+				cnode, e);
+		}
+		controller->node = near_cnode;
+	}
 	else
 		controller->node = cnode;
 	return soft;
@@ -175,6 +186,9 @@ void pcibr_force_interrupt(struct sn_irq
 	struct pcibus_info *pcibus_info;
 	int bit = sn_irq_info->irq_int_bit;
 
+	if (!sn_irq_info->irq_bridge)
+		return;
+
 	pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
 	if (pcidev_info) {
 		pcibus_info =
@@ -184,7 +198,7 @@ void pcibr_force_interrupt(struct sn_irq
 	}
 }
 
-void pcibr_change_devices_irq(struct sn_irq_info *sn_irq_info)
+void pcibr_target_interrupt(struct sn_irq_info *sn_irq_info)
 {
 	struct pcidev_info *pcidev_info;
 	struct pcibus_info *pcibus_info;
@@ -219,6 +233,8 @@ struct sn_pcibus_provider pcibr_provider
 	.dma_map_consistent = pcibr_dma_map_consistent,
 	.dma_unmap = pcibr_dma_unmap,
 	.bus_fixup = pcibr_bus_fixup,
+	.force_interrupt = pcibr_force_interrupt,
+	.target_interrupt = pcibr_target_interrupt
 };
 
 int
diff -puN arch/ia64/sn/pci/tioca_provider.c~git-ia64 arch/ia64/sn/pci/tioca_provider.c
--- 25/arch/ia64/sn/pci/tioca_provider.c~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/arch/ia64/sn/pci/tioca_provider.c	2005-09-01 05:34:24.000000000 -0600
@@ -559,7 +559,7 @@ tioca_error_intr_handler(int irq, void *
 	ret_stuff.status = 0;
 	ret_stuff.v0 = 0;
 
-	segment = 0;
+	segment = soft->ca_common.bs_persist_segment;
 	busnum = soft->ca_common.bs_persist_busnum;
 
 	SAL_CALL_NOLOCK(ret_stuff,
@@ -622,7 +622,8 @@ tioca_bus_fixup(struct pcibus_bussoft *p
 	    nasid_to_cnodeid(tioca_common->ca_closest_nasid);
 	tioca_common->ca_kernel_private = (uint64_t) tioca_kern;
 
-	bus = pci_find_bus(0, tioca_common->ca_common.bs_persist_busnum);
+	bus = pci_find_bus(tioca_common->ca_common.bs_persist_segment,
+		tioca_common->ca_common.bs_persist_busnum);
 	BUG_ON(!bus);
 	tioca_kern->ca_devices = &bus->devices;
 
@@ -656,6 +657,8 @@ static struct sn_pcibus_provider tioca_p
 	.dma_map_consistent = tioca_dma_map,
 	.dma_unmap = tioca_dma_unmap,
 	.bus_fixup = tioca_bus_fixup,
+	.force_interrupt = NULL,
+	.target_interrupt = NULL
 };
 
 /**
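
Note that tioca_pci_interfaces now publishes NULL for both new ops, so generic code has to check the slot before calling through the provider vector.  A hedged sketch of what such a call site looks like; the function below is illustrative, not part of this patch, and sn_pci_provider[] is the real per-ASIC vector, assumed declared in asm/sn/pcibus_provider_defs.h:

#include <asm/sn/pcibus_provider_defs.h>	/* assumed home of sn_pci_provider[] */

/* illustrative dispatch through the per-ASIC provider vector */
static void example_target_irq(struct sn_irq_info *sn_irq_info, int asic)
{
	struct sn_pcibus_provider *provider = sn_pci_provider[asic];

	/* TIOCA leaves .target_interrupt NULL, so the check is required */
	if (provider && provider->target_interrupt)
		provider->target_interrupt(sn_irq_info);
}
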
diff -puN /dev/null arch/ia64/sn/pci/tioce_provider.c
--- /dev/null	2004-08-10 19:55:00.000000000 -0600
+++ 25-akpm/arch/ia64/sn/pci/tioce_provider.c	2005-09-01 05:34:24.000000000 -0600
@@ -0,0 +1,775 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003-2005 Silicon Graphics, Inc.  All Rights Reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <asm/sn/sn_sal.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/pcidev.h>
+#include <asm/sn/pcibus_provider_defs.h>
+#include <asm/sn/tioce_provider.h>
+
+/*
+ * Bus address ranges for the 5 flavors of TIOCE DMA
+ */
+
+#define TIOCE_D64_MIN	0x8000000000000000UL
+#define TIOCE_D64_MAX	0xffffffffffffffffUL
+#define TIOCE_D64_ADDR(a)	((a) >= TIOCE_D64_MIN)
+
+#define TIOCE_D32_MIN	0x0000000080000000UL
+#define TIOCE_D32_MAX	0x00000000ffffffffUL
+#define TIOCE_D32_ADDR(a)	((a) >= TIOCE_D32_MIN && (a) <= TIOCE_D32_MAX)
+
+#define TIOCE_M32_MIN	0x0000000000000000UL
+#define TIOCE_M32_MAX	0x000000007fffffffUL
+#define TIOCE_M32_ADDR(a)	((a) >= TIOCE_M32_MIN && (a) <= TIOCE_M32_MAX)
+
+#define TIOCE_M40_MIN	0x0000004000000000UL
+#define TIOCE_M40_MAX	0x0000007fffffffffUL
+#define TIOCE_M40_ADDR(a)	((a) >= TIOCE_M40_MIN && (a) <= TIOCE_M40_MAX)
+
+#define TIOCE_M40S_MIN	0x0000008000000000UL
+#define TIOCE_M40S_MAX	0x000000ffffffffffUL
+#define TIOCE_M40S_ADDR(a)	((a) >= TIOCE_M40S_MIN && (a) <= TIOCE_M40S_MAX)
+
+/*
+ * ATE manipulation macros.
+ */
+
+#define ATE_PAGESHIFT(ps)	(__ffs(ps))
+#define ATE_PAGEMASK(ps)	((ps)-1)
+
+#define ATE_PAGE(x, ps) ((x) >> ATE_PAGESHIFT(ps))
+#define ATE_NPAGES(start, len, pagesize) \
+	(ATE_PAGE((start)+(len)-1, pagesize) - ATE_PAGE(start, pagesize) + 1)
+
+#define ATE_VALID(ate)	((ate) & (1UL << 63))
+#define ATE_MAKE(addr, ps) (((addr) & ~ATE_PAGEMASK(ps)) | (1UL << 63))
+
+/*
+ * Flavors of ate-based mapping supported by tioce_alloc_map()
+ */
+
+#define TIOCE_ATE_M32	1
+#define TIOCE_ATE_M40	2
+#define TIOCE_ATE_M40S	3
+
+#define KB(x)	((x) << 10)
+#define MB(x)	((x) << 20)
+#define GB(x)	((x) << 30)
+
+/**
+ * tioce_dma_d64 - create a DMA mapping using 64-bit direct mode
+ * @ct_addr: system coretalk address
+ *
+ * Map @ct_addr into 64-bit CE bus space.  No device context is necessary
+ * and no CE mappings are consumed.
+ *
+ * Bits 53:0 come from the coretalk address.  The remaining bits are set as
+ * follows:
+ *
+ * 63    - must be 1 to indicate d64 mode to CE hardware
+ * 62    - barrier bit ... controlled with tioce_dma_barrier()
+ * 61    - 0 since this is not an MSI transaction
+ * 60:54 - reserved, MBZ
+ */
+static uint64_t
+tioce_dma_d64(unsigned long ct_addr)
+{
+	uint64_t bus_addr;
+
+	bus_addr = ct_addr | (1UL << 63);
+
+	return bus_addr;
+}
+
+/**
+ * pcidev_to_tioce - return misc ce related pointers given a pci_dev
+ * @pdev: pci device context
+ * @base: ptr to store struct tioce_mmr * for the CE holding this device
+ * @kernel: ptr to store struct tioce_kernel * for the CE holding this device
+ * @port: ptr to store the CE port number that this device is on
+ *
+ * Return pointers to various CE-related structures for the CE upstream of
+ * @pci_dev.
+ */
+static inline void
+pcidev_to_tioce(struct pci_dev *pdev, struct tioce **base,
+		struct tioce_kernel **kernel, int *port)
+{
+	struct pcidev_info *pcidev_info;
+	struct tioce_common *ce_common;
+	struct tioce_kernel *ce_kernel;
+
+	pcidev_info = SN_PCIDEV_INFO(pdev);
+	ce_common = (struct tioce_common *)pcidev_info->pdi_pcibus_info;
+	ce_kernel = (struct tioce_kernel *)ce_common->ce_kernel_private;
+
+	if (base)
+		*base = (struct tioce *)ce_common->ce_pcibus.bs_base;
+	if (kernel)
+		*kernel = ce_kernel;
+
+	/*
+	 * we use port as a zero-based value internally, even though the
+	 * documentation is 1-based.
+	 */
+	if (port)
+		*port =
+		    (pdev->bus->number < ce_kernel->ce_port1_secondary) ? 0 : 1;
+}
+
+/**
+ * tioce_alloc_map - Given a coretalk address, map it to pcie bus address
+ * space using one of the various ATE-based address modes.
+ * @ce_kern: tioce context
+ * @type: map mode to use
+ * @port: 0-based port that the requesting device is downstream of
+ * @ct_addr: the coretalk address to map
+ * @len: number of bytes to map
+ *
+ * Given the addressing type, set up various parameters that define the
+ * ATE pool to use.  Search for a contiguous block of entries to cover the
+ * length, and if enough resources exist, fill in the ATEs and construct a
+ * tioce_dmamap struct to track the mapping.
+ */
+static uint64_t
+tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
+		uint64_t ct_addr, int len)
+{
+	int i;
+	int j;
+	int first;
+	int last;
+	int entries;
+	int nates;
+	int pagesize;
+	uint64_t *ate_shadow;
+	uint64_t *ate_reg;
+	uint64_t addr;
+	struct tioce *ce_mmr;
+	uint64_t bus_base;
+	struct tioce_dmamap *map;
+
+	ce_mmr = (struct tioce *)ce_kern->ce_common->ce_pcibus.bs_base;
+
+	switch (type) {
+	case TIOCE_ATE_M32:
+		/*
+		 * The first 64 entries of the ate3240 pool are dedicated to
+		 * super-page (TIOCE_ATE_M40S) mode.
+		 */
+		first = 64;
+		entries = TIOCE_NUM_M3240_ATES - 64;
+		ate_shadow = ce_kern->ce_ate3240_shadow;
+		ate_reg = ce_mmr->ce_ure_ate3240;
+		pagesize = ce_kern->ce_ate3240_pagesize;
+		bus_base = TIOCE_M32_MIN;
+		break;
+	case TIOCE_ATE_M40:
+		first = 0;
+		entries = TIOCE_NUM_M40_ATES;
+		ate_shadow = ce_kern->ce_ate40_shadow;
+		ate_reg = ce_mmr->ce_ure_ate40;
+		pagesize = MB(64);
+		bus_base = TIOCE_M40_MIN;
+		break;
+	case TIOCE_ATE_M40S:
+		/*
+		 * ate3240 entries 0-31 are dedicated to port1 super-page
+		 * mappings.  ate3240 entries 32-63 are dedicated to port2.
+		 */
+		first = port * 32;
+		entries = 32;
+		ate_shadow = ce_kern->ce_ate3240_shadow;
+		ate_reg = ce_mmr->ce_ure_ate3240;
+		pagesize = GB(16);
+		bus_base = TIOCE_M40S_MIN;
+		break;
+	default:
+		return 0;
+	}
+
+	nates = ATE_NPAGES(ct_addr, len, pagesize);
+	if (nates > entries)
+		return 0;
+
+	last = first + entries - nates;
+	for (i = first; i <= last; i++) {
+		if (ATE_VALID(ate_shadow[i]))
+			continue;
+
+		for (j = i; j < i + nates; j++)
+			if (ATE_VALID(ate_shadow[j]))
+				break;
+
+		if (j >= i + nates)
+			break;
+	}
+
+	if (i > last)
+		return 0;
+
+	map = kcalloc(1, sizeof(struct tioce_dmamap), GFP_ATOMIC);
+	if (!map)
+		return 0;
+
+	addr = ct_addr;
+	for (j = 0; j < nates; j++) {
+		uint64_t ate;
+
+		ate = ATE_MAKE(addr, pagesize);
+		ate_shadow[i + j] = ate;
+		ate_reg[i + j] = ate;
+		addr += pagesize;
+	}
+
+	map->refcnt = 1;
+	map->nbytes = nates * pagesize;
+	map->ct_start = ct_addr & ~ATE_PAGEMASK(pagesize);
+	map->pci_start = bus_base + (i * pagesize);
+	map->ate_hw = &ate_reg[i];
+	map->ate_shadow = &ate_shadow[i];
+	map->ate_count = nates;
+
+	list_add(&map->ce_dmamap_list, &ce_kern->ce_dmamap_list);
+
+	return (map->pci_start + (ct_addr - map->ct_start));
+}
+
+/**
+ * tioce_dma_d32 - create a DMA mapping using 32-bit direct mode
+ * @pdev: linux pci_dev representing the function
+ * @ct_addr: system coretalk address to map
+ *
+ * Map @ct_addr into 32-bit bus space of the CE associated with @pdev.
+ */
+static uint64_t
+tioce_dma_d32(struct pci_dev *pdev, uint64_t ct_addr)
+{
+	int dma_ok;
+	int port;
+	struct tioce *ce_mmr;
+	struct tioce_kernel *ce_kern;
+	uint64_t ct_upper;
+	uint64_t ct_lower;
+	dma_addr_t bus_addr;
+
+	ct_upper = ct_addr & ~0x3fffffffUL;
+	ct_lower = ct_addr & 0x3fffffffUL;
+
+	pcidev_to_tioce(pdev, &ce_mmr, &ce_kern, &port);
+
+	if (ce_kern->ce_port[port].dirmap_refcnt == 0) {
+		volatile uint64_t tmp;
+
+		ce_kern->ce_port[port].dirmap_shadow = ct_upper;
+		ce_mmr->ce_ure_dir_map[port] = ct_upper;
+		tmp = ce_mmr->ce_ure_dir_map[port];
+		dma_ok = 1;
+	} else
+		dma_ok = (ce_kern->ce_port[port].dirmap_shadow == ct_upper);
+
+	if (dma_ok) {
+		ce_kern->ce_port[port].dirmap_refcnt++;
+		bus_addr = TIOCE_D32_MIN + ct_lower;
+	} else
+		bus_addr = 0;
+
+	return bus_addr;
+}
+
+/**
+ * tioce_dma_barrier - swizzle a TIOCE bus address to include or exclude
+ * the barrier bit.
+ * @bus_addr: bus address to swizzle
+ * @on: nonzero to set the barrier bit, zero to clear it
+ *
+ * Given a TIOCE bus address, set the appropriate bit to indicate barrier
+ * attributes.
+ */
+static uint64_t
+tioce_dma_barrier(uint64_t bus_addr, int on)
+{
+	uint64_t barrier_bit;
+
+	/* barrier not supported in M40/M40S mode */
+	if (TIOCE_M40_ADDR(bus_addr) || TIOCE_M40S_ADDR(bus_addr))
+		return bus_addr;
+
+	if (TIOCE_D64_ADDR(bus_addr))
+		barrier_bit = (1UL << 62);
+	else			/* must be m32 or d32 */
+		barrier_bit = (1UL << 30);
+
+	return (on) ? (bus_addr | barrier_bit) : (bus_addr & ~barrier_bit);
+}
+
+/**
+ * tioce_dma_unmap - release CE mapping resources
+ * @pdev: linux pci_dev representing the function
+ * @bus_addr: bus address returned by an earlier tioce_dma_map
+ * @dir: mapping direction (unused)
+ *
+ * Locate mapping resources associated with @bus_addr and release them.
+ * For mappings created using the direct modes there are no resources
+ * to release.
+ */
+void
+tioce_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
+{
+	int i;
+	int port;
+	struct tioce_kernel *ce_kern;
+	struct tioce *ce_mmr;
+	unsigned long flags;
+
+	bus_addr = tioce_dma_barrier(bus_addr, 0);
+	pcidev_to_tioce(pdev, &ce_mmr, &ce_kern, &port);
+
+	/* nothing to do for D64 */
+
+	if (TIOCE_D64_ADDR(bus_addr))
+		return;
+
+	spin_lock_irqsave(&ce_kern->ce_lock, flags);
+
+	if (TIOCE_D32_ADDR(bus_addr)) {
+		if (--ce_kern->ce_port[port].dirmap_refcnt == 0) {
+			ce_kern->ce_port[port].dirmap_shadow = 0;
+			ce_mmr->ce_ure_dir_map[port] = 0;
+		}
+	} else {
+		struct tioce_dmamap *map;
+
+		list_for_each_entry(map, &ce_kern->ce_dmamap_list,
+				    ce_dmamap_list) {
+			uint64_t last;
+
+			last = map->pci_start + map->nbytes - 1;
+			if (bus_addr >= map->pci_start && bus_addr <= last)
+				break;
+		}
+
+		if (&map->ce_dmamap_list == &ce_kern->ce_dmamap_list) {
+			printk(KERN_WARNING
+			       "%s:  %s - no map found for bus_addr 0x%lx\n",
+			       __FUNCTION__, pci_name(pdev), bus_addr);
+		} else if (--map->refcnt == 0) {
+			for (i = 0; i < map->ate_count; i++) {
+				map->ate_shadow[i] = 0;
+				map->ate_hw[i] = 0;
+			}
+
+			list_del(&map->ce_dmamap_list);
+			kfree(map);
+		}
+	}
+
+	spin_unlock_irqrestore(&ce_kern->ce_lock, flags);
+}
+
+/**
+ * tioce_do_dma_map - map pages for PCI DMA
+ * @pdev: linux pci_dev representing the function
+ * @paddr: host physical address to map
+ * @byte_count: bytes to map
+ *
+ * This is the main wrapper for mapping host physical pages to CE PCI space.
+ * The mapping mode used is based on the device's dma_mask.
+ */
+static uint64_t
+tioce_do_dma_map(struct pci_dev *pdev, uint64_t paddr, size_t byte_count,
+		 int barrier)
+{
+	unsigned long flags;
+	uint64_t ct_addr;
+	uint64_t mapaddr = 0;
+	struct tioce_kernel *ce_kern;
+	struct tioce_dmamap *map;
+	int port;
+	uint64_t dma_mask;
+
+	dma_mask = (barrier) ? pdev->dev.coherent_dma_mask : pdev->dma_mask;
+
+	/* cards must be able to address at least 31 bits */
+	if (dma_mask < 0x7fffffffUL)
+		return 0;
+
+	ct_addr = PHYS_TO_TIODMA(paddr);
+
+	/*
+	 * If the device can generate 64 bit addresses, create a D64 map.
+	 * Since this should never fail, bypass the rest of the checks.
+	 */
+	if (dma_mask == ~0UL) {
+		mapaddr = tioce_dma_d64(ct_addr);
+		goto dma_map_done;
+	}
+
+	pcidev_to_tioce(pdev, NULL, &ce_kern, &port);
+
+	spin_lock_irqsave(&ce_kern->ce_lock, flags);
+
+	/*
+	 * D64 didn't work ... See if we have an existing map that covers
+	 * this address range.  Must account for devices dma_mask here since
+	 * an existing map might have been done in a mode using more pci
+	 * address bits than this device can support.
+	 */
+	list_for_each_entry(map, &ce_kern->ce_dmamap_list, ce_dmamap_list) {
+		uint64_t last;
+
+		last = map->ct_start + map->nbytes - 1;
+		if (ct_addr >= map->ct_start &&
+		    ct_addr + byte_count - 1 <= last &&
+		    map->pci_start <= dma_mask) {
+			map->refcnt++;
+			mapaddr = map->pci_start + (ct_addr - map->ct_start);
+			break;
+		}
+	}
+
+	/*
+	 * If we don't have a map yet, and the card can generate 40
+	 * bit addresses, try the M40/M40S modes.  Note these modes do not
+	 * support a barrier bit, so if we need a consistent map these
+	 * won't work.
+	 */
+	if (!mapaddr && !barrier && dma_mask >= 0xffffffffffUL) {
+		/*
+		 * We have two options for 40-bit mappings:  16GB "super" ATEs
+		 * and 64MB "regular" ATEs.  We'll try both if needed for a
+		 * given mapping but which one we try first depends on the
+		 * size.  For requests >64MB, prefer to use a super page with
+		 * regular as the fallback. Otherwise, try in the reverse order.
+		 */
+
+		if (byte_count > MB(64)) {
+			mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40S,
+						  port, ct_addr, byte_count);
+			if (!mapaddr)
+				mapaddr =
+				    tioce_alloc_map(ce_kern, TIOCE_ATE_M40, -1,
+						    ct_addr, byte_count);
+		} else {
+			mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40, -1,
+						  ct_addr, byte_count);
+			if (!mapaddr)
+				mapaddr =
+				    tioce_alloc_map(ce_kern, TIOCE_ATE_M40S,
+						    port, ct_addr, byte_count);
+		}
+	}
+
+	/*
+	 * 32-bit direct is the next mode to try
+	 */
+	if (!mapaddr && dma_mask >= 0xffffffffUL)
+		mapaddr = tioce_dma_d32(pdev, ct_addr);
+
+	/*
+	 * Last resort, try 32-bit ATE-based map.
+	 */
+	if (!mapaddr)
+		mapaddr =
+		    tioce_alloc_map(ce_kern, TIOCE_ATE_M32, -1, ct_addr,
+				    byte_count);
+
+	spin_unlock_irqrestore(&ce_kern->ce_lock, flags);
+
+dma_map_done:
+	if (mapaddr && barrier)
+		mapaddr = tioce_dma_barrier(mapaddr, 1);
+
+	return mapaddr;
+}
+
+/**
+ * tioce_dma - standard pci dma map interface
+ * @pdev: pci device requesting the map
+ * @paddr: system physical address to map into pci space
+ * @byte_count: # bytes to map
+ *
+ * Simply call tioce_do_dma_map() to create a map with the barrier bit clear
+ * in the address.
+ */
+static uint64_t
+tioce_dma(struct pci_dev *pdev, uint64_t paddr, size_t byte_count)
+{
+	return tioce_do_dma_map(pdev, paddr, byte_count, 0);
+}
+
+/**
+ * tioce_dma_consistent - consistent pci dma map interface
+ * @pdev: pci device requesting the map
+ * @paddr: system physical address to map into pci space
+ * @byte_count: # bytes to map
+ *
+ * Simply call tioce_do_dma_map() to create a map with the barrier bit set
+ * in the address.
+ */
+static uint64_t
+tioce_dma_consistent(struct pci_dev *pdev, uint64_t paddr, size_t byte_count)
+{
+	return tioce_do_dma_map(pdev, paddr, byte_count, 1);
+}
+
+/**
+ * tioce_error_intr_handler - SGI TIO CE error interrupt handler
+ * @irq: unused
+ * @arg: pointer to tioce_common struct for the given CE
+ * @pt: unused
+ *
+ * Handle a CE error interrupt.  Simply a wrapper around a SAL call which
+ * defers processing to the SGI prom.
+ */
+static irqreturn_t
+tioce_error_intr_handler(int irq, void *arg, struct pt_regs *pt)
+{
+	struct tioce_common *soft = arg;
+	struct ia64_sal_retval ret_stuff;
+	ret_stuff.status = 0;
+	ret_stuff.v0 = 0;
+
+	SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_ERROR_INTERRUPT,
+			soft->ce_pcibus.bs_persist_segment,
+			soft->ce_pcibus.bs_persist_busnum, 0, 0, 0, 0, 0);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * tioce_kern_init - init kernel structures related to a given TIOCE
+ * @tioce_common: ptr to a cached tioce_common struct that originated in prom
+ */
+static struct tioce_kernel *
+tioce_kern_init(struct tioce_common *tioce_common)
+{
+	int i;
+	uint32_t tmp;
+	struct tioce *tioce_mmr;
+	struct tioce_kernel *tioce_kern;
+
+	tioce_kern = kcalloc(1, sizeof(struct tioce_kernel), GFP_KERNEL);
+	if (!tioce_kern) {
+		return NULL;
+	}
+
+	tioce_kern->ce_common = tioce_common;
+	spin_lock_init(&tioce_kern->ce_lock);
+	INIT_LIST_HEAD(&tioce_kern->ce_dmamap_list);
+	tioce_common->ce_kernel_private = (uint64_t) tioce_kern;
+
+	/*
+	 * Determine the secondary bus number of the port2 logical PPB.
+	 * This is used to decide whether a given pci device resides on
+	 * port1 or port2.  Note:  We don't have enough plumbing set up
+	 * here to use pci_read_config_xxx() so use the raw_pci_ops vector.
+	 */
+
+	raw_pci_ops->read(tioce_common->ce_pcibus.bs_persist_segment,
+			  tioce_common->ce_pcibus.bs_persist_busnum,
+			  PCI_DEVFN(2, 0), PCI_SECONDARY_BUS, 1, &tmp);
+	tioce_kern->ce_port1_secondary = (uint8_t) tmp;
+
+	/*
+	 * Set PMU pagesize to the largest size available, and zero out
+	 * the ATEs.
+	 */
+
+	tioce_mmr = (struct tioce *)tioce_common->ce_pcibus.bs_base;
+	tioce_mmr->ce_ure_page_map &= ~CE_URE_PAGESIZE_MASK;
+	tioce_mmr->ce_ure_page_map |= CE_URE_256K_PAGESIZE;
+	tioce_kern->ce_ate3240_pagesize = KB(256);
+
+	for (i = 0; i < TIOCE_NUM_M40_ATES; i++) {
+		tioce_kern->ce_ate40_shadow[i] = 0;
+		tioce_mmr->ce_ure_ate40[i] = 0;
+	}
+
+	for (i = 0; i < TIOCE_NUM_M3240_ATES; i++) {
+		tioce_kern->ce_ate3240_shadow[i] = 0;
+		tioce_mmr->ce_ure_ate3240[i] = 0;
+	}
+
+	return tioce_kern;
+}
+
+/**
+ * tioce_force_interrupt - implement altix force_interrupt() backend for CE
+ * @sn_irq_info: sn asic irq that we need an interrupt generated for
+ *
+ * Given an sn_irq_info struct, set the proper bit in ce_adm_force_int to
+ * force a secondary interrupt to be generated.  This is to work around an
+ * asic issue where there is a small window of opportunity for a legacy device
+ * interrupt to be lost.
+ */
+static void
+tioce_force_interrupt(struct sn_irq_info *sn_irq_info)
+{
+	struct pcidev_info *pcidev_info;
+	struct tioce_common *ce_common;
+	struct tioce *ce_mmr;
+	uint64_t force_int_val;
+
+	if (!sn_irq_info->irq_bridge)
+		return;
+
+	if (sn_irq_info->irq_bridge_type != PCIIO_ASIC_TYPE_TIOCE)
+		return;
+
+	pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
+	if (!pcidev_info)
+		return;
+
+	ce_common = (struct tioce_common *)pcidev_info->pdi_pcibus_info;
+	ce_mmr = (struct tioce *)ce_common->ce_pcibus.bs_base;
+
+	/*
+	 * irq_int_bit is originally set up by prom, and holds the interrupt
+	 * bit shift (not mask) as defined by the bit definitions in the
+	 * ce_adm_int mmr.  These shifts are not the same for the
+	 * ce_adm_force_int register, so do an explicit mapping here to make
+	 * things clearer.
+	 */
+
+	switch (sn_irq_info->irq_int_bit) {
+	case CE_ADM_INT_PCIE_PORT1_DEV_A_SHFT:
+		force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT1_DEV_A_SHFT;
+		break;
+	case CE_ADM_INT_PCIE_PORT1_DEV_B_SHFT:
+		force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT1_DEV_B_SHFT;
+		break;
+	case CE_ADM_INT_PCIE_PORT1_DEV_C_SHFT:
+		force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT1_DEV_C_SHFT;
+		break;
+	case CE_ADM_INT_PCIE_PORT1_DEV_D_SHFT:
+		force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT1_DEV_D_SHFT;
+		break;
+	case CE_ADM_INT_PCIE_PORT2_DEV_A_SHFT:
+		force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT2_DEV_A_SHFT;
+		break;
+	case CE_ADM_INT_PCIE_PORT2_DEV_B_SHFT:
+		force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT2_DEV_B_SHFT;
+		break;
+	case CE_ADM_INT_PCIE_PORT2_DEV_C_SHFT:
+		force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT2_DEV_C_SHFT;
+		break;
+	case CE_ADM_INT_PCIE_PORT2_DEV_D_SHFT:
+		force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT2_DEV_D_SHFT;
+		break;
+	default:
+		return;
+	}
+	ce_mmr->ce_adm_force_int = force_int_val;
+}
+
+/**
+ * tioce_target_interrupt - implement set_irq_affinity for tioce resident
+ * functions.  Note:  only applies to line interrupts, not MSIs.
+ *
+ * @sn_irq_info: SN IRQ context
+ *
+ * Given an sn_irq_info, set the associated CE device's interrupt destination
+ * register.  Since the interrupt destination registers are on a per-ce-slot
+ * basis, this will retarget line interrupts for all functions downstream of
+ * the slot.
+ */
+static void
+tioce_target_interrupt(struct sn_irq_info *sn_irq_info)
+{
+	struct pcidev_info *pcidev_info;
+	struct tioce_common *ce_common;
+	struct tioce *ce_mmr;
+	int bit;
+
+	pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
+	if (!pcidev_info)
+		return;
+
+	ce_common = (struct tioce_common *)pcidev_info->pdi_pcibus_info;
+	ce_mmr = (struct tioce *)ce_common->ce_pcibus.bs_base;
+
+	bit = sn_irq_info->irq_int_bit;
+
+	ce_mmr->ce_adm_int_mask |= (1UL << bit);
+	ce_mmr->ce_adm_int_dest[bit] =
+		((uint64_t)sn_irq_info->irq_irq << INTR_VECTOR_SHFT) |
+			   sn_irq_info->irq_xtalkaddr;
+	ce_mmr->ce_adm_int_mask &= ~(1UL << bit);
+
+	tioce_force_interrupt(sn_irq_info);
+}
+
+/**
+ * tioce_bus_fixup - perform final PCI fixup for a TIO CE bus
+ * @prom_bussoft: Common prom/kernel struct representing the bus
+ *
+ * Replicates the tioce_common pointed to by @prom_bussoft in kernel
+ * space.  Allocates and initializes a kernel-only area for a given CE,
+ * and sets up an irq for handling CE error interrupts.
+ *
+ * On successful setup, returns the kernel version of tioce_common back to
+ * the caller.
+ */
+static void *
+tioce_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller)
+{
+	struct tioce_common *tioce_common;
+
+	/*
+	 * Allocate kernel bus soft and copy from prom.
+	 */
+
+	tioce_common = kcalloc(1, sizeof(struct tioce_common), GFP_KERNEL);
+	if (!tioce_common)
+		return NULL;
+
+	memcpy(tioce_common, prom_bussoft, sizeof(struct tioce_common));
+	tioce_common->ce_pcibus.bs_base |= __IA64_UNCACHED_OFFSET;
+
+	if (tioce_kern_init(tioce_common) == NULL) {
+		kfree(tioce_common);
+		return NULL;
+	}
+
+	if (request_irq(SGI_PCIASIC_ERROR,
+			tioce_error_intr_handler,
+			SA_SHIRQ, "TIOCE error", (void *)tioce_common))
+		printk(KERN_WARNING
+		       "%s:  Unable to get irq %d.  "
+		       "Error interrupts won't be routed for "
+		       "TIOCE bus %04x:%02x\n",
+		       __FUNCTION__, SGI_PCIASIC_ERROR,
+		       tioce_common->ce_pcibus.bs_persist_segment,
+		       tioce_common->ce_pcibus.bs_persist_busnum);
+
+	return tioce_common;
+}
+
+static struct sn_pcibus_provider tioce_pci_interfaces = {
+	.dma_map = tioce_dma,
+	.dma_map_consistent = tioce_dma_consistent,
+	.dma_unmap = tioce_dma_unmap,
+	.bus_fixup = tioce_bus_fixup,
+	.force_interrupt = tioce_force_interrupt,
+	.target_interrupt = tioce_target_interrupt
+};
+
+/**
+ * tioce_init_provider - init SN PCI provider ops for TIO CE
+ */
+int
+tioce_init_provider(void)
+{
+	sn_pci_provider[PCIIO_ASIC_TYPE_TIOCE] = &tioce_pci_interfaces;
+	return 0;
+}
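
Because the five DMA flavors partition the bus address space by range, the mode of a mapping is recoverable from the address alone, which is how tioce_dma_barrier() and tioce_dma_unmap() above dispatch.  A small userspace-compilable illustration reusing the file's range constants (the demo addresses are arbitrary):

#include <stdio.h>
#include <stdint.h>

#define TIOCE_D64_MIN	0x8000000000000000UL
#define TIOCE_D32_MIN	0x0000000080000000UL
#define TIOCE_D32_MAX	0x00000000ffffffffUL
#define TIOCE_M32_MAX	0x000000007fffffffUL
#define TIOCE_M40_MIN	0x0000004000000000UL
#define TIOCE_M40_MAX	0x0000007fffffffffUL
#define TIOCE_M40S_MIN	0x0000008000000000UL
#define TIOCE_M40S_MAX	0x000000ffffffffffUL

static const char *tioce_mode(uint64_t a)
{
	if (a >= TIOCE_D64_MIN)
		return "D64";
	if (a >= TIOCE_D32_MIN && a <= TIOCE_D32_MAX)
		return "D32";
	if (a <= TIOCE_M32_MAX)
		return "M32";
	if (a >= TIOCE_M40_MIN && a <= TIOCE_M40_MAX)
		return "M40";
	if (a >= TIOCE_M40S_MIN && a <= TIOCE_M40S_MAX)
		return "M40S";
	return "not a TIOCE DMA address";
}

int main(void)
{
	printf("%s\n", tioce_mode(0x80000000UL + 0x1000));	/* D32 */
	printf("%s\n", tioce_mode(1UL << 63));			/* D64 */
	printf("%s\n", tioce_mode(0x0000004000000000UL));	/* M40 */
	return 0;
}
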
diff -puN drivers/char/snsc_event.c~git-ia64 drivers/char/snsc_event.c
--- 25/drivers/char/snsc_event.c~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/drivers/char/snsc_event.c	2005-09-01 05:34:24.000000000 -0600
@@ -19,6 +19,7 @@
 #include <linux/sched.h>
 #include <linux/byteorder/generic.h>
 #include <asm/sn/sn_sal.h>
+#include <asm/unaligned.h>
 #include "snsc.h"
 
 static struct subch_data_s *event_sd;
@@ -62,13 +63,16 @@ static int
 scdrv_parse_event(char *event, int *src, int *code, int *esp_code, char *desc)
 {
 	char *desc_end;
+	__be32 from_buf;
 
 	/* record event source address */
-	*src = be32_to_cpup((__be32 *)event);
+	from_buf = get_unaligned((__be32 *)event);
+	*src = be32_to_cpup(&from_buf);
 	event += 4; 			/* move on to event code */
 
 	/* record the system controller's event code */
-	*code = be32_to_cpup((__be32 *)event);
+	from_buf = get_unaligned((__be32 *)event);
+	*code = be32_to_cpup(&from_buf);
 	event += 4;			/* move on to event arguments */
 
 	/* how many arguments are in the packet? */
@@ -82,7 +86,8 @@ scdrv_parse_event(char *event, int *src,
 		/* not an integer argument, so give up */
 		return -1;
 	}
-	*esp_code = be32_to_cpup((__be32 *)event);
+	from_buf = get_unaligned((__be32 *)event);
+	*esp_code = be32_to_cpup(&from_buf);
 	event += 4;
 
 	/* parse out the event description */
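
The pattern adopted above -- get_unaligned() into an aligned local, then be32_to_cpup() on that local -- is the portable way to pull big-endian fields out of a packed buffer on ia64, where dereferencing a misaligned __be32 pointer directly triggers an unaligned access.  A minimal sketch of the same idiom:

#include <linux/types.h>
#include <linux/byteorder/generic.h>
#include <asm/unaligned.h>

/* read a big-endian 32-bit field at an arbitrary buffer offset */
static int read_be32_field(char *event)
{
	__be32 raw;

	raw = get_unaligned((__be32 *)event);	/* byte-wise, alignment-safe */
	return be32_to_cpup(&raw);		/* &raw is naturally aligned */
}
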
diff -puN include/asm-ia64/acpi.h~git-ia64 include/asm-ia64/acpi.h
--- 25/include/asm-ia64/acpi.h~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/include/asm-ia64/acpi.h	2005-09-01 05:34:24.000000000 -0600
@@ -116,6 +116,11 @@ extern int __initdata nid_to_pxm_map[MAX
 
 extern u16 ia64_acpiid_to_sapicid[];
 
+/*
+ * Refer to the Intel ACPI _PDC support document for bit definitions
+ */
+#define ACPI_PDC_EST_CAPABILITY_SMP     0x8
+
 #endif /*__KERNEL__*/
 
 #endif /*_ASM_ACPI_H*/
diff -puN include/asm-ia64/fcntl.h~git-ia64 include/asm-ia64/fcntl.h
--- 25/include/asm-ia64/fcntl.h~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/include/asm-ia64/fcntl.h	2005-09-01 05:34:24.000000000 -0600
@@ -81,6 +81,7 @@ struct flock {
 
 #define F_LINUX_SPECIFIC_BASE	1024
 
-#define force_o_largefile() ( ! (current->personality & PER_LINUX32) )
+#define force_o_largefile()	\
+		(personality(current->personality) != PER_LINUX32)
 
 #endif /* _ASM_IA64_FCNTL_H */
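
The force_o_largefile() change is a correctness fix rather than a cleanup: current->personality carries flag bits above the base personality, and several base values other than PER_LINUX32 also have the 0x0008 bit set, so the old bitwise test misfired for them.  A userspace illustration (constants simplified from linux/personality.h; PER_IRIX32 is really 0x0009 | STICKY_TIMEOUTS):

#include <stdio.h>

#define PER_LINUX32	0x0008
#define PER_IRIX32	0x0009	/* base value with the 0x8 bit set */
#define PER_MASK	0x00ff
#define personality(p)	((p) & PER_MASK)

int main(void)
{
	unsigned long pers = PER_IRIX32;

	/* old test: 0x9 & 0x8 is nonzero, so O_LARGEFILE was NOT forced */
	printf("old: %d\n", !(pers & PER_LINUX32));		/* 0 */

	/* new test: the masked base 0x9 != 0x8, so it IS forced */
	printf("new: %d\n", personality(pers) != PER_LINUX32);	/* 1 */
	return 0;
}
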
diff -puN include/asm-ia64/io.h~git-ia64 include/asm-ia64/io.h
--- 25/include/asm-ia64/io.h~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/include/asm-ia64/io.h	2005-09-01 05:34:24.000000000 -0600
@@ -23,7 +23,7 @@
 #define __SLOW_DOWN_IO	do { } while (0)
 #define SLOW_DOWN_IO	do { } while (0)
 
-#define __IA64_UNCACHED_OFFSET	0xc000000000000000UL	/* region 6 */
+#define __IA64_UNCACHED_OFFSET	RGN_BASE(RGN_UNCACHED)
 
 /*
  * The legacy I/O space defined by the ia64 architecture supports only 65536 ports, but
@@ -41,7 +41,7 @@
 #define IO_SPACE_BASE(space)		((space) << IO_SPACE_BITS)
 #define IO_SPACE_PORT(port)		((port) & (IO_SPACE_SIZE - 1))
 
-#define IO_SPACE_SPARSE_ENCODING(p)	((((p) >> 2) << 12) | (p & 0xfff))
+#define IO_SPACE_SPARSE_ENCODING(p)	((((p) >> 2) << 12) | ((p) & 0xfff))
 
 struct io_space {
 	unsigned long mmio_base;	/* base in MMIO space */
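
The IO_SPACE_SPARSE_ENCODING hunk is pure macro hygiene: `+` binds tighter than `&`, so with an unparenthesized argument like base + port the low half expands to base + (port & 0xfff).  A compile-and-run demonstration with invented values:

#include <stdio.h>

#define ENC_OLD(p)	((((p) >> 2) << 12) | (p & 0xfff))
#define ENC_NEW(p)	((((p) >> 2) << 12) | ((p) & 0xfff))

int main(void)
{
	unsigned long base = 0x2000, port = 0x34;

	/* ENC_OLD's low half expands to base + (port & 0xfff) = 0x2034 */
	printf("old: %#lx\n", ENC_OLD(base + port));	/* 0x80f034 */
	printf("new: %#lx\n", ENC_NEW(base + port));	/* 0x80d034 */
	return 0;
}
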
diff -puN include/asm-ia64/mca_asm.h~git-ia64 include/asm-ia64/mca_asm.h
--- 25/include/asm-ia64/mca_asm.h~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/include/asm-ia64/mca_asm.h	2005-09-01 05:34:24.000000000 -0600
@@ -8,6 +8,8 @@
  * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
  * Copyright (C) 2002 Intel Corp.
  * Copyright (C) 2002 Jenna Hall <jenna.s.hall@intel.com>
+ * Copyright (C) 2005 Silicon Graphics, Inc
+ * Copyright (C) 2005 Keith Owens <kaos@sgi.com>
  */
 #ifndef _ASM_IA64_MCA_ASM_H
 #define _ASM_IA64_MCA_ASM_H
@@ -207,106 +209,33 @@
 	;;
 
 /*
- * The following offsets capture the order in which the
- * RSE related registers from the old context are
- * saved onto the new stack frame.
- *
- *	+-----------------------+
- *	|NDIRTY [BSP - BSPSTORE]|
- *	+-----------------------+
- *	|	RNAT		|
- *	+-----------------------+
- *	|	BSPSTORE	|
- *	+-----------------------+
- *	|	IFS		|
- *	+-----------------------+
- *	|	PFS		|
- *	+-----------------------+
- *	|	RSC		|
- *	+-----------------------+ <-------- Bottom of new stack frame
- */
-#define  rse_rsc_offset		0
-#define  rse_pfs_offset		(rse_rsc_offset+0x08)
-#define  rse_ifs_offset		(rse_pfs_offset+0x08)
-#define  rse_bspstore_offset	(rse_ifs_offset+0x08)
-#define  rse_rnat_offset	(rse_bspstore_offset+0x08)
-#define  rse_ndirty_offset	(rse_rnat_offset+0x08)
-
-/*
- * rse_switch_context
- *
- *	1. Save old RSC onto the new stack frame
- *	2. Save PFS onto new stack frame
- *	3. Cover the old frame and start a new frame.
- *	4. Save IFS onto new stack frame
- *	5. Save the old BSPSTORE on the new stack frame
- *	6. Save the old RNAT on the new stack frame
- *	7. Write BSPSTORE with the new backing store pointer
- *	8. Read and save the new BSP to calculate the #dirty registers
- * NOTE: Look at pages 11-10, 11-11 in PRM Vol 2
- */
-#define rse_switch_context(temp,p_stackframe,p_bspstore)			\
-	;;									\
-	mov     temp=ar.rsc;;							\
-	st8     [p_stackframe]=temp,8;;					\
-	mov     temp=ar.pfs;;							\
-	st8     [p_stackframe]=temp,8;						\
-	cover ;;								\
-	mov     temp=cr.ifs;;							\
-	st8     [p_stackframe]=temp,8;;						\
-	mov     temp=ar.bspstore;;						\
-	st8     [p_stackframe]=temp,8;;					\
-	mov     temp=ar.rnat;;							\
-	st8     [p_stackframe]=temp,8;						\
-	mov     ar.bspstore=p_bspstore;;					\
-	mov     temp=ar.bsp;;							\
-	sub     temp=temp,p_bspstore;;						\
-	st8     [p_stackframe]=temp,8;;
-
-/*
- * rse_return_context
- *	1. Allocate a zero-sized frame
- *	2. Store the number of dirty registers RSC.loadrs field
- *	3. Issue a loadrs to insure that any registers from the interrupted
- *	   context which were saved on the new stack frame have been loaded
- *	   back into the stacked registers
- *	4. Restore BSPSTORE
- *	5. Restore RNAT
- *	6. Restore PFS
- *	7. Restore IFS
- *	8. Restore RSC
- *	9. Issue an RFI
- */
-#define rse_return_context(psr_mask_reg,temp,p_stackframe)			\
-	;;									\
-	alloc   temp=ar.pfs,0,0,0,0;						\
-	add     p_stackframe=rse_ndirty_offset,p_stackframe;;			\
-	ld8     temp=[p_stackframe];;						\
-	shl     temp=temp,16;;							\
-	mov     ar.rsc=temp;;							\
-	loadrs;;								\
-	add     p_stackframe=-rse_ndirty_offset+rse_bspstore_offset,p_stackframe;;\
-	ld8     temp=[p_stackframe];;						\
-	mov     ar.bspstore=temp;;						\
-	add     p_stackframe=-rse_bspstore_offset+rse_rnat_offset,p_stackframe;;\
-	ld8     temp=[p_stackframe];;						\
-	mov     ar.rnat=temp;;							\
-	add     p_stackframe=-rse_rnat_offset+rse_pfs_offset,p_stackframe;;	\
-	ld8     temp=[p_stackframe];;						\
-	mov     ar.pfs=temp;;							\
-	add     p_stackframe=-rse_pfs_offset+rse_ifs_offset,p_stackframe;;	\
-	ld8     temp=[p_stackframe];;						\
-	mov     cr.ifs=temp;;							\
-	add     p_stackframe=-rse_ifs_offset+rse_rsc_offset,p_stackframe;;	\
-	ld8     temp=[p_stackframe];;						\
-	mov     ar.rsc=temp ;							\
-	mov     temp=psr;;							\
-	or      temp=temp,psr_mask_reg;;					\
-	mov     cr.ipsr=temp;;							\
-	mov     temp=ip;;							\
-	add     temp=0x30,temp;;						\
-	mov     cr.iip=temp;;							\
-	srlz.i;;								\
-	rfi;;
+ * The MCA and INIT stacks in struct ia64_mca_cpu look like normal kernel
+ * stacks, except that the SAL/OS state and a switch_stack are stored near the
+ * top of the MCA/INIT stack.  To support concurrent entry to MCA or INIT, as
+ * well as MCA over INIT, each event needs its own SAL/OS state.  All entries
+ * are 16 byte aligned.
+ *
+ *      +---------------------------+
+ *      |          pt_regs          |
+ *      +---------------------------+
+ *      |        switch_stack       |
+ *      +---------------------------+
+ *      |        SAL/OS state       |
+ *      +---------------------------+
+ *      |    16 byte scratch area   |
+ *      +---------------------------+ <-------- SP at start of C MCA handler
+ *      |           .....           |
+ *      +---------------------------+
+ *      | RBS for MCA/INIT handler  |
+ *      +---------------------------+
+ *      | struct task for MCA/INIT  |
+ *      +---------------------------+ <-------- Bottom of MCA/INIT stack
+ */
+
+#define ALIGN16(x)			((x)&~15)
+#define MCA_PT_REGS_OFFSET		ALIGN16(KERNEL_STACK_SIZE-IA64_PT_REGS_SIZE)
+#define MCA_SWITCH_STACK_OFFSET		ALIGN16(MCA_PT_REGS_OFFSET-IA64_SWITCH_STACK_SIZE)
+#define MCA_SOS_OFFSET			ALIGN16(MCA_SWITCH_STACK_OFFSET-IA64_SAL_OS_STATE_SIZE)
+#define MCA_SP_OFFSET			ALIGN16(MCA_SOS_OFFSET-16)
 
 #endif /* _ASM_IA64_MCA_ASM_H */
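
The new stack layout is defined entirely by the ALIGN16() chain above, so the offsets can be sanity-checked with plain arithmetic.  A userspace sketch; the *_SIZE values below are stand-ins for the constants generated into asm-offsets at build time, not the real numbers:

#include <stdio.h>

#define ALIGN16(x)	((x) & ~15UL)	/* as in the header, unsigned here */

/* stand-in sizes; the real ones come from arch/ia64/kernel/asm-offsets.c */
#define KERNEL_STACK_SIZE	(32 * 1024)
#define IA64_PT_REGS_SIZE	0x2f0
#define IA64_SWITCH_STACK_SIZE	0x510
#define IA64_SAL_OS_STATE_SIZE	0x90

int main(void)
{
	unsigned long pt_regs  = ALIGN16(KERNEL_STACK_SIZE - IA64_PT_REGS_SIZE);
	unsigned long sw_stack = ALIGN16(pt_regs - IA64_SWITCH_STACK_SIZE);
	unsigned long sos      = ALIGN16(sw_stack - IA64_SAL_OS_STATE_SIZE);
	unsigned long sp       = ALIGN16(sos - 16);	/* 16-byte scratch */

	/* each area sits below the previous one, all 16-byte aligned */
	printf("pt_regs=%#lx switch_stack=%#lx sos=%#lx sp=%#lx\n",
	       pt_regs, sw_stack, sos, sp);
	return 0;
}
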
diff -puN include/asm-ia64/mca.h~git-ia64 include/asm-ia64/mca.h
--- 25/include/asm-ia64/mca.h~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/include/asm-ia64/mca.h	2005-09-01 05:34:24.000000000 -0600
@@ -11,8 +11,6 @@
 #ifndef _ASM_IA64_MCA_H
 #define _ASM_IA64_MCA_H
 
-#define IA64_MCA_STACK_SIZE	8192
-
 #if !defined(__ASSEMBLY__)
 
 #include <linux/interrupt.h>
@@ -48,7 +46,8 @@ typedef union cmcv_reg_u {
 
 enum {
 	IA64_MCA_RENDEZ_CHECKIN_NOTDONE	=	0x0,
-	IA64_MCA_RENDEZ_CHECKIN_DONE	=	0x1
+	IA64_MCA_RENDEZ_CHECKIN_DONE	=	0x1,
+	IA64_MCA_RENDEZ_CHECKIN_INIT	=	0x2,
 };
 
 /* Information maintained by the MC infrastructure */
@@ -63,18 +62,42 @@ typedef struct ia64_mc_info_s {
 
 } ia64_mc_info_t;
 
-typedef struct ia64_mca_sal_to_os_state_s {
-	u64		imsto_os_gp;		/* GP of the os registered with the SAL */
-	u64		imsto_pal_proc;		/* PAL_PROC entry point - physical addr */
-	u64		imsto_sal_proc;		/* SAL_PROC entry point - physical addr */
-	u64		imsto_sal_gp;		/* GP of the SAL - physical */
-	u64		imsto_rendez_state;	/* Rendez state information */
-	u64		imsto_sal_check_ra;	/* Return address in SAL_CHECK while going
-						 * back to SAL from OS after MCA handling.
-						 */
-	u64		pal_min_state;		/* from PAL in r17 */
-	u64		proc_state_param;	/* from PAL in r18. See SDV 2:268 11.3.2.1 */
-} ia64_mca_sal_to_os_state_t;
+/* Handover state from SAL to OS and vice versa, for both MCA and INIT events.
+ * Besides the handover state, it also contains some saved registers from the
+ * time of the event.
+ * Note: mca_asm.S depends on the precise layout of this structure.
+ */
+
+struct ia64_sal_os_state {
+	/* SAL to OS, must be at offset 0 */
+	u64			os_gp;			/* GP of the os registered with the SAL, physical */
+	u64			pal_proc;		/* PAL_PROC entry point, physical */
+	u64			sal_proc;		/* SAL_PROC entry point, physical */
+	u64			rv_rc;			/* MCA - Rendezvous state, INIT - reason code */
+	u64			proc_state_param;	/* from R18 */
+	u64			monarch;		/* 1 for a monarch event, 0 for a slave */
+	/* common, must follow SAL to OS */
+	u64			sal_ra;			/* Return address in SAL, physical */
+	u64			sal_gp;			/* GP of the SAL - physical */
+	pal_min_state_area_t	*pal_min_state;		/* from R17.  physical in asm, virtual in C */
+	u64			prev_IA64_KR_CURRENT;	/* previous value of IA64_KR(CURRENT) */
+	struct task_struct	*prev_task;		/* previous task, NULL if it is not useful */
+	/* Some interrupt registers are not saved in minstate, pt_regs or
+	 * switch_stack.  Because MCA/INIT can occur when interrupts are
+	 * disabled, we need to save the additional interrupt registers over
+	 * MCA/INIT and resume.
+	 */
+	u64			isr;
+	u64			ifa;
+	u64			itir;
+	u64			iipa;
+	u64			iim;
+	u64			iha;
+	/* OS to SAL, must follow common */
+	u64			os_status;		/* OS status to SAL, enum below */
+	u64			context;		/* 0 if return to same context
+							   1 if return to new context */
+};
 
 enum {
 	IA64_MCA_CORRECTED	=	0x0,	/* Error has been corrected by OS_MCA */
@@ -84,35 +107,21 @@ enum {
 };
 
 enum {
+	IA64_INIT_RESUME	=	0x0,	/* Resume after return from INIT */
+	IA64_INIT_WARM_BOOT	=	-1,	/* Warm boot of the system needed from SAL */
+};
+
+enum {
 	IA64_MCA_SAME_CONTEXT	=	0x0,	/* SAL to return to same context */
 	IA64_MCA_NEW_CONTEXT	=	-1	/* SAL to return to new context */
 };
 
-typedef struct ia64_mca_os_to_sal_state_s {
-	u64		imots_os_status;	/*   OS status to SAL as to what happened
-						 *   with the MCA handling.
-						 */
-	u64		imots_sal_gp;		/* GP of the SAL - physical */
-	u64		imots_context;		/* 0 if return to same context
-						   1 if return to new context */
-	u64		*imots_new_min_state;	/* Pointer to structure containing
-						 * new values of registers in the min state
-						 * save area.
-						 */
-	u64		imots_sal_check_ra;	/* Return address in SAL_CHECK while going
-						 * back to SAL from OS after MCA handling.
-						 */
-} ia64_mca_os_to_sal_state_t;
-
 /* Per-CPU MCA state that is too big for normal per-CPU variables.  */
 
 struct ia64_mca_cpu {
-	u64 stack[IA64_MCA_STACK_SIZE/8];	/* MCA memory-stack */
-	u64 proc_state_dump[512];
-	u64 stackframe[32];
-	u64 rbstore[IA64_MCA_STACK_SIZE/8];	/* MCA reg.-backing store */
+	u64 mca_stack[KERNEL_STACK_SIZE/8];
 	u64 init_stack[KERNEL_STACK_SIZE/8];
-} __attribute__ ((aligned(16)));
+};
 
 /* Array of physical addresses of each CPU's MCA area.  */
 extern unsigned long __per_cpu_mca[NR_CPUS];
@@ -121,12 +130,29 @@ extern void ia64_mca_init(void);
 extern void ia64_mca_cpu_init(void *);
 extern void ia64_os_mca_dispatch(void);
 extern void ia64_os_mca_dispatch_end(void);
-extern void ia64_mca_ucmc_handler(void);
+extern void ia64_mca_ucmc_handler(struct pt_regs *, struct ia64_sal_os_state *);
+extern void ia64_init_handler(struct pt_regs *,
+			      struct switch_stack *,
+			      struct ia64_sal_os_state *);
 extern void ia64_monarch_init_handler(void);
 extern void ia64_slave_init_handler(void);
 extern void ia64_mca_cmc_vector_setup(void);
-extern int  ia64_reg_MCA_extension(void*);
+extern int  ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *));
 extern void ia64_unreg_MCA_extension(void);
+extern u64 ia64_get_rnat(u64 *);
+
+#else	/* __ASSEMBLY__ */
+
+#define IA64_MCA_CORRECTED	0x0	/* Error has been corrected by OS_MCA */
+#define IA64_MCA_WARM_BOOT	-1	/* Warm boot of the system needed from SAL */
+#define IA64_MCA_COLD_BOOT	-2	/* Cold boot of the system needed from SAL */
+#define IA64_MCA_HALT		-3	/* System to be halted by SAL */
+
+#define IA64_INIT_RESUME	0x0	/* Resume after return from INIT */
+#define IA64_INIT_WARM_BOOT	-1	/* Warm boot of the system needed from SAL */
+
+#define IA64_MCA_SAME_CONTEXT	0x0	/* SAL to return to same context */
+#define IA64_MCA_NEW_CONTEXT	-1	/* SAL to return to new context */
 
 #endif /* !__ASSEMBLY__ */
 #endif /* _ASM_IA64_MCA_H */
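
With the SAL/OS handover state now passed explicitly, the MCA extension hook also changes signature, as seen in the ia64_reg_MCA_extension() prototype above.  A hedged sketch of a conforming extension; the body is illustrative, and the convention assumed here is that a nonzero return means the error was recovered:

#include <linux/init.h>
#include <asm/mca.h>

/* illustrative recovery extension matching the new prototype */
static int example_mca_ext(void *record, struct ia64_sal_os_state *sos)
{
	/* a real handler would decode sos->proc_state_param etc. */
	return 0;	/* this sketch never claims recovery */
}

static int __init example_mca_ext_init(void)
{
	return ia64_reg_MCA_extension(example_mca_ext);
}
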
diff -puN include/asm-ia64/mmu_context.h~git-ia64 include/asm-ia64/mmu_context.h
--- 25/include/asm-ia64/mmu_context.h~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/include/asm-ia64/mmu_context.h	2005-09-01 05:34:24.000000000 -0600
@@ -19,6 +19,7 @@
 
 #define ia64_rid(ctx,addr)	(((ctx) << 3) | (addr >> 61))
 
+# include <asm/page.h>
 # ifndef __ASSEMBLY__
 
 #include <linux/compiler.h>
@@ -55,34 +56,46 @@ static inline void
 delayed_tlb_flush (void)
 {
 	extern void local_flush_tlb_all (void);
+	unsigned long flags;
 
 	if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
-		local_flush_tlb_all();
-		__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
+		spin_lock_irqsave(&ia64_ctx.lock, flags);
+		{
+			if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
+				local_flush_tlb_all();
+				__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
+			}
+		}
+		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
 	}
 }
 
-static inline mm_context_t
+static inline nv_mm_context_t
 get_mmu_context (struct mm_struct *mm)
 {
 	unsigned long flags;
-	mm_context_t context = mm->context;
-
-	if (context)
-		return context;
+	nv_mm_context_t context = mm->context;
 
-	spin_lock_irqsave(&ia64_ctx.lock, flags);
-	{
-		/* re-check, now that we've got the lock: */
-		context = mm->context;
-		if (context == 0) {
-			cpus_clear(mm->cpu_vm_mask);
-			if (ia64_ctx.next >= ia64_ctx.limit)
-				wrap_mmu_context(mm);
-			mm->context = context = ia64_ctx.next++;
+	if (unlikely(!context)) {
+		spin_lock_irqsave(&ia64_ctx.lock, flags);
+		{
+			/* re-check, now that we've got the lock: */
+			context = mm->context;
+			if (context == 0) {
+				cpus_clear(mm->cpu_vm_mask);
+				if (ia64_ctx.next >= ia64_ctx.limit)
+					wrap_mmu_context(mm);
+				mm->context = context = ia64_ctx.next++;
+			}
 		}
+		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
 	}
-	spin_unlock_irqrestore(&ia64_ctx.lock, flags);
+	/*
+	 * Ensure we're not starting to use "context" before any old
+	 * uses of it are gone from our TLB.
+	 */
+	delayed_tlb_flush();
+
 	return context;
 }
 
@@ -104,13 +117,13 @@ destroy_context (struct mm_struct *mm)
 }
 
 static inline void
-reload_context (mm_context_t context)
+reload_context (nv_mm_context_t context)
 {
 	unsigned long rid;
 	unsigned long rid_incr = 0;
 	unsigned long rr0, rr1, rr2, rr3, rr4, old_rr4;
 
-	old_rr4 = ia64_get_rr(0x8000000000000000UL);
+	old_rr4 = ia64_get_rr(RGN_BASE(RGN_HPAGE));
 	rid = context << 3;	/* make space for encoding the region number */
 	rid_incr = 1 << 8;
 
@@ -122,6 +135,10 @@ reload_context (mm_context_t context)
 	rr4 = rr0 + 4*rid_incr;
 #ifdef  CONFIG_HUGETLB_PAGE
 	rr4 = (rr4 & (~(0xfcUL))) | (old_rr4 & 0xfc);
+
+#  if RGN_HPAGE != 4
+#    error "reload_context assumes RGN_HPAGE is 4"
+#  endif
 #endif
 
 	ia64_set_rr(0x0000000000000000UL, rr0);
@@ -138,7 +155,7 @@ reload_context (mm_context_t context)
 static inline void
 activate_context (struct mm_struct *mm)
 {
-	mm_context_t context;
+	nv_mm_context_t context;
 
 	do {
 		context = get_mmu_context(mm);
@@ -157,8 +174,6 @@ activate_context (struct mm_struct *mm)
 static inline void
 activate_mm (struct mm_struct *prev, struct mm_struct *next)
 {
-	delayed_tlb_flush();
-
 	/*
 	 * We may get interrupts here, but that's OK because interrupt handlers cannot
 	 * touch user-space.
diff -puN include/asm-ia64/mmu.h~git-ia64 include/asm-ia64/mmu.h
--- 25/include/asm-ia64/mmu.h~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/include/asm-ia64/mmu.h	2005-09-01 05:34:24.000000000 -0600
@@ -2,10 +2,12 @@
 #define __MMU_H
 
 /*
- * Type for a context number.  We declare it volatile to ensure proper ordering when it's
- * accessed outside of spinlock'd critical sections (e.g., as done in activate_mm() and
- * init_new_context()).
+ * Type for a context number.  We declare it volatile to ensure proper
+ * ordering when it's accessed outside of spinlock'd critical sections
+ * (e.g., as done in activate_mm() and init_new_context()).
  */
 typedef volatile unsigned long mm_context_t;
 
+typedef unsigned long nv_mm_context_t;
+
 #endif
diff -puN include/asm-ia64/page.h~git-ia64 include/asm-ia64/page.h
--- 25/include/asm-ia64/page.h~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/include/asm-ia64/page.h	2005-09-01 05:34:24.000000000 -0600
@@ -13,6 +13,19 @@
 #include <asm/types.h>
 
 /*
+ * The top three bits of an IA64 address are its Region Number.
+ * Different regions are assigned to different purposes.
+ */
+#define RGN_SHIFT	(61)
+#define RGN_BASE(r)	(__IA64_UL_CONST(r)<<RGN_SHIFT)
+#define RGN_BITS	(RGN_BASE(-1))
+
+#define RGN_KERNEL	7	/* Identity mapped region */
+#define RGN_UNCACHED    6	/* Identity mapped I/O region */
+#define RGN_GATE	5	/* Gate page, Kernel text, etc */
+#define RGN_HPAGE	4	/* For Huge TLB pages */
+
+/*
  * PAGE_SHIFT determines the actual kernel page size.
  */
 #if defined(CONFIG_IA64_PAGE_SIZE_4KB)
@@ -36,10 +49,9 @@
 
 #define RGN_MAP_LIMIT	((1UL << (4*PAGE_SHIFT - 12)) - PAGE_SIZE)	/* per region addr limit */
 
+
 #ifdef CONFIG_HUGETLB_PAGE
-# define REGION_HPAGE		(4UL)	/* note: this is hardcoded in reload_context()!*/
-# define REGION_SHIFT		61
-# define HPAGE_REGION_BASE	(REGION_HPAGE << REGION_SHIFT)
+# define HPAGE_REGION_BASE	RGN_BASE(RGN_HPAGE)
 # define HPAGE_SHIFT		hpage_shift
 # define HPAGE_SHIFT_DEFAULT	28	/* check ia64 SDM for architecture supported size */
 # define HPAGE_SIZE		(__IA64_UL_CONST(1) << HPAGE_SHIFT)
@@ -130,16 +142,13 @@ typedef union ia64_va {
 #define REGION_NUMBER(x)	({ia64_va _v; _v.l = (long) (x); _v.f.reg;})
 #define REGION_OFFSET(x)	({ia64_va _v; _v.l = (long) (x); _v.f.off;})
 
-#define REGION_SIZE		REGION_NUMBER(1)
-#define REGION_KERNEL		7
-
 #ifdef CONFIG_HUGETLB_PAGE
 # define htlbpage_to_page(x)	(((unsigned long) REGION_NUMBER(x) << 61)			\
 				 | (REGION_OFFSET(x) >> (HPAGE_SHIFT-PAGE_SHIFT)))
 # define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
 # define is_hugepage_only_range(mm, addr, len)		\
-	 (REGION_NUMBER(addr) == REGION_HPAGE &&	\
-	  REGION_NUMBER((addr)+(len)-1) == REGION_HPAGE)
+	 (REGION_NUMBER(addr) == RGN_HPAGE &&	\
+	  REGION_NUMBER((addr)+(len)-1) == RGN_HPAGE)
 extern unsigned int hpage_shift;
 #endif
 
@@ -197,7 +206,7 @@ get_order (unsigned long size)
 # define __pgprot(x)	(x)
 #endif /* !STRICT_MM_TYPECHECKS */
 
-#define PAGE_OFFSET			__IA64_UL_CONST(0xe000000000000000)
+#define PAGE_OFFSET			RGN_BASE(RGN_KERNEL)
 
 #define VM_DATA_DEFAULT_FLAGS		(VM_READ | VM_WRITE |					\
 					 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC |		\
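
With the regions factored out this way, the old hard-coded constants fall straight out of RGN_BASE(); PAGE_OFFSET, for instance, is still 0xe000000000000000.  A quick userspace check (__IA64_UL_CONST reproduced as the usual token-pasting helper):

#include <stdio.h>

#define __IA64_UL_CONST(x)	x##UL
#define RGN_SHIFT	(61)
#define RGN_BASE(r)	(__IA64_UL_CONST(r) << RGN_SHIFT)

#define RGN_KERNEL	7
#define RGN_UNCACHED	6
#define RGN_GATE	5
#define RGN_HPAGE	4

int main(void)
{
	printf("PAGE_OFFSET            %#lx\n", RGN_BASE(RGN_KERNEL));	/* 0xe000... */
	printf("__IA64_UNCACHED_OFFSET %#lx\n", RGN_BASE(RGN_UNCACHED));	/* 0xc000... */
	printf("gate/vmalloc region    %#lx\n", RGN_BASE(RGN_GATE));	/* 0xa000... */
	printf("HPAGE_REGION_BASE      %#lx\n", RGN_BASE(RGN_HPAGE));	/* 0x8000... */
	return 0;
}
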
diff -puN include/asm-ia64/pal.h~git-ia64 include/asm-ia64/pal.h
--- 25/include/asm-ia64/pal.h~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/include/asm-ia64/pal.h	2005-09-01 05:34:24.000000000 -0600
@@ -75,6 +75,8 @@
 #define PAL_CACHE_READ		259	/* read tag & data of cacheline for diagnostic testing */
 #define PAL_CACHE_WRITE		260	/* write tag & data of cacheline for diagnostic testing */
 #define PAL_VM_TR_READ		261	/* read contents of translation register */
+#define PAL_GET_PSTATE		262	/* get the current P-state */
+#define PAL_SET_PSTATE		263	/* set the P-state */
 
 #ifndef __ASSEMBLY__
 
@@ -1111,6 +1113,25 @@ ia64_pal_halt_info (pal_power_mgmt_info_
 	return iprv.status;
 }
 
+/* Get the current P-state information */
+static inline s64
+ia64_pal_get_pstate (u64 *pstate_index)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL_STK(iprv, PAL_GET_PSTATE, 0, 0, 0);
+	*pstate_index = iprv.v0;
+	return iprv.status;
+}
+
+/* Set the P-state */
+static inline s64
+ia64_pal_set_pstate (u64 pstate_index)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL_STK(iprv, PAL_SET_PSTATE, pstate_index, 0, 0);
+	return iprv.status;
+}
+
 /* Cause the processor to enter LIGHT HALT state, where prefetching and execution are
  * suspended, but cache and TLB coherency is maintained.
  */
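
A hedged sketch of how the two new wrappers might be used together; the caller and its error convention are illustrative, while ia64_pal_get_pstate(), ia64_pal_set_pstate() and PAL_STATUS_SUCCESS come from this header:

#include <linux/errno.h>
#include <asm/pal.h>

/* illustrative: move to a new P-state only if not already there */
static int example_set_pstate(u64 new_index)
{
	u64 cur;

	if (ia64_pal_get_pstate(&cur) != PAL_STATUS_SUCCESS)
		return -EIO;
	if (cur == new_index)
		return 0;
	if (ia64_pal_set_pstate(new_index) != PAL_STATUS_SUCCESS)
		return -EIO;
	return 0;
}
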
diff -puN include/asm-ia64/pgtable.h~git-ia64 include/asm-ia64/pgtable.h
--- 25/include/asm-ia64/pgtable.h~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/include/asm-ia64/pgtable.h	2005-09-01 05:34:24.000000000 -0600
@@ -204,21 +204,18 @@ ia64_phys_addr_valid (unsigned long addr
 #define set_pte(ptep, pteval)	(*(ptep) = (pteval))
 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
 
-#define RGN_SIZE	(1UL << 61)
-#define RGN_KERNEL	7
-
-#define VMALLOC_START		0xa000000200000000UL
+#define VMALLOC_START		(RGN_BASE(RGN_GATE) + 0x200000000UL)
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-# define VMALLOC_END_INIT	(0xa000000000000000UL + (1UL << (4*PAGE_SHIFT - 9)))
+# define VMALLOC_END_INIT	(RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
 # define VMALLOC_END		vmalloc_end
   extern unsigned long vmalloc_end;
 #else
-# define VMALLOC_END		(0xa000000000000000UL + (1UL << (4*PAGE_SHIFT - 9)))
+# define VMALLOC_END		(RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
 #endif
 
 /* fs/proc/kcore.c */
-#define	kc_vaddr_to_offset(v) ((v) - 0xa000000000000000UL)
-#define	kc_offset_to_vaddr(o) ((o) + 0xa000000000000000UL)
+#define	kc_vaddr_to_offset(v) ((v) - RGN_BASE(RGN_GATE))
+#define	kc_offset_to_vaddr(o) ((o) + RGN_BASE(RGN_GATE))
 
 /*
  * Conversion functions: convert page frame number (pfn) and a protection value to a page
diff -puN include/asm-ia64/ptrace.h~git-ia64 include/asm-ia64/ptrace.h
--- 25/include/asm-ia64/ptrace.h~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/include/asm-ia64/ptrace.h	2005-09-01 05:34:24.000000000 -0600
@@ -119,7 +119,7 @@ struct pt_regs {
 	unsigned long ar_unat;		/* interrupted task's NaT register (preserved) */
 	unsigned long ar_pfs;		/* prev function state  */
 	unsigned long ar_rsc;		/* RSE configuration */
-	/* The following two are valid only if cr_ipsr.cpl > 0: */
+	/* The following two are valid only if cr_ipsr.cpl > 0 || ti->flags & _TIF_MCA_INIT */
 	unsigned long ar_rnat;		/* RSE NaT */
 	unsigned long ar_bspstore;	/* RSE bspstore */
 
diff -puN include/asm-ia64/rwsem.h~git-ia64 include/asm-ia64/rwsem.h
--- 25/include/asm-ia64/rwsem.h~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/include/asm-ia64/rwsem.h	2005-09-01 05:34:24.000000000 -0600
@@ -3,6 +3,7 @@
  *
  * Copyright (C) 2003 Ken Chen <kenneth.w.chen@intel.com>
  * Copyright (C) 2003 Asit Mallick <asit.k.mallick@intel.com>
+ * Copyright (C) 2005 Christoph Lameter <clameter@sgi.com>
  *
  * Based on asm-i386/rwsem.h and other architecture implementation.
  *
@@ -11,9 +12,9 @@
  *
  * The lock count is initialized to 0 (no active and no waiting lockers).
  *
- * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case
- * of an uncontended lock. Readers increment by 1 and see a positive value
- * when uncontended, negative if there are writers (and maybe) readers
+ * When a writer subtracts WRITE_BIAS, it'll get 0xffffffff00000001 for
+ * the case of an uncontended lock. Readers increment by 1 and see a positive
+ * value when uncontended, negative if there are writers (and maybe readers)
  * waiting (in which case it goes to sleep).
  */
 
@@ -29,7 +30,7 @@
  * the semaphore definition
  */
 struct rw_semaphore {
-	signed int		count;
+	signed long		count;
 	spinlock_t		wait_lock;
 	struct list_head	wait_list;
 #if RWSEM_DEBUG
@@ -37,10 +38,10 @@ struct rw_semaphore {
 #endif
 };
 
-#define RWSEM_UNLOCKED_VALUE		0x00000000
-#define RWSEM_ACTIVE_BIAS		0x00000001
-#define RWSEM_ACTIVE_MASK		0x0000ffff
-#define RWSEM_WAITING_BIAS		(-0x00010000)
+#define RWSEM_UNLOCKED_VALUE		__IA64_UL_CONST(0x0000000000000000)
+#define RWSEM_ACTIVE_BIAS		__IA64_UL_CONST(0x0000000000000001)
+#define RWSEM_ACTIVE_MASK		__IA64_UL_CONST(0x00000000ffffffff)
+#define RWSEM_WAITING_BIAS		-__IA64_UL_CONST(0x0000000100000000)
 #define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
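+
+/*
+ * Worked example (for illustration): an uncontended down_write() moves
+ * the count from 0 to
+ *
+ *	RWSEM_ACTIVE_WRITE_BIAS = -0x0000000100000000 + 0x0000000000000001
+ *	                        = 0xffffffff00000001
+ *
+ * which is negative when read as a signed long, so concurrent readers,
+ * which test for a negative count, go to sleep.
+ */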
 
@@ -83,7 +84,7 @@ init_rwsem (struct rw_semaphore *sem)
 static inline void
 __down_read (struct rw_semaphore *sem)
 {
-	int result = ia64_fetchadd4_acq((unsigned int *)&sem->count, 1);
+	long result = ia64_fetchadd8_acq((unsigned long *)&sem->count, 1);
 
 	if (result < 0)
 		rwsem_down_read_failed(sem);
@@ -95,7 +96,7 @@ __down_read (struct rw_semaphore *sem)
 static inline void
 __down_write (struct rw_semaphore *sem)
 {
-	int old, new;
+	long old, new;
 
 	do {
 		old = sem->count;
@@ -112,7 +113,7 @@ __down_write (struct rw_semaphore *sem)
 static inline void
 __up_read (struct rw_semaphore *sem)
 {
-	int result = ia64_fetchadd4_rel((unsigned int *)&sem->count, -1);
+	long result = ia64_fetchadd8_rel((unsigned long *)&sem->count, -1);
 
 	if (result < 0 && (--result & RWSEM_ACTIVE_MASK) == 0)
 		rwsem_wake(sem);
@@ -124,7 +125,7 @@ __up_read (struct rw_semaphore *sem)
 static inline void
 __up_write (struct rw_semaphore *sem)
 {
-	int old, new;
+	long old, new;
 
 	do {
 		old = sem->count;
@@ -141,7 +142,7 @@ __up_write (struct rw_semaphore *sem)
 static inline int
 __down_read_trylock (struct rw_semaphore *sem)
 {
-	int tmp;
+	long tmp;
 	while ((tmp = sem->count) >= 0) {
 		if (tmp == cmpxchg_acq(&sem->count, tmp, tmp+1)) {
 			return 1;
@@ -156,7 +157,7 @@ __down_read_trylock (struct rw_semaphore
 static inline int
 __down_write_trylock (struct rw_semaphore *sem)
 {
-	int tmp = cmpxchg_acq(&sem->count, RWSEM_UNLOCKED_VALUE,
+	long tmp = cmpxchg_acq(&sem->count, RWSEM_UNLOCKED_VALUE,
 			      RWSEM_ACTIVE_WRITE_BIAS);
 	return tmp == RWSEM_UNLOCKED_VALUE;
 }
@@ -167,7 +168,7 @@ __down_write_trylock (struct rw_semaphor
 static inline void
 __downgrade_write (struct rw_semaphore *sem)
 {
-	int old, new;
+	long old, new;
 
 	do {
 		old = sem->count;
@@ -182,7 +183,7 @@ __downgrade_write (struct rw_semaphore *
  * Implement atomic add functionality.  These used to be "inline" functions, but GCC v3.1
  * doesn't quite optimize this stuff right and ends up with bad calls to fetchandadd.
  */
-#define rwsem_atomic_add(delta, sem)	atomic_add(delta, (atomic_t *)(&(sem)->count))
-#define rwsem_atomic_update(delta, sem)	atomic_add_return(delta, (atomic_t *)(&(sem)->count))
+#define rwsem_atomic_add(delta, sem)	atomic64_add(delta, (atomic64_t *)(&(sem)->count))
+#define rwsem_atomic_update(delta, sem)	atomic64_add_return(delta, (atomic64_t *)(&(sem)->count))
 
 #endif /* _ASM_IA64_RWSEM_H */
diff -puN include/asm-ia64/sn/addrs.h~git-ia64 include/asm-ia64/sn/addrs.h
--- 25/include/asm-ia64/sn/addrs.h~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/include/asm-ia64/sn/addrs.h	2005-09-01 05:34:24.000000000 -0600
@@ -3,7 +3,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 1992-1999,2001-2004 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (c) 1992-1999,2001-2005 Silicon Graphics, Inc. All rights reserved.
  */
 
 #ifndef _ASM_IA64_SN_ADDRS_H
@@ -65,7 +65,6 @@
 
 #define NASID_MASK              ((u64)NASID_BITMASK << NASID_SHIFT)
 #define AS_MASK			((u64)AS_BITMASK << AS_SHIFT)
-#define REGION_BITS		0xe000000000000000UL
 
 
 /*
@@ -79,38 +78,30 @@
 #define AS_CAC_SPACE		(AS_CAC_VAL << AS_SHIFT)
 
 
-/*
- * Base addresses for various address ranges.
- */
-#define CACHED			0xe000000000000000UL
-#define UNCACHED                0xc000000000000000UL
-#define UNCACHED_PHYS           0x8000000000000000UL
-
-
 /* 
  * Virtual Mode Local & Global MMR space.  
  */
 #define SH1_LOCAL_MMR_OFFSET	0x8000000000UL
 #define SH2_LOCAL_MMR_OFFSET	0x0200000000UL
 #define LOCAL_MMR_OFFSET	(is_shub2() ? SH2_LOCAL_MMR_OFFSET : SH1_LOCAL_MMR_OFFSET)
-#define LOCAL_MMR_SPACE		(UNCACHED | LOCAL_MMR_OFFSET)
-#define LOCAL_PHYS_MMR_SPACE	(UNCACHED_PHYS | LOCAL_MMR_OFFSET)
+#define LOCAL_MMR_SPACE		(__IA64_UNCACHED_OFFSET | LOCAL_MMR_OFFSET)
+#define LOCAL_PHYS_MMR_SPACE	(RGN_BASE(RGN_HPAGE) | LOCAL_MMR_OFFSET)
 
 #define SH1_GLOBAL_MMR_OFFSET	0x0800000000UL
 #define SH2_GLOBAL_MMR_OFFSET	0x0300000000UL
 #define GLOBAL_MMR_OFFSET	(is_shub2() ? SH2_GLOBAL_MMR_OFFSET : SH1_GLOBAL_MMR_OFFSET)
-#define GLOBAL_MMR_SPACE	(UNCACHED | GLOBAL_MMR_OFFSET)
+#define GLOBAL_MMR_SPACE	(__IA64_UNCACHED_OFFSET | GLOBAL_MMR_OFFSET)
 
 /*
  * Physical mode addresses
  */
-#define GLOBAL_PHYS_MMR_SPACE	(UNCACHED_PHYS | GLOBAL_MMR_OFFSET)
+#define GLOBAL_PHYS_MMR_SPACE	(RGN_BASE(RGN_HPAGE) | GLOBAL_MMR_OFFSET)
 
 
 /*
  * Clear region & AS bits.
  */
-#define TO_PHYS_MASK		(~(REGION_BITS | AS_MASK))
+#define TO_PHYS_MASK		(~(RGN_BITS | AS_MASK))
 
 
 /*
@@ -126,6 +117,7 @@
 #define GLOBAL_MMR_PHYS_ADDR(n,a) (GLOBAL_PHYS_MMR_SPACE | REMOTE_ADDR(n,a))
 #define GLOBAL_CAC_ADDR(n,a)	(CAC_BASE | REMOTE_ADDR(n,a))
 #define CHANGE_NASID(n,x)	((void *)(((u64)(x) & ~NASID_MASK) | NASID_SPACE(n)))
+#define IS_TIO_NASID(n)		((n) & 1)
 
 
 /* non-II mmr's start at top of big window space (4G) */
@@ -134,10 +126,10 @@
 /*
  * general address defines
  */
-#define CAC_BASE		(CACHED   | AS_CAC_SPACE)
-#define AMO_BASE		(UNCACHED | AS_AMO_SPACE)
-#define AMO_PHYS_BASE		(UNCACHED_PHYS | AS_AMO_SPACE)
-#define GET_BASE		(CACHED   | AS_GET_SPACE)
+#define CAC_BASE		(PAGE_OFFSET | AS_CAC_SPACE)
+#define AMO_BASE		(__IA64_UNCACHED_OFFSET | AS_AMO_SPACE)
+#define AMO_PHYS_BASE		(RGN_BASE(RGN_HPAGE) | AS_AMO_SPACE)
+#define GET_BASE		(PAGE_OFFSET | AS_GET_SPACE)
 
 /*
  * Convert Memory addresses between various addressing modes.
@@ -155,17 +147,35 @@
  *           the chiplet id is zero.  If we implement TIO-TIO dma, we might need
  *           to insert a chiplet id into this macro.  However, it is our belief
  *           right now that this chiplet id will be ICE, which is also zero.
- *           Nasid starts on bit 40.
  */
-#define PHYS_TO_TIODMA(x)	( (((u64)(NASID_GET(x))) << 40) | NODE_OFFSET(x))
-#define PHYS_TO_DMA(x)          ( (((u64)(x) & NASID_MASK) >> 2) | NODE_OFFSET(x))
+#define SH1_TIO_PHYS_TO_DMA(x) 						\
+	((((u64)(NASID_GET(x))) << 40) | NODE_OFFSET(x))
+
+#define SH2_NETWORK_BANK_OFFSET(x)					\
+	((u64)(x) & ((1UL << (sn_hub_info->nasid_shift - 4)) - 1))
+
+#define SH2_NETWORK_BANK_SELECT(x)					\
+	((((u64)(x) & (0x3UL << (sn_hub_info->nasid_shift - 4)))	\
+		>> (sn_hub_info->nasid_shift - 4)) << 36)
+
+#define SH2_NETWORK_ADDRESS(x)						\
+	(SH2_NETWORK_BANK_OFFSET(x) | SH2_NETWORK_BANK_SELECT(x))
+
+#define SH2_TIO_PHYS_TO_DMA(x)						\
+	(((u64)(NASID_GET(x)) << 40) | SH2_NETWORK_ADDRESS(x))
+
+#define PHYS_TO_TIODMA(x)						\
+	(is_shub1() ? SH1_TIO_PHYS_TO_DMA(x) : SH2_TIO_PHYS_TO_DMA(x))
+
+#define PHYS_TO_DMA(x)							\
+	((((u64)(x) & NASID_MASK) >> 2) | NODE_OFFSET(x))
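+
+/*
+ * Worked example (illustrative; assumes sn_hub_info->nasid_shift == 38):
+ * SH2_NETWORK_BANK_OFFSET() keeps address bits [33:0] and
+ * SH2_NETWORK_BANK_SELECT() moves the bank bits [35:34] up to [37:36],
+ * leaving the nasid field starting at bit 40.
+ */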
 
 
 /*
  * Macros to test for address type.
  */
-#define IS_AMO_ADDRESS(x)	(((u64)(x) & (REGION_BITS | AS_MASK)) == AMO_BASE)
-#define IS_AMO_PHYS_ADDRESS(x)	(((u64)(x) & (REGION_BITS | AS_MASK)) == AMO_PHYS_BASE)
+#define IS_AMO_ADDRESS(x)	(((u64)(x) & (RGN_BITS | AS_MASK)) == AMO_BASE)
+#define IS_AMO_PHYS_ADDRESS(x)	(((u64)(x) & (RGN_BITS | AS_MASK)) == AMO_PHYS_BASE)
 
 
 /*
@@ -180,18 +190,20 @@
 #define TIO_SWIN_BASE(n, w) 		(TIO_IO_BASE(n) + \
 					    ((u64) (w) << TIO_SWIN_SIZE_BITS))
 #define NODE_IO_BASE(n)			(GLOBAL_MMR_SPACE | NASID_SPACE(n))
-#define TIO_IO_BASE(n)                  (UNCACHED | NASID_SPACE(n))
+#define TIO_IO_BASE(n)                  (__IA64_UNCACHED_OFFSET | NASID_SPACE(n))
 #define BWIN_SIZE			(1UL << BWIN_SIZE_BITS)
 #define NODE_BWIN_BASE0(n)		(NODE_IO_BASE(n) + BWIN_SIZE)
 #define NODE_BWIN_BASE(n, w)		(NODE_BWIN_BASE0(n) + ((u64) (w) << BWIN_SIZE_BITS))
 #define RAW_NODE_SWIN_BASE(n, w)	(NODE_IO_BASE(n) + ((u64) (w) << SWIN_SIZE_BITS))
 #define BWIN_WIDGET_MASK		0x7
 #define BWIN_WINDOWNUM(x)		(((x) >> BWIN_SIZE_BITS) & BWIN_WIDGET_MASK)
+#define SH1_IS_BIG_WINDOW_ADDR(x)	((x) & BWIN_TOP)
 
 #define TIO_BWIN_WINDOW_SELECT_MASK	0x7
 #define TIO_BWIN_WINDOWNUM(x)		(((x) >> TIO_BWIN_SIZE_BITS) & TIO_BWIN_WINDOW_SELECT_MASK)
 
-
+#define TIO_HWIN_SHIFT_BITS		33
+#define TIO_HWIN(x)			(NODE_OFFSET(x) >> TIO_HWIN_SHIFT_BITS)
 
 /*
  * The following definitions pertain to the IO special address
@@ -216,10 +228,6 @@
 #define TIO_SWIN_WIDGETNUM(x)		(((x)  >> TIO_SWIN_SIZE_BITS) & TIO_SWIN_WIDGET_MASK)
 
 
-#define TIO_IOSPACE_ADDR(n,x)					\
-	/* Move in the Chiplet ID for TIO Local Block MMR */	\
-	(REMOTE_ADDR(n,x) | 1UL << (NASID_SHIFT - 2))
-
 /*
  * The following macros produce the correct base virtual address for
  * the hub registers. The REMOTE_HUB_* macro produce
@@ -234,18 +242,40 @@
  *	Otherwise, the recommended approach is to use *_HUB_L() and *_HUB_S().
  *	They're always safe.
  */
+/* Shub1 TIO & MMR addressing macros */
+#define SH1_TIO_IOSPACE_ADDR(n,x)					\
+	GLOBAL_MMR_ADDR(n,x)
+
+#define SH1_REMOTE_BWIN_MMR(n,x)					\
+	GLOBAL_MMR_ADDR(n,x)
+
+#define SH1_REMOTE_SWIN_MMR(n,x)					\
+	(NODE_SWIN_BASE(n,1) + 0x800000UL + (x))
+
+#define SH1_REMOTE_MMR(n,x)						\
+	(SH1_IS_BIG_WINDOW_ADDR(x) ? SH1_REMOTE_BWIN_MMR(n,x) :		\
+	 	SH1_REMOTE_SWIN_MMR(n,x))
+
+/* Shub2 TIO & MMR addressing macros */
+#define SH2_TIO_IOSPACE_ADDR(n,x)					\
+	((__IA64_UNCACHED_OFFSET | REMOTE_ADDR(n,x) | 1UL << (NASID_SHIFT - 2)))
+
+#define SH2_REMOTE_MMR(n,x)						\
+	GLOBAL_MMR_ADDR(n,x)
+
+
+/* TIO & MMR addressing macros that work on both shub1 & shub2 */
+#define TIO_IOSPACE_ADDR(n,x)						\
+	((u64 *)(is_shub1() ? SH1_TIO_IOSPACE_ADDR(n,x) :		\
+		 SH2_TIO_IOSPACE_ADDR(n,x)))
+
+#define SH_REMOTE_MMR(n,x)						\
+	(is_shub1() ? SH1_REMOTE_MMR(n,x) : SH2_REMOTE_MMR(n,x))
+
 #define REMOTE_HUB_ADDR(n,x)						\
-	((n & 1) ?							\
-	/* TIO: */							\
-	(is_shub2() ?							\
-	/* TIO on Shub2 */						\
-	(volatile u64 *)(TIO_IOSPACE_ADDR(n,x))				\
-	: /* TIO on shub1 */						\
-	(volatile u64 *)(GLOBAL_MMR_ADDR(n,x)))				\
-									\
-	: /* SHUB1 and SHUB2 MMRs: */					\
-	(((x) & BWIN_TOP) ? ((volatile u64 *)(GLOBAL_MMR_ADDR(n,x)))	\
-	: ((volatile u64 *)(NODE_SWIN_BASE(n,1) + 0x800000 + (x)))))
+	(IS_TIO_NASID(n) ?  ((volatile u64*)TIO_IOSPACE_ADDR(n,x)) :	\
+	 ((volatile u64*)SH_REMOTE_MMR(n,x)))
+
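+/*
+ * Usage sketch: a remote MMR access combines REMOTE_HUB_ADDR() with the
+ * HUB_L()/HUB_S() accessors below; SH_EVENT_OCCURRED is only an example
+ * register.
+ *
+ *	u64 v = HUB_L(REMOTE_HUB_ADDR(nasid, SH_EVENT_OCCURRED));
+ *	HUB_S(REMOTE_HUB_ADDR(nasid, SH_EVENT_OCCURRED_ALIAS), v);
+ */
+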
 
 #define HUB_L(x)			(*((volatile typeof(*x) *)x))
 #define	HUB_S(x,d)			(*((volatile typeof(*x) *)x) = (d))
diff -puN include/asm-ia64/sn/geo.h~git-ia64 include/asm-ia64/sn/geo.h
--- 25/include/asm-ia64/sn/geo.h~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/include/asm-ia64/sn/geo.h	2005-09-01 05:34:24.000000000 -0600
@@ -3,7 +3,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
  */
 
 #ifndef _ASM_IA64_SN_GEO_H
@@ -108,7 +108,6 @@ typedef union geoid_u {
 #define INVALID_SLAB            (slabid_t)-1
 #define INVALID_SLOT            (slotid_t)-1
 #define INVALID_MODULE          ((moduleid_t)-1)
-#define INVALID_PARTID          ((partid_t)-1)
 
 static inline slabid_t geo_slab(geoid_t g)
 {
diff -puN include/asm-ia64/sn/intr.h~git-ia64 include/asm-ia64/sn/intr.h
--- 25/include/asm-ia64/sn/intr.h~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/include/asm-ia64/sn/intr.h	2005-09-01 05:34:24.000000000 -0600
@@ -12,13 +12,12 @@
 #include <linux/rcupdate.h>
 
 #define SGI_UART_VECTOR		(0xe9)
-#define SGI_PCIBR_ERROR		(0x33)
 
 /* Reserved IRQs : Note, not to exceed IA64_SN2_FIRST_DEVICE_VECTOR */
 #define SGI_XPC_ACTIVATE                (0x30)
 #define SGI_II_ERROR                    (0x31)
 #define SGI_XBOW_ERROR                  (0x32)
-#define SGI_PCIBR_ERROR                 (0x33)
+#define SGI_PCIASIC_ERROR               (0x33)
 #define SGI_ACPI_SCI_INT                (0x34)
 #define SGI_TIOCA_ERROR                 (0x35)
 #define SGI_TIO_ERROR                   (0x36)
diff -puN include/asm-ia64/sn/nodepda.h~git-ia64 include/asm-ia64/sn/nodepda.h
--- 25/include/asm-ia64/sn/nodepda.h~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/include/asm-ia64/sn/nodepda.h	2005-09-01 05:34:24.000000000 -0600
@@ -37,7 +37,6 @@ struct phys_cpuid {
 
 struct nodepda_s {
 	void 		*pdinfo;	/* Platform-dependent per-node info */
-	spinlock_t		bist_lock;
 
 	/*
 	 * The BTEs on this node are shared by the local cpus
@@ -55,6 +54,8 @@ struct nodepda_s {
 	 * Array of physical cpu identifiers. Indexed by cpuid.
 	 */
 	struct phys_cpuid	phys_cpuid[NR_CPUS];
+	spinlock_t		ptc_lock ____cacheline_aligned_in_smp;
+	spinlock_t		bist_lock;
 };
 
 typedef struct nodepda_s nodepda_t;
diff -puN include/asm-ia64/sn/pcibus_provider_defs.h~git-ia64 include/asm-ia64/sn/pcibus_provider_defs.h
--- 25/include/asm-ia64/sn/pcibus_provider_defs.h~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/include/asm-ia64/sn/pcibus_provider_defs.h	2005-09-01 05:34:24.000000000 -0600
@@ -18,8 +18,9 @@
 #define PCIIO_ASIC_TYPE_PIC	2
 #define PCIIO_ASIC_TYPE_TIOCP	3
 #define PCIIO_ASIC_TYPE_TIOCA	4
+#define PCIIO_ASIC_TYPE_TIOCE	5
 
-#define PCIIO_ASIC_MAX_TYPES	5
+#define PCIIO_ASIC_MAX_TYPES	6
 
 /*
  * Common pciio bus provider data.  There should be one of these as the
@@ -30,7 +31,8 @@
 struct pcibus_bussoft {
 	uint32_t		bs_asic_type;	/* chipset type */
 	uint32_t		bs_xid;		/* xwidget id */
-	uint64_t		bs_persist_busnum; /* Persistent Bus Number */
+	uint32_t		bs_persist_busnum; /* Persistent Bus Number */
+	uint32_t		bs_persist_segment; /* Segment Number */
 	uint64_t		bs_legacy_io;	/* legacy io pio addr */
 	uint64_t		bs_legacy_mem;	/* legacy mem pio addr */
 	uint64_t		bs_base;	/* widget base */
@@ -47,6 +49,8 @@ struct sn_pcibus_provider {
 	dma_addr_t	(*dma_map_consistent)(struct pci_dev *, unsigned long, size_t);
 	void		(*dma_unmap)(struct pci_dev *, dma_addr_t, int);
 	void *		(*bus_fixup)(struct pcibus_bussoft *, struct pci_controller *);
+	void		(*force_interrupt)(struct sn_irq_info *);
+	void		(*target_interrupt)(struct sn_irq_info *);
 };
 
 extern struct sn_pcibus_provider *sn_pci_provider[];
diff -puN include/asm-ia64/sn/pda.h~git-ia64 include/asm-ia64/sn/pda.h
--- 25/include/asm-ia64/sn/pda.h~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/include/asm-ia64/sn/pda.h	2005-09-01 05:34:24.000000000 -0600
@@ -39,7 +39,6 @@ typedef struct pda_s {
 	unsigned long pio_write_status_val;
 	volatile unsigned long *pio_shub_war_cam_addr;
 
-	unsigned long	sn_soft_irr[4];
 	unsigned long	sn_in_service_ivecs[4];
 	int		sn_lb_int_war_ticks;
 	int		sn_last_irq;
diff -puN include/asm-ia64/sn/sn2/sn_hwperf.h~git-ia64 include/asm-ia64/sn/sn2/sn_hwperf.h
--- 25/include/asm-ia64/sn/sn2/sn_hwperf.h~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/include/asm-ia64/sn/sn2/sn_hwperf.h	2005-09-01 05:34:24.000000000 -0600
@@ -43,6 +43,7 @@ struct sn_hwperf_object_info {
 
 /* macros for object classification */
 #define SN_HWPERF_IS_NODE(x)		((x) && strstr((x)->name, "SHub"))
+#define SN_HWPERF_IS_NODE_SHUB2(x)	((x) && strstr((x)->name, "SHub 2."))
 #define SN_HWPERF_IS_IONODE(x)		((x) && strstr((x)->name, "TIO"))
 #define SN_HWPERF_IS_ROUTER(x)		((x) && strstr((x)->name, "Router"))
 #define SN_HWPERF_IS_NL3ROUTER(x)	((x) && strstr((x)->name, "NL3Router"))
@@ -214,6 +215,15 @@ struct sn_hwperf_ioctl_args {
  */
 #define SN_HWPERF_GET_NODE_NASID	(102|SN_HWPERF_OP_MEM_COPYOUT)
 
+/*
+ * Given a node id, determine the id of the nearest node with CPUs
+ * and the id of the nearest node that has memory. The argument
+ * node would normally be a "headless" node, e.g. an "IO node".
+ * Return 0 on success.
+ */
+extern int sn_hwperf_get_nearest_node(cnodeid_t node,
+	cnodeid_t *near_mem, cnodeid_t *near_cpu);
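+
+/*
+ * Usage sketch (illustrative only):
+ *
+ *	cnodeid_t near_mem, near_cpu;
+ *	if (sn_hwperf_get_nearest_node(cnode, &near_mem, &near_cpu) == 0)
+ *		... prefer near_mem for allocations, near_cpu for threads ...
+ */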
+
 /* return codes */
 #define SN_HWPERF_OP_OK			0
 #define SN_HWPERF_OP_NOMEM		1
diff -puN /dev/null include/asm-ia64/sn/sn_feature_sets.h
--- /dev/null	2004-08-10 19:55:00.000000000 -0600
+++ 25-akpm/include/asm-ia64/sn/sn_feature_sets.h	2005-09-01 05:34:24.000000000 -0600
@@ -0,0 +1,57 @@
+#ifndef _ASM_IA64_SN_FEATURE_SETS_H
+#define _ASM_IA64_SN_FEATURE_SETS_H
+
+/*
+ * SN PROM Features
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2005 Silicon Graphics, Inc.  All rights reserved.
+ */
+
+
+#include <asm/types.h>
+#include <asm/bitops.h>
+
+/* --------------------- PROM Features -----------------------------*/
+extern int sn_prom_feature_available(int id);
+
+#define MAX_PROM_FEATURE_SETS			2
+
+/*
+ * The following defines features that may or may not be supported by the
+ * current PROM. The OS uses sn_prom_feature_available(feature) to test for
+ * the presence of a PROM feature. Down rev (old) PROMs will always test
+ * "false" for new features.
+ *
+ * Use:
+ * 		if (sn_prom_feature_available(PRF_FEATURE_XXX))
+ * 			...
+ */
+
+/*
+ * Example: feature XXX
+ */
+#define PRF_FEATURE_XXX		0
+
+
+
+/* --------------------- OS Features -------------------------------*/
+
+/*
+ * The following defines OS features that are optionally present in
+ * the operating system.
+ * During boot, PROM is notified of these features via a series of calls:
+ *
+ * 		ia64_sn_set_os_feature(feature1);
+ *
+ * Once enabled, a feature cannot be disabled.
+ *
+ * By default, features are disabled unless explicitly enabled.
+ */
+#define  OSF_MCA_SLV_TO_OS_INIT_SLV		0
+#define  OSF_FEAT_LOG_SBES			1
+
+#endif /* _ASM_IA64_SN_FEATURE_SETS_H */
diff -puN include/asm-ia64/sn/sn_sal.h~git-ia64 include/asm-ia64/sn/sn_sal.h
--- 25/include/asm-ia64/sn/sn_sal.h~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/include/asm-ia64/sn/sn_sal.h	2005-09-01 05:34:24.000000000 -0600
@@ -55,7 +55,6 @@
 #define  SN_SAL_BUS_CONFIG		   	   0x02000037
 #define  SN_SAL_SYS_SERIAL_GET			   0x02000038
 #define  SN_SAL_PARTITION_SERIAL_GET		   0x02000039
-#define  SN_SAL_SYSCTL_PARTITION_GET		   0x0200003a
 #define  SN_SAL_SYSTEM_POWER_DOWN		   0x0200003b
 #define  SN_SAL_GET_MASTER_BASEIO_NASID		   0x0200003c
 #define  SN_SAL_COHERENCE                          0x0200003d
@@ -78,7 +77,11 @@
 
 #define SN_SAL_HUB_ERROR_INTERRUPT		   0x02000060
 #define SN_SAL_BTE_RECOVER			   0x02000061
-#define SN_SAL_IOIF_GET_PCI_TOPOLOGY	           0x02000062
+#define SN_SAL_RESERVED_DO_NOT_USE		   0x02000062
+#define SN_SAL_IOIF_GET_PCI_TOPOLOGY		   0x02000064
+
+#define  SN_SAL_GET_PROM_FEATURE_SET		   0x02000065
+#define  SN_SAL_SET_OS_FEATURE_SET		   0x02000066
 
 /*
  * Service-specific constants
@@ -118,8 +121,8 @@
 /*
  * Error Handling Features
  */
-#define SAL_ERR_FEAT_MCA_SLV_TO_OS_INIT_SLV	0x1
-#define SAL_ERR_FEAT_LOG_SBES			0x2
+#define SAL_ERR_FEAT_MCA_SLV_TO_OS_INIT_SLV	0x1	/* obsolete */
+#define SAL_ERR_FEAT_LOG_SBES			0x2	/* obsolete */
 #define SAL_ERR_FEAT_MFR_OVERRIDE		0x4
 #define SAL_ERR_FEAT_SBE_THRESHOLD		0xffff0000
 
@@ -152,12 +155,6 @@ sn_sal_rev(void)
 }
 
 /*
- * Specify the minimum PROM revsion required for this kernel.
- * Note that they're stored in hex format...
- */
-#define SN_SAL_MIN_VERSION	0x0404
-
-/*
  * Returns the master console nasid, if the call fails, return an illegal
  * value.
  */
@@ -336,7 +333,7 @@ ia64_sn_plat_cpei_handler(void)
 }
 
 /*
- * Set Error Handling Features
+ * Set Error Handling Features	(Obsolete)
  */
 static inline u64
 ia64_sn_plat_set_error_handling_features(void)
@@ -586,35 +583,6 @@ sn_partition_serial_number_val(void) {
 }
 
 /*
- * Returns the partition id of the nasid passed in as an argument,
- * or INVALID_PARTID if the partition id cannot be retrieved.
- */
-static inline partid_t
-ia64_sn_sysctl_partition_get(nasid_t nasid)
-{
-	struct ia64_sal_retval ret_stuff;
-	ia64_sal_oemcall_nolock(&ret_stuff, SN_SAL_SYSCTL_PARTITION_GET, nasid,
-				0, 0, 0, 0, 0, 0);
-	if (ret_stuff.status != 0)
-	    return INVALID_PARTID;
-	return ((partid_t)ret_stuff.v0);
-}
-
-/*
- * Returns the partition id of the current processor.
- */
-
-extern partid_t sn_partid;
-
-static inline partid_t
-sn_local_partid(void) {
-	if (unlikely(sn_partid < 0)) {
-		sn_partid = ia64_sn_sysctl_partition_get(cpuid_to_nasid(smp_processor_id()));
-	}
-	return sn_partid;
-}
-
-/*
  * Returns the physical address of the partition's reserved page through
  * an iterative number of calls.
  *
@@ -749,7 +717,8 @@ ia64_sn_power_down(void)
 {
 	struct ia64_sal_retval ret_stuff;
 	SAL_CALL(ret_stuff, SN_SAL_SYSTEM_POWER_DOWN, 0, 0, 0, 0, 0, 0, 0);
-	while(1);
+	while(1)
+		cpu_relax();
 	/* never returns */
 }
 
@@ -1018,24 +987,6 @@ ia64_sn_get_sn_info(int fc, u8 *shubtype
 	ret_stuff.v2 = 0;
 	SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_SN_INFO, fc, 0, 0, 0, 0, 0, 0);
 
-/***** BEGIN HACK - temp til old proms no longer supported ********/
-	if (ret_stuff.status == SALRET_NOT_IMPLEMENTED) {
-		int nasid = get_sapicid() & 0xfff;;
-#define SH_SHUB_ID_NODES_PER_BIT_MASK 0x001f000000000000UL                                               
-#define SH_SHUB_ID_NODES_PER_BIT_SHFT 48                                                               
-		if (shubtype) *shubtype = 0;
-		if (nasid_bitmask) *nasid_bitmask = 0x7ff;
-		if (nasid_shift) *nasid_shift = 38;
-		if (systemsize) *systemsize = 11;
-		if (sharing_domain_size) *sharing_domain_size = 9;
-		if (partid) *partid = ia64_sn_sysctl_partition_get(nasid);
-		if (coher) *coher = nasid >> 9;
-		if (reg) *reg = (HUB_L((u64 *) LOCAL_MMR_ADDR(SH1_SHUB_ID)) & SH_SHUB_ID_NODES_PER_BIT_MASK) >>
-			SH_SHUB_ID_NODES_PER_BIT_SHFT;
-		return 0;
-	}
-/***** END HACK *******/
-
 	if (ret_stuff.status < 0)
 		return ret_stuff.status;
 
@@ -1068,12 +1019,10 @@ ia64_sn_hwperf_op(nasid_t nasid, u64 opc
 }
 
 static inline int
-ia64_sn_ioif_get_pci_topology(u64 rack, u64 bay, u64 slot, u64 slab,
-			      u64 buf, u64 len)
+ia64_sn_ioif_get_pci_topology(u64 buf, u64 len)
 {
 	struct ia64_sal_retval rv;
-	SAL_CALL_NOLOCK(rv, SN_SAL_IOIF_GET_PCI_TOPOLOGY,
-		rack, bay, slot, slab, buf, len, 0);
+	SAL_CALL_NOLOCK(rv, SN_SAL_IOIF_GET_PCI_TOPOLOGY, buf, len, 0, 0, 0, 0, 0);
 	return (int) rv.status;
 }
 
@@ -1100,4 +1049,25 @@ ia64_sn_is_fake_prom(void)
 	return (rv.status == 0);
 }
 
+static inline int
+ia64_sn_get_prom_feature_set(int set, unsigned long *feature_set)
+{
+	struct ia64_sal_retval rv;
+
+	SAL_CALL_NOLOCK(rv, SN_SAL_GET_PROM_FEATURE_SET, set, 0, 0, 0, 0, 0, 0);
+	if (rv.status != 0)
+		return rv.status;
+	*feature_set = rv.v0;
+	return 0;
+}
+
+static inline int
+ia64_sn_set_os_feature(int feature)
+{
+	struct ia64_sal_retval rv;
+
+	SAL_CALL_NOLOCK(rv, SN_SAL_SET_OS_FEATURE_SET, feature, 0, 0, 0, 0, 0, 0);
+	return rv.status;
+}
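+
+/*
+ * Sketch of how these two wrappers might be combined at boot; the
+ * feature ids are the placeholders defined in sn_feature_sets.h.
+ *
+ *	unsigned long feats;
+ *	if (ia64_sn_get_prom_feature_set(0, &feats) == 0 &&
+ *	    (feats & (1UL << PRF_FEATURE_XXX)))
+ *		ia64_sn_set_os_feature(OSF_MCA_SLV_TO_OS_INIT_SLV);
+ */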
+
 #endif /* _ASM_IA64_SN_SN_SAL_H */
diff -puN /dev/null include/asm-ia64/sn/tioce.h
--- /dev/null	2004-08-10 19:55:00.000000000 -0600
+++ 25-akpm/include/asm-ia64/sn/tioce.h	2005-09-01 05:34:24.000000000 -0600
@@ -0,0 +1,740 @@
+/**************************************************************************
+ *                                                                        *
+ *  Unpublished copyright (c) 2005, Silicon Graphics, Inc.                *
+ *  THIS IS UNPUBLISHED CONFIDENTIAL AND PROPRIETARY SOURCE CODE OF SGI.  *
+ *                                                                        *
+ *  The copyright notice above does  not evidence any actual or intended  *
+ *  publication  or  disclosure  of  this source  code,  which  includes  *
+ *  information that is confidential  and/or proprietary, and is a trade  *
+ *  secret, of  Silicon Graphics, Inc.   ANY REPRODUCTION, MODIFICATION,  *
+ *  DISTRIBUTION, PUBLIC  PERFORMANCE, OR  PUBLIC DISPLAY OF  OR THROUGH  *
+ *  USE  OF THIS  SOURCE CODE  WITHOUT  THE EXPRESS  WRITTEN CONSENT  OF  *
+ *  SILICON GRAPHICS, INC.  IS  STRICTLY PROHIBITED, AND IN VIOLATION OF  *
+ *  APPLICABLE  LAWS   AND  INTERNATIONAL  TREATIES.    THE  RECEIPT  OR  *
+ *  POSSESSION OF  THIS SOURCE CODE AND/OR RELATED  INFORMATION DOES NOT  *
+ *  CONVEY OR IMPLY ANY RIGHTS  TO REPRODUCE, DISCLOSE OR DISTRIBUTE ITS  *
+ *  CONTENTS,  OR TO  MANUFACTURE, USE,  OR  SELL ANYTHING  THAT IT  MAY  *
+ *  DESCRIBE, IN WHOLE OR IN PART.                                        *
+ *                                                                        *
+ **************************************************************************/
+
+#ifndef __ASM_IA64_SN_TIOCE_H__
+#define __ASM_IA64_SN_TIOCE_H__
+
+/* CE ASIC part & mfgr information  */
+#define TIOCE_PART_NUM			0xCE00
+#define TIOCE_MFGR_NUM			0x36
+#define TIOCE_REV_A			0x1
+
+/* CE Virtual PPB Vendor/Device IDs */
+#define CE_VIRT_PPB_VENDOR_ID		0x10a9
+#define CE_VIRT_PPB_DEVICE_ID		0x4002
+
+/* CE Host Bridge Vendor/Device IDs */
+#define CE_HOST_BRIDGE_VENDOR_ID	0x10a9
+#define CE_HOST_BRIDGE_DEVICE_ID	0x4003
+
+
+#define TIOCE_NUM_M40_ATES		4096
+#define TIOCE_NUM_M3240_ATES		2048
+#define TIOCE_NUM_PORTS			2
+
+/*
+ * Register layout for TIOCE.  MMR offsets are shown at the far right of the
+ * structure definition.
+ */
+typedef volatile struct tioce {
+	/*
+	 * ADMIN : Administration Registers
+	 */
+	uint64_t	ce_adm_id;				/* 0x000000 */
+	uint64_t	ce_pad_000008;				/* 0x000008 */
+	uint64_t	ce_adm_dyn_credit_status;		/* 0x000010 */
+	uint64_t	ce_adm_last_credit_status;		/* 0x000018 */
+	uint64_t	ce_adm_credit_limit;			/* 0x000020 */
+	uint64_t	ce_adm_force_credit;			/* 0x000028 */
+	uint64_t	ce_adm_control;				/* 0x000030 */
+	uint64_t	ce_adm_mmr_chn_timeout;			/* 0x000038 */
+	uint64_t	ce_adm_ssp_ure_timeout;			/* 0x000040 */
+	uint64_t	ce_adm_ssp_dre_timeout;			/* 0x000048 */
+	uint64_t	ce_adm_ssp_debug_sel;			/* 0x000050 */
+	uint64_t	ce_adm_int_status;			/* 0x000058 */
+	uint64_t	ce_adm_int_status_alias;		/* 0x000060 */
+	uint64_t	ce_adm_int_mask;			/* 0x000068 */
+	uint64_t	ce_adm_int_pending;			/* 0x000070 */
+	uint64_t	ce_adm_force_int;			/* 0x000078 */
+	uint64_t	ce_adm_ure_ups_buf_barrier_flush;	/* 0x000080 */
+	uint64_t	ce_adm_int_dest[15];	    /* 0x000088 -- 0x0000F8 */
+	uint64_t	ce_adm_error_summary;			/* 0x000100 */
+	uint64_t	ce_adm_error_summary_alias;		/* 0x000108 */
+	uint64_t	ce_adm_error_mask;			/* 0x000110 */
+	uint64_t	ce_adm_first_error;			/* 0x000118 */
+	uint64_t	ce_adm_error_overflow;			/* 0x000120 */
+	uint64_t	ce_adm_error_overflow_alias;		/* 0x000128 */
+	uint64_t	ce_pad_000130[2];	    /* 0x000130 -- 0x000138 */
+	uint64_t	ce_adm_tnum_error;			/* 0x000140 */
+	uint64_t	ce_adm_mmr_err_detail;			/* 0x000148 */
+	uint64_t	ce_adm_msg_sram_perr_detail;		/* 0x000150 */
+	uint64_t	ce_adm_bap_sram_perr_detail;		/* 0x000158 */
+	uint64_t	ce_adm_ce_sram_perr_detail;		/* 0x000160 */
+	uint64_t	ce_adm_ce_credit_oflow_detail;		/* 0x000168 */
+	uint64_t	ce_adm_tx_link_idle_max_timer;		/* 0x000170 */
+	uint64_t	ce_adm_pcie_debug_sel;			/* 0x000178 */
+	uint64_t	ce_pad_000180[16];	    /* 0x000180 -- 0x0001F8 */
+
+	uint64_t	ce_adm_pcie_debug_sel_top;		/* 0x000200 */
+	uint64_t	ce_adm_pcie_debug_lat_sel_lo_top;	/* 0x000208 */
+	uint64_t	ce_adm_pcie_debug_lat_sel_hi_top;	/* 0x000210 */
+	uint64_t	ce_adm_pcie_debug_trig_sel_top;		/* 0x000218 */
+	uint64_t	ce_adm_pcie_debug_trig_lat_sel_lo_top;	/* 0x000220 */
+	uint64_t	ce_adm_pcie_debug_trig_lat_sel_hi_top;	/* 0x000228 */
+	uint64_t	ce_adm_pcie_trig_compare_top;		/* 0x000230 */
+	uint64_t	ce_adm_pcie_trig_compare_en_top;	/* 0x000238 */
+	uint64_t	ce_adm_ssp_debug_sel_top;		/* 0x000240 */
+	uint64_t	ce_adm_ssp_debug_lat_sel_lo_top;	/* 0x000248 */
+	uint64_t	ce_adm_ssp_debug_lat_sel_hi_top;	/* 0x000250 */
+	uint64_t	ce_adm_ssp_debug_trig_sel_top;		/* 0x000258 */
+	uint64_t	ce_adm_ssp_debug_trig_lat_sel_lo_top;	/* 0x000260 */
+	uint64_t	ce_adm_ssp_debug_trig_lat_sel_hi_top;	/* 0x000268 */
+	uint64_t	ce_adm_ssp_trig_compare_top;		/* 0x000270 */
+	uint64_t	ce_adm_ssp_trig_compare_en_top;		/* 0x000278 */
+	uint64_t	ce_pad_000280[48];	    /* 0x000280 -- 0x0003F8 */
+
+	uint64_t	ce_adm_bap_ctrl;			/* 0x000400 */
+	uint64_t	ce_pad_000408[127];	    /* 0x000408 -- 0x0007F8 */
+
+	uint64_t	ce_msg_buf_data63_0[35];    /* 0x000800 -- 0x000918 */
+	uint64_t	ce_pad_000920[29];	    /* 0x000920 -- 0x0009F8 */
+
+	uint64_t	ce_msg_buf_data127_64[35];  /* 0x000A00 -- 0x000B18 */
+	uint64_t	ce_pad_000B20[29];	    /* 0x000B20 -- 0x000BF8 */
+
+	uint64_t	ce_msg_buf_parity[35];	    /* 0x000C00 -- 0x000D18 */
+	uint64_t	ce_pad_000D20[29];	    /* 0x000D20 -- 0x000DF8 */
+
+	uint64_t	ce_pad_000E00[576];	    /* 0x000E00 -- 0x001FF8 */
+
+	/*
+	 * LSI : LSI's PCI Express Link Registers (Link#1 and Link#2)
+	 * Link#1 MMRs start at 0x002000, Link#2 MMRs at 0x003000
+	 * NOTE: the comment offsets at far right: let 'z' = {2 or 3}
+	 */
+	#define ce_lsi(link_num)	ce_lsi[link_num-1]
+	struct ce_lsi_reg {
+		uint64_t	ce_lsi_lpu_id;			/* 0x00z000 */
+		uint64_t	ce_lsi_rst;			/* 0x00z008 */
+		uint64_t	ce_lsi_dbg_stat;		/* 0x00z010 */
+		uint64_t	ce_lsi_dbg_cfg;			/* 0x00z018 */
+		uint64_t	ce_lsi_ltssm_ctrl;		/* 0x00z020 */
+		uint64_t	ce_lsi_lk_stat;			/* 0x00z028 */
+		uint64_t	ce_pad_00z030[2];   /* 0x00z030 -- 0x00z038 */
+		uint64_t	ce_lsi_int_and_stat;		/* 0x00z040 */
+		uint64_t	ce_lsi_int_mask;		/* 0x00z048 */
+		uint64_t	ce_pad_00z050[22];  /* 0x00z050 -- 0x00z0F8 */
+		uint64_t	ce_lsi_lk_perf_cnt_sel;		/* 0x00z100 */
+		uint64_t	ce_pad_00z108;			/* 0x00z108 */
+		uint64_t	ce_lsi_lk_perf_cnt_ctrl;	/* 0x00z110 */
+		uint64_t	ce_pad_00z118;			/* 0x00z118 */
+		uint64_t	ce_lsi_lk_perf_cnt1;		/* 0x00z120 */
+		uint64_t	ce_lsi_lk_perf_cnt1_test;	/* 0x00z128 */
+		uint64_t	ce_lsi_lk_perf_cnt2;		/* 0x00z130 */
+		uint64_t	ce_lsi_lk_perf_cnt2_test;	/* 0x00z138 */
+		uint64_t	ce_pad_00z140[24];  /* 0x00z140 -- 0x00z1F8 */
+		uint64_t	ce_lsi_lk_lyr_cfg;		/* 0x00z200 */
+		uint64_t	ce_lsi_lk_lyr_status;		/* 0x00z208 */
+		uint64_t	ce_lsi_lk_lyr_int_stat;		/* 0x00z210 */
+		uint64_t	ce_lsi_lk_ly_int_stat_test;	/* 0x00z218 */
+		uint64_t	ce_lsi_lk_ly_int_stat_mask;	/* 0x00z220 */
+		uint64_t	ce_pad_00z228[3];   /* 0x00z228 -- 0x00z238 */
+		uint64_t	ce_lsi_fc_upd_ctl;		/* 0x00z240 */
+		uint64_t	ce_pad_00z248[3];   /* 0x00z248 -- 0x00z258 */
+		uint64_t	ce_lsi_flw_ctl_upd_to_timer;	/* 0x00z260 */
+		uint64_t	ce_lsi_flw_ctl_upd_timer0;	/* 0x00z268 */
+		uint64_t	ce_lsi_flw_ctl_upd_timer1;	/* 0x00z270 */
+		uint64_t	ce_pad_00z278[49];  /* 0x00z278 -- 0x00z3F8 */
+		uint64_t	ce_lsi_freq_nak_lat_thrsh;	/* 0x00z400 */
+		uint64_t	ce_lsi_ack_nak_lat_tmr;		/* 0x00z408 */
+		uint64_t	ce_lsi_rply_tmr_thr;		/* 0x00z410 */
+		uint64_t	ce_lsi_rply_tmr;		/* 0x00z418 */
+		uint64_t	ce_lsi_rply_num_stat;		/* 0x00z420 */
+		uint64_t	ce_lsi_rty_buf_max_addr;	/* 0x00z428 */
+		uint64_t	ce_lsi_rty_fifo_ptr;		/* 0x00z430 */
+		uint64_t	ce_lsi_rty_fifo_rd_wr_ptr;	/* 0x00z438 */
+		uint64_t	ce_lsi_rty_fifo_cred;		/* 0x00z440 */
+		uint64_t	ce_lsi_seq_cnt;			/* 0x00z448 */
+		uint64_t	ce_lsi_ack_sent_seq_num;	/* 0x00z450 */
+		uint64_t	ce_lsi_seq_cnt_fifo_max_addr;	/* 0x00z458 */
+		uint64_t	ce_lsi_seq_cnt_fifo_ptr;	/* 0x00z460 */
+		uint64_t	ce_lsi_seq_cnt_rd_wr_ptr;	/* 0x00z468 */
+		uint64_t	ce_lsi_tx_lk_ts_ctl;		/* 0x00z470 */
+		uint64_t	ce_pad_00z478;			/* 0x00z478 */
+		uint64_t	ce_lsi_mem_addr_ctl;		/* 0x00z480 */
+		uint64_t	ce_lsi_mem_d_ld0;		/* 0x00z488 */
+		uint64_t	ce_lsi_mem_d_ld1;		/* 0x00z490 */
+		uint64_t	ce_lsi_mem_d_ld2;		/* 0x00z498 */
+		uint64_t	ce_lsi_mem_d_ld3;		/* 0x00z4A0 */
+		uint64_t	ce_lsi_mem_d_ld4;		/* 0x00z4A8 */
+		uint64_t	ce_pad_00z4B0[2];   /* 0x00z4B0 -- 0x00z4B8 */
+		uint64_t	ce_lsi_rty_d_cnt;		/* 0x00z4C0 */
+		uint64_t	ce_lsi_seq_buf_cnt;		/* 0x00z4C8 */
+		uint64_t	ce_lsi_seq_buf_bt_d;		/* 0x00z4D0 */
+		uint64_t	ce_pad_00z4D8;			/* 0x00z4D8 */
+		uint64_t	ce_lsi_ack_lat_thr;		/* 0x00z4E0 */
+		uint64_t	ce_pad_00z4E8[3];   /* 0x00z4E8 -- 0x00z4F8 */
+		uint64_t	ce_lsi_nxt_rcv_seq_1_cntr;	/* 0x00z500 */
+		uint64_t	ce_lsi_unsp_dllp_rcvd;		/* 0x00z508 */
+		uint64_t	ce_lsi_rcv_lk_ts_ctl;		/* 0x00z510 */
+		uint64_t	ce_pad_00z518[29];  /* 0x00z518 -- 0x00z5F8 */
+		uint64_t	ce_lsi_phy_lyr_cfg;		/* 0x00z600 */
+		uint64_t	ce_pad_00z608;			/* 0x00z608 */
+		uint64_t	ce_lsi_phy_lyr_int_stat;	/* 0x00z610 */
+		uint64_t	ce_lsi_phy_lyr_int_stat_test;	/* 0x00z618 */
+		uint64_t	ce_lsi_phy_lyr_int_mask;	/* 0x00z620 */
+		uint64_t	ce_pad_00z628[11];  /* 0x00z628 -- 0x00z678 */
+		uint64_t	ce_lsi_rcv_phy_cfg;		/* 0x00z680 */
+		uint64_t	ce_lsi_rcv_phy_stat1;		/* 0x00z688 */
+		uint64_t	ce_lsi_rcv_phy_stat2;		/* 0x00z690 */
+		uint64_t	ce_lsi_rcv_phy_stat3;		/* 0x00z698 */
+		uint64_t	ce_lsi_rcv_phy_int_stat;	/* 0x00z6A0 */
+		uint64_t	ce_lsi_rcv_phy_int_stat_test;	/* 0x00z6A8 */
+		uint64_t	ce_lsi_rcv_phy_int_mask;	/* 0x00z6B0 */
+		uint64_t	ce_pad_00z6B8[9];   /* 0x00z6B8 -- 0x00z6F8 */
+		uint64_t	ce_lsi_tx_phy_cfg;		/* 0x00z700 */
+		uint64_t	ce_lsi_tx_phy_stat;		/* 0x00z708 */
+		uint64_t	ce_lsi_tx_phy_int_stat;		/* 0x00z710 */
+		uint64_t	ce_lsi_tx_phy_int_stat_test;	/* 0x00z718 */
+		uint64_t	ce_lsi_tx_phy_int_mask;		/* 0x00z720 */
+		uint64_t	ce_lsi_tx_phy_stat2;		/* 0x00z728 */
+		uint64_t	ce_pad_00z730[10];  /* 0x00z730 -- 0x00z778 */
+		uint64_t	ce_lsi_ltssm_cfg1;		/* 0x00z780 */
+		uint64_t	ce_lsi_ltssm_cfg2;		/* 0x00z788 */
+		uint64_t	ce_lsi_ltssm_cfg3;		/* 0x00z790 */
+		uint64_t	ce_lsi_ltssm_cfg4;		/* 0x00z798 */
+		uint64_t	ce_lsi_ltssm_cfg5;		/* 0x00z7A0 */
+		uint64_t	ce_lsi_ltssm_stat1;		/* 0x00z7A8 */
+		uint64_t	ce_lsi_ltssm_stat2;		/* 0x00z7B0 */
+		uint64_t	ce_lsi_ltssm_int_stat;		/* 0x00z7B8 */
+		uint64_t	ce_lsi_ltssm_int_stat_test;	/* 0x00z7C0 */
+		uint64_t	ce_lsi_ltssm_int_mask;		/* 0x00z7C8 */
+		uint64_t	ce_lsi_ltssm_stat_wr_en;	/* 0x00z7D0 */
+		uint64_t	ce_pad_00z7D8[5];   /* 0x00z7D8 -- 0x00z7F8 */
+		uint64_t	ce_lsi_gb_cfg1;			/* 0x00z800 */
+		uint64_t	ce_lsi_gb_cfg2;			/* 0x00z808 */
+		uint64_t	ce_lsi_gb_cfg3;			/* 0x00z810 */
+		uint64_t	ce_lsi_gb_cfg4;			/* 0x00z818 */
+		uint64_t	ce_lsi_gb_stat;			/* 0x00z820 */
+		uint64_t	ce_lsi_gb_int_stat;		/* 0x00z828 */
+		uint64_t	ce_lsi_gb_int_stat_test;	/* 0x00z830 */
+		uint64_t	ce_lsi_gb_int_mask;		/* 0x00z838 */
+		uint64_t	ce_lsi_gb_pwr_dn1;		/* 0x00z840 */
+		uint64_t	ce_lsi_gb_pwr_dn2;		/* 0x00z848 */
+		uint64_t	ce_pad_00z850[246]; /* 0x00z850 -- 0x00zFF8 */
+	} ce_lsi[2];
+
+	uint64_t	ce_pad_004000[10];	    /* 0x004000 -- 0x004048 */
+
+	/*
+	 * CRM: Coretalk Receive Module Registers
+	 */
+	uint64_t	ce_crm_debug_mux;			/* 0x004050 */
+	uint64_t	ce_pad_004058;				/* 0x004058 */
+	uint64_t	ce_crm_ssp_err_cmd_wrd;			/* 0x004060 */
+	uint64_t	ce_crm_ssp_err_addr;			/* 0x004068 */
+	uint64_t	ce_crm_ssp_err_syn;			/* 0x004070 */
+
+	uint64_t	ce_pad_004078[499];	    /* 0x004078 -- 0x005008 */
+
+	/*
+	 * CXM: Coretalk Xmit Module Registers
+	 */
+	uint64_t	ce_cxm_dyn_credit_status;		/* 0x005010 */
+	uint64_t	ce_cxm_last_credit_status;		/* 0x005018 */
+	uint64_t	ce_cxm_credit_limit;			/* 0x005020 */
+	uint64_t	ce_cxm_force_credit;			/* 0x005028 */
+	uint64_t	ce_cxm_disable_bypass;			/* 0x005030 */
+	uint64_t	ce_pad_005038[3];	    /* 0x005038 -- 0x005048 */
+	uint64_t	ce_cxm_debug_mux;			/* 0x005050 */
+
+	uint64_t	ce_pad_005058[501];	    /* 0x005058 -- 0x005FF8 */
+
+	/*
+	 * DTL: Downstream Transaction Layer Regs (Link#1 and Link#2)
+	 * DTL: Link#1 MMRs start at 0x006000, Link#2 MMRs at 0x008000
+	 * DTL: the comment offsets at far right: let 'y' = {6 or 8}
+	 *
+	 * UTL: Upstream Transaction Layer Regs (Link#1 and Link#2)
+	 * UTL: Link#1 MMRs start at 0x007000, Link#2 MMRs at 0x009000
+	 * UTL: the comment offsets at far right: let 'z' = {7 or 9}
+	 */
+	#define ce_dtl(link_num)	ce_dtl_utl[link_num-1]
+	#define ce_utl(link_num)	ce_dtl_utl[link_num-1]
+	struct ce_dtl_utl_reg {
+		/* DTL */
+		uint64_t	ce_dtl_dtdr_credit_limit;	/* 0x00y000 */
+		uint64_t	ce_dtl_dtdr_credit_force;	/* 0x00y008 */
+		uint64_t	ce_dtl_dyn_credit_status;	/* 0x00y010 */
+		uint64_t	ce_dtl_dtl_last_credit_stat;	/* 0x00y018 */
+		uint64_t	ce_dtl_dtl_ctrl;		/* 0x00y020 */
+		uint64_t	ce_pad_00y028[5];   /* 0x00y028 -- 0x00y048 */
+		uint64_t	ce_dtl_debug_sel;		/* 0x00y050 */
+		uint64_t	ce_pad_00y058[501]; /* 0x00y058 -- 0x00yFF8 */
+
+		/* UTL */
+		uint64_t	ce_utl_utl_ctrl;		/* 0x00z000 */
+		uint64_t	ce_utl_debug_sel;		/* 0x00z008 */
+		uint64_t	ce_pad_00z010[510]; /* 0x00z010 -- 0x00zFF8 */
+	} ce_dtl_utl[2];
+
+	uint64_t	ce_pad_00A000[514];	    /* 0x00A000 -- 0x00B008 */
+
+	/*
+	 * URE: Upstream Request Engine
+	 */
+	uint64_t	ce_ure_dyn_credit_status;		/* 0x00B010 */
+	uint64_t	ce_ure_last_credit_status;		/* 0x00B018 */
+	uint64_t	ce_ure_credit_limit;			/* 0x00B020 */
+	uint64_t	ce_pad_00B028;				/* 0x00B028 */
+	uint64_t	ce_ure_control;				/* 0x00B030 */
+	uint64_t	ce_ure_status;				/* 0x00B038 */
+	uint64_t	ce_pad_00B040[2];	    /* 0x00B040 -- 0x00B048 */
+	uint64_t	ce_ure_debug_sel;			/* 0x00B050 */
+	uint64_t	ce_ure_pcie_debug_sel;			/* 0x00B058 */
+	uint64_t	ce_ure_ssp_err_cmd_wrd;			/* 0x00B060 */
+	uint64_t	ce_ure_ssp_err_addr;			/* 0x00B068 */
+	uint64_t	ce_ure_page_map;			/* 0x00B070 */
+	uint64_t	ce_ure_dir_map[TIOCE_NUM_PORTS];	/* 0x00B078 */
+	uint64_t	ce_ure_pipe_sel1;			/* 0x00B088 */
+	uint64_t	ce_ure_pipe_mask1;			/* 0x00B090 */
+	uint64_t	ce_ure_pipe_sel2;			/* 0x00B098 */
+	uint64_t	ce_ure_pipe_mask2;			/* 0x00B0A0 */
+	uint64_t	ce_ure_pcie1_credits_sent;		/* 0x00B0A8 */
+	uint64_t	ce_ure_pcie1_credits_used;		/* 0x00B0B0 */
+	uint64_t	ce_ure_pcie1_credit_limit;		/* 0x00B0B8 */
+	uint64_t	ce_ure_pcie2_credits_sent;		/* 0x00B0C0 */
+	uint64_t	ce_ure_pcie2_credits_used;		/* 0x00B0C8 */
+	uint64_t	ce_ure_pcie2_credit_limit;		/* 0x00B0D0 */
+	uint64_t	ce_ure_pcie_force_credit;		/* 0x00B0D8 */
+	uint64_t	ce_ure_rd_tnum_val;			/* 0x00B0E0 */
+	uint64_t	ce_ure_rd_tnum_rsp_rcvd;		/* 0x00B0E8 */
+	uint64_t	ce_ure_rd_tnum_esent_timer;		/* 0x00B0F0 */
+	uint64_t	ce_ure_rd_tnum_error;			/* 0x00B0F8 */
+	uint64_t	ce_ure_rd_tnum_first_cl;		/* 0x00B100 */
+	uint64_t	ce_ure_rd_tnum_link_buf;		/* 0x00B108 */
+	uint64_t	ce_ure_wr_tnum_val;			/* 0x00B110 */
+	uint64_t	ce_ure_sram_err_addr0;			/* 0x00B118 */
+	uint64_t	ce_ure_sram_err_addr1;			/* 0x00B120 */
+	uint64_t	ce_ure_sram_err_addr2;			/* 0x00B128 */
+	uint64_t	ce_ure_sram_rd_addr0;			/* 0x00B130 */
+	uint64_t	ce_ure_sram_rd_addr1;			/* 0x00B138 */
+	uint64_t	ce_ure_sram_rd_addr2;			/* 0x00B140 */
+	uint64_t	ce_ure_sram_wr_addr0;			/* 0x00B148 */
+	uint64_t	ce_ure_sram_wr_addr1;			/* 0x00B150 */
+	uint64_t	ce_ure_sram_wr_addr2;			/* 0x00B158 */
+	uint64_t	ce_ure_buf_flush10;			/* 0x00B160 */
+	uint64_t	ce_ure_buf_flush11;			/* 0x00B168 */
+	uint64_t	ce_ure_buf_flush12;			/* 0x00B170 */
+	uint64_t	ce_ure_buf_flush13;			/* 0x00B178 */
+	uint64_t	ce_ure_buf_flush20;			/* 0x00B180 */
+	uint64_t	ce_ure_buf_flush21;			/* 0x00B188 */
+	uint64_t	ce_ure_buf_flush22;			/* 0x00B190 */
+	uint64_t	ce_ure_buf_flush23;			/* 0x00B198 */
+	uint64_t	ce_ure_pcie_control1;			/* 0x00B1A0 */
+	uint64_t	ce_ure_pcie_control2;			/* 0x00B1A8 */
+
+	uint64_t	ce_pad_00B1B0[458];	    /* 0x00B1B0 -- 0x00BFF8 */
+
+	/* Upstream Data Buffer, Port1 */
+	struct ce_ure_maint_ups_dat1_data {
+		uint64_t	data63_0[512];	    /* 0x00C000 -- 0x00CFF8 */
+		uint64_t	data127_64[512];    /* 0x00D000 -- 0x00DFF8 */
+		uint64_t	parity[512];	    /* 0x00E000 -- 0x00EFF8 */
+	} ce_ure_maint_ups_dat1;
+
+	/* Upstream Header Buffer, Port1 */
+	struct ce_ure_maint_ups_hdr1_data {
+		uint64_t	data63_0[512];	    /* 0x00F000 -- 0x00FFF8 */
+		uint64_t	data127_64[512];    /* 0x010000 -- 0x010FF8 */
+		uint64_t	parity[512];	    /* 0x011000 -- 0x011FF8 */
+	} ce_ure_maint_ups_hdr1;
+
+	/* Upstream Data Buffer, Port2 */
+	struct ce_ure_maint_ups_dat2_data {
+		uint64_t	data63_0[512];	    /* 0x012000 -- 0x012FF8 */
+		uint64_t	data127_64[512];    /* 0x013000 -- 0x013FF8 */
+		uint64_t	parity[512];	    /* 0x014000 -- 0x014FF8 */
+	} ce_ure_maint_ups_dat2;
+
+	/* Upstream Header Buffer, Port2 */
+	struct ce_ure_maint_ups_hdr2_data {
+		uint64_t	data63_0[512];	    /* 0x015000 -- 0x015FF8 */
+		uint64_t	data127_64[512];    /* 0x016000 -- 0x016FF8 */
+		uint64_t	parity[512];	    /* 0x017000 -- 0x017FF8 */
+	} ce_ure_maint_ups_hdr2;
+
+	/* Downstream Data Buffer */
+	struct ce_ure_maint_dns_dat_data {
+		uint64_t	data63_0[512];	    /* 0x018000 -- 0x018FF8 */
+		uint64_t	data127_64[512];    /* 0x019000 -- 0x019FF8 */
+		uint64_t	parity[512];	    /* 0x01A000 -- 0x01AFF8 */
+	} ce_ure_maint_dns_dat;
+
+	/* Downstream Header Buffer */
+	struct	ce_ure_maint_dns_hdr_data {
+		uint64_t	data31_0[64];	    /* 0x01B000 -- 0x01B1F8 */
+		uint64_t	data95_32[64];	    /* 0x01B200 -- 0x01B3F8 */
+		uint64_t	parity[64];	    /* 0x01B400 -- 0x01B5F8 */
+	} ce_ure_maint_dns_hdr;
+
+	/* RCI Buffer Data */
+	struct	ce_ure_maint_rci_data {
+		uint64_t	data41_0[64];	    /* 0x01B600 -- 0x01B7F8 */
+		uint64_t	data69_42[64];	    /* 0x01B800 -- 0x01B9F8 */
+	} ce_ure_maint_rci;
+
+	/* Response Queue */
+	uint64_t	ce_ure_maint_rspq[64];	    /* 0x01BA00 -- 0x01BBF8 */
+
+	uint64_t	ce_pad_01C000[4224];	    /* 0x01BC00 -- 0x023FF8 */
+
+	/* Admin Build-a-Packet Buffer */
+	struct	ce_adm_maint_bap_buf_data {
+		uint64_t	data63_0[258];	    /* 0x024000 -- 0x024808 */
+		uint64_t	data127_64[258];    /* 0x024810 -- 0x025018 */
+		uint64_t	parity[258];	    /* 0x025020 -- 0x025828 */
+	} ce_adm_maint_bap_buf;
+
+	uint64_t	ce_pad_025830[5370];	    /* 0x025830 -- 0x02FFF8 */
+
+	/* URE: 40bit PMU ATE Buffer */		    /* 0x030000 -- 0x037FF8 */
+	uint64_t	ce_ure_ate40[TIOCE_NUM_M40_ATES];
+
+	/* URE: 32/40bit PMU ATE Buffer */	    /* 0x038000 -- 0x03BFF8 */
+	uint64_t	ce_ure_ate3240[TIOCE_NUM_M3240_ATES];
+
+	uint64_t	ce_pad_03C000[2050];	    /* 0x03C000 -- 0x040008 */
+
+	/*
+	 * DRE: Downstream Request Engine
+	 */
+	uint64_t	ce_dre_dyn_credit_status1;		/* 0x040010 */
+	uint64_t	ce_dre_dyn_credit_status2;		/* 0x040018 */
+	uint64_t	ce_dre_last_credit_status1;		/* 0x040020 */
+	uint64_t	ce_dre_last_credit_status2;		/* 0x040028 */
+	uint64_t	ce_dre_credit_limit1;			/* 0x040030 */
+	uint64_t	ce_dre_credit_limit2;			/* 0x040038 */
+	uint64_t	ce_dre_force_credit1;			/* 0x040040 */
+	uint64_t	ce_dre_force_credit2;			/* 0x040048 */
+	uint64_t	ce_dre_debug_mux1;			/* 0x040050 */
+	uint64_t	ce_dre_debug_mux2;			/* 0x040058 */
+	uint64_t	ce_dre_ssp_err_cmd_wrd;			/* 0x040060 */
+	uint64_t	ce_dre_ssp_err_addr;			/* 0x040068 */
+	uint64_t	ce_dre_comp_err_cmd_wrd;		/* 0x040070 */
+	uint64_t	ce_dre_comp_err_addr;			/* 0x040078 */
+	uint64_t	ce_dre_req_status;			/* 0x040080 */
+	uint64_t	ce_dre_config1;				/* 0x040088 */
+	uint64_t	ce_dre_config2;				/* 0x040090 */
+	uint64_t	ce_dre_config_req_status;		/* 0x040098 */
+	uint64_t	ce_pad_0400A0[12];	    /* 0x0400A0 -- 0x0400F8 */
+	uint64_t	ce_dre_dyn_fifo;			/* 0x040100 */
+	uint64_t	ce_pad_040108[3];	    /* 0x040108 -- 0x040118 */
+	uint64_t	ce_dre_last_fifo;			/* 0x040120 */
+
+	uint64_t	ce_pad_040128[27];	    /* 0x040128 -- 0x0401F8 */
+
+	/* DRE Downstream Head Queue */
+	struct	ce_dre_maint_ds_head_queue {
+		uint64_t	data63_0[32];	    /* 0x040200 -- 0x0402F8 */
+		uint64_t	data127_64[32];	    /* 0x040300 -- 0x0403F8 */
+		uint64_t	parity[32];	    /* 0x040400 -- 0x0404F8 */
+	} ce_dre_maint_ds_head_q;
+
+	uint64_t	ce_pad_040500[352];	    /* 0x040500 -- 0x040FF8 */
+
+	/* DRE Downstream Data Queue */
+	struct	ce_dre_maint_ds_data_queue {
+		uint64_t	data63_0[256];	    /* 0x041000 -- 0x0417F8 */
+		uint64_t	ce_pad_041800[256]; /* 0x041800 -- 0x041FF8 */
+		uint64_t	data127_64[256];    /* 0x042000 -- 0x0427F8 */
+		uint64_t	ce_pad_042800[256]; /* 0x042800 -- 0x042FF8 */
+		uint64_t	parity[256];	    /* 0x043000 -- 0x0437F8 */
+		uint64_t	ce_pad_043800[256]; /* 0x043800 -- 0x043FF8 */
+	} ce_dre_maint_ds_data_q;
+
+	/* DRE URE Upstream Response Queue */
+	struct	ce_dre_maint_ure_us_rsp_queue {
+		uint64_t	data63_0[8];	    /* 0x044000 -- 0x044038 */
+		uint64_t	ce_pad_044040[24];  /* 0x044040 -- 0x0440F8 */
+		uint64_t	data127_64[8];      /* 0x044100 -- 0x044138 */
+		uint64_t	ce_pad_044140[24];  /* 0x044140 -- 0x0441F8 */
+		uint64_t	parity[8];	    /* 0x044200 -- 0x044238 */
+		uint64_t	ce_pad_044240[24];  /* 0x044240 -- 0x0442F8 */
+	} ce_dre_maint_ure_us_rsp_q;
+
+	uint64_t 	ce_dre_maint_us_wrt_rsp[32];/* 0x044300 -- 0x0443F8 */
+
+	uint64_t	ce_end_of_struct;			/* 0x044400 */
+} tioce_t;
+
+
+/* ce_adm_int_mask/ce_adm_int_status register bit defines */
+#define CE_ADM_INT_CE_ERROR_SHFT		0
+#define CE_ADM_INT_LSI1_IP_ERROR_SHFT		1
+#define CE_ADM_INT_LSI2_IP_ERROR_SHFT		2
+#define CE_ADM_INT_PCIE_ERROR_SHFT		3
+#define CE_ADM_INT_PORT1_HOTPLUG_EVENT_SHFT	4
+#define CE_ADM_INT_PORT2_HOTPLUG_EVENT_SHFT	5
+#define CE_ADM_INT_PCIE_PORT1_DEV_A_SHFT	6
+#define CE_ADM_INT_PCIE_PORT1_DEV_B_SHFT	7
+#define CE_ADM_INT_PCIE_PORT1_DEV_C_SHFT	8
+#define CE_ADM_INT_PCIE_PORT1_DEV_D_SHFT	9
+#define CE_ADM_INT_PCIE_PORT2_DEV_A_SHFT	10
+#define CE_ADM_INT_PCIE_PORT2_DEV_B_SHFT	11
+#define CE_ADM_INT_PCIE_PORT2_DEV_C_SHFT	12
+#define CE_ADM_INT_PCIE_PORT2_DEV_D_SHFT	13
+#define CE_ADM_INT_PCIE_MSG_SHFT		14 /* see int_dest_14 */
+#define CE_ADM_INT_PCIE_MSG_SLOT_0_SHFT		14
+#define CE_ADM_INT_PCIE_MSG_SLOT_1_SHFT		15
+#define CE_ADM_INT_PCIE_MSG_SLOT_2_SHFT		16
+#define CE_ADM_INT_PCIE_MSG_SLOT_3_SHFT		17
+#define CE_ADM_INT_PORT1_PM_PME_MSG_SHFT	22
+#define CE_ADM_INT_PORT2_PM_PME_MSG_SHFT	23
+
+/* ce_adm_force_int register bit defines */
+#define CE_ADM_FORCE_INT_PCIE_PORT1_DEV_A_SHFT	0
+#define CE_ADM_FORCE_INT_PCIE_PORT1_DEV_B_SHFT	1
+#define CE_ADM_FORCE_INT_PCIE_PORT1_DEV_C_SHFT	2
+#define CE_ADM_FORCE_INT_PCIE_PORT1_DEV_D_SHFT	3
+#define CE_ADM_FORCE_INT_PCIE_PORT2_DEV_A_SHFT	4
+#define CE_ADM_FORCE_INT_PCIE_PORT2_DEV_B_SHFT	5
+#define CE_ADM_FORCE_INT_PCIE_PORT2_DEV_C_SHFT	6
+#define CE_ADM_FORCE_INT_PCIE_PORT2_DEV_D_SHFT	7
+#define CE_ADM_FORCE_INT_ALWAYS_SHFT		8
+
+/* ce_adm_int_dest register bit masks & shifts */
+#define INTR_VECTOR_SHFT			56
+
+/* ce_adm_error_mask and ce_adm_error_summary register bit masks */
+#define CE_ADM_ERR_CRM_SSP_REQ_INVALID			(0x1ULL <<  0)
+#define CE_ADM_ERR_SSP_REQ_HEADER			(0x1ULL <<  1)
+#define CE_ADM_ERR_SSP_RSP_HEADER			(0x1ULL <<  2)
+#define CE_ADM_ERR_SSP_PROTOCOL_ERROR			(0x1ULL <<  3)
+#define CE_ADM_ERR_SSP_SBE				(0x1ULL <<  4)
+#define CE_ADM_ERR_SSP_MBE				(0x1ULL <<  5)
+#define CE_ADM_ERR_CXM_CREDIT_OFLOW			(0x1ULL <<  6)
+#define CE_ADM_ERR_DRE_SSP_REQ_INVAL			(0x1ULL <<  7)
+#define CE_ADM_ERR_SSP_REQ_LONG				(0x1ULL <<  8)
+#define CE_ADM_ERR_SSP_REQ_OFLOW			(0x1ULL <<  9)
+#define CE_ADM_ERR_SSP_REQ_SHORT			(0x1ULL << 10)
+#define CE_ADM_ERR_SSP_REQ_SIDEBAND			(0x1ULL << 11)
+#define CE_ADM_ERR_SSP_REQ_ADDR_ERR			(0x1ULL << 12)
+#define CE_ADM_ERR_SSP_REQ_BAD_BE			(0x1ULL << 13)
+#define CE_ADM_ERR_PCIE_COMPL_TIMEOUT			(0x1ULL << 14)
+#define CE_ADM_ERR_PCIE_UNEXP_COMPL			(0x1ULL << 15)
+#define CE_ADM_ERR_PCIE_ERR_COMPL			(0x1ULL << 16)
+#define CE_ADM_ERR_DRE_CREDIT_OFLOW			(0x1ULL << 17)
+#define CE_ADM_ERR_DRE_SRAM_PE				(0x1ULL << 18)
+#define CE_ADM_ERR_SSP_RSP_INVALID			(0x1ULL << 19)
+#define CE_ADM_ERR_SSP_RSP_LONG				(0x1ULL << 20)
+#define CE_ADM_ERR_SSP_RSP_SHORT			(0x1ULL << 21)
+#define CE_ADM_ERR_SSP_RSP_SIDEBAND			(0x1ULL << 22)
+#define CE_ADM_ERR_URE_SSP_RSP_UNEXP			(0x1ULL << 23)
+#define CE_ADM_ERR_URE_SSP_WR_REQ_TIMEOUT		(0x1ULL << 24)
+#define CE_ADM_ERR_URE_SSP_RD_REQ_TIMEOUT		(0x1ULL << 25)
+#define CE_ADM_ERR_URE_ATE3240_PAGE_FAULT		(0x1ULL << 26)
+#define CE_ADM_ERR_URE_ATE40_PAGE_FAULT			(0x1ULL << 27)
+#define CE_ADM_ERR_URE_CREDIT_OFLOW			(0x1ULL << 28)
+#define CE_ADM_ERR_URE_SRAM_PE				(0x1ULL << 29)
+#define CE_ADM_ERR_ADM_SSP_RSP_UNEXP			(0x1ULL << 30)
+#define CE_ADM_ERR_ADM_SSP_REQ_TIMEOUT			(0x1ULL << 31)
+#define CE_ADM_ERR_MMR_ACCESS_ERROR			(0x1ULL << 32)
+#define CE_ADM_ERR_MMR_ADDR_ERROR			(0x1ULL << 33)
+#define CE_ADM_ERR_ADM_CREDIT_OFLOW			(0x1ULL << 34)
+#define CE_ADM_ERR_ADM_SRAM_PE				(0x1ULL << 35)
+#define CE_ADM_ERR_DTL1_MIN_PDATA_CREDIT_ERR		(0x1ULL << 36)
+#define CE_ADM_ERR_DTL1_INF_COMPL_CRED_UPDT_ERR		(0x1ULL << 37)
+#define CE_ADM_ERR_DTL1_INF_POSTED_CRED_UPDT_ERR	(0x1ULL << 38)
+#define CE_ADM_ERR_DTL1_INF_NPOSTED_CRED_UPDT_ERR	(0x1ULL << 39)
+#define CE_ADM_ERR_DTL1_COMP_HD_CRED_MAX_ERR		(0x1ULL << 40)
+#define CE_ADM_ERR_DTL1_COMP_D_CRED_MAX_ERR		(0x1ULL << 41)
+#define CE_ADM_ERR_DTL1_NPOSTED_HD_CRED_MAX_ERR		(0x1ULL << 42)
+#define CE_ADM_ERR_DTL1_NPOSTED_D_CRED_MAX_ERR		(0x1ULL << 43)
+#define CE_ADM_ERR_DTL1_POSTED_HD_CRED_MAX_ERR		(0x1ULL << 44)
+#define CE_ADM_ERR_DTL1_POSTED_D_CRED_MAX_ERR		(0x1ULL << 45)
+#define CE_ADM_ERR_DTL2_MIN_PDATA_CREDIT_ERR		(0x1ULL << 46)
+#define CE_ADM_ERR_DTL2_INF_COMPL_CRED_UPDT_ERR		(0x1ULL << 47)
+#define CE_ADM_ERR_DTL2_INF_POSTED_CRED_UPDT_ERR	(0x1ULL << 48)
+#define CE_ADM_ERR_DTL2_INF_NPOSTED_CRED_UPDT_ERR	(0x1ULL << 49)
+#define CE_ADM_ERR_DTL2_COMP_HD_CRED_MAX_ERR		(0x1ULL << 50)
+#define CE_ADM_ERR_DTL2_COMP_D_CRED_MAX_ERR		(0x1ULL << 51)
+#define CE_ADM_ERR_DTL2_NPOSTED_HD_CRED_MAX_ERR		(0x1ULL << 52)
+#define CE_ADM_ERR_DTL2_NPOSTED_D_CRED_MAX_ERR		(0x1ULL << 53)
+#define CE_ADM_ERR_DTL2_POSTED_HD_CRED_MAX_ERR		(0x1ULL << 54)
+#define CE_ADM_ERR_DTL2_POSTED_D_CRED_MAX_ERR		(0x1ULL << 55)
+#define CE_ADM_ERR_PORT1_PCIE_COR_ERR			(0x1ULL << 56)
+#define CE_ADM_ERR_PORT1_PCIE_NFAT_ERR			(0x1ULL << 57)
+#define CE_ADM_ERR_PORT1_PCIE_FAT_ERR			(0x1ULL << 58)
+#define CE_ADM_ERR_PORT2_PCIE_COR_ERR			(0x1ULL << 59)
+#define CE_ADM_ERR_PORT2_PCIE_NFAT_ERR			(0x1ULL << 60)
+#define CE_ADM_ERR_PORT2_PCIE_FAT_ERR			(0x1ULL << 61)
+
+/* ce_adm_ure_ups_buf_barrier_flush register bit masks and shifts */
+#define FLUSH_SEL_PORT1_PIPE0_SHFT	0
+#define FLUSH_SEL_PORT1_PIPE1_SHFT	4
+#define FLUSH_SEL_PORT1_PIPE2_SHFT	8
+#define FLUSH_SEL_PORT1_PIPE3_SHFT	12
+#define FLUSH_SEL_PORT2_PIPE0_SHFT	16
+#define FLUSH_SEL_PORT2_PIPE1_SHFT	20
+#define FLUSH_SEL_PORT2_PIPE2_SHFT	24
+#define FLUSH_SEL_PORT2_PIPE3_SHFT	28
+
+/* ce_dre_config1 register bit masks and shifts */
+#define CE_DRE_RO_ENABLE		(0x1ULL << 0)
+#define CE_DRE_DYN_RO_ENABLE		(0x1ULL << 1)
+#define CE_DRE_SUP_CONFIG_COMP_ERROR	(0x1ULL << 2)
+#define CE_DRE_SUP_IO_COMP_ERROR	(0x1ULL << 3)
+#define CE_DRE_ADDR_MODE_SHFT		4
+
+/* ce_dre_config_req_status register bit masks */
+#define CE_DRE_LAST_CONFIG_COMPLETION	(0x7ULL << 0)
+#define CE_DRE_DOWNSTREAM_CONFIG_ERROR	(0x1ULL << 3)
+#define CE_DRE_CONFIG_COMPLETION_VALID	(0x1ULL << 4)
+#define CE_DRE_CONFIG_REQUEST_ACTIVE	(0x1ULL << 5)
+
+/* ce_ure_control register bit masks & shifts */
+#define CE_URE_RD_MRG_ENABLE		(0x1ULL << 0)
+#define CE_URE_WRT_MRG_ENABLE1		(0x1ULL << 4)
+#define CE_URE_WRT_MRG_ENABLE2		(0x1ULL << 5)
+#define CE_URE_RSPQ_BYPASS_DISABLE	(0x1ULL << 24)
+#define CE_URE_UPS_DAT1_PAR_DISABLE	(0x1ULL << 32)
+#define CE_URE_UPS_HDR1_PAR_DISABLE	(0x1ULL << 33)
+#define CE_URE_UPS_DAT2_PAR_DISABLE	(0x1ULL << 34)
+#define CE_URE_UPS_HDR2_PAR_DISABLE	(0x1ULL << 35)
+#define CE_URE_ATE_PAR_DISABLE		(0x1ULL << 36)
+#define CE_URE_RCI_PAR_DISABLE		(0x1ULL << 37)
+#define CE_URE_RSPQ_PAR_DISABLE		(0x1ULL << 38)
+#define CE_URE_DNS_DAT_PAR_DISABLE	(0x1ULL << 39)
+#define CE_URE_DNS_HDR_PAR_DISABLE	(0x1ULL << 40)
+#define CE_URE_MALFORM_DISABLE		(0x1ULL << 44)
+#define CE_URE_UNSUP_DISABLE		(0x1ULL << 45)
+
+/* ce_ure_page_map register bit masks & shifts */
+#define CE_URE_ATE3240_ENABLE		(0x1ULL << 0)
+#define CE_URE_ATE40_ENABLE 		(0x1ULL << 1)
+#define CE_URE_PAGESIZE_SHFT		4
+#define CE_URE_PAGESIZE_MASK		(0x7ULL << CE_URE_PAGESIZE_SHFT)
+#define CE_URE_4K_PAGESIZE		(0x0ULL << CE_URE_PAGESIZE_SHFT)
+#define CE_URE_16K_PAGESIZE		(0x1ULL << CE_URE_PAGESIZE_SHFT)
+#define CE_URE_64K_PAGESIZE		(0x2ULL << CE_URE_PAGESIZE_SHFT)
+#define CE_URE_128K_PAGESIZE		(0x3ULL << CE_URE_PAGESIZE_SHFT)
+#define CE_URE_256K_PAGESIZE		(0x4ULL << CE_URE_PAGESIZE_SHFT)
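+
+/*
+ * Example (sketch): a provider could program the 32/40bit ATE map for
+ * 256KB pages by writing
+ *	CE_URE_ATE3240_ENABLE | CE_URE_256K_PAGESIZE
+ * to the ce_ure_page_map MMR.
+ */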
+
+/* ce_ure_pipe_sel register bit masks & shifts */
+#define PKT_TRAFIC_SHRT			16
+#define BUS_SRC_ID_SHFT			8
+#define DEV_SRC_ID_SHFT			3
+#define FNC_SRC_ID_SHFT			0
+#define CE_URE_TC_MASK			(0x07ULL << PKT_TRAFIC_SHRT)
+#define CE_URE_BUS_MASK			(0xFFULL << BUS_SRC_ID_SHFT)
+#define CE_URE_DEV_MASK			(0x1FULL << DEV_SRC_ID_SHFT)
+#define CE_URE_FNC_MASK			(0x07ULL << FNC_SRC_ID_SHFT)
+#define CE_URE_PIPE_BUS(b)		(((uint64_t)(b) << BUS_SRC_ID_SHFT) & \
+					 CE_URE_BUS_MASK)
+#define CE_URE_PIPE_DEV(d)		(((uint64_t)(d) << DEV_SRC_ID_SHFT) & \
+					 CE_URE_DEV_MASK)
+#define CE_URE_PIPE_FNC(f)		(((uint64_t)(f) << FNC_SRC_ID_SHFT) & \
+					 CE_URE_FNC_MASK)
+
+#define CE_URE_SEL1_SHFT		0
+#define CE_URE_SEL2_SHFT		20
+#define CE_URE_SEL3_SHFT		40
+#define CE_URE_SEL1_MASK		(0x7FFFFULL << CE_URE_SEL1_SHFT)
+#define CE_URE_SEL2_MASK		(0x7FFFFULL << CE_URE_SEL2_SHFT)
+#define CE_URE_SEL3_MASK		(0x7FFFFULL << CE_URE_SEL3_SHFT)
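+
+/*
+ * Example (sketch): a pipe select entry for a PCI source would be
+ * composed as
+ *	CE_URE_PIPE_BUS(bus) | CE_URE_PIPE_DEV(dev) | CE_URE_PIPE_FNC(fnc)
+ * and then shifted into one of the SEL1/SEL2/SEL3 fields above.
+ */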
+
+
+/* ce_ure_pipe_mask register bit masks & shifts */
+#define CE_URE_MASK1_SHFT		0
+#define CE_URE_MASK2_SHFT		20
+#define CE_URE_MASK3_SHFT		40
+#define CE_URE_MASK1_MASK		(0x7FFFFULL << CE_URE_MASK1_SHFT)
+#define CE_URE_MASK2_MASK		(0x7FFFFULL << CE_URE_MASK2_SHFT)
+#define CE_URE_MASK3_MASK		(0x7FFFFULL << CE_URE_MASK3_SHFT)
+
+
+/* ce_ure_pcie_control1 register bit masks & shifts */
+#define CE_URE_SI			(0x1ULL << 0)
+#define CE_URE_ELAL_SHFT		4
+#define CE_URE_ELAL_MASK		(0x7ULL << CE_URE_ELAL_SHFT)
+#define CE_URE_ELAL1_SHFT		8
+#define CE_URE_ELAL1_MASK		(0x7ULL << CE_URE_ELAL1_SHFT)
+#define CE_URE_SCC			(0x1ULL << 12)
+#define CE_URE_PN1_SHFT			16
+#define CE_URE_PN1_MASK			(0xFFULL << CE_URE_PN1_SHFT)
+#define CE_URE_PN2_SHFT			24
+#define CE_URE_PN2_MASK			(0xFFULL << CE_URE_PN2_SHFT)
+#define CE_URE_PN1_SET(n)		(((uint64_t)(n) << CE_URE_PN1_SHFT) & \
+					 CE_URE_PN1_MASK)
+#define CE_URE_PN2_SET(n)		(((uint64_t)(n) << CE_URE_PN2_SHFT) & \
+					 CE_URE_PN2_MASK)
+
+/* ce_ure_pcie_control2 register bit masks & shifts */
+#define CE_URE_ABP			(0x1ULL << 0)
+#define CE_URE_PCP			(0x1ULL << 1)
+#define CE_URE_MSP			(0x1ULL << 2)
+#define CE_URE_AIP			(0x1ULL << 3)
+#define CE_URE_PIP			(0x1ULL << 4)
+#define CE_URE_HPS			(0x1ULL << 5)
+#define CE_URE_HPC			(0x1ULL << 6)
+#define CE_URE_SPLV_SHFT		7
+#define CE_URE_SPLV_MASK		(0xFFULL << CE_URE_SPLV_SHFT)
+#define CE_URE_SPLS_SHFT		15
+#define CE_URE_SPLS_MASK		(0x3ULL << CE_URE_SPLS_SHFT)
+#define CE_URE_PSN1_SHFT		19
+#define CE_URE_PSN1_MASK		(0x1FFFULL << CE_URE_PSN1_SHFT)
+#define CE_URE_PSN2_SHFT		32
+#define CE_URE_PSN2_MASK		(0x1FFFULL << CE_URE_PSN2_SHFT)
+#define CE_URE_PSN1_SET(n)		(((uint64_t)(n) << CE_URE_PSN1_SHFT) & \
+					 CE_URE_PSN1_MASK)
+#define CE_URE_PSN2_SET(n)		(((uint64_t)(n) << CE_URE_PSN2_SHFT) & \
+					 CE_URE_PSN2_MASK)
+
+/*
+ * PIO address space ranges for CE
+ */
+
+/* Local CE Registers Space */
+#define CE_PIO_MMR			0x00000000
+#define CE_PIO_MMR_LEN			0x04000000
+
+/* PCI Compatible Config Space */
+#define CE_PIO_CONFIG_SPACE		0x04000000
+#define CE_PIO_CONFIG_SPACE_LEN		0x04000000
+
+/* PCI I/O Space Alias */
+#define CE_PIO_IO_SPACE_ALIAS		0x08000000
+#define CE_PIO_IO_SPACE_ALIAS_LEN	0x08000000
+
+/* PCI Enhanced Config Space */
+#define CE_PIO_E_CONFIG_SPACE		0x10000000
+#define CE_PIO_E_CONFIG_SPACE_LEN	0x10000000
+
+/* PCI I/O Space */
+#define CE_PIO_IO_SPACE			0x100000000
+#define CE_PIO_IO_SPACE_LEN		0x100000000
+
+/* PCI MEM Space */
+#define CE_PIO_MEM_SPACE		0x200000000
+#define CE_PIO_MEM_SPACE_LEN		TIO_HWIN_SIZE
+
+
+/*
+ * CE PCI Enhanced Config Space shifts & masks
+ */
+#define CE_E_CONFIG_BUS_SHFT		20
+#define CE_E_CONFIG_BUS_MASK		(0xFF << CE_E_CONFIG_BUS_SHFT)
+#define CE_E_CONFIG_DEVICE_SHFT		15
+#define CE_E_CONFIG_DEVICE_MASK		(0x1F << CE_E_CONFIG_DEVICE_SHFT)
+#define CE_E_CONFIG_FUNC_SHFT		12
+#define CE_E_CONFIG_FUNC_MASK		(0x7  << CE_E_CONFIG_FUNC_SHFT)
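+
+/*
+ * Example (sketch): an enhanced config space offset for (bus, dev, fn)
+ * would be built as
+ *	CE_PIO_E_CONFIG_SPACE +
+ *	(((bus) << CE_E_CONFIG_BUS_SHFT) & CE_E_CONFIG_BUS_MASK) +
+ *	(((dev) << CE_E_CONFIG_DEVICE_SHFT) & CE_E_CONFIG_DEVICE_MASK) +
+ *	(((fn)  << CE_E_CONFIG_FUNC_SHFT) & CE_E_CONFIG_FUNC_MASK)
+ */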
+
+#endif /* __ASM_IA64_SN_TIOCE_H__ */
diff -puN /dev/null include/asm-ia64/sn/tioce_provider.h
--- /dev/null	2004-08-10 19:55:00.000000000 -0600
+++ 25-akpm/include/asm-ia64/sn/tioce_provider.h	2005-09-01 05:34:24.000000000 -0600
@@ -0,0 +1,66 @@
+/**************************************************************************
+ *             Copyright (C) 2005, Silicon Graphics, Inc.                 *
+ *                                                                        *
+ *  These coded instructions, statements, and computer programs contain   *
+ *  unpublished proprietary information of Silicon Graphics, Inc., and    *
+ *  are protected by Federal copyright law.  They may not be disclosed    *
+ *  to third parties or copied or duplicated in any form, in whole or     *
+ *  in part, without the prior written consent of Silicon Graphics, Inc.  *
+ *                                                                        *
+ **************************************************************************/
+
+#ifndef _ASM_IA64_SN_CE_PROVIDER_H
+#define _ASM_IA64_SN_CE_PROVIDER_H
+
+#include <asm/sn/pcibus_provider_defs.h>
+#include <asm/sn/tioce.h>
+
+/*
+ * Common TIOCE structure shared between the prom and kernel
+ *
+ * DO NOT CHANGE THIS STRUCT WITHOUT MAKING CORRESPONDING CHANGES TO THE
+ * PROM VERSION.
+ */
+struct tioce_common {
+	struct pcibus_bussoft	ce_pcibus;	/* common pciio header */
+
+	uint32_t		ce_rev;
+	uint64_t		ce_kernel_private;
+	uint64_t		ce_prom_private;
+};
+
+struct tioce_kernel {
+	struct tioce_common	*ce_common;
+	spinlock_t		ce_lock;
+	struct list_head	ce_dmamap_list;
+
+	uint64_t		ce_ate40_shadow[TIOCE_NUM_M40_ATES];
+	uint64_t		ce_ate3240_shadow[TIOCE_NUM_M3240_ATES];
+	uint32_t		ce_ate3240_pagesize;
+
+	uint8_t			ce_port1_secondary;
+
+	/* per-port resources */
+	struct {
+		int 		dirmap_refcnt;
+		uint64_t	dirmap_shadow;
+	} ce_port[TIOCE_NUM_PORTS];
+};
+
+struct tioce_dmamap {
+	struct list_head	ce_dmamap_list;	/* headed by tioce_kernel */
+	uint32_t		refcnt;
+
+	uint64_t		nbytes;		/* # bytes mapped */
+
+	uint64_t		ct_start;	/* coretalk start address */
+	uint64_t		pci_start;	/* bus start address */
+
+	uint64_t		*ate_hw;	/* hw ptr of first ate in map */
+	uint64_t		*ate_shadow;	/* shadow ptr of first ate */
+	uint16_t		ate_count;	/* # ate's in the map */
+};
+
+extern int tioce_init_provider(void);
+
+#endif  /* _ASM_IA64_SN_CE_PROVIDER_H */
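
The ce_dmamap_list / tioce_dmamap pair above is what lets the provider
reuse ATE ranges: before allocating new map entries, it can scan the list
for an existing map whose coretalk range already covers a request and just
bump its refcnt.  A sketch of that lookup, assuming ce_lock is held by the
caller; "tioce_scan_dmamaps" is a hypothetical name for illustration (the
in-tree logic is in arch/ia64/sn/pci/tioce_provider.c):

	/* Sketch: return a bus address for [ct, ct+len) if an existing
	 * dmamap covers it, or 0 if no map matches. */
	static uint64_t
	tioce_scan_dmamaps(struct tioce_kernel *ce, uint64_t ct, uint64_t len)
	{
		struct tioce_dmamap *map;

		list_for_each_entry(map, &ce->ce_dmamap_list, ce_dmamap_list) {
			if (ct >= map->ct_start &&
			    ct + len <= map->ct_start + map->nbytes) {
				map->refcnt++;
				return map->pci_start + (ct - map->ct_start);
			}
		}
		return 0;	/* caller must set up new ATEs */
	}
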
diff -puN include/asm-ia64/spinlock.h~git-ia64 include/asm-ia64/spinlock.h
--- 25/include/asm-ia64/spinlock.h~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/include/asm-ia64/spinlock.h	2005-09-01 05:34:24.000000000 -0600
@@ -93,7 +93,16 @@ _raw_spin_lock_flags (spinlock_t *lock, 
 # endif /* CONFIG_MCKINLEY */
 #endif
 }
+
 #define _raw_spin_lock(lock) _raw_spin_lock_flags(lock, 0)
+
+/* Unlock by doing an ordered store and releasing the cacheline with nta */
+static inline void _raw_spin_unlock(spinlock_t *x)
+{
+	barrier();
+	asm volatile ("st4.rel.nta [%0] = r0\n\t" :: "r"(x));
+}
+
 #else /* !ASM_SUPPORTED */
 #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
 # define _raw_spin_lock(x)								\
@@ -109,16 +117,17 @@ do {											\
 		} while (ia64_spinlock_val);						\
 	}										\
 } while (0)
+#define _raw_spin_unlock(x)	do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
 #endif /* !ASM_SUPPORTED */
 
 #define spin_is_locked(x)	((x)->lock != 0)
-#define _raw_spin_unlock(x)	do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
 #define _raw_spin_trylock(x)	(cmpxchg_acq(&(x)->lock, 0, 1) == 0)
 #define spin_unlock_wait(x)	do { barrier(); } while ((x)->lock)
 
 typedef struct {
-	volatile unsigned int read_counter	: 31;
-	volatile unsigned int write_lock	:  1;
+	volatile unsigned int read_counter	: 24;
+	volatile unsigned int write_lock	:  8;
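+	/* write_lock now fills a whole byte so a single st1.rel can clear it */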
 #ifdef CONFIG_PREEMPT
 	unsigned int break_lock;
 #endif
@@ -174,6 +182,14 @@ do {										\
 	(result == 0);								\
 })
 
+static inline void _raw_write_unlock(rwlock_t *x)
+{
+	u8 *y = (u8 *)x;
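+	/* write_lock sits in byte 3 of the little-endian word, hence y + 3 */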
+	barrier();
+	asm volatile ("st1.rel.nta [%0] = r0\n\t" :: "r"(y+3) : "memory");
+}
+
 #else /* !ASM_SUPPORTED */
 
 #define _raw_write_lock(l)								\
@@ -195,14 +210,14 @@ do {										\
 	(ia64_val == 0);						\
 })
 
+static inline void _raw_write_unlock(rwlock_t *x)
+{
+	barrier();
+	x->write_lock = 0;
+}
+
 #endif /* !ASM_SUPPORTED */
 
 #define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
 
-#define _raw_write_unlock(x)								\
-({											\
-	smp_mb__before_clear_bit();	/* need barrier before releasing lock... */	\
-	clear_bit(31, (x));								\
-})
-
 #endif /*  _ASM_IA64_SPINLOCK_H */
diff -puN include/asm-ia64/system.h~git-ia64 include/asm-ia64/system.h
--- 25/include/asm-ia64/system.h~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/include/asm-ia64/system.h	2005-09-01 05:34:24.000000000 -0600
@@ -19,12 +19,14 @@
 #include <asm/pal.h>
 #include <asm/percpu.h>
 
-#define GATE_ADDR		__IA64_UL_CONST(0xa000000000000000)
+#define GATE_ADDR		RGN_BASE(RGN_GATE)
+
 /*
  * 0xa000000000000000+2*PERCPU_PAGE_SIZE
  * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page)
  */
-#define KERNEL_START		 __IA64_UL_CONST(0xa000000100000000)
+#define KERNEL_START		 (GATE_ADDR+0x100000000)
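+/* RGN_BASE(RGN_GATE) == 0xa000000000000000: both values match the old literals */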
 #define PERCPU_ADDR		(-PERCPU_PAGE_SIZE)
 
 #ifndef __ASSEMBLY__
diff -puN include/asm-ia64/thread_info.h~git-ia64 include/asm-ia64/thread_info.h
--- 25/include/asm-ia64/thread_info.h~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/include/asm-ia64/thread_info.h	2005-09-01 05:34:24.000000000 -0600
@@ -76,6 +76,7 @@ struct thread_info {
 #define TIF_SIGDELAYED		5	/* signal delayed from MCA/INIT/NMI/PMI context */
 #define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE		17
+#define TIF_MCA_INIT		18	/* this task is processing MCA or INIT */
 
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
@@ -85,6 +86,7 @@ struct thread_info {
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
 #define _TIF_SIGDELAYED	(1 << TIF_SIGDELAYED)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
+#define _TIF_MCA_INIT		(1 << TIF_MCA_INIT)
 
 /* "work to do on user-return" bits */
 #define TIF_ALLWORK_MASK	(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SIGDELAYED)
diff -puN include/asm-ia64/unwind.h~git-ia64 include/asm-ia64/unwind.h
--- 25/include/asm-ia64/unwind.h~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/include/asm-ia64/unwind.h	2005-09-01 05:34:24.000000000 -0600
@@ -114,13 +114,6 @@ extern void unw_remove_unwind_table (voi
  */
 extern void unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t);
 
-/*
- * Prepare to unwind from interruption.  The pt-regs and switch-stack structures must
- * be "adjacent" (no state modifications between pt-regs and switch-stack).
- */
-extern void unw_init_from_interruption (struct unw_frame_info *info, struct task_struct *t,
-					struct pt_regs *pt, struct switch_stack *sw);
-
 extern void unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t,
 				 struct switch_stack *sw);
 
diff -puN include/linux/sched.h~git-ia64 include/linux/sched.h
--- 25/include/linux/sched.h~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/include/linux/sched.h	2005-09-01 05:34:24.000000000 -0600
@@ -883,6 +883,8 @@ extern int task_curr(const task_t *p);
 extern int idle_cpu(int cpu);
 extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
 extern task_t *idle_task(int cpu);
+extern task_t *curr_task(int cpu);
+extern void set_curr_task(int cpu, task_t *p);
 
 void yield(void);
 
diff -puN kernel/sched.c~git-ia64 kernel/sched.c
--- 25/kernel/sched.c~git-ia64	2005-09-01 05:34:24.000000000 -0600
+++ 25-akpm/kernel/sched.c	2005-09-01 05:34:24.000000000 -0600
@@ -3471,6 +3471,45 @@ task_t *idle_task(int cpu)
 }
 
 /**
+ * curr_task - return the current task for a given cpu.
+ * @cpu: the processor in question.
+ */
+task_t *curr_task(int cpu)
+{
+	return cpu_curr(cpu);
+}
+EXPORT_SYMBOL_GPL(curr_task);
+
+/**
+ * set_curr_task - set the current task for a given cpu.
+ * @cpu: the processor in question.
+ * @p: the task pointer to set.
+ *
+ * Description: This function must only be used when non-maskable interrupts
+ * are serviced on a separate stack.  It allows the architecture to switch the
+ * notion of the current task on a cpu in a non-blocking manner.  This function
+ * must be called with interrupts disabled, the caller must save the original
+ * value of the current task (see curr_task() above) and restore that value
+ * before reenabling interrupts.
+ */
+void set_curr_task(int cpu, task_t *p)
+{
+	cpu_curr(cpu) = p;
+}
+EXPORT_SYMBOL_GPL(set_curr_task);
+
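+/*
+ * Usage sketch (hypothetical caller, not part of this patch): an MCA/INIT
+ * handler running on a separate stack, with interrupts disabled throughout,
+ * would bracket its work like so:
+ *
+ *	task_t *prev = curr_task(cpu);
+ *	set_curr_task(cpu, handler_task);
+ *	... service the event ...
+ *	set_curr_task(cpu, prev);
+ */
+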
+/**
  * find_process_by_pid - find a process with a matching PID value.
  * @pid: the pid in question.
  */
_