http://linux.bkbits.net/linux-2.5
torvalds@ppc970.osdl.org|ChangeSet|20040915004007|09309 torvalds
# This is a BitKeeper generated diff -Nru style patch.
#
# drivers/pci/hotplug/rpaphp_pci.c
#   2004/09/13 17:23:15-07:00 anton@samba.org +1 -1
#   ppc64: Fix real bugs uncovered by -Wno-uninitialized removal
# 
# ChangeSet
#   2004/09/14 17:40:07-07:00 torvalds@ppc970.osdl.org 
#   Fix up typo in ppc64 eeh ioport_map() code.
#   
#   Noticed by BenH, happily harmless (nothing that uses that
#   code has been committed yet, and PIO seems to be pretty much
#   unused on at least the Apple G5 machines: all the normal
#   hardware is set up purely for MMIO, to the point that I
#   couldn't even test this thing).
# 
# arch/ppc64/kernel/eeh.c
#   2004/09/14 17:40:01-07:00 torvalds@ppc970.osdl.org +1 -1
#   Fix up typo in ppc64 eeh ioport_map() code.
#   
#   Noticed by BenH, happily harmless (nothing that uses that
#   code has been committed yet, and PIO seems to be pretty much
#   unused on at least the Apple G5 machines: all the normal
#   hardware is set up purely for MMIO, to the point that I
#   couldn't even test this thing).
# 
# ChangeSet
#   2004/09/14 16:28:53-07:00 torvalds@ppc970.osdl.org 
#   Add support for "string" ioread/iowrite.
#   
#   Things like SATA use this for data transfer.
#   
#   Also export the iomap routines on ppc64.
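#   
#   A minimal usage sketch of the new rep accessors (hypothetical driver
#   code, not part of this patch; pdev and DATA_REG are placeholders),
#   assuming the BAR has already been mapped with pci_iomap():
#   
#   	void __iomem *port = pci_iomap(pdev, 0, 0);
#   	u16 buf[256];
#   
#   	/* read 256 16-bit words from the same device register */
#   	ioread16_rep(port + DATA_REG, buf, ARRAY_SIZE(buf));
#   	/* ... and write them back the same way */
#   	iowrite16_rep(port + DATA_REG, buf, ARRAY_SIZE(buf));
#   	pci_iounmap(pdev, port);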
# 
# lib/iomap.c
#   2004/09/14 16:28:47-07:00 torvalds@ppc970.osdl.org +85 -0
#   Add support for "string" ioread/iowrite.
#   
#   Things like SATA use this for data transfer.
# 
# include/asm-generic/iomap.h
#   2004/09/14 16:28:47-07:00 torvalds@ppc970.osdl.org +19 -0
#   Add support for "string" ioread/iowrite.
#   
#   Things like SATA use this for data transfer.
# 
# arch/ppc64/kernel/eeh.c
#   2004/09/14 16:28:47-07:00 torvalds@ppc970.osdl.org +54 -0
#   Add support for "string" ioread/iowrite.
#   
#   Things like SATA use this for data transfer.
# 
# ChangeSet
#   2004/09/14 10:38:50-07:00 torvalds@ppc970.osdl.org 
#   ppc64: first cut at new iomap interfaces.
#   
#   Only the EEH case (pSeries) handled for now. 
# 
# arch/ppc64/kernel/eeh.c
#   2004/09/14 10:38:43-07:00 torvalds@ppc970.osdl.org +68 -0
#   ppc64: first cut at new iomap interfaces.
#   
#   Only the EEH case (pSeries) handled for now. 
# 
# arch/ppc64/Kconfig
#   2004/09/14 10:38:43-07:00 torvalds@ppc970.osdl.org +0 -4
#   ppc64: first cut at new iomap interfaces.
#   
#   Only the EEH case (pSeries) handled for now. 
# 
# ChangeSet
#   2004/09/14 09:36:53-07:00 torvalds@ppc970.osdl.org 
#   Update shipped version of zconf.tab.c to match bison/yacc file.
# 
# scripts/kconfig/zconf.tab.c_shipped
#   2004/09/14 09:36:46-07:00 torvalds@ppc970.osdl.org +2 -0
#   Update shipped version of zconf.tab.c to match bison/yacc file.
# 
# ChangeSet
#   2004/09/14 09:33:17-07:00 zippel@linux-m68k.org 
#   [PATCH] properly fix double current_menu
#   
#   The two current_menu variables are really two separate variables, so keep
#   them separate.
#   
#   Signed-off-by: Roman Zippel <zippel@linux-m68k.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# scripts/kconfig/zconf.y
#   2004/09/14 06:46:41-07:00 zippel@linux-m68k.org +2 -0
#   properly fix double current_menu
# 
# scripts/kconfig/menu.c
#   2004/09/14 06:46:42-07:00 zippel@linux-m68k.org +0 -1
#   properly fix double current_menu
# 
# scripts/kconfig/mconf.c
#   2004/09/14 06:51:44-07:00 zippel@linux-m68k.org +1 -1
#   properly fix double current_menu
# 
# scripts/kconfig/lkc.h
#   2004/09/14 06:44:58-07:00 zippel@linux-m68k.org +0 -3
#   properly fix double current_menu
# 
# ChangeSet
#   2004/09/14 09:32:22-07:00 levon@movementarian.org 
#   [PATCH] fix OProfile locking
#   
#   This makes OProfile use get_task_mm() as discussed.  It also fixes up
#   Anton's previous patch.  Zwane's soaked this patch all night w/o
#   problems.
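#   
#   For reference, the get_task_mm() pattern looks roughly like this (a
#   hedged sketch, not the exact OProfile code; task is a placeholder):
#   
#   	struct mm_struct *mm = get_task_mm(task);	/* takes a reference */
#   	if (mm) {
#   		/* ... safely inspect the mm here ... */
#   		mmput(mm);				/* drop the reference */
#   	}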
# 
# kernel/fork.c
#   2004/09/13 13:39:03-07:00 levon@movementarian.org +2 -0
#   fix OProfile locking
# 
# drivers/oprofile/cpu_buffer.h
#   2004/09/13 13:45:26-07:00 levon@movementarian.org +2 -2
#   fix OProfile locking
# 
# drivers/oprofile/cpu_buffer.c
#   2004/09/13 13:45:29-07:00 levon@movementarian.org +6 -6
#   fix OProfile locking
# 
# drivers/oprofile/buffer_sync.c
#   2004/09/13 13:50:51-07:00 levon@movementarian.org +11 -26
#   fix OProfile locking
# 
# ChangeSet
#   2004/09/14 09:17:49-07:00 torvalds@ppc970.osdl.org 
#   Fix up stupid last-minute edit of fork cleanup.
#   
#   I'm a retard.
# 
# kernel/fork.c
#   2004/09/14 09:17:43-07:00 torvalds@ppc970.osdl.org +1 -1
#   Fix up stupid last-minute edit of fork cleanup.
#   
#   I'm a retard.
# 
# ChangeSet
#   2004/09/14 09:15:35-07:00 torvalds@ppc970.osdl.org 
#   rivafb: Increase DDC/CI timeouts
#   
#   Nicolas Boichat reports that his monitor needs the slower
#   i2c bus timings. These values also match atyfb and the
#   original ones.
# 
# drivers/video/riva/rivafb-i2c.c
#   2004/09/14 09:15:29-07:00 torvalds@ppc970.osdl.org +2 -2
#   rivafb: Increase DDC/CI timeouts
#   
#   Nicolas Boichat reports that his monitor needs the slower
#   i2c bus timings. These values also match atyfb and the
#   original ones.
# 
# ChangeSet
#   2004/09/14 09:04:35-07:00 torvalds@ppc970.osdl.org 
#   Fix fork failure case.
#   
#   It would clear the segment registers in the parent, which
#   wasn't really intentional. Noticed by Andries Brouwer.
#   
# 
# kernel/fork.c
#   2004/09/14 09:04:28-07:00 torvalds@ppc970.osdl.org +1 -3
#   Fix fork failure case.
#   
#   It would clear the segment registers in the parent, which
#   wasn't really intentional. Noticed by Andries Brouwer.
# 
# ChangeSet
#   2004/09/14 08:54:40-07:00 mingo@elte.hu 
#   [PATCH] i386: elf_read_implies_exec() fixup
#   
#   The final ia64 related cleanup to elf_read_implies_exec() seems to have
#   broken it.  The effect is that the READ_IMPLIES_EXEC flag is never set
#   for !pt_gnu_stack binaries!
#   
#   That's a bit more secure than we need to be, and might break some legacy
#   app that doesn't expect it.
# 
# include/asm-i386/elf.h
#   2004/09/13 17:00:00-07:00 mingo@elte.hu +1 -1
#   i386: elf_read_implies_exec() fixup
# 
# ChangeSet
#   2004/09/14 08:38:02-07:00 ak@suse.de 
#   [PATCH] Fix ABI in set_mempolicy()
#   
#   Fix ABI in set_mempolicy() that got broken by an earlier change.
#   
#   Add a check for very big input values and prevent excessive looping in the
#   kernel.
#   
#   Cc: Paul "nyer, nyer, your mother wears combat boots!" Jackson <pj@sgi.com>
#   
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# mm/mempolicy.c
#   2004/09/13 18:56:13-07:00 ak@suse.de +3 -0
#   Fix ABI in set_mempolicy()
# 
# ChangeSet
#   2004/09/14 07:52:00-07:00 mpm@selenic.com 
#   [PATCH] netpoll endian fixes
#   
#   Correct wrong ip header in netpoll_send_udp.
#   
#   Signed-off-by: Duncan Sands <baldrick@free.fr>
#   Signed-off-by: Matt Mackall <mpm@selenic.com>
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# net/core/netpoll.c
#   2004/09/13 17:23:26-07:00 mpm@selenic.com +2 -2
#   netpoll endian fixes
# 
# ChangeSet
#   2004/09/14 07:51:48-07:00 hch@lst.de 
#   [PATCH] mark amiflop non-unloadable
#   
#   As it's using the obsolete MOD_{INC,DEC}_USE_COUNT it's implicitly locked
#   already, but let's remove them and make it explicit so these macros can go
#   away completely without breaking m68k compile.
#   
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# drivers/block/amiflop.c
#   2004/09/13 17:23:25-07:00 hch@lst.de +2 -14
#   mark amiflop non-unloadable
# 
# ChangeSet
#   2004/09/14 07:51:36-07:00 hch@lst.de 
#   [PATCH] <asm/softirq.h> crept back in h8300 and sh64
#   
#   <asm/softirq.h> went away in 2.5, but new ports keep adding instances again
#   and again.
#   
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# BitKeeper/deleted/.del-softirq.h~5870f0a8abafdf84
#   2004/09/14 07:51:30-07:00 hch@lst.de +0 -0
#   Delete: include/asm-h8300/softirq.h
# 
# BitKeeper/deleted/.del-softirq.h~2d71a252808115bb
#   2004/09/14 07:51:30-07:00 hch@lst.de +0 -0
#   Delete: include/asm-sh64/softirq.h
# 
# ChangeSet
#   2004/09/14 07:51:24-07:00 roland@frob.com 
#   [PATCH] BSD disklabel: handle more than 8 partitions
#   
#   NetBSD allows 16 partitions, not just 8.  This patch both ups the number,
#   and makes the recognition code tell you if the count in the disklabel
#   exceeds the number supported by the kernel.
#   
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# include/linux/genhd.h
#   2004/09/13 17:23:25-07:00 roland@frob.com +1 -1
#   BSD disklabel: handle more than 8 partitions
# 
# fs/partitions/msdos.c
#   2004/09/13 17:23:25-07:00 roland@frob.com +3 -0
#   BSD disklabel: handle more than 8 partitions
# 
# ChangeSet
#   2004/09/14 07:51:13-07:00 hch@lst.de 
#   [PATCH] small <linux/hardirq.h> tweaks
#   
#   - I misspelled CONFIG_PREEMPT as CONFIG_PREEPT, as various people noticed.
#     But in fact that ifdef should just go, else we'll get drivers that
#     compile with CONFIG_PREEMPT but not without sooner or later.
#   
#   - remove unused hardirq_trylock and hardirq_endlock
#   
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# include/linux/hardirq.h
#   2004/09/13 17:23:24-07:00 hch@lst.de +0 -5
#   small <linux/hardirq.h> tweaks
# 
# ChangeSet
#   2004/09/14 07:51:00-07:00 jbarnes@engr.sgi.com 
#   [PATCH] fix uninitialized warnings in mempolicy.c
#   
#   err may be used uninitialized in mempolicy.c in both compat_set_mempolicy
#   and compat_mbind.  This patch fixes that by setting them both to 0.
#   
#   Signed-off-by: Jesse Barnes <jbarnes@sgi.com>
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# mm/mempolicy.c
#   2004/09/13 17:23:24-07:00 jbarnes@engr.sgi.com +2 -2
#   fix uninitialized warnings in mempolicy.c
# 
# ChangeSet
#   2004/09/14 07:50:46-07:00 dsaxena@plexity.net 
#   [PATCH] Add support for word-length UART registers
#   
#   UARTs on several Intel IXP2000 systems are connected in such a way that
#   they can only be addressed using full word accesses instead of bytes.
#   The following patch adds a UPIO_MEM32 io-type to identify these UARTs.
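#   
#   A word-wide register read for such a port then looks roughly like the
#   sketch below (hedged; not the exact accessor added to 8250.c):
#   
#   	#include <linux/serial_core.h>
#   	#include <asm/io.h>
#   
#   	static unsigned int mem32_serial_in(struct uart_port *port, int offset)
#   	{
#   		/* full 32-bit read, with the offset scaled by regshift */
#   		return readl(port->membase + (offset << port->regshift));
#   	}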
#   
#   Signed-off-by: Deepak Saxena <dsaxena@plexity.net>
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# include/linux/serial_core.h
#   2004/09/13 17:23:24-07:00 dsaxena@plexity.net +1 -0
#   Add support for word-length UART registers
# 
# drivers/serial/serial_core.c
#   2004/09/13 17:23:24-07:00 dsaxena@plexity.net +1 -0
#   Add support for word-length UART registers
# 
# drivers/serial/8250.c
#   2004/09/13 17:23:24-07:00 dsaxena@plexity.net +7 -0
#   Add support for word-length UART registers
# 
# ChangeSet
#   2004/09/14 07:50:33-07:00 axboe@suse.de 
#   [PATCH] block highmem flushes
#   
#   Add a couple of missing cache flushes to the bouncing code.
#   
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# mm/highmem.c
#   2004/09/13 17:23:24-07:00 axboe@suse.de +2 -0
#   block highmem flushes
# 
# ChangeSet
#   2004/09/14 07:50:21-07:00 blaisorblade_spam@yahoo.it 
#   [PATCH] uml: remove CONFIG_UML_SMP
#   
#   Using CONFIG_UML_SMP and then making CONFIG_SMP = CONFIG_UML_SMP is useless
#   (there was a reason in 2.4, to have different help texts, but not now).
#   
#   Signed-off-by: Paolo 'Blaisorblade' Giarrusso <blaisorblade_spam@yahoo.it>
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# arch/um/Kconfig
#   2004/09/13 17:23:23-07:00 blaisorblade_spam@yahoo.it +2 -5
#   uml: remove CONFIG_UML_SMP
# 
# ChangeSet
#   2004/09/14 07:50:09-07:00 blaisorblade_spam@yahoo.it 
#   [PATCH] uml: smp build fix
#   
#   From: Sonny Rao <sonny@burdell.org>
#   
#   Make the SMP code compile, at least, to make testing possible, and remove
#   its dependency on CONFIG_BROKEN.
#   
#   Signed-off-by: Paolo 'Blaisorblade' Giarrusso <blaisorblade_spam@yahoo.it>
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# include/asm-um/spinlock.h
#   2004/09/13 17:23:23-07:00 blaisorblade_spam@yahoo.it +6 -0
#   uml: smp build fix
# 
# include/asm-um/smp.h
#   2004/09/13 17:23:23-07:00 blaisorblade_spam@yahoo.it +2 -0
#   uml: smp build fix
# 
# arch/um/kernel/smp.c
#   2004/09/13 17:23:23-07:00 blaisorblade_spam@yahoo.it +7 -1
#   uml: smp build fix
# 
# include/asm-um/spinlock.h
#   2004/09/13 17:23:23-07:00 blaisorblade_spam@yahoo.it +0 -0
#   BitKeeper file /home/torvalds/v2.6/linux/include/asm-um/spinlock.h
# 
# ChangeSet
#   2004/09/14 07:49:54-07:00 blaisorblade_spam@yahoo.it 
#   [PATCH] uml: remove commented old code in Kconfig
#   
#   Drop a config option which has disappeared from all archs.  Btw, this
#   shouldn't be in the UML-specific part, but since we cannot include generic
#   Kconfigs to avoid problems with hardware-related configs, it's duplicated
#   for now.
#   
#   Signed-off-by: Paolo 'Blaisorblade' Giarrusso <blaisorblade_spam@yahoo.it>
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# arch/um/Kconfig_char
#   2004/09/13 17:23:23-07:00 blaisorblade_spam@yahoo.it +0 -5
#   uml: remove commented old code in Kconfig
# 
# ChangeSet
#   2004/09/14 07:49:42-07:00 blaisorblade_spam@yahoo.it 
#   [PATCH] uml: refer to CONFIG_USERMODE, not to CONFIG_UM
#   
#   Correct one Kconfig dependency, which should refer to CONFIG_USERMODE
#   rather than to CONFIG_UM.
#   
#   We should also figure out how to make the config process work better for
#   UML.  We would like to make UML able to "source drivers/Kconfig" and have
#   the right drivers selectable (i.e.  LVM, ramdisk, and so on) and the ones
#   for actual hardware excluded.  I've been reading such a request even from
#   Jeff Dike at the last Kernel Summit (in the lwn.net coverage), but without
#   any followup.
#   
#   Signed-off-by: Paolo 'Blaisorblade' Giarrusso <blaisorblade_spam@yahoo.it>
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# drivers/char/Kconfig
#   2004/09/13 17:23:23-07:00 blaisorblade_spam@yahoo.it +1 -1
#   uml: refer to CONFIG_USERMODE, not to CONFIG_UM
# 
# ChangeSet
#   2004/09/14 07:49:28-07:00 jdike@addtoit.com 
#   [PATCH] uml: disable pending signals across a reboot
#   
#   On reboot, all signals and signal sources are disabled so that
#   late-arriving signals don't show up after the reboot exec, confusing the
#   new image, which is not expecting signals yet.
#   
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# arch/um/os-Linux/file.c
#   2004/09/13 17:23:23-07:00 jdike@addtoit.com +10 -0
#   uml: disable pending signals across a reboot
# 
# arch/um/kernel/time.c
#   2004/09/13 17:23:23-07:00 jdike@addtoit.com +9 -0
#   uml: disable pending signals across a reboot
# 
# arch/um/kernel/irq_user.c
#   2004/09/13 17:23:23-07:00 jdike@addtoit.com +14 -0
#   uml: disable pending signals across a reboot
# 
# arch/um/include/time_user.h
#   2004/09/13 17:23:23-07:00 jdike@addtoit.com +1 -0
#   uml: disable pending signals across a reboot
# 
# arch/um/include/os.h
#   2004/09/13 17:23:23-07:00 jdike@addtoit.com +1 -0
#   uml: disable pending signals across a reboot
# 
# arch/um/include/irq_user.h
#   2004/09/13 17:23:23-07:00 jdike@addtoit.com +1 -0
#   uml: disable pending signals across a reboot
# 
# ChangeSet
#   2004/09/14 07:49:16-07:00 jdike@addtoit.com 
#   [PATCH] uml: fix binary layout assumption
#   
#   This patch calculates section boundaries differently so as to not get
#   tripped up by holes in the binary such as are introduced by exec-shield.
#   
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# arch/um/kernel/um_arch.c
#   2004/09/13 17:23:22-07:00 jdike@addtoit.com +11 -1
#   uml: fix binary layout assumption
# 
# arch/um/kernel/tt/process_kern.c
#   2004/09/13 17:23:22-07:00 jdike@addtoit.com +1 -1
#   uml: fix binary layout assumption
# 
# ChangeSet
#   2004/09/14 07:49:04-07:00 jdike@addtoit.com 
#   [PATCH] uml: fix scheduler race
#   
#   This fixes a use-after-free bug in the context switching.  A process going
#   out of context after exiting wakes up the next process and then kills
#   itself.  The problem is that exactly when it gets around to killing itself is up to
#   the host and can happen a long time later, including after the incoming
#   process has freed its stack, and that memory is possibly being used for
#   something else.
#   
#   The fix is to have the incoming process kill the exiting process just to
#   make sure it can't be running at the point that its stack is freed.
#   
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# arch/um/kernel/tt/process_kern.c
#   2004/09/13 17:23:22-07:00 jdike@addtoit.com +13 -1
#   uml: fix scheduler race
# 
# ChangeSet
#   2004/09/14 07:48:53-07:00 jdike@addtoit.com 
#   [PATCH] uml: eliminate useless thread field
#   
#   This patch eliminates use of task.thread.kernel_stack.  It was unnecessary,
#   confusing, and was masking some kernel stack size assumptions.
#   
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# include/asm-um/processor-generic.h
#   2004/09/13 17:23:22-07:00 jdike@addtoit.com +0 -2
#   uml: eliminate useless thread field
# 
# arch/um/kernel/um_arch.c
#   2004/09/13 17:23:22-07:00 jdike@addtoit.com +0 -3
#   uml: eliminate useless thread field
# 
# arch/um/kernel/tt/process_kern.c
#   2004/09/13 17:23:22-07:00 jdike@addtoit.com +4 -5
#   uml: eliminate useless thread field
# 
# arch/um/kernel/tt/exec_kern.c
#   2004/09/13 17:23:22-07:00 jdike@addtoit.com +1 -2
#   uml: eliminate useless thread field
# 
# arch/um/kernel/skas/process_kern.c
#   2004/09/13 17:23:22-07:00 jdike@addtoit.com +2 -3
#   uml: eliminate useless thread field
# 
# arch/um/kernel/process_kern.c
#   2004/09/13 17:23:22-07:00 jdike@addtoit.com +1 -4
#   uml: eliminate useless thread field
# 
# arch/um/kernel/process.c
#   2004/09/13 17:23:22-07:00 jdike@addtoit.com +1 -1
#   uml: eliminate useless thread field
# 
# ChangeSet
#   2004/09/14 07:48:40-07:00 jdike@addtoit.com 
#   [PATCH] uml: remove ghash.h
#   
#   Remove the dependency on ghash.h.  Remove ghash.h, too.
#   
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# arch/um/kernel/physmem.c
#   2004/09/13 17:23:22-07:00 jdike@addtoit.com +48 -48
#   uml: remove ghash.h
# 
# BitKeeper/deleted/.del-ghash.h~17597075be73f07b
#   2004/09/14 07:48:32-07:00 jdike@addtoit.com +0 -0
#   Delete: include/linux/ghash.h
# 
# ChangeSet
#   2004/09/14 07:48:26-07:00 rsa@us.ibm.com 
#   [PATCH] HVCS fix to replace yield with tty_wait_until_sent in hvcs_close
#   
#   Following the same advice you gave in a recent hvc_console patch, I have
#   modified HVCS to remove a while() { yield(); } loop from hvcs_close(), which
#   may cause problems where real-time scheduling is concerned, and replaced it
#   with tty_wait_until_sent(), which uses a real wait queue and is the proper
#   method for blocking a tty operation while waiting for data to be sent.  This
#   patch has been tested to verify that all the changed code paths were hit
#   during the test run and performed as expected, including hotplug removal of
#   hvcs adapters and hangup of ttys.
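#   
#   In other words, the close path changes roughly as sketched below
#   (simplified; HVCS_CLOSE_WAIT is a hypothetical timeout constant):
#   
#   	/* before: busy-waits until the driver's buffer drains */
#   	while (hvcs_chars_in_buffer(tty))
#   		yield();
#   
#   	/* after: sleeps on a proper wait queue, with a bounded timeout */
#   	tty_wait_until_sent(tty, HVCS_CLOSE_WAIT);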
#   
#   - Replaced yield() in hvcs_close() with tty_wait_until_sent() to prevent
#     possible lockup with realtime scheduling.
#   
#   - Removed hvcs_final_close() and reordered cleanup operations to prevent
#     discarding of pending data during an hvcs_close() call.
#   
#   - Removed spinlock protection of hvcs_struct data members in
#     hvcs_write_room() and hvcs_chars_in_buffer() because they aren't needed.
#   
#   Signed-off-by: Ryan S. Arnold <rsa@us.ibm.com>
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# drivers/char/hvcs.c
#   2004/09/13 17:23:21-07:00 rsa@us.ibm.com +43 -64
#   HVCS fix to replace yield with tty_wait_until_sent in hvcs_close
# 
# ChangeSet
#   2004/09/14 07:48:14-07:00 mingo@elte.hu 
#   [PATCH] blk: max_sectors tunables
#   
#   Introduces two new /sys/block values:
#   
#     /sys/block/*/queue/max_hw_sectors_kb
#     /sys/block/*/queue/max_sectors_kb
#   
#   max_hw_sectors_kb is the maximum that the driver can handle and is
#   readonly.  max_sectors_kb is the current max_sectors value and can be tuned
#   by root.  PAGE_SIZE granularity is enforced.
#   
#   It's all locking-safe and all affected layered drivers have been updated as
#   well.  The patch has been in testing for a couple of weeks already as part
#   of the voluntary-preempt patches and it works just fine - people use it to
#   reduce IDE IRQ handling latencies.
#   
#   Signed-off-by: Ingo Molnar <mingo@elte.hu>
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# include/linux/blkdev.h
#   2004/09/13 17:23:21-07:00 mingo@elte.hu +1 -0
#   blk: max_sectors tunables
# 
# drivers/md/raid1.c
#   2004/09/13 17:23:21-07:00 mingo@elte.hu +2 -2
#   blk: max_sectors tunables
# 
# drivers/md/raid0.c
#   2004/09/13 17:23:21-07:00 mingo@elte.hu +1 -1
#   blk: max_sectors tunables
# 
# drivers/md/multipath.c
#   2004/09/13 17:23:21-07:00 mingo@elte.hu +2 -2
#   blk: max_sectors tunables
# 
# drivers/md/linear.c
#   2004/09/13 17:23:21-07:00 mingo@elte.hu +1 -1
#   blk: max_sectors tunables
# 
# drivers/md/dm-table.c
#   2004/09/13 17:23:21-07:00 mingo@elte.hu +1 -1
#   blk: max_sectors tunables
# 
# drivers/block/ll_rw_blk.c
#   2004/09/13 17:23:21-07:00 mingo@elte.hu +66 -4
#   blk: max_sectors tunables
# 
# ChangeSet
#   2004/09/14 07:48:01-07:00 ak@suse.de 
#   [PATCH] Add prctl to modify current->comm
#   
#   This patch adds a prctl to modify current->comm as shown in /proc.  This
#   feature was requested by KDE developers.  In KDE most programs are started by
#   forking from a kdeinit program that already has the libraries loaded and some
#   other state.
#   
#   Problem is to give these forked programs the proper name.  It already writes
#   the command line in the environment (as seen in ps), but top uses a different
#   field in /proc/pid/status that reports current->comm.  And that was always
#   "kdeinit" instead of the real command name.  So you ended up with lots of
#   kdeinits in your top listing, which was not very useful.
#   
#   This patch adds a new prctl PR_SET_NAME to allow a program to change its comm
#   field.
#   
#   I considered the potential security issues of a program obscuring itself with
#   this interface, but I don't think it matters much because a program can
#   already obscure itself when the admin uses ps instead of top.  In case of a
#   KDE desktop calling everything kdeinit is much more obfuscation than the
#   alternative.
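#   
#   From userspace the new interface is used like this (minimal sketch;
#   PR_SET_NAME comes from the updated <linux/prctl.h>):
#   
#   	#include <sys/prctl.h>
#   	#ifndef PR_SET_NAME
#   	#define PR_SET_NAME 15
#   	#endif
#   
#   	/* rename the calling task; comm is truncated to 15 chars + NUL */
#   	prctl(PR_SET_NAME, "konqueror", 0, 0, 0);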
#   
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# kernel/sys.c
#   2004/09/13 17:23:21-07:00 ak@suse.de +11 -0
#   Add prctl to modify current->comm
# 
# include/linux/prctl.h
#   2004/09/13 17:23:21-07:00 ak@suse.de +2 -0
#   Add prctl to modify current->comm
# 
# ChangeSet
#   2004/09/14 07:47:51-07:00 hch@lst.de 
#   [PATCH] remove exports from audit code
#   
#   Tons of exports in the new audit code, but not a single module that
#   actually uses one of them.
#   
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# kernel/auditsc.c
#   2004/09/13 17:23:21-07:00 hch@lst.de +1 -8
#   remove exports from audit code
# 
# kernel/audit.c
#   2004/09/13 17:23:21-07:00 hch@lst.de +0 -13
#   remove exports from audit code
# 
# ChangeSet
#   2004/09/14 07:47:38-07:00 hch@lst.de 
#   [PATCH] mark dq_list_lock static
#   
#   Only used in fs/dquot.c these days.
#   
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# include/linux/quota.h
#   2004/09/13 17:23:20-07:00 hch@lst.de +0 -1
#   mark dq_list_lock static
# 
# fs/dquot.c
#   2004/09/13 17:23:20-07:00 hch@lst.de +1 -2
#   mark dq_list_lock static
# 
# ChangeSet
#   2004/09/14 07:47:27-07:00 hch@lst.de 
#   [PATCH] mark md_interrupt_thread static
#   
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# include/linux/raid/md.h
#   2004/09/13 17:23:20-07:00 hch@lst.de +0 -1
#   mark md_interrupt_thread static
# 
# drivers/md/md.c
#   2004/09/13 17:23:20-07:00 hch@lst.de +1 -2
#   mark md_interrupt_thread static
# 
# ChangeSet
#   2004/09/14 07:47:15-07:00 hch@lst.de 
#   [PATCH] make kmem_find_general_cachep static in slab.c
#   
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# mm/slab.c
#   2004/09/13 17:23:20-07:00 hch@lst.de +16 -18
#   make kmem_find_general_cachep static in slab.c
# 
# include/linux/slab.h
#   2004/09/13 17:23:20-07:00 hch@lst.de +0 -1
#   make kmem_find_general_cachep static in slab.c
# 
# ChangeSet
#   2004/09/14 07:47:02-07:00 juhl-lkml@dif.dk 
#   [PATCH] __copy_to_user() check in cdrom_read_cdda_old()
#   
#   akpm: really, reads are supposed to return the number-of-bytes-read on faults,
#   or -EFAULT if no bytes were read.  This patch returns either zero or -EFAULT,
#   ignoring any successfully transferred data.  But the user interface (which is
#   an ioctl()) was never set up to do that.
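#   
#   The pattern being added is the usual copy-out check (sketch; ubuf, kbuf
#   and nr are placeholders):
#   
#   	/* copy_to_user() returns the number of bytes it could NOT copy */
#   	if (copy_to_user(ubuf, kbuf, nr))
#   		ret = -EFAULT;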
#   
#   Signed-off-by: Jesper Juhl <juhl-lkml@dif.dk>
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# drivers/cdrom/cdrom.c
#   2004/09/13 17:23:20-07:00 juhl-lkml@dif.dk +10 -5
#   __copy_to_user() check in cdrom_read_cdda_old()
# 
# ChangeSet
#   2004/09/14 07:46:50-07:00 anton@samba.org 
#   [PATCH] Allocate correct amount of memory for pid hash
#   
#   We are now allocating twice as much memory as required.
#   
#   Signed-off-by: Anton Blanchard <anton@samba.org>
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# kernel/pid.c
#   2004/09/13 17:23:19-07:00 anton@samba.org +1 -1
#   Allocate correct amount of memory for pid hash
# 
# ChangeSet
#   2004/09/14 07:46:38-07:00 hugh@veritas.com 
#   [PATCH] shmem: Copyright file_setup trivia
#   
#   I _think_ shmem_file_setup is protected against negative loff_t size by the
#   TASK_SIZE in each arch, but prefer the security of an explicit test.  Wipe
#   those parentheses off its return(file), and update our Copyright.
#   
#   Signed-off-by: Hugh Dickins <hugh@veritas.com>
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# mm/shmem.c
#   2004/09/13 17:23:19-07:00 hugh@veritas.com +4 -4
#   shmem: Copyright file_setup trivia
# 
# ChangeSet
#   2004/09/14 07:46:25-07:00 hugh@veritas.com 
#   [PATCH] shmem: rework majmin and ZERO_PAGE
#   
#   Very minor adjustments to shmem_getpage return path: I now prefer it to return
#   NULL and let do_shmem_file_read use ZERO_PAGE(0) in that case; and we don't
#   need a local majmin variable, do_no_page initializes *type to VM_FAULT_MINOR
#   already.
#   
#   Signed-off-by: Hugh Dickins <hugh@veritas.com>
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# mm/shmem.c
#   2004/09/13 17:23:18-07:00 hugh@veritas.com +12 -14
#   shmem: rework majmin and ZERO_PAGE
# 
# ChangeSet
#   2004/09/14 07:46:13-07:00 hugh@veritas.com 
#   [PATCH] shmem: avoid the shmem_inodes list
#   
#   If we're thinking about shmem scalability...  isn't it silly that each shmem
#   object is added to the shmem_inodes list on creation, and removed on deletion,
#   yet the only use for that list is in swapoff (shmem_unuse)?
#   
#   Call it shmem_swaplist; shmem_writepage adds the inode to the swaplist when
#   its first swap is allocated (usually never); shmem_delete_inode removes the
#   inode from the list after truncating (if called before, the inode could be
#   re-added to it).
#   
#   An inode can remain on the swaplist after all its pages are swapped back in;
#   we're just lazy about it.  But if shmem_unuse finds the swapped count is now 0,
#   it saves itself time by removing that inode from the swaplist.
#   
#   Signed-off-by: Hugh Dickins <hugh@veritas.com>
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# mm/shmem.c
#   2004/09/13 17:23:18-07:00 hugh@veritas.com +24 -19
#   shmem: avoid the shmem_inodes list
# 
# include/linux/shmem_fs.h
#   2004/09/13 17:23:18-07:00 hugh@veritas.com +1 -1
#   shmem: avoid the shmem_inodes list
# 
# ChangeSet
#   2004/09/14 07:46:01-07:00 hugh@veritas.com 
#   [PATCH] shmem: no sbinfo for tmpfs mount?
#   
#   Some might want a tmpfs mount with the improved scalability afforded by
#   omitting shmem superblock accounting; or some might just want to test it in an
#   externally-visible tmpfs mount instance.
#   
#   Adopt the convention that mount option -o nr_blocks=0,nr_inodes=0 means
#   without resource limits, and hence no shmem_sb_info.  Not recommended for
#   general use, but no worse than ramfs.
#   
#   Disallow remounting from unlimited to limited (no accounting has been done so
#   far, so no idea whether it's permissible), and from limited to unlimited
#   (because we'd need then to free the sbinfo, and visit each inode to reset its
#   i_blocks to 0: why bother?).
#   
#   Signed-off-by: Hugh Dickins <hugh@veritas.com>
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# mm/shmem.c
#   2004/09/13 17:23:18-07:00 hugh@veritas.com +41 -19
#   shmem: no sbinfo for tmpfs mount?
# 
# Documentation/filesystems/tmpfs.txt
#   2004/09/13 17:23:18-07:00 hugh@veritas.com +6 -0
#   shmem: no sbinfo for tmpfs mount?
# 
# ChangeSet
#   2004/09/14 07:45:49-07:00 hugh@veritas.com 
#   [PATCH] shmem: no sbinfo for shm mount
#   
#   SGI investigations have shown a dramatic contrast in scalability between
#   anonymous memory and shmem objects.  Processes building distinct shmem objects
#   in parallel hit heavy contention on shmem superblock stat_lock.  Across 256
#   cpus an intensive test runs 300 times slower than anonymous.
#   
#   Jack Steiner has observed that all the shmem superblock free_blocks and
#   free_inodes accounting is redundant in the case of the internal mount used for
#   SysV shared memory and for shared writable /dev/zero objects (the cases which
#   most concern them): it specifically declines to limit.
#   
#   Based upon Brent Casavant's SHMEM_NOSBINFO patch, this instead just removes
#   the shmem_sb_info structure from the internal kernel mount, testing where
#   necessary for null sbinfo pointer.  shmem_set_size moved within CONFIG_TMPFS,
#   its arg named "sbinfo" as elsewhere.
#   
#   This brings shmem object scalability up to that of anonymous memory, in the
#   case where distinct processes are building (faulting to allocate) distinct
#   objects.  It significantly improves parallel building of a shared shmem object
#   (that test runs 14 times faster across 256 cpus), but other issues remain in
#   that case: to be addressed in later patches.
#   
#   Signed-off-by: Hugh Dickins <hugh@veritas.com>
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# mm/shmem.c
#   2004/09/13 17:23:18-07:00 hugh@veritas.com +88 -76
#   shmem: no sbinfo for shm mount
# 
# ChangeSet
#   2004/09/14 07:45:36-07:00 hugh@veritas.com 
#   [PATCH] shmem: inodes and links need lowmem
#   
#   Keith Mannthey's Bugzilla #3268 drew attention to how tmpfs inodes and
#   dentries and long names and radix-tree nodes pin lowmem.  Assuming about 1k of
#   lowmem per inode, we need to lower the default nr_inodes limit on machines
#   with significant highmem.
#   
#   Be conservative, but more generous than in the original patch to Keith: limit
#   to number of lowmem pages, which works out around 200,000 on i386.  Easily
#   overridden by giving the nr_inodes= mount option: those who want to sail
#   closer to the rocks should be allowed to do so.
#   
#   Notice how tmpfs dentries cannot be reclaimed in the way that disk-based
#   dentries can: so even hard links need to be costed.  They are cheaper than
#   inodes, but easier all round to charge the same.  This way, the limit for hard
#   links is equally visible through "df -i": but expect occasional bugreports
#   that tmpfs links are being treated like this.
#   
#   Would have been simpler just to move the free_inodes accounting from
#   shmem_delete_inode to shmem_unlink; but that would lose the charge on unlinked
#   but open files.
#   
#   Signed-off-by: Hugh Dickins <hugh@veritas.com>
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# mm/shmem.c
#   2004/09/13 17:23:17-07:00 hugh@veritas.com +26 -2
#   shmem: inodes and links need lowmem
# 
# Documentation/filesystems/tmpfs.txt
#   2004/09/13 17:23:17-07:00 hugh@veritas.com +4 -2
#   shmem: inodes and links need lowmem
# 
# ChangeSet
#   2004/09/14 07:45:23-07:00 hugh@veritas.com 
#   [PATCH] shmem: don't SLAB_HWCACHE_ALIGN
#   
#   Anton recently removed SLAB_HWCACHE_ALIGN from the fs inode caches, now do the
#   same for tmpfs inode cache: fits 9 per page where 7 before.
#   
#   Was saying SLAB_RECLAIM_ACCOUNT too, but that's wrong: tmpfs inodes are not
#   reclaimed under pressure; and hugetlbfs had copied that too.
#   
#   Rearrange shmem_inode_info fields so those most likely to be needed are most
#   likely to be in the same cacheline as the spinlock guarding them.
#   
#   Signed-off-by: Hugh Dickins <hugh@veritas.com>
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# mm/shmem.c
#   2004/09/13 17:23:17-07:00 hugh@veritas.com +1 -2
#   shmem: don't SLAB_HWCACHE_ALIGN
# 
# include/linux/shmem_fs.h
#   2004/09/13 17:23:17-07:00 hugh@veritas.com +7 -7
#   shmem: don't SLAB_HWCACHE_ALIGN
# 
# fs/hugetlbfs/inode.c
#   2004/09/13 17:23:17-07:00 hugh@veritas.com +1 -2
#   shmem: don't SLAB_HWCACHE_ALIGN
# 
# ChangeSet
#   2004/09/14 07:45:11-07:00 castet.matthieu@free.fr 
#   [PATCH] pnpbios parser bugfix
#   
#   This patch fixes a pnpbios problem with independent
#   resources (http://bugzilla.kernel.org/show_bug.cgi?id=3295):
#   the old code assumes that they are given at the beginning (before any
#   SMALL_TAG_STARTDEP entry), but in some cases they are found after the
#   SMALL_TAG_ENDDEP entry.
#   
#   tag : 6 SMALL_TAG_STARTDEP
#   tag : 8 SMALL_TAG_PORT
#   tag : 6 SMALL_TAG_STARTDEP
#   tag : 8 SMALL_TAG_PORT
#   tag : 7 SMALL_TAG_ENDDEP
#   tag : 4 SMALL_TAG_IRQ   <-- independent resource
#   tag : f SMALL_TAG_END
#   
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# drivers/pnp/pnpbios/rsparser.c
#   2004/09/13 17:23:17-07:00 castet.matthieu@free.fr +7 -2
#   pnpbios parser bugfix
# 
# ChangeSet
#   2004/09/14 07:44:59-07:00 yuvalt@gmail.com 
#   [PATCH] searching for parameters in 'make menuconfig'
#   
#   I added the ability to search for parameters in make menuconfig (find a
#   given parameter's location in the tree).
#   
#   You use '/' to invoke the feature.  Regular expression searches are supported.
#   
#   Signed-off-by: Yuval Turgeman <yuvalt@gmail.com>
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# scripts/lxdialog/menubox.c
#   2004/09/13 17:23:16-07:00 yuvalt@gmail.com +9 -0
#   searching for parameters in 'make menuconfig'
# 
# scripts/kconfig/mconf.c
#   2004/09/13 17:23:16-07:00 yuvalt@gmail.com +145 -16
#   searching for parameters in 'make menuconfig'
# 
# ChangeSet
#   2004/09/14 07:44:46-07:00 benh@kernel.crashing.org 
#   [PATCH] ppc32: pmac cpufreq for ibook 2 600
#   
#   This patch adds support for the 750CX-based ibook2 600MHz to the cpufreq
#   powermac driver.
#   
#   Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# arch/ppc/platforms/pmac_cpufreq.c
#   2004/09/13 17:23:16-07:00 benh@kernel.crashing.org +2 -5
#   ppc32: pmac cpufreq for ibook 2 600
# 
# ChangeSet
#   2004/09/14 07:44:34-07:00 anton@samba.org 
#   [PATCH] ppc32: remove -Wno-uninitialized
#   
#   Remove -Wno-uninitialized on ppc32 too.  I've just found a number of real bugs
#   on ppc64 by doing the same.
#   
#   Signed-off-by: Anton Blanchard <anton@samba.org>
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# arch/ppc/Makefile
#   2004/09/13 17:23:16-07:00 anton@samba.org +1 -1
#   ppc32: remove -Wno-uninitialized
# 
# ChangeSet
#   2004/09/14 07:44:21-07:00 david@gibson.dropbear.id.au 
#   [PATCH] ppc64: improved VSID allocation algorithm
#   
#   This patch has been tested both on SLB and segment table machines.  This
#   new approach is far from the final word in VSID/context allocation, but
#   it's a noticeable improvement on the old method.
#   
#   Replace the VSID allocation algorithm.  The new algorithm first generates a
#   36-bit "proto-VSID" (with 0xfffffffff reserved).  For kernel addresses this
#   is equal to the ESID (address >> 28), for user addresses it is:
#   
#   	(context << 15) | (esid & 0x7fff)
#   
#   These are distinguishable from kernel proto-VSIDs because the top bit is
#   clear.  Proto-VSIDs with the top two bits equal to 0b10 are reserved for
#   now.
#   
#   The proto-VSIDs are then scrambled into real VSIDs with the multiplicative
#   hash:
#   
#   	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
#   	where	VSID_MULTIPLIER = 268435399 = 0xFFFFFC7
#   		VSID_MODULUS = 2^36-1 = 0xFFFFFFFFF
#   
#   This scramble is 1:1, because VSID_MULTIPLIER and VSID_MODULUS are co-prime
#   since VSID_MULTIPLIER is prime (the largest 28-bit prime, in fact).
#   
#   This scheme has a number of advantages over the old one:
#   
#   - We now have VSIDs for every kernel address (i.e.  everything above
#     0xC000000000000000), except the very top segment.  That simplifies a
#     number of things.
#   
#   - We allow for 15 significant bits of ESID for user addresses with 20
#     bits of context.  i.e.  8T (43 bits) of address space for up to 1M
#     contexts, significantly more than the old method (although we will need
#     changes in the hash path and context allocation to take advantage of
#     this).
#   
#   - Because we use a real multiplicative hash function, we have better and
#     more robust hash scattering with this VSID algorithm (at least based on
#     some initial results).
#   
#   Because the MODULUS is 2^n-1 we can use a trick to compute it efficiently
#   without a divide or extra multiply.  This makes the new algorithm barely
#   slower than the old one.
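#   
#   In C the scramble boils down to the sketch below (a sketch of the
#   arithmetic only; the patch implements it in the low-level assembly
#   paths), using the identity a*2^36 + b == a + b (mod 2^36 - 1):
#   
#   	#define VSID_MULTIPLIER	268435399UL	/* largest 28-bit prime */
#   	#define VSID_MODULUS	((1UL << 36) - 1)
#   
#   	static inline unsigned long vsid_scramble(unsigned long proto_vsid)
#   	{
#   		/* product of a 36-bit and a 28-bit value: fits in 64 bits */
#   		unsigned long x = proto_vsid * VSID_MULTIPLIER;
#   
#   		x = (x >> 36) + (x & VSID_MODULUS);	/* fold high bits back in */
#   		if (x >= VSID_MODULUS)			/* at most one correction */
#   			x -= VSID_MODULUS;
#   		return x;
#   	}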
#   
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# include/asm-ppc64/pgtable.h
#   2004/09/13 17:23:16-07:00 david@gibson.dropbear.id.au +10 -4
#   ppc64: improved VSID allocation algorithm
# 
# include/asm-ppc64/page.h
#   2004/09/13 17:23:16-07:00 david@gibson.dropbear.id.au +0 -11
#   ppc64: improved VSID allocation algorithm
# 
# include/asm-ppc64/mmu_context.h
#   2004/09/13 17:23:16-07:00 david@gibson.dropbear.id.au +79 -38
#   ppc64: improved VSID allocation algorithm
# 
# include/asm-ppc64/mmu.h
#   2004/09/13 17:23:16-07:00 david@gibson.dropbear.id.au +39 -6
#   ppc64: improved VSID allocation algorithm
# 
# arch/ppc64/mm/stab.c
#   2004/09/13 17:23:16-07:00 david@gibson.dropbear.id.au +1 -5
#   ppc64: improved VSID allocation algorithm
# 
# arch/ppc64/mm/slb_low.S
#   2004/09/13 17:23:16-07:00 david@gibson.dropbear.id.au +21 -32
#   ppc64: improved VSID allocation algorithm
# 
# arch/ppc64/mm/hash_utils.c
#   2004/09/13 17:23:16-07:00 david@gibson.dropbear.id.au +5 -5
#   ppc64: improved VSID allocation algorithm
# 
# arch/ppc64/kernel/head.S
#   2004/09/13 17:23:16-07:00 david@gibson.dropbear.id.au +8 -17
#   ppc64: improved VSID allocation algorithm
# 
# ChangeSet
#   2004/09/14 07:44:08-07:00 anton@samba.org 
#   [PATCH] hvc: uninitialised variable
#   
#   Signed-off-by: Anton Blanchard <anton@samba.org>
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# drivers/char/hvc_console.c
#   2004/09/13 17:23:15-07:00 anton@samba.org +1 -1
#   hvc: uninitialised variable
# 
# ChangeSet
#   2004/09/14 07:43:55-07:00 anton@samba.org 
#   [PATCH] ppc64: Fix spurious warnings uncovered by -Wno-uninitialized removal
#   
#   Here are fixes for some false positives.
#   
#   Signed-off-by: Anton Blanchard <anton@samba.org>
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# drivers/macintosh/via-pmu.c
#   2004/09/13 17:23:15-07:00 anton@samba.org +1 -1
#   ppc64: Fix spurious warnings uncovered by -Wno-uninitialized removal
# 
# drivers/char/hvsi.c
#   2004/09/13 17:23:15-07:00 anton@samba.org +1 -1
#   ppc64: Fix spurious warnings uncovered by -Wno-uninitialized removal
# 
# arch/ppc64/xmon/xmon.c
#   2004/09/13 17:23:15-07:00 anton@samba.org +1 -1
#   ppc64: Fix spurious warnings uncovered by -Wno-uninitialized removal
# 
# arch/ppc64/oprofile/op_model_rs64.c
#   2004/09/13 17:23:15-07:00 anton@samba.org +2 -2
#   ppc64: Fix spurious warnings uncovered by -Wno-uninitialized removal
# 
# arch/ppc64/kernel/sysfs.c
#   2004/09/13 17:23:15-07:00 anton@samba.org +7 -6
#   ppc64: Fix spurious warnings uncovered by -Wno-uninitialized removal
# 
# arch/ppc64/kernel/signal32.c
#   2004/09/13 17:23:15-07:00 anton@samba.org +1 -1
#   ppc64: Fix spurious warnings uncovered by -Wno-uninitialized removal
# 
# arch/ppc64/kernel/signal.c
#   2004/09/13 17:23:15-07:00 anton@samba.org +1 -1
#   ppc64: Fix spurious warnings uncovered by -Wno-uninitialized removal
# 
# arch/ppc64/kernel/setup.c
#   2004/09/13 17:23:15-07:00 anton@samba.org +1 -1
#   ppc64: Fix spurious warnings uncovered by -Wno-uninitialized removal
# 
# arch/ppc64/kernel/rtasd.c
#   2004/09/13 17:23:15-07:00 anton@samba.org +1 -1
#   ppc64: Fix spurious warnings uncovered by -Wno-uninitialized removal
# 
# arch/ppc64/kernel/pSeries_pci.c
#   2004/09/13 17:23:15-07:00 anton@samba.org +1 -1
#   ppc64: Fix spurious warnings uncovered by -Wno-uninitialized removal
# 
# arch/ppc64/kernel/nvram.c
#   2004/09/13 17:23:15-07:00 anton@samba.org +1 -1
#   ppc64: Fix spurious warnings uncovered by -Wno-uninitialized removal
# 
# arch/ppc64/kernel/iommu.c
#   2004/09/13 17:23:15-07:00 anton@samba.org +1 -1
#   ppc64: Fix spurious warnings uncovered by -Wno-uninitialized removal
# 
# ChangeSet
#   2004/09/14 07:43:43-07:00 anton@samba.org 
#   [PATCH] ppc64: Fix real bugs uncovered by -Wno-uninitialized removal
#   
#   The removal of -Wno-uninitialized on ppc64 revealed a number of real
#   bugs.
#   
#   Signed-off-by: Anton Blanchard <anton@samba.org>
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# drivers/pci/hotplug/rpaphp_core.c
#   2004/09/13 17:23:15-07:00 anton@samba.org +1 -1
#   ppc64: Fix real bugs uncovered by -Wno-uninitialized removal
# 
# drivers/net/ibmveth.c
#   2004/09/13 17:23:15-07:00 anton@samba.org +5 -2
#   ppc64: Fix real bugs uncovered by -Wno-uninitialized removal
# 
# drivers/char/hvcs.c
#   2004/09/13 17:23:15-07:00 anton@samba.org +3 -3
#   ppc64: Fix real bugs uncovered by -Wno-uninitialized removal
# 
# arch/ppc64/kernel/iSeries_pci_reset.c
#   2004/09/13 17:23:15-07:00 anton@samba.org +2 -1
#   ppc64: Fix real bugs uncovered by -Wno-uninitialized removal
# 
# ChangeSet
#   2004/09/14 07:43:30-07:00 anton@samba.org 
#   [PATCH] ppc64: remove -Wno-uninitialized
#   
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# arch/ppc64/Makefile
#   2004/09/13 17:23:14-07:00 anton@samba.org +1 -2
#   ppc64: remove -Wno-uninitialized
# 
# ChangeSet
#   2004/09/14 07:43:18-07:00 anton@samba.org 
#   [PATCH] ppc64: clean up idle loop code
#   
#   Clean up our idle loop code:
#   
#   - Remove a bunch of useless includes and make most functions static
#   - There were places where we weren't disabling interrupts before checking
#     need_resched and then calling the hypervisor to sleep our thread. We might
#     race with an IPI and end up missing a reschedule. Disable interrupts
#     around these regions to make them safe (see the sketch after this list).
#   - We forgot to turn off the polling flag when exiting the dedicated_idle
#     loop. This could have resulted in all manner of problems as other
#     cpus would avoid sending IPIs to force reschedules.
#   - Add a missing check for cpu_is_offline in the shared cpu idle loop.
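#   
#   The safe pattern for the shared-processor idle path is roughly (sketch,
#   simplified; cpu is the local cpu number and cede_processor() stands in
#   for the actual H_CEDE hypervisor call):
#   
#   	local_irq_disable();
#   	if (!need_resched() && !cpu_is_offline(cpu))
#   		cede_processor();	/* hypervisor sleeps this thread */
#   	local_irq_enable();
#   	if (need_resched())
#   		schedule();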
#   
#   Signed-off-by: Anton Blanchard <anton@samba.org>
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# arch/ppc64/kernel/idle.c
#   2004/09/13 17:23:14-07:00 anton@samba.org +60 -64
#   ppc64: clean up idle loop code
# 
# ChangeSet
#   2004/09/14 07:43:06-07:00 anton@samba.org 
#   [PATCH] ppc64: enable POWER5 low power mode in idle loop
#   
#   Now that we understand (and have fixed) the problem with using low power mode
#   in the idle loop, let's enable it.  It should save a fair amount of power.
#   
#   (The problem was that our exceptions were inheriting the low power mode and so
#   were executing at a fraction of the normal cpu issue rate.  We fixed it by
#   always bumping our priority to medium at the start of every exception).
#   
#   Signed-off-by: Anton Blanchard <anton@samba.org>
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# arch/ppc64/kernel/idle.c
#   2004/09/13 17:23:14-07:00 anton@samba.org +13 -9
#   ppc64: enable POWER5 low power mode in idle loop
# 
# ChangeSet
#   2004/09/14 07:42:54-07:00 anton@samba.org 
#   [PATCH] ppc64: restore smt-enabled=off kernel command line option
#   
#   Restore the smt-enabled=off kernel command line functionality:
#   
#   - Remove the SMT_DYNAMIC state now that smt_snooze_delay allows for the
#     same thing.
#   - Remove the early prom.c parsing for the option, put it into an
#     early_param instead.
#   - In setup_cpu_maps honour the smt-enabled setting
#   
#   Note to Nathan: In order to allow cpu hotplug add of secondary threads after
#   booting with smt-enabled=off, I had to initialise cpu_present_map to
#   cpu_online_map in smp_cpus_done.  I'm not sure how you want to handle this but
#   it seems our present map currently does not allow cpus to be added into the
#   partition that weren't there at boot (but were in the possible map).
#   
#   Signed-off-by: Anton Blanchard <anton@samba.org>
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# include/asm-ppc64/smp.h
#   2004/09/13 17:23:14-07:00 anton@samba.org +2 -0
#   ppc64: restore smt-enabled=off kernel command line option
# 
# include/asm-ppc64/naca.h
#   2004/09/13 17:23:14-07:00 anton@samba.org +0 -3
#   ppc64: restore smt-enabled=off kernel command line option
# 
# include/asm-ppc64/memory.h
#   2004/09/13 17:23:14-07:00 anton@samba.org +0 -10
#   ppc64: restore smt-enabled=off kernel command line option
# 
# arch/ppc64/kernel/smp.c
#   2004/09/13 17:23:14-07:00 anton@samba.org +10 -0
#   ppc64: restore smt-enabled=off kernel command line option
# 
# arch/ppc64/kernel/setup.c
#   2004/09/13 17:23:14-07:00 anton@samba.org +55 -2
#   ppc64: restore smt-enabled=off kernel command line option
# 
# arch/ppc64/kernel/prom.c
#   2004/09/13 17:23:14-07:00 anton@samba.org +3 -67
#   ppc64: restore smt-enabled=off kernel command line option
# 
# arch/ppc64/kernel/idle.c
#   2004/09/13 17:23:14-07:00 anton@samba.org +1 -6
#   ppc64: restore smt-enabled=off kernel command line option
# 
# ChangeSet
#   2004/09/14 07:42:41-07:00 anton@samba.org 
#   [PATCH] ppc64: use early_param
#   
#   Make use of Rusty's early_param code. Its good stuff.
#   
#   We appear to be the first user :)
#   
#   Move vpa_init and idle_setup later in boot, we don't have to do them
#   right up front in setup_system.
#   
#   Signed-off-by: Anton Blanchard <anton@samba.org>
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# arch/ppc64/mm/numa.c
#   2004/09/13 17:23:14-07:00 anton@samba.org +18 -4
#   ppc64: use early_param
# 
# arch/ppc64/kernel/setup.c
#   2004/09/13 17:23:14-07:00 anton@samba.org +43 -47
#   ppc64: use early_param
# 
# ChangeSet
#   2004/09/14 07:42:28-07:00 anton@samba.org 
#   [PATCH] ppc64: remove EEH command line device matching code
#   
#   We have had reports of people attempting to disable EEH on POWER5 boxes.  This
#   is not supported, and the device will most likely not respond to config space
#   reads/writes.  Remove the IBM location matching code that was being used to
#   disable devices as well as the global option.
#   
#   We already have the ability to ignore EEH errors via the panic_on_oops sysctl
#   option, advanced users should make use of that instead.
#   
#   Signed-off-by: Anton Blanchard <anton@samba.org>
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# arch/ppc64/kernel/eeh.c
#   2004/09/13 17:23:13-07:00 anton@samba.org +5 -160
#   ppc64: remove EEH command line device matching code
# 
# ChangeSet
#   2004/09/14 07:42:16-07:00 anton@samba.org 
#   [PATCH] ppc64: remove unused ppc64_calibrate_delay
#   
#   - Remove ppc64_calibrate_delay, no longer used
#   - Formatting fixups
#   
#   Signed-off-by: Anton Blanchard <anton@samba.org>
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# arch/ppc64/kernel/setup.c
#   2004/09/13 17:23:13-07:00 anton@samba.org +5 -18
#   ppc64: remove unused ppc64_calibrate_delay
# 
# ChangeSet
#   2004/09/14 07:42:04-07:00 anton@samba.org 
#   [PATCH] ppc64: clean up kernel command line code
#   
#   Clean up some of our command line code:
#   
#   - We were copying the command line out of the device tree twice, but the
#     first time we forgot to add CONFIG_CMDLINE. Fix this and remove the
#     second copy.
#   - The command line birec code ran after we had done some command line
#     parsing in prom.c. This had the opportunity to really confuse the
#     user, with some options being parsed out of the device tree and the
#     other out of birecs. Luckily we could find no user of the command
#     line birecs, so remove them.
#   - remove duplicate printing of kernel command line;
#   - clean up iseries inits and create an iSeries_parse_cmdline.
#   
#   Signed-off-by: Anton Blanchard <anton@samba.org>
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# arch/ppc64/kernel/setup.c
#   2004/09/13 17:23:13-07:00 anton@samba.org +5 -65
#   ppc64: clean up kernel command line code
# 
# arch/ppc64/kernel/prom.c
#   2004/09/13 17:23:13-07:00 anton@samba.org +3 -0
#   ppc64: clean up kernel command line code
# 
# arch/ppc64/kernel/iSeries_setup.c
#   2004/09/13 17:23:13-07:00 anton@samba.org +9 -10
#   ppc64: clean up kernel command line code
# 
# arch/ppc64/kernel/chrp_setup.c
#   2004/09/13 17:23:13-07:00 anton@samba.org +0 -2
#   ppc64: clean up kernel command line code
# 
# ChangeSet
#   2004/09/14 07:41:51-07:00 anton@samba.org 
#   [PATCH] ppc64: use nm --synthetic where available
#   
#   On new toolchains we need to use nm --synthetic or we miss code symbols.  Sam,
#   I'm not thrilled about this patch but I'm not sure of an easier way.  Any ideas?
#   
#   Signed-off-by: Anton Blanchard <anton@samba.org>
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# arch/ppc64/Makefile
#   2004/09/13 17:23:13-07:00 anton@samba.org +6 -0
#   ppc64: use nm --synthetic where available
# 
# ChangeSet
#   2004/09/14 07:41:39-07:00 anton@samba.org 
#   [PATCH] ppc64: give the kernel an OPD section
#   
#   From: Alan Modra <amodra@bigpond.net.au>
#   
#   Give the kernel an OPD section, required for recent ppc64 toolchains.
#   
#   Signed-off-by: Anton Blanchard <anton@samba.org>
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# arch/ppc64/kernel/vmlinux.lds.S
#   2004/09/13 17:23:12-07:00 anton@samba.org +4 -1
#   ppc64: give the kernel an OPD section
# 
# ChangeSet
#   2004/09/14 07:41:27-07:00 anton@samba.org 
#   [PATCH] ppc64: Enable NUMA API
#   
#   Plumb the NUMA API syscalls into ppc64.  Also add some missing cond_syscalls
#   so we still link with NUMA API disabled.
#   
#   Signed-off-by: Anton Blanchard <anton@samba.org>
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# kernel/sys.c
#   2004/09/13 17:23:12-07:00 anton@samba.org +2 -0
#   ppc64: Enable NUMA API
# 
# include/asm-ppc64/unistd.h
#   2004/09/13 17:23:12-07:00 anton@samba.org +3 -3
#   ppc64: Enable NUMA API
# 
# arch/ppc64/kernel/misc.S
#   2004/09/13 17:23:12-07:00 anton@samba.org +6 -6
#   ppc64: Enable NUMA API
# 
# ChangeSet
#   2004/09/14 07:41:15-07:00 anton@samba.org 
#   [PATCH] ppc64: RTAS error logs can appear twice in dmesg
#   
#   I've started seeing rtas errors printed twice.  Remove the second call to
#   printk_log_rtas.
#   
#   Signed-off-by: Anton Blanchard <anton@samba.org>
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# arch/ppc64/kernel/rtasd.c
#   2004/09/13 17:23:12-07:00 anton@samba.org +5 -7
#   ppc64: RTAS error logs can appear twice in dmesg
# 
# ChangeSet
#   2004/09/14 07:41:03-07:00 anton@samba.org 
#   [PATCH] ppc64: remove SPINLINE config option
#   
#   After the spinlock rework, CONFIG_SPINLINE doesn't work and causes a compile
#   error.  Remove it for now.
#   
#   Signed-off-by: Anton Blanchard <anton@samba.org>
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# arch/ppc64/lib/locks.c
#   2004/09/13 17:23:12-07:00 anton@samba.org +0 -4
#   ppc64: remove SPINLINE config option
# 
# arch/ppc64/Kconfig.debug
#   2004/09/13 17:23:12-07:00 anton@samba.org +0 -10
#   ppc64: remove SPINLINE config option
# 
# ChangeSet
#   2004/09/14 07:40:51-07:00 willschm@us.ibm.com 
#   [PATCH] ppc64: lparcfg whitespace and wordwrap cleanup.
#   
#   This patch is the result of running Lindent against
#   arch/ppc64/kernel/lparcfg.c.
#   
#   This cleans up an assortment of whitespace and wordwrap inconsistencies.
#   
#   Signed-off-by: Will Schmidt <willschm@us.ibm.com>
#   Signed-off-by: Paul Mackerras <paulus@samba.org>
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# arch/ppc64/kernel/lparcfg.c
#   2004/09/13 17:23:12-07:00 willschm@us.ibm.com +99 -91
#   ppc64: lparcfg whitespace and wordwrap cleanup.
# 
# ChangeSet
#   2004/09/14 07:40:39-07:00 willschm@us.ibm.com 
#   [PATCH] ppc64: lparcfg fixes for processor counts
#   
#   This patch corrects how the lparcfg interface was presenting the number of
#   active and potential processors.  (As reported in LTC bugzilla number 10889).
#   
#   - Correct output for partition_potential_processors and
#     system_active_processors.
#   - suppress pool related values in scenarios where they do not make
#     sense. (non-shared processor configurations)
#   - Display pool_capacity as a percentage, to match the behavior from
#     iSeries code.
#   
#   Signed-off-by: Will Schmidt <willschm@us.ibm.com>
#   Signed-off-by: Paul Mackerras <paulus@samba.org>
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# arch/ppc64/kernel/lparcfg.c
#   2004/09/13 17:23:11-07:00 willschm@us.ibm.com +29 -23
#   ppc64: lparcfg fixes for processor counts
# 
# ChangeSet
#   2004/09/14 07:40:27-07:00 davej@redhat.com 
#   [PATCH] Pointer dereference before NULL check in ACPI thermal driver
#   
#   Again, found with coverity's checker.
#   
#   Signed-off-by: Dave Jones <davej@redhat.com>
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# drivers/acpi/thermal.c
#   2004/09/13 17:23:11-07:00 davej@redhat.com +3 -1
#   Pointer dereference before NULL check in ACPI thermal driver
# 
# ChangeSet
#   2004/09/14 07:40:15-07:00 jason.davis@unisys.com 
#   [PATCH] ES7000 subarch update
#   
#   The patch below implements an algorithm to determine a unique GSI override
#   for mapping GSIs to IO-APIC pins correctly.  GSI overrides are required in
#   order for ES7000 machines to function properly since IRQ to pin mappings
#   are NOT all one-to-one.  This patch applies only to the Unisys specific
#   ES7000 machines and has been tested thoroughly on several models of the
#   ES7000 line.
#   
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# arch/i386/mach-es7000/es7000plat.c
#   2004/09/13 17:23:11-07:00 jason.davis@unisys.com +64 -17
#   ES7000 subarch update
# 
# arch/i386/mach-es7000/es7000.h
#   2004/09/13 17:23:11-07:00 jason.davis@unisys.com +7 -0
#   ES7000 subarch update
# 
# ChangeSet
#   2004/09/14 07:40:03-07:00 nathanl@austin.ibm.com 
#   [PATCH] fix schedstats null deref in sched_exec
#   
#   In sched_exec, schedstat_inc will dereference a null pointer if no domain
#   is found with the SD_BALANCE_EXEC flag set.  This was exposed during
#   testing of the previous patches where cpus are temporarily attached to a
#   dummy domain without SD_BALANCE_EXEC set.
#   
#   Signed-off-by: Nathan Lynch <nathanl@austin.ibm.com>
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
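#   An illustrative sketch of the guarded form described above (the domain
#   walk follows sched.c; the schedstat field name is only a placeholder):
#   
#       struct sched_domain *tmp, *sd = NULL;
#   
#       for_each_domain(this_cpu, tmp)
#               if (tmp->flags & SD_BALANCE_EXEC)
#                       sd = tmp;
#   
#       if (sd)                                 /* may be NULL: dummy domains */
#               schedstat_inc(sd, sbe_cnt);     /* placeholder field name */
# 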
# kernel/sched.c
#   2004/09/13 17:23:11-07:00 nathanl@austin.ibm.com +1 -1
#   fix schedstats null deref in sched_exec
# 
# ChangeSet
#   2004/09/14 03:03:26-07:00 rth@kanga.twiddle.home 
#   [ALPHA] Add waitid.
# 
# include/asm-alpha/unistd.h
#   2004/09/14 03:03:06-07:00 rth@kanga.twiddle.home +2 -1
#   Add waitid.
# 
# arch/alpha/kernel/systbls.S
#   2004/09/14 03:03:06-07:00 rth@kanga.twiddle.home +1 -0
#   Add waitid.
# 
# ChangeSet
#   2004/09/14 03:02:28-07:00 rth@kanga.twiddle.home 
#   [ALPHA] Arrange to return EINTR for sigsuspend on signal path.
# 
# arch/alpha/kernel/signal.c
#   2004/09/14 03:02:09-07:00 rth@kanga.twiddle.home +14 -4
#   Arrange to return EINTR for sigsuspend on signal path.
# 
# ChangeSet
#   2004/09/14 02:39:33-07:00 rth@kanga.twiddle.home 
#   [ALPHA] Use "long" on some internal bitops routines.
# 
# include/asm-alpha/compiler.h
#   2004/09/14 02:39:13-07:00 rth@kanga.twiddle.home +3 -3
#   Use long versions of ctz, clz, popcount.
# 
# include/asm-alpha/bitops.h
#   2004/09/14 02:39:13-07:00 rth@kanga.twiddle.home +2 -2
#   Use longs for floor/ceil_log2.
# 
# ChangeSet
#   2004/09/14 02:36:46-07:00 rth@kanga.twiddle.home 
#   [ALPHA] Check set_fd_set return.
# 
# arch/alpha/kernel/osf_sys.c
#   2004/09/14 02:36:25-07:00 rth@kanga.twiddle.home +4 -3
#   Check set_fd_set return.
# 
# ChangeSet
#   2004/09/14 09:54:04+01:00 nico@org.rmk.(none) 
#   [ARM PATCH] 2094/1: don't lose the system timer after resuming from sleep on SA11x0 and
#    PXA2xx
#   
#   Patch from Nicolas Pitre
#   
#   Let's make sure OSCR doesn't end up being restored with a value
#   past OSMR0, otherwise the system timer won't start ticking until
#   OSCR wraps around (approx. 17 min).
#   
#   Also set OSCR _after_ OIER is restored to avoid matching when
#   corresponding match interrupt is masked out.
#   
#   Signed-off-by: Nicolas Pitre 
# 
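#   The idea, as a rough sketch (not the literal pm.c diff; the saved_*
#   names are made up, OIER/OSMR0/OSCR/LATCH are the usual kernel symbols):
#   
#       OIER  = saved_oier;              /* restore interrupt mask first    */
#       OSMR0 = saved_osmr0;
#       OSCR  = OSMR0 - LATCH;           /* keep OSCR safely behind OSMR0   */
# 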
# arch/arm/mach-sa1100/pm.c
#   2004/09/14 02:51:46+01:00 nico@org.rmk.(none) +4 -3
#   [PATCH] 2094/1: don't lose the system timer after resuming from sleep on SA11x0 and
#    PXA2xx
# 
# arch/arm/mach-pxa/pm.c
#   2004/09/14 02:56:28+01:00 nico@org.rmk.(none) +4 -3
#   [PATCH] 2094/1: don't lose the system timer after resuming from sleep on SA11x0 and
#    PXA2xx
# 
# ChangeSet
#   2004/09/13 22:13:59-07:00 davem@nuts.davemloft.net 
#   [SPARC64]: __iomem annotations and iomap implementation.
# 
# include/asm-sparc64/io.h
#   2004/09/13 22:13:36-07:00 davem@nuts.davemloft.net +77 -67
#   [SPARC64]: __iomem annotations and iomap implementation.
# 
# include/asm-sparc64/ebus.h
#   2004/09/13 22:13:36-07:00 davem@nuts.davemloft.net +1 -1
#   [SPARC64]: __iomem annotations and iomap implementation.
# 
# arch/sparc64/lib/Makefile
#   2004/09/13 22:13:36-07:00 davem@nuts.davemloft.net +1 -1
#   [SPARC64]: __iomem annotations and iomap implementation.
# 
# arch/sparc64/kernel/power.c
#   2004/09/13 22:13:36-07:00 davem@nuts.davemloft.net +4 -4
#   [SPARC64]: __iomem annotations and iomap implementation.
# 
# arch/sparc64/kernel/pci_schizo.c
#   2004/09/13 22:13:36-07:00 davem@nuts.davemloft.net +2 -2
#   [SPARC64]: __iomem annotations and iomap implementation.
# 
# arch/sparc64/kernel/auxio.c
#   2004/09/13 22:13:36-07:00 davem@nuts.davemloft.net +6 -5
#   [SPARC64]: __iomem annotations and iomap implementation.
# 
# arch/sparc64/lib/iomap.c
#   2004/09/13 22:13:27-07:00 davem@nuts.davemloft.net +48 -0
#   [SPARC64]: __iomem annotations and iomap implementation.
# 
# arch/sparc64/lib/iomap.c
#   2004/09/13 22:13:27-07:00 davem@nuts.davemloft.net +0 -0
#   BitKeeper file /disk1/BK/sparc-2.6/arch/sparc64/lib/iomap.c
# 
# ChangeSet
#   2004/09/13 21:40:12-07:00 davem@nuts.davemloft.net 
#   [TCP]: Fix logic error in packets_out accounting.
#   
#   Noticed by Herbert Xu.
#   
#   Signed-off-by: David S. Miller <davem@davemloft.net>
# 
# net/ipv4/tcp_output.c
#   2004/09/13 21:39:17-07:00 davem@nuts.davemloft.net +1 -1
#   [TCP]: Fix logic error in packets_out accounting.
# 
# ChangeSet
#   2004/09/13 19:01:14-07:00 davem@nuts.davemloft.net 
#   [IOMAP]: Make ioport_map() take unsigned long port argument.
#   
#   Signed-off-by: David S. Miller <davem@davemloft.net>
# 
# lib/iomap.c
#   2004/09/13 19:00:43-07:00 davem@nuts.davemloft.net +4 -4
#   [IOMAP]: Make ioport_map() take unsigned long port argument.
# 
# include/asm-generic/iomap.h
#   2004/09/13 19:00:43-07:00 davem@nuts.davemloft.net +1 -1
#   [IOMAP]: Make ioport_map() take unsigned long port argument.
# 
# ChangeSet
#   2004/09/13 16:17:24-07:00 shemminger@osdl.org 
#   [B44]: Fix b44 I/O mem space access warnings.
#   
#   B44 driver was using unsigned long as an io memory address.
#   Recent changes caused this to be a warning.  This patch fixes that
#   and makes the readl/writel wrappers into inlines instead of macros
#   with magic variable side effect (yuck).
#   
#   Signed-off-by: David S. Miller <davem@davemloft.net>
# 
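#   The wrappers end up roughly of this shape (sketch, not the exact b44.h
#   hunk):
#   
#       static inline u32 br32(const struct b44 *bp, unsigned long reg)
#       {
#               return readl(bp->regs + reg);
#       }
#   
#       static inline void bw32(const struct b44 *bp, unsigned long reg, u32 val)
#       {
#               writel(val, bp->regs + reg);
#       }
# 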
# drivers/net/b44.h
#   2004/09/13 16:17:06-07:00 shemminger@osdl.org +1 -4
#   [B44]: Fix b44 I/O mem space access warnings.
#   
#   B44 driver was using unsigned long as an io memory address.
#   Recent changes caused this to be a warning.  This patch fixes that
#   and makes the readl/writel wrappers into inlines instead of macros
#   with magic variable side effect (yuck).
#   
#   Signed-off-by: David S. Miller <davem@davemloft.net>
# 
# drivers/net/b44.c
#   2004/09/13 16:17:06-07:00 shemminger@osdl.org +103 -92
#   [B44]: Fix b44 I/O mem space access warnings.
#   
#   B44 driver was using unsigned long as an io memory address.
#   Recent changes caused this to be a warning.  This patch fixes that
#   and makes the readl/writel wrappers into inlines instead of macros
#   with magic variable side effect (yuck).
#   
#   Signed-off-by: David S. Miller <davem@davemloft.net>
# 
# ChangeSet
#   2004/09/13 16:08:46-07:00 martin.wilck@fujitsu-siemens.com 
#   [TG3]: Fix pause handling, we had duplicate flags for the same thing.
#   
#   Signed-off-by: David S. Miller <davem@davemloft.net>
# 
# drivers/net/tg3.h
#   2004/09/13 16:08:18-07:00 martin.wilck@fujitsu-siemens.com +0 -2
#   [TG3]: Fix pause handling, we had duplicate flags for the same thing.
#   
#   Signed-off-by: David S. Miller <davem@davemloft.net>
# 
# drivers/net/tg3.c
#   2004/09/13 16:08:17-07:00 martin.wilck@fujitsu-siemens.com +34 -27
#   [TG3]: Fix pause handling, we had duplicate flags for the same thing.
#   
#   Signed-off-by: David S. Miller <davem@davemloft.net>
# 
# ChangeSet
#   2004/09/13 16:04:36-07:00 i@stingr.net 
#   [IPV4]: Add wccp v1/v2 support to ip_gre.c
#   
#   Signed-off-by: David S. Miller <davem@davemloft.net>
# 
# net/ipv4/ip_gre.c
#   2004/09/13 16:04:06-07:00 i@stingr.net +12 -1
#   [IPV4]: Add wccp v1/v2 support to ip_gre.c
# 
# include/linux/if_ether.h
#   2004/09/13 16:04:06-07:00 i@stingr.net +2 -0
#   [IPV4]: Add wccp v1/v2 support to ip_gre.c
# 
# ChangeSet
#   2004/09/13 23:52:16+01:00 ben-linux@org.rmk.(none) 
#   [ARM PATCH] 2093/1: S3C2410 - remove unnecessary resource from NAND
#   
#   Patch from Ben Dooks
#   
#   Removed a resource description left over from a copying error
#   
#   Thanks to Klaus Fetscher for pointing this out.
#   
#   Signed-off-by: Ben Dooks 
# 
# arch/arm/mach-s3c2410/devs.c
#   2004/09/13 13:18:54+01:00 ben-linux@org.rmk.(none) +0 -6
#   [PATCH] 2093/1: S3C2410 - remove unnecessary resource from NAND
# 
# ChangeSet
#   2004/09/13 23:47:50+01:00 ben-linux@org.rmk.(none) 
#   [ARM PATCH] 2092/1: S3C2410 - gpio bugfix and additions
#   
#   Patch from Ben Dooks
#   
#   Fix inverted mask in s3c2410_gpio_setpin() function,
#   add s3c2410_modify_misccr() for shared register, and
#   add s3c2410_gpio_getpin()
#   
#   Signed-off-by: Ben Dooks 
# 
# include/asm-arm/arch-s3c2410/hardware.h
#   2004/09/12 17:15:18+01:00 ben-linux@org.rmk.(none) +5 -0
#   [PATCH] 2092/1: S3C2410 - gpio bugfix and additions
# 
# arch/arm/mach-s3c2410/gpio.c
#   2004/09/12 16:59:08+01:00 ben-linux@org.rmk.(none) +28 -1
#   [PATCH] 2092/1: S3C2410 - gpio bugfix and additions
# 
# ChangeSet
#   2004/09/13 23:43:25+01:00 ben-linux@org.rmk.(none) 
#   [ARM PATCH] 2091/1: S3C2410 - change id of s3c2410-ohci
#   
#   Patch from Ben Dooks
#   
#   Fix missed ID change on s3c2410-usb
#   
#   Signed-off-by: Ben Dooks 
# 
# ChangeSet
#   2004/09/13 15:43:14-07:00 davem@nuts.davemloft.net 
#   [TCP]: Just silently ignore ICMP Source Quench messages.
#   
#   Recommended by draft-gont-tcpm-icmp-attacks-01.txt
#   
#   Signed-off-by: David S. Miller <davem@davemloft.net>
# 
# net/ipv4/tcp_ipv4.c
#   2004/09/13 15:42:33-07:00 davem@nuts.davemloft.net +1 -5
#   [TCP]: Just silently ignore ICMP Source Quench messages.
# 
# arch/arm/mach-s3c2410/devs.c
#   2004/09/12 12:39:29+01:00 ben-linux@org.rmk.(none) +1 -1
#   [PATCH] 2091/1: S3C2410 - change id of s3c2410-ohci
# 
# ChangeSet
#   2004/09/13 23:39:13+01:00 ben-linux@org.rmk.(none) 
#   [ARM PATCH] 2090/2: S3C2410 - usb gadget (udc) include
#   
#   Patch from Ben Dooks
#   
#   Header file for USB gadget controller (udc) for the
#   Samsung S3C2410 SoC
#   
#   Signed-off-by: Herbert Poetzl 
#   Signed-off-by: Ben Dooks 
# 
# include/asm-arm/arch-s3c2410/regs-udc.h
#   2004/09/12 01:55:24+01:00 ben-linux@org.rmk.(none) +162 -0
#   [PATCH] 2090/2: S3C2410 - usb gadget (udc) include
# 
# include/asm-arm/arch-s3c2410/regs-udc.h
#   2004/09/12 01:55:24+01:00 ben-linux@org.rmk.(none) +0 -0
#   BitKeeper file /usr/src/bk/linux-2.6-rmk/include/asm-arm/arch-s3c2410/regs-udc.h
# 
# ChangeSet
#   2004/09/13 23:33:33+01:00 rmk@flint.arm.linux.org.uk 
#   [ARM] Revive kapmd and provide apm_queue_event()
#   
#   Add kapmd thread to provide a process context to handle "APM"
#   events submitted via apm_queue_event().
#   
#   Add apm_queue_event(), which can be called from hardware
#   interrupt handlers and the like, typically to fire off a
#   suspend.
# 
# include/asm-arm/apm.h
#   2004/09/13 23:30:04+01:00 rmk@flint.arm.linux.org.uk +5 -0
#   Add apm_queue_event() prototype
# 
# arch/arm/kernel/apm.c
#   2004/09/13 23:29:42+01:00 rmk@flint.arm.linux.org.uk +79 -28
#   Revive kapmd
# 
# ChangeSet
#   2004/09/13 23:28:34+01:00 rmk@flint.arm.linux.org.uk 
#   [ARM] Update APM state definitions
#   
#   Move existing APM state definitions into struct apm_power_info,
#   and add further definitions describing other fields.
# 
# include/asm-arm/apm.h
#   2004/09/13 23:24:14+01:00 rmk@flint.arm.linux.org.uk +23 -19
#   Move existing APM state definitions into struct apm_power_info,
#   and add further definitions describing other fields.
# 
# ChangeSet
#   2004/09/13 23:22:23+01:00 rmk@flint.arm.linux.org.uk 
#   [ARM] Keep APM threads frozen
#   
#   Ensure threads waiting for suspend to occur in apm_ioctl are
#   not woken by the pm_suspend thread freezing - they're already
#   frozen.
# 
# arch/arm/kernel/apm.c
#   2004/09/13 23:19:11+01:00 rmk@flint.arm.linux.org.uk +22 -13
#   Keep APM threads frozen
# 
# ChangeSet
#   2004/09/13 23:18:17+01:00 rmk@flint.arm.linux.org.uk 
#   [ARM] No point having "nonblock" local variable - kill it.
# 
# arch/arm/kernel/apm.c
#   2004/09/13 23:14:58+01:00 rmk@flint.arm.linux.org.uk +2 -2
#   No point having "nonblock" local variable - kill it.
# 
# ChangeSet
#   2004/09/13 23:13:03+01:00 rmk@flint.arm.linux.org.uk 
#   [ARM] Convert suspend to a state machine.
#   
#   The original version had issues when two suspend events came
#   in at around the same time, causing APM to get confused:
#   threads became stuck in APM_IOC_SUSPEND and suspends_pending
#   incremented on each apm --suspend call.
#   
#   Now, we only add a suspend event to a users queue and increment
#   suspends_pending if the user isn't already in the middle of
#   handling a suspend event.
# 
# ChangeSet
#   2004/09/13 15:11:27-07:00 nico@cam.org 
#   [PATCH] linux/dma-mapping.h needs linux/device.h
#   
#   It seems that most architectures already include linux/device.h in their
#   own asm/dma-mapping.h.  Most but not all, and some drivers fail to
#   compile on those architectures that don't.  Since everybody needs it
#   let's include device.h from one place only and fix compilation for
#   everybody.
# 
# include/linux/dma-mapping.h
#   2004/09/10 08:28:58-07:00 nico@cam.org +1 -0
#   linux/dma-mapping.h needs linux/device.h
# 
# include/asm-x86_64/dma-mapping.h
#   2004/09/10 08:33:31-07:00 nico@cam.org +0 -1
#   linux/dma-mapping.h needs linux/device.h
# 
# include/asm-sparc/dma-mapping.h
#   2004/09/10 08:30:54-07:00 nico@cam.org +0 -1
#   linux/dma-mapping.h needs linux/device.h
# 
# include/asm-sh64/dma-mapping.h
#   2004/09/10 08:33:57-07:00 nico@cam.org +0 -1
#   linux/dma-mapping.h needs linux/device.h
# 
# include/asm-sh/dma-mapping.h
#   2004/09/10 08:34:27-07:00 nico@cam.org +0 -1
#   linux/dma-mapping.h needs linux/device.h
# 
# include/asm-ppc64/dma-mapping.h
#   2004/09/10 08:34:51-07:00 nico@cam.org +0 -1
#   linux/dma-mapping.h needs linux/device.h
# 
# include/asm-ppc/dma-mapping.h
#   2004/09/10 08:35:12-07:00 nico@cam.org +0 -1
#   linux/dma-mapping.h needs linux/device.h
# 
# include/asm-mips/dma-mapping.h
#   2004/09/10 08:35:32-07:00 nico@cam.org +0 -1
#   linux/dma-mapping.h needs linux/device.h
# 
# include/asm-ia64/dma-mapping.h
#   2004/09/10 08:32:59-07:00 nico@cam.org +0 -1
#   linux/dma-mapping.h needs linux/device.h
# 
# include/asm-i386/dma-mapping.h
#   2004/09/10 08:30:26-07:00 nico@cam.org +0 -1
#   linux/dma-mapping.h needs linux/device.h
# 
# arch/arm/kernel/apm.c
#   2004/09/13 23:09:29+01:00 rmk@flint.arm.linux.org.uk +44 -29
#   Convert suspend to a state machine.
# 
# ChangeSet
#   2004/09/13 23:04:59+01:00 rmk@flint.arm.linux.org.uk 
#   [ARM] Remove APM standby support - it's unused.
# 
# arch/arm/kernel/apm.c
#   2004/09/13 23:01:58+01:00 rmk@flint.arm.linux.org.uk +3 -29
#   Remove APM standby support - it's unused.
# 
# ChangeSet
#   2004/09/13 22:59:57+01:00 rmk@flint.arm.linux.org.uk 
#   [ARM] APM: "Battery life" needs to be a signed integer.
# 
# include/asm-arm/apm.h
#   2004/09/13 22:54:57+01:00 rmk@flint.arm.linux.org.uk +1 -1
#   "Battery life" needs to be a signed integer.
# 
# arch/arm/kernel/apm.c
#   2004/09/13 22:54:52+01:00 rmk@flint.arm.linux.org.uk +1 -1
#   "Battery life" needs to be a signed integer.
# 
# ChangeSet
#   2004/09/13 22:52:50+01:00 rmk@flint.arm.linux.org.uk 
#   [ARM] Convert APM user list lock to r/w sem
#   
#   Convert user_list_lock spinlock to a read/write semaphore;
#   the spinlock was affording us very little protection.
# 
# arch/arm/kernel/apm.c
#   2004/09/13 22:49:32+01:00 rmk@flint.arm.linux.org.uk +9 -9
#   Convert user_list_lock spinlock to a read/write semaphore;
#   the spinlock was affording us very little protection.
# 
# ChangeSet
#   2004/09/13 22:43:47+01:00 rmk@flint.arm.linux.org.uk 
#   [ARM] Abstract APM circular queue object.
# 
# arch/arm/kernel/apm.c
#   2004/09/13 22:39:18+01:00 rmk@flint.arm.linux.org.uk +33 -21
#   Abstract APM circular queue object.
# 
# ChangeSet
#   2004/09/13 22:38:26+01:00 rmk@flint.arm.linux.org.uk 
#   [ARM] Convert list_for_each()/list_entry() to list_for_each_entry()
# 
# arch/arm/kernel/apm.c
#   2004/09/13 22:35:58+01:00 rmk@flint.arm.linux.org.uk +4 -8
#   Convert list_for_each()/list_entry() to list_for_each_entry()
# 
# ChangeSet
#   2004/09/13 22:34:49+01:00 rmk@flint.arm.linux.org.uk 
#   [ARM] Remove the hh.org H3600 "example" code.
# 
# arch/arm/kernel/apm.c
#   2004/09/13 22:29:29+01:00 rmk@flint.arm.linux.org.uk +0 -13
#   Remove the hh.org H3600 "example" code.
# 
# ChangeSet
#   2004/09/13 22:28:23+01:00 rmk@flint.arm.linux.org.uk 
#   [ARM] Fix ARM APM emulation sparse errors
#   
#   arch/arm/kernel/apm.c:57:16: warning: dubious one-bit signed bitfield
#   arch/arm/kernel/apm.c:58:17: warning: dubious one-bit signed bitfield
#   arch/arm/kernel/apm.c:59:17: warning: dubious one-bit signed bitfield
#   arch/arm/kernel/apm.c:60:23: warning: dubious one-bit signed bitfield
# 
# arch/arm/kernel/apm.c
#   2004/09/13 22:25:13+01:00 rmk@flint.arm.linux.org.uk +4 -4
#   Fix a few extra sparse errors
# 
# ChangeSet
#   2004/09/13 14:05:42-07:00 akpm@osdl.org 
#   [PATCH] isofs buffer management fix
#   
#   There's a double-free in the isofs filesystem.
#   
#   Invalidate this pointer so it doesn't get freed twice.
#   
#   Signed-off-by: Andrew Morton <akpm@osdl.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# fs/isofs/rock.c
#   2004/09/10 01:47:00-07:00 akpm@osdl.org +1 -1
#   isofs buffer management fix
# 
# ChangeSet
#   2004/09/13 14:05:30-07:00 anton@samba.org 
#   [PATCH] Backward compatibility for compat sched_getaffinity
#   
#   The following patch special-cases the NR_CPUS <= BITS_PER_COMPAT_LONG case.
#   Without this patch, a 32bit task would be required to have a 64bit
#   cpumask no matter what value of NR_CPUS is used.
#   
#   With this patch a compat long sized bitmask is allowed if NR_CPUS is
#   small enough to fit within it.
#   
#   Of course applications should be using the glibc wrappers that use an
#   opaque cpu_mask_t type, but there could be older applications using the
#   syscalls directly.
#   
#   Signed-off-by: Anton Blanchard <anton@samba.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
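#   A hypothetical helper showing the length rule described above (not the
#   actual kernel/compat.c code):
#   
#       static int compat_affinity_len_ok(unsigned int len)
#       {
#               if (NR_CPUS <= BITS_PER_COMPAT_LONG)
#                       return len >= sizeof(compat_ulong_t);
#               return len >= sizeof(cpumask_t);
#       }
# 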
# kernel/compat.c
#   2004/09/09 05:59:05-07:00 anton@samba.org +7 -3
#   Backward compatibility for compat sched_getaffinity
# 
# ChangeSet
#   2004/09/13 14:05:18-07:00 anton@samba.org 
#   [PATCH] Clean up compat sched affinity syscalls
#   
#   Remove the set_fs hack in the compat affinity calls. Create
#   sched_getaffinity and sched_setaffinity helper functions that both the
#   native and compat affinity syscalls use.
#   
#   Also make the compat functions match what the native ones are doing now:
#   setaffinity calls succeed no matter what length the bitmask is, but
#   getaffinity calls must pass in bitmasks at least as long as the kernel
#   type.
#   
#   Signed-off-by: Anton Blanchard <anton@samba.org>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# kernel/sched.c
#   2004/09/09 05:09:57-07:00 anton@samba.org +60 -42
#   Clean up compat sched affinity syscalls
# 
# kernel/compat.c
#   2004/09/09 05:58:30-07:00 anton@samba.org +29 -67
#   Clean up compat sched affinity syscalls
# 
# include/linux/sched.h
#   2004/09/09 05:09:57-07:00 anton@samba.org +3 -0
#   Clean up compat sched affinity syscalls
# 
# ChangeSet
#   2004/09/13 13:47:41-07:00 torvalds@ppc970.osdl.org 
#   Export new PCI iomem access interfaces to modules too.
# 
# lib/iomap.c
#   2004/09/13 13:47:35-07:00 torvalds@ppc970.osdl.org +11 -0
#   Export new PCI iomem access interfaces to modules too.
# 
# ChangeSet
#   2004/09/13 13:03:56-07:00 herbert@gondor.apana.org.au 
#   [IPV6]: Add option to copy DSCP in decap in ip6_tunnel.
#   
#   Here is a patch that allows the copying of the DSCP during decapsulation
#   for ip6_tunnel.  I've made it a separate option from the one that
#   determines the copying during encapsulation since the DSCP processing
#   may be asymmetric.  It also means that we preserve compatibility should
#   anyone be relying on the current behaviour.
#   
#   inet_ecn.h might appear to be an odd place for ipv6_copy_dscp, but
#   I couldn't put it in dsfield.h since I want to use ipv6_get_dsfield
#   in inet_ecn.h later on.  The other alternative would be to define
#   INET_ECN_MASK in dsfield.h.
#   
#   Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
#   Signed-off-by: David S. Miller <davem@davemloft.net>
# 
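#   The new helper is roughly of this shape (sketch; see inet_ecn.h for the
#   real definition):
#   
#       static inline void ipv6_copy_dscp(struct ipv6hdr *outer, struct ipv6hdr *inner)
#       {
#               u8 dscp = ipv6_get_dsfield(outer) & ~INET_ECN_MASK;
#   
#               ipv6_change_dsfield(inner, INET_ECN_MASK, dscp);
#       }
# 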
# net/ipv6/ip6_tunnel.c
#   2004/09/13 13:03:39-07:00 herbert@gondor.apana.org.au +2 -0
#   [IPV6]: Add option to copy DSCP in decap in ip6_tunnel.
#   
#   Here is a patch that allows the copying of the DSCP during decapsulation
#   for ip6_tunnel.  I've made it a separate option from the one that
#   determines the copying during encapsulation since the DSCP processing
#   may be asymmetric.  It also means that we preserve compatibility should
#   anyone be relying on the current behaviour.
#   
#   inet_ecn.h might appear to be an odd place for ipv6_copy_dscp, but
#   I couldn't put it in dsfield.h since I want to use ipv6_get_dsfield
#   in inet_ecn.h later on.  The other alternative would be to define
#   INET_ECN_MASK in dsfield.h.
#   
#   Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
#   Signed-off-by: David S. Miller <davem@davemloft.net>
# 
# include/net/inet_ecn.h
#   2004/09/13 13:03:39-07:00 herbert@gondor.apana.org.au +7 -0
#   [IPV6]: Add option to copy DSCP in decap in ip6_tunnel.
#   
#   Here is a patch that allows the copying of the DSCP during decapsulation
#   for ip6_tunnel.  I've made it a separate option from the one that
#   determines the copying during encapsulation since the DSCP processing
#   may be asymmetric.  It also means that we preserve compatibility should
#   anyone be relying on the current behaviour.
#   
#   inet_ecn.h might appear to be an odd place for ipv6_copy_dscp, but
#   I couldn't put it in dsfield.h since I want to use ipv6_get_dsfield
#   in inet_ecn.h later on.  The other alternative would be to define
#   INET_ECN_MASK in dsfield.h.
#   
#   Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
#   Signed-off-by: David S. Miller <davem@davemloft.net>
# 
# include/linux/ip6_tunnel.h
#   2004/09/13 13:03:39-07:00 herbert@gondor.apana.org.au +2 -0
#   [IPV6]: Add option to copy DSCP in decap in ip6_tunnel.
#   
#   Here is a patch that allows the copying of the DSCP during decapsulation
#   for ip6_tunnel.  I've made it a separate option from the one that
#   determines the copying during encapsulation since the DSCP processing
#   may be asymmetric.  It also means that we preserve compatibility should
#   anyone be relying on the current behaviour.
#   
#   inet_ecn.h might appear to be an odd place for ipv6_copy_dscp, but
#   I couldn't put it in dsfield.h since I want to use ipv6_get_dsfield
#   in inet_ecn.h later on.  The other alternative would be to define
#   INET_ECN_MASK in dsfield.h.
#   
#   Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
#   Signed-off-by: David S. Miller <davem@davemloft.net>
# 
# ChangeSet
#   2004/09/13 12:58:04-07:00 ak@muc.de 
#   [NET]: Fix missing spin lock in lltx path.
#   
#   This fixes a silly missing spin lock in the relock path. For some 
#   reason it seems to still work when you don't have spinlock debugging
#   enabled.
#   
#   Please apply.
#   
#   Thanks to Arjan's spinlock debug kernel for finding it.
#   
#   Signed-off-by: Andi Kleen <ak@muc.de>
#   Signed-off-by: David S. Miller <davem@davemloft.net>
# 
# net/sched/sch_generic.c
#   2004/09/13 12:57:46-07:00 ak@muc.de +3 -1
#   [NET]: Fix missing spin lock in lltx path.
#   
#   This fixes a silly missing spin lock in the relock path. For some 
#   reason it seems to still work when you don't have spinlock debugging
#   enabled.
#   
#   Please apply.
#   
#   Thanks to Arjan's spinlock debug kernel for finding it.
#   
#   Signed-off-by: Andi Kleen <ak@muc.de>
#   Signed-off-by: David S. Miller <davem@davemloft.net>
# 
# ChangeSet
#   2004/09/13 18:55:39+00:00 aegl@agluck-lia64.sc.intel.com 
#   Merge agluck-lia64.sc.intel.com:/data/home/aegl/BK/work/stephane
#   into agluck-lia64.sc.intel.com:/data/home/aegl/BK/linux-ia64-release-2.6.9
# 
# arch/ia64/Makefile
#   2004/09/13 18:55:33+00:00 aegl@agluck-lia64.sc.intel.com +0 -0
#   Auto merged
# 
# ChangeSet
#   2004/09/13 18:48:29+00:00 eranian@hpl.hp.com 
#   [IA64] Makefile: fix for the PTRACE_SYSCALL corruption bug
#   
#   Thanks to David for his help in tracking it down.
#   
#     compile the kernel with sibling call optimization
#     turned off. There is a problem with all functions
#     using the optimization and the asmlinkage attribute.
#     The compiler should not perform the optimization on
#     these functions because it cannot preserve the syscall
#     parameters in the callee. This caused SIGSEGV on programs
#     traced with PTRACE_SYSCALL, for instance.
#   
#   signed-off-by: stephane eranian <eranian@hpl.hp.com>
#   Signed-off-by: Tony Luck <tony.luck@intel.com>
# 
# arch/ia64/Makefile
#   2004/09/13 18:44:37+00:00 aegl@agluck-lia64.sc.intel.com +1 -1
#   Turn off sibling call optimizations (avoid asmlinkage breakage).
# 
# ChangeSet
#   2004/09/13 11:32:00-07:00 torvalds@ppc970.osdl.org 
#   Add skeleton "generic IO mapping" infrastructure.
#   
#   Jeff wants to use this to clean up SATA and some network drivers.
# 
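#   An illustrative driver-side use of the new calls (sketch only; the
#   MY_STATUS/MY_CTRL offsets are made up):
#   
#       void __iomem *regs = pci_iomap(pdev, 0, 0);    /* map BAR 0, full length */
#       u32 status;
#   
#       if (!regs)
#               return -ENOMEM;
#       status = ioread32(regs + MY_STATUS);           /* same call for MMIO or PIO */
#       iowrite32(status | MY_CTRL_START, regs + MY_CTRL);
#       pci_iounmap(pdev, regs);
# 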
# lib/iomap.c
#   2004/09/13 11:31:53-07:00 torvalds@ppc970.osdl.org +110 -0
# 
# include/asm-generic/iomap.h
#   2004/09/13 11:31:53-07:00 torvalds@ppc970.osdl.org +44 -0
# 
# lib/iomap.c
#   2004/09/13 11:31:53-07:00 torvalds@ppc970.osdl.org +0 -0
#   BitKeeper file /home/torvalds/v2.6/linux/lib/iomap.c
# 
# lib/Makefile
#   2004/09/13 11:31:53-07:00 torvalds@ppc970.osdl.org +1 -0
#   Add skeleton "generic IO mapping" infrastructure.
#   
#   Jeff wants to use this to clean up SATA and some network drivers.
# 
# include/asm-generic/iomap.h
#   2004/09/13 11:31:53-07:00 torvalds@ppc970.osdl.org +0 -0
#   BitKeeper file /home/torvalds/v2.6/linux/include/asm-generic/iomap.h
# 
# include/asm-ppc64/io.h
#   2004/09/13 11:31:52-07:00 torvalds@ppc970.osdl.org +2 -0
#   Add skeleton "generic IO mapping" infrastructure.
#   
#   Jeff wants to use this to clean up SATA and some network drivers.
# 
# include/asm-i386/io.h
#   2004/09/13 11:31:52-07:00 torvalds@ppc970.osdl.org +2 -0
#   Add skeleton "generic IO mapping" infrastructure.
#   
#   Jeff wants to use this to clean up SATA and some network drivers.
# 
# arch/x86_64/Kconfig
#   2004/09/13 11:31:52-07:00 torvalds@ppc970.osdl.org +4 -0
#   Add skeleton "generic IO mapping" infrastructure.
#   
#   Jeff wants to use this to clean up SATA and some network drivers.
# 
# arch/ppc64/Kconfig
#   2004/09/13 11:31:52-07:00 torvalds@ppc970.osdl.org +4 -0
#   Add skeleton "generic IO mapping" infrastructure.
#   
#   Jeff wants to use this to clean up SATA and some network drivers.
# 
# arch/ppc/Kconfig
#   2004/09/13 11:31:52-07:00 torvalds@ppc970.osdl.org +4 -0
#   Add skeleton "generic IO mapping" infrastructure.
#   
#   Jeff wants to use this to clean up SATA and some network drivers.
# 
# arch/ia64/Kconfig
#   2004/09/13 11:31:52-07:00 torvalds@ppc970.osdl.org +4 -0
#   Add skeleton "generic IO mapping" infrastructure.
#   
#   Jeff wants to use this to clean up SATA and some network drivers.
# 
# arch/i386/Kconfig
#   2004/09/13 11:31:52-07:00 torvalds@ppc970.osdl.org +4 -0
#   Add skeleton "generic IO mapping" infrastructure.
#   
#   Jeff wants to use this to clean up SATA and some network drivers.
# 
# arch/arm/Kconfig
#   2004/09/13 11:31:52-07:00 torvalds@ppc970.osdl.org +4 -0
#   Add skeleton "generic IO mapping" infrastructure.
#   
#   Jeff wants to use this to clean up SATA and some network drivers.
# 
# arch/alpha/Kconfig
#   2004/09/13 11:31:52-07:00 torvalds@ppc970.osdl.org +4 -0
#   Add skeleton "generic IO mapping" infrastructure.
#   
#   Jeff wants to use this to clean up SATA and some network drivers.
# 
# ChangeSet
#   2004/09/13 09:41:04-07:00 bzolnier@elka.pw.edu.pl 
#   [PATCH] ide: small cleanup for sis5513
#   
#   Sigh, Thomas Gleixner pointed out that his
#   sis5518 fix didn't need forward porting.
#   
#   Signed-off-by: Bartlomiej Zolnierkiewicz <bzolnier@elka.pw.edu.pl>
#   Signed-off-by: Linus Torvalds <torvalds@osdl.org>
# 
# drivers/ide/pci/sis5513.c
#   2004/09/13 05:53:51-07:00 bzolnier@elka.pw.edu.pl +10 -16
#   ide: small cleanup for sis5513
# 
# ChangeSet
#   2004/09/13 16:04:16+09:00 yoshfuji@linux-ipv6.org 
#   [IPV6] purge routes via non-router neighbour but gateway.
#   
#   Signed-off-by: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
# 
# ChangeSet
#   2004/09/13 16:02:20+09:00 yoshfuji@linux-ipv6.org 
#   [IPV6] ensure aging of default routes.
#   
#   This patch is the product of collaboration with Ville Nuorvala 
#   <vnuorval@tcs.hut.fi>.
#   
#   Signed-off-by: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
# 
# ChangeSet
#   2004/09/13 15:59:11+09:00 yoshfuji@linux-ipv6.org 
#   [IPV6] don't use expired default routes.
#   
#   Signed-off-by: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
# 
# ChangeSet
#   2004/09/13 15:57:40+09:00 yoshfuji@linux-ipv6.org 
#   [IPV6] NDISC: Fix message validation against Redirects.
#   
#   Signed-off-by: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
# 
# ChangeSet
#   2004/09/13 15:56:55+09:00 yoshfuji@linux-ipv6.org 
#   [NET] NEIGHBOUR: improve neighbour state machine.
#   
#   This centralizes neighbour state transitions by timer into
#   neigh_timer_handler(), and kills neigh_sync().
#   This improves the timing accuracy of state transitions.
#   
#   neigh_timer_handler() for each entry is now responsible
#   for state transitions of the entry, and
#   neigh_periodic_timer() is just for garbage collection.
#   
#   Signed-off-by: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
# 
# ChangeSet
#   2004/09/13 15:56:10+09:00 yoshfuji@linux-ipv6.org 
#   [IPV6] NDISC: update entry appropriately when receiving NS.
#   
#   Update neighbour entry appropriately by passing correct flags
#   when receiving NS.
#   
#   Signed-off-by: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
# 
# ChangeSet
#   2004/09/13 15:54:32+09:00 yoshfuji@linux-ipv6.org 
#   [NET] NEIGHBOUR: use time_after() and its friends.
#   
#   Signed-off-by: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
# 
# ChangeSet
#   2004/09/13 15:54:11+09:00 yoshfuji@linux-ipv6.org 
#   [IPV6] NDISC: update IsRouter flag appropriately.
#   
#   Update the IsRouter (NTF_ROUTER) flag appropriately.
#   Specifically, 
#    - we should not update it blindly; if the Override flag is 
#      unset and lladdr is different, we should NOT.
#    - we should set it when we have received RA.
#    - we should set it when we have received Redirect
#      whose target is off-link.
#   
#   Signed-off-by: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
# 
# ChangeSet
#   2004/09/13 15:52:13+09:00 yoshfuji@linux-ipv6.org 
#   [NET] NEIGHBOUR: merge two flags for neigh_update() into one.
#   
#   This is because SUSPECT_CONNECTED can be effective 
#   only if OVERRIDE is unset, and used only if RETAIN_STATE is set.
#   
#   Signed-off-by: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
# 
# ChangeSet
#   2004/09/13 15:51:20+09:00 yoshfuji@linux-ipv6.org 
#   [IPV6] NDISC: keep original state if new state is STALE and lladdr is unchanged
#   
#   Signed-off-by: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
# 
# ChangeSet
#   2004/09/13 15:50:56+09:00 yoshfuji@linux-ipv6.org 
#   [IPV6] NDISC: suspect REACHABLE entry if new lladdr is different.
#   
#   When we receive NA without Override flag, if it comes with
#   different lladdr from one in our REACHABLE entry,
#   set the state to STALE. (RFC2461 7.2.5)
#   
#   Signed-off-by: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
# 
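#   The rule boils down to something like this inside neigh_update()
#   (sketch, not the exact hunk):
#   
#       if (!(flags & NEIGH_UPDATE_F_OVERRIDE) && lladdr &&
#           (old & NUD_CONNECTED) &&
#           memcmp(lladdr, neigh->ha, neigh->dev->addr_len) != 0)
#               new = NUD_STALE;
# 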
# ChangeSet
#   2004/09/13 15:50:00+09:00 yoshfuji@linux-ipv6.org 
#   [NET] NEIGHBOUR: save number of arguments for neigh_update() by flags.
#   
#   Signed-off-by: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
# 
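#   After this change a caller passes behaviour flags in one argument, e.g.
#   (illustrative call; see include/net/neighbour.h for the flag names):
#   
#       neigh_update(neigh, lladdr, NUD_REACHABLE,
#                    NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN);
# 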
# net/ipv6/ip6_fib.c
#   2004/09/13 16:04:07+09:00 yoshfuji@linux-ipv6.org +5 -0
#   [IPV6] purge routes via non-router neighbour but gateway.
#   
#   Signed-off-by: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
# 
# net/ipv6/route.c
#   2004/09/13 16:02:09+09:00 yoshfuji@linux-ipv6.org +4 -4
#   [IPV6] ensure aging of default routes.
#   
#   This patch is the product of collaboration with Ville Nuorvala 
#   <vnuorval@tcs.hut.fi>.
#   
#   Signed-off-by: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
# 
# net/ipv6/ip6_fib.c
#   2004/09/13 16:02:09+09:00 yoshfuji@linux-ipv6.org +7 -0
#   [IPV6] ensure aging of default routes.
#   
#   This patch is the product of collaboration with Ville Nuorvala 
#   <vnuorval@tcs.hut.fi>.
#   
#   Signed-off-by: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
# 
# net/ipv6/route.c
#   2004/09/13 15:59:01+09:00 yoshfuji@linux-ipv6.org +4 -0
#   [IPV6] don't use expired default routes.
#   
#   Signed-off-by: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
# 
# net/ipv6/route.c
#   2004/09/13 15:57:29+09:00 yoshfuji@linux-ipv6.org +26 -18
#   [IPV6] NDISC: Fix message validation against Redirects.
#   
#   Signed-off-by: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
# 
# net/ipv6/ndisc.c
#   2004/09/13 15:57:29+09:00 yoshfuji@linux-ipv6.org +2 -15
#   [IPV6] NDISC: Fix message validation against Redirects.
#   
#   Signed-off-by: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
# 
# include/net/ip6_route.h
#   2004/09/13 15:57:29+09:00 yoshfuji@linux-ipv6.org +1 -0
#   [IPV6] NDISC: Fix message validation against Redirects.
#   
#   Signed-off-by: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
# 
# net/core/neighbour.c
#   2004/09/13 15:56:45+09:00 yoshfuji@linux-ipv6.org +74 -80
#   [NET] NEIGHBOUR: improve neighbour state machine.
#   
#   This centralizes neighbour state transitions by timer into
#   neigh_timer_handler(), and kills neigh_sync().
#   This improves the timing accuracy of state transitions.
#   
#   neigh_timer_handler() for each entry is now responsible
#   for state transitions of the entry, and
#   neigh_periodic_timer() is just for garbage collection.
#   
#   Signed-off-by: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
# 
# include/net/neighbour.h
#   2004/09/13 15:56:45+09:00 yoshfuji@linux-ipv6.org +1 -1
#   [NET] NEIGHBOUR: improve neighbour state machine.
#   
#   This centralizes neighbour state transitions by timer into
#   neigh_timer_handler(), and kills neigh_sync().
#   This improves the timing accuracy of state transitions.
#   
#   neigh_timer_handler() for each entry is now responsible
#   for state transitions of the entry, and
#   neigh_periodic_timer() is just for garbage collection.
#   
#   Signed-off-by: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
# 
# net/ipv6/ndisc.c
#   2004/09/13 15:55:59+09:00 yoshfuji@linux-ipv6.org +5 -2
#   [IPV6] NDISC: update entry appropriately when receiving NS.
#   
#   Update neighbour entry appropriately by passing correct flags
#   when receiving NS.
#   
#   Signed-off-by: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
# 
# net/core/neighbour.c
#   2004/09/13 15:54:22+09:00 yoshfuji@linux-ipv6.org +14 -15
#   [NET] NEIGHBOUR: use time_after() and its friends.
#   
#   Signed-off-by: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
# 
# net/ipv6/ndisc.c
#   2004/09/13 15:54:01+09:00 yoshfuji@linux-ipv6.org +22 -17
#   [IPV6] NDISC: update IsRouter flag appropriately.
#   
#   Update the IsRouter (NTF_ROUTER) flag appropriately.
#   Specifically, 
#    - we should not update it blindly; if the Override flag is 
#      unset and lladdr is different, we should NOT.
#    - we should set it when we have received RA.
#    - we should set it when we have received Redirect
#      whose target is off-link.
#   
#   Signed-off-by: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
# 
# net/core/neighbour.c
#   2004/09/13 15:54:01+09:00 yoshfuji@linux-ipv6.org +13 -0
#   [IPV6] NDISC: update IsRouter flag appropriately.
#   
#   Update the IsRouter (NTF_ROUTER) flag appropriately.
#   Specifically, 
#    - we should not update it blindly; if the Override flag is 
#      unset and lladdr is different, we should NOT.
#    - we should set it when we have received RA.
#    - we should set it when we have received Redirect
#      whose target is off-link.
#   
#   Signed-off-by: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
# 
# include/net/neighbour.h
#   2004/09/13 15:54:01+09:00 yoshfuji@linux-ipv6.org +2 -0
#   [IPV6] NDISC: update IsRouter flag appropriately.
#   
#   Update the IsRouter (NTF_ROUTER) flag appropriately.
#   Specifically, 
#    - we should not update it blindly; if the Override flag is 
#      unset and lladdr is different, we should NOT.
#    - we should set it when we have received RA.
#    - we should set it when we have received Redirect
#      whose target is off-link.
#   
#   Signed-off-by: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
# 
# net/ipv6/ndisc.c
#   2004/09/13 15:52:03+09:00 yoshfuji@linux-ipv6.org +3 -4
#   [NET] NEIGHBOUR: merge two flags for neigh_update() into one.
#   
#   This is because SUSPECT_CONNECTED can be effective 
#   only if OVERRIDE is unset, and used only if RETAIN_STATE is set.
#   
#   Signed-off-by: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
# 
# net/core/neighbour.c
#   2004/09/13 15:52:03+09:00 yoshfuji@linux-ipv6.org +6 -5
#   [NET] NEIGHBOUR: merge two flags for neigh_update() into one.
#   
#   This is because SUSPECT_CONNECTED can be effective 
#   only if OVERRIDE is unset, and used only if RETAIN_STATE is set.
#   
#   Signed-off-by: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
# 
# include/net/neighbour.h
#   2004/09/13 15:52:03+09:00 yoshfuji@linux-ipv6.org +1 -2
#   [NET] NEIGHBOUR: merge two flags for neigh_update() into one.
#   
#   This is because SUSPECT_CONNECTED can be effective 
#   only if OVERRIDE is unset, and used only if RETAIN_STATE is set.
#   
#   Signed-off-by: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
# 
# net/ipv6/ndisc.c
#   2004/09/13 15:51:10+09:00 yoshfuji@linux-ipv6.org +7 -2
#   [IPV6] NDISC: keep original state if new state is STALE and lladdr is unchanged
#   
#   Signed-off-by: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
# 
# net/core/neighbour.c
#   2004/09/13 15:51:10+09:00 yoshfuji@linux-ipv6.org +5 -2
#   [IPV6] NDISC: keep original state if new state is STALE and lladdr is unchanged
#   
#   Signed-off-by: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
# 
# include/net/neighbour.h
#   2004/09/13 15:51:10+09:00 yoshfuji@linux-ipv6.org +1 -0
#   [IPV6] NDISC: keep original state if new state is STALE and lladdr is unchanged
#   
#   Signed-off-by: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
# 
# net/ipv6/ndisc.c
#   2004/09/13 15:50:46+09:00 yoshfuji@linux-ipv6.org +2 -1
#   [IPV6] NDISC: suspect REACHABLE entry if new lladdr is different.
#   
#   When we receive NA without Override flag, if it comes with
#   different lladdr from one in our REACHABLE entry,
#   set the state to STALE. (RFC2461 7.2.5)
#   
#   Signed-off-by: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
# 
# net/core/neighbour.c
#   2004/09/13 15:50:46+09:00 yoshfuji@linux-ipv6.org +20 -9
#   [IPV6] NDISC: suspect REACHABLE entry if new lladdr is different.
#   
#   When we receive NA without Override flag, if it comes with
#   different lladdr from one in our REACHABLE entry,
#   set the state to STALE. (RFC2461 7.2.5)
#   
#   Signed-off-by: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
# 
# include/net/neighbour.h
#   2004/09/13 15:50:46+09:00 yoshfuji@linux-ipv6.org +1 -0
#   [IPV6] NDISC: suspect REACHABLE entry if new lladdr is different.
#   
#   When we receive NA without Override flag, if it comes with
#   different lladdr from one in our REACHABLE entry,
#   set the state to STALE. (RFC2461 7.2.5)
#   
#   Signed-off-by: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
# 
# net/ipv6/ndisc.c
#   2004/09/13 15:49:50+09:00 yoshfuji@linux-ipv6.org +3 -3
#   [NET] NEIGHBOUR: save number of arguments for neigh_update() by flags.
#   
#   Signed-off-by: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
# 
# net/ipv4/arp.c
#   2004/09/13 15:49:50+09:00 yoshfuji@linux-ipv6.org +7 -3
#   [NET] NEIGHBOUR: save number of arguments for neigh_update() by flags.
#   
#   Signed-off-by: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
# 
# net/core/neighbour.c
#   2004/09/13 15:49:50+09:00 yoshfuji@linux-ipv6.org +15 -8
#   [NET] NEIGHBOUR: save number of arguments for neigh_update() by flags.
#   
#   Signed-off-by: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
# 
# net/atm/clip.c
#   2004/09/13 15:49:50+09:00 yoshfuji@linux-ipv6.org +4 -2
#   [NET] NEIGHBOUR: save number of arguments for neigh_update() by flags.
#   
#   Signed-off-by: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
# 
# include/net/neighbour.h
#   2004/09/13 15:49:50+09:00 yoshfuji@linux-ipv6.org +6 -1
#   [NET] NEIGHBOUR: save number of arguments for neigh_update() by flags.
#   
#   Signed-off-by: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
# 
# ChangeSet
#   2004/09/09 21:50:47+00:00 jbarnes@sgi.com 
#   [IA64-SGI]: disable non-display ROM resources
#    
#   This patch is needed to correctly support the new sysfs rom file.  On sn2, we
#   only allocate PIO space for display option ROMs since PIO space is a
#   relatively scarce resource (we've seen exhaustion when running with several
#   qla cards in the same domain).  And without this patch we won't zero out
#   non-display ROM resources which can lead to panics if anyone tries to use the
#   bogus addresses left over there from the generic PCI probing code.
#   
#   Signed-off-by: Jesse Barnes <jbarnes@sgi.com>
#   Signed-off-by: Tony Luck <tony.luck@intel.com>
# 
# arch/ia64/sn/io/machvec/pci_bus_cvlink.c
#   2004/09/09 21:47:56+00:00 aegl@agluck-lia64.sc.intel.com +14 -1
#   sn2: disable non-display ROM resources
# 
# ChangeSet
#   2004/09/09 21:45:01+00:00 markgw@sgi.com 
#   [IA64-SGI] sn_proc_fs.c: convert to use seq_file API
#   
#   Signed-off-by: Mark Goodwin <markgw@sgi.com>
#   Signed-off-by: Tony Luck <tony.luck@intel.com>
# 
# arch/ia64/sn/kernel/sn2/sn_proc_fs.c
#   2004/09/09 21:42:19+00:00 aegl@agluck-lia64.sc.intel.com +86 -99
#   convert to use seq_file API
# 
diff -Nru a/Documentation/filesystems/tmpfs.txt b/Documentation/filesystems/tmpfs.txt
--- a/Documentation/filesystems/tmpfs.txt	2004-09-15 20:29:40 -07:00
+++ b/Documentation/filesystems/tmpfs.txt	2004-09-15 20:29:40 -07:00
@@ -62,13 +62,21 @@
            since the OOM handler will not be able to free that memory.
 nr_blocks: The same as size, but in blocks of PAGE_CACHE_SIZE.
 nr_inodes: The maximum number of inodes for this instance. The default
-           is half of the number of your physical RAM pages.
+           is half of the number of your physical RAM pages, or (on
+           a machine with highmem) the number of lowmem RAM pages,
+           whichever is the lower.
 
 These parameters accept a suffix k, m or g for kilo, mega and giga and
 can be changed on remount.  The size parameter also accepts a suffix %
 to limit this tmpfs instance to that percentage of your physical RAM:
 the default, when neither size nor nr_blocks is specified, is size=50%
 
+If both nr_blocks (or size) and nr_inodes are set to 0, neither blocks
+nor inodes will be limited in that instance.  It is generally unwise to
+mount with such options, since it allows any user with write access to
+use up all the memory on the machine; but enhances the scalability of
+that instance in a system with many cpus making intensive use of it.
+
 
 To specify the initial root directory you can use the following mount
 options:
@@ -89,4 +97,4 @@
 Author:
    Christoph Rohland <cr@sap.com>, 1.12.01
 Updated:
-   Hugh Dickins <hugh@veritas.com>, 01 April 2003
+   Hugh Dickins <hugh@veritas.com>, 01 September 2004
diff -Nru a/arch/alpha/Kconfig b/arch/alpha/Kconfig
--- a/arch/alpha/Kconfig	2004-09-15 20:29:40 -07:00
+++ b/arch/alpha/Kconfig	2004-09-15 20:29:40 -07:00
@@ -32,6 +32,10 @@
 	bool
 	default y
 
+config GENERIC_IOMAP
+	bool
+	default y
+
 source "init/Kconfig"
 
 
diff -Nru a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
--- a/arch/alpha/kernel/osf_sys.c	2004-09-15 20:29:40 -07:00
+++ b/arch/alpha/kernel/osf_sys.c	2004-09-15 20:29:40 -07:00
@@ -1053,9 +1053,10 @@
 		ret = 0;
 	}
 
-	set_fd_set(n, inp->fds_bits, fds.res_in);
-	set_fd_set(n, outp->fds_bits, fds.res_out);
-	set_fd_set(n, exp->fds_bits, fds.res_ex);
+	if (set_fd_set(n, inp->fds_bits, fds.res_in) ||
+	    set_fd_set(n, outp->fds_bits, fds.res_out) ||
+	    set_fd_set(n, exp->fds_bits, fds.res_ex))
+		ret = -EFAULT;
 
  out:
 	kfree(bits);
diff -Nru a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c
--- a/arch/alpha/kernel/signal.c	2004-09-15 20:29:40 -07:00
+++ b/arch/alpha/kernel/signal.c	2004-09-15 20:29:40 -07:00
@@ -145,7 +145,7 @@
  * Atomically swap in the new signal mask, and wait for a signal.
  */
 asmlinkage int
-do_sigsuspend(old_sigset_t mask, struct pt_regs *reg, struct switch_stack *sw)
+do_sigsuspend(old_sigset_t mask, struct pt_regs *regs, struct switch_stack *sw)
 {
 	sigset_t oldset;
 
@@ -156,17 +156,22 @@
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
 
+	/* Indicate EINTR on return from any possible signal handler,
+	   which will not come back through here, but via sigreturn.  */
+	regs->r0 = EINTR;
+	regs->r19 = 1;
+
 	while (1) {
 		current->state = TASK_INTERRUPTIBLE;
 		schedule();
-		if (do_signal(&oldset, reg, sw, 0, 0))
+		if (do_signal(&oldset, regs, sw, 0, 0))
 			return -EINTR;
 	}
 }
 
 asmlinkage int
 do_rt_sigsuspend(sigset_t __user *uset, size_t sigsetsize,
-		 struct pt_regs *reg, struct switch_stack *sw)
+		 struct pt_regs *regs, struct switch_stack *sw)
 {
 	sigset_t oldset, set;
 
@@ -183,10 +188,15 @@
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
 
+	/* Indicate EINTR on return from any possible signal handler,
+	   which will not come back through here, but via sigreturn.  */
+	regs->r0 = EINTR;
+	regs->r19 = 1;
+
 	while (1) {
 		current->state = TASK_INTERRUPTIBLE;
 		schedule();
-		if (do_signal(&oldset, reg, sw, 0, 0))
+		if (do_signal(&oldset, regs, sw, 0, 0))
 			return -EINTR;
 	}
 }
diff -Nru a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S
--- a/arch/alpha/kernel/systbls.S	2004-09-15 20:29:40 -07:00
+++ b/arch/alpha/kernel/systbls.S	2004-09-15 20:29:40 -07:00
@@ -457,6 +457,7 @@
 	.quad sys_mq_timedreceive		/* 435 */
 	.quad sys_mq_notify
 	.quad sys_mq_getsetattr
+	.quad sys_waitid
 
 	.size sys_call_table, . - sys_call_table
 	.type sys_call_table, @object
diff -Nru a/arch/arm/Kconfig b/arch/arm/Kconfig
--- a/arch/arm/Kconfig	2004-09-15 20:29:40 -07:00
+++ b/arch/arm/Kconfig	2004-09-15 20:29:40 -07:00
@@ -63,6 +63,10 @@
 config GENERIC_ISA_DMA
 	bool
 
+config GENERIC_IOMAP
+	bool
+	default y
+
 source "init/Kconfig"
 
 menu "System Type"
diff -Nru a/arch/arm/kernel/apm.c b/arch/arm/kernel/apm.c
--- a/arch/arm/kernel/apm.c	2004-09-15 20:29:40 -07:00
+++ b/arch/arm/kernel/apm.c	2004-09-15 20:29:40 -07:00
@@ -46,7 +46,13 @@
 /*
  * Maximum number of events stored
  */
-#define APM_MAX_EVENTS		20
+#define APM_MAX_EVENTS		16
+
+struct apm_queue {
+	unsigned int		event_head;
+	unsigned int		event_tail;
+	apm_event_t		events[APM_MAX_EVENTS];
+};
 
 /*
  * The per-file APM data
@@ -54,27 +60,25 @@
 struct apm_user {
 	struct list_head	list;
 
-	int			suser: 1;
-	int			writer: 1;
-	int			reader: 1;
-	int			suspend_wait: 1;
-	int			suspend_result;
+	unsigned int		suser: 1;
+	unsigned int		writer: 1;
+	unsigned int		reader: 1;
 
-	int			suspends_pending;
-	int			standbys_pending;
-	unsigned int		suspends_read;
-	unsigned int		standbys_read;
+	int			suspend_result;
+	unsigned int		suspend_state;
+#define SUSPEND_NONE	0		/* no suspend pending */
+#define SUSPEND_PENDING	1		/* suspend pending read */
+#define SUSPEND_READ	2		/* suspend read, pending ack */
+#define SUSPEND_ACKED	3		/* suspend acked */
+#define SUSPEND_DONE	4		/* suspend completed */
 
-	int			event_head;
-	int			event_tail;
-	apm_event_t		events[APM_MAX_EVENTS];
+	struct apm_queue	queue;
 };
 
 /*
  * Local variables
  */
 static int suspends_pending;
-static int standbys_pending;
 static int apm_disabled;
 
 static DECLARE_WAIT_QUEUE_HEAD(apm_waitqueue);
@@ -83,14 +87,19 @@
 /*
  * This is a list of everyone who has opened /dev/apm_bios
  */
-static spinlock_t user_list_lock = SPIN_LOCK_UNLOCKED;
+static DECLARE_RWSEM(user_list_lock);
 static LIST_HEAD(apm_user_list);
 
 /*
- * The kapmd info.
+ * kapmd info.  kapmd provides us a process context to handle
+ * "APM" events within - specifically necessary if we're going
+ * to be suspending the system.
  */
-static struct task_struct *kapmd;
+static DECLARE_WAIT_QUEUE_HEAD(kapmd_wait);
 static DECLARE_COMPLETION(kapmd_exit);
+static spinlock_t kapmd_queue_lock = SPIN_LOCK_UNLOCKED;
+static struct apm_queue kapmd_queue;
+
 
 static const char driver_version[] = "1.13";	/* no spaces */
 
@@ -102,19 +111,6 @@
  */
 static void __apm_get_power_status(struct apm_power_info *info)
 {
-#if 0 && defined(CONFIG_SA1100_H3600) && defined(CONFIG_TOUCHSCREEN_H3600)
-	extern int h3600_apm_get_power_status(u_char *, u_char *, u_char *,
-					      u_char *, u_short *);
-
-	if (machine_is_h3600()) {
-		int dx;
-		h3600_apm_get_power_status(&info->ac_line_status,
-				&info->battery_status, &info->battery_flag,
-				&info->battery_life, &dx);
-		info->time = dx & 0x7fff;
-		info->units = dx & 0x8000 ? 0 : 1;
-	}
-#endif
 }
 
 /*
@@ -123,65 +119,71 @@
 void (*apm_get_power_status)(struct apm_power_info *) = __apm_get_power_status;
 EXPORT_SYMBOL(apm_get_power_status);
 
-static int queue_empty(struct apm_user *as)
+
+/*
+ * APM event queue management.
+ */
+static inline int queue_empty(struct apm_queue *q)
 {
-	return as->event_head == as->event_tail;
+	return q->event_head == q->event_tail;
 }
 
-static apm_event_t get_queued_event(struct apm_user *as)
+static inline apm_event_t queue_get_event(struct apm_queue *q)
 {
-	as->event_tail = (as->event_tail + 1) % APM_MAX_EVENTS;
-	return as->events[as->event_tail];
+	q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS;
+	return q->events[q->event_tail];
 }
 
-static void queue_event_one_user(struct apm_user *as, apm_event_t event)
+static void queue_add_event(struct apm_queue *q, apm_event_t event)
 {
-	as->event_head = (as->event_head + 1) % APM_MAX_EVENTS;
-	if (as->event_head == as->event_tail) {
+	q->event_head = (q->event_head + 1) % APM_MAX_EVENTS;
+	if (q->event_head == q->event_tail) {
 		static int notified;
 
 		if (notified++ == 0)
 		    printk(KERN_ERR "apm: an event queue overflowed\n");
-		as->event_tail = (as->event_tail + 1) % APM_MAX_EVENTS;
+		q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS;
 	}
-	as->events[as->event_head] = event;
-
-	if (!as->suser || !as->writer)
-		return;
+	q->events[q->event_head] = event;
+}
 
-	switch (event) {
-	case APM_SYS_SUSPEND:
-	case APM_USER_SUSPEND:
-		as->suspends_pending++;
-		suspends_pending++;
-		break;
+static void queue_event_one_user(struct apm_user *as, apm_event_t event)
+{
+	if (as->suser && as->writer) {
+		switch (event) {
+		case APM_SYS_SUSPEND:
+		case APM_USER_SUSPEND:
+			/*
+			 * If this user already has a suspend pending,
+			 * don't queue another one.
+			 */
+			if (as->suspend_state != SUSPEND_NONE)
+				return;
 
-	case APM_SYS_STANDBY:
-	case APM_USER_STANDBY:
-		as->standbys_pending++;
-		standbys_pending++;
-		break;
+			as->suspend_state = SUSPEND_PENDING;
+			suspends_pending++;
+			break;
+		}
 	}
+	queue_add_event(&as->queue, event);
 }
 
 static void queue_event(apm_event_t event, struct apm_user *sender)
 {
-	struct list_head *l;
-
-	spin_lock(&user_list_lock);
-	list_for_each(l, &apm_user_list) {
-		struct apm_user *as = list_entry(l, struct apm_user, list);
+	struct apm_user *as;
 
+	down_read(&user_list_lock);
+	list_for_each_entry(as, &apm_user_list, list) {
 		if (as != sender && as->reader)
 			queue_event_one_user(as, event);
 	}
-	spin_unlock(&user_list_lock);
+	up_read(&user_list_lock);
 	wake_up_interruptible(&apm_waitqueue);
 }
 
-static int apm_suspend(void)
+static void apm_suspend(void)
 {
-	struct list_head *l;
+	struct apm_user *as;
 	int err = pm_suspend(PM_SUSPEND_MEM);
 
 	/*
@@ -193,52 +195,39 @@
 	/*
 	 * Finally, wake up anyone who is sleeping on the suspend.
 	 */
-	spin_lock(&user_list_lock);
-	list_for_each(l, &apm_user_list) {
-		struct apm_user *as = list_entry(l, struct apm_user, list);
-
+	down_read(&user_list_lock);
+	list_for_each_entry(as, &apm_user_list, list) {
 		as->suspend_result = err;
-		as->suspend_wait = 0;
+		as->suspend_state = SUSPEND_DONE;
 	}
-	spin_unlock(&user_list_lock);
+	up_read(&user_list_lock);
 
 	wake_up_interruptible(&apm_suspend_waitqueue);
-	return err;
 }
 
 static ssize_t apm_read(struct file *fp, char __user *buf, size_t count, loff_t *ppos)
 {
 	struct apm_user *as = fp->private_data;
 	apm_event_t event;
-	int i = count, ret = 0, nonblock = fp->f_flags & O_NONBLOCK;
+	int i = count, ret = 0;
 
 	if (count < sizeof(apm_event_t))
 		return -EINVAL;
 
-	if (queue_empty(as) && nonblock)
+	if (queue_empty(&as->queue) && fp->f_flags & O_NONBLOCK)
 		return -EAGAIN;
 
-	wait_event_interruptible(apm_waitqueue, !queue_empty(as));
+	wait_event_interruptible(apm_waitqueue, !queue_empty(&as->queue));
 
-	while ((i >= sizeof(event)) && !queue_empty(as)) {
-		event = get_queued_event(as);
-		printk("  apm_read: event=%d\n", event);
+	while ((i >= sizeof(event)) && !queue_empty(&as->queue)) {
+		event = queue_get_event(&as->queue);
 
 		ret = -EFAULT;
 		if (copy_to_user(buf, &event, sizeof(event)))
 			break;
 
-		switch (event) {
-		case APM_SYS_SUSPEND:
-		case APM_USER_SUSPEND:
-			as->suspends_read++;
-			break;
-
-		case APM_SYS_STANDBY:
-		case APM_USER_STANDBY:
-			as->standbys_read++;
-			break;
-		}
+		if (event == APM_SYS_SUSPEND || event == APM_USER_SUSPEND)
+			as->suspend_state = SUSPEND_READ;
 
 		buf += sizeof(event);
 		i -= sizeof(event);
@@ -252,10 +241,10 @@
 
 static unsigned int apm_poll(struct file *fp, poll_table * wait)
 {
-	struct apm_user * as = fp->private_data;
+	struct apm_user *as = fp->private_data;
 
 	poll_wait(fp, &apm_waitqueue, wait);
-	return queue_empty(as) ? 0 : POLLIN | POLLRDNORM;
+	return queue_empty(&as->queue) ? 0 : POLLIN | POLLRDNORM;
 }
 
 /*
@@ -272,43 +261,57 @@
 apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
 {
 	struct apm_user *as = filp->private_data;
+	unsigned long flags;
 	int err = -EINVAL;
 
 	if (!as->suser || !as->writer)
 		return -EPERM;
 
 	switch (cmd) {
-	case APM_IOC_STANDBY:
-		break;
-
 	case APM_IOC_SUSPEND:
-		/*
-		 * If we read a suspend command from /dev/apm_bios,
-		 * then the corresponding APM_IOC_SUSPEND ioctl is
-		 * interpreted as an acknowledge.
-		 */
-		if (as->suspends_read > 0) {
-			as->suspends_read--;
-			as->suspends_pending--;
+		as->suspend_result = -EINTR;
+
+		if (as->suspend_state == SUSPEND_READ) {
+			/*
+			 * If we read a suspend command from /dev/apm_bios,
+			 * then the corresponding APM_IOC_SUSPEND ioctl is
+			 * interpreted as an acknowledge.
+			 */
+			as->suspend_state = SUSPEND_ACKED;
 			suspends_pending--;
 		} else {
+			/*
+			 * Otherwise it is a request to suspend the system.
+			 * Queue an event for all readers, and expect an
+			 * acknowledge from all writers who haven't already
+			 * acknowledged.
+			 */
 			queue_event(APM_USER_SUSPEND, as);
 		}
 
 		/*
-		 * If there are outstanding suspend requests for other
-		 * people on /dev/apm_bios, we must sleep for them.
-		 * Last one to bed turns the lights out.
+		 * If there are no further acknowledges required, suspend
+		 * the system.
 		 */
-		if (suspends_pending > 0) {
-			as->suspend_wait = 1;
-			err = wait_event_interruptible(apm_suspend_waitqueue,
-						 as->suspend_wait == 0);
-			if (err == 0)
-				err = as->suspend_result;
-		} else {			
-			err = apm_suspend();
-		}
+		if (suspends_pending == 0)
+			apm_suspend();
+
+		/*
+		 * Wait for the suspend/resume to complete.  If there are
+		 * pending acknowledges, we wait here for them.
+		 *
+		 * Note that we need to ensure that the PM subsystem does
+		 * not kick us out of the wait when it suspends the threads.
+		 */
+		flags = current->flags;
+		current->flags |= PF_NOFREEZE;
+
+		wait_event_interruptible(apm_suspend_waitqueue,
+					 as->suspend_state == SUSPEND_DONE);
+
+		current->flags = flags;
+		err = as->suspend_result;
+		as->suspend_state = SUSPEND_NONE;
 		break;
 	}
 
@@ -320,24 +323,19 @@
 	struct apm_user *as = filp->private_data;
 	filp->private_data = NULL;
 
-	spin_lock(&user_list_lock);
+	down_write(&user_list_lock);
 	list_del(&as->list);
-	spin_unlock(&user_list_lock);
+	up_write(&user_list_lock);
 
 	/*
 	 * We are now unhooked from the chain.  As far as new
 	 * events are concerned, we no longer exist.  However, we
-	 * need to balance standbys_pending and suspends_pending,
-	 * which means the possibility of sleeping.
+	 * need to balance suspends_pending, which means the
+	 * possibility of sleeping.
 	 */
-	if (as->standbys_pending > 0) {
-		standbys_pending -= as->standbys_pending;
-//		if (standbys_pending <= 0)
-//			standby();
-	}
-	if (as->suspends_pending > 0) {
-		suspends_pending -= as->suspends_pending;
-		if (suspends_pending <= 0)
+	if (as->suspend_state != SUSPEND_NONE) {
+		suspends_pending -= 1;
+		if (suspends_pending == 0)
 			apm_suspend();
 	}
 
@@ -364,9 +362,9 @@
 		as->writer = (filp->f_mode & FMODE_WRITE) == FMODE_WRITE;
 		as->reader = (filp->f_mode & FMODE_READ) == FMODE_READ;
 
-		spin_lock(&user_list_lock);
+		down_write(&user_list_lock);
 		list_add(&as->list, &apm_user_list);
-		spin_unlock(&user_list_lock);
+		up_write(&user_list_lock);
 
 		filp->private_data = as;
 	}
@@ -438,7 +436,7 @@
 	info.ac_line_status = 0xff;
 	info.battery_status = 0xff;
 	info.battery_flag   = 0xff;
-	info.battery_life   = 255;
+	info.battery_life   = -1;
 	info.time	    = -1;
 	info.units	    = -1;
 
@@ -461,34 +459,53 @@
 }
 #endif
 
-#if 0
-static int kapmd(void *startup)
+static int kapmd(void *arg)
 {
-	struct task_struct *tsk = current;
+	daemonize("kapmd");
+	current->flags |= PF_NOFREEZE;
 
-	daemonize();
-	strcpy(tsk->comm, "kapmd");
-	kapmd = tsk;
+	do {
+		apm_event_t event;
 
-	spin_lock_irq(&tsk->sigmask_lock);
-	siginitsetinv(&tsk->blocked, sigmask(SIGQUIT));
-	recalc_sigpending(tsk);
-	spin_unlock_irq(&tsk->sigmask_lock);
+		wait_event_interruptible(kapmd_wait,
+				!queue_empty(&kapmd_queue) || !pm_active);
 
-	complete((struct completion *)startup);
+		if (!pm_active)
+			break;
 
-	do {
-		set_task_state(tsk, TASK_INTERRUPTIBLE);
-		schedule();
-	} while (!signal_pending(tsk));
+		spin_lock_irq(&kapmd_queue_lock);
+		event = 0;
+		if (!queue_empty(&kapmd_queue))
+			event = queue_get_event(&kapmd_queue);
+		spin_unlock_irq(&kapmd_queue_lock);
+
+		switch (event) {
+		case 0:
+			break;
+
+		case APM_LOW_BATTERY:
+		case APM_POWER_STATUS_CHANGE:
+			queue_event(event, NULL);
+			break;
+
+		case APM_USER_SUSPEND:
+		case APM_SYS_SUSPEND:
+			queue_event(event, NULL);
+			if (suspends_pending == 0)
+				apm_suspend();
+			break;
+
+		case APM_CRITICAL_SUSPEND:
+			apm_suspend();
+			break;
+		}
+	} while (1);
 
 	complete_and_exit(&kapmd_exit, 0);
 }
-#endif
 
 static int __init apm_init(void)
 {
-//	struct completion startup = COMPLETION_INITIALIZER(startup);
 	int ret;
 
 	if (apm_disabled) {
@@ -501,22 +518,24 @@
 		return -EINVAL;
 	}
 
-//	ret = kernel_thread(kapmd, &startup, CLONE_FS | CLONE_FILES);
-//	if (ret)
-//		return ret;
-//	wait_for_completion(&startup);
-
 	pm_active = 1;
 
+	ret = kernel_thread(kapmd, NULL, CLONE_KERNEL);
+	if (ret < 0) {
+		pm_active = 0;
+		return ret;
+	}
+
 #ifdef CONFIG_PROC_FS
 	create_proc_info_entry("apm", 0, NULL, apm_get_info);
 #endif
 
 	ret = misc_register(&apm_device);
 	if (ret != 0) {
-		pm_active = 0;
 		remove_proc_entry("apm", NULL);
-		send_sig(SIGQUIT, kapmd, 1);
+
+		pm_active = 0;
+		wake_up(&kapmd_wait);
 		wait_for_completion(&kapmd_exit);
 	}
 
@@ -527,9 +546,10 @@
 {
 	misc_deregister(&apm_device);
 	remove_proc_entry("apm", NULL);
+
 	pm_active = 0;
-//	send_sig(SIGQUIT, kapmd, 1);
-//	wait_for_completion(&kapmd_exit);
+	wake_up(&kapmd_wait);
+	wait_for_completion(&kapmd_exit);
 }
 
 module_init(apm_init);
@@ -556,3 +576,27 @@
 
 __setup("apm=", apm_setup);
 #endif
+
+/**
+ * apm_queue_event - queue an APM event for kapmd
+ * @event: APM event
+ *
+ * Queue an APM event for kapmd to process and ultimately take the
+ * appropriate action.  Only a subset of events are handled:
+ *   %APM_LOW_BATTERY
+ *   %APM_POWER_STATUS_CHANGE
+ *   %APM_USER_SUSPEND
+ *   %APM_SYS_SUSPEND
+ *   %APM_CRITICAL_SUSPEND
+ */
+void apm_queue_event(apm_event_t event)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&kapmd_queue_lock, flags);
+	queue_add_event(&kapmd_queue, event);
+	spin_unlock_irqrestore(&kapmd_queue_lock, flags);
+
+	wake_up_interruptible(&kapmd_wait);
+}
+EXPORT_SYMBOL(apm_queue_event);
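
As a usage sketch (not part of this patch), machine or platform code that
detects a power event would hand it to kapmd through the new hook; the
handler name and the IRQ wiring below are hypothetical:

/* Illustrative only: the handler name and how it is wired to an
 * interrupt are assumptions, not part of this patch.
 */
static irqreturn_t example_power_button_irq(int irq, void *dev_id,
					    struct pt_regs *regs)
{
	/* kapmd dequeues this, notifies readers of /dev/apm_bios and,
	 * once all writers have acknowledged, calls apm_suspend().
	 */
	apm_queue_event(APM_USER_SUSPEND);
	return IRQ_HANDLED;
}
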
diff -Nru a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c
--- a/arch/arm/mach-pxa/pm.c	2004-09-15 20:29:40 -07:00
+++ b/arch/arm/mach-pxa/pm.c	2004-09-15 20:29:40 -07:00
@@ -45,7 +45,7 @@
  */
 enum {	SLEEP_SAVE_START = 0,
 
-	SLEEP_SAVE_OSCR, SLEEP_SAVE_OIER,
+	SLEEP_SAVE_OIER,
 	SLEEP_SAVE_OSMR0, SLEEP_SAVE_OSMR1, SLEEP_SAVE_OSMR2, SLEEP_SAVE_OSMR3,
 
 	SLEEP_SAVE_GPLR0, SLEEP_SAVE_GPLR1, SLEEP_SAVE_GPLR2,
@@ -78,7 +78,6 @@
 	delta = xtime.tv_sec - RCNR;
 
 	/* save vital registers */
-	SAVE(OSCR);
 	SAVE(OSMR0);
 	SAVE(OSMR1);
 	SAVE(OSMR2);
@@ -149,8 +148,10 @@
 	RESTORE(OSMR1);
 	RESTORE(OSMR2);
 	RESTORE(OSMR3);
-	RESTORE(OSCR);
 	RESTORE(OIER);
+
+	/* OSMR0 is the system timer: make sure OSCR is sufficiently behind */
+	OSCR = OSMR0 - LATCH;
 
 	RESTORE(CKEN);
 
diff -Nru a/arch/arm/mach-s3c2410/devs.c b/arch/arm/mach-s3c2410/devs.c
--- a/arch/arm/mach-s3c2410/devs.c	2004-09-15 20:29:40 -07:00
+++ b/arch/arm/mach-s3c2410/devs.c	2004-09-15 20:29:40 -07:00
@@ -53,7 +53,7 @@
 
 struct platform_device s3c_device_usb = {
 	.name		  = "s3c2410-ohci",
-	.id		  = 0,
+	.id		  = -1,
 	.num_resources	  = ARRAY_SIZE(s3c_usb_resource),
 	.resource	  = s3c_usb_resource,
 	.dev              = {
@@ -102,13 +102,7 @@
 		.start = S3C2410_PA_NAND,
 		.end   = S3C2410_PA_NAND + S3C2410_SZ_NAND,
 		.flags = IORESOURCE_MEM,
-	},
-	[1] = {
-		.start = IRQ_S3CUART_RX0,
-		.end   = IRQ_S3CUART_ERR0,
-		.flags = IORESOURCE_IRQ,
 	}
-
 };
 
 struct platform_device s3c_device_nand = {
diff -Nru a/arch/arm/mach-s3c2410/gpio.c b/arch/arm/mach-s3c2410/gpio.c
--- a/arch/arm/mach-s3c2410/gpio.c	2004-09-15 20:29:40 -07:00
+++ b/arch/arm/mach-s3c2410/gpio.c	2004-09-15 20:29:40 -07:00
@@ -19,6 +19,10 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  *
+ * Changelog
+ *	13-Sep-2004  BJD  Implemented change of MISCCR
+ *	14-Sep-2004  BJD  Added getpin call
+ *	14-Sep-2004  BJD  Fixed bug in setpin() call
  */
 
 
@@ -90,9 +94,32 @@
 	local_irq_save(flags);
 
 	dat = __raw_readl(base + 0x04);
-	dat &= 1 << offs;
+	dat &= ~(1 << offs);
 	dat |= to << offs;
 	__raw_writel(dat, base + 0x04);
 
 	local_irq_restore(flags);
+}
+
+unsigned int s3c2410_gpio_getpin(unsigned int pin)
+{
+	unsigned long base = S3C2410_GPIO_BASE(pin);
+	unsigned long offs = S3C2410_GPIO_OFFSET(pin);
+
+	return __raw_readl(base + 0x04) & (1 << offs);
+}
+
+unsigned int s3c2410_modify_misccr(unsigned int clear, unsigned int change)
+{
+	unsigned long flags;
+	unsigned long misccr;
+
+	local_irq_save(flags);
+	misccr = __raw_readl(S3C2410_MISCCR);
+	misccr &= ~clear;
+	misccr ^= change;
+	__raw_writel(misccr, S3C2410_MISCCR);
+	local_irq_restore(flags);
+
+	return misccr;
 }
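
A minimal sketch of how board code might use the two new helpers; the pin
and MISCCR bit names below are assumptions for illustration, not taken
from this patch:

/* Illustrative only: S3C2410_GPF0 and S3C2410_MISCCR_USBHOST are
 * assumed names, not part of this patch.
 */
static void example_s3c2410_gpio_usage(void)
{
	/* getpin returns the raw data-register bit: non-zero means high */
	if (s3c2410_gpio_getpin(S3C2410_GPF0))
		printk("GPF0 is high\n");

	/* misccr &= ~clear; misccr ^= change; so passing the bit in both
	 * arguments sets it, only in "clear" clears it, and only in
	 * "change" toggles it.
	 */
	s3c2410_modify_misccr(S3C2410_MISCCR_USBHOST,
			      S3C2410_MISCCR_USBHOST);	/* set the bit */
}
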
diff -Nru a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
--- a/arch/arm/mach-sa1100/pm.c	2004-09-15 20:29:40 -07:00
+++ b/arch/arm/mach-sa1100/pm.c	2004-09-15 20:29:40 -07:00
@@ -44,7 +44,7 @@
  */
 enum {	SLEEP_SAVE_SP = 0,
 
-	SLEEP_SAVE_OSCR, SLEEP_SAVE_OIER,
+	SLEEP_SAVE_OIER,
 	SLEEP_SAVE_OSMR0, SLEEP_SAVE_OSMR1, SLEEP_SAVE_OSMR2, SLEEP_SAVE_OSMR3,
 
 	SLEEP_SAVE_GPDR, SLEEP_SAVE_GAFR,
@@ -69,7 +69,6 @@
 	gpio = GPLR;
 
 	/* save vital registers */
-	SAVE(OSCR);
 	SAVE(OSMR0);
 	SAVE(OSMR1);
 	SAVE(OSMR2);
@@ -131,8 +130,10 @@
 	RESTORE(OSMR1);
 	RESTORE(OSMR2);
 	RESTORE(OSMR3);
-	RESTORE(OSCR);
 	RESTORE(OIER);
+
+	/* OSMR0 is the system timer: make sure OSCR is sufficiently behind */
+	OSCR = OSMR0 - LATCH;
 
 	/* restore current time */
 	xtime.tv_sec = RCNR + delta;
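
The PXA and SA1100 hunks above make the same change: the free-running
counter OSCR is no longer saved and restored verbatim, but re-derived from
the system-timer match register after resume. A brief note on the
arithmetic, as a hedged sketch rather than anything from the patch itself:

/* Illustrative only.  OSMR0 is the system-timer match register and
 * LATCH (roughly CLOCK_TICK_RATE / HZ) is one jiffy's worth of timer
 * ticks.  Setting
 *	OSCR = OSMR0 - LATCH;
 * leaves the counter one full tick period behind the match value, so the
 * first timer interrupt after resume fires about one jiffy later.
 * Restoring the old OSCR instead could leave the counter already past
 * OSMR0, in which case the next match (and hence the next tick) would
 * only arrive after the 32-bit counter wrapped around.
 */
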
diff -Nru a/arch/i386/Kconfig b/arch/i386/Kconfig
--- a/arch/i386/Kconfig	2004-09-15 20:29:40 -07:00
+++ b/arch/i386/Kconfig	2004-09-15 20:29:40 -07:00
@@ -29,6 +29,10 @@
 	bool
 	default y
 
+config GENERIC_IOMAP
+	bool
+	default y
+
 source "init/Kconfig"
 
 menu "Processor type and features"
diff -Nru a/arch/i386/mach-es7000/es7000.h b/arch/i386/mach-es7000/es7000.h
--- a/arch/i386/mach-es7000/es7000.h	2004-09-15 20:29:40 -07:00
+++ b/arch/i386/mach-es7000/es7000.h	2004-09-15 20:29:40 -07:00
@@ -104,6 +104,13 @@
 #define	MIP_SW_APIC		0x1020b
 #define	MIP_FUNC(VALUE) 	(VALUE & 0xff)
 
+#if defined(CONFIG_X86_IO_APIC) && (defined(CONFIG_ACPI_INTERPRETER) || defined(CONFIG_ACPI_BOOT))
+#define IOAPIC_GSI_BOUND(ioapic) ((ioapic+1) * (nr_ioapic_registers[ioapic]-1))
+#define MAX_GSI_MAPSIZE 32
+#endif
+
+extern unsigned long io_apic_irqs;
+
 extern int parse_unisys_oem (char *oemptr, int oem_entries);
 extern int find_unisys_acpi_oem_table(unsigned long *oem_addr, int *length);
 extern int es7000_start_cpu(int cpu, unsigned long eip);
diff -Nru a/arch/i386/mach-es7000/es7000plat.c b/arch/i386/mach-es7000/es7000plat.c
--- a/arch/i386/mach-es7000/es7000plat.c	2004-09-15 20:29:40 -07:00
+++ b/arch/i386/mach-es7000/es7000plat.c	2004-09-15 20:29:40 -07:00
@@ -51,27 +51,74 @@
 int 			mip_port;
 unsigned long		mip_addr, host_addr;
 
+#if defined(CONFIG_X86_IO_APIC) && (defined(CONFIG_ACPI_INTERPRETER) || defined(CONFIG_ACPI_BOOT))
+static unsigned long cycle_irqs = 0;
+static unsigned long free_irqs = 0;
+static int gsi_map[MAX_GSI_MAPSIZE] = { [0 ... MAX_GSI_MAPSIZE-1] = -1 };
+
+/*
+ * GSI override for ES7000 platforms.
+ */
+
+static int __init
+es7000_gsi_override(int ioapic, int gsi)
+{
+	static int newgsi = 0;
+
+	if (gsi_map[gsi] != -1)
+		gsi = gsi_map[gsi];
+	else if (cycle_irqs ^ free_irqs) {
+		newgsi = find_next_bit(&cycle_irqs, IOAPIC_GSI_BOUND(0), newgsi);
+		__set_bit(newgsi, &free_irqs);
+		gsi_map[gsi] = newgsi;
+		gsi = newgsi;
+		newgsi++;
+		Dprintk("es7000_gsi_override: free_irqs = 0x%lx\n", free_irqs);
+	}
+
+	return gsi;
+}
+
 static int __init
 es7000_rename_gsi(int ioapic, int gsi)
 {
+	static int initialized = 0;
+	int i;
+
+	/*
+	 * These should NEVER be true at this point but we'd rather be
+	 * safe than sorry.
+	 */
+	if (acpi_disabled || acpi_pci_disabled || acpi_noirq)
+ 		return gsi;
+
 	if (ioapic)
-		return gsi;
-	else {
-		if (gsi == 0)
-			return 13;
-		if (gsi == 1)
-			return 16;
-		if (gsi == 4)
-			return 17;
-		if (gsi == 6)
-			return 18;
-		if (gsi == 7)
-			return 19;
-		if (gsi == 8)
-			return 20;
-		return gsi;
-        }
+ 		return gsi;
+
+	if (!initialized) {
+		unsigned long tmp_irqs = 0;
+
+		for (i = 0; i < nr_ioapic_registers[0]; i++)
+			__set_bit(mp_irqs[i].mpc_srcbusirq, &tmp_irqs);
+
+		cycle_irqs = (~tmp_irqs & io_apic_irqs & ((1 << IOAPIC_GSI_BOUND(0)) - 1));
+
+		initialized = 1;
+		Dprintk("es7000_rename_gsi: cycle_irqs = 0x%lx\n", cycle_irqs);
+	}
+
+	for (i = 0; i < nr_ioapic_registers[0]; i++) {
+		if (mp_irqs[i].mpc_srcbusirq == gsi) {
+			if (mp_irqs[i].mpc_dstirq == gsi)
+				return gsi;
+			else
+				return es7000_gsi_override(0, gsi);
+		}
+	}
+
+	return gsi;
 }
+#endif /* (CONFIG_X86_IO_APIC) && (CONFIG_ACPI_INTERPRETER || CONFIG_ACPI_BOOT) */
 
 /*
  * Parse the OEM Table
@@ -193,7 +240,7 @@
 			}
 		}
 	}
-	printk("ES7000: did not find Unisys ACPI OEM table!\n");
+	Dprintk("ES7000: did not find Unisys ACPI OEM table!\n");
 	return -1;
 }
 
diff -Nru a/arch/ia64/Kconfig b/arch/ia64/Kconfig
--- a/arch/ia64/Kconfig	2004-09-15 20:29:40 -07:00
+++ b/arch/ia64/Kconfig	2004-09-15 20:29:40 -07:00
@@ -38,6 +38,10 @@
 	bool
 	default y
 
+config GENERIC_IOMAP
+	bool
+	default y
+
 choice
 	prompt "System type"
 	default IA64_GENERIC
diff -Nru a/arch/ia64/Makefile b/arch/ia64/Makefile
--- a/arch/ia64/Makefile	2004-09-15 20:29:40 -07:00
+++ b/arch/ia64/Makefile	2004-09-15 20:29:40 -07:00
@@ -20,7 +20,7 @@
 EXTRA		:=
 
 cflags-y	:= -pipe $(EXTRA) -ffixed-r13 -mfixed-range=f12-f15,f32-f127 \
-		   -falign-functions=32 -frename-registers
+		   -falign-functions=32 -frename-registers -fno-optimize-sibling-calls
 CFLAGS_KERNEL	:= -mconstant-gp
 
 GCC_VERSION     := $(call cc-version)
diff -Nru a/arch/ia64/sn/io/machvec/pci_bus_cvlink.c b/arch/ia64/sn/io/machvec/pci_bus_cvlink.c
--- a/arch/ia64/sn/io/machvec/pci_bus_cvlink.c	2004-09-15 20:29:40 -07:00
+++ b/arch/ia64/sn/io/machvec/pci_bus_cvlink.c	2004-09-15 20:29:40 -07:00
@@ -357,7 +357,20 @@
                         if (dev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_MEM)
                                 cmd |= PCI_COMMAND_MEMORY;
                 }
-        }
+        } else {
+		/*
+		 * Remove other ROM resources since they don't have valid
+		 * CPU addresses.
+		 */
+                size = dev->resource[PCI_ROM_RESOURCE].end -
+                        dev->resource[PCI_ROM_RESOURCE].start;
+
+		if (size) {
+			dev->resource[PCI_ROM_RESOURCE].start = 0;
+			dev->resource[PCI_ROM_RESOURCE].end = 0;
+			dev->resource[PCI_ROM_RESOURCE].flags = 0;
+		}
+	}
 
 	/*
 	 * Update the Command Word on the Card.
diff -Nru a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c
--- a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c	2004-09-15 20:29:40 -07:00
+++ b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c	2004-09-15 20:29:40 -07:00
@@ -10,67 +10,41 @@
 
 #ifdef CONFIG_PROC_FS
 #include <linux/proc_fs.h>
+#include <linux/seq_file.h>
 #include <asm/sn/sgi.h>
 #include <asm/sn/sn_sal.h>
 
-
-static int partition_id_read_proc(char *page, char **start, off_t off,
-		int count, int *eof, void *data) {
-
-	return sprintf(page, "%d\n", sn_local_partid());
+static int partition_id_show(struct seq_file *s, void *p)
+{
+	seq_printf(s, "%d\n", sn_local_partid());
+	return 0;
 }
 
-static struct proc_dir_entry * sgi_proc_dir;
-
-void
-register_sn_partition_id(void) {
-	struct proc_dir_entry *entry;
-
-	if (!sgi_proc_dir) {
-		sgi_proc_dir = proc_mkdir("sgi_sn", 0);
-	}
-	entry = create_proc_entry("partition_id", 0444, sgi_proc_dir);
-	if (entry) {
-		entry->nlink = 1;
-		entry->data = 0;
-		entry->read_proc = partition_id_read_proc;
-		entry->write_proc = NULL;
-	}
+static int partition_id_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, partition_id_show, NULL);
 }
 
-static int
-system_serial_number_read_proc(char *page, char **start, off_t off,
-		int count, int *eof, void *data) {
-	return sprintf(page, "%s\n", sn_system_serial_number());
+static int system_serial_number_show(struct seq_file *s, void *p)
+{
+	seq_printf(s, "%s\n", sn_system_serial_number());
+	return 0;
 }
 
-static int
-licenseID_read_proc(char *page, char **start, off_t off,
-		int count, int *eof, void *data) {
-	return sprintf(page, "0x%lx\n",sn_partition_serial_number_val());
+static int system_serial_number_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, system_serial_number_show, NULL);
 }
 
-void
-register_sn_serial_numbers(void) {
-	struct proc_dir_entry *entry;
+static int licenseID_show(struct seq_file *s, void *p)
+{
+	seq_printf(s, "0x%lx\n", sn_partition_serial_number_val());
+	return 0;
+}
 
-	if (!sgi_proc_dir) {
-		sgi_proc_dir = proc_mkdir("sgi_sn", 0);
-	}
-	entry = create_proc_entry("system_serial_number", 0444, sgi_proc_dir);
-	if (entry) {
-		entry->nlink = 1;
-		entry->data = 0;
-		entry->read_proc = system_serial_number_read_proc;
-		entry->write_proc = NULL;
-	}
-	entry = create_proc_entry("licenseID", 0444, sgi_proc_dir);
-	if (entry) {
-		entry->nlink = 1;
-		entry->data = 0;
-		entry->read_proc = licenseID_read_proc;
-		entry->write_proc = NULL;
-	}
+static int licenseID_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, licenseID_show, NULL);
 }
 
 /*
@@ -81,70 +55,83 @@
  */
 int sn_force_interrupt_flag = 1;
 
-static int
-sn_force_interrupt_read_proc(char *page, char **start, off_t off,
-		int count, int *eof, void *data) {
-	if (sn_force_interrupt_flag) {
-		return sprintf(page, "Force interrupt is enabled\n");
-	}
-	return sprintf(page, "Force interrupt is disabled\n");
+static int sn_force_interrupt_show(struct seq_file *s, void *p)
+{
+	seq_printf(s, "Force interrupt is %s\n",
+		sn_force_interrupt_flag ? "enabled" : "disabled");
+	return 0;
 }
 
-static int 
-sn_force_interrupt_write_proc(struct file *file, const char *buffer,
-                                        unsigned long count, void *data)
+static ssize_t sn_force_interrupt_write_proc(struct file *file,
+		const __user char *buffer, size_t count, loff_t *data)
 {
-	if (*buffer == '0') {
-		sn_force_interrupt_flag = 0;
-	} else {
-		sn_force_interrupt_flag = 1;
-	}
-	return 1;
+	sn_force_interrupt_flag = (*buffer == '0') ? 0 : 1;
+	return count;
 }
 
-void
-register_sn_force_interrupt(void) {
-	struct proc_dir_entry *entry;
+static int sn_force_interrupt_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, sn_force_interrupt_show, NULL);
+}
 
-	if (!sgi_proc_dir) {
-		sgi_proc_dir = proc_mkdir("sgi_sn", 0);
-	}
-	entry = create_proc_entry("sn_force_interrupt",0444, sgi_proc_dir);
-	if (entry) {
-		entry->nlink = 1;
-		entry->data = 0;
-		entry->read_proc = sn_force_interrupt_read_proc;
-		entry->write_proc = sn_force_interrupt_write_proc;
-	}
+static int coherence_id_show(struct seq_file *s, void *p)
+{
+	seq_printf(s, "%d\n", cpuid_to_coherence_id(smp_processor_id()));
+	return 0;
 }
 
-static int coherence_id_read_proc(char *page, char **start, off_t off,
-		int count, int *eof, void *data) {
-	return sprintf(page, "%d\n", cpuid_to_coherence_id(smp_processor_id()));
+static int coherence_id_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, coherence_id_show, NULL);
 }
 
-void
-register_sn_coherence_id(void) {
-	struct proc_dir_entry *entry;
+static struct proc_dir_entry *sn_procfs_create_entry(
+	const char *name, struct proc_dir_entry *parent,
+	int (*openfunc)(struct inode *, struct file *),
+	int (*releasefunc)(struct inode *, struct file *))
+{
+	struct proc_dir_entry *e = create_proc_entry(name, 0444, parent);
 
-	if (!sgi_proc_dir) {
-		sgi_proc_dir = proc_mkdir("sgi_sn", 0);
-	}
-	entry = create_proc_entry("coherence_id", 0444, sgi_proc_dir);
-	if (entry) {
-		entry->nlink = 1;
-		entry->data = 0;
-		entry->read_proc = coherence_id_read_proc;
-		entry->write_proc = NULL;
+	if (e) {
+		e->proc_fops = (struct file_operations *)kmalloc(
+			sizeof(struct file_operations), GFP_KERNEL);
+		if (e->proc_fops) {
+			memset(e->proc_fops, 0, sizeof(struct file_operations));
+			e->proc_fops->open = openfunc;
+			e->proc_fops->read = seq_read;
+			e->proc_fops->llseek = seq_lseek;
+			e->proc_fops->release = releasefunc;
+		}
 	}
+
+	return e;
 }
 
-void
-register_sn_procfs(void) {
-	register_sn_partition_id();
-	register_sn_serial_numbers();
-	register_sn_force_interrupt();
-	register_sn_coherence_id();
+void register_sn_procfs(void)
+{
+	static struct proc_dir_entry *sgi_proc_dir = NULL;
+	struct proc_dir_entry *e;
+
+	BUG_ON(sgi_proc_dir != NULL);
+	if (!(sgi_proc_dir = proc_mkdir("sgi_sn", 0)))
+		return;
+
+	sn_procfs_create_entry("partition_id", sgi_proc_dir,
+		partition_id_open, single_release);
+
+	sn_procfs_create_entry("system_serial_number", sgi_proc_dir,
+		system_serial_number_open, single_release);
+
+	sn_procfs_create_entry("licenseID", sgi_proc_dir, 
+		licenseID_open, single_release);
+
+	e = sn_procfs_create_entry("sn_force_interrupt", sgi_proc_dir, 
+		sn_force_interrupt_open, single_release);
+	if (e) 
+		e->proc_fops->write = sn_force_interrupt_write_proc;
+
+	sn_procfs_create_entry("coherence_id", sgi_proc_dir, 
+		coherence_id_open, single_release);
 }
 
 #endif /* CONFIG_PROC_FS */
diff -Nru a/arch/ppc/Kconfig b/arch/ppc/Kconfig
--- a/arch/ppc/Kconfig	2004-09-15 20:29:40 -07:00
+++ b/arch/ppc/Kconfig	2004-09-15 20:29:40 -07:00
@@ -35,6 +35,10 @@
 	bool
 	default y
 
+config GENERIC_IOMAP
+	bool
+	default y
+
 source "init/Kconfig"
 
 menu "Processor"
diff -Nru a/arch/ppc/Makefile b/arch/ppc/Makefile
--- a/arch/ppc/Makefile	2004-09-15 20:29:40 -07:00
+++ b/arch/ppc/Makefile	2004-09-15 20:29:40 -07:00
@@ -24,7 +24,7 @@
 CPPFLAGS	+= -Iarch/$(ARCH)
 AFLAGS		+= -Iarch/$(ARCH)
 CFLAGS		+= -Iarch/$(ARCH) -msoft-float -pipe \
-		-ffixed-r2 -Wno-uninitialized -mmultiple
+		-ffixed-r2 -mmultiple
 CPP		= $(CC) -E $(CFLAGS)
 
 CHECKFLAGS	+= -D__powerpc__=1
diff -Nru a/arch/ppc/platforms/pmac_cpufreq.c b/arch/ppc/platforms/pmac_cpufreq.c
--- a/arch/ppc/platforms/pmac_cpufreq.c	2004-09-15 20:29:40 -07:00
+++ b/arch/ppc/platforms/pmac_cpufreq.c	2004-09-15 20:29:40 -07:00
@@ -498,7 +498,7 @@
  *  - Titanium PowerBook 800 (PMU based, 667Mhz & 800Mhz)
  *  - Titanium PowerBook 400 (PMU based, 300Mhz & 400Mhz)
  *  - Titanium PowerBook 500 (PMU based, 300Mhz & 500Mhz)
- *  - iBook2 500 (PMU based, 400Mhz & 500Mhz)
+ *  - iBook2 500/600 (PMU based, 400Mhz & 500/600Mhz)
  *  - iBook2 700 (CPU based, 400Mhz & 700Mhz, support low voltage)
  *  - Recent MacRISC3 laptops
  *  - iBook G4s and PowerBook G4s with 7447A CPUs
@@ -533,11 +533,8 @@
 		   machine_is_compatible("PowerBook3,5") ||
 		   machine_is_compatible("MacRISC3")) {
 		pmac_cpufreq_init_MacRISC3(cpunode);
-	/* Else check for iBook2 500 */
+	/* Else check for iBook2 500/600 */
 	} else if (machine_is_compatible("PowerBook4,1")) {
-		/* We only know about 500Mhz model */
-		if (cur_freq < 450000 || cur_freq > 550000)
-			goto out;
 		hi_freq = cur_freq;
 		low_freq = 400000;
 		set_speed_proc = pmu_set_cpu_speed;
diff -Nru a/arch/ppc64/Kconfig.debug b/arch/ppc64/Kconfig.debug
--- a/arch/ppc64/Kconfig.debug	2004-09-15 20:29:40 -07:00
+++ b/arch/ppc64/Kconfig.debug	2004-09-15 20:29:40 -07:00
@@ -44,16 +44,6 @@
 	  for handling hard and soft interrupts.  This can help avoid
 	  overflowing the process kernel stacks.
 
-config SPINLINE
-	bool "Inline spinlock code at each call site"
-	depends on SMP && !PPC_SPLPAR && !PPC_ISERIES
-	help
-	  Say Y if you want to have the code for acquiring spinlocks
-	  and rwlocks inlined at each call site.  This makes the kernel
-	  somewhat bigger, but can be useful when profiling the kernel.
-
-	  If in doubt, say N.
-
 config SCHEDSTATS
 	bool "Collect scheduler statistics"
 	depends on DEBUG_KERNEL && PROC_FS
diff -Nru a/arch/ppc64/Makefile b/arch/ppc64/Makefile
--- a/arch/ppc64/Makefile	2004-09-15 20:29:40 -07:00
+++ b/arch/ppc64/Makefile	2004-09-15 20:29:40 -07:00
@@ -22,12 +22,17 @@
 CC		:= $(CC) -m64
 endif
 
+new_nm := $(shell if $(NM) --help 2>&1 | grep -- '--synthetic' > /dev/null; then echo y; else echo n; fi)
+
+ifeq ($(new_nm),y)
+NM		:= $(NM) --synthetic
+endif
+
 CHECKFLAGS	+= -m64 -D__powerpc__=1
 
 LDFLAGS		:= -m elf64ppc
 LDFLAGS_vmlinux	:= -Bstatic -e $(KERNELLOAD) -Ttext $(KERNELLOAD)
-CFLAGS		+= -msoft-float -pipe -Wno-uninitialized -mminimal-toc \
-		   -mtraceback=none
+CFLAGS		+= -msoft-float -pipe -mminimal-toc -mtraceback=none
 
 ifeq ($(CONFIG_POWER4_ONLY),y)
 	CFLAGS += $(call cc-option,-mcpu=power4)
diff -Nru a/arch/ppc64/kernel/chrp_setup.c b/arch/ppc64/kernel/chrp_setup.c
--- a/arch/ppc64/kernel/chrp_setup.c	2004-09-15 20:29:40 -07:00
+++ b/arch/ppc64/kernel/chrp_setup.c	2004-09-15 20:29:40 -07:00
@@ -140,8 +140,6 @@
 		ROOT_DEV = Root_SDA2;
 	}
 
-	printk("Boot arguments: %s\n", cmd_line);
-
 	fwnmi_init();
 
 #ifndef CONFIG_PPC_ISERIES
diff -Nru a/arch/ppc64/kernel/eeh.c b/arch/ppc64/kernel/eeh.c
--- a/arch/ppc64/kernel/eeh.c	2004-09-15 20:29:40 -07:00
+++ b/arch/ppc64/kernel/eeh.c	2004-09-15 20:29:40 -07:00
@@ -48,9 +48,6 @@
 static int ibm_slot_error_detail;
 
 static int eeh_subsystem_enabled;
-#define EEH_MAX_OPTS 4096
-static char *eeh_opts;
-static int eeh_opts_last;
 
 /* Buffer for reporting slot-error-detail rtas calls */
 static unsigned char slot_errbuf[RTAS_ERROR_LOG_MAX];
@@ -62,10 +59,6 @@
 static DEFINE_PER_CPU(unsigned long, false_positives);
 static DEFINE_PER_CPU(unsigned long, ignored_failures);
 
-static int eeh_check_opts_config(struct device_node *dn, int class_code,
-				 int vendor_id, int device_id,
-				 int default_state);
-
 /**
  * The pci address cache subsystem.  This subsystem places
  * PCI device address resources into a red-black tree, sorted
@@ -497,7 +490,6 @@
 struct eeh_early_enable_info {
 	unsigned int buid_hi;
 	unsigned int buid_lo;
-	int force_off;
 };
 
 /* Enable eeh for the given device node. */
@@ -539,18 +531,8 @@
 	if ((*class_code >> 16) == PCI_BASE_CLASS_DISPLAY)
 		enable = 0;
 
-	if (!eeh_check_opts_config(dn, *class_code, *vendor_id, *device_id,
-				   enable)) {
-		if (enable) {
-			printk(KERN_WARNING "EEH: %s user requested to run "
-			       "without EEH checking.\n", dn->full_name);
-			enable = 0;
-		}
-	}
-
-	if (!enable || info->force_off) {
+	if (!enable)
 		dn->eeh_mode |= EEH_MODE_NOCHECK;
-	}
 
 	/* Ok... see if this device supports EEH.  Some do, some don't,
 	 * and the only way to find out is to check each and every one. */
@@ -604,15 +586,12 @@
 {
 	struct device_node *phb, *np;
 	struct eeh_early_enable_info info;
-	char *eeh_force_off = strstr(saved_command_line, "eeh-force-off");
 
 	init_pci_config_tokens();
 
 	np = of_find_node_by_path("/rtas");
-	if (np == NULL) {
-		printk(KERN_WARNING "EEH: RTAS not found !\n");
+	if (np == NULL)
 		return;
-	}
 
 	ibm_set_eeh_option = rtas_token("ibm,set-eeh-option");
 	ibm_set_slot_reset = rtas_token("ibm,set-slot-reset");
@@ -632,13 +611,6 @@
 		eeh_error_buf_size = RTAS_ERROR_LOG_MAX;
 	}
 
-	info.force_off = 0;
-	if (eeh_force_off) {
-		printk(KERN_WARNING "EEH: WARNING: PCI Enhanced I/O Error "
-		       "Handling is user disabled\n");
-		info.force_off = 1;
-	}
-
 	/* Enable EEH for all adapters.  Note that eeh requires buid's */
 	for (phb = of_find_node_by_name(NULL, "pci"); phb;
 	     phb = of_find_node_by_name(phb, "pci")) {
@@ -653,11 +625,10 @@
 		traverse_pci_devices(phb, early_enable_eeh, &info);
 	}
 
-	if (eeh_subsystem_enabled) {
+	if (eeh_subsystem_enabled)
 		printk(KERN_INFO "EEH: PCI Enhanced I/O Error Handling Enabled\n");
-	} else {
-		printk(KERN_WARNING "EEH: disabled PCI Enhanced I/O Error Handling\n");
-	}
+	else
+		printk(KERN_WARNING "EEH: No capable adapters found\n");
 }
 
 /**
@@ -734,6 +705,128 @@
 EXPORT_SYMBOL(eeh_remove_device);
 
 /*
+ * Here comes the EEH implementation of the IOMAP 
+ * interfaces.
+ */
+unsigned int fastcall ioread8(void __iomem *addr)
+{
+	return readb(addr);
+}
+unsigned int fastcall ioread16(void __iomem *addr)
+{
+	return readw(addr);
+}
+unsigned int fastcall ioread32(void __iomem *addr)
+{
+	return readl(addr);
+}
+EXPORT_SYMBOL(ioread8);
+EXPORT_SYMBOL(ioread16);
+EXPORT_SYMBOL(ioread32);
+
+void fastcall iowrite8(u8 val, void __iomem *addr)
+{
+	writeb(val, addr);
+}
+void fastcall iowrite16(u16 val, void __iomem *addr)
+{
+	writew(val, addr);
+}
+void fastcall iowrite32(u32 val, void __iomem *addr)
+{
+	writel(val, addr);
+}
+EXPORT_SYMBOL(iowrite8);
+EXPORT_SYMBOL(iowrite16);
+EXPORT_SYMBOL(iowrite32);
+
+/*
+ * These are the "repeat read/write" functions. Note the
+ * non-CPU byte order. We do things in "IO byteorder"
+ * here.
+ *
+ * FIXME! We could make these do EEH handling if we really
+ * wanted. Not clear if we do.
+ */
+void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
+{
+	_insb((void *) IO_TOKEN_TO_ADDR(addr), dst, count);
+}
+void ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
+{
+	_insw_ns((void *) IO_TOKEN_TO_ADDR(addr), dst, count);
+}
+void ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
+{
+	_insl_ns((void *) IO_TOKEN_TO_ADDR(addr), dst, count);
+}
+EXPORT_SYMBOL(ioread8_rep);
+EXPORT_SYMBOL(ioread16_rep);
+EXPORT_SYMBOL(ioread32_rep);
+
+void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
+{
+	_outsb((void *) IO_TOKEN_TO_ADDR(addr), src, count);
+}
+void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
+{
+	_outsw_ns((void *) IO_TOKEN_TO_ADDR(addr), src, count);
+}
+void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
+{
+	_outsl_ns((void *) IO_TOKEN_TO_ADDR(addr), src, count);
+}
+EXPORT_SYMBOL(iowrite8_rep);
+EXPORT_SYMBOL(iowrite16_rep);
+EXPORT_SYMBOL(iowrite32_rep);
+
+void __iomem *ioport_map(unsigned long port, unsigned int len)
+{
+	if (!_IO_IS_VALID(port))
+		return NULL;
+	return (void __iomem *) IO_ADDR_TO_TOKEN(port+pci_io_base);
+}
+
+void ioport_unmap(void __iomem *addr)
+{
+	/* Nothing to do */
+}
+EXPORT_SYMBOL(ioport_map);
+EXPORT_SYMBOL(ioport_unmap);
+
+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
+{
+	unsigned long start = pci_resource_start(dev, bar);
+	unsigned long len = pci_resource_len(dev, bar);
+	unsigned long flags = pci_resource_flags(dev, bar);
+
+	if (!len)
+		return NULL;
+	if (max && len > max)
+		len = max;
+	if (flags & IORESOURCE_IO)
+		return ioport_map(start, len);
+	if (flags & IORESOURCE_MEM) {
+		void __iomem *vaddr = (void __iomem *) start;
+		if (dev  && eeh_subsystem_enabled) {
+			struct device_node *dn = pci_device_to_OF_node(dev);
+			if (dn && !(dn->eeh_mode & EEH_MODE_NOCHECK))
+				return (void __iomem *) IO_ADDR_TO_TOKEN(vaddr);
+		}
+		return vaddr;
+	}
+	/* What? */
+	return NULL;
+}
+
+void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
+{
+	/* Nothing to do */
+}
+EXPORT_SYMBOL(pci_iomap);
+EXPORT_SYMBOL(pci_iounmap);
+
+/*
  * If EEH is implemented, find the PCI device using given phys addr
  * and check to see if eeh failure checking is disabled.
  * Remap the addr (trivially) to the EEH region if EEH checking enabled.
@@ -816,129 +909,3 @@
 	return 0;
 }
 __initcall(eeh_init_proc);
-
-/*
- * Test if "dev" should be configured on or off.
- * This processes the options literally from left to right.
- * This lets the user specify stupid combinations of options,
- * but at least the result should be very predictable.
- */
-static int eeh_check_opts_config(struct device_node *dn,
-				 int class_code, int vendor_id, int device_id,
-				 int default_state)
-{
-	char devname[32], classname[32];
-	char *strs[8], *s;
-	int nstrs, i;
-	int ret = default_state;
-
-	/* Build list of strings to match */
-	nstrs = 0;
-	s = (char *)get_property(dn, "ibm,loc-code", NULL);
-	if (s)
-		strs[nstrs++] = s;
-	sprintf(devname, "dev%04x:%04x", vendor_id, device_id);
-	strs[nstrs++] = devname;
-	sprintf(classname, "class%04x", class_code);
-	strs[nstrs++] = classname;
-	strs[nstrs++] = "";	/* yes, this matches the empty string */
-
-	/*
-	 * Now see if any string matches the eeh_opts list.
-	 * The eeh_opts list entries start with + or -.
-	 */
-	for (s = eeh_opts; s && (s < (eeh_opts + eeh_opts_last));
-	     s += strlen(s)+1) {
-		for (i = 0; i < nstrs; i++) {
-			if (strcasecmp(strs[i], s+1) == 0) {
-				ret = (strs[i][0] == '+') ? 1 : 0;
-			}
-		}
-	}
-	return ret;
-}
-
-/*
- * Handle kernel eeh-on & eeh-off cmd line options for eeh.
- *
- * We support:
- *	eeh-off=loc1,loc2,loc3...
- *
- * and this option can be repeated so
- *      eeh-off=loc1,loc2 eeh-off=loc3
- * is the same as eeh-off=loc1,loc2,loc3
- *
- * loc is an IBM location code that can be found in a manual or
- * via openfirmware (or the Hardware Management Console).
- *
- * We also support these additional "loc" values:
- *
- *	dev#:#    vendor:device id in hex (e.g. dev1022:2000)
- *	class#    class id in hex (e.g. class0200)
- *
- * If no location code is specified all devices are assumed
- * so eeh-off means eeh by default is off.
- */
-
-/*
- * This is implemented as a null separated list of strings.
- * Each string looks like this:  "+X" or "-X"
- * where X is a loc code, vendor:device, class (as shown above)
- * or empty which is used to indicate all.
- *
- * We interpret this option string list so that it will literally
- * behave left-to-right even if some combinations don't make sense.
- */
-static int __init eeh_parm(char *str, int state)
-{
-	char *s, *cur, *curend;
-
-	if (!eeh_opts) {
-		eeh_opts = alloc_bootmem(EEH_MAX_OPTS);
-		eeh_opts[eeh_opts_last++] = '+'; /* default */
-		eeh_opts[eeh_opts_last++] = '\0';
-	}
-	if (*str == '\0') {
-		eeh_opts[eeh_opts_last++] = state ? '+' : '-';
-		eeh_opts[eeh_opts_last++] = '\0';
-		return 1;
-	}
-	if (*str == '=')
-		str++;
-	for (s = str; s && *s != '\0'; s = curend) {
-		cur = s;
-		/* ignore empties.  Don't treat as "all-on" or "all-off" */
-		while (*cur == ',')
-			cur++;
-		curend = strchr(cur, ',');
-		if (!curend)
-			curend = cur + strlen(cur);
-		if (*cur) {
-			int curlen = curend-cur;
-			if (eeh_opts_last + curlen > EEH_MAX_OPTS-2) {
-				printk(KERN_WARNING "EEH: sorry...too many "
-				       "eeh cmd line options\n");
-				return 1;
-			}
-			eeh_opts[eeh_opts_last++] = state ? '+' : '-';
-			strncpy(eeh_opts+eeh_opts_last, cur, curlen);
-			eeh_opts_last += curlen;
-			eeh_opts[eeh_opts_last++] = '\0';
-		}
-	}
-
-	return 1;
-}
-
-static int __init eehoff_parm(char *str)
-{
-	return eeh_parm(str, 0);
-}
-
-static int __init eehon_parm(char *str)
-{
-	return eeh_parm(str, 1);
-}
-
-__setup("eeh-off", eehoff_parm);
-__setup("eeh-on", eehon_parm);
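
The ioread/iowrite/pci_iomap routines added above mirror the generic
lib/iomap.c interface, with readb/readw/readl (and hence EEH token
checking where enabled) underneath. A minimal sketch of how a PCI driver
would consume them; the function name, BAR number and register offset are
made-up values, not from this patch:

/* Illustrative only: example_probe, BAR 0 and offset 0x10 are
 * hypothetical.  Error handling is reduced to the essentials.
 */
static int example_probe(struct pci_dev *pdev)
{
	void __iomem *regs;
	u32 val;

	regs = pci_iomap(pdev, 0, 0);		/* 0 = map the whole BAR */
	if (!regs)
		return -ENOMEM;

	val = ioread32(regs + 0x10);		/* works for MMIO and PIO BARs */
	iowrite32(val | 1, regs + 0x10);

	pci_iounmap(pdev, regs);
	return 0;
}
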
diff -Nru a/arch/ppc64/kernel/head.S b/arch/ppc64/kernel/head.S
--- a/arch/ppc64/kernel/head.S	2004-09-15 20:29:40 -07:00
+++ b/arch/ppc64/kernel/head.S	2004-09-15 20:29:40 -07:00
@@ -548,15 +548,15 @@
 	.llong	0		/* Reserved */
 	.llong	0		/* Reserved */
 	.llong	0		/* Reserved */
-	.llong	0xc00000000	/* KERNELBASE ESID */
-	.llong	0x6a99b4b14	/* KERNELBASE VSID */
+	.llong	(KERNELBASE>>SID_SHIFT)
+	.llong	0x40bffffd5	/* KERNELBASE VSID */
 	/* We have to list the bolted VMALLOC segment here, too, so that it
 	 * will be restored on shared processor switch */
-	.llong	0xd00000000	/* VMALLOCBASE ESID */
-	.llong	0x08d12e6ab	/* VMALLOCBASE VSID */
+	.llong	(VMALLOCBASE>>SID_SHIFT)
+	.llong	0xb0cffffd1	/* VMALLOCBASE VSID */
 	.llong	8192		/* # pages to map (32 MB) */
 	.llong	0		/* Offset from start of loadarea to start of map */
-	.llong	0x0006a99b4b140000	/* VPN of first page to map */
+	.llong	0x40bffffd50000	/* VPN of first page to map */
 
 	. = 0x6100
 
@@ -1064,18 +1064,9 @@
 	rldimi	r10,r11,7,52	/* r10 = first ste of the group */
 
 	/* Calculate VSID */
-	/* (((ea >> 28) & 0x1fff) << 15) | (ea >> 60) */
-	rldic	r11,r11,15,36
-	ori	r11,r11,0xc
-
-	/* VSID_RANDOMIZER */
-	li	r9,9
-	sldi	r9,r9,32
-	oris	r9,r9,58231
-	ori	r9,r9,39831
-
-	mulld	r9,r11,r9
-	rldic	r9,r9,12,16	/* r9 = vsid << 12 */
+	/* This is a kernel address, so protovsid = ESID */
+	ASM_VSID_SCRAMBLE(r11, r9)
+	rldic	r9,r11,12,16	/* r9 = vsid << 12 */
 
 	/* Search the primary group for a free entry */
 1:	ld	r11,0(r10)	/* Test valid bit of the current ste	*/
diff -Nru a/arch/ppc64/kernel/iSeries_pci_reset.c b/arch/ppc64/kernel/iSeries_pci_reset.c
--- a/arch/ppc64/kernel/iSeries_pci_reset.c	2004-09-15 20:29:40 -07:00
+++ b/arch/ppc64/kernel/iSeries_pci_reset.c	2004-09-15 20:29:40 -07:00
@@ -65,7 +65,8 @@
 		AssertDelay = (5 * HZ) / 10;
 	else
 		AssertDelay = (AssertTime * HZ) / 10;
-	if (WaitDelay == 0)
+
+	if (DelayTime == 0)
 		WaitDelay = (30 * HZ) / 10;
 	else
 		WaitDelay = (DelayTime * HZ) / 10;
diff -Nru a/arch/ppc64/kernel/iSeries_setup.c b/arch/ppc64/kernel/iSeries_setup.c
--- a/arch/ppc64/kernel/iSeries_setup.c	2004-09-15 20:29:40 -07:00
+++ b/arch/ppc64/kernel/iSeries_setup.c	2004-09-15 20:29:40 -07:00
@@ -333,32 +333,31 @@
 #endif
 	if (itLpNaca.xPirEnvironMode == 0) 
 		piranha_simulator = 1;
+
+	/* Associate Lp Event Queue 0 with processor 0 */
+	HvCallEvent_setLpEventQueueInterruptProc(0, 0);
+
+	mf_init();
+	mf_initialized = 1;
+	mb();
 }
 
-void __init iSeries_init(unsigned long r3, unsigned long r4, unsigned long r5, 
-	   unsigned long r6, unsigned long r7)
+void __init iSeries_parse_cmdline(void)
 {
 	char *p, *q;
 
-	/* Associate Lp Event Queue 0 with processor 0 */
-	HvCallEvent_setLpEventQueueInterruptProc(0, 0);
-
 	/* copy the command line parameter from the primary VSP  */
 	HvCallEvent_dmaToSp(cmd_line, 2 * 64* 1024, 256,
 			HvLpDma_Direction_RemoteToLocal);
 
 	p = cmd_line;
 	q = cmd_line + 255;
-	while( p < q ) {
+	while(p < q) {
 		if (!*p || *p == '\n')
 			break;
 		++p;
 	}
 	*p = 0;
-
-	mf_init();
-	mf_initialized = 1;
-	mb();
 }
 
 /*
diff -Nru a/arch/ppc64/kernel/idle.c b/arch/ppc64/kernel/idle.c
--- a/arch/ppc64/kernel/idle.c	2004-09-15 20:29:40 -07:00
+++ b/arch/ppc64/kernel/idle.c	2004-09-15 20:29:40 -07:00
@@ -16,28 +16,16 @@
  */
 
 #include <linux/config.h>
-#include <linux/errno.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
-#include <linux/mm.h>
 #include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/stddef.h>
-#include <linux/unistd.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
 #include <linux/cpu.h>
 
-#include <asm/pgtable.h>
-#include <asm/uaccess.h>
 #include <asm/system.h>
-#include <asm/io.h>
 #include <asm/processor.h>
 #include <asm/mmu.h>
-#include <asm/cache.h>
 #include <asm/cputable.h>
 #include <asm/time.h>
-#include <asm/iSeries/LparData.h>
 #include <asm/iSeries/HvCall.h>
 #include <asm/iSeries/ItLpQueue.h>
 
@@ -45,11 +33,11 @@
 extern long poll_pending(void);
 extern void power4_idle(void);
 
-int (*idle_loop)(void);
+static int (*idle_loop)(void);
 
 #ifdef CONFIG_PPC_ISERIES
-unsigned long maxYieldTime = 0;
-unsigned long minYieldTime = 0xffffffffffffffffUL;
+static unsigned long maxYieldTime = 0;
+static unsigned long minYieldTime = 0xffffffffffffffffUL;
 
 static void yield_shared_processor(void)
 {
@@ -80,7 +68,7 @@
 	process_iSeries_events();
 }
 
-int iSeries_idle(void)
+static int iSeries_idle(void)
 {
 	struct paca_struct *lpaca;
 	long oldval;
@@ -91,13 +79,10 @@
 	CTRL = mfspr(CTRLF);
 	CTRL &= ~RUNLATCH;
 	mtspr(CTRLT, CTRL);
-#if 0
-	init_idle();	
-#endif
 
 	lpaca = get_paca();
 
-	for (;;) {
+	while (1) {
 		if (lpaca->lppaca.xSharedProc) {
 			if (ItLpQueue_isLpIntPending(lpaca->lpqueue_ptr))
 				process_iSeries_events();
@@ -125,11 +110,13 @@
 
 		schedule();
 	}
+
 	return 0;
 }
-#endif
 
-int default_idle(void)
+#else
+
+static int default_idle(void)
 {
 	long oldval;
 	unsigned int cpu = smp_processor_id();
@@ -142,7 +129,12 @@
 
 			while (!need_resched() && !cpu_is_offline(cpu)) {
 				barrier();
+				/*
+				 * Go into low thread priority and possibly
+				 * low power mode.
+				 */
 				HMT_low();
+				HMT_very_low();
 			}
 
 			HMT_medium();
@@ -159,8 +151,6 @@
 	return 0;
 }
 
-#ifdef CONFIG_PPC_PSERIES
-
 DECLARE_PER_CPU(unsigned long, smt_snooze_delay);
 
 int dedicated_idle(void)
@@ -174,8 +164,10 @@
 	ppaca = &paca[cpu ^ 1];
 
 	while (1) {
-		/* Indicate to the HV that we are idle.  Now would be
-		 * a good time to find other work to dispatch. */
+		/*
+		 * Indicate to the HV that we are idle. Now would be
+		 * a good time to find other work to dispatch.
+		 */
 		lpaca->lppaca.xIdle = 1;
 
 		oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
@@ -184,41 +176,31 @@
 			start_snooze = __get_tb() +
 				*smt_snooze_delay * tb_ticks_per_usec;
 			while (!need_resched() && !cpu_is_offline(cpu)) {
-				/* need_resched could be 1 or 0 at this 
-				 * point.  If it is 0, set it to 0, so
-				 * an IPI/Prod is sent.  If it is 1, keep
-				 * it that way & schedule work.
+				/*
+				 * Go into low thread priority and possibly
+				 * low power mode.
 				 */
+				HMT_low();
+				HMT_very_low();
+
 				if (*smt_snooze_delay == 0 ||
-				    __get_tb() < start_snooze) {
-					HMT_low(); /* Low thread priority */
+				    __get_tb() < start_snooze)
 					continue;
-				}
 
-				HMT_very_low(); /* Low power mode */
+				HMT_medium();
 
-				/* If the SMT mode is system controlled & the 
-				 * partner thread is doing work, switch into
-				 * ST mode.
-				 */
-				if((naca->smt_state == SMT_DYNAMIC) &&
-				   (!(ppaca->lppaca.xIdle))) {
-					/* Indicate we are no longer polling for
-					 * work, and then clear need_resched.  If
-					 * need_resched was 1, set it back to 1
-					 * and schedule work
+				if (!(ppaca->lppaca.xIdle)) {
+					local_irq_disable();
+
+					/*
+					 * We are about to sleep the thread
+					 * and so won't be polling any
+					 * more.
 					 */
 					clear_thread_flag(TIF_POLLING_NRFLAG);
-					oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
-					if(oldval == 1) {
-						set_need_resched();
-						break;
-					}
-
-					/* DRENG: Go HMT_medium here ? */
-					local_irq_disable(); 
 
-					/* SMT dynamic mode.  Cede will result 
+					/*
+					 * SMT dynamic mode. Cede will result
 					 * in this thread going dormant, if the
 					 * partner thread is still doing work.
 					 * Thread wakes up if partner goes idle,
@@ -226,15 +208,21 @@
 					 * occurs.  Returning from the cede
 					 * enables external interrupts.
 					 */
-					cede_processor();
+					if (!need_resched())
+						cede_processor();
+					else
+						local_irq_enable();
 				} else {
-					/* Give the HV an opportunity at the
+					/*
+					 * Give the HV an opportunity at the
 					 * processor, since we are not doing
 					 * any work.
 					 */
 					poll_pending();
 				}
 			}
+
+			clear_thread_flag(TIF_POLLING_NRFLAG);
 		} else {
 			set_need_resched();
 		}
@@ -248,48 +236,49 @@
 	return 0;
 }
 
-int shared_idle(void)
+static int shared_idle(void)
 {
 	struct paca_struct *lpaca = get_paca();
+	unsigned int cpu = smp_processor_id();
 
 	while (1) {
-		if (cpu_is_offline(smp_processor_id()) &&
-				system_state == SYSTEM_RUNNING)
-			cpu_die();
-
-		/* Indicate to the HV that we are idle.  Now would be
-		 * a good time to find other work to dispatch. */
+		/*
+		 * Indicate to the HV that we are idle. Now would be
+		 * a good time to find other work to dispatch.
+		 */
 		lpaca->lppaca.xIdle = 1;
 
-		if (!need_resched()) {
-			local_irq_disable(); 
-			
-			/* 
+		while (!need_resched() && !cpu_is_offline(cpu)) {
+			local_irq_disable();
+
+			/*
 			 * Yield the processor to the hypervisor.  We return if
 			 * an external interrupt occurs (which are driven prior
 			 * to returning here) or if a prod occurs from another 
-			 * processor.  When returning here, external interrupts 
+			 * processor. When returning here, external interrupts
 			 * are enabled.
+			 *
+			 * Check need_resched() again with interrupts disabled
+			 * to avoid a race.
 			 */
-			cede_processor();
+			if (!need_resched())
+				cede_processor();
+			else
+				local_irq_enable();
 		}
 
 		HMT_medium();
 		lpaca->lppaca.xIdle = 0;
 		schedule();
+		if (cpu_is_offline(smp_processor_id()) &&
+		    system_state == SYSTEM_RUNNING)
+			cpu_die();
 	}
 
 	return 0;
 }
-#endif
 
-int cpu_idle(void)
-{
-	idle_loop();
-	return 0; 
-}
-
-int native_idle(void)
+static int powermac_idle(void)
 {
 	while(1) {
 		if (!need_resched())
@@ -299,6 +288,13 @@
 	}
 	return 0;
 }
+#endif
+
+int cpu_idle(void)
+{
+	idle_loop();
+	return 0;
+}
 
 int idle_setup(void)
 {
@@ -319,8 +315,8 @@
 			idle_loop = default_idle;
 		}
 	} else if (systemcfg->platform == PLATFORM_POWERMAC) {
-		printk("idle = native_idle\n");
-		idle_loop = native_idle;
+		printk("idle = powermac_idle\n");
+		idle_loop = powermac_idle;
 	} else {
 		printk("idle_setup: unknown platform, use default_idle\n");
 		idle_loop = default_idle;
@@ -329,4 +325,3 @@
 
 	return 1;
 }
-
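
Both dedicated_idle() and shared_idle() now follow the same pattern around
cede_processor(): re-check need_resched() with interrupts disabled before
ceding, so a wakeup that sets TIF_NEED_RESCHED between the outer test and
the cede is not lost. In sketch form (this restates the shape of the code
above, it is not new functionality):

	local_irq_disable();
	if (!need_resched())
		cede_processor();	/* returns with interrupts enabled */
	else
		local_irq_enable();
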
diff -Nru a/arch/ppc64/kernel/iommu.c b/arch/ppc64/kernel/iommu.c
--- a/arch/ppc64/kernel/iommu.c	2004-09-15 20:29:40 -07:00
+++ b/arch/ppc64/kernel/iommu.c	2004-09-15 20:29:40 -07:00
@@ -229,7 +229,7 @@
 		struct scatterlist *sglist, int nelems,
 		enum dma_data_direction direction)
 {
-	dma_addr_t dma_next, dma_addr;
+	dma_addr_t dma_next = 0, dma_addr;
 	unsigned long flags;
 	struct scatterlist *s, *outs, *segstart;
 	int outcount;
diff -Nru a/arch/ppc64/kernel/lparcfg.c b/arch/ppc64/kernel/lparcfg.c
--- a/arch/ppc64/kernel/lparcfg.c	2004-09-15 20:29:40 -07:00
+++ b/arch/ppc64/kernel/lparcfg.c	2004-09-15 20:29:40 -07:00
@@ -34,31 +34,37 @@
 #include <asm/rtas.h>
 #include <asm/system.h>
 
-#define MODULE_VERS "1.3"
+#define MODULE_VERS "1.4"
 #define MODULE_NAME "lparcfg"
 
 /* #define LPARCFG_DEBUG */
 
 /* find a better place for this function... */
-void log_plpar_hcall_return(unsigned long rc,char * tag)
+void log_plpar_hcall_return(unsigned long rc, char *tag)
 {
-	if (rc ==0 ) /* success, return */
+	if (rc == 0)		/* success, return */
 		return;
 /* check for null tag ? */
 	if (rc == H_Hardware)
-		printk(KERN_INFO "plpar-hcall (%s) failed with hardware fault\n",tag);
+		printk(KERN_INFO
+		       "plpar-hcall (%s) failed with hardware fault\n", tag);
 	else if (rc == H_Function)
-		printk(KERN_INFO "plpar-hcall (%s) failed; function not allowed\n",tag);
+		printk(KERN_INFO
+		       "plpar-hcall (%s) failed; function not allowed\n", tag);
 	else if (rc == H_Authority)
-		printk(KERN_INFO "plpar-hcall (%s) failed; not authorized to this function\n",tag);
+		printk(KERN_INFO
+		       "plpar-hcall (%s) failed; not authorized to this function\n",
+		       tag);
 	else if (rc == H_Parameter)
-		printk(KERN_INFO "plpar-hcall (%s) failed; Bad parameter(s)\n",tag);
+		printk(KERN_INFO "plpar-hcall (%s) failed; Bad parameter(s)\n",
+		       tag);
 	else
-		printk(KERN_INFO "plpar-hcall (%s) failed with unexpected rc(0x%lx)\n",tag,rc);
+		printk(KERN_INFO
+		       "plpar-hcall (%s) failed with unexpected rc(0x%lx)\n",
+		       tag, rc);
 
 }
 
-
 static struct proc_dir_entry *proc_ppc64_lparcfg;
 #define LPARCFG_BUFF_SIZE 4096
 
@@ -78,59 +84,60 @@
 
 	shared = (int)(lpaca->lppaca_ptr->xSharedProc);
 	seq_printf(m, "serial_number=%c%c%c%c%c%c%c\n",
-		      e2a(xItExtVpdPanel.mfgID[2]),
-		      e2a(xItExtVpdPanel.mfgID[3]),
-		      e2a(xItExtVpdPanel.systemSerial[1]),
-		      e2a(xItExtVpdPanel.systemSerial[2]),
-		      e2a(xItExtVpdPanel.systemSerial[3]),
-		      e2a(xItExtVpdPanel.systemSerial[4]),
-		      e2a(xItExtVpdPanel.systemSerial[5])); 
+		   e2a(xItExtVpdPanel.mfgID[2]),
+		   e2a(xItExtVpdPanel.mfgID[3]),
+		   e2a(xItExtVpdPanel.systemSerial[1]),
+		   e2a(xItExtVpdPanel.systemSerial[2]),
+		   e2a(xItExtVpdPanel.systemSerial[3]),
+		   e2a(xItExtVpdPanel.systemSerial[4]),
+		   e2a(xItExtVpdPanel.systemSerial[5]));
 
 	seq_printf(m, "system_type=%c%c%c%c\n",
-		      e2a(xItExtVpdPanel.machineType[0]),
-		      e2a(xItExtVpdPanel.machineType[1]),
-		      e2a(xItExtVpdPanel.machineType[2]),
-		      e2a(xItExtVpdPanel.machineType[3])); 
+		   e2a(xItExtVpdPanel.machineType[0]),
+		   e2a(xItExtVpdPanel.machineType[1]),
+		   e2a(xItExtVpdPanel.machineType[2]),
+		   e2a(xItExtVpdPanel.machineType[3]));
 
-	lp_index = HvLpConfig_getLpIndex(); 
+	lp_index = HvLpConfig_getLpIndex();
 	seq_printf(m, "partition_id=%d\n", (int)lp_index);
 
 	seq_printf(m, "system_active_processors=%d\n",
-		      (int)HvLpConfig_getSystemPhysicalProcessors()); 
+		   (int)HvLpConfig_getSystemPhysicalProcessors());
 
 	seq_printf(m, "system_potential_processors=%d\n",
-		      (int)HvLpConfig_getSystemPhysicalProcessors()); 
+		   (int)HvLpConfig_getSystemPhysicalProcessors());
 
-	processors = (int)HvLpConfig_getPhysicalProcessors(); 
+	processors = (int)HvLpConfig_getPhysicalProcessors();
 	seq_printf(m, "partition_active_processors=%d\n", processors);
 
-	max_processors = (int)HvLpConfig_getMaxPhysicalProcessors(); 
+	max_processors = (int)HvLpConfig_getMaxPhysicalProcessors();
 	seq_printf(m, "partition_potential_processors=%d\n", max_processors);
 
-	if(shared) {
-		entitled_capacity = HvLpConfig_getSharedProcUnits(); 
-		max_entitled_capacity = HvLpConfig_getMaxSharedProcUnits(); 
+	if (shared) {
+		entitled_capacity = HvLpConfig_getSharedProcUnits();
+		max_entitled_capacity = HvLpConfig_getMaxSharedProcUnits();
 	} else {
-		entitled_capacity = processors * 100; 
-		max_entitled_capacity = max_processors * 100; 
+		entitled_capacity = processors * 100;
+		max_entitled_capacity = max_processors * 100;
 	}
 	seq_printf(m, "partition_entitled_capacity=%d\n", entitled_capacity);
 
 	seq_printf(m, "partition_max_entitled_capacity=%d\n",
-		      max_entitled_capacity);
+		   max_entitled_capacity);
 
-	if(shared) {
-		pool_id = HvLpConfig_getSharedPoolIndex(); 
+	if (shared) {
+		pool_id = HvLpConfig_getSharedPoolIndex();
 		seq_printf(m, "pool=%d\n", (int)pool_id);
 		seq_printf(m, "pool_capacity=%d\n",
-		    (int)(HvLpConfig_getNumProcsInSharedPool(pool_id)*100));
+			   (int)(HvLpConfig_getNumProcsInSharedPool(pool_id) *
+				 100));
 	}
 
 	seq_printf(m, "shared_processor_mode=%d\n", shared);
 
 	return 0;
 }
-#endif /* CONFIG_PPC_ISERIES */
+#endif				/* CONFIG_PPC_ISERIES */
 
 #ifdef CONFIG_PPC_PSERIES
 /* 
@@ -158,11 +165,13 @@
  *                  XXXX  - Processors active on platform. 
  */
 static unsigned int h_get_ppp(unsigned long *entitled,
-		unsigned long  *unallocated, unsigned long *aggregation,
-		unsigned long *resource)
+			      unsigned long *unallocated,
+			      unsigned long *aggregation,
+			      unsigned long *resource)
 {
 	unsigned long rc;
-	rc = plpar_hcall_4out(H_GET_PPP,0,0,0,0,entitled,unallocated,aggregation,resource);
+	rc = plpar_hcall_4out(H_GET_PPP, 0, 0, 0, 0, entitled, unallocated,
+			      aggregation, resource);
 
 	log_plpar_hcall_return(rc, "H_GET_PPP");
 
@@ -185,7 +194,7 @@
  */
 static unsigned long get_purr()
 {
-	unsigned long sum_purr=0;
+	unsigned long sum_purr = 0;
 	return sum_purr;
 }
 
@@ -202,7 +211,7 @@
 {
 	int call_status;
 
-	char * local_buffer = kmalloc(SPLPAR_MAXLENGTH, GFP_KERNEL);
+	char *local_buffer = kmalloc(SPLPAR_MAXLENGTH, GFP_KERNEL);
 	if (!local_buffer) {
 		printk(KERN_ERR "%s %s kmalloc failure at line %d \n",
 		       __FILE__, __FUNCTION__, __LINE__);
@@ -219,22 +228,23 @@
 	spin_unlock(&rtas_data_buf_lock);
 
 	if (call_status != 0) {
-		printk(KERN_INFO "%s %s Error calling get-system-parameter (0x%x)\n",
+		printk(KERN_INFO
+		       "%s %s Error calling get-system-parameter (0x%x)\n",
 		       __FILE__, __FUNCTION__, call_status);
 	} else {
 		int splpar_strlen;
 		int idx, w_idx;
-		char * workbuffer = kmalloc(SPLPAR_MAXLENGTH, GFP_KERNEL);
+		char *workbuffer = kmalloc(SPLPAR_MAXLENGTH, GFP_KERNEL);
 		if (!workbuffer) {
-			printk(KERN_ERR "%s %s kmalloc failure at line %d \n",__FILE__,__FUNCTION__,__LINE__);
+			printk(KERN_ERR "%s %s kmalloc failure at line %d \n",
+			       __FILE__, __FUNCTION__, __LINE__);
 			return;
 		}
-
 #ifdef LPARCFG_DEBUG
 		printk(KERN_INFO "success calling get-system-parameter \n");
 #endif
 		splpar_strlen = local_buffer[0] * 16 + local_buffer[1];
-		local_buffer += 2; /* step over strlen value */
+		local_buffer += 2;	/* step over strlen value */
 
 		memset(workbuffer, 0, SPLPAR_MAXLENGTH);
 		w_idx = 0;
@@ -253,13 +263,15 @@
 				w_idx = 0;
 			} else if (local_buffer[idx] == '=') {
 				/* code here to replace workbuffer contents
-				 with different keyword strings */
-				if (0 == strcmp(workbuffer,"MaxEntCap")) {
-					strcpy(workbuffer, "partition_max_entitled_capacity");
+				   with different keyword strings */
+				if (0 == strcmp(workbuffer, "MaxEntCap")) {
+					strcpy(workbuffer,
+					       "partition_max_entitled_capacity");
 					w_idx = strlen(workbuffer);
 				}
-				if (0 == strcmp(workbuffer,"MaxPlatProcs")) {
-					strcpy(workbuffer, "system_potential_processors");
+				if (0 == strcmp(workbuffer, "MaxPlatProcs")) {
+					strcpy(workbuffer,
+					       "system_potential_processors");
 					w_idx = strlen(workbuffer);
 				}
 			}
@@ -283,7 +295,7 @@
 
 	while ((cpus_dn = of_find_node_by_type(cpus_dn, "cpu"))) {
 #ifdef LPARCFG_DEBUG
-		printk(KERN_ERR "cpus_dn %p \n",cpus_dn);
+		printk(KERN_ERR "cpus_dn %p \n", cpus_dn);
 #endif
 		count++;
 	}
@@ -292,7 +304,8 @@
 
 static int lparcfg_data(struct seq_file *m, void *v)
 {
-	int system_active_processors;
+	int partition_potential_processors;
+	int partition_active_processors;
 	struct device_node *rootdn;
 	const char *model = "";
 	const char *system_id = "";
@@ -305,12 +318,11 @@
 		model = get_property(rootdn, "model", NULL);
 		system_id = get_property(rootdn, "system-id", NULL);
 		lp_index_ptr = (unsigned int *)
-			get_property(rootdn, "ibm,partition-no", NULL);
+		    get_property(rootdn, "ibm,partition-no", NULL);
 		if (lp_index_ptr)
 			lp_index = *lp_index_ptr;
 	}
 
-
 	seq_printf(m, "%s %s \n", MODULE_NAME, MODULE_VERS);
 
 	seq_printf(m, "serial_number=%s\n", system_id);
@@ -323,11 +335,13 @@
 	lrdrp = (int *)get_property(rtas_node, "ibm,lrdr-capacity", NULL);
 
 	if (lrdrp == NULL) {
-		system_active_processors = systemcfg->processorCount;
+		partition_potential_processors = systemcfg->processorCount;
 	} else {
-		system_active_processors = *(lrdrp + 4);
+		partition_potential_processors = *(lrdrp + 4);
 	}
 
+	partition_active_processors = lparcfg_count_active_processors();
+
 	if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) {
 		unsigned long h_entitled, h_unallocated;
 		unsigned long h_aggregation, h_resource;
@@ -342,71 +356,69 @@
 		seq_printf(m, "R6=0x%lx\n", h_aggregation);
 		seq_printf(m, "R7=0x%lx\n", h_resource);
 
-		h_pic(&pool_idle_time, &pool_procs);
-
 		purr = get_purr();
 
 		/* this call handles the ibm,get-system-parameter contents */
 		parse_system_parameter_string(m);
 
-		seq_printf(m, "partition_entitled_capacity=%ld\n",
-			      h_entitled);
-
-		seq_printf(m, "pool=%ld\n",
-			      (h_aggregation >> 0*8) & 0xffff);
+		seq_printf(m, "partition_entitled_capacity=%ld\n", h_entitled);
 
-		seq_printf(m, "group=%ld\n",
-			      (h_aggregation >> 2*8) & 0xffff);
+		seq_printf(m, "group=%ld\n", (h_aggregation >> 2 * 8) & 0xffff);
 
 		seq_printf(m, "system_active_processors=%ld\n",
-			      (h_resource >> 0*8) & 0xffff);
+			   (h_resource >> 0 * 8) & 0xffff);
+
+		/* pool related entries are appropriate for shared configs */
+		if (paca[0].lppaca.xSharedProc) {
 
-		seq_printf(m, "pool_capacity=%ld\n",
-			      (h_resource >> 2*8) & 0xffff);
+			h_pic(&pool_idle_time, &pool_procs);
+
+			seq_printf(m, "pool=%ld\n",
+				   (h_aggregation >> 0 * 8) & 0xffff);
+
+			/* report pool_capacity in percentage */
+			seq_printf(m, "pool_capacity=%ld\n",
+				   ((h_resource >> 2 * 8) & 0xffff) * 100);
+
+			seq_printf(m, "pool_idle_time=%ld\n", pool_idle_time);
+
+			seq_printf(m, "pool_num_procs=%ld\n", pool_procs);
+		}
 
 		seq_printf(m, "unallocated_capacity_weight=%ld\n",
-			      (h_resource >> 4*8) & 0xFF);
+			   (h_resource >> 4 * 8) & 0xFF);
 
 		seq_printf(m, "capacity_weight=%ld\n",
-			      (h_resource >> 5*8) & 0xFF);
-
-		seq_printf(m, "capped=%ld\n",
-			      (h_resource >> 6*8) & 0x01);
+			   (h_resource >> 5 * 8) & 0xFF);
 
-		seq_printf(m, "unallocated_capacity=%ld\n",
-			      h_unallocated);
+		seq_printf(m, "capped=%ld\n", (h_resource >> 6 * 8) & 0x01);
 
-		seq_printf(m, "pool_idle_time=%ld\n",
-			      pool_idle_time);
+		seq_printf(m, "unallocated_capacity=%ld\n", h_unallocated);
 
-		seq_printf(m, "pool_num_procs=%ld\n",
-			      pool_procs);
+		seq_printf(m, "purr=%ld\n", purr);
 
-		seq_printf(m, "purr=%ld\n",
-			      purr);
+	} else {		/* non SPLPAR case */
 
-	} else /* non SPLPAR case */ {
 		seq_printf(m, "system_active_processors=%d\n",
-			      system_active_processors);
+			   partition_potential_processors);
 
 		seq_printf(m, "system_potential_processors=%d\n",
-			      system_active_processors);
+			   partition_potential_processors);
 
 		seq_printf(m, "partition_max_entitled_capacity=%d\n",
-			      100*system_active_processors);
+			   partition_potential_processors * 100);
 
 		seq_printf(m, "partition_entitled_capacity=%d\n",
-			      system_active_processors*100);
+			   partition_active_processors * 100);
 	}
 
 	seq_printf(m, "partition_active_processors=%d\n",
-			(int) lparcfg_count_active_processors());
+		   partition_active_processors);
 
 	seq_printf(m, "partition_potential_processors=%d\n",
-			system_active_processors);
+		   partition_potential_processors);
 
-	seq_printf(m, "shared_processor_mode=%d\n",
-			paca[0].lppaca.xSharedProc);
+	seq_printf(m, "shared_processor_mode=%d\n", paca[0].lppaca.xSharedProc);
 
 	return 0;
 }
@@ -421,14 +433,15 @@
  * This function should be invoked only on systems with
  * FW_FEATURE_SPLPAR.
  */
-static ssize_t lparcfg_write(struct file *file, const char __user *buf, size_t count, loff_t *off)
+static ssize_t lparcfg_write(struct file *file, const char __user * buf,
+			     size_t count, loff_t * off)
 {
 	char *kbuf;
 	char *tmp;
 	u64 new_entitled, *new_entitled_ptr = &new_entitled;
 	u8 new_weight, *new_weight_ptr = &new_weight;
 
-	unsigned long current_entitled;    /* parameters for h_get_ppp */
+	unsigned long current_entitled;	/* parameters for h_get_ppp */
 	unsigned long dummy;
 	unsigned long resource;
 	u8 current_weight;
@@ -453,13 +466,13 @@
 
 	if (!strcmp(kbuf, "partition_entitled_capacity")) {
 		char *endp;
-		*new_entitled_ptr = (u64)simple_strtoul(tmp, &endp, 10);
+		*new_entitled_ptr = (u64) simple_strtoul(tmp, &endp, 10);
 		if (endp == tmp)
 			goto out;
 		new_weight_ptr = &current_weight;
 	} else if (!strcmp(kbuf, "capacity_weight")) {
 		char *endp;
-		*new_weight_ptr = (u8)simple_strtoul(tmp, &endp, 10);
+		*new_weight_ptr = (u8) simple_strtoul(tmp, &endp, 10);
 		if (endp == tmp)
 			goto out;
 		new_entitled_ptr = &current_entitled;
@@ -473,7 +486,7 @@
 		goto out;
 	}
 
-	current_weight = (resource>>5*8)&0xFF;
+	current_weight = (resource >> 5 * 8) & 0xFF;
 
 	pr_debug("%s: current_entitled = %lu, current_weight = %lu\n",
 		 __FUNCTION__, current_entitled, current_weight);
@@ -498,23 +511,23 @@
 		retval = -EIO;
 	}
 
-out:
+      out:
 	kfree(kbuf);
 	return retval;
 }
 
-#endif /* CONFIG_PPC_PSERIES */
+#endif				/* CONFIG_PPC_PSERIES */
 
-static int lparcfg_open(struct inode * inode, struct file * file)
+static int lparcfg_open(struct inode *inode, struct file *file)
 {
-	return single_open(file,lparcfg_data,NULL);
+	return single_open(file, lparcfg_data, NULL);
 }
 
 struct file_operations lparcfg_fops = {
-	owner:		THIS_MODULE,
-	read:		seq_read,
-	open:		lparcfg_open,
-	release:	single_release,
+      owner:THIS_MODULE,
+      read:seq_read,
+      open:lparcfg_open,
+      release:single_release,
 };
 
 int __init lparcfg_init(void)
@@ -533,7 +546,8 @@
 		ent->proc_fops = &lparcfg_fops;
 		ent->data = kmalloc(LPARCFG_BUFF_SIZE, GFP_KERNEL);
 		if (!ent->data) {
-			printk(KERN_ERR "Failed to allocate buffer for lparcfg\n");
+			printk(KERN_ERR
+			       "Failed to allocate buffer for lparcfg\n");
 			remove_proc_entry("lparcfg", ent->parent);
 			return -ENOMEM;
 		}
@@ -550,7 +564,7 @@
 {
 	if (proc_ppc64_lparcfg) {
 		if (proc_ppc64_lparcfg->data) {
-		    kfree(proc_ppc64_lparcfg->data);
+			kfree(proc_ppc64_lparcfg->data);
 		}
 		remove_proc_entry("lparcfg", proc_ppc64_lparcfg->parent);
 	}
diff -Nru a/arch/ppc64/kernel/misc.S b/arch/ppc64/kernel/misc.S
--- a/arch/ppc64/kernel/misc.S	2004-09-15 20:29:40 -07:00
+++ b/arch/ppc64/kernel/misc.S	2004-09-15 20:29:40 -07:00
@@ -860,9 +860,9 @@
 	.llong .sys_ni_syscall		/* 256 reserved for sys_debug_setcontext */
 	.llong .sys_ni_syscall		/* 257 reserved for vserver */
 	.llong .sys_ni_syscall		/* 258 reserved for new sys_remap_file_pages */
-	.llong .sys_ni_syscall		/* 259 reserved for new sys_mbind */
-	.llong .sys_ni_syscall		/* 260 reserved for new sys_get_mempolicy */
-	.llong .sys_ni_syscall		/* 261 reserved for new sys_set_mempolicy */
+	.llong .compat_mbind
+	.llong .compat_get_mempolicy	/* 260 */
+	.llong .compat_set_mempolicy
 	.llong .compat_sys_mq_open
 	.llong .sys_mq_unlink
 	.llong .compat_sys_mq_timedsend
@@ -1132,9 +1132,9 @@
 	.llong .sys_ni_syscall		/* 256 reserved for sys_debug_setcontext */
 	.llong .sys_ni_syscall		/* 257 reserved for vserver */
 	.llong .sys_ni_syscall		/* 258 reserved for new sys_remap_file_pages */
-	.llong .sys_ni_syscall		/* 259 reserved for new sys_mbind */
-	.llong .sys_ni_syscall		/* 260 reserved for new sys_get_mempolicy */
-	.llong .sys_ni_syscall		/* 261 reserved for new sys_set_mempolicy */
+	.llong .sys_mbind
+	.llong .sys_get_mempolicy	/* 260 */
+	.llong .sys_set_mempolicy
 	.llong .sys_mq_open
 	.llong .sys_mq_unlink
 	.llong .sys_mq_timedsend
diff -Nru a/arch/ppc64/kernel/nvram.c b/arch/ppc64/kernel/nvram.c
--- a/arch/ppc64/kernel/nvram.c	2004-09-15 20:29:40 -07:00
+++ b/arch/ppc64/kernel/nvram.c	2004-09-15 20:29:40 -07:00
@@ -340,7 +340,7 @@
 	struct list_head * p;
 	struct nvram_partition * part;
 	struct nvram_partition * new_part = NULL;
-	struct nvram_partition * free_part;
+	struct nvram_partition * free_part = NULL;
 	int seq_init[2] = { 0, 0 };
 	loff_t tmp_index;
 	long size = 0;
diff -Nru a/arch/ppc64/kernel/pSeries_pci.c b/arch/ppc64/kernel/pSeries_pci.c
--- a/arch/ppc64/kernel/pSeries_pci.c	2004-09-15 20:29:40 -07:00
+++ b/arch/ppc64/kernel/pSeries_pci.c	2004-09-15 20:29:40 -07:00
@@ -497,7 +497,7 @@
 	struct pci_controller *phb;
 	unsigned int root_size_cells = 0;
 	unsigned int index;
-	unsigned int *opprop;
+	unsigned int *opprop = NULL;
 	struct device_node *root = of_find_node_by_path("/");
 
 	if (naca->interrupt_controller == IC_OPEN_PIC) {
diff -Nru a/arch/ppc64/kernel/prom.c b/arch/ppc64/kernel/prom.c
--- a/arch/ppc64/kernel/prom.c	2004-09-15 20:29:40 -07:00
+++ b/arch/ppc64/kernel/prom.c	2004-09-15 20:29:40 -07:00
@@ -918,11 +918,7 @@
 		= (void *)virt_to_abs(&__secondary_hold_acknowledge);
 	unsigned long secondary_hold
 		= virt_to_abs(*PTRRELOC((unsigned long *)__secondary_hold));
-	struct systemcfg *_systemcfg = RELOC(systemcfg);
 	struct prom_t *_prom = PTRRELOC(&prom);
-#ifdef CONFIG_SMP
-	struct naca_struct *_naca = RELOC(naca);
-#endif
 
 	prom_debug("prom_hold_cpus: start...\n");
 	prom_debug("    1) spinloop       = 0x%x\n", (unsigned long)spinloop);
@@ -1003,18 +999,18 @@
 			      (*acknowledge == ((unsigned long)-1)); i++ ) ;
 
 			if (*acknowledge == cpuid) {
-				prom_printf("... done\n");
+				prom_printf(" done\n");
 				/* We have to get every CPU out of OF,
 				 * even if we never start it. */
 				if (cpuid >= NR_CPUS)
 					goto next;
 			} else {
-				prom_printf("... failed: %x\n", *acknowledge);
+				prom_printf(" failed: %x\n", *acknowledge);
 			}
 		}
 #ifdef CONFIG_SMP
 		else
-			prom_printf("%x : booting  cpu %s\n", cpuid, path);
+			prom_printf("%x : boot cpu     %s\n", cpuid, path);
 #endif
 next:
 #ifdef CONFIG_SMP
@@ -1023,13 +1019,6 @@
 			cpuid++;
 			if (cpuid >= NR_CPUS)
 				continue;
-			prom_printf("%x : preparing thread ... ",
-				    interrupt_server[i]);
-			if (_naca->smt_state) {
-				prom_printf("available\n");
-			} else {
-				prom_printf("not available\n");
-			}
 		}
 #endif
 		cpuid++;
@@ -1068,57 +1057,6 @@
 	prom_debug("prom_hold_cpus: end...\n");
 }
 
-static void __init smt_setup(void)
-{
-	char *p, *q;
-	char my_smt_enabled = SMT_DYNAMIC;
-	ihandle prom_options = 0;
-	char option[9];
-	unsigned long offset = reloc_offset();
-	struct naca_struct *_naca = RELOC(naca);
-	char found = 0;
-
-	if (strstr(RELOC(cmd_line), RELOC("smt-enabled="))) {
-		for (q = RELOC(cmd_line); (p = strstr(q, RELOC("smt-enabled="))) != 0; ) {
-			q = p + 12;
-			if (p > RELOC(cmd_line) && p[-1] != ' ')
-				continue;
-			found = 1;
-			if (q[0] == 'o' && q[1] == 'f' && 
-			    q[2] == 'f' && (q[3] == ' ' || q[3] == '\0')) {
-				my_smt_enabled = SMT_OFF;
-			} else if (q[0]=='o' && q[1] == 'n' && 
-				   (q[2] == ' ' || q[2] == '\0')) {
-				my_smt_enabled = SMT_ON;
-			} else {
-				my_smt_enabled = SMT_DYNAMIC;
-			} 
-		}
-	}
-	if (!found) {
-		prom_options = call_prom("finddevice", 1, 1, ADDR("/options"));
-		if (prom_options != (ihandle) -1) {
-			prom_getprop(prom_options, "ibm,smt-enabled",
-				     option, sizeof(option));
-			if (option[0] != 0) {
-				found = 1;
-				if (!strcmp(option, RELOC("off")))
-					my_smt_enabled = SMT_OFF;
-				else if (!strcmp(option, RELOC("on")))
-					my_smt_enabled = SMT_ON;
-				else
-					my_smt_enabled = SMT_DYNAMIC;
-			}
-		}
-	}
-
-	if (!found )
-		my_smt_enabled = SMT_DYNAMIC; /* default to on */
-
-	_naca->smt_state = my_smt_enabled;
-}
-
-
 #ifdef CONFIG_BOOTX_TEXT
 
 /* This function will enable the early boot text when doing OF booting. This
@@ -1707,6 +1645,9 @@
 	}
 
 	RELOC(cmd_line[0]) = 0;
+#ifdef CONFIG_CMDLINE
+	strlcpy(RELOC(cmd_line), CONFIG_CMDLINE, sizeof(cmd_line));
+#endif /* CONFIG_CMDLINE */
 	if ((long)_prom->chosen > 0) {
 		prom_getprop(_prom->chosen, "bootargs", p, sizeof(cmd_line));
 		if (p != NULL && p[0] != 0)
@@ -1726,8 +1667,6 @@
 
 	/* Initialize some system info into the Naca early... */
 	prom_initialize_naca();
-
-	smt_setup();
 
 	/* If we are on an SMP machine, then we *MUST* do the
 	 * following, regardless of whether we have an SMP
diff -Nru a/arch/ppc64/kernel/rtasd.c b/arch/ppc64/kernel/rtasd.c
--- a/arch/ppc64/kernel/rtasd.c	2004-09-15 20:29:40 -07:00
+++ b/arch/ppc64/kernel/rtasd.c	2004-09-15 20:29:40 -07:00
@@ -106,7 +106,7 @@
 static void printk_log_rtas(char *buf, int len)
 {
 
-	int i,j,n;
+	int i,j,n = 0;
 	int perline = 16;
 	char buffer[64];
 	char * str = "RTAS event";
@@ -216,12 +216,13 @@
 	if (!no_more_logging && !(err_type & ERR_FLAG_BOOT))
 		nvram_write_error_log(buf, len, err_type);
 
-	/* rtas errors can occur during boot, and we do want to capture
+	/*
+	 * rtas errors can occur during boot, and we do want to capture
 	 * those somewhere, even if nvram isn't ready (why not?), and even
-	 * if rtasd isn't ready. Put them into the boot log, at least.  */
-	if ((err_type & ERR_TYPE_MASK) == ERR_TYPE_RTAS_LOG) {
+	 * if rtasd isn't ready. Put them into the boot log, at least.
+	 */
+	if ((err_type & ERR_TYPE_MASK) == ERR_TYPE_RTAS_LOG)
 		printk_log_rtas(buf, len);
-	}
 
 	/* Check to see if we need to or have stopped logging */
 	if (fatal || no_more_logging) {
@@ -233,9 +234,6 @@
 	/* call type specific method for error */
 	switch (err_type & ERR_TYPE_MASK) {
 	case ERR_TYPE_RTAS_LOG:
-		/* put into syslog and error_log file */
-		printk_log_rtas(buf, len);
-
 		offset = rtas_error_log_buffer_max *
 			((rtas_log_start+rtas_log_size) & LOG_NUMBER_MASK);
 
diff -Nru a/arch/ppc64/kernel/setup.c b/arch/ppc64/kernel/setup.c
--- a/arch/ppc64/kernel/setup.c	2004-09-15 20:29:40 -07:00
+++ b/arch/ppc64/kernel/setup.c	2004-09-15 20:29:40 -07:00
@@ -51,7 +51,6 @@
 extern unsigned long klimit;
 /* extern void *stab; */
 extern HTAB htab_data;
-extern unsigned long loops_per_jiffy;
 
 int have_of = 1;
 
@@ -68,15 +67,15 @@
 		       unsigned long r7);
 
 extern void fw_feature_init(void);
-extern void iSeries_init( void );
-extern void iSeries_init_early( void );
-extern void pSeries_init_early( void );
+extern void iSeries_init_early(void);
+extern void pSeries_init_early(void);
 extern void pSeriesLP_init_early(void);
 extern void pmac_init_early(void);
-extern void mm_init_ppc64( void ); 
+extern void mm_init_ppc64(void);
 extern void pseries_secondary_smp_init(unsigned long); 
 extern int  idle_setup(void);
 extern void vpa_init(int cpu);
+extern void iSeries_parse_cmdline(void);
 
 unsigned long decr_overclock = 1;
 unsigned long decr_overclock_proc0 = 1;
@@ -87,10 +86,6 @@
 
 unsigned char aux_device_present;
 
-void parse_cmd_line(unsigned long r3, unsigned long r4, unsigned long r5,
-		    unsigned long r6, unsigned long r7);
-int parse_bootinfo(void);
-
 #ifdef CONFIG_MAGIC_SYSRQ
 unsigned long SYSRQ_KEY;
 #endif /* CONFIG_MAGIC_SYSRQ */
@@ -157,6 +152,50 @@
 }
 
 #if !defined(CONFIG_PPC_ISERIES) && defined(CONFIG_SMP)
+
+static int smt_enabled_cmdline;
+
+/* Look for ibm,smt-enabled OF option */
+static void check_smt_enabled(void)
+{
+	struct device_node *dn;
+	char *smt_option;
+
+	/* Allow the command line to overrule the OF option */
+	if (smt_enabled_cmdline)
+		return;
+
+	dn = of_find_node_by_path("/options");
+
+	if (dn) {
+		smt_option = (char *)get_property(dn, "ibm,smt-enabled", NULL);
+
+		if (smt_option) {
+			if (!strcmp(smt_option, "on"))
+				smt_enabled_at_boot = 1;
+			else if (!strcmp(smt_option, "off"))
+				smt_enabled_at_boot = 0;
+		}
+	}
+}
+
+/* Look for smt-enabled= cmdline option */
+static int __init early_smt_enabled(char *p)
+{
+	smt_enabled_cmdline = 1;
+
+	if (!p)
+		return 0;
+
+	if (!strcmp(p, "on") || !strcmp(p, "1"))
+		smt_enabled_at_boot = 1;
+	else if (!strcmp(p, "off") || !strcmp(p, "0"))
+		smt_enabled_at_boot = 0;
+
+	return 0;
+}
+early_param("smt-enabled", early_smt_enabled);
+
 /**
  * setup_cpu_maps - initialize the following cpu maps:
  *                  cpu_possible_map
@@ -179,6 +218,8 @@
 	struct device_node *dn = NULL;
 	int cpu = 0;
 
+	check_smt_enabled();
+
 	while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) {
 		u32 *intserv;
 		int j, len = sizeof(u32), nthreads;
@@ -191,9 +232,16 @@
 		nthreads = len / sizeof(u32);
 
 		for (j = 0; j < nthreads && cpu < NR_CPUS; j++) {
+			/*
+			 * Only spin up secondary threads if SMT is enabled.
+			 * We must leave space in the logical map for the
+			 * threads.
+			 */
+			if (j == 0 || smt_enabled_at_boot) {
+				cpu_set(cpu, cpu_present_map);
+				set_hard_smp_processor_id(cpu, intserv[j]);
+			}
 			cpu_set(cpu, cpu_possible_map);
-			cpu_set(cpu, cpu_present_map);
-			set_hard_smp_processor_id(cpu, intserv[j]);
 			cpu++;
 		}
 	}
@@ -249,7 +297,21 @@
 
 	systemcfg->processorCount = num_present_cpus();
 }
+
 #endif /* !defined(CONFIG_PPC_ISERIES) && defined(CONFIG_SMP) */
+
+#ifdef CONFIG_XMON
+static int __init early_xmon(char *p)
+{
+	/* ensure xmon is enabled */
+	xmon_init();
+	debugger(0);
+
+	return 0;
+}
+early_param("xmon", early_xmon);
+#endif
+
 /*
  * Do some initial setup of the system.  The parameters are those which 
  * were passed in from the bootloader.
@@ -261,16 +323,12 @@
 	int ret, i;
 #endif
 
-#ifdef CONFIG_XMON_DEFAULT
-	xmon_init();
-#endif
-
 #ifdef CONFIG_PPC_ISERIES
 	/* pSeries systems are identified in prom.c via OF. */
-	if ( itLpNaca.xLparInstalled == 1 )
+	if (itLpNaca.xLparInstalled == 1)
 		systemcfg->platform = PLATFORM_ISERIES_LPAR;
 #endif
-	
+
 	switch (systemcfg->platform) {
 #ifdef CONFIG_PPC_ISERIES
 	case PLATFORM_ISERIES_LPAR:
@@ -282,22 +340,22 @@
 	case PLATFORM_PSERIES:
 		fw_feature_init();
 		pSeries_init_early();
-		parse_bootinfo();
 		break;
 
 	case PLATFORM_PSERIES_LPAR:
 		fw_feature_init();
 		pSeriesLP_init_early();
-		parse_bootinfo();
 		break;
 #endif /* CONFIG_PPC_PSERIES */
 #ifdef CONFIG_PPC_PMAC
 	case PLATFORM_POWERMAC:
 		pmac_init_early();
-		parse_bootinfo();
 #endif /* CONFIG_PPC_PMAC */
 	}
 
+#ifdef CONFIG_XMON_DEFAULT
+	xmon_init();
+#endif
 	/* If we were passed an initrd, set the ROOT_DEV properly if the values
 	 * look sensible. If not, clear initrd reference.
 	 */
@@ -334,6 +392,15 @@
 	}
 #endif /* CONFIG_PPC_PSERIES */
 
+#ifdef CONFIG_PPC_ISERIES
+	iSeries_parse_cmdline();
+#endif
+
+	/* Save unparsed command line copy for /proc/cmdline */
+	strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);
+
+	parse_early_param();
+
 #ifdef CONFIG_SMP
 #ifndef CONFIG_PPC_ISERIES
 	/*
@@ -355,6 +422,10 @@
 				  i);
 		}
 	}
+
+	if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR)
+		vpa_init(boot_cpuid);
+
 #endif /* CONFIG_PPC_PSERIES */
 #endif /* CONFIG_SMP */
 
@@ -384,27 +455,6 @@
 	printk("-----------------------------------------------------\n");
 
 	mm_init_ppc64();
-
-#if defined(CONFIG_SMP) && defined(CONFIG_PPC_PSERIES)
-	if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) {
-		vpa_init(boot_cpuid);
-	}
-#endif
-
-	/* Select the correct idle loop for the platform. */
-	idle_setup();
-
-	switch (systemcfg->platform) {
-#ifdef CONFIG_PPC_ISERIES
-	case PLATFORM_ISERIES_LPAR:
-		iSeries_init();
-		break;
-#endif
-	default:
-		/* The following relies on the device tree being */
-		/* fully configured.                             */
-		parse_cmd_line(r3, r4, r5, r6, r7);
-	}
 }
 
 void machine_restart(char *cmd)
@@ -528,62 +578,27 @@
 	.show =	show_cpuinfo,
 };
 
-/*
- * Fetch the cmd_line from open firmware. 
- */
-void parse_cmd_line(unsigned long r3, unsigned long r4, unsigned long r5,
-		  unsigned long r6, unsigned long r7)
-{
-	cmd_line[0] = 0;
+#if 0 /* XXX not currently used */
+unsigned long memory_limit;
 
-#ifdef CONFIG_CMDLINE
-	strlcpy(cmd_line, CONFIG_CMDLINE, sizeof(cmd_line));
-#endif /* CONFIG_CMDLINE */
+static int __init early_parsemem(char *p)
+{
+	if (!p)
+		return 0;
 
-#ifdef CONFIG_PPC_PSERIES
-	{
-	struct device_node *chosen;
+	memory_limit = memparse(p, &p);
 
-	chosen = of_find_node_by_name(NULL, "chosen");
-	if (chosen != NULL) {
-		char *p;
-		p = get_property(chosen, "bootargs", NULL);
-		if (p != NULL && p[0] != 0)
-			strlcpy(cmd_line, p, sizeof(cmd_line));
-		of_node_put(chosen);
-	}
-	}
-#endif
-
-	/* Look for mem= option on command line */
-	if (strstr(cmd_line, "mem=")) {
-		char *p, *q;
-		unsigned long maxmem = 0;
-		extern unsigned long __max_memory;
-
-		for (q = cmd_line; (p = strstr(q, "mem=")) != 0; ) {
-			q = p + 4;
-			if (p > cmd_line && p[-1] != ' ')
-				continue;
-			maxmem = simple_strtoul(q, &q, 0);
-			if (*q == 'k' || *q == 'K') {
-				maxmem <<= 10;
-				++q;
-			} else if (*q == 'm' || *q == 'M') {
-				maxmem <<= 20;
-				++q;
-			}
-		}
-		__max_memory = maxmem;
-	}
+	return 0;
 }
+early_param("mem", early_parsemem);
+#endif
 
 #ifdef CONFIG_PPC_PSERIES
 static int __init set_preferred_console(void)
 {
 	struct device_node *prom_stdout;
 	char *name;
-	int offset;
+	int offset = 0;
 
 	/* The user has requested a console so this is already set up. */
 	if (strstr(saved_command_line, "console="))
@@ -652,26 +667,6 @@
 
 }
 console_initcall(set_preferred_console);
-
-int parse_bootinfo(void)
-{
-	struct bi_record *rec;
-
-	rec = prom.bi_recs;
-
-	if ( rec == NULL || rec->tag != BI_FIRST )
-		return -1;
-
-	for ( ; rec->tag != BI_LAST ; rec = bi_rec_next(rec) ) {
-		switch (rec->tag) {
-		case BI_CMD_LINE:
-			strlcpy(cmd_line, (void *)rec->data, sizeof(cmd_line));
-			break;
-		}
-	}
-
-	return 0;
-}
 #endif
 
 int __init ppc_init(void)
@@ -687,17 +682,6 @@
 
 arch_initcall(ppc_init);
 
-void __init ppc64_calibrate_delay(void)
-{
-	loops_per_jiffy = tb_ticks_per_jiffy;
-
-	printk("Calibrating delay loop... %lu.%02lu BogoMips\n",
-			       loops_per_jiffy/(500000/HZ),
-			       loops_per_jiffy/(5000/HZ) % 100);
-}	
-
-extern void (*calibrate_delay)(void);
-
 #ifdef CONFIG_IRQSTACKS
 static void __init irqstack_early_init(void)
 {
@@ -753,17 +737,10 @@
 	extern int panic_timeout;
 	extern void do_init_bootmem(void);
 
-	calibrate_delay = ppc64_calibrate_delay;
-
 	ppc64_boot_msg(0x12, "Setup Arch");
 
-#ifdef CONFIG_XMON
-	if (strstr(cmd_line, "xmon")) {
-		/* ensure xmon is enabled */
-		xmon_init();
-		debugger(0);
-	}
-#endif /* CONFIG_XMON */
+	*cmdline_p = cmd_line;
+
 
 	/*
 	 * Set cache line size based on type of cpu as a default.
@@ -784,15 +761,14 @@
 	init_mm.end_data = (unsigned long) _edata;
 	init_mm.brk = klimit;
 	
-	/* Save unparsed command line copy for /proc/cmdline */
-	strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);
-	*cmdline_p = cmd_line;
-
 	irqstack_early_init();
 	emergency_stack_init();
 
 	/* set up the bootmem stuff with available memory */
 	do_init_bootmem();
+
+	/* Select the correct idle loop for the platform. */
+	idle_setup();
 
 	ppc_md.setup_arch();
 
diff -Nru a/arch/ppc64/kernel/signal.c b/arch/ppc64/kernel/signal.c
--- a/arch/ppc64/kernel/signal.c	2004-09-15 20:29:40 -07:00
+++ b/arch/ppc64/kernel/signal.c	2004-09-15 20:29:40 -07:00
@@ -178,7 +178,7 @@
 	elf_vrreg_t __user *v_regs;
 #endif
 	unsigned long err = 0;
-	unsigned long save_r13;
+	unsigned long save_r13 = 0;
 	elf_greg_t *gregs = (elf_greg_t *)regs;
 	int i;
 
diff -Nru a/arch/ppc64/kernel/signal32.c b/arch/ppc64/kernel/signal32.c
--- a/arch/ppc64/kernel/signal32.c	2004-09-15 20:29:40 -07:00
+++ b/arch/ppc64/kernel/signal32.c	2004-09-15 20:29:40 -07:00
@@ -189,7 +189,7 @@
 	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
 	int i;
 	long err = 0;
-	unsigned int save_r2;
+	unsigned int save_r2 = 0;
 #ifdef CONFIG_ALTIVEC
 	unsigned long msr;
 #endif
diff -Nru a/arch/ppc64/kernel/smp.c b/arch/ppc64/kernel/smp.c
--- a/arch/ppc64/kernel/smp.c	2004-09-15 20:29:40 -07:00
+++ b/arch/ppc64/kernel/smp.c	2004-09-15 20:29:40 -07:00
@@ -74,6 +74,8 @@
 extern long register_vpa(unsigned long flags, unsigned long proc,
 			 unsigned long vpa);
 
+int smt_enabled_at_boot = 1;
+
 /* Low level assembly function used to backup CPU 0 state */
 extern void __save_cpu_setup(void);
 
@@ -942,4 +944,12 @@
 	smp_threads_ready = 1;
 
 	set_cpus_allowed(current, old_mask);
+
+	/*
+	 * We know at boot the maximum number of cpus we can add to
+	 * a partition and set cpu_possible_map accordingly. cpu_present_map
+	 * needs to match for the hotplug code to allow us to hot add
+	 * any offline cpus.
+	 */
+	cpu_present_map = cpu_possible_map;
 }
diff -Nru a/arch/ppc64/kernel/sysfs.c b/arch/ppc64/kernel/sysfs.c
--- a/arch/ppc64/kernel/sysfs.c	2004-09-15 20:29:40 -07:00
+++ b/arch/ppc64/kernel/sysfs.c	2004-09-15 20:29:40 -07:00
@@ -97,6 +97,13 @@
 
 /* PMC stuff */
 
+#ifdef CONFIG_PPC_ISERIES
+void ppc64_enable_pmcs(void)
+{
+	/* XXX Implement for iseries */
+}
+#else
+
 /*
  * Enabling PMCs will slow partition context switch times so we only do
  * it the first time we write to the PMCs.
@@ -104,12 +111,6 @@
 
 static DEFINE_PER_CPU(char, pmcs_enabled);
 
-#ifdef CONFIG_PPC_ISERIES
-void ppc64_enable_pmcs(void)
-{
-	/* XXX Implement for iseries */
-}
-#else
 void ppc64_enable_pmcs(void)
 {
 	unsigned long hid0;
diff -Nru a/arch/ppc64/kernel/vmlinux.lds.S b/arch/ppc64/kernel/vmlinux.lds.S
--- a/arch/ppc64/kernel/vmlinux.lds.S	2004-09-15 20:29:40 -07:00
+++ b/arch/ppc64/kernel/vmlinux.lds.S	2004-09-15 20:29:40 -07:00
@@ -117,8 +117,11 @@
 
   .data : {
 	*(.data .data.rel* .toc1)
-	*(.opd)
 	*(.branch_lt)
+	}
+
+  .opd : {
+	*(.opd)
 	}
 
   .got : {
diff -Nru a/arch/ppc64/lib/locks.c b/arch/ppc64/lib/locks.c
--- a/arch/ppc64/lib/locks.c	2004-09-15 20:29:40 -07:00
+++ b/arch/ppc64/lib/locks.c	2004-09-15 20:29:40 -07:00
@@ -20,8 +20,6 @@
 #include <asm/hvcall.h>
 #include <asm/iSeries/HvCall.h>
 
-#ifndef CONFIG_SPINLINE
-
 /* waiting for a spinlock... */
 #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
 
@@ -95,5 +93,3 @@
 }
 
 EXPORT_SYMBOL(spin_unlock_wait);
-
-#endif /* CONFIG_SPINLINE */
diff -Nru a/arch/ppc64/mm/hash_utils.c b/arch/ppc64/mm/hash_utils.c
--- a/arch/ppc64/mm/hash_utils.c	2004-09-15 20:29:40 -07:00
+++ b/arch/ppc64/mm/hash_utils.c	2004-09-15 20:29:40 -07:00
@@ -253,24 +253,24 @@
 	int local = 0;
 	cpumask_t tmp;
 
-	/* Check for invalid addresses. */
-	if (!IS_VALID_EA(ea))
-		return 1;
-
  	switch (REGION_ID(ea)) {
 	case USER_REGION_ID:
 		user_region = 1;
 		mm = current->mm;
-		if (mm == NULL)
+		if ((ea > USER_END) || (! mm))
 			return 1;
 
 		vsid = get_vsid(mm->context.id, ea);
 		break;
 	case IO_REGION_ID:
+		if (ea > IMALLOC_END)
+			return 1;
 		mm = &ioremap_mm;
 		vsid = get_kernel_vsid(ea);
 		break;
 	case VMALLOC_REGION_ID:
+		if (ea > VMALLOC_END)
+			return 1;
 		mm = &init_mm;
 		vsid = get_kernel_vsid(ea);
 		break;
diff -Nru a/arch/ppc64/mm/numa.c b/arch/ppc64/mm/numa.c
--- a/arch/ppc64/mm/numa.c	2004-09-15 20:29:40 -07:00
+++ b/arch/ppc64/mm/numa.c	2004-09-15 20:29:40 -07:00
@@ -18,6 +18,8 @@
 #include <asm/machdep.h>
 #include <asm/abs_addr.h>
 
+static int numa_enabled = 1;
+
 static int numa_debug;
 #define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }
 
@@ -189,10 +191,7 @@
 	long entries = lmb_end_of_DRAM() >> MEMORY_INCREMENT_SHIFT;
 	unsigned long i;
 
-	if (strstr(saved_command_line, "numa=debug"))
-		numa_debug = 1;
-
-	if (strstr(saved_command_line, "numa=off")) {
+	if (numa_enabled == 0) {
 		printk(KERN_WARNING "NUMA disabled by user\n");
 		return -1;
 	}
@@ -587,3 +586,18 @@
 							start_pfn, zholes_size);
 	}
 }
+
+static int __init early_numa(char *p)
+{
+	if (!p)
+		return 0;
+
+	if (strstr(p, "off"))
+		numa_enabled = 0;
+
+	if (strstr(p, "debug"))
+		numa_debug = 1;
+
+	return 0;
+}
+early_param("numa", early_numa);
diff -Nru a/arch/ppc64/mm/slb_low.S b/arch/ppc64/mm/slb_low.S
--- a/arch/ppc64/mm/slb_low.S	2004-09-15 20:29:40 -07:00
+++ b/arch/ppc64/mm/slb_low.S	2004-09-15 20:29:40 -07:00
@@ -68,19 +68,19 @@
 	srdi	r3,r3,28		/* get esid */
 	cmpldi	cr7,r9,0xc		/* cmp KERNELBASE for later use */
 
-	/* r9 = region, r3 = esid, cr7 = <>KERNELBASE */
-
-	rldicr.	r11,r3,32,16
-	bne-	8f			/* invalid ea bits set */
-	addi	r11,r9,-1
-	cmpldi	r11,0xb
-	blt-	8f			/* invalid region */
+	rldimi	r10,r3,28,0		/* r10= ESID<<28 | entry */
+	oris	r10,r10,SLB_ESID_V@h	/* r10 |= SLB_ESID_V */
 
-	/* r9 = region, r3 = esid, r10 = entry, cr7 = <>KERNELBASE */
+	/* r3 = esid, r10 = esid_data, cr7 = <>KERNELBASE */
 
 	blt	cr7,0f			/* user or kernel? */
 
-	/* kernel address */
+	/* kernel address: proto-VSID = ESID */
+	/* WARNING - MAGIC: we don't use the VSID 0xfffffffff, but
+	 * this code will generate the protoVSID 0xfffffffff for the
+	 * top segment.  That's ok, the scramble below will translate
+	 * it to VSID 0, which is reserved as a bad VSID - one which
+	 * will never have any pages in it.  */
 	li	r11,SLB_VSID_KERNEL
 BEGIN_FTR_SECTION
 	bne	cr7,9f
@@ -88,8 +88,12 @@
 END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
 	b	9f
 
-0:	/* user address */
+0:	/* user address: proto-VSID = context<<15 | ESID */
 	li	r11,SLB_VSID_USER
+
+	srdi.	r9,r3,13
+	bne-	8f			/* invalid ea bits set */
+
 #ifdef CONFIG_HUGETLB_PAGE
 BEGIN_FTR_SECTION
 	/* check against the hugepage ranges */
@@ -111,33 +115,18 @@
 #endif /* CONFIG_HUGETLB_PAGE */
 
 6:	ld	r9,PACACONTEXTID(r13)
+	rldimi	r3,r9,USER_ESID_BITS,0
 
-9:	/* r9 = "context", r3 = esid, r11 = flags, r10 = entry */
-
-	rldimi	r9,r3,15,0		/* r9= VSID ordinal */
-
-7:	rldimi	r10,r3,28,0		/* r10= ESID<<28 | entry */
-	oris	r10,r10,SLB_ESID_V@h	/* r10 |= SLB_ESID_V */
-
-	/* r9 = ordinal, r3 = esid, r11 = flags, r10 = esid_data */
-
-	li	r3,VSID_RANDOMIZER@higher
-	sldi	r3,r3,32
-	oris	r3,r3,VSID_RANDOMIZER@h
-	ori	r3,r3,VSID_RANDOMIZER@l
-
-	mulld	r9,r3,r9		/* r9 = ordinal * VSID_RANDOMIZER */
-	clrldi	r9,r9,28		/* r9 &= VSID_MASK */
-	sldi	r9,r9,SLB_VSID_SHIFT	/* r9 <<= SLB_VSID_SHIFT */
-	or	r9,r9,r11		/* r9 |= flags */
+9:	/* r3 = protovsid, r11 = flags, r10 = esid_data, cr7 = <>KERNELBASE */
+	ASM_VSID_SCRAMBLE(r3,r9)
 
-	/* r9 = vsid_data, r10 = esid_data, cr7 = <>KERNELBASE */
+	rldimi	r11,r3,SLB_VSID_SHIFT,16	/* combine VSID and flags */
 
 	/*
 	 * No need for an isync before or after this slbmte. The exception
 	 * we enter with and the rfid we exit with are context synchronizing.
 	 */
-	slbmte	r9,r10
+	slbmte	r11,r10
 
 	bgelr	cr7			/* we're done for kernel addresses */
 
@@ -160,6 +149,6 @@
 	blr
 
 8:	/* invalid EA */
-	li	r9,0			/* 0 VSID ordinal -> BAD_VSID */
+	li	r3,0			/* BAD_VSID */
 	li	r11,SLB_VSID_USER	/* flags don't much matter */
-	b	7b
+	b	9b
diff -Nru a/arch/ppc64/mm/stab.c b/arch/ppc64/mm/stab.c
--- a/arch/ppc64/mm/stab.c	2004-09-15 20:29:40 -07:00
+++ b/arch/ppc64/mm/stab.c	2004-09-15 20:29:40 -07:00
@@ -115,15 +115,11 @@
 	unsigned char stab_entry;
 	unsigned long offset;
 
-	/* Check for invalid effective addresses. */
-	if (!IS_VALID_EA(ea))
-		return 1;
-
 	/* Kernel or user address? */
 	if (ea >= KERNELBASE) {
 		vsid = get_kernel_vsid(ea);
 	} else {
-		if (! mm)
+		if ((ea >= TASK_SIZE_USER64) || (! mm))
 			return 1;
 
 		vsid = get_vsid(mm->context.id, ea);
diff -Nru a/arch/ppc64/oprofile/op_model_rs64.c b/arch/ppc64/oprofile/op_model_rs64.c
--- a/arch/ppc64/oprofile/op_model_rs64.c	2004-09-15 20:29:40 -07:00
+++ b/arch/ppc64/oprofile/op_model_rs64.c	2004-09-15 20:29:40 -07:00
@@ -21,8 +21,8 @@
 
 static void ctrl_write(unsigned int i, unsigned int val)
 {
-	unsigned int tmp;
-	unsigned long shift, mask;
+	unsigned int tmp = 0;
+	unsigned long shift = 0, mask = 0;
 
 	dbg("ctrl_write %d %x\n", i, val);
 
diff -Nru a/arch/ppc64/xmon/xmon.c b/arch/ppc64/xmon/xmon.c
--- a/arch/ppc64/xmon/xmon.c	2004-09-15 20:29:40 -07:00
+++ b/arch/ppc64/xmon/xmon.c	2004-09-15 20:29:40 -07:00
@@ -2059,7 +2059,7 @@
 {
 	int nr, dotted;
 	unsigned long first_adr;
-	unsigned long inst, last_inst;
+	unsigned long inst, last_inst = 0;
 	unsigned char val[4];
 
 	dotted = 0;
diff -Nru a/arch/sparc64/kernel/auxio.c b/arch/sparc64/kernel/auxio.c
--- a/arch/sparc64/kernel/auxio.c	2004-09-15 20:29:40 -07:00
+++ b/arch/sparc64/kernel/auxio.c	2004-09-15 20:29:40 -07:00
@@ -17,7 +17,7 @@
 #include <asm/auxio.h>
 
 /* This cannot be static, as it is referenced in entry.S */
-unsigned long auxio_register = 0UL;
+void __iomem *auxio_register = 0UL;
 
 enum auxio_type {
 	AUXIO_TYPE_NODEV,
@@ -30,7 +30,7 @@
 
 static void __auxio_sbus_set(u8 bits_on, u8 bits_off)
 {
-	if(auxio_register) {
+	if (auxio_register) {
 		unsigned char regval;
 		unsigned long flags;
 		unsigned char newval;
@@ -49,7 +49,7 @@
 
 static void __auxio_ebus_set(u8 bits_on, u8 bits_off)
 {
-	if(auxio_register) {
+	if (auxio_register) {
 		unsigned char regval;
 		unsigned long flags;
 		unsigned char newval;
@@ -126,7 +126,8 @@
 	if (sdev) {
 		auxio_devtype  = AUXIO_TYPE_SBUS;
 		auxio_register = sbus_ioremap(&sdev->resource[0], 0,
-		  		sdev->reg_addrs[0].reg_size, "auxiliaryIO");
+					      sdev->reg_addrs[0].reg_size,
+					      "auxiliaryIO");
 	}
 #ifdef CONFIG_PCI
 	else {
@@ -142,7 +143,7 @@
 	ebus_done:
 		if (edev) {
 			auxio_devtype  = AUXIO_TYPE_EBUS;
-			auxio_register = (unsigned long)
+			auxio_register =
 				ioremap(edev->resource[0].start, sizeof(u32));
 		}
 	}
diff -Nru a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c
--- a/arch/sparc64/kernel/pci_schizo.c	2004-09-15 20:29:40 -07:00
+++ b/arch/sparc64/kernel/pci_schizo.c	2004-09-15 20:29:40 -07:00
@@ -1171,7 +1171,7 @@
 		prom_halt();
 	}
 	bucket = __bucket(irq);
-	tmp = readl(bucket->imap);
+	tmp = upa_readl(bucket->imap);
 	upa_writel(tmp, (pbm->pbm_regs +
 			 schizo_imap_offset(SCHIZO_UE_INO) + 4));
 
@@ -1309,7 +1309,7 @@
 		prom_halt();
 	}
 	bucket = __bucket(irq);
-	tmp = readl(bucket->imap);
+	tmp = upa_readl(bucket->imap);
 	upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_UE_INO) + 4));
 
 	pbm = pbm_for_ino(p, SCHIZO_CE_INO);
diff -Nru a/arch/sparc64/kernel/power.c b/arch/sparc64/kernel/power.c
--- a/arch/sparc64/kernel/power.c	2004-09-15 20:29:40 -07:00
+++ b/arch/sparc64/kernel/power.c	2004-09-15 20:29:40 -07:00
@@ -27,7 +27,7 @@
 int scons_pwroff = 1; 
 
 #ifdef CONFIG_PCI
-static unsigned long power_reg = 0UL;
+static void __iomem *power_reg;
 
 static DECLARE_WAIT_QUEUE_HEAD(powerd_wait);
 static int button_pressed;
@@ -52,7 +52,7 @@
 {
 	if (!serial_console || scons_pwroff) {
 #ifdef CONFIG_PCI
-		if (power_reg != 0UL) {
+		if (power_reg) {
 			/* Both register bits seem to have the
 			 * same effect, so until I figure out
 			 * what the difference is...
@@ -130,8 +130,8 @@
 	return;
 
 found:
-	power_reg = (unsigned long)ioremap(edev->resource[0].start, 0x4);
-	printk("power: Control reg at %016lx ... ", power_reg);
+	power_reg = ioremap(edev->resource[0].start, 0x4);
+	printk("power: Control reg at %p ... ", power_reg);
 	poweroff_method = machine_halt;  /* able to use the standard halt */
 	if (has_button_interrupt(edev)) {
 		if (kernel_thread(powerd, NULL, CLONE_FS) < 0) {
diff -Nru a/arch/sparc64/lib/Makefile b/arch/sparc64/lib/Makefile
--- a/arch/sparc64/lib/Makefile	2004-09-15 20:29:40 -07:00
+++ b/arch/sparc64/lib/Makefile	2004-09-15 20:29:40 -07:00
@@ -12,7 +12,7 @@
 	 U1memcpy.o U1copy_from_user.o U1copy_to_user.o \
 	 U3memcpy.o U3copy_from_user.o U3copy_to_user.o U3patch.o \
 	 copy_in_user.o user_fixup.o memmove.o \
-	 mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o
+	 mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o iomap.o
 
 lib-$(CONFIG_DEBUG_SPINLOCK) += debuglocks.o
 lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
diff -Nru a/arch/sparc64/lib/iomap.c b/arch/sparc64/lib/iomap.c
--- /dev/null	Wed Dec 31 16:00:00 196900
+++ b/arch/sparc64/lib/iomap.c	2004-09-15 20:29:40 -07:00
@@ -0,0 +1,48 @@
+/*
+ * Implement the sparc64 iomap interfaces
+ */
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <asm/io.h>
+
+/* Create a virtual mapping cookie for an IO port range */
+void __iomem *ioport_map(unsigned long port, unsigned int nr)
+{
+	return (void __iomem *) (unsigned long) port;
+}
+
+void ioport_unmap(void __iomem *addr)
+{
+	/* Nothing to do */
+}
+EXPORT_SYMBOL(ioport_map);
+EXPORT_SYMBOL(ioport_unmap);
+
+/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+	unsigned long start = pci_resource_start(dev, bar);
+	unsigned long len = pci_resource_len(dev, bar);
+	unsigned long flags = pci_resource_flags(dev, bar);
+
+	if (!len || !start)
+		return NULL;
+	if (maxlen && len > maxlen)
+		len = maxlen;
+	if (flags & IORESOURCE_IO)
+		return ioport_map(start, len);
+	if (flags & IORESOURCE_MEM) {
+		if (flags & IORESOURCE_CACHEABLE)
+			return ioremap(start, len);
+		return ioremap_nocache(start, len);
+	}
+	/* What? */
+	return NULL;
+}
+
+void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
+{
+	/* nothing to do */
+}
+EXPORT_SYMBOL(pci_iomap);
+EXPORT_SYMBOL(pci_iounmap);
diff -Nru a/arch/um/Kconfig b/arch/um/Kconfig
--- a/arch/um/Kconfig	2004-09-15 20:29:40 -07:00
+++ b/arch/um/Kconfig	2004-09-15 20:29:40 -07:00
@@ -132,8 +132,9 @@
 config HOST_2G_2G
 	bool "2G/2G host address space split"
 
-config UML_SMP
+config SMP
 	bool "Symmetric multi-processing support"
+	default n
 	help
         This option enables UML SMP support.  UML implements virtual SMP by
         allowing as many processes to run simultaneously on the host as
@@ -144,10 +145,6 @@
         on the host scheduler.
         CONFIG_SMP will be set to whatever this option is set to.
         It is safe to leave this unchanged.
-
-config SMP
-	bool
-	default UML_SMP
 
 config NR_CPUS
 	int "Maximum number of CPUs (2-32)"
diff -Nru a/arch/um/Kconfig_char b/arch/um/Kconfig_char
--- a/arch/um/Kconfig_char	2004-09-15 20:29:40 -07:00
+++ b/arch/um/Kconfig_char	2004-09-15 20:29:40 -07:00
@@ -158,11 +158,6 @@
 	  When not in use, each legacy PTY occupies 12 bytes on 32-bit
 	  architectures and 24 bytes on 64-bit architectures.
 
-#config UNIX98_PTY_COUNT
-#	int "Maximum number of Unix98 PTYs in use (0-2048)"
-#	depends on UNIX98_PTYS
-#	default "256"
-
 config WATCHDOG
 	bool "Watchdog Timer Support"
 
diff -Nru a/arch/um/include/irq_user.h b/arch/um/include/irq_user.h
--- a/arch/um/include/irq_user.h	2004-09-15 20:29:40 -07:00
+++ b/arch/um/include/irq_user.h	2004-09-15 20:29:40 -07:00
@@ -14,6 +14,7 @@
 extern void free_irq_by_fd(int fd);
 extern void reactivate_fd(int fd, int irqnum);
 extern void deactivate_fd(int fd, int irqnum);
+extern int deactivate_all_fds(void);
 extern void forward_interrupts(int pid);
 extern void init_irq_signals(int on_sigstack);
 extern void forward_ipi(int fd, int pid);
diff -Nru a/arch/um/include/os.h b/arch/um/include/os.h
--- a/arch/um/include/os.h	2004-09-15 20:29:40 -07:00
+++ b/arch/um/include/os.h	2004-09-15 20:29:40 -07:00
@@ -140,6 +140,7 @@
 extern int os_file_modtime(char *file, unsigned long *modtime);
 extern int os_pipe(int *fd, int stream, int close_on_exec);
 extern int os_set_fd_async(int fd, int owner);
+extern int os_clear_fd_async(int fd);
 extern int os_set_fd_block(int fd, int blocking);
 extern int os_accept_connection(int fd);
 extern int os_create_unix_socket(char *file, int len, int close_on_exec);
diff -Nru a/arch/um/include/time_user.h b/arch/um/include/time_user.h
--- a/arch/um/include/time_user.h	2004-09-15 20:29:40 -07:00
+++ b/arch/um/include/time_user.h	2004-09-15 20:29:40 -07:00
@@ -11,6 +11,7 @@
 extern void set_interval(int timer_type);
 extern void idle_sleep(int secs);
 extern void enable_timer(void);
+extern void disable_timer(void);
 extern unsigned long time_lock(void);
 extern void time_unlock(unsigned long);
 
diff -Nru a/arch/um/kernel/irq_user.c b/arch/um/kernel/irq_user.c
--- a/arch/um/kernel/irq_user.c	2004-09-15 20:29:40 -07:00
+++ b/arch/um/kernel/irq_user.c	2004-09-15 20:29:40 -07:00
@@ -364,6 +364,20 @@
 	irq_unlock(flags);
 }
 
+int deactivate_all_fds(void)
+{
+	struct irq_fd *irq;
+	int err;
+
+	for(irq=active_fds;irq != NULL;irq = irq->next){
+		err = os_clear_fd_async(irq->fd);
+		if(err)
+			return(err);
+	}
+
+	return(0);
+}
+
 void forward_ipi(int fd, int pid)
 {
 	int err;
diff -Nru a/arch/um/kernel/physmem.c b/arch/um/kernel/physmem.c
--- a/arch/um/kernel/physmem.c	2004-09-15 20:29:40 -07:00
+++ b/arch/um/kernel/physmem.c	2004-09-15 20:29:40 -07:00
@@ -4,7 +4,7 @@
  */
 
 #include "linux/mm.h"
-#include "linux/ghash.h"
+#include "linux/rbtree.h"
 #include "linux/slab.h"
 #include "linux/vmalloc.h"
 #include "linux/bootmem.h"
@@ -19,36 +19,8 @@
 #include "kern.h"
 #include "init.h"
 
-#if 0
-static pgd_t physmem_pgd[PTRS_PER_PGD];
-
-static struct phys_desc *lookup_mapping(void *addr)
-{
-	pgd = &physmem_pgd[pgd_index(addr)];
-	if(pgd_none(pgd))
-		return(NULL);
-
-	pmd = pmd_offset(pgd, addr);
-	if(pmd_none(pmd))
-		return(NULL);
-
-	pte = pte_offset_kernel(pmd, addr);
-	return((struct phys_desc *) pte_val(pte));
-}
-
-static struct add_mapping(void *addr, struct phys_desc *new)
-{
-}
-#endif
-
-#define PHYS_HASHSIZE (8192)
-
-struct phys_desc;
-
-DEF_HASH_STRUCTS(virtmem, PHYS_HASHSIZE, struct phys_desc);
-
 struct phys_desc {
-	struct virtmem_ptrs virt_ptrs;
+	struct rb_node rb;
 	int fd;
 	__u64 offset;
 	void *virt;
@@ -56,21 +28,48 @@
 	struct list_head list;
 };
 
-struct virtmem_table virtmem_hash;
+static struct rb_root phys_mappings = RB_ROOT;
 
-static int virt_cmp(void *virt1, void *virt2)
+static struct rb_node **find_rb(void *virt)
 {
-	return(virt1 != virt2);
+	struct rb_node **n = &phys_mappings.rb_node;
+	struct phys_desc *d;
+
+	while(*n != NULL){
+		d = rb_entry(*n, struct phys_desc, rb);
+		if(d->virt == virt)
+			return(n);
+
+		if(d->virt > virt)
+			n = &(*n)->rb_left;
+		else
+			n = &(*n)->rb_right;
+	}
+
+	return(n);
 }
 
-static int virt_hash(void *virt)
+static struct phys_desc *find_phys_mapping(void *virt)
 {
-	unsigned long addr = ((unsigned long) virt) >> PAGE_SHIFT;
-	return(addr % PHYS_HASHSIZE);
+	struct rb_node **n = find_rb(virt);
+
+	if(*n == NULL)
+		return(NULL);
+
+	return(rb_entry(*n, struct phys_desc, rb));
 }
 
-DEF_HASH(static, virtmem, struct phys_desc, virt_ptrs, void *, virt, virt_cmp,
-	 virt_hash);
+static void insert_phys_mapping(struct phys_desc *desc)
+{
+	struct rb_node **n = find_rb(desc->virt);
+
+	if(*n != NULL)
+		panic("Physical remapping for %p already present",
+		      desc->virt);
+
+	rb_link_node(&desc->rb, (*n)->rb_parent, n);
+	rb_insert_color(&desc->rb, &phys_mappings);
+}
 
 LIST_HEAD(descriptor_mappings);
 
@@ -127,7 +126,8 @@
 		return(-ENOMEM);
 
 	phys = __pa(virt);
-	if(find_virtmem_hash(&virtmem_hash, virt) != NULL)
+	desc = find_phys_mapping(virt);
+	if(desc != NULL)
 		panic("Address 0x%p is already substituted\n", virt);
 
 	err = -ENOMEM;
@@ -136,13 +136,12 @@
 		goto out;
 
 	*desc = ((struct phys_desc)
-		{ .virt_ptrs =	{ NULL, NULL },
-		  .fd =		fd,
+		{ .fd =			fd,
 		  .offset =		offset,
 		  .virt =		virt,
 		  .phys =		__pa(virt),
 		  .list = 		LIST_HEAD_INIT(desc->list) });
-	insert_virtmem_hash(&virtmem_hash, desc);
+	insert_phys_mapping(desc);
 
 	list_add(&desc->list, &fd_maps->pages);
 
@@ -151,7 +150,7 @@
 	if(!err)
 		goto out;
 
-	remove_virtmem_hash(&virtmem_hash, desc);
+	rb_erase(&desc->rb, &phys_mappings);
 	kfree(desc);
  out:
 	return(err);
@@ -164,7 +163,7 @@
 	void *virt = desc->virt;
 	int err;
 
-	remove_virtmem_hash(&virtmem_hash, desc);
+	rb_erase(&desc->rb, &phys_mappings);
 	list_del(&desc->list);
 	kfree(desc);
 
@@ -179,7 +178,7 @@
 	struct phys_desc *desc;
 
 	virt = (void *) ((unsigned long) virt & PAGE_MASK);
-	desc = find_virtmem_hash(&virtmem_hash, virt);
+	desc = find_phys_mapping(virt);
 	if(desc == NULL)
 		return(0);
 
@@ -234,7 +233,9 @@
 
 int is_remapped(void *virt)
 {
-	return(find_virtmem_hash(&virtmem_hash, virt) != NULL);
+	struct phys_desc *desc = find_phys_mapping(virt);
+
+	return(desc != NULL);
 }
 
 /* Changed during early boot */
@@ -367,8 +368,7 @@
 
 int phys_mapping(unsigned long phys, __u64 *offset_out)
 {
-	struct phys_desc *desc = find_virtmem_hash(&virtmem_hash,
-						   __va(phys & PAGE_MASK));
+	struct phys_desc *desc = find_phys_mapping(__va(phys & PAGE_MASK));
 	int fd = -1;
 
 	if(desc != NULL){
diff -Nru a/arch/um/kernel/process.c b/arch/um/kernel/process.c
--- a/arch/um/kernel/process.c	2004-09-15 20:29:40 -07:00
+++ b/arch/um/kernel/process.c	2004-09-15 20:29:40 -07:00
@@ -46,7 +46,7 @@
 	int flags = 0, pages;
 
 	if(sig_stack != NULL){
-		pages = (1 << UML_CONFIG_KERNEL_STACK_ORDER) - 2;
+		pages = (1 << UML_CONFIG_KERNEL_STACK_ORDER);
 		set_sigstack(sig_stack, pages * page_size());
 		flags = SA_ONSTACK;
 	}
diff -Nru a/arch/um/kernel/process_kern.c b/arch/um/kernel/process_kern.c
--- a/arch/um/kernel/process_kern.c	2004-09-15 20:29:40 -07:00
+++ b/arch/um/kernel/process_kern.c	2004-09-15 20:29:40 -07:00
@@ -166,8 +166,6 @@
 		struct pt_regs *regs)
 {
 	p->thread = (struct thread_struct) INIT_THREAD;
-	p->thread.kernel_stack = 
-		(unsigned long) p->thread_info + 2 * PAGE_SIZE;
 	return(CHOOSE_MODE_PROC(copy_thread_tt, copy_thread_skas, nr, 
 				clone_flags, sp, stack_top, p, regs));
 }
@@ -327,8 +325,7 @@
 	unsigned long stack;
 
 	stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
-	stack += 2 * PAGE_SIZE;
-	return(stack != current->thread.kernel_stack);
+	return(stack != (unsigned long) current_thread);
 }
 
 extern void remove_umid_dir(void);
diff -Nru a/arch/um/kernel/skas/process_kern.c b/arch/um/kernel/skas/process_kern.c
--- a/arch/um/kernel/skas/process_kern.c	2004-09-15 20:29:40 -07:00
+++ b/arch/um/kernel/skas/process_kern.c	2004-09-15 20:29:40 -07:00
@@ -191,8 +191,7 @@
 		handler = new_thread_handler;
 	}
 
-	new_thread((void *) p->thread.kernel_stack, 
-		   &p->thread.mode.skas.switch_buf, 
+	new_thread(p->thread_info, &p->thread.mode.skas.switch_buf,
 		   &p->thread.mode.skas.fork_buf, handler);
 	return(0);
 }
@@ -231,7 +230,7 @@
 
 	init_task.thread.request.u.thread.proc = start_kernel_proc;
 	init_task.thread.request.u.thread.arg = NULL;
-	return(start_idle_thread((void *) init_task.thread.kernel_stack,
+	return(start_idle_thread(init_task.thread_info,
 				 &init_task.thread.mode.skas.switch_buf,
 				 &init_task.thread.mode.skas.fork_buf));
 }
diff -Nru a/arch/um/kernel/smp.c b/arch/um/kernel/smp.c
--- a/arch/um/kernel/smp.c	2004-09-15 20:29:40 -07:00
+++ b/arch/um/kernel/smp.c	2004-09-15 20:29:40 -07:00
@@ -29,9 +29,11 @@
 #include "os.h"
 
 /* CPU online map, set by smp_boot_cpus */
-unsigned long cpu_online_map = CPU_MASK_NONE;
+cpumask_t cpu_online_map = CPU_MASK_NONE;
+cpumask_t cpu_possible_map = CPU_MASK_NONE;
 
 EXPORT_SYMBOL(cpu_online_map);
+EXPORT_SYMBOL(cpu_possible_map);
 
 /* Per CPU bogomips and other parameters
  * The only piece used here is the ipi pipe, which is set before SMP is
@@ -125,6 +127,10 @@
 	struct task_struct *idle;
 	unsigned long waittime;
 	int err, cpu, me = smp_processor_id();
+	int i;
+
+	for (i = 0; i < ncpus; ++i)
+		cpu_set(i, cpu_possible_map);
 
 	cpu_clear(me, cpu_online_map);
 	cpu_set(me, cpu_online_map);
diff -Nru a/arch/um/kernel/time.c b/arch/um/kernel/time.c
--- a/arch/um/kernel/time.c	2004-09-15 20:29:40 -07:00
+++ b/arch/um/kernel/time.c	2004-09-15 20:29:40 -07:00
@@ -54,6 +54,15 @@
 		       errno);
 }
 
+void disable_timer(void)
+{
+	struct itimerval disable = ((struct itimerval) { { 0, 0 }, { 0, 0 }});
+	if((setitimer(ITIMER_VIRTUAL, &disable, NULL) < 0) ||
+	   (setitimer(ITIMER_REAL, &disable, NULL) < 0))
+		printk("disnable_timer - setitimer failed, errno = %d\n",
+		       errno);
+}
+
 void switch_timers(int to_real)
 {
 	struct itimerval disable = ((struct itimerval) { { 0, 0 }, { 0, 0 }});
diff -Nru a/arch/um/kernel/tt/exec_kern.c b/arch/um/kernel/tt/exec_kern.c
--- a/arch/um/kernel/tt/exec_kern.c	2004-09-15 20:29:40 -07:00
+++ b/arch/um/kernel/tt/exec_kern.c	2004-09-15 20:29:40 -07:00
@@ -39,8 +39,7 @@
 		do_exit(SIGKILL);
 	}
 		
-	new_pid = start_fork_tramp((void *) current->thread.kernel_stack,
-				   stack, 0, exec_tramp);
+	new_pid = start_fork_tramp(current->thread_info, stack, 0, exec_tramp);
 	if(new_pid < 0){
 		printk(KERN_ERR 
 		       "flush_thread : new thread failed, errno = %d\n",
diff -Nru a/arch/um/kernel/tt/process_kern.c b/arch/um/kernel/tt/process_kern.c
--- a/arch/um/kernel/tt/process_kern.c	2004-09-15 20:29:40 -07:00
+++ b/arch/um/kernel/tt/process_kern.c	2004-09-15 20:29:40 -07:00
@@ -28,7 +28,7 @@
 
 void *switch_to_tt(void *prev, void *next, void *last)
 {
-	struct task_struct *from, *to;
+	struct task_struct *from, *to, *prev_sched;
 	unsigned long flags;
 	int err, vtalrm, alrm, prof, cpu;
 	char c;
@@ -72,6 +72,18 @@
 	if(err != sizeof(c))
 		panic("read of switch_pipe failed, errno = %d", -err);
 
+	/* If the process that we have just scheduled away from has exited,
+	 * then it needs to be killed here.  The reason is that, even though
+	 * it will kill itself when it next runs, that may be too late.  Its
+	 * stack will be freed, possibly before then, and if that happens,
+	 * we have a use-after-free situation.  So, it gets killed here
+	 * in case it has not already killed itself.
+	 */
+	prev_sched = current->thread.prev_sched;
+	if((prev_sched->state == TASK_ZOMBIE) ||
+	   (prev_sched->state == TASK_DEAD))
+		os_kill_process(prev_sched->thread.mode.tt.extern_pid, 1);
+
 	/* This works around a nasty race with 'jail'.  If we are switching
 	 * between two threads of a threaded app and the incoming process 
 	 * runs before the outgoing process reaches the read, and it makes
@@ -248,8 +260,7 @@
 
 	clone_flags &= CLONE_VM;
 	p->thread.temp_stack = stack;
-	new_pid = start_fork_tramp((void *) p->thread.kernel_stack, stack,
-				   clone_flags, tramp);
+	new_pid = start_fork_tramp(p->thread_info, stack, clone_flags, tramp);
 	if(new_pid < 0){
 		printk(KERN_ERR "copy_thread : clone failed - errno = %d\n", 
 		       -new_pid);
@@ -412,7 +423,7 @@
 	protect_memory(start, end - start, 1, w, 1, 1);
 
 	start = (unsigned long) UML_ROUND_DOWN(&__bss_start);
-	end = (unsigned long) UML_ROUND_UP(&_end);
+	end = (unsigned long) UML_ROUND_UP(brk_start);
 	protect_memory(start, end - start, 1, w, 1, 1);
 
 	mprotect_kernel_vm(w);
@@ -501,9 +512,9 @@
 	void *sp;
 	int pages;
 
-	pages = (1 << CONFIG_KERNEL_STACK_ORDER) - 2;
-	sp = (void *) init_task.thread.kernel_stack + pages * PAGE_SIZE - 
-		sizeof(unsigned long);
+	pages = (1 << CONFIG_KERNEL_STACK_ORDER);
+	sp = (void *) ((unsigned long) init_task.thread_info) +
+		pages * PAGE_SIZE - sizeof(unsigned long);
 	return(tracer(start_kernel_proc, sp));
 }
 
diff -Nru a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
--- a/arch/um/kernel/um_arch.c	2004-09-15 20:29:40 -07:00
+++ b/arch/um/kernel/um_arch.c	2004-09-15 20:29:40 -07:00
@@ -306,7 +306,7 @@
 
 int linux_main(int argc, char **argv)
 {
-	unsigned long avail;
+	unsigned long avail, diff;
 	unsigned long virtmem_size, max_physmem;
 	unsigned int i, add;
 
@@ -324,6 +324,16 @@
 
 	brk_start = (unsigned long) sbrk(0);
 	CHOOSE_MODE_PROC(before_mem_tt, before_mem_skas, brk_start);
+	/* Increase physical memory size for exec-shield users
+	 * so they actually get what they asked for. This should
+	 * add zero for non-exec shield users */
+
+	diff = UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end);
+	if(diff > 1024 * 1024){
+		printf("Adding %ld bytes to physical memory to account for "
+		       "exec-shield gap\n", diff);
+		physmem_size += UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end);
+	}
 
 	uml_physmem = uml_start;
 
@@ -379,9 +389,6 @@
 		       virtmem_size);
 
   	uml_postsetup();
-
-	init_task.thread.kernel_stack = (unsigned long) &init_thread_info + 
-		2 * PAGE_SIZE;
 
 	task_protections((unsigned long) &init_thread_info);
 	os_flush_stdout();
diff -Nru a/arch/um/os-Linux/file.c b/arch/um/os-Linux/file.c
--- a/arch/um/os-Linux/file.c	2004-09-15 20:29:40 -07:00
+++ b/arch/um/os-Linux/file.c	2004-09-15 20:29:40 -07:00
@@ -495,6 +495,16 @@
 	return(0);
 }
 
+int os_clear_fd_async(int fd)
+{
+	int flags = fcntl(fd, F_GETFL);
+
+	flags &= ~(O_ASYNC | O_NONBLOCK);
+	if(fcntl(fd, F_SETFL, flags) < 0)
+		return(-errno);
+	return(0);
+}
+
 int os_set_fd_block(int fd, int blocking)
 {
 	int flags;
diff -Nru a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
--- a/arch/x86_64/Kconfig	2004-09-15 20:29:40 -07:00
+++ b/arch/x86_64/Kconfig	2004-09-15 20:29:40 -07:00
@@ -78,6 +78,10 @@
 	bool
 	default y
 
+config GENERIC_IOMAP
+	bool
+	default y
+
 source "init/Kconfig"
 
 
diff -Nru a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
--- a/drivers/acpi/thermal.c	2004-09-15 20:29:40 -07:00
+++ b/drivers/acpi/thermal.c	2004-09-15 20:29:40 -07:00
@@ -659,7 +659,7 @@
 	struct acpi_thermal	*tz = (struct acpi_thermal *) data;
 	unsigned long		sleep_time = 0;
 	int			i = 0;
-	struct acpi_thermal_state state = tz->state;
+	struct acpi_thermal_state state;
 
 	ACPI_FUNCTION_TRACE("acpi_thermal_check");
 
@@ -667,6 +667,8 @@
 		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid (NULL) context.\n"));
 		return_VOID;
 	}
+
+	state = tz->state;
 
 	result = acpi_thermal_get_temperature(tz);
 	if (result)
diff -Nru a/drivers/block/amiflop.c b/drivers/block/amiflop.c
--- a/drivers/block/amiflop.c	2004-09-15 20:29:40 -07:00
+++ b/drivers/block/amiflop.c	2004-09-15 20:29:40 -07:00
@@ -386,16 +386,6 @@
 	fd_select(drive);
 	udelay (1);
 	fd_deselect(drive);
-
-#ifdef MODULE
-/*
-  this is the last interrupt for any drive access, happens after
-  release (from floppy_off). So we have to wait until now to decrease
-  the use count.
-*/
-	if (decusecount)
-		MOD_DEC_USE_COUNT;
-#endif
 }
 
 static void floppy_off (unsigned int nr)
@@ -1590,10 +1580,6 @@
 	local_irq_save(flags);
 	fd_ref[drive]++;
 	fd_device[drive] = system;
-#ifdef MODULE
-	if (unit[drive].motor == 0)
-		MOD_INC_USE_COUNT;
-#endif
 	local_irq_restore(flags);
 
 	unit[drive].dtype=&data_types[system];
@@ -1839,6 +1825,7 @@
 	return amiga_floppy_init();
 }
 
+#if 0 /* not safe to unload */
 void cleanup_module(void)
 {
 	int i;
@@ -1859,4 +1846,5 @@
 	release_mem_region(CUSTOM_PHYSADDR+0x20, 8);
 	unregister_blkdev(FLOPPY_MAJOR, "fd");
 }
+#endif
 #endif
diff -Nru a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
--- a/drivers/block/ll_rw_blk.c	2004-09-15 20:29:40 -07:00
+++ b/drivers/block/ll_rw_blk.c	2004-09-15 20:29:40 -07:00
@@ -352,7 +352,7 @@
 		printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors);
 	}
 
-	q->max_sectors = max_sectors;
+	q->max_sectors = q->max_hw_sectors = max_sectors;
 }
 
 EXPORT_SYMBOL(blk_queue_max_sectors);
@@ -454,7 +454,8 @@
 void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b)
 {
 	/* zero is "infinity" */
-	t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors);
+	t->max_sectors = t->max_hw_sectors =
+		min_not_zero(t->max_sectors,b->max_sectors);
 
 	t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments);
 	t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
@@ -2583,11 +2584,11 @@
 			break;
 		}
 
-		if (unlikely(bio_sectors(bio) > q->max_sectors)) {
+		if (unlikely(bio_sectors(bio) > q->max_hw_sectors)) {
 			printk("bio too big device %s (%u > %u)\n", 
 				bdevname(bio->bi_bdev, b),
 				bio_sectors(bio),
-				q->max_sectors);
+				q->max_hw_sectors);
 			goto end_io;
 		}
 
@@ -3206,13 +3207,61 @@
 	unsigned long ra_kb;
 	ssize_t ret = queue_var_store(&ra_kb, page, count);
 
+	spin_lock_irq(q->queue_lock);
 	if (ra_kb > (q->max_sectors >> 1))
 		ra_kb = (q->max_sectors >> 1);
 
 	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
+	spin_unlock_irq(q->queue_lock);
+
+	return ret;
+}
+
+static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
+{
+	int max_sectors_kb = q->max_sectors >> 1;
+
+	return queue_var_show(max_sectors_kb, (page));
+}
+
+static ssize_t
+queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
+{
+	unsigned long max_sectors_kb,
+			max_hw_sectors_kb = q->max_hw_sectors >> 1,
+			page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
+	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
+	int ra_kb;
+
+	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
+		return -EINVAL;
+	/*
+	 * Take the queue lock to update the readahead and max_sectors
+	 * values synchronously:
+	 */
+	spin_lock_irq(q->queue_lock);
+	/*
+	 * Trim readahead window as well, if necessary:
+	 */
+	ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10);
+	if (ra_kb > max_sectors_kb)
+		q->backing_dev_info.ra_pages =
+				max_sectors_kb >> (PAGE_CACHE_SHIFT - 10);
+
+	q->max_sectors = max_sectors_kb << 1;
+	spin_unlock_irq(q->queue_lock);
+
 	return ret;
 }
 
+static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
+{
+	int max_hw_sectors_kb = q->max_hw_sectors >> 1;
+
+	return queue_var_show(max_hw_sectors_kb, (page));
+}
+
+
 static struct queue_sysfs_entry queue_requests_entry = {
 	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
 	.show = queue_requests_show,
@@ -3225,9 +3274,22 @@
 	.store = queue_ra_store,
 };
 
+static struct queue_sysfs_entry queue_max_sectors_entry = {
+	.attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
+	.show = queue_max_sectors_show,
+	.store = queue_max_sectors_store,
+};
+
+static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
+	.attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
+	.show = queue_max_hw_sectors_show,
+};
+
 static struct attribute *default_attrs[] = {
 	&queue_requests_entry.attr,
 	&queue_ra_entry.attr,
+	&queue_max_hw_sectors_entry.attr,
+	&queue_max_sectors_entry.attr,
 	NULL,
 };
 
diff -Nru a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
--- a/drivers/cdrom/cdrom.c	2004-09-15 20:29:40 -07:00
+++ b/drivers/cdrom/cdrom.c	2004-09-15 20:29:40 -07:00
@@ -1933,7 +1933,8 @@
 			       int lba, int nframes)
 {
 	struct packet_command cgc;
-	int nr, ret;
+	int ret = 0;
+	int nr;
 
 	cdi->last_sense = 0;
 
@@ -1955,8 +1956,8 @@
 		return -ENOMEM;
 
 	if (!access_ok(VERIFY_WRITE, ubuf, nframes * CD_FRAMESIZE_RAW)) {
-		kfree(cgc.buffer);
-		return -EFAULT;
+		ret = -EFAULT;
+		goto out;
 	}
 
 	cgc.data_direction = CGC_DATA_READ;
@@ -1967,13 +1968,17 @@
 		ret = cdrom_read_block(cdi, &cgc, lba, nr, 1, CD_FRAMESIZE_RAW);
 		if (ret)
 			break;
-		__copy_to_user(ubuf, cgc.buffer, CD_FRAMESIZE_RAW * nr);
+		if (__copy_to_user(ubuf, cgc.buffer, CD_FRAMESIZE_RAW * nr)) {
+			ret = -EFAULT;
+			break;
+		}
 		ubuf += CD_FRAMESIZE_RAW * nr;
 		nframes -= nr;
 		lba += nr;
 	}
+out:
 	kfree(cgc.buffer);
-	return 0;
+	return ret;
 }
 
 static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
diff -Nru a/drivers/char/Kconfig b/drivers/char/Kconfig
--- a/drivers/char/Kconfig	2004-09-15 20:29:40 -07:00
+++ b/drivers/char/Kconfig	2004-09-15 20:29:40 -07:00
@@ -59,7 +59,7 @@
 
 config HW_CONSOLE
 	bool
-	depends on VT && !S390 && !UM
+	depends on VT && !S390 && !USERMODE
 	default y
 
 config SERIAL_NONSTANDARD
diff -Nru a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
--- a/drivers/char/hvc_console.c	2004-09-15 20:29:40 -07:00
+++ b/drivers/char/hvc_console.c	2004-09-15 20:29:40 -07:00
@@ -801,7 +801,7 @@
 void hvc_console_print(struct console *co, const char *b, unsigned count)
 {
 	char c[16] __ALIGNED__;
-	unsigned i, n = 0;
+	unsigned i = 0, n = 0;
 	int r, donecr = 0;
 
 	/* Console access attempt outside of acceptable console range. */
diff -Nru a/drivers/char/hvcs.c b/drivers/char/hvcs.c
--- a/drivers/char/hvcs.c	2004-09-15 20:29:40 -07:00
+++ b/drivers/char/hvcs.c	2004-09-15 20:29:40 -07:00
@@ -119,8 +119,16 @@
  * Rearranged hvcs_close().  Cleaned up some printks and did some housekeeping
  * on the changelog.  Removed local CLC_LENGTH and used HVCS_CLC_LENGTH from
  * arch/ppc64/hvcserver.h.
+ *
+ * 1.3.2 -> 1.3.3 Replaced yield() in hvcs_close() with tty_wait_until_sent() to
+ * prevent a possible lockup with realtime scheduling, as similarly pointed out by
+ * akpm in hvc_console.  This change resulted in the removal of hvcs_final_close()
+ * to reorder cleanup operations and prevent discarding of pending data during
+ * an hvcs_close().  Removed spinlock protection of hvcs_struct data members in
+ * hvcs_write_room() and hvcs_chars_in_buffer() because they aren't needed.
  */
-#define HVCS_DRIVER_VERSION "1.3.2"
+
+#define HVCS_DRIVER_VERSION "1.3.3"
 
 MODULE_AUTHOR("Ryan S. Arnold <rsa@us.ibm.com>");
 MODULE_DESCRIPTION("IBM hvcs (Hypervisor Virtual Console Server) Driver");
@@ -128,6 +136,12 @@
 MODULE_VERSION(HVCS_DRIVER_VERSION);
 
 /*
+ * Wait this long per iteration while trying to push buffered data to the
+ * hypervisor before allowing the tty to complete a close operation.
+ */
+#define HVCS_CLOSE_WAIT (HZ/100) /* 1/100 of a second */
+
+/*
  * Since the Linux TTY code does not currently (2-04-2004) support dynamic
  * addition of tty derived devices and we shouldn't allocate thousands of
  * tty_device pointers when the number of vty-server & vty partner connections
@@ -317,7 +331,6 @@
 
 static int hvcs_enable_device(struct hvcs_struct *hvcsd,
 		uint32_t unit_address, unsigned int irq, struct vio_dev *dev);
-static void hvcs_final_close(struct hvcs_struct *hvcsd);
 
 static void destroy_hvcs_struct(struct kobject *kobj);
 static int hvcs_open(struct tty_struct *tty, struct file *filp);
@@ -427,7 +440,7 @@
 	struct tty_struct *tty;
 	char buf[HVCS_BUFF_LEN] __ALIGNED__;
 	unsigned long flags;
-	int got;
+	int got = 0;
 	int i;
 
 	spin_lock_irqsave(&hvcsd->lock, flags);
@@ -574,28 +587,6 @@
 	kfree(hvcsd);
 }
 
-/*
- * This function must be called with hvcsd->lock held.  Do not free the irq in
- * this function since it is called with the spinlock held.
- */
-static void hvcs_final_close(struct hvcs_struct *hvcsd)
-{
-	vio_disable_interrupts(hvcsd->vdev);
-
-	hvcsd->todo_mask = 0;
-
-	/* These two may be redundant if the operation was a close. */
-	if (hvcsd->tty) {
-		hvcsd->tty->driver_data = NULL;
-		hvcsd->tty = NULL;
-	}
-
-	hvcsd->open_count = 0;
-
-	memset(&hvcsd->buffer[0], 0x00, HVCS_BUFF_LEN);
-	hvcsd->chars_in_buffer = 0;
-}
-
 static struct kobj_type hvcs_kobj_type = {
 	.release = destroy_hvcs_struct,
 };
@@ -692,8 +683,6 @@
 	return 0;
 }
 
-
-
 static int __devexit hvcs_remove(struct vio_dev *dev)
 {
 	struct hvcs_struct *hvcsd = dev->dev.driver_data;
@@ -945,7 +934,7 @@
  */
 struct hvcs_struct *hvcs_get_by_index(int index)
 {
-	struct hvcs_struct *hvcsd;
+	struct hvcs_struct *hvcsd = NULL;
 	unsigned long flags;
 
 	spin_lock(&hvcs_structs_lock);
@@ -1099,12 +1088,7 @@
 	kobjp = &hvcsd->kobj;
 	if (--hvcsd->open_count == 0) {
 
-		/*
-		 * This line is important because it tells hvcs_open that this
-		 * device needs to be re-configured the next time hvcs_open is
-		 * called.
-		 */
-		hvcsd->tty->driver_data = NULL;
+		vio_disable_interrupts(hvcsd->vdev);
 
 		/*
 		 * NULL this early so that the kernel_thread doesn't try to
@@ -1113,26 +1097,18 @@
 		 */
 		hvcsd->tty = NULL;
 
+		irq = hvcsd->vdev->irq;
+		spin_unlock_irqrestore(&hvcsd->lock, flags);
+
+		tty_wait_until_sent(tty, HVCS_CLOSE_WAIT);
+
 		/*
-		 * Block the close until all the buffered data has been
-		 * delivered.
+		 * This line is important because it tells hvcs_open that this
+		 * device needs to be re-configured the next time hvcs_open is
+		 * called.
 		 */
-		while(hvcsd->chars_in_buffer) {
-			spin_unlock_irqrestore(&hvcsd->lock, flags);
-
-			/*
-			 * Give the kernel thread the hvcs_struct so that it can
-			 * try to deliver the remaining data but block the close
-			 * operation by spinning in this function so that other
-			 * tty operations have to wait.
-			 */
-			yield();
-			spin_lock_irqsave(&hvcsd->lock, flags);
-		}
+		tty->driver_data = NULL;
 
-		hvcs_final_close(hvcsd);
-		irq = hvcsd->vdev->irq;
-		spin_unlock_irqrestore(&hvcsd->lock, flags);
 		free_irq(irq, hvcsd);
 		kobject_put(kobjp);
 		return;
@@ -1162,12 +1138,25 @@
 	 * Don't kobject put inside the spinlock because the destruction
 	 * callback may use the spinlock and it may get called before the
 	 * spinlock has been released.  Get a pointer to the kobject and
-	 * kobject_put on that instead.
+	 * kobject_put on that after releasing the spinlock.
 	 */
 	kobjp = &hvcsd->kobj;
 
-	/* Calling this will drop any buffered data on the floor. */
-	hvcs_final_close(hvcsd);
+	vio_disable_interrupts(hvcsd->vdev);
+
+	hvcsd->todo_mask = 0;
+
+	/* I don't think the tty needs the hvcs_struct pointer after a hangup */
+	hvcsd->tty->driver_data = NULL;
+	hvcsd->tty = NULL;
+
+	hvcsd->open_count = 0;
+
+	/* This will drop any buffered data on the floor which is OK in a hangup
+	 * scenario. */
+	memset(&hvcsd->buffer[0], 0x00, HVCS_BUFF_LEN);
+	hvcsd->chars_in_buffer = 0;
+
 	irq = hvcsd->vdev->irq;
 
 	spin_unlock_irqrestore(&hvcsd->lock, flags);
@@ -1323,28 +1312,18 @@
 static int hvcs_write_room(struct tty_struct *tty)
 {
 	struct hvcs_struct *hvcsd = tty->driver_data;
-	unsigned long flags;
-	int retval;
 
 	if (!hvcsd || hvcsd->open_count <= 0)
 		return 0;
 
-	spin_lock_irqsave(&hvcsd->lock, flags);
-	retval = HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
-	spin_unlock_irqrestore(&hvcsd->lock, flags);
-	return retval;
+	return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
 }
 
 static int hvcs_chars_in_buffer(struct tty_struct *tty)
 {
 	struct hvcs_struct *hvcsd = tty->driver_data;
-	unsigned long flags;
-	int retval;
 
-	spin_lock_irqsave(&hvcsd->lock, flags);
-	retval = hvcsd->chars_in_buffer;
-	spin_unlock_irqrestore(&hvcsd->lock, flags);
-	return retval;
+	return hvcsd->chars_in_buffer;
 }
 
 static struct tty_operations hvcs_ops = {
@@ -1433,7 +1412,7 @@
 			" as a tty driver failed.\n");
 		hvcs_free_index_list();
 		put_tty_driver(hvcs_tty_driver);
-		return rc;
+		return -EIO;
 	}
 
 	hvcs_pi_buff = kmalloc(PAGE_SIZE, GFP_KERNEL);
diff -Nru a/drivers/char/hvsi.c b/drivers/char/hvsi.c
--- a/drivers/char/hvsi.c	2004-09-15 20:29:40 -07:00
+++ b/drivers/char/hvsi.c	2004-09-15 20:29:40 -07:00
@@ -1004,7 +1004,7 @@
 {
 	struct hvsi_struct *hp = tty->driver_data;
 	const char *source = buf;
-	char *kbuf;
+	char *kbuf = NULL;
 	unsigned long flags;
 	int total = 0;
 	int origcount = count;
diff -Nru a/drivers/ide/pci/sis5513.c b/drivers/ide/pci/sis5513.c
--- a/drivers/ide/pci/sis5513.c	2004-09-15 20:29:40 -07:00
+++ b/drivers/ide/pci/sis5513.c	2004-09-15 20:29:40 -07:00
@@ -954,31 +954,25 @@
 	return;
 }
 
-#define DECLARE_SIS_DEV(name_str)					\
-	{								\
-		.name		= name_str,				\
-		.init_chipset	= init_chipset_sis5513,			\
-		.init_hwif	= init_hwif_sis5513,			\
-		.channels	= 2,					\
-		.autodma	= NOAUTODMA,				\
-		.enablebits	= {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},	\
-		.bootable	= ON_BOARD,				\
-	}
-
-static ide_pci_device_t sis5513_chipsets[] __devinitdata = {
-	/* 0 */ DECLARE_SIS_DEV("SIS5513"),
-	/* 1 */ DECLARE_SIS_DEV("SIS5518")
+static ide_pci_device_t sis5513_chipset __devinitdata = {
+	.name		= "SIS5513",
+	.init_chipset	= init_chipset_sis5513,
+	.init_hwif	= init_hwif_sis5513,
+	.channels	= 2,
+	.autodma	= NOAUTODMA,
+	.enablebits	= {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
+	.bootable	= ON_BOARD,
 };
 
 static int __devinit sis5513_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 {
-	ide_setup_pci_device(dev, &sis5513_chipsets[id->driver_data]);
+	ide_setup_pci_device(dev, &sis5513_chipset);
 	return 0;
 }
 
 static struct pci_device_id sis5513_pci_tbl[] = {
 	{ PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5513, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
-	{ PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5518, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
+	{ PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5518, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
 	{ 0, },
 };
 MODULE_DEVICE_TABLE(pci, sis5513_pci_tbl);
diff -Nru a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
--- a/drivers/macintosh/via-pmu.c	2004-09-15 20:29:40 -07:00
+++ b/drivers/macintosh/via-pmu.c	2004-09-15 20:29:40 -07:00
@@ -1446,7 +1446,7 @@
 pmu_sr_intr(struct pt_regs *regs)
 {
 	struct adb_request *req;
-	int bite;
+	int bite = 0;
 
 	if (via[B] & TREQ) {
 		printk(KERN_ERR "PMU: spurious SR intr (%x)\n", via[B]);
diff -Nru a/drivers/md/dm-table.c b/drivers/md/dm-table.c
--- a/drivers/md/dm-table.c	2004-09-15 20:29:40 -07:00
+++ b/drivers/md/dm-table.c	2004-09-15 20:29:40 -07:00
@@ -825,7 +825,7 @@
 	 * Make sure we obey the optimistic sub devices
 	 * restrictions.
 	 */
-	q->max_sectors = t->limits.max_sectors;
+	blk_queue_max_sectors(q, t->limits.max_sectors);
 	q->max_phys_segments = t->limits.max_phys_segments;
 	q->max_hw_segments = t->limits.max_hw_segments;
 	q->hardsect_size = t->limits.hardsect_size;
diff -Nru a/drivers/md/linear.c b/drivers/md/linear.c
--- a/drivers/md/linear.c	2004-09-15 20:29:40 -07:00
+++ b/drivers/md/linear.c	2004-09-15 20:29:40 -07:00
@@ -157,7 +157,7 @@
 		 */
 		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
 		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
-			mddev->queue->max_sectors = (PAGE_SIZE>>9);
+			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 		disk->size = rdev->size;
 		mddev->array_size += rdev->size;
diff -Nru a/drivers/md/md.c b/drivers/md/md.c
--- a/drivers/md/md.c	2004-09-15 20:29:40 -07:00
+++ b/drivers/md/md.c	2004-09-15 20:29:40 -07:00
@@ -2895,7 +2895,7 @@
 	return thread;
 }
 
-void md_interrupt_thread(mdk_thread_t *thread)
+static void md_interrupt_thread(mdk_thread_t *thread)
 {
 	if (!thread->tsk) {
 		MD_BUG();
@@ -3797,6 +3797,5 @@
 EXPORT_SYMBOL(md_unregister_thread);
 EXPORT_SYMBOL(md_wakeup_thread);
 EXPORT_SYMBOL(md_print_devices);
-EXPORT_SYMBOL(md_interrupt_thread);
 EXPORT_SYMBOL(md_check_recovery);
 MODULE_LICENSE("GPL");
diff -Nru a/drivers/md/multipath.c b/drivers/md/multipath.c
--- a/drivers/md/multipath.c	2004-09-15 20:29:40 -07:00
+++ b/drivers/md/multipath.c	2004-09-15 20:29:40 -07:00
@@ -325,7 +325,7 @@
 		 */
 			if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
 			    mddev->queue->max_sectors > (PAGE_SIZE>>9))
-				mddev->queue->max_sectors = (PAGE_SIZE>>9);
+				blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 			conf->working_disks++;
 			rdev->raid_disk = path;
@@ -479,7 +479,7 @@
 		 * a merge_bvec_fn to be involved in multipath */
 		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
 		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
-			mddev->queue->max_sectors = (PAGE_SIZE>>9);
+			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 		if (!rdev->faulty) 
 			conf->working_disks++;
diff -Nru a/drivers/md/raid0.c b/drivers/md/raid0.c
--- a/drivers/md/raid0.c	2004-09-15 20:29:40 -07:00
+++ b/drivers/md/raid0.c	2004-09-15 20:29:40 -07:00
@@ -162,7 +162,7 @@
 
 		if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
 		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
-			mddev->queue->max_sectors = (PAGE_SIZE>>9);
+			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 		if (!smallest || (rdev1->size <smallest->size))
 			smallest = rdev1;
diff -Nru a/drivers/md/raid1.c b/drivers/md/raid1.c
--- a/drivers/md/raid1.c	2004-09-15 20:29:40 -07:00
+++ b/drivers/md/raid1.c	2004-09-15 20:29:40 -07:00
@@ -753,7 +753,7 @@
 			 */
 			if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
 			    mddev->queue->max_sectors > (PAGE_SIZE>>9))
-				mddev->queue->max_sectors = (PAGE_SIZE>>9);
+				blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 			p->head_position = 0;
 			rdev->raid_disk = mirror;
@@ -1196,7 +1196,7 @@
 		 */
 		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
 		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
-			mddev->queue->max_sectors = (PAGE_SIZE>>9);
+			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 		disk->head_position = 0;
 		if (!rdev->faulty && rdev->in_sync)
diff -Nru a/drivers/net/b44.c b/drivers/net/b44.c
--- a/drivers/net/b44.c	2004-09-15 20:29:40 -07:00
+++ b/drivers/net/b44.c	2004-09-15 20:29:40 -07:00
@@ -98,13 +98,24 @@
 static void b44_init_rings(struct b44 *);
 static void b44_init_hw(struct b44 *);
 
+static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
+{
+	return readl(bp->regs + reg);
+}
+
+static inline void bw32(const struct b44 *bp, 
+			unsigned long reg, unsigned long val)
+{
+	writel(val, bp->regs + reg);
+}
+
 static int b44_wait_bit(struct b44 *bp, unsigned long reg,
 			u32 bit, unsigned long timeout, const int clear)
 {
 	unsigned long i;
 
 	for (i = 0; i < timeout; i++) {
-		u32 val = br32(reg);
+		u32 val = br32(bp, reg);
 
 		if (clear && !(val & bit))
 			break;
@@ -168,7 +179,7 @@
 
 static u32 ssb_get_core_rev(struct b44 *bp)
 {
-	return (br32(B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
+	return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
 }
 
 static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
@@ -180,13 +191,13 @@
 			       ssb_get_addr(bp, SBID_REG_PCI, 0));
 	pci_rev = ssb_get_core_rev(bp);
 
-	val = br32(B44_SBINTVEC);
+	val = br32(bp, B44_SBINTVEC);
 	val |= cores;
-	bw32(B44_SBINTVEC, val);
+	bw32(bp, B44_SBINTVEC, val);
 
-	val = br32(SSB_PCI_TRANS_2);
+	val = br32(bp, SSB_PCI_TRANS_2);
 	val |= SSB_PCI_PREF | SSB_PCI_BURST;
-	bw32(SSB_PCI_TRANS_2, val);
+	bw32(bp, SSB_PCI_TRANS_2, val);
 
 	pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);
 
@@ -195,18 +206,18 @@
 
 static void ssb_core_disable(struct b44 *bp)
 {
-	if (br32(B44_SBTMSLOW) & SBTMSLOW_RESET)
+	if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
 		return;
 
-	bw32(B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
+	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
 	b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
 	b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
-	bw32(B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
+	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
 			    SBTMSLOW_REJECT | SBTMSLOW_RESET));
-	br32(B44_SBTMSLOW);
+	br32(bp, B44_SBTMSLOW);
 	udelay(1);
-	bw32(B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
-	br32(B44_SBTMSLOW);
+	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
+	br32(bp, B44_SBTMSLOW);
 	udelay(1);
 }
 
@@ -215,31 +226,31 @@
 	u32 val;
 
 	ssb_core_disable(bp);
-	bw32(B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
-	br32(B44_SBTMSLOW);
+	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
+	br32(bp, B44_SBTMSLOW);
 	udelay(1);
 
 	/* Clear SERR if set, this is a hw bug workaround.  */
-	if (br32(B44_SBTMSHIGH) & SBTMSHIGH_SERR)
-		bw32(B44_SBTMSHIGH, 0);
+	if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
+		bw32(bp, B44_SBTMSHIGH, 0);
 
-	val = br32(B44_SBIMSTATE);
+	val = br32(bp, B44_SBIMSTATE);
 	if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
-		bw32(B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));
+		bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));
 
-	bw32(B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
-	br32(B44_SBTMSLOW);
+	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
+	br32(bp, B44_SBTMSLOW);
 	udelay(1);
 
-	bw32(B44_SBTMSLOW, (SBTMSLOW_CLOCK));
-	br32(B44_SBTMSLOW);
+	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
+	br32(bp, B44_SBTMSLOW);
 	udelay(1);
 }
 
 static int ssb_core_unit(struct b44 *bp)
 {
 #if 0
-	u32 val = br32(B44_SBADMATCH0);
+	u32 val = br32(bp, B44_SBADMATCH0);
 	u32 base;
 
 	type = val & SBADMATCH0_TYPE_MASK;
@@ -263,7 +274,7 @@
 
 static int ssb_is_core_up(struct b44 *bp)
 {
-	return ((br32(B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
+	return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
 		== SBTMSLOW_CLOCK);
 }
 
@@ -275,19 +286,19 @@
 	val |= ((u32) data[3]) << 16;
 	val |= ((u32) data[4]) <<  8;
 	val |= ((u32) data[5]) <<  0;
-	bw32(B44_CAM_DATA_LO, val);
+	bw32(bp, B44_CAM_DATA_LO, val);
 	val = (CAM_DATA_HI_VALID | 
 	       (((u32) data[0]) << 8) |
 	       (((u32) data[1]) << 0));
-	bw32(B44_CAM_DATA_HI, val);
-	bw32(B44_CAM_CTRL, (CAM_CTRL_WRITE |
+	bw32(bp, B44_CAM_DATA_HI, val);
+	bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
 			    (index << CAM_CTRL_INDEX_SHIFT)));
 	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);	
 }
 
 static inline void __b44_disable_ints(struct b44 *bp)
 {
-	bw32(B44_IMASK, 0);
+	bw32(bp, B44_IMASK, 0);
 }
 
 static void b44_disable_ints(struct b44 *bp)
@@ -295,34 +306,34 @@
 	__b44_disable_ints(bp);
 
 	/* Flush posted writes. */
-	br32(B44_IMASK);
+	br32(bp, B44_IMASK);
 }
 
 static void b44_enable_ints(struct b44 *bp)
 {
-	bw32(B44_IMASK, bp->imask);
+	bw32(bp, B44_IMASK, bp->imask);
 }
 
 static int b44_readphy(struct b44 *bp, int reg, u32 *val)
 {
 	int err;
 
-	bw32(B44_EMAC_ISTAT, EMAC_INT_MII);
-	bw32(B44_MDIO_DATA, (MDIO_DATA_SB_START |
+	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
+	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
 			     (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
 			     (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
 			     (reg << MDIO_DATA_RA_SHIFT) |
 			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
 	err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
-	*val = br32(B44_MDIO_DATA) & MDIO_DATA_DATA;
+	*val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;
 
 	return err;
 }
 
 static int b44_writephy(struct b44 *bp, int reg, u32 val)
 {
-	bw32(B44_EMAC_ISTAT, EMAC_INT_MII);
-	bw32(B44_MDIO_DATA, (MDIO_DATA_SB_START |
+	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
+	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
 			     (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
 			     (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
 			     (reg << MDIO_DATA_RA_SHIFT) |
@@ -382,20 +393,20 @@
 	bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
 	bp->flags |= pause_flags;
 
-	val = br32(B44_RXCONFIG);
+	val = br32(bp, B44_RXCONFIG);
 	if (pause_flags & B44_FLAG_RX_PAUSE)
 		val |= RXCONFIG_FLOW;
 	else
 		val &= ~RXCONFIG_FLOW;
-	bw32(B44_RXCONFIG, val);
+	bw32(bp, B44_RXCONFIG, val);
 
-	val = br32(B44_MAC_FLOW);
+	val = br32(bp, B44_MAC_FLOW);
 	if (pause_flags & B44_FLAG_TX_PAUSE)
 		val |= (MAC_FLOW_PAUSE_ENAB |
 			(0xc0 & MAC_FLOW_RX_HI_WATER));
 	else
 		val &= ~MAC_FLOW_PAUSE_ENAB;
-	bw32(B44_MAC_FLOW, val);
+	bw32(bp, B44_MAC_FLOW, val);
 }
 
 static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
@@ -491,11 +502,11 @@
 
 	val = &bp->hw_stats.tx_good_octets;
 	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
-		*val++ += br32(reg);
+		*val++ += br32(bp, reg);
 	}
 	val = &bp->hw_stats.rx_good_octets;
 	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
-		*val++ += br32(reg);
+		*val++ += br32(bp, reg);
 	}
 }
 
@@ -535,14 +546,14 @@
 
 		if (!netif_carrier_ok(bp->dev) &&
 		    (bmsr & BMSR_LSTATUS)) {
-			u32 val = br32(B44_TX_CTRL);
+			u32 val = br32(bp, B44_TX_CTRL);
 			u32 local_adv, remote_adv;
 
 			if (bp->flags & B44_FLAG_FULL_DUPLEX)
 				val |= TX_CTRL_DUPLEX;
 			else
 				val &= ~TX_CTRL_DUPLEX;
-			bw32(B44_TX_CTRL, val);
+			bw32(bp, B44_TX_CTRL, val);
 
 			if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
 			    !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
@@ -587,7 +598,7 @@
 {
 	u32 cur, cons;
 
-	cur  = br32(B44_DMATX_STAT) & DMATX_STAT_CDMASK;
+	cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
 	cur /= sizeof(struct dma_desc);
 
 	/* XXX needs updating when NETIF_F_SG is supported */
@@ -611,7 +622,7 @@
 	    TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
 		netif_wake_queue(bp->dev);
 
-	bw32(B44_GPTIMER, 0);
+	bw32(bp, B44_GPTIMER, 0);
 }
 
 /* Works like this.  This chip writes a 'struct rx_header" 30 bytes
@@ -708,7 +719,7 @@
 	u32 cons, prod;
 
 	received = 0;
-	prod  = br32(B44_DMARX_STAT) & DMARX_STAT_CDMASK;
+	prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
 	prod /= sizeof(struct dma_desc);
 	cons = bp->rx_cons;
 
@@ -787,7 +798,7 @@
 	}
 
 	bp->rx_cons = cons;
-	bw32(B44_DMARX_PTR, cons * sizeof(struct dma_desc));
+	bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));
 
 	return received;
 }
@@ -851,8 +862,8 @@
 
 	spin_lock_irqsave(&bp->lock, flags);
 
-	istat = br32(B44_ISTAT);
-	imask = br32(B44_IMASK);
+	istat = br32(bp, B44_ISTAT);
+	imask = br32(bp, B44_IMASK);
 
 	/* ??? What the fuck is the purpose of the interrupt mask
 	 * ??? register if we have to mask it out by hand anyways?
@@ -872,8 +883,8 @@
 			       dev->name);
 		}
 
-		bw32(B44_ISTAT, istat);
-		br32(B44_ISTAT);
+		bw32(bp, B44_ISTAT, istat);
+		br32(bp, B44_ISTAT);
 	}
 	spin_unlock_irqrestore(&bp->lock, flags);
 	return IRQ_RETVAL(handled);
@@ -937,11 +948,11 @@
 
 	wmb();
 
-	bw32(B44_DMATX_PTR, entry * sizeof(struct dma_desc));
+	bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
 	if (bp->flags & B44_FLAG_BUGGY_TXPTR)
-		bw32(B44_DMATX_PTR, entry * sizeof(struct dma_desc));
+		bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
 	if (bp->flags & B44_FLAG_REORDER_BUG)
-		br32(B44_DMATX_PTR);
+		br32(bp, B44_DMATX_PTR);
 
 	if (TX_BUFFS_AVAIL(bp) < 1)
 		netif_stop_queue(dev);
@@ -1109,27 +1120,27 @@
 {
 	unsigned long reg;
 
-	bw32(B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
+	bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
 	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
-		br32(reg);
+		br32(bp, reg);
 	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
-		br32(reg);
+		br32(bp, reg);
 }
 
 /* bp->lock is held. */
 static void b44_chip_reset(struct b44 *bp)
 {
 	if (ssb_is_core_up(bp)) {
-		bw32(B44_RCV_LAZY, 0);
-		bw32(B44_ENET_CTRL, ENET_CTRL_DISABLE);
+		bw32(bp, B44_RCV_LAZY, 0);
+		bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
 		b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 100, 1);
-		bw32(B44_DMATX_CTRL, 0);
+		bw32(bp, B44_DMATX_CTRL, 0);
 		bp->tx_prod = bp->tx_cons = 0;
-		if (br32(B44_DMARX_STAT) & DMARX_STAT_EMASK) {
+		if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
 			b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
 				     100, 0);
 		}
-		bw32(B44_DMARX_CTRL, 0);
+		bw32(bp, B44_DMARX_CTRL, 0);
 		bp->rx_prod = bp->rx_cons = 0;
 	} else {
 		ssb_pci_setup(bp, (bp->core_unit == 0 ?
@@ -1142,20 +1153,20 @@
 	b44_clear_stats(bp);
 
 	/* Make PHY accessible. */
-	bw32(B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
+	bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
 			     (0x0d & MDIO_CTRL_MAXF_MASK)));
-	br32(B44_MDIO_CTRL);
+	br32(bp, B44_MDIO_CTRL);
 
-	if (!(br32(B44_DEVCTRL) & DEVCTRL_IPP)) {
-		bw32(B44_ENET_CTRL, ENET_CTRL_EPSEL);
-		br32(B44_ENET_CTRL);
+	if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
+		bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
+		br32(bp, B44_ENET_CTRL);
 		bp->flags &= ~B44_FLAG_INTERNAL_PHY;
 	} else {
-		u32 val = br32(B44_DEVCTRL);
+		u32 val = br32(bp, B44_DEVCTRL);
 
 		if (val & DEVCTRL_EPR) {
-			bw32(B44_DEVCTRL, (val & ~DEVCTRL_EPR));
-			br32(B44_DEVCTRL);
+			bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
+			br32(bp, B44_DEVCTRL);
 			udelay(100);
 		}
 		bp->flags |= B44_FLAG_INTERNAL_PHY;
@@ -1172,13 +1183,13 @@
 /* bp->lock is held. */
 static void __b44_set_mac_addr(struct b44 *bp)
 {
-	bw32(B44_CAM_CTRL, 0);
+	bw32(bp, B44_CAM_CTRL, 0);
 	if (!(bp->dev->flags & IFF_PROMISC)) {
 		u32 val;
 
 		__b44_cam_write(bp, bp->dev->dev_addr, 0);
-		val = br32(B44_CAM_CTRL);
-		bw32(B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
+		val = br32(bp, B44_CAM_CTRL);
+		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
 	}
 }
 
@@ -1212,30 +1223,30 @@
 	b44_setup_phy(bp);
 
 	/* Enable CRC32, set proper LED modes and power on PHY */
-	bw32(B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
-	bw32(B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));
+	bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
+	bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));
 
 	/* This sets the MAC address too.  */
 	__b44_set_rx_mode(bp->dev);
 
 	/* MTU + eth header + possible VLAN tag + struct rx_header */
-	bw32(B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
-	bw32(B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
+	bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
+	bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
 
-	bw32(B44_TX_WMARK, 56); /* XXX magic */
-	bw32(B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
-	bw32(B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
-	bw32(B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
+	bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
+	bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
+	bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
+	bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
 			      (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
-	bw32(B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
+	bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
 
-	bw32(B44_DMARX_PTR, bp->rx_pending);
+	bw32(bp, B44_DMARX_PTR, bp->rx_pending);
 	bp->rx_prod = bp->rx_pending;	
 
-	bw32(B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
+	bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
 
-	val = br32(B44_ENET_CTRL);
-	bw32(B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
+	val = br32(bp, B44_ENET_CTRL);
+	bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
 }
 
 static int b44_open(struct net_device *dev)
@@ -1372,11 +1383,11 @@
 	int i=0;
 	unsigned char zero[6] = {0,0,0,0,0,0};
 
-	val = br32(B44_RXCONFIG);
+	val = br32(bp, B44_RXCONFIG);
 	val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
 	if (dev->flags & IFF_PROMISC) {
 		val |= RXCONFIG_PROMISC;
-		bw32(B44_RXCONFIG, val);
+		bw32(bp, B44_RXCONFIG, val);
 	} else {
 		__b44_set_mac_addr(bp);
 
@@ -1388,9 +1399,9 @@
 		for(;i<64;i++) {
 			__b44_cam_write(bp, zero, i);			
 		}
-		bw32(B44_RXCONFIG, val);
-        	val = br32(B44_CAM_CTRL);
-	        bw32(B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
+		bw32(bp, B44_RXCONFIG, val);
+        	val = br32(bp, B44_CAM_CTRL);
+	        bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
 	}
 }
 
@@ -1760,7 +1771,7 @@
 
 	spin_lock_init(&bp->lock);
 
-	bp->regs = (unsigned long) ioremap(b44reg_base, b44reg_len);
+	bp->regs = ioremap(b44reg_base, b44reg_len);
 	if (bp->regs == 0UL) {
 		printk(KERN_ERR PFX "Cannot map device registers, "
 		       "aborting.\n");
diff -Nru a/drivers/net/b44.h b/drivers/net/b44.h
--- a/drivers/net/b44.h	2004-09-15 20:29:40 -07:00
+++ b/drivers/net/b44.h	2004-09-15 20:29:40 -07:00
@@ -395,9 +395,6 @@
 #define SSB_PCI_MASK1		0xfc000000
 #define SSB_PCI_MASK2		0xc0000000
 
-#define br32(REG)	readl(bp->regs + (REG))
-#define bw32(REG,VAL)	writel((VAL), bp->regs + (REG))
-
 /* 4400 PHY registers */
 #define B44_MII_AUXCTRL		24	/* Auxiliary Control */
 #define  MII_AUXCTRL_DUPLEX	0x0001  /* Full Duplex */
@@ -530,7 +527,7 @@
 	struct net_device_stats	stats;
 	struct b44_hw_stats	hw_stats;
 
-	unsigned long		regs;
+	volatile void __iomem   *regs;
 	struct pci_dev		*pdev;
 	struct net_device	*dev;
 
diff -Nru a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
--- a/drivers/net/ibmveth.c	2004-09-15 20:29:40 -07:00
+++ b/drivers/net/ibmveth.c	2004-09-15 20:29:40 -07:00
@@ -885,13 +885,16 @@
 
 	mac_addr_p = (unsigned char *) vio_get_attribute(dev, VETH_MAC_ADDR, 0);
 	if(!mac_addr_p) {
-		ibmveth_error_printk("Can't find VETH_MAC_ADDR attribute\n");
+		printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find VETH_MAC_ADDR "
+				"attribute\n", __FILE__, __LINE__);
 		return 0;
 	}
 	
 	mcastFilterSize_p= (unsigned int *) vio_get_attribute(dev, VETH_MCAST_FILTER_SIZE, 0);
 	if(!mcastFilterSize_p) {
-		ibmveth_error_printk("Can't find VETH_MCAST_FILTER_SIZE attribute\n");
+		printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find "
+				"VETH_MCAST_FILTER_SIZE attribute\n",
+				__FILE__, __LINE__);
 		return 0;
 	}
 	
diff -Nru a/drivers/net/tg3.c b/drivers/net/tg3.c
--- a/drivers/net/tg3.c	2004-09-15 20:29:40 -07:00
+++ b/drivers/net/tg3.c	2004-09-15 20:29:40 -07:00
@@ -1122,29 +1122,33 @@
 	u32 old_rx_mode = tp->rx_mode;
 	u32 old_tx_mode = tp->tx_mode;
 
-	if (local_adv & ADVERTISE_PAUSE_CAP) {
-		if (local_adv & ADVERTISE_PAUSE_ASYM) {
-			if (remote_adv & LPA_PAUSE_CAP)
-				new_tg3_flags |=
-					(TG3_FLAG_RX_PAUSE |
-					 TG3_FLAG_TX_PAUSE);
-			else if (remote_adv & LPA_PAUSE_ASYM)
-				new_tg3_flags |=
-					(TG3_FLAG_RX_PAUSE);
-		} else {
-			if (remote_adv & LPA_PAUSE_CAP)
-				new_tg3_flags |=
-					(TG3_FLAG_RX_PAUSE |
-					 TG3_FLAG_TX_PAUSE);
+	if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
+		if (local_adv & ADVERTISE_PAUSE_CAP) {
+			if (local_adv & ADVERTISE_PAUSE_ASYM) {
+				if (remote_adv & LPA_PAUSE_CAP)
+					new_tg3_flags |=
+						(TG3_FLAG_RX_PAUSE |
+					 	TG3_FLAG_TX_PAUSE);
+				else if (remote_adv & LPA_PAUSE_ASYM)
+					new_tg3_flags |=
+						(TG3_FLAG_RX_PAUSE);
+			} else {
+				if (remote_adv & LPA_PAUSE_CAP)
+					new_tg3_flags |=
+						(TG3_FLAG_RX_PAUSE |
+					 	TG3_FLAG_TX_PAUSE);
+			}
+		} else if (local_adv & ADVERTISE_PAUSE_ASYM) {
+			if ((remote_adv & LPA_PAUSE_CAP) &&
+		    	(remote_adv & LPA_PAUSE_ASYM))
+				new_tg3_flags |= TG3_FLAG_TX_PAUSE;
 		}
-	} else if (local_adv & ADVERTISE_PAUSE_ASYM) {
-		if ((remote_adv & LPA_PAUSE_CAP) &&
-		    (remote_adv & LPA_PAUSE_ASYM))
-			new_tg3_flags |= TG3_FLAG_TX_PAUSE;
-	}
 
-	tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
-	tp->tg3_flags |= new_tg3_flags;
+		tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
+		tp->tg3_flags |= new_tg3_flags;
+	} else {
+		new_tg3_flags = tp->tg3_flags;
+	}
 
 	if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
 		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
@@ -6585,8 +6589,8 @@
 	struct tg3 *tp = netdev_priv(dev);
   
 	epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
-	epause->rx_pause = (tp->tg3_flags & TG3_FLAG_PAUSE_RX) != 0;
-	epause->tx_pause = (tp->tg3_flags & TG3_FLAG_PAUSE_TX) != 0;
+	epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
+	epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
 }
   
 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
@@ -6601,13 +6605,13 @@
 	else
 		tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
 	if (epause->rx_pause)
-		tp->tg3_flags |= TG3_FLAG_PAUSE_RX;
+		tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
 	else
-		tp->tg3_flags &= ~TG3_FLAG_PAUSE_RX;
+		tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
 	if (epause->tx_pause)
-		tp->tg3_flags |= TG3_FLAG_PAUSE_TX;
+		tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
 	else
-		tp->tg3_flags &= ~TG3_FLAG_PAUSE_TX;
+		tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
 	tg3_halt(tp);
 	tg3_init_hw(tp);
 	tg3_netif_start(tp);
@@ -8340,6 +8344,9 @@
 
 	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
 		dev->features &= ~NETIF_F_HIGHDMA;
+
+	/* flow control autonegotiation is default behavior */
+	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
 
 	err = register_netdev(dev);
 	if (err) {
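#
# Illustrative sketch (not part of the patch): the tg3.c hunk above only
# derives the RX/TX pause flags from the advertised and link-partner ability
# bits when TG3_FLAG_PAUSE_AUTONEG is set; otherwise the flags chosen via
# ethtool are left alone. The autoneg branch is the usual 802.3
# symmetric/asymmetric pause resolution, which can be condensed into the
# hypothetical helper below (assumes the mii.h advertisement constants and
# the tg3.h flag bits):
#
	static u32 tg3_resolve_pause(u32 local_adv, u32 remote_adv)
	{
		u32 flags = 0;

		if (local_adv & ADVERTISE_PAUSE_CAP) {
			if (remote_adv & LPA_PAUSE_CAP)
				flags |= TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE;
			else if ((local_adv & ADVERTISE_PAUSE_ASYM) &&
				 (remote_adv & LPA_PAUSE_ASYM))
				flags |= TG3_FLAG_RX_PAUSE;
		} else if ((local_adv & ADVERTISE_PAUSE_ASYM) &&
			   (remote_adv & LPA_PAUSE_CAP) &&
			   (remote_adv & LPA_PAUSE_ASYM)) {
			flags |= TG3_FLAG_TX_PAUSE;
		}
		return flags;
	}
#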
diff -Nru a/drivers/net/tg3.h b/drivers/net/tg3.h
--- a/drivers/net/tg3.h	2004-09-15 20:29:40 -07:00
+++ b/drivers/net/tg3.h	2004-09-15 20:29:40 -07:00
@@ -2069,8 +2069,6 @@
 #define TG3_FLAG_JUMBO_ENABLE		0x00800000
 #define TG3_FLAG_10_100_ONLY		0x01000000
 #define TG3_FLAG_PAUSE_AUTONEG		0x02000000
-#define TG3_FLAG_PAUSE_RX		0x04000000
-#define TG3_FLAG_PAUSE_TX		0x08000000
 #define TG3_FLAG_BROKEN_CHECKSUMS	0x10000000
 #define TG3_FLAG_GOT_SERDES_FLOWCTL	0x20000000
 #define TG3_FLAG_SPLIT_MODE		0x40000000
diff -Nru a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
--- a/drivers/oprofile/buffer_sync.c	2004-09-15 20:29:40 -07:00
+++ b/drivers/oprofile/buffer_sync.c	2004-09-15 20:29:40 -07:00
@@ -133,7 +133,7 @@
  
 static void end_sync(void)
 {
-	end_cpu_timers();
+	end_cpu_work();
 	/* make sure we don't leak task structs */
 	process_task_mortuary();
 	process_task_mortuary();
@@ -144,7 +144,7 @@
 {
 	int err;
 
-	start_cpu_timers();
+	start_cpu_work();
 
 	err = task_handoff_register(&task_free_nb);
 	if (err)
@@ -339,40 +339,25 @@
 	}
 }
  
- 
+
 static void release_mm(struct mm_struct * mm)
 {
-	if (mm)
-		up_read(&mm->mmap_sem);
+	if (!mm)
+		return;
+	up_read(&mm->mmap_sem);
+	mmput(mm);
 }
 
 
-/* Take the task's mmap_sem to protect ourselves from
- * races when we do lookup_dcookie().
- */
 static struct mm_struct * take_tasks_mm(struct task_struct * task)
 {
-	struct mm_struct * mm;
-       
-	/* Subtle. We don't need to keep a reference to this task's mm,
-	 * because, for the mm to be freed on another CPU, that would have
-	 * to go through the task exit notifier, which ends up sleeping
-	 * on the buffer_sem we hold, so we end up with mutual exclusion
-	 * anyway.
-	 */
-	task_lock(task);
-	mm = task->mm;
-	task_unlock(task);
- 
-	if (mm) {
-		/* needed to walk the task's VMAs */
+	struct mm_struct * mm = get_task_mm(task);
+	if (mm)
 		down_read(&mm->mmap_sem);
-	}
- 
 	return mm;
 }
- 
- 
+
+
 static inline int is_ctx_switch(unsigned long val)
 {
 	return val == ~0UL;
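#
# Illustrative sketch (not part of the patch): the buffer_sync.c change above
# moves from peeking at task->mm under task_lock() to get_task_mm(), which
# returns the mm with a reference held (or NULL), balanced by the new mmput()
# in release_mm(). A stand-alone example of that pairing, using a hypothetical
# walker function:
#
	static void walk_task_vmas(struct task_struct *task)
	{
		struct mm_struct *mm = get_task_mm(task);	/* NULL if the task has no mm */
		struct vm_area_struct *vma;

		if (!mm)
			return;

		down_read(&mm->mmap_sem);		/* needed to walk the task's VMAs */
		for (vma = mm->mmap; vma; vma = vma->vm_next)
			;				/* inspect vma here */
		up_read(&mm->mmap_sem);

		mmput(mm);				/* drop the reference from get_task_mm() */
	}
#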
diff -Nru a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
--- a/drivers/oprofile/cpu_buffer.c	2004-09-15 20:29:40 -07:00
+++ b/drivers/oprofile/cpu_buffer.c	2004-09-15 20:29:40 -07:00
@@ -30,7 +30,7 @@
 static void wq_sync_buffer(void *);
 
 #define DEFAULT_TIMER_EXPIRE (HZ / 10)
-int timers_enabled;
+int work_enabled;
 
 static void __free_cpu_buffers(int num)
 {
@@ -80,11 +80,11 @@
 }
 
 
-void start_cpu_timers(void)
+void start_cpu_work(void)
 {
 	int i;
 
-	timers_enabled = 1;
+	work_enabled = 1;
 
 	for_each_online_cpu(i) {
 		struct oprofile_cpu_buffer * b = &cpu_buffer[i];
@@ -98,11 +98,11 @@
 }
 
 
-void end_cpu_timers(void)
+void end_cpu_work(void)
 {
 	int i;
 
-	timers_enabled = 0;
+	work_enabled = 0;
 
 	for_each_online_cpu(i) {
 		struct oprofile_cpu_buffer * b = &cpu_buffer[i];
@@ -220,6 +220,6 @@
 	sync_buffer(b->cpu);
 
 	/* don't re-add the work if we're shutting down */
-	if (timers_enabled)
+	if (work_enabled)
 		schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
 }
diff -Nru a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h
--- a/drivers/oprofile/cpu_buffer.h	2004-09-15 20:29:40 -07:00
+++ b/drivers/oprofile/cpu_buffer.h	2004-09-15 20:29:40 -07:00
@@ -20,8 +20,8 @@
 int alloc_cpu_buffers(void);
 void free_cpu_buffers(void);
 
-void start_cpu_timers(void);
-void end_cpu_timers(void);
+void start_cpu_work(void);
+void end_cpu_work(void);
 
 /* CPU buffer is composed of such entries (which are
  * also used for context switch notes)
diff -Nru a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
--- a/drivers/pci/hotplug/rpaphp_core.c	2004-09-15 20:29:40 -07:00
+++ b/drivers/pci/hotplug/rpaphp_core.c	2004-09-15 20:29:40 -07:00
@@ -449,7 +449,7 @@
 
 static int disable_slot(struct hotplug_slot *hotplug_slot)
 {
-	int retval;
+	int retval = -EINVAL;
 	struct slot *slot = (struct slot *)hotplug_slot->private;
 
 	dbg("%s - Entry: slot[%s]\n", __FUNCTION__, slot->name);
diff -Nru a/drivers/pci/hotplug/rpaphp_pci.c b/drivers/pci/hotplug/rpaphp_pci.c
--- a/drivers/pci/hotplug/rpaphp_pci.c	2004-09-15 20:29:40 -07:00
+++ b/drivers/pci/hotplug/rpaphp_pci.c	2004-09-15 20:29:40 -07:00
@@ -186,7 +186,7 @@
 rpaphp_pci_config_slot(struct device_node *dn, struct pci_bus *bus)
 {
 	struct device_node *eads_first_child = dn->child;
-	struct pci_dev *dev;
+	struct pci_dev *dev = NULL;
 	int num;
 	
 	dbg("Enter %s: dn=%s bus=%s\n", __FUNCTION__, dn->full_name, bus->name);
diff -Nru a/drivers/pnp/pnpbios/rsparser.c b/drivers/pnp/pnpbios/rsparser.c
--- a/drivers/pnp/pnpbios/rsparser.c	2004-09-15 20:29:40 -07:00
+++ b/drivers/pnp/pnpbios/rsparser.c	2004-09-15 20:29:40 -07:00
@@ -346,12 +346,12 @@
 {
 	unsigned int len, tag;
 	int priority = 0;
-	struct pnp_option *option;
+	struct pnp_option *option, *option_independent;
 
 	if (!p)
 		return NULL;
 
-	option = pnp_register_independent_option(dev);
+	option_independent = option = pnp_register_independent_option(dev);
 	if (!option)
 		return NULL;
 
@@ -428,9 +428,14 @@
 		case SMALL_TAG_ENDDEP:
 			if (len != 0)
 				goto len_err;
+			if (option_independent == option)
+				printk(KERN_WARNING "PnPBIOS: Missing SMALL_TAG_STARTDEP tag\n");
+			option = option_independent;
 			break;
 
 		case SMALL_TAG_END:
+			if (option_independent != option)
+				printk(KERN_WARNING "PnPBIOS: Missing SMALL_TAG_ENDDEP tag\n");
 			p = p + 2;
         		return (unsigned char *)p;
 			break;
diff -Nru a/drivers/serial/8250.c b/drivers/serial/8250.c
--- a/drivers/serial/8250.c	2004-09-15 20:29:40 -07:00
+++ b/drivers/serial/8250.c	2004-09-15 20:29:40 -07:00
@@ -188,6 +188,9 @@
 	case UPIO_MEM:
 		return readb(up->port.membase + offset);
 
+	case UPIO_MEM32:
+		return readl(up->port.membase + offset);
+
 	default:
 		return inb(up->port.iobase + offset);
 	}
@@ -206,6 +209,10 @@
 
 	case UPIO_MEM:
 		writeb(value, up->port.membase + offset);
+		break;
+
+	case UPIO_MEM32:
+		writel(value, up->port.membase + offset);
 		break;
 
 	default:
diff -Nru a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c
--- a/drivers/serial/serial_core.c	2004-09-15 20:29:40 -07:00
+++ b/drivers/serial/serial_core.c	2004-09-15 20:29:40 -07:00
@@ -1978,6 +1978,7 @@
 		printk("I/O 0x%x offset 0x%x", port->iobase, port->hub6);
 		break;
 	case UPIO_MEM:
+	case UPIO_MEM32:
 		printk("MMIO 0x%lx", port->mapbase);
 		break;
 	}
diff -Nru a/drivers/video/riva/rivafb-i2c.c b/drivers/video/riva/rivafb-i2c.c
--- a/drivers/video/riva/rivafb-i2c.c	2004-09-15 20:29:40 -07:00
+++ b/drivers/video/riva/rivafb-i2c.c	2004-09-15 20:29:40 -07:00
@@ -105,9 +105,9 @@
 	chan->algo.setscl		= riva_gpio_setscl;
 	chan->algo.getsda		= riva_gpio_getsda;
 	chan->algo.getscl		= riva_gpio_getscl;
-	chan->algo.udelay		= 5;
+	chan->algo.udelay		= 40;
 	chan->algo.mdelay               = 5;
-	chan->algo.timeout		= 10;
+	chan->algo.timeout		= 20;
 	chan->algo.data 		= chan;
 
 	i2c_set_adapdata(&chan->adapter, chan);
diff -Nru a/fs/dquot.c b/fs/dquot.c
--- a/fs/dquot.c	2004-09-15 20:29:40 -07:00
+++ b/fs/dquot.c	2004-09-15 20:29:40 -07:00
@@ -120,7 +120,7 @@
  * i_sem on quota files is special (it's below dqio_sem)
  */
 
-spinlock_t dq_list_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t dq_list_lock = SPIN_LOCK_UNLOCKED;
 spinlock_t dq_data_lock = SPIN_LOCK_UNLOCKED;
 
 static char *quotatypes[] = INITQFNAMES;
@@ -1792,7 +1792,6 @@
 EXPORT_SYMBOL(register_quota_format);
 EXPORT_SYMBOL(unregister_quota_format);
 EXPORT_SYMBOL(dqstats);
-EXPORT_SYMBOL(dq_list_lock);
 EXPORT_SYMBOL(dq_data_lock);
 EXPORT_SYMBOL(vfs_quota_on);
 EXPORT_SYMBOL(vfs_quota_on_mount);
diff -Nru a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
--- a/fs/hugetlbfs/inode.c	2004-09-15 20:29:40 -07:00
+++ b/fs/hugetlbfs/inode.c	2004-09-15 20:29:40 -07:00
@@ -800,8 +800,7 @@
 
 	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
 					sizeof(struct hugetlbfs_inode_info),
-					0, SLAB_RECLAIM_ACCOUNT,
-					init_once, NULL);
+					0, 0, init_once, NULL);
 	if (hugetlbfs_inode_cachep == NULL)
 		return -ENOMEM;
 
diff -Nru a/fs/isofs/rock.c b/fs/isofs/rock.c
--- a/fs/isofs/rock.c	2004-09-15 20:29:40 -07:00
+++ b/fs/isofs/rock.c	2004-09-15 20:29:40 -07:00
@@ -62,7 +62,7 @@
 }                                     
 
 #define MAYBE_CONTINUE(LABEL,DEV) \
-  {if (buffer) kfree(buffer); \
+  {if (buffer) { kfree(buffer); buffer = NULL; } \
   if (cont_extent){ \
     int block, offset, offset1; \
     struct buffer_head * pbh; \
diff -Nru a/fs/partitions/msdos.c b/fs/partitions/msdos.c
--- a/fs/partitions/msdos.c	2004-09-15 20:29:40 -07:00
+++ b/fs/partitions/msdos.c	2004-09-15 20:29:40 -07:00
@@ -246,6 +246,9 @@
 		put_partition(state, state->next++, bsd_start, bsd_size);
 	}
 	put_dev_sector(sect);
+	if (le16_to_cpu(l->d_npartitions) > max_partitions)
+		printk(" (ignored %d more)",
+		       le16_to_cpu(l->d_npartitions) - max_partitions);
 	printk(" >\n");
 }
 #endif
diff -Nru a/include/asm-alpha/bitops.h b/include/asm-alpha/bitops.h
--- a/include/asm-alpha/bitops.h	2004-09-15 20:29:40 -07:00
+++ b/include/asm-alpha/bitops.h	2004-09-15 20:29:40 -07:00
@@ -324,7 +324,7 @@
 #endif
 
 /* Compute powers of two for the given integer.  */
-static inline int floor_log2(unsigned long word)
+static inline long floor_log2(unsigned long word)
 {
 #if defined(__alpha_cix__) && defined(__alpha_fix__)
 	return 63 - __kernel_ctlz(word);
@@ -336,7 +336,7 @@
 #endif
 }
 
-static inline int ceil_log2(unsigned int word)
+static inline long ceil_log2(unsigned long word)
 {
 	long bit = floor_log2(word);
 	return bit + (word > (1UL << bit));
diff -Nru a/include/asm-alpha/compiler.h b/include/asm-alpha/compiler.h
--- a/include/asm-alpha/compiler.h	2004-09-15 20:29:40 -07:00
+++ b/include/asm-alpha/compiler.h	2004-09-15 20:29:40 -07:00
@@ -17,9 +17,9 @@
 # define __kernel_extbl(val, shift)	__builtin_alpha_extbl(val, shift)
 # define __kernel_extwl(val, shift)	__builtin_alpha_extwl(val, shift)
 # define __kernel_cmpbge(a, b)		__builtin_alpha_cmpbge(a, b)
-# define __kernel_cttz(x)		__builtin_ctz(x)
-# define __kernel_ctlz(x)		__builtin_clz(x)
-# define __kernel_ctpop(x)		__builtin_popcount(x)
+# define __kernel_cttz(x)		__builtin_ctzl(x)
+# define __kernel_ctlz(x)		__builtin_clzl(x)
+# define __kernel_ctpop(x)		__builtin_popcountl(x)
 #else
 # define __kernel_insbl(val, shift)					\
   ({ unsigned long __kir;						\
diff -Nru a/include/asm-alpha/unistd.h b/include/asm-alpha/unistd.h
--- a/include/asm-alpha/unistd.h	2004-09-15 20:29:40 -07:00
+++ b/include/asm-alpha/unistd.h	2004-09-15 20:29:40 -07:00
@@ -373,8 +373,9 @@
 #define __NR_mq_timedreceive		435
 #define __NR_mq_notify			436
 #define __NR_mq_getsetattr		437
+#define __NR_waitid			438
 
-#define NR_SYSCALLS			438
+#define NR_SYSCALLS			439
 
 #if defined(__GNUC__)
 
diff -Nru a/include/asm-arm/apm.h b/include/asm-arm/apm.h
--- a/include/asm-arm/apm.h	2004-09-15 20:29:40 -07:00
+++ b/include/asm-arm/apm.h	2004-09-15 20:29:40 -07:00
@@ -14,24 +14,7 @@
 #define ARM_ASM_SA1100_APM_H
 
 #include <linux/config.h>
-
-#if defined(CONFIG_APM) || defined(CONFIG_APM_MODULE)
-
-
-#define APM_AC_OFFLINE 0
-#define APM_AC_ONLINE 1
-#define APM_AC_BACKUP 2
-#define APM_AC_UNKNOWN 0xFF
-
-#define APM_BATTERY_STATUS_HIGH 0
-#define APM_BATTERY_STATUS_LOW  1
-#define APM_BATTERY_STATUS_CRITICAL 2
-#define APM_BATTERY_STATUS_CHARGING 3
-#define APM_BATTERY_STATUS_UNKNOWN 0xFF
-
-#define APM_BATTERY_LIFE_UNKNOWN 0xFFFF
-#define APM_BATTERY_LIFE_MINUTES 0x8000
-#define APM_BATTERY_LIFE_VALUE_MASK 0x7FFF
+#include <linux/apm_bios.h>
 
 /*
  * This structure gets filled in by the machine specific 'get_power_status'
@@ -39,18 +22,44 @@
  */
 struct apm_power_info {
 	unsigned char	ac_line_status;
+#define APM_AC_OFFLINE			0
+#define APM_AC_ONLINE			1
+#define APM_AC_BACKUP			2
+#define APM_AC_UNKNOWN			0xff
+
 	unsigned char	battery_status;
+#define APM_BATTERY_STATUS_HIGH		0
+#define APM_BATTERY_STATUS_LOW		1
+#define APM_BATTERY_STATUS_CRITICAL	2
+#define APM_BATTERY_STATUS_CHARGING	3
+#define APM_BATTERY_STATUS_NOT_PRESENT	4
+#define APM_BATTERY_STATUS_UNKNOWN	0xff
+
 	unsigned char	battery_flag;
-	unsigned char	battery_life;
+#define APM_BATTERY_FLAG_HIGH		(1 << 0)
+#define APM_BATTERY_FLAG_LOW		(1 << 1)
+#define APM_BATTERY_FLAG_CRITICAL	(1 << 2)
+#define APM_BATTERY_FLAG_CHARGING	(1 << 3)
+#define APM_BATTERY_FLAG_NOT_PRESENT	(1 << 7)
+#define APM_BATTERY_FLAG_UNKNOWN	0xff
+
+	int		battery_life;
 	int		time;
 	int		units;
+#define APM_UNITS_MINS			0
+#define APM_UNITS_SECS			1
+#define APM_UNITS_UNKNOWN		-1
+
 };
 
 /*
  * This allows machines to provide their own "apm get power status" function.
  */
 extern void (*apm_get_power_status)(struct apm_power_info *);
-#endif
 
+/*
+ * Queue an event (APM_SYS_SUSPEND or APM_CRITICAL_SUSPEND)
+ */
+void apm_queue_event(apm_event_t event);
 
 #endif
diff -Nru a/include/asm-arm/arch-s3c2410/hardware.h b/include/asm-arm/arch-s3c2410/hardware.h
--- a/include/asm-arm/arch-s3c2410/hardware.h	2004-09-15 20:29:40 -07:00
+++ b/include/asm-arm/arch-s3c2410/hardware.h	2004-09-15 20:29:40 -07:00
@@ -14,6 +14,7 @@
  *  06-Jun-2003 BJD  Added CPU frequency settings
  *  03-Sep-2003 BJD  Linux v2.6 support
  *  12-Mar-2004 BJD  Fixed include protection, fixed type of clock vars
+ *  14-Sep-2004 BJD  Added misccr and getpin to gpio
 */
 
 #ifndef __ASM_ARCH_HARDWARE_H
@@ -60,6 +61,10 @@
 extern void s3c2410_gpio_pullup(unsigned int pin, unsigned int to);
 
 extern void s3c2410_gpio_setpin(unsigned int pin, unsigned int to);
+
+extern unsigned int s3c2410_gpio_getpin(unsigned int pin);
+
+extern unsigned int s3c2410_modify_misccr(unsigned int clr, unsigned int chg);
 
 #endif /* __ASSEMBLY__ */
 
diff -Nru a/include/asm-arm/arch-s3c2410/regs-udc.h b/include/asm-arm/arch-s3c2410/regs-udc.h
--- /dev/null	Wed Dec 31 16:00:00 196900
+++ b/include/asm-arm/arch-s3c2410/regs-udc.h	2004-09-15 20:29:40 -07:00
@@ -0,0 +1,162 @@
+/* linux/include/asm/arch-s3c2410/regs-udc.h
+ *
+ * Copyright (C) 2004 Herbert Poetzl <herbert@13thfloor.at>
+ *
+ * This include file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ *  Changelog:
+ *    01-08-2004	initial creation
+ *    12-09-2004	cleanup for submission
+ */
+
+#ifndef __ASM_ARCH_REGS_UDC_H
+#define __ASM_ARCH_REGS_UDC_H
+
+
+#define S3C2410_USBDREG(x) ((x) + S3C2410_VA_USBDEV)
+
+#define S3C2410_UDC_FUNC_ADDR_REG	S3C2410_USBDREG(0x0140)
+#define S3C2410_UDC_PWR_REG		S3C2410_USBDREG(0x0144)
+#define S3C2410_UDC_EP_INT_REG		S3C2410_USBDREG(0x0148)
+
+#define S3C2410_UDC_USB_INT_REG		S3C2410_USBDREG(0x0158)
+#define S3C2410_UDC_EP_INT_EN_REG	S3C2410_USBDREG(0x015c)
+
+#define S3C2410_UDC_USB_INT_EN_REG	S3C2410_USBDREG(0x016c)
+
+#define S3C2410_UDC_FRAME_NUM1_REG	S3C2410_USBDREG(0x0170)
+#define S3C2410_UDC_FRAME_NUM2_REG	S3C2410_USBDREG(0x0174)
+
+#define S3C2410_UDC_EP0_FIFO_REG	S3C2410_USBDREG(0x01c0)
+#define S3C2410_UDC_EP1_FIFO_REG	S3C2410_USBDREG(0x01c4)
+#define S3C2410_UDC_EP2_FIFO_REG	S3C2410_USBDREG(0x01c8)
+#define S3C2410_UDC_EP3_FIFO_REG	S3C2410_USBDREG(0x01cc)
+#define S3C2410_UDC_EP4_FIFO_REG	S3C2410_USBDREG(0x01d0)
+
+#define S3C2410_UDC_EP1_DMA_CON		S3C2410_USBDREG(0x0200)
+#define S3C2410_UDC_EP1_DMA_UNIT	S3C2410_USBDREG(0x0204)
+#define S3C2410_UDC_EP1_DMA_FIFO	S3C2410_USBDREG(0x0208)
+#define S3C2410_UDC_EP1_DMA_TTC_L	S3C2410_USBDREG(0x020c)
+#define S3C2410_UDC_EP1_DMA_TTC_M	S3C2410_USBDREG(0x0210)
+#define S3C2410_UDC_EP1_DMA_TTC_H	S3C2410_USBDREG(0x0214)
+
+#define S3C2410_UDC_EP2_DMA_CON		S3C2410_USBDREG(0x0218)
+#define S3C2410_UDC_EP2_DMA_UNIT	S3C2410_USBDREG(0x021c)
+#define S3C2410_UDC_EP2_DMA_FIFO	S3C2410_USBDREG(0x0220)
+#define S3C2410_UDC_EP2_DMA_TTC_L	S3C2410_USBDREG(0x0224)
+#define S3C2410_UDC_EP2_DMA_TTC_M	S3C2410_USBDREG(0x0228)
+#define S3C2410_UDC_EP2_DMA_TTC_H	S3C2410_USBDREG(0x022c)
+
+#define S3C2410_UDC_EP3_DMA_CON		S3C2410_USBDREG(0x0240)
+#define S3C2410_UDC_EP3_DMA_UNIT	S3C2410_USBDREG(0x0244)
+#define S3C2410_UDC_EP3_DMA_FIFO	S3C2410_USBDREG(0x0248)
+#define S3C2410_UDC_EP3_DMA_TTC_L	S3C2410_USBDREG(0x024c)
+#define S3C2410_UDC_EP3_DMA_TTC_M	S3C2410_USBDREG(0x0250)
+#define S3C2410_UDC_EP3_DMA_TTC_H	S3C2410_USBDREG(0x0254)
+
+#define S3C2410_UDC_EP4_DMA_CON		S3C2410_USBDREG(0x0258)
+#define S3C2410_UDC_EP4_DMA_UNIT	S3C2410_USBDREG(0x025c)
+#define S3C2410_UDC_EP4_DMA_FIFO	S3C2410_USBDREG(0x0260)
+#define S3C2410_UDC_EP4_DMA_TTC_L	S3C2410_USBDREG(0x0264)
+#define S3C2410_UDC_EP4_DMA_TTC_M	S3C2410_USBDREG(0x0268)
+#define S3C2410_UDC_EP4_DMA_TTC_H	S3C2410_USBDREG(0x026c)
+
+#define S3C2410_UDC_INDEX_REG		S3C2410_USBDREG(0x0178)
+
+/* indexed registers */
+
+#define S3C2410_UDC_MAXP_REG		S3C2410_USBDREG(0x018c)
+
+#define S3C2410_UDC_EP0_CSR_REG		S3C2410_USBDREG(0x0184)
+
+#define S3C2410_UDC_IN_CSR1_REG		S3C2410_USBDREG(0x0184)
+#define S3C2410_UDC_IN_CSR2_REG		S3C2410_USBDREG(0x0188)
+
+#define S3C2410_UDC_OUT_CSR1_REG	S3C2410_USBDREG(0x0190)
+#define S3C2410_UDC_OUT_CSR2_REG	S3C2410_USBDREG(0x0194)
+#define S3C2410_UDC_OUT_FIFO_CNT1_REG	S3C2410_USBDREG(0x0198)
+#define S3C2410_UDC_OUT_FIFO_CNT2_REG	S3C2410_USBDREG(0x019c)
+
+
+
+#define S3C2410_UDC_PWR_ISOUP		(1<<7) // R/W
+#define S3C2410_UDC_PWR_RESET		(1<<3) // R
+#define S3C2410_UDC_PWR_RESUME		(1<<2) // R/W
+#define S3C2410_UDC_PWR_SUSPEND		(1<<1) // R
+#define S3C2410_UDC_PWR_ENSUSPEND	(1<<0) // R/W
+
+#define S3C2410_UDC_PWR_DEFAULT		0x00
+
+#define S3C2410_UDC_INT_EP4		(1<<4) // R/W (clear only)
+#define S3C2410_UDC_INT_EP3		(1<<3) // R/W (clear only)
+#define S3C2410_UDC_INT_EP2		(1<<2) // R/W (clear only)
+#define S3C2410_UDC_INT_EP1		(1<<1) // R/W (clear only)
+#define S3C2410_UDC_INT_EP0		(1<<0) // R/W (clear only)
+
+#define S3C2410_UDC_USBINT_RESET	(1<<2) // R/W (clear only)
+#define S3C2410_UDC_USBINT_RESUME	(1<<1) // R/W (clear only)
+#define S3C2410_UDC_USBINT_SUSPEND	(1<<0) // R/W (clear only)
+
+#define S3C2410_UDC_INTE_EP4		(1<<4) // R/W
+#define S3C2410_UDC_INTE_EP3		(1<<3) // R/W
+#define S3C2410_UDC_INTE_EP2		(1<<2) // R/W
+#define S3C2410_UDC_INTE_EP1		(1<<1) // R/W
+#define S3C2410_UDC_INTE_EP0		(1<<0) // R/W
+
+#define S3C2410_UDC_USBINTE_RESET	(1<<2) // R/W
+#define S3C2410_UDC_USBINTE_SUSPEND	(1<<0) // R/W
+
+
+#define S3C2410_UDC_INDEX_EP0		(0x00)
+#define S3C2410_UDC_INDEX_EP1		(0x01) // ??
+#define S3C2410_UDC_INDEX_EP2		(0x02) // ??
+#define S3C2410_UDC_INDEX_EP3		(0x03) // ??
+#define S3C2410_UDC_INDEX_EP4		(0x04) // ??
+
+#define S3C2410_UDC_ICSR1_CLRDT		(1<<6) // R/W
+#define S3C2410_UDC_ICSR1_SENTSTL	(1<<5) // R/W (clear only)
+#define S3C2410_UDC_ICSR1_SENDSTL	(1<<4) // R/W
+#define S3C2410_UDC_ICSR1_FFLUSH	(1<<3) // W   (set only)
+#define S3C2410_UDC_ICSR1_UNDRUN	(1<<2) // R/W (clear only)
+#define S3C2410_UDC_ICSR1_PKTRDY	(1<<0) // R/W (set only)
+
+#define S3C2410_UDC_ICSR2_AUTOSET	(1<<7) // R/W
+#define S3C2410_UDC_ICSR2_ISO		(1<<6) // R/W
+#define S3C2410_UDC_ICSR2_MODEIN	(1<<5) // R/W
+#define S3C2410_UDC_ICSR2_DMAIEN	(1<<4) // R/W
+
+#define S3C2410_UDC_OCSR1_CLRDT		(1<<7) // R/W
+#define S3C2410_UDC_OCSR1_SENTSTL	(1<<6) // R/W (clear only)
+#define S3C2410_UDC_OCSR1_SENDSTL	(1<<5) // R/W
+#define S3C2410_UDC_OCSR1_FFLUSH	(1<<4) // R/W
+#define S3C2410_UDC_OCSR1_DERROR	(1<<3) // R
+#define S3C2410_UDC_OCSR1_OVRRUN	(1<<2) // R/W (clear only)
+#define S3C2410_UDC_OCSR1_PKTRDY	(1<<0) // R/W (clear only)
+
+#define S3C2410_UDC_OCSR2_AUTOCLR	(1<<7) // R/W
+#define S3C2410_UDC_OCSR2_ISO		(1<<6) // R/W
+#define S3C2410_UDC_OCSR2_DMAIEN	(1<<5) // R/W
+
+#define S3C2410_UDC_SETIX(x)	    \
+	__raw_writel(S3C2410_UDC_INDEX_ ## x, S3C2410_UDC_INDEX_REG);
+
+
+#define S3C2410_UDC_EP0_CSR_OPKRDY	(1<<0)
+#define S3C2410_UDC_EP0_CSR_IPKRDY	(1<<1)
+#define S3C2410_UDC_EP0_CSR_SENTSTL	(1<<2)
+#define S3C2410_UDC_EP0_CSR_DE		(1<<3)
+#define S3C2410_UDC_EP0_CSR_SE		(1<<4)
+#define S3C2410_UDC_EP0_CSR_SENDSTL	(1<<5)
+#define S3C2410_UDC_EP0_CSR_SOPKTRDY	(1<<6)
+#define S3C2410_UDC_EP0_CSR_SSE	(1<<7)
+
+#define S3C2410_UDC_MAXP_8		(1<<0)
+#define S3C2410_UDC_MAXP_16		(1<<1)
+#define S3C2410_UDC_MAXP_32		(1<<2)
+#define S3C2410_UDC_MAXP_64		(1<<3)
+
+
+#endif
diff -Nru a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h
--- /dev/null	Wed Dec 31 16:00:00 196900
+++ b/include/asm-generic/iomap.h	2004-09-15 20:29:40 -07:00
@@ -0,0 +1,63 @@
+#ifndef __GENERIC_IO_H
+#define __GENERIC_IO_H
+
+#include <linux/linkage.h>
+
+/*
+ * These are the "generic" interfaces for doing new-style
+ * memory-mapped or PIO accesses. Architectures may do
+ * their own arch-optimized versions, these just act as
+ * wrappers around the old-style IO register access functions:
+ * read[bwl]/write[bwl]/in[bwl]/out[bwl]
+ *
+ * Don't include this directly, include it from <asm/io.h>.
+ */
+
+/*
+ * Read/write from/to an (offsettable) iomem cookie. It might be a PIO
+ * access or a MMIO access, these functions don't care. The info is
+ * encoded in the hardware mapping set up by the mapping functions
+ * (or the cookie itself, depending on implementation and hw).
+ *
+ * The generic routines just encode the PIO/MMIO as part of the
+ * cookie, and coldly assume that the MMIO IO mappings are not
+ * in the low address range. Architectures for which this is not
+ * true can't use this generic implementation.
+ */
+extern unsigned int fastcall ioread8(void __iomem *);
+extern unsigned int fastcall ioread16(void __iomem *);
+extern unsigned int fastcall ioread32(void __iomem *);
+
+extern void fastcall iowrite8(u8, void __iomem *);
+extern void fastcall iowrite16(u16, void __iomem *);
+extern void fastcall iowrite32(u32, void __iomem *);
+
+/*
+ * "string" versions of the above. Note that they
+ * use native byte ordering for the accesses (on
+ * the assumption that IO and memory agree on a
+ * byte order, and CPU byteorder is irrelevant).
+ *
+ * They do _not_ update the port address. If you
+ * want MMIO that copies stuff laid out in MMIO
+ * memory across multiple ports, use "memcpy_toio()"
+ * and friends.
+ */
+extern void fastcall ioread8_rep(void __iomem *port, void *buf, unsigned long count);
+extern void fastcall ioread16_rep(void __iomem *port, void *buf, unsigned long count);
+extern void fastcall ioread32_rep(void __iomem *port, void *buf, unsigned long count);
+
+extern void fastcall iowrite8_rep(void __iomem *port, const void *buf, unsigned long count);
+extern void fastcall iowrite16_rep(void __iomem *port, const void *buf, unsigned long count);
+extern void fastcall iowrite32_rep(void __iomem *port, const void *buf, unsigned long count);
+
+/* Create a virtual mapping cookie for an IO port range */
+extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
+extern void ioport_unmap(void __iomem *);
+
+/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
+struct pci_dev;
+extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
+extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
+
+#endif
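#
# Illustrative sketch (not part of the patch): a minimal example of how a PCI
# driver might use the interfaces declared above. The BAR number, register
# offsets, bit meanings and transfer length are hypothetical; a maxlen of 0
# asks pci_iomap() to map the whole BAR.
#
	static int toy_probe(struct pci_dev *pdev)
	{
		void __iomem *regs;
		u16 buf[256];

		regs = pci_iomap(pdev, 0, 0);		/* BAR 0, map its full length */
		if (!regs)
			return -ENOMEM;

		iowrite32(0x1, regs + 0x04);		/* hypothetical "go" command register */

		if (ioread32(regs + 0x00) & 0x1)	/* hypothetical "data ready" bit */
			/* string read: 256 words from one non-incrementing data port */
			ioread16_rep(regs + 0x08, buf, 256);

		pci_iounmap(pdev, regs);
		return 0;
	}
#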
diff -Nru a/include/asm-h8300/softirq.h b/include/asm-h8300/softirq.h
--- a/include/asm-h8300/softirq.h	2004-09-15 20:29:40 -07:00
+++ /dev/null	Wed Dec 31 16:00:00 196900
@@ -1,20 +0,0 @@
-#ifndef __ASM_SOFTIRQ_H
-#define __ASM_SOFTIRQ_H
-
-#include <linux/preempt.h>
-#include <asm/hardirq.h>
-
-#define local_bh_disable() \
-		do { preempt_count() += SOFTIRQ_OFFSET; barrier(); } while (0)
-#define __local_bh_enable() \
-		do { barrier(); preempt_count() -= SOFTIRQ_OFFSET; } while (0)
-
-#define local_bh_enable()						\
-do {									\
-	__local_bh_enable();						\
-	if (unlikely(!in_interrupt() && softirq_pending(smp_processor_id()))) \
-		do_softirq();						\
-	preempt_check_resched();					\
-} while (0)
-
-#endif	/* __ASM_SOFTIRQ_H */
diff -Nru a/include/asm-i386/dma-mapping.h b/include/asm-i386/dma-mapping.h
--- a/include/asm-i386/dma-mapping.h	2004-09-15 20:29:40 -07:00
+++ b/include/asm-i386/dma-mapping.h	2004-09-15 20:29:40 -07:00
@@ -1,7 +1,6 @@
 #ifndef _ASM_I386_DMA_MAPPING_H
 #define _ASM_I386_DMA_MAPPING_H
 
-#include <linux/device.h>
 #include <linux/mm.h>
 
 #include <asm/cache.h>
diff -Nru a/include/asm-i386/elf.h b/include/asm-i386/elf.h
--- a/include/asm-i386/elf.h	2004-09-15 20:29:40 -07:00
+++ b/include/asm-i386/elf.h	2004-09-15 20:29:40 -07:00
@@ -123,7 +123,7 @@
  * An executable for which elf_read_implies_exec() returns TRUE will
  * have the READ_IMPLIES_EXEC personality flag set automatically.
  */
-#define elf_read_implies_exec_binary(ex, have_pt_gnu_stack)	(!(have_pt_gnu_stack))
+#define elf_read_implies_exec(ex, have_pt_gnu_stack)	(!(have_pt_gnu_stack))
 
 extern int dump_task_regs (struct task_struct *, elf_gregset_t *);
 extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
diff -Nru a/include/asm-i386/io.h b/include/asm-i386/io.h
--- a/include/asm-i386/io.h	2004-09-15 20:29:40 -07:00
+++ b/include/asm-i386/io.h	2004-09-15 20:29:40 -07:00
@@ -45,6 +45,8 @@
 
 #ifdef __KERNEL__
 
+#include <asm-generic/iomap.h>
+
 #include <linux/vmalloc.h>
 
 /**
diff -Nru a/include/asm-ia64/dma-mapping.h b/include/asm-ia64/dma-mapping.h
--- a/include/asm-ia64/dma-mapping.h	2004-09-15 20:29:40 -07:00
+++ b/include/asm-ia64/dma-mapping.h	2004-09-15 20:29:40 -07:00
@@ -6,7 +6,6 @@
  *	David Mosberger-Tang <davidm@hpl.hp.com>
  */
 #include <linux/config.h>
-#include <linux/device.h>
 #include <asm/machvec.h>
 
 #define dma_alloc_coherent	platform_dma_alloc_coherent
diff -Nru a/include/asm-mips/dma-mapping.h b/include/asm-mips/dma-mapping.h
--- a/include/asm-mips/dma-mapping.h	2004-09-15 20:29:40 -07:00
+++ b/include/asm-mips/dma-mapping.h	2004-09-15 20:29:40 -07:00
@@ -1,7 +1,6 @@
 #ifndef _ASM_DMA_MAPPING_H
 #define _ASM_DMA_MAPPING_H
 
-#include <linux/device.h>
 #include <asm/scatterlist.h>
 #include <asm/cache.h>
 
diff -Nru a/include/asm-ppc/dma-mapping.h b/include/asm-ppc/dma-mapping.h
--- a/include/asm-ppc/dma-mapping.h	2004-09-15 20:29:40 -07:00
+++ b/include/asm-ppc/dma-mapping.h	2004-09-15 20:29:40 -07:00
@@ -8,7 +8,6 @@
 #include <linux/config.h>
 /* need struct page definitions */
 #include <linux/mm.h>
-#include <linux/device.h>
 #include <asm/scatterlist.h>
 #include <asm/io.h>
 
diff -Nru a/include/asm-ppc64/dma-mapping.h b/include/asm-ppc64/dma-mapping.h
--- a/include/asm-ppc64/dma-mapping.h	2004-09-15 20:29:40 -07:00
+++ b/include/asm-ppc64/dma-mapping.h	2004-09-15 20:29:40 -07:00
@@ -8,7 +8,6 @@
 #define _ASM_DMA_MAPPING_H
 
 #include <linux/types.h>
-#include <linux/device.h>
 #include <linux/cache.h>
 /* need struct page definitions */
 #include <linux/mm.h>
diff -Nru a/include/asm-ppc64/io.h b/include/asm-ppc64/io.h
--- a/include/asm-ppc64/io.h	2004-09-15 20:29:40 -07:00
+++ b/include/asm-ppc64/io.h	2004-09-15 20:29:40 -07:00
@@ -18,6 +18,8 @@
 #include <asm/memory.h>
 #include <asm/delay.h>
 
+#include <asm-generic/iomap.h>
+
 #define __ide_mm_insw(p, a, c) _insw_ns((volatile u16 *)(p), (a), (c))
 #define __ide_mm_insl(p, a, c) _insl_ns((volatile u32 *)(p), (a), (c))
 #define __ide_mm_outsw(p, a, c) _outsw_ns((volatile u16 *)(p), (a), (c))
diff -Nru a/include/asm-ppc64/memory.h b/include/asm-ppc64/memory.h
--- a/include/asm-ppc64/memory.h	2004-09-15 20:29:40 -07:00
+++ b/include/asm-ppc64/memory.h	2004-09-15 20:29:40 -07:00
@@ -56,14 +56,4 @@
 #define HMT_MEDIUM_HIGH "\tor   5,5,5           # medium high priority\n"
 #define HMT_HIGH	"\tor	3,3,3		# high priority\n"
 
-/* 
- * Various operational modes for SMT
- * Off    : never run threaded
- * On     : always run threaded
- * Dynamic: Allow the system to switch modes as needed
- */
-#define SMT_OFF      0
-#define SMT_ON       1
-#define SMT_DYNAMIC  2
-
 #endif
diff -Nru a/include/asm-ppc64/mmu.h b/include/asm-ppc64/mmu.h
--- a/include/asm-ppc64/mmu.h	2004-09-15 20:29:40 -07:00
+++ b/include/asm-ppc64/mmu.h	2004-09-15 20:29:40 -07:00
@@ -15,6 +15,7 @@
 
 #include <linux/config.h>
 #include <asm/page.h>
+#include <linux/stringify.h>
 
 #ifndef __ASSEMBLY__
 
@@ -215,12 +216,44 @@
 #define SLB_VSID_KERNEL		(SLB_VSID_KP|SLB_VSID_C)
 #define SLB_VSID_USER		(SLB_VSID_KP|SLB_VSID_KS)
 
-#define VSID_RANDOMIZER ASM_CONST(42470972311)
-#define VSID_MASK	0xfffffffffUL
-/* Because we never access addresses below KERNELBASE as kernel
- * addresses, this VSID is never used for anything real, and will
- * never have pages hashed into it */
-#define BAD_VSID	ASM_CONST(0)
+#define VSID_MULTIPLIER	ASM_CONST(268435399)	/* largest 28-bit prime */
+#define VSID_BITS	36
+#define VSID_MODULUS	((1UL<<VSID_BITS)-1)
+
+#define CONTEXT_BITS	20
+#define USER_ESID_BITS	15
+
+/*
+ * This macro generates asm code to compute the VSID scramble
+ * function.  Used in slb_allocate() and do_stab_bolted.  The function
+ * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
+ *
+ *	rt = register containing the proto-VSID and into which the
+ *		VSID will be stored
+ *	rx = scratch register (clobbered)
+ *
+ * 	- rt and rx must be different registers
+ * 	- The answer will end up in the low 36 bits of rt.  The higher
+ * 	  bits may contain other garbage, so you may need to mask the
+ * 	  result.
+ */
+#define ASM_VSID_SCRAMBLE(rt, rx)	\
+	lis	rx,VSID_MULTIPLIER@h;					\
+	ori	rx,rx,VSID_MULTIPLIER@l;				\
+	mulld	rt,rt,rx;		/* rt = rt * MULTIPLIER */	\
+									\
+	srdi	rx,rt,VSID_BITS;					\
+	clrldi	rt,rt,(64-VSID_BITS);					\
+	add	rt,rt,rx;		/* add high and low bits */	\
+	/* Now, r3 == VSID (mod 2^36-1), and lies between 0 and		\
+	 * 2^36-1+2^28-1.  That in particular means that if r3 >=	\
+	 * 2^36-1, then r3+1 has the 2^36 bit set.  So, if r3+1 has	\
+	 * the bit clear, r3 already has the answer we want, if it	\
+	 * doesn't, the answer is the low 36 bits of r3+1.  So in all	\
+	 * cases the answer is the low 36 bits of (r3 + ((r3+1) >> 36))*/\
+	addi	rx,rt,1;						\
+	srdi	rx,rx,VSID_BITS;	/* extract 2^36 bit */		\
+	add	rt,rt,rx
 
 /* Block size masks */
 #define BL_128K	0x000
diff -Nru a/include/asm-ppc64/mmu_context.h b/include/asm-ppc64/mmu_context.h
--- a/include/asm-ppc64/mmu_context.h	2004-09-15 20:29:40 -07:00
+++ b/include/asm-ppc64/mmu_context.h	2004-09-15 20:29:40 -07:00
@@ -34,7 +34,7 @@
 }
 
 #define NO_CONTEXT		0
-#define FIRST_USER_CONTEXT	0x10    /* First 16 reserved for kernel */
+#define FIRST_USER_CONTEXT	1
 #define LAST_USER_CONTEXT	0x8000  /* Same as PID_MAX for now... */
 #define NUM_USER_CONTEXT	(LAST_USER_CONTEXT-FIRST_USER_CONTEXT)
 
@@ -181,46 +181,87 @@
 	local_irq_restore(flags);
 }
 
-/* This is only valid for kernel (including vmalloc, imalloc and bolted) EA's
+/* VSID allocation
+ * ===============
+ *
+ * We first generate a 36-bit "proto-VSID".  For kernel addresses this
+ * is equal to the ESID, for user addresses it is:
+ *	(context << 15) | (esid & 0x7fff)
+ *
+ * The two forms are distinguishable because the top bit is 0 for user
+ * addresses, whereas the top two bits are 1 for kernel addresses.
+ * Proto-VSIDs with the top two bits equal to 0b10 are reserved for
+ * now.
+ *
+ * The proto-VSIDs are then scrambled into real VSIDs with the
+ * multiplicative hash:
+ *
+ *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
+ *	where	VSID_MULTIPLIER = 268435399 = 0xFFFFFC7
+ *		VSID_MODULUS = 2^36-1 = 0xFFFFFFFFF
+ *
+ * This scramble is only well defined for proto-VSIDs below
+ * 0xFFFFFFFFF, so both proto-VSID and actual VSID 0xFFFFFFFFF are
+ * reserved.  VSID_MULTIPLIER is prime (the largest 28-bit prime, in
+ * fact), so in particular it is co-prime to VSID_MODULUS, making this
+ * a 1:1 scrambling function.  Because the modulus is 2^n-1 we can
+ * compute it efficiently without a divide or extra multiply (see
+ * below).
+ *
+ * This scheme has several advantages over older methods:
+ *
+ * 	- We have VSIDs allocated for every kernel address
+ * (i.e. everything above 0xC000000000000000), except the very top
+ * segment, which simplifies several things.
+ *
+ * 	- We allow for 15 significant bits of ESID and 20 bits of
+ * context for user addresses.  i.e. 8T (43 bits) of address space for
+ * up to 1M contexts (although the page table structure and context
+ * allocation will need changes to take advantage of this).
+ *
+ * 	- The scramble function gives robust scattering in the hash
+ * table (at least based on some initial results).  The previous
+ * method was more susceptible to pathological cases giving excessive
+ * hash collisions.
  */
-static inline unsigned long
-get_kernel_vsid( unsigned long ea )
+
+/*
+ * WARNING - If you change these you must make sure the asm
+ * implementations in slb_allocate(), do_stab_bolted and mmu.h
+ * (ASM_VSID_SCRAMBLE macro) are changed accordingly.
+ *
+ * You'll also need to change the precomputed VSID values in head.S
+ * which are used by the iSeries firmware.
+ */
+
+static inline unsigned long vsid_scramble(unsigned long protovsid)
+{
+#if 0
+	/* The code below is equivalent to this function for arguments
+	 * < 2^VSID_BITS, which is all this should ever be called
+	 * with.  However gcc is not clever enough to compute the
+	 * modulus (2^n-1) without a second multiply. */
+	return ((protovsid * VSID_MULTIPLIER) % VSID_MODULUS);
+#else /* 1 */
+	unsigned long x;
+
+	x = protovsid * VSID_MULTIPLIER;
+	x = (x >> VSID_BITS) + (x & VSID_MODULUS);
+	return (x + ((x+1) >> VSID_BITS)) & VSID_MODULUS;
+#endif /* 1 */
+}
+
+/* This is only valid for addresses >= KERNELBASE */
+static inline unsigned long get_kernel_vsid(unsigned long ea)
 {
-	unsigned long ordinal, vsid;
-	
-	ordinal = (((ea >> 28) & 0x1fff) * LAST_USER_CONTEXT) | (ea >> 60);
-	vsid = (ordinal * VSID_RANDOMIZER) & VSID_MASK;
-
-#ifdef HTABSTRESS
-	/* For debug, this path creates a very poor vsid distribuition.
-	 * A user program can access virtual addresses in the form
-	 * 0x0yyyyxxxx000 where yyyy = xxxx to cause multiple mappings
-	 * to hash to the same page table group.
-	 */
-	ordinal = ((ea >> 28) & 0x1fff) | (ea >> 44);
-	vsid = ordinal & VSID_MASK;
-#endif /* HTABSTRESS */
-
-	return vsid;
-} 
-
-/* This is only valid for user EA's (user EA's do not exceed 2^41 (EADDR_SIZE))
- */
-static inline unsigned long
-get_vsid( unsigned long context, unsigned long ea )
-{
-	unsigned long ordinal, vsid;
-
-	ordinal = (((ea >> 28) & 0x1fff) * LAST_USER_CONTEXT) | context;
-	vsid = (ordinal * VSID_RANDOMIZER) & VSID_MASK;
-
-#ifdef HTABSTRESS
-	/* See comment above. */
-	ordinal = ((ea >> 28) & 0x1fff) | (context << 16);
-	vsid = ordinal & VSID_MASK;
-#endif /* HTABSTRESS */
+	return vsid_scramble(ea >> SID_SHIFT);
+}
 
-	return vsid;
+/* This is only valid for user addresses (which are below 2^41) */
+static inline unsigned long get_vsid(unsigned long context, unsigned long ea)
+{
+	return vsid_scramble((context << USER_ESID_BITS)
+			     | (ea >> SID_SHIFT));
 }
 
 #endif /* __PPC64_MMU_CONTEXT_H */
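
The vsid_scramble() helper above folds the multiply result instead of dividing, using the identity x mod (2^n - 1) = (x >> n) + (x & (2^n - 1)) plus one final carry fold. A minimal standalone check (userspace, assuming a 64-bit long; not part of the patch) comparing the folded form against the direct modulus for a few proto-VSIDs:

#include <assert.h>
#include <stdio.h>

#define VSID_MULTIPLIER	268435399UL		/* largest 28-bit prime */
#define VSID_BITS	36
#define VSID_MODULUS	((1UL << VSID_BITS) - 1)

/* same folding as the kernel helper: avoids the divide for mod (2^36 - 1) */
static unsigned long vsid_scramble(unsigned long protovsid)
{
	unsigned long x = protovsid * VSID_MULTIPLIER;

	x = (x >> VSID_BITS) + (x & VSID_MODULUS);
	return (x + ((x + 1) >> VSID_BITS)) & VSID_MODULUS;
}

int main(void)
{
	/* kernel ESID for KERNELBASE, a maximal user proto-VSID, extremes */
	unsigned long samples[] = { 1, 0xC00000000UL, (1UL << 35) - 1,
				    VSID_MODULUS - 1 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		assert(vsid_scramble(samples[i]) ==
		       (samples[i] * VSID_MULTIPLIER) % VSID_MODULUS);
	printf("folded scramble matches the direct modulus\n");
	return 0;
}
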
diff -Nru a/include/asm-ppc64/naca.h b/include/asm-ppc64/naca.h
--- a/include/asm-ppc64/naca.h	2004-09-15 20:29:40 -07:00
+++ b/include/asm-ppc64/naca.h	2004-09-15 20:29:40 -07:00
@@ -37,9 +37,6 @@
 	u32 dCacheL1LinesPerPage;	/* L1 d-cache lines / page   0x64 */
 	u32 iCacheL1LogLineSize;	/* L1 i-cache line size Log2 0x68 */
 	u32 iCacheL1LinesPerPage;	/* L1 i-cache lines / page   0x6c */
-	u8  smt_state;                  /* 0 = SMT off               0x70 */
-	                                /* 1 = SMT on                     */
-	                                /* 2 = SMT dynamic                */
 	u8  resv0[15];                  /* Reserved           0x71 - 0x7F */
 };
 
diff -Nru a/include/asm-ppc64/page.h b/include/asm-ppc64/page.h
--- a/include/asm-ppc64/page.h	2004-09-15 20:29:40 -07:00
+++ b/include/asm-ppc64/page.h	2004-09-15 20:29:40 -07:00
@@ -212,17 +212,6 @@
 #define USER_REGION_ID     (0UL)
 #define REGION_ID(X)	   (((unsigned long)(X))>>REGION_SHIFT)
 
-/*
- * Define valid/invalid EA bits (for all ranges)
- */
-#define VALID_EA_BITS   (0x000001ffffffffffUL)
-#define INVALID_EA_BITS (~(REGION_MASK|VALID_EA_BITS))
-
-#define IS_VALID_REGION_ID(x) \
-        (((x) == USER_REGION_ID) || ((x) >= KERNEL_REGION_ID))
-#define IS_VALID_EA(x) \
-        ((!((x) & INVALID_EA_BITS)) && IS_VALID_REGION_ID(REGION_ID(x)))
-
 #define __bpn_to_ba(x) ((((unsigned long)(x))<<PAGE_SHIFT) + KERNELBASE)
 #define __ba_to_bpn(x) ((((unsigned long)(x)) & ~REGION_MASK) >> PAGE_SHIFT)
 
diff -Nru a/include/asm-ppc64/pgtable.h b/include/asm-ppc64/pgtable.h
--- a/include/asm-ppc64/pgtable.h	2004-09-15 20:29:40 -07:00
+++ b/include/asm-ppc64/pgtable.h	2004-09-15 20:29:40 -07:00
@@ -45,10 +45,16 @@
                     PGD_INDEX_SIZE + PAGE_SHIFT) 
 
 /*
+ * Size of EA range mapped by our pagetables.
+ */
+#define PGTABLE_EA_BITS	41
+#define PGTABLE_EA_MASK	((1UL<<PGTABLE_EA_BITS)-1)
+
+/*
  * Define the address range of the vmalloc VM area.
  */
 #define VMALLOC_START (0xD000000000000000ul)
-#define VMALLOC_END   (VMALLOC_START + VALID_EA_BITS)
+#define VMALLOC_END   (VMALLOC_START + PGTABLE_EA_MASK)
 
 /*
  * Define the address range of the imalloc VM area.
@@ -58,19 +64,19 @@
 #define IMALLOC_VMADDR(x) ((unsigned long)(x))
 #define PHBS_IO_BASE  	  (0xE000000000000000ul)	/* Reserve 2 gigs for PHBs */
 #define IMALLOC_BASE      (0xE000000080000000ul)  
-#define IMALLOC_END       (IMALLOC_BASE + VALID_EA_BITS)
+#define IMALLOC_END       (IMALLOC_BASE + PGTABLE_EA_MASK)
 
 /*
  * Define the address range mapped virt <-> physical
  */
 #define KRANGE_START KERNELBASE
-#define KRANGE_END   (KRANGE_START + VALID_EA_BITS)
+#define KRANGE_END   (KRANGE_START + PGTABLE_EA_MASK)
 
 /*
  * Define the user address range
  */
 #define USER_START (0UL)
-#define USER_END   (USER_START + VALID_EA_BITS)
+#define USER_END   (USER_START + PGTABLE_EA_MASK)
 
 
 /*
diff -Nru a/include/asm-ppc64/smp.h b/include/asm-ppc64/smp.h
--- a/include/asm-ppc64/smp.h	2004-09-15 20:29:40 -07:00
+++ b/include/asm-ppc64/smp.h	2004-09-15 20:29:40 -07:00
@@ -65,6 +65,8 @@
 #define set_hard_smp_processor_id(CPU, VAL) \
 	do { (paca[(CPU)].hw_cpu_id = (VAL)); } while (0)
 
+extern int smt_enabled_at_boot;
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* !(_PPC64_SMP_H) */
diff -Nru a/include/asm-ppc64/unistd.h b/include/asm-ppc64/unistd.h
--- a/include/asm-ppc64/unistd.h	2004-09-15 20:29:40 -07:00
+++ b/include/asm-ppc64/unistd.h	2004-09-15 20:29:40 -07:00
@@ -269,9 +269,9 @@
 /* Number 256 is reserved for sys_debug_setcontext */
 /* Number 257 is reserved for vserver */
 /* Number 258 is reserved for new sys_remap_file_pages */
-/* Number 259 is reserved for new sys_mbind */
-/* Number 260 is reserved for new sys_get_mempolicy */
-/* Number 261 is reserved for new sys_set_mempolicy */
+#define __NR_mbind		259
+#define __NR_get_mempolicy	260
+#define __NR_set_mempolicy	261
 #define __NR_mq_open		262
 #define __NR_mq_unlink		263
 #define __NR_mq_timedsend	264
diff -Nru a/include/asm-sh/dma-mapping.h b/include/asm-sh/dma-mapping.h
--- a/include/asm-sh/dma-mapping.h	2004-09-15 20:29:40 -07:00
+++ b/include/asm-sh/dma-mapping.h	2004-09-15 20:29:40 -07:00
@@ -3,7 +3,6 @@
 
 #include <linux/config.h>
 #include <linux/mm.h>
-#include <linux/device.h>
 #include <asm/scatterlist.h>
 #include <asm/io.h>
 
diff -Nru a/include/asm-sh64/dma-mapping.h b/include/asm-sh64/dma-mapping.h
--- a/include/asm-sh64/dma-mapping.h	2004-09-15 20:29:40 -07:00
+++ b/include/asm-sh64/dma-mapping.h	2004-09-15 20:29:40 -07:00
@@ -3,7 +3,6 @@
 
 #include <linux/config.h>
 #include <linux/mm.h>
-#include <linux/device.h>
 #include <asm/scatterlist.h>
 #include <asm/io.h>
 
diff -Nru a/include/asm-sh64/softirq.h b/include/asm-sh64/softirq.h
--- a/include/asm-sh64/softirq.h	2004-09-15 20:29:40 -07:00
+++ /dev/null	Wed Dec 31 16:00:00 196900
@@ -1,30 +0,0 @@
-#ifndef __ASM_SH_SOFTIRQ_H
-#define __ASM_SH_SOFTIRQ_H
-
-#include <asm/atomic.h>
-#include <asm/hardirq.h>
-
-#define local_bh_disable()			\
-do {						\
-	local_bh_count(smp_processor_id())++;	\
-	barrier();				\
-} while (0)
-
-#define __local_bh_enable()			\
-do {						\
-	barrier();				\
-	local_bh_count(smp_processor_id())--;	\
-} while (0)
-
-#define local_bh_enable()				\
-do {							\
-	barrier();					\
-	if (!--local_bh_count(smp_processor_id())	\
-	    && softirq_pending(smp_processor_id())) {	\
-		do_softirq();				\
-	}						\
-} while (0)
-
-#define in_softirq() (local_bh_count(smp_processor_id()) != 0)
-
-#endif /* __ASM_SH_SOFTIRQ_H */
diff -Nru a/include/asm-sparc/dma-mapping.h b/include/asm-sparc/dma-mapping.h
--- a/include/asm-sparc/dma-mapping.h	2004-09-15 20:29:40 -07:00
+++ b/include/asm-sparc/dma-mapping.h	2004-09-15 20:29:40 -07:00
@@ -2,7 +2,6 @@
 #define _ASM_SPARC_DMA_MAPPING_H
 
 #include <linux/config.h>
-#include <linux/device.h>
 
 #ifdef CONFIG_PCI
 #include <asm-generic/dma-mapping.h>
diff -Nru a/include/asm-sparc64/ebus.h b/include/asm-sparc64/ebus.h
--- a/include/asm-sparc64/ebus.h	2004-09-15 20:29:40 -07:00
+++ b/include/asm-sparc64/ebus.h	2004-09-15 20:29:40 -07:00
@@ -53,7 +53,7 @@
 
 struct ebus_dma_info {
 	spinlock_t	lock;
-	unsigned long	regs;
+	void __iomem	*regs;
 
 	unsigned int	flags;
 #define EBUS_DMA_FLAG_USE_EBDMA_HANDLER		0x00000001
diff -Nru a/include/asm-sparc64/io.h b/include/asm-sparc64/io.h
--- a/include/asm-sparc64/io.h	2004-09-15 20:29:40 -07:00
+++ b/include/asm-sparc64/io.h	2004-09-15 20:29:40 -07:00
@@ -3,6 +3,7 @@
 #define __SPARC64_IO_H
 
 #include <linux/kernel.h>
+#include <linux/compiler.h>
 #include <linux/types.h>
 
 #include <asm/page.h>      /* IO address mapping routines need this */
@@ -107,20 +108,17 @@
 extern void insl(unsigned long addr, void *dst, unsigned long count);
 
 /* Memory functions, same as I/O accesses on Ultra. */
-static __inline__ u8 _readb(unsigned long addr)
-{
-	u8 ret;
+static inline u8 _readb(void __iomem *addr)
+{	u8 ret;
 
 	__asm__ __volatile__("lduba\t[%1] %2, %0\t/* pci_readb */"
 			     : "=r" (ret)
 			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L));
-
 	return ret;
 }
 
-static __inline__ u16 _readw(unsigned long addr)
-{
-	u16 ret;
+static inline u16 _readw(void __iomem *addr)
+{	u16 ret;
 
 	__asm__ __volatile__("lduha\t[%1] %2, %0\t/* pci_readw */"
 			     : "=r" (ret)
@@ -129,9 +127,8 @@
 	return ret;
 }
 
-static __inline__ u32 _readl(unsigned long addr)
-{
-	u32 ret;
+static inline u32 _readl(void __iomem *addr)
+{	u32 ret;
 
 	__asm__ __volatile__("lduwa\t[%1] %2, %0\t/* pci_readl */"
 			     : "=r" (ret)
@@ -140,9 +137,8 @@
 	return ret;
 }
 
-static __inline__ u64 _readq(unsigned long addr)
-{
-	u64 ret;
+static inline u64 _readq(void __iomem *addr)
+{	u64 ret;
 
 	__asm__ __volatile__("ldxa\t[%1] %2, %0\t/* pci_readq */"
 			     : "=r" (ret)
@@ -151,46 +147,46 @@
 	return ret;
 }
 
-static __inline__ void _writeb(u8 b, unsigned long addr)
+static inline void _writeb(u8 b, void __iomem *addr)
 {
 	__asm__ __volatile__("stba\t%r0, [%1] %2\t/* pci_writeb */"
 			     : /* no outputs */
 			     : "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L));
 }
 
-static __inline__ void _writew(u16 w, unsigned long addr)
+static inline void _writew(u16 w, void __iomem *addr)
 {
 	__asm__ __volatile__("stha\t%r0, [%1] %2\t/* pci_writew */"
 			     : /* no outputs */
 			     : "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L));
 }
 
-static __inline__ void _writel(u32 l, unsigned long addr)
+static inline void _writel(u32 l, void __iomem *addr)
 {
 	__asm__ __volatile__("stwa\t%r0, [%1] %2\t/* pci_writel */"
 			     : /* no outputs */
 			     : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L));
 }
 
-static __inline__ void _writeq(u64 q, unsigned long addr)
+static inline void _writeq(u64 q, void __iomem *addr)
 {
 	__asm__ __volatile__("stxa\t%r0, [%1] %2\t/* pci_writeq */"
 			     : /* no outputs */
 			     : "Jr" (q), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L));
 }
 
-#define readb(__addr)		(_readb((unsigned long)(__addr)))
-#define readw(__addr)		(_readw((unsigned long)(__addr)))
-#define readl(__addr)		(_readl((unsigned long)(__addr)))
-#define readq(__addr)		(_readq((unsigned long)(__addr)))
-#define readb_relaxed(a)	readb(a)
-#define readw_relaxed(a)	readw(a)
-#define readl_relaxed(a)	readl(a)
-#define readq_relaxed(a)	readq(a)
-#define writeb(__b, __addr)	(_writeb((u8)(__b), (unsigned long)(__addr)))
-#define writew(__w, __addr)	(_writew((u16)(__w), (unsigned long)(__addr)))
-#define writel(__l, __addr)	(_writel((u32)(__l), (unsigned long)(__addr)))
-#define writeq(__q, __addr)	(_writeq((u64)(__q), (unsigned long)(__addr)))
+#define readb(__addr)		_readb(__addr)
+#define readw(__addr)		_readw(__addr)
+#define readl(__addr)		_readl(__addr)
+#define readq(__addr)		_readq(__addr)
+#define readb_relaxed(__addr)	_readb(__addr)
+#define readw_relaxed(__addr)	_readw(__addr)
+#define readl_relaxed(__addr)	_readl(__addr)
+#define readq_relaxed(__addr)	_readq(__addr)
+#define writeb(__b, __addr)	_writeb(__b, __addr)
+#define writew(__w, __addr)	_writew(__w, __addr)
+#define writel(__l, __addr)	_writel(__l, __addr)
+#define writeq(__q, __addr)	_writeq(__q, __addr)
 
 /* Now versions without byte-swapping. */
 static __inline__ u8 _raw_readb(unsigned long addr)
@@ -282,7 +278,7 @@
 /* Now, SBUS variants, only difference from PCI is that we do
  * not use little-endian ASIs.
  */
-static __inline__ u8 _sbus_readb(unsigned long addr)
+static inline u8 _sbus_readb(void __iomem *addr)
 {
 	u8 ret;
 
@@ -293,7 +289,7 @@
 	return ret;
 }
 
-static __inline__ u16 _sbus_readw(unsigned long addr)
+static inline u16 _sbus_readw(void __iomem *addr)
 {
 	u16 ret;
 
@@ -304,7 +300,7 @@
 	return ret;
 }
 
-static __inline__ u32 _sbus_readl(unsigned long addr)
+static inline u32 _sbus_readl(void __iomem *addr)
 {
 	u32 ret;
 
@@ -315,7 +311,7 @@
 	return ret;
 }
 
-static __inline__ u64 _sbus_readq(unsigned long addr)
+static inline u64 _sbus_readq(void __iomem *addr)
 {
 	u64 ret;
 
@@ -326,44 +322,45 @@
 	return ret;
 }
 
-static __inline__ void _sbus_writeb(u8 b, unsigned long addr)
+static inline void _sbus_writeb(u8 b, void __iomem *addr)
 {
 	__asm__ __volatile__("stba\t%r0, [%1] %2\t/* sbus_writeb */"
 			     : /* no outputs */
 			     : "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
 }
 
-static __inline__ void _sbus_writew(u16 w, unsigned long addr)
+static inline void _sbus_writew(u16 w, void __iomem *addr)
 {
 	__asm__ __volatile__("stha\t%r0, [%1] %2\t/* sbus_writew */"
 			     : /* no outputs */
 			     : "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
 }
 
-static __inline__ void _sbus_writel(u32 l, unsigned long addr)
+static inline void _sbus_writel(u32 l, void __iomem *addr)
 {
 	__asm__ __volatile__("stwa\t%r0, [%1] %2\t/* sbus_writel */"
 			     : /* no outputs */
 			     : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
 }
 
-static __inline__ void _sbus_writeq(u64 l, unsigned long addr)
+static inline void _sbus_writeq(u64 l, void __iomem *addr)
 {
 	__asm__ __volatile__("stxa\t%r0, [%1] %2\t/* sbus_writeq */"
 			     : /* no outputs */
 			     : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
 }
 
-#define sbus_readb(__addr)		(_sbus_readb((unsigned long)(__addr)))
-#define sbus_readw(__addr)		(_sbus_readw((unsigned long)(__addr)))
-#define sbus_readl(__addr)		(_sbus_readl((unsigned long)(__addr)))
-#define sbus_readq(__addr)		(_sbus_readq((unsigned long)(__addr)))
-#define sbus_writeb(__b, __addr)	(_sbus_writeb((__b), (unsigned long)(__addr)))
-#define sbus_writew(__w, __addr)	(_sbus_writew((__w), (unsigned long)(__addr)))
-#define sbus_writel(__l, __addr)	(_sbus_writel((__l), (unsigned long)(__addr)))
-#define sbus_writeq(__l, __addr)	(_sbus_writeq((__l), (unsigned long)(__addr)))
+#define sbus_readb(__addr)		_sbus_readb(__addr)
+#define sbus_readw(__addr)		_sbus_readw(__addr)
+#define sbus_readl(__addr)		_sbus_readl(__addr)
+#define sbus_readq(__addr)		_sbus_readq(__addr)
+#define sbus_writeb(__b, __addr)	_sbus_writeb(__b, __addr)
+#define sbus_writew(__w, __addr)	_sbus_writew(__w, __addr)
+#define sbus_writel(__l, __addr)	_sbus_writel(__l, __addr)
+#define sbus_writeq(__l, __addr)	_sbus_writeq(__l, __addr)
 
-static inline void *_sbus_memset_io(unsigned long dst, int c, __kernel_size_t n)
+static inline void __iomem *_sbus_memset_io(void __iomem *dst, int c,
+					   __kernel_size_t n)
 {
 	while(n--) {
 		sbus_writeb(c, dst);
@@ -372,13 +369,12 @@
 	return (void *) dst;
 }
 
-#define sbus_memset_io(d,c,sz)	\
-	_sbus_memset_io((unsigned long)d,(int)c,(__kernel_size_t)sz)
+#define sbus_memset_io(d,c,sz)	_sbus_memset_io(d,c,sz)
 
-static inline void *
-_memset_io(void *dst, int c, __kernel_size_t n)
+static inline void __iomem *
+_memset_io(void __iomem *dst, int c, __kernel_size_t n)
 {
-	char *d = dst;
+	void __iomem *d = dst;
 
 	while (n--) {
 		writeb(c, d);
@@ -388,11 +384,10 @@
 	return dst;
 }
 
-#define memset_io(d,c,sz)	\
-	_memset_io((void *)d,(int)c,(__kernel_size_t)sz)
+#define memset_io(d,c,sz)	_memset_io(d,c,sz)
 
-static inline void *
-_memcpy_fromio(void *dst, unsigned long src, __kernel_size_t n)
+static inline void __iomem *
+_memcpy_fromio(void *dst, void __iomem *src, __kernel_size_t n)
 {
 	char *d = dst;
 
@@ -405,25 +400,23 @@
 	return dst;
 }
 
-#define memcpy_fromio(d,s,sz)	\
-	_memcpy_fromio((void *)d,(unsigned long)s,(__kernel_size_t)sz)
+#define memcpy_fromio(d,s,sz)	_memcpy_fromio(d,s,sz)
 
-static inline void *
-_memcpy_toio(unsigned long dst, const void *src, __kernel_size_t n)
+static inline void __iomem *
+_memcpy_toio(void __iomem *dst, const void *src, __kernel_size_t n)
 {
 	const char *s = src;
-	unsigned long d = dst;
+	void __iomem *d = dst;
 
 	while (n--) {
 		char tmp = *s++;
 		writeb(tmp, d);
 		d++;
 	}
-	return (void *)dst;
+	return dst;
 }
 
-#define memcpy_toio(d,s,sz)	\
-	_memcpy_toio((unsigned long)d,(const void *)s,(__kernel_size_t)sz)
+#define memcpy_toio(d,s,sz)	_memcpy_toio(d,s,sz)
 
 static inline int check_signature(unsigned long io_addr,
 				  const unsigned char *signature,
@@ -431,8 +424,9 @@
 {
 	int retval = 0;
 	do {
-		if (readb(io_addr++) != *signature++)
+		if (readb((void __iomem *)io_addr) != *signature++)
 			goto out;
+		io_addr++;
 	} while (--length);
 	retval = 1;
 out:
@@ -444,10 +438,26 @@
 /* On sparc64 we have the whole physical IO address space accessible
  * using physically addressed loads and stores, so this does nothing.
  */
-#define ioremap(__offset, __size)	((void *)(__offset))
+#define ioremap(__offset, __size)	((void __iomem *)(__offset))
 #define ioremap_nocache(X,Y)		ioremap((X),(Y))
 #define iounmap(__addr)			do { (void)(__addr); } while(0)
 
+#define ioread8(X)			readb(X)
+#define ioread16(X)			readw(X)
+#define ioread32(X)			readl(X)
+#define iowrite8(val,X)			writeb(val,X)
+#define iowrite16(val,X)		writew(val,X)
+#define iowrite32(val,X)		writel(val,X)
+
+/* Create a virtual mapping cookie for an IO port range */
+extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
+extern void ioport_unmap(void __iomem *);
+
+/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
+struct pci_dev;
+extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
+extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
+
 /* Similarly for SBUS. */
 #define sbus_ioremap(__res, __offset, __size, __name) \
 ({	unsigned long __ret; \
@@ -455,11 +465,11 @@
 	__ret += (unsigned long) (__offset); \
 	if (! request_region((__ret), (__size), (__name))) \
 		__ret = 0UL; \
-	__ret; \
+	(void __iomem *) __ret; \
 })
 
 #define sbus_iounmap(__addr, __size)	\
-	release_region((__addr), (__size))
+	release_region((unsigned long)(__addr), (__size))
 
 /* Nothing to do */
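
With the accessors above now taking void __iomem * and ioremap() returning one, a driver can carry the cookie through without casts, which is what lets sparse catch address-space mixups. A rough sketch (illustrative only; the device, base address and register offset are made up):

#include <asm/io.h>

#define MYDEV_STATUS	0x04		/* hypothetical register offset */

static u8 mydev_read_status(unsigned long phys_base)
{
	/* on sparc64 ioremap() is just the cast shown above */
	void __iomem *regs = ioremap(phys_base, 0x100);
	u8 status;

	/* no (unsigned long) casts needed on either side any more */
	status = readb(regs + MYDEV_STATUS);
	iounmap(regs);
	return status;
}
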
 
diff -Nru a/include/asm-um/processor-generic.h b/include/asm-um/processor-generic.h
--- a/include/asm-um/processor-generic.h	2004-09-15 20:29:40 -07:00
+++ b/include/asm-um/processor-generic.h	2004-09-15 20:29:40 -07:00
@@ -22,7 +22,6 @@
 
 struct thread_struct {
 	int forking;
-	unsigned long kernel_stack;
 	int nsyscalls;
 	struct pt_regs regs;
 	unsigned long cr2;
@@ -73,7 +72,6 @@
 #define INIT_THREAD \
 { \
 	.forking		= 0, \
-	.kernel_stack		= 0, \
 	.nsyscalls		= 0, \
         .regs		   	= EMPTY_REGS, \
 	.cr2			= 0, \
diff -Nru a/include/asm-um/smp.h b/include/asm-um/smp.h
--- a/include/asm-um/smp.h	2004-09-15 20:29:40 -07:00
+++ b/include/asm-um/smp.h	2004-09-15 20:29:40 -07:00
@@ -9,6 +9,8 @@
 #include "linux/cpumask.h"
 
 extern cpumask_t cpu_online_map;
+extern cpumask_t cpu_possible_map;
+
 
 #define smp_processor_id() (current_thread->cpu)
 #define cpu_logical_map(n) (n)
diff -Nru a/include/asm-um/spinlock.h b/include/asm-um/spinlock.h
--- /dev/null	Wed Dec 31 16:00:00 196900
+++ b/include/asm-um/spinlock.h	2004-09-15 20:29:40 -07:00
@@ -0,0 +1,6 @@
+#ifndef __UM_SPINLOCK_H
+#define __UM_SPINLOCK_H
+
+#include "asm/arch/spinlock.h"
+
+#endif
diff -Nru a/include/asm-x86_64/dma-mapping.h b/include/asm-x86_64/dma-mapping.h
--- a/include/asm-x86_64/dma-mapping.h	2004-09-15 20:29:40 -07:00
+++ b/include/asm-x86_64/dma-mapping.h	2004-09-15 20:29:40 -07:00
@@ -7,7 +7,6 @@
  */
 
 #include <linux/config.h>
-#include <linux/device.h>
 
 #include <asm/scatterlist.h>
 #include <asm/io.h>
diff -Nru a/include/linux/blkdev.h b/include/linux/blkdev.h
--- a/include/linux/blkdev.h	2004-09-15 20:29:40 -07:00
+++ b/include/linux/blkdev.h	2004-09-15 20:29:40 -07:00
@@ -344,6 +344,7 @@
 	unsigned int		nr_congestion_off;
 
 	unsigned short		max_sectors;
+	unsigned short		max_hw_sectors;
 	unsigned short		max_phys_segments;
 	unsigned short		max_hw_segments;
 	unsigned short		hardsect_size;
diff -Nru a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
--- a/include/linux/dma-mapping.h	2004-09-15 20:29:40 -07:00
+++ b/include/linux/dma-mapping.h	2004-09-15 20:29:40 -07:00
@@ -1,6 +1,7 @@
 #ifndef _ASM_LINUX_DMA_MAPPING_H
 #define _ASM_LINUX_DMA_MAPPING_H
 
+#include <linux/device.h>
 #include <linux/err.h>
 
 /* These definitions mirror those in pci.h, so they can be used
diff -Nru a/include/linux/genhd.h b/include/linux/genhd.h
--- a/include/linux/genhd.h	2004-09-15 20:29:40 -07:00
+++ b/include/linux/genhd.h	2004-09-15 20:29:40 -07:00
@@ -249,7 +249,7 @@
 /* check against BSD src/sys/sys/disklabel.h for consistency */
 
 #define BSD_DISKMAGIC	(0x82564557UL)	/* The disk magic number */
-#define BSD_MAXPARTITIONS	8
+#define BSD_MAXPARTITIONS	16
 #define OPENBSD_MAXPARTITIONS	16
 #define BSD_FS_UNUSED		0	/* disklabel unused partition entry ID */
 struct bsd_disklabel {
diff -Nru a/include/linux/ghash.h b/include/linux/ghash.h
--- a/include/linux/ghash.h	2004-09-15 20:29:40 -07:00
+++ /dev/null	Wed Dec 31 16:00:00 196900
@@ -1,236 +0,0 @@
-/*
- * include/linux/ghash.h -- generic hashing with fuzzy retrieval
- *
- * (C) 1997 Thomas Schoebel-Theuer
- *
- * The algorithms implemented here seem to be a completely new invention,
- * and I'll publish the fundamentals in a paper.
- */
-
-#ifndef _GHASH_H
-#define _GHASH_H
-/* HASHSIZE _must_ be a power of two!!! */
-
-
-#define DEF_HASH_FUZZY_STRUCTS(NAME,HASHSIZE,TYPE) \
-\
-struct NAME##_table {\
-	TYPE * hashtable[HASHSIZE];\
-	TYPE * sorted_list;\
-	int nr_entries;\
-};\
-\
-struct NAME##_ptrs {\
-	TYPE * next_hash;\
-	TYPE * prev_hash;\
-	TYPE * next_sorted;\
-	TYPE * prev_sorted;\
-};
-
-#define DEF_HASH_FUZZY(LINKAGE,NAME,HASHSIZE,TYPE,PTRS,KEYTYPE,KEY,KEYCMP,KEYEQ,HASHFN)\
-\
-LINKAGE void insert_##NAME##_hash(struct NAME##_table * tbl, TYPE * elem)\
-{\
-	int ix = HASHFN(elem->KEY);\
-	TYPE ** base = &tbl->hashtable[ix];\
-	TYPE * ptr = *base;\
-	TYPE * prev = NULL;\
-\
-	tbl->nr_entries++;\
-	while(ptr && KEYCMP(ptr->KEY, elem->KEY)) {\
-		base = &ptr->PTRS.next_hash;\
-		prev = ptr;\
-		ptr = *base;\
-	}\
-	elem->PTRS.next_hash = ptr;\
-	elem->PTRS.prev_hash = prev;\
-	if(ptr) {\
-		ptr->PTRS.prev_hash = elem;\
-	}\
-	*base = elem;\
-\
-	ptr = prev;\
-	if(!ptr) {\
-		ptr = tbl->sorted_list;\
-		prev = NULL;\
-	} else {\
-		prev = ptr->PTRS.prev_sorted;\
-	}\
-	while(ptr) {\
-		TYPE * next = ptr->PTRS.next_hash;\
-		if(next && KEYCMP(next->KEY, elem->KEY)) {\
-			prev = ptr;\
-			ptr = next;\
-		} else if(KEYCMP(ptr->KEY, elem->KEY)) {\
-			prev = ptr;\
-			ptr = ptr->PTRS.next_sorted;\
-		} else\
-			break;\
-	}\
-	elem->PTRS.next_sorted = ptr;\
-	elem->PTRS.prev_sorted = prev;\
-	if(ptr) {\
-		ptr->PTRS.prev_sorted = elem;\
-	}\
-	if(prev) {\
-		prev->PTRS.next_sorted = elem;\
-	} else {\
-		tbl->sorted_list = elem;\
-	}\
-}\
-\
-LINKAGE void remove_##NAME##_hash(struct NAME##_table * tbl, TYPE * elem)\
-{\
-	TYPE * next = elem->PTRS.next_hash;\
-	TYPE * prev = elem->PTRS.prev_hash;\
-\
-	tbl->nr_entries--;\
-	if(next)\
-		next->PTRS.prev_hash = prev;\
-	if(prev)\
-		prev->PTRS.next_hash = next;\
-	else {\
-		int ix = HASHFN(elem->KEY);\
-		tbl->hashtable[ix] = next;\
-	}\
-\
-	next = elem->PTRS.next_sorted;\
-	prev = elem->PTRS.prev_sorted;\
-	if(next)\
-		next->PTRS.prev_sorted = prev;\
-	if(prev)\
-		prev->PTRS.next_sorted = next;\
-	else\
-		tbl->sorted_list = next;\
-}\
-\
-LINKAGE TYPE * find_##NAME##_hash(struct NAME##_table * tbl, KEYTYPE pos)\
-{\
-	int ix = hashfn(pos);\
-	TYPE * ptr = tbl->hashtable[ix];\
-	while(ptr && KEYCMP(ptr->KEY, pos))\
-		ptr = ptr->PTRS.next_hash;\
-	if(ptr && !KEYEQ(ptr->KEY, pos))\
-		ptr = NULL;\
-	return ptr;\
-}\
-\
-LINKAGE TYPE * find_##NAME##_hash_fuzzy(struct NAME##_table * tbl, KEYTYPE pos)\
-{\
-	int ix;\
-	int offset;\
-	TYPE * ptr;\
-	TYPE * next;\
-\
-	ptr = tbl->sorted_list;\
-	if(!ptr || KEYCMP(pos, ptr->KEY))\
-		return NULL;\
-	ix = HASHFN(pos);\
-	offset = HASHSIZE;\
-	do {\
-		offset >>= 1;\
-		next = tbl->hashtable[(ix+offset) & ((HASHSIZE)-1)];\
-		if(next && (KEYCMP(next->KEY, pos) || KEYEQ(next->KEY, pos))\
-		   && KEYCMP(ptr->KEY, next->KEY))\
-			ptr = next;\
-	} while(offset);\
-\
-	for(;;) {\
-		next = ptr->PTRS.next_hash;\
-		if(next) {\
-			if(KEYCMP(next->KEY, pos)) {\
-				ptr = next;\
-				continue;\
-			}\
-		}\
-		next = ptr->PTRS.next_sorted;\
-		if(next && KEYCMP(next->KEY, pos)) {\
-			ptr = next;\
-			continue;\
-		}\
-		return ptr;\
-	}\
-	return NULL;\
-}
-
-/* LINKAGE - empty or "static", depending on whether you want the definitions to
- *	be public or not
- * NAME - a string to stick in names to make this hash table type distinct from
- * 	any others
- * HASHSIZE - number of buckets
- * TYPE - type of data contained in the buckets - must be a structure, one
- * 	field is of type NAME_ptrs, another is the hash key
- * PTRS - TYPE must contain a field of type NAME_ptrs, PTRS is the name of that
- * 	field
- * KEYTYPE - type of the key field within TYPE
- * KEY - name of the key field within TYPE
- * KEYCMP - pointer to function that compares KEYTYPEs to each other - the
- * 	prototype is int KEYCMP(KEYTYPE, KEYTYPE), it returns zero for equal,
- * 	non-zero for not equal
- * HASHFN - the hash function - the prototype is int HASHFN(KEYTYPE),
- * 	it returns a number in the range 0 ... HASHSIZE - 1
- * Call DEF_HASH_STRUCTS, define your hash table as a NAME_table, then call
- * DEF_HASH.
- */
-
-#define DEF_HASH_STRUCTS(NAME,HASHSIZE,TYPE) \
-\
-struct NAME##_table {\
-	TYPE * hashtable[HASHSIZE];\
-	int nr_entries;\
-};\
-\
-struct NAME##_ptrs {\
-	TYPE * next_hash;\
-	TYPE * prev_hash;\
-};
-
-#define DEF_HASH(LINKAGE,NAME,TYPE,PTRS,KEYTYPE,KEY,KEYCMP,HASHFN)\
-\
-LINKAGE void insert_##NAME##_hash(struct NAME##_table * tbl, TYPE * elem)\
-{\
-	int ix = HASHFN(elem->KEY);\
-	TYPE ** base = &tbl->hashtable[ix];\
-	TYPE * ptr = *base;\
-	TYPE * prev = NULL;\
-\
-	tbl->nr_entries++;\
-	while(ptr && KEYCMP(ptr->KEY, elem->KEY)) {\
-		base = &ptr->PTRS.next_hash;\
-		prev = ptr;\
-		ptr = *base;\
-	}\
-	elem->PTRS.next_hash = ptr;\
-	elem->PTRS.prev_hash = prev;\
-	if(ptr) {\
-		ptr->PTRS.prev_hash = elem;\
-	}\
-	*base = elem;\
-}\
-\
-LINKAGE void remove_##NAME##_hash(struct NAME##_table * tbl, TYPE * elem)\
-{\
-	TYPE * next = elem->PTRS.next_hash;\
-	TYPE * prev = elem->PTRS.prev_hash;\
-\
-	tbl->nr_entries--;\
-	if(next)\
-		next->PTRS.prev_hash = prev;\
-	if(prev)\
-		prev->PTRS.next_hash = next;\
-	else {\
-		int ix = HASHFN(elem->KEY);\
-		tbl->hashtable[ix] = next;\
-	}\
-}\
-\
-LINKAGE TYPE * find_##NAME##_hash(struct NAME##_table * tbl, KEYTYPE pos)\
-{\
-	int ix = HASHFN(pos);\
-	TYPE * ptr = tbl->hashtable[ix];\
-	while(ptr && KEYCMP(ptr->KEY, pos))\
-		ptr = ptr->PTRS.next_hash;\
-	return ptr;\
-}
-
-#endif
diff -Nru a/include/linux/hardirq.h b/include/linux/hardirq.h
--- a/include/linux/hardirq.h	2004-09-15 20:29:40 -07:00
+++ b/include/linux/hardirq.h	2004-09-15 20:29:40 -07:00
@@ -2,9 +2,7 @@
 #define LINUX_HARDIRQ_H
 
 #include <linux/config.h>
-#ifdef CONFIG_PREEPT
 #include <linux/smp_lock.h>
-#endif
 #include <asm/hardirq.h>
 
 #define __IRQ_MASK(x)	((1UL << (x))-1)
@@ -28,9 +26,6 @@
 #define in_irq()		(hardirq_count())
 #define in_softirq()		(softirq_count())
 #define in_interrupt()		(irq_count())
-
-#define hardirq_trylock()	(!in_interrupt())
-#define hardirq_endlock()	do { } while (0)
 
 #ifdef CONFIG_PREEMPT
 # define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked())
diff -Nru a/include/linux/if_ether.h b/include/linux/if_ether.h
--- a/include/linux/if_ether.h	2004-09-15 20:29:40 -07:00
+++ b/include/linux/if_ether.h	2004-09-15 20:29:40 -07:00
@@ -59,6 +59,8 @@
 #define ETH_P_8021Q	0x8100          /* 802.1Q VLAN Extended Header  */
 #define ETH_P_IPX	0x8137		/* IPX over DIX			*/
 #define ETH_P_IPV6	0x86DD		/* IPv6 over bluebook		*/
+#define ETH_P_WCCP	0x883E		/* Web-cache coordination protocol
+					 * defined in draft-wilson-wrec-wccp-v2-00.txt */
 #define ETH_P_PPP_DISC	0x8863		/* PPPoE discovery messages     */
 #define ETH_P_PPP_SES	0x8864		/* PPPoE session messages	*/
 #define ETH_P_MPLS_UC	0x8847		/* MPLS Unicast traffic		*/
diff -Nru a/include/linux/ip6_tunnel.h b/include/linux/ip6_tunnel.h
--- a/include/linux/ip6_tunnel.h	2004-09-15 20:29:40 -07:00
+++ b/include/linux/ip6_tunnel.h	2004-09-15 20:29:40 -07:00
@@ -16,6 +16,8 @@
 #define IP6_TNL_F_USE_ORIG_FLOWLABEL 0x4
 /* being used for Mobile IPv6 */
 #define IP6_TNL_F_MIP6_DEV 0x8
+/* copy DSCP from the outer packet */
+#define IP6_TNL_F_RCV_DSCP_COPY 0x10
 
 struct ip6_tnl_parm {
 	char name[IFNAMSIZ];	/* name of tunnel device */
diff -Nru a/include/linux/prctl.h b/include/linux/prctl.h
--- a/include/linux/prctl.h	2004-09-15 20:29:40 -07:00
+++ b/include/linux/prctl.h	2004-09-15 20:29:40 -07:00
@@ -50,4 +50,6 @@
                                                    process timing */
 
 
+#define PR_SET_NAME    15		/* Set process name */
+
 #endif /* _LINUX_PRCTL_H */
diff -Nru a/include/linux/quota.h b/include/linux/quota.h
--- a/include/linux/quota.h	2004-09-15 20:29:40 -07:00
+++ b/include/linux/quota.h	2004-09-15 20:29:40 -07:00
@@ -45,7 +45,6 @@
 typedef __kernel_uid32_t qid_t; /* Type in which we store ids in memory */
 typedef __u64 qsize_t;          /* Type in which we store sizes */
 
-extern spinlock_t dq_list_lock;
 extern spinlock_t dq_data_lock;
 
 /* Size of blocks in which are counted size limits */
diff -Nru a/include/linux/raid/md.h b/include/linux/raid/md.h
--- a/include/linux/raid/md.h	2004-09-15 20:29:40 -07:00
+++ b/include/linux/raid/md.h	2004-09-15 20:29:40 -07:00
@@ -69,7 +69,6 @@
 extern void md_unregister_thread (mdk_thread_t *thread);
 extern void md_wakeup_thread(mdk_thread_t *thread);
 extern void md_check_recovery(mddev_t *mddev);
-extern void md_interrupt_thread (mdk_thread_t *thread);
 extern void md_write_start(mddev_t *mddev);
 extern void md_write_end(mddev_t *mddev);
 extern void md_handle_safemode(mddev_t *mddev);
diff -Nru a/include/linux/sched.h b/include/linux/sched.h
--- a/include/linux/sched.h	2004-09-15 20:29:40 -07:00
+++ b/include/linux/sched.h	2004-09-15 20:29:40 -07:00
@@ -1024,6 +1024,9 @@
 }
 #endif
 
+extern long sched_setaffinity(pid_t pid, cpumask_t new_mask);
+extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
+
 #endif /* __KERNEL__ */
 
 #endif
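
Exporting sched_setaffinity()/sched_getaffinity() as plain kernel functions lets callers pass a cpumask_t directly instead of faking a user buffer with set_fs(KERNEL_DS) around the syscall entry points; the compat code further down is the in-tree user. A hedged sketch of such a caller (the function name and the choice of CPU 0 are made up):

#include <linux/sched.h>
#include <linux/cpumask.h>

/* pin a task onto CPU 0, e.g. from some platform setup code */
static long pin_task_to_cpu0(pid_t pid)
{
	cpumask_t mask = CPU_MASK_NONE;

	cpu_set(0, mask);
	return sched_setaffinity(pid, mask);
}
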
diff -Nru a/include/linux/serial_core.h b/include/linux/serial_core.h
--- a/include/linux/serial_core.h	2004-09-15 20:29:40 -07:00
+++ b/include/linux/serial_core.h	2004-09-15 20:29:40 -07:00
@@ -176,6 +176,7 @@
 #define UPIO_PORT		(0)
 #define UPIO_HUB6		(1)
 #define UPIO_MEM		(2)
+#define UPIO_MEM32		(3)
 
 	unsigned int		read_status_mask;	/* driver specific */
 	unsigned int		ignore_status_mask;	/* driver specific */
diff -Nru a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
--- a/include/linux/shmem_fs.h	2004-09-15 20:29:40 -07:00
+++ b/include/linux/shmem_fs.h	2004-09-15 20:29:40 -07:00
@@ -10,14 +10,14 @@
 
 struct shmem_inode_info {
 	spinlock_t		lock;
-	unsigned long		next_index;
-	swp_entry_t		i_direct[SHMEM_NR_DIRECT]; /* for the first blocks */
-	struct page	       *i_indirect; /* indirect blocks */
-	unsigned long		alloced;    /* data pages allocated to file */
-	unsigned long		swapped;    /* subtotal assigned to swap */
 	unsigned long		flags;
-	struct shared_policy     policy;
-	struct list_head	list;
+	unsigned long		alloced;	/* data pages alloced to file */
+	unsigned long		swapped;	/* subtotal assigned to swap */
+	unsigned long		next_index;	/* highest alloced index + 1 */
+	struct shared_policy	policy;		/* NUMA memory alloc policy */
+	struct page		*i_indirect;	/* top indirect blocks page */
+	swp_entry_t		i_direct[SHMEM_NR_DIRECT]; /* first blocks */
+	struct list_head	swaplist;	/* chain of maybes on swap */
 	struct inode		vfs_inode;
 };
 
diff -Nru a/include/linux/slab.h b/include/linux/slab.h
--- a/include/linux/slab.h	2004-09-15 20:29:40 -07:00
+++ b/include/linux/slab.h	2004-09-15 20:29:40 -07:00
@@ -55,7 +55,6 @@
 /* prototypes */
 extern void kmem_cache_init(void);
 
-extern kmem_cache_t *kmem_find_general_cachep(size_t, int gfpflags);
 extern kmem_cache_t *kmem_cache_create(const char *, size_t, size_t, unsigned long,
 				       void (*)(void *, kmem_cache_t *, unsigned long),
 				       void (*)(void *, kmem_cache_t *, unsigned long));
diff -Nru a/include/net/inet_ecn.h b/include/net/inet_ecn.h
--- a/include/net/inet_ecn.h	2004-09-15 20:29:40 -07:00
+++ b/include/net/inet_ecn.h	2004-09-15 20:29:40 -07:00
@@ -2,6 +2,7 @@
 #define _INET_ECN_H_
 
 #include <linux/ip.h>
+#include <net/dsfield.h>
 
 enum {
 	INET_ECN_NOT_ECT = 0,
@@ -91,6 +92,12 @@
 static inline void IP6_ECN_clear(struct ipv6hdr *iph)
 {
 	*(u32*)iph &= ~htonl(INET_ECN_MASK << 20);
+}
+
+static inline void ipv6_copy_dscp(struct ipv6hdr *outer, struct ipv6hdr *inner)
+{
+	u32 dscp = ipv6_get_dsfield(outer) & ~INET_ECN_MASK;
+	ipv6_change_dsfield(inner, INET_ECN_MASK, dscp);
 }
 
 #endif
diff -Nru a/include/net/ip6_route.h b/include/net/ip6_route.h
--- a/include/net/ip6_route.h	2004-09-15 20:29:40 -07:00
+++ b/include/net/ip6_route.h	2004-09-15 20:29:40 -07:00
@@ -92,6 +92,7 @@
 extern void			rt6_redirect(struct in6_addr *dest,
 					     struct in6_addr *saddr,
 					     struct neighbour *neigh,
+					     u8 *lladdr,
 					     int on_link);
 
 extern void			rt6_pmtu_discovery(struct in6_addr *daddr,
diff -Nru a/include/net/neighbour.h b/include/net/neighbour.h
--- a/include/net/neighbour.h	2004-09-15 20:29:40 -07:00
+++ b/include/net/neighbour.h	2004-09-15 20:29:40 -07:00
@@ -51,7 +51,7 @@
 #include <linux/err.h>
 #include <linux/sysctl.h>
 
-#define NUD_IN_TIMER	(NUD_INCOMPLETE|NUD_DELAY|NUD_PROBE)
+#define NUD_IN_TIMER	(NUD_INCOMPLETE|NUD_REACHABLE|NUD_DELAY|NUD_PROBE)
 #define NUD_VALID	(NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE|NUD_PROBE|NUD_STALE|NUD_DELAY)
 #define NUD_CONNECTED	(NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE)
 
@@ -179,6 +179,13 @@
 	struct pneigh_entry	*phash_buckets[PNEIGH_HASHMASK+1];
 };
 
+/* flags for neigh_update() */
+#define NEIGH_UPDATE_F_OVERRIDE			0x00000001
+#define NEIGH_UPDATE_F_WEAK_OVERRIDE		0x00000002
+#define NEIGH_UPDATE_F_OVERRIDE_ISROUTER	0x00000004
+#define NEIGH_UPDATE_F_ISROUTER			0x40000000
+#define NEIGH_UPDATE_F_ADMIN			0x80000000
+
 extern void			neigh_table_init(struct neigh_table *tbl);
 extern int			neigh_table_clear(struct neigh_table *tbl);
 extern struct neighbour *	neigh_lookup(struct neigh_table *tbl,
@@ -189,7 +196,8 @@
 					     struct net_device *dev);
 extern void			neigh_destroy(struct neighbour *neigh);
 extern int			__neigh_event_send(struct neighbour *neigh, struct sk_buff *skb);
-extern int			neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, int override, int arp);
+extern int			neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, 
+					     u32 flags);
 extern void			neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
 extern int			neigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
 extern int			neigh_resolve_output(struct sk_buff *skb);
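
The neigh_update() signature change folds the old override/arp ints into one flags word; NEIGH_UPDATE_F_ADMIN marks administrative (netlink-style) changes and NEIGH_UPDATE_F_OVERRIDE allows replacing an existing lladdr. A rough sketch of a caller under the new signature (illustrative only; the wrapper name is invented):

#include <linux/types.h>
#include <linux/rtnetlink.h>
#include <net/neighbour.h>

/* install a static entry, overriding whatever state the neighbour had */
static int set_static_neigh(struct neighbour *neigh, const u8 *lladdr)
{
	return neigh_update(neigh, lladdr, NUD_PERMANENT,
			    NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN);
}
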
diff -Nru a/kernel/audit.c b/kernel/audit.c
--- a/kernel/audit.c	2004-09-15 20:29:40 -07:00
+++ b/kernel/audit.c	2004-09-15 20:29:40 -07:00
@@ -810,16 +810,3 @@
 		audit_log_end(ab);
 	}
 }
-
-EXPORT_SYMBOL_GPL(audit_set_rate_limit);
-EXPORT_SYMBOL_GPL(audit_set_backlog_limit);
-EXPORT_SYMBOL_GPL(audit_set_enabled);
-EXPORT_SYMBOL_GPL(audit_set_failure);
-
-EXPORT_SYMBOL_GPL(audit_log_start);
-EXPORT_SYMBOL_GPL(audit_log_format);
-EXPORT_SYMBOL_GPL(audit_log_end_irq);
-EXPORT_SYMBOL_GPL(audit_log_end_fast);
-EXPORT_SYMBOL_GPL(audit_log_end);
-EXPORT_SYMBOL_GPL(audit_log);
-EXPORT_SYMBOL_GPL(audit_log_d_path);
diff -Nru a/kernel/auditsc.c b/kernel/auditsc.c
--- a/kernel/auditsc.c	2004-09-15 20:29:40 -07:00
+++ b/kernel/auditsc.c	2004-09-15 20:29:40 -07:00
@@ -857,6 +857,7 @@
 	}
 #endif
 }
+EXPORT_SYMBOL(audit_putname);
 
 /* Store the inode and device from a lookup.  Called from
  * fs/namei.c:path_lookup(). */
@@ -913,11 +914,3 @@
 	}
 	return 0;
 }
-
-EXPORT_SYMBOL_GPL(audit_alloc);
-EXPORT_SYMBOL_GPL(audit_free);
-EXPORT_SYMBOL_GPL(audit_syscall_entry);
-EXPORT_SYMBOL_GPL(audit_syscall_exit);
-EXPORT_SYMBOL_GPL(audit_getname);
-EXPORT_SYMBOL_GPL(audit_putname);
-EXPORT_SYMBOL_GPL(audit_inode);
diff -Nru a/kernel/compat.c b/kernel/compat.c
--- a/kernel/compat.c	2004-09-15 20:29:40 -07:00
+++ b/kernel/compat.c	2004-09-15 20:29:40 -07:00
@@ -412,92 +412,58 @@
 	}
 }
 
-/*
- * for maximum compatability, we allow programs to use a single (compat)
- * unsigned long bitmask if all cpus will fit. If not, you have to have
- * at least the kernel size available.
- */
-#define USE_COMPAT_ULONG_CPUMASK (NR_CPUS <= BITS_PER_COMPAT_LONG)
-
-asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid, 
-					     unsigned int len,
-					     compat_ulong_t __user *user_mask_ptr)
+static int compat_get_user_cpu_mask(compat_ulong_t __user *user_mask_ptr,
+				    unsigned len, cpumask_t *new_mask)
 {
-	cpumask_t kern_mask;
-	mm_segment_t old_fs;
-	int ret;
+	unsigned long *k;
 
-	if (USE_COMPAT_ULONG_CPUMASK) {
-		compat_ulong_t user_mask;
+	if (len < sizeof(cpumask_t))
+		memset(new_mask, 0, sizeof(cpumask_t));
+	else if (len > sizeof(cpumask_t))
+		len = sizeof(cpumask_t);
 
-		if (len < sizeof(user_mask))
-			return -EINVAL;
+	k = cpus_addr(*new_mask);
+	return compat_get_bitmap(k, user_mask_ptr, len * 8);
+}
 
-		if (get_user(user_mask, user_mask_ptr))
-			return -EFAULT;
+asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid,
+					     unsigned int len,
+					     compat_ulong_t __user *user_mask_ptr)
+{
+	cpumask_t new_mask;
+	int retval;
 
-		cpus_addr(kern_mask)[0] = user_mask;
-	} else {
-		unsigned long *k;
-
-		if (len < sizeof(kern_mask))
-			return -EINVAL;
-
-		k = cpus_addr(kern_mask);
-		ret = compat_get_bitmap(k, user_mask_ptr,
-					sizeof(kern_mask) * BITS_PER_LONG);
-		if (ret)
-			return ret;
-	}
-
-	old_fs = get_fs();
-	set_fs(KERNEL_DS);
-	ret = sys_sched_setaffinity(pid,
-				    sizeof(kern_mask),
-				    (unsigned long __user *) &kern_mask);
-	set_fs(old_fs);
+	retval = compat_get_user_cpu_mask(user_mask_ptr, len, &new_mask);
+	if (retval)
+		return retval;
 
-	return ret;
+	return sched_setaffinity(pid, new_mask);
 }
 
 asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len,
 					     compat_ulong_t __user *user_mask_ptr)
 {
-	cpumask_t kern_mask;
-	mm_segment_t old_fs;
 	int ret;
+	cpumask_t mask;
+	unsigned long *k;
+	unsigned int min_length = sizeof(cpumask_t);
 
-	if (len < (USE_COMPAT_ULONG_CPUMASK ? sizeof(compat_ulong_t)
-				: sizeof(kern_mask)))
-		return -EINVAL;
+	if (NR_CPUS <= BITS_PER_COMPAT_LONG)
+		min_length = sizeof(compat_ulong_t);
 
-	old_fs = get_fs();
-	set_fs(KERNEL_DS);
-	ret = sys_sched_getaffinity(pid,
-				    sizeof(kern_mask),
-				    (unsigned long __user *) &kern_mask);
-	set_fs(old_fs);
+	if (len < min_length)
+		return -EINVAL;
 
+	ret = sched_getaffinity(pid, &mask);
 	if (ret < 0)
 		return ret;
 
-	if (USE_COMPAT_ULONG_CPUMASK) {
-		if (put_user(&cpus_addr(kern_mask)[0], user_mask_ptr))
-			return -EFAULT;
-		ret = sizeof(compat_ulong_t);
-	} else {
-		unsigned long *k;
-
-		k = cpus_addr(kern_mask);
-		ret = compat_put_bitmap(user_mask_ptr, k,
-					sizeof(kern_mask) * BITS_PER_LONG);
-		if (ret)
-			return ret;
-
-		ret = sizeof(kern_mask);
-	}
+	k = cpus_addr(mask);
+	ret = compat_put_bitmap(user_mask_ptr, k, min_length * 8);
+	if (ret)
+		return ret;
 
-	return ret;
+	return min_length;
 }
 
 static int get_compat_itimerspec(struct itimerspec *dst, 
diff -Nru a/kernel/fork.c b/kernel/fork.c
--- a/kernel/fork.c	2004-09-15 20:29:40 -07:00
+++ b/kernel/fork.c	2004-09-15 20:29:40 -07:00
@@ -483,6 +483,7 @@
 		mmdrop(mm);
 	}
 }
+EXPORT_SYMBOL_GPL(mmput);
 
 /**
  * get_task_mm - acquire a reference to the task's mm
@@ -514,6 +515,7 @@
 	task_unlock(task);
 	return mm;
 }
+EXPORT_SYMBOL_GPL(get_task_mm);
 
 /* Please note the differences between mmput and mm_release.
  * mmput is called whenever we stop holding onto a mm_struct,
@@ -1144,9 +1146,7 @@
 bad_fork_cleanup_namespace:
 	exit_namespace(p);
 bad_fork_cleanup_mm:
-	exit_mm(p);
-	if (p->active_mm)
-		mmdrop(p->active_mm);
+	mmput(p->mm);
 bad_fork_cleanup_signal:
 	exit_signal(p);
 bad_fork_cleanup_sighand:
diff -Nru a/kernel/pid.c b/kernel/pid.c
--- a/kernel/pid.c	2004-09-15 20:29:40 -07:00
+++ b/kernel/pid.c	2004-09-15 20:29:40 -07:00
@@ -278,7 +278,7 @@
 
 	for (i = 0; i < PIDTYPE_MAX; i++) {
 		pid_hash[i] = alloc_bootmem(pidhash_size *
-					sizeof(struct list_head));
+					sizeof(*(pid_hash[i])));
 		if (!pid_hash[i])
 			panic("Could not alloc pidhash!\n");
 		for (j = 0; j < pidhash_size; j++)
diff -Nru a/kernel/sched.c b/kernel/sched.c
--- a/kernel/sched.c	2004-09-15 20:29:40 -07:00
+++ b/kernel/sched.c	2004-09-15 20:29:40 -07:00
@@ -1727,8 +1727,8 @@
 		if (tmp->flags & SD_BALANCE_EXEC)
 			sd = tmp;
 
-	schedstat_inc(sd, sbe_attempts);
 	if (sd) {
+		schedstat_inc(sd, sbe_attempts);
 		new_cpu = find_idlest_cpu(current, this_cpu, sd);
 		if (new_cpu != this_cpu) {
 			schedstat_inc(sd, sbe_pushed);
@@ -3362,33 +3362,10 @@
 	return retval;
 }
 
-static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
-			     cpumask_t *new_mask)
-{
-	if (len < sizeof(cpumask_t)) {
-		memset(new_mask, 0, sizeof(cpumask_t));
-	} else if (len > sizeof(cpumask_t)) {
-		len = sizeof(cpumask_t);
-	}
-	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
-}
-
-/**
- * sys_sched_setaffinity - set the cpu affinity of a process
- * @pid: pid of the process
- * @len: length in bytes of the bitmask pointed to by user_mask_ptr
- * @user_mask_ptr: user-space pointer to the new cpu mask
- */
-asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
-				      unsigned long __user *user_mask_ptr)
+long sched_setaffinity(pid_t pid, cpumask_t new_mask)
 {
-	cpumask_t new_mask;
-	int retval;
 	task_t *p;
-
-	retval = get_user_cpu_mask(user_mask_ptr, len, &new_mask);
-	if (retval)
-		return retval;
+	int retval;
 
 	lock_cpu_hotplug();
 	read_lock(&tasklist_lock);
@@ -3421,6 +3398,36 @@
 	return retval;
 }
 
+static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
+			     cpumask_t *new_mask)
+{
+	if (len < sizeof(cpumask_t)) {
+		memset(new_mask, 0, sizeof(cpumask_t));
+	} else if (len > sizeof(cpumask_t)) {
+		len = sizeof(cpumask_t);
+	}
+	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
+}
+
+/**
+ * sys_sched_setaffinity - set the cpu affinity of a process
+ * @pid: pid of the process
+ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
+ * @user_mask_ptr: user-space pointer to the new cpu mask
+ */
+asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
+				      unsigned long __user *user_mask_ptr)
+{
+	cpumask_t new_mask;
+	int retval;
+
+	retval = get_user_cpu_mask(user_mask_ptr, len, &new_mask);
+	if (retval)
+		return retval;
+
+	return sched_setaffinity(pid, new_mask);
+}
+
 /*
  * Represents all cpu's present in the system
  * In systems capable of hotplug, this map could dynamically grow
@@ -3436,24 +3443,11 @@
 cpumask_t cpu_possible_map = CPU_MASK_ALL;
 #endif
 
-/**
- * sys_sched_getaffinity - get the cpu affinity of a process
- * @pid: pid of the process
- * @len: length in bytes of the bitmask pointed to by user_mask_ptr
- * @user_mask_ptr: user-space pointer to hold the current cpu mask
- */
-asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
-				      unsigned long __user *user_mask_ptr)
+long sched_getaffinity(pid_t pid, cpumask_t *mask)
 {
-	unsigned int real_len;
-	cpumask_t mask;
 	int retval;
 	task_t *p;
 
-	real_len = sizeof(mask);
-	if (len < real_len)
-		return -EINVAL;
-
 	lock_cpu_hotplug();
 	read_lock(&tasklist_lock);
 
@@ -3463,16 +3457,40 @@
 		goto out_unlock;
 
 	retval = 0;
-	cpus_and(mask, p->cpus_allowed, cpu_possible_map);
+	cpus_and(*mask, p->cpus_allowed, cpu_possible_map);
 
 out_unlock:
 	read_unlock(&tasklist_lock);
 	unlock_cpu_hotplug();
 	if (retval)
 		return retval;
-	if (copy_to_user(user_mask_ptr, &mask, real_len))
+
+	return 0;
+}
+
+/**
+ * sys_sched_getaffinity - get the cpu affinity of a process
+ * @pid: pid of the process
+ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
+ * @user_mask_ptr: user-space pointer to hold the current cpu mask
+ */
+asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
+				      unsigned long __user *user_mask_ptr)
+{
+	int ret;
+	cpumask_t mask;
+
+	if (len < sizeof(cpumask_t))
+		return -EINVAL;
+
+	ret = sched_getaffinity(pid, &mask);
+	if (ret < 0)
+		return ret;
+
+	if (copy_to_user(user_mask_ptr, &mask, sizeof(cpumask_t)))
 		return -EFAULT;
-	return real_len;
+
+	return sizeof(cpumask_t);
 }
 
 /**
diff -Nru a/kernel/sys.c b/kernel/sys.c
--- a/kernel/sys.c	2004-09-15 20:29:40 -07:00
+++ b/kernel/sys.c	2004-09-15 20:29:40 -07:00
@@ -274,7 +274,9 @@
 cond_syscall(sys_mbind)
 cond_syscall(sys_get_mempolicy)
 cond_syscall(sys_set_mempolicy)
+cond_syscall(compat_mbind)
 cond_syscall(compat_get_mempolicy)
+cond_syscall(compat_set_mempolicy)
 
 /* arch-specific weak syscall entries */
 cond_syscall(sys_pciconfig_read)
@@ -1727,6 +1729,17 @@
 			}
 			current->keep_capabilities = arg2;
 			break;
+		case PR_SET_NAME: {
+			struct task_struct *me = current;
+			unsigned char ncomm[sizeof(me->comm)];
+
+			ncomm[sizeof(me->comm)-1] = 0;
+			if (strncpy_from_user(ncomm, (char *)arg2,
+						sizeof(me->comm)-1) < 0)
+				return -EFAULT;
+			set_task_comm(me, ncomm);
+			return 0;
+		}
 		default:
 			error = -EINVAL;
 			break;
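
PR_SET_NAME lets a process rename itself: the string is copied into task->comm, so anything past 15 characters is silently truncated. A small userspace sketch (illustrative, not part of the patch) exercising it:

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_NAME
#define PR_SET_NAME 15		/* as added to <linux/prctl.h> above */
#endif

int main(void)
{
	if (prctl(PR_SET_NAME, "worker-thread") < 0) {
		perror("prctl(PR_SET_NAME)");
		return 1;
	}
	/* the new name shows up in the comm field of /proc/<pid>/stat */
	return 0;
}
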
diff -Nru a/lib/Makefile b/lib/Makefile
--- a/lib/Makefile	2004-09-15 20:29:40 -07:00
+++ b/lib/Makefile	2004-09-15 20:29:40 -07:00
@@ -10,6 +10,7 @@
 
 lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
+lib-$(CONFIG_GENERIC_IOMAP) += iomap.o
 
 ifneq ($(CONFIG_HAVE_DEC_LOCK),y) 
   lib-y += dec_and_lock.o
diff -Nru a/lib/iomap.c b/lib/iomap.c
--- /dev/null	Wed Dec 31 16:00:00 196900
+++ b/lib/iomap.c	2004-09-15 20:29:40 -07:00
@@ -0,0 +1,206 @@
+/*
+ * Implement the default iomap interfaces
+ */
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <asm/io.h>
+
+/*
+ * Read/write from/to an (offsettable) iomem cookie. It might be a PIO
+ * access or a MMIO access, these functions don't care. The info is
+ * encoded in the hardware mapping set up by the mapping functions
+ * (or the cookie itself, depending on implementation and hw).
+ *
+ * The generic routines don't assume any hardware mappings, and just
+ * encode the PIO/MMIO as part of the cookie. They coldly assume that
+ * the MMIO IO mappings are not in the low address range.
+ *
+ * Architectures for which this is not true can't use this generic
+ * implementation and should do their own copy.
+ *
+ * We encode the physical PIO addresses (0-0xffff) into the
+ * pointer by offsetting them with a constant (0x10000) and
+ * assuming that all the low addresses are always PIO. That means
+ * we can do some sanity checks on the low bits, and don't
+ * need to just take things for granted.
+ */
+#define PIO_OFFSET	0x10000UL
+#define PIO_MASK	0x0ffffUL
+#define PIO_RESERVED	0x40000UL
+
+/*
+ * Ugly macros are a way of life.
+ */
+#define VERIFY_PIO(port) BUG_ON((port & ~PIO_MASK) != PIO_OFFSET)
+
+#define IO_COND(addr, is_pio, is_mmio) do {			\
+	unsigned long port = (unsigned long __force)addr;	\
+	if (port < PIO_RESERVED) {				\
+		VERIFY_PIO(port);				\
+		port &= PIO_MASK;				\
+		is_pio;						\
+	} else {						\
+		is_mmio;					\
+	}							\
+} while (0)
+
+unsigned int fastcall ioread8(void __iomem *addr)
+{
+	IO_COND(addr, return inb(port), return readb(addr));
+}
+unsigned int fastcall ioread16(void __iomem *addr)
+{
+	IO_COND(addr, return inw(port), return readw(addr));
+}
+unsigned int fastcall ioread32(void __iomem *addr)
+{
+	IO_COND(addr, return inl(port), return readl(addr));
+}
+EXPORT_SYMBOL(ioread8);
+EXPORT_SYMBOL(ioread16);
+EXPORT_SYMBOL(ioread32);
+
+void fastcall iowrite8(u8 val, void __iomem *addr)
+{
+	IO_COND(addr, outb(val,port), writeb(val, addr));
+}
+void fastcall iowrite16(u16 val, void __iomem *addr)
+{
+	IO_COND(addr, outw(val,port), writew(val, addr));
+}
+void fastcall iowrite32(u32 val, void __iomem *addr)
+{
+	IO_COND(addr, outl(val,port), writel(val, addr));
+}
+EXPORT_SYMBOL(iowrite8);
+EXPORT_SYMBOL(iowrite16);
+EXPORT_SYMBOL(iowrite32);
+
+/*
+ * These are the "repeat MMIO read/write" functions.
+ * Note the "__raw" accesses, since we don't want to
+ * convert to CPU byte order. We write in "IO byte
+ * order" (we also don't have IO barriers).
+ */
+static inline void mmio_insb(void __iomem *addr, u8 *dst, int count)
+{
+	while (--count >= 0) {
+		u8 data = __raw_readb(addr);
+		*dst = data;
+		dst++;
+	}
+}
+static inline void mmio_insw(void __iomem *addr, u16 *dst, int count)
+{
+	while (--count >= 0) {
+		u16 data = __raw_readw(addr);
+		*dst = data;
+		dst++;
+	}
+}
+static inline void mmio_insl(void __iomem *addr, u32 *dst, int count)
+{
+	while (--count >= 0) {
+		u32 data = __raw_readl(addr);
+		*dst = data;
+		dst++;
+	}
+}
+
+static inline void mmio_outsb(void __iomem *addr, const u8 *src, int count)
+{
+	while (--count >= 0) {
+		__raw_writeb(*src, addr);
+		src++;
+	}
+}
+static inline void mmio_outsw(void __iomem *addr, const u16 *src, int count)
+{
+	while (--count >= 0) {
+		__raw_writew(*src, addr);
+		src++;
+	}
+}
+static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count)
+{
+	while (--count >= 0) {
+		__raw_writel(*src, addr);
+		src++;
+	}
+}
+
+void fastcall ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
+{
+	IO_COND(addr, insb(port,dst,count), mmio_insb(addr, dst, count));
+}
+void fastcall ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
+{
+	IO_COND(addr, insw(port,dst,count), mmio_insw(addr, dst, count));
+}
+void fastcall ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
+{
+	IO_COND(addr, insl(port,dst,count), mmio_insl(addr, dst, count));
+}
+EXPORT_SYMBOL(ioread8_rep);
+EXPORT_SYMBOL(ioread16_rep);
+EXPORT_SYMBOL(ioread32_rep);
+
+void fastcall iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
+{
+	IO_COND(addr, outsb(port, src, count), mmio_outsb(addr, src, count));
+}
+void fastcall iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
+{
+	IO_COND(addr, outsw(port, src, count), mmio_outsw(addr, src, count));
+}
+void fastcall iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
+{
+	IO_COND(addr, outsl(port, src,count), mmio_outsl(addr, src, count));
+}
+EXPORT_SYMBOL(iowrite8_rep);
+EXPORT_SYMBOL(iowrite16_rep);
+EXPORT_SYMBOL(iowrite32_rep);
+
+/* Create a virtual mapping cookie for an IO port range */
+void __iomem *ioport_map(unsigned long port, unsigned int nr)
+{
+	if (port > PIO_MASK)
+		return NULL;
+	return (void __iomem *) (unsigned long) (port + PIO_OFFSET);
+}
+
+void ioport_unmap(void __iomem *addr)
+{
+	/* Nothing to do */
+}
+EXPORT_SYMBOL(ioport_map);
+EXPORT_SYMBOL(ioport_unmap);
+
+/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+	unsigned long start = pci_resource_start(dev, bar);
+	unsigned long len = pci_resource_len(dev, bar);
+	unsigned long flags = pci_resource_flags(dev, bar);
+
+	if (!len || !start)
+		return NULL;
+	if (maxlen && len > maxlen)
+		len = maxlen;
+	if (flags & IORESOURCE_IO)
+		return ioport_map(start, len);
+	if (flags & IORESOURCE_MEM) {
+		if (flags & IORESOURCE_CACHEABLE)
+			return ioremap(start, len);
+		return ioremap_nocache(start, len);
+	}
+	/* What? */
+	return NULL;
+}
+
+void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
+{
+	IO_COND(addr, /* nothing */, iounmap(addr));
+}
+EXPORT_SYMBOL(pci_iomap);
+EXPORT_SYMBOL(pci_iounmap);
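/*
 * Illustrative sketch only, not part of the patch: how a driver would
 * consume the iomap API added above.  It maps BAR 0 of a hypothetical
 * PCI device and pulls one 512-byte sector of 16-bit words through a
 * data register, the SATA/IDE-style transfer the "string" ioread/iowrite
 * routines exist for.  The register offsets, the 0x100 length cap and
 * the command value are invented; only pci_iomap(), iowrite16(),
 * ioread16(), ioread16_rep() and pci_iounmap() come from the code above,
 * and the usual <linux/pci.h> declarations are assumed.
 */
static int example_read_sector(struct pci_dev *pdev, u16 *buf)
{
	void __iomem *regs;

	regs = pci_iomap(pdev, 0, 0x100);	/* same call whether BAR 0 is PIO or MMIO */
	if (!regs)
		return -ENOMEM;

	iowrite16(0x20, regs + 0x0e);		/* hypothetical command register */
	(void) ioread16(regs + 0x0c);		/* hypothetical status register; a real driver would poll it */
	ioread16_rep(regs + 0x00, buf, 256);	/* 256 words = one 512-byte sector */

	pci_iounmap(pdev, regs);
	return 0;
}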
diff -Nru a/mm/highmem.c b/mm/highmem.c
--- a/mm/highmem.c	2004-09-15 20:29:40 -07:00
+++ b/mm/highmem.c	2004-09-15 20:29:40 -07:00
@@ -300,6 +300,7 @@
 		 */
 		vfrom = page_address(fromvec->bv_page) + tovec->bv_offset;
 
+		flush_dcache_page(tovec->bv_page);
 		bounce_copy_vec(tovec, vfrom);
 	}
 }
@@ -406,6 +407,7 @@
 		if (rw == WRITE) {
 			char *vto, *vfrom;
 
+			flush_dcache_page(from->bv_page);
 			vto = page_address(to->bv_page) + to->bv_offset;
 			vfrom = kmap(from->bv_page) + from->bv_offset;
 			memcpy(vto, vfrom, to->bv_len);
diff -Nru a/mm/mempolicy.c b/mm/mempolicy.c
--- a/mm/mempolicy.c	2004-09-15 20:29:40 -07:00
+++ b/mm/mempolicy.c	2004-09-15 20:29:40 -07:00
@@ -132,6 +132,7 @@
 	unsigned long nlongs;
 	unsigned long endmask;
 
+	--maxnode;
 	bitmap_zero(nodes, MAX_NUMNODES);
 	if (maxnode == 0 || !nmask)
 		return 0;
@@ -145,6 +146,8 @@
 	/* When the user specified more nodes than supported just check
 	   if the non supported part is all zero. */
 	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
+		if (nlongs > PAGE_SIZE/sizeof(long))
+			return -EINVAL;
 		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
 			unsigned long t;
 			if (get_user(t,  nmask + k))
@@ -557,7 +560,7 @@
 asmlinkage long compat_set_mempolicy(int mode, compat_ulong_t __user *nmask,
 				     compat_ulong_t maxnode)
 {
-	long err;
+	long err = 0;
 	unsigned long __user *nm = NULL;
 	unsigned long nr_bits, alloc_size;
 	DECLARE_BITMAP(bm, MAX_NUMNODES);
@@ -581,7 +584,7 @@
 			     compat_ulong_t mode, compat_ulong_t __user *nmask,
 			     compat_ulong_t maxnode, compat_ulong_t flags)
 {
-	long err;
+	long err = 0;
 	unsigned long __user *nm = NULL;
 	unsigned long nr_bits, alloc_size;
 	DECLARE_BITMAP(bm, MAX_NUMNODES);
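/*
 * Illustrative arithmetic only, not part of the patch: with 4 KB pages
 * and 8-byte longs the new bound above, PAGE_SIZE/sizeof(long), is 512,
 * so a user-supplied node mask is now limited to 512 longs (32768 node
 * bits).  Without the check, an absurd maxnode made nlongs arbitrarily
 * large, and the "is the unsupported part all zero" loop would then
 * get_user() its way across an unbounded range of user addresses.
 */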
diff -Nru a/mm/shmem.c b/mm/shmem.c
--- a/mm/shmem.c	2004-09-15 20:29:40 -07:00
+++ b/mm/shmem.c	2004-09-15 20:29:40 -07:00
@@ -6,8 +6,8 @@
  *		 2000-2001 Christoph Rohland
  *		 2000-2001 SAP AG
  *		 2002 Red Hat Inc.
- * Copyright (C) 2002-2003 Hugh Dickins.
- * Copyright (C) 2002-2003 VERITAS Software Corporation.
+ * Copyright (C) 2002-2004 Hugh Dickins.
+ * Copyright (C) 2002-2004 VERITAS Software Corporation.
  * Copyright (C) 2004 Andi Kleen, SuSE Labs
  *
  * This file is released under the GPL.
@@ -179,16 +179,18 @@
 	.unplug_io_fn = default_unplug_io_fn,
 };
 
-static LIST_HEAD(shmem_inodes);
-static spinlock_t shmem_ilock = SPIN_LOCK_UNLOCKED;
+static LIST_HEAD(shmem_swaplist);
+static spinlock_t shmem_swaplist_lock = SPIN_LOCK_UNLOCKED;
 
 static void shmem_free_block(struct inode *inode)
 {
 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
-	spin_lock(&sbinfo->stat_lock);
-	sbinfo->free_blocks++;
-	inode->i_blocks -= BLOCKS_PER_PAGE;
-	spin_unlock(&sbinfo->stat_lock);
+	if (sbinfo) {
+		spin_lock(&sbinfo->stat_lock);
+		sbinfo->free_blocks++;
+		inode->i_blocks -= BLOCKS_PER_PAGE;
+		spin_unlock(&sbinfo->stat_lock);
+	}
 }
 
 /*
@@ -213,11 +215,13 @@
 	if (freed > 0) {
 		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
 		info->alloced -= freed;
-		spin_lock(&sbinfo->stat_lock);
-		sbinfo->free_blocks += freed;
-		inode->i_blocks -= freed*BLOCKS_PER_PAGE;
-		spin_unlock(&sbinfo->stat_lock);
 		shmem_unacct_blocks(info->flags, freed);
+		if (sbinfo) {
+			spin_lock(&sbinfo->stat_lock);
+			sbinfo->free_blocks += freed;
+			inode->i_blocks -= freed*BLOCKS_PER_PAGE;
+			spin_unlock(&sbinfo->stat_lock);
+		}
 	}
 }
 
@@ -350,14 +354,16 @@
 		 * page (and perhaps indirect index pages) yet to allocate:
 		 * a waste to allocate index if we cannot allocate data.
 		 */
-		spin_lock(&sbinfo->stat_lock);
-		if (sbinfo->free_blocks <= 1) {
+		if (sbinfo) {
+			spin_lock(&sbinfo->stat_lock);
+			if (sbinfo->free_blocks <= 1) {
+				spin_unlock(&sbinfo->stat_lock);
+				return ERR_PTR(-ENOSPC);
+			}
+			sbinfo->free_blocks--;
+			inode->i_blocks += BLOCKS_PER_PAGE;
 			spin_unlock(&sbinfo->stat_lock);
-			return ERR_PTR(-ENOSPC);
 		}
-		sbinfo->free_blocks--;
-		inode->i_blocks += BLOCKS_PER_PAGE;
-		spin_unlock(&sbinfo->stat_lock);
 
 		spin_unlock(&info->lock);
 		page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping));
@@ -598,17 +604,21 @@
 	struct shmem_inode_info *info = SHMEM_I(inode);
 
 	if (inode->i_op->truncate == shmem_truncate) {
-		spin_lock(&shmem_ilock);
-		list_del(&info->list);
-		spin_unlock(&shmem_ilock);
 		shmem_unacct_size(info->flags, inode->i_size);
 		inode->i_size = 0;
 		shmem_truncate(inode);
+		if (!list_empty(&info->swaplist)) {
+			spin_lock(&shmem_swaplist_lock);
+			list_del_init(&info->swaplist);
+			spin_unlock(&shmem_swaplist_lock);
+		}
+	}
+	if (sbinfo) {
+		BUG_ON(inode->i_blocks);
+		spin_lock(&sbinfo->stat_lock);
+		sbinfo->free_inodes++;
+		spin_unlock(&sbinfo->stat_lock);
 	}
-	BUG_ON(inode->i_blocks);
-	spin_lock(&sbinfo->stat_lock);
-	sbinfo->free_inodes++;
-	spin_unlock(&sbinfo->stat_lock);
 	clear_inode(inode);
 }
 
@@ -713,22 +723,23 @@
  */
 int shmem_unuse(swp_entry_t entry, struct page *page)
 {
-	struct list_head *p;
+	struct list_head *p, *next;
 	struct shmem_inode_info *info;
 	int found = 0;
 
-	spin_lock(&shmem_ilock);
-	list_for_each(p, &shmem_inodes) {
-		info = list_entry(p, struct shmem_inode_info, list);
-
-		if (info->swapped && shmem_unuse_inode(info, entry, page)) {
+	spin_lock(&shmem_swaplist_lock);
+	list_for_each_safe(p, next, &shmem_swaplist) {
+		info = list_entry(p, struct shmem_inode_info, swaplist);
+		if (!info->swapped)
+			list_del_init(&info->swaplist);
+		else if (shmem_unuse_inode(info, entry, page)) {
 			/* move head to start search for next from here */
-			list_move_tail(&shmem_inodes, &info->list);
+			list_move_tail(&shmem_swaplist, &info->swaplist);
 			found = 1;
 			break;
 		}
 	}
-	spin_unlock(&shmem_ilock);
+	spin_unlock(&shmem_swaplist_lock);
 	return found;
 }
 
@@ -770,6 +781,12 @@
 		shmem_swp_set(info, entry, swap.val);
 		shmem_swp_unmap(entry);
 		spin_unlock(&info->lock);
+		if (list_empty(&info->swaplist)) {
+			spin_lock(&shmem_swaplist_lock);
+			/* move instead of add in case we're racing */
+			list_move_tail(&info->swaplist, &shmem_swaplist);
+			spin_unlock(&shmem_swaplist_lock);
+		}
 		unlock_page(page);
 		return 0;
 	}
@@ -868,7 +885,7 @@
 	struct page *swappage;
 	swp_entry_t *entry;
 	swp_entry_t swap;
-	int error, majmin = VM_FAULT_MINOR;
+	int error;
 
 	if (idx >= SHMEM_MAX_INDEX)
 		return -EFBIG;
@@ -906,9 +923,10 @@
 			shmem_swp_unmap(entry);
 			spin_unlock(&info->lock);
 			/* here we actually do the io */
-			if (majmin == VM_FAULT_MINOR && type)
+			if (type && *type == VM_FAULT_MINOR) {
 				inc_page_state(pgmajfault);
-			majmin = VM_FAULT_MAJOR;
+				*type = VM_FAULT_MAJOR;
+			}
 			swappage = shmem_swapin(info, swap, idx);
 			if (!swappage) {
 				spin_lock(&info->lock);
@@ -1001,16 +1019,23 @@
 	} else {
 		shmem_swp_unmap(entry);
 		sbinfo = SHMEM_SB(inode->i_sb);
-		spin_lock(&sbinfo->stat_lock);
-		if (sbinfo->free_blocks == 0 || shmem_acct_block(info->flags)) {
+		if (sbinfo) {
+			spin_lock(&sbinfo->stat_lock);
+			if (sbinfo->free_blocks == 0 ||
+			    shmem_acct_block(info->flags)) {
+				spin_unlock(&sbinfo->stat_lock);
+				spin_unlock(&info->lock);
+				error = -ENOSPC;
+				goto failed;
+			}
+			sbinfo->free_blocks--;
+			inode->i_blocks += BLOCKS_PER_PAGE;
 			spin_unlock(&sbinfo->stat_lock);
+		} else if (shmem_acct_block(info->flags)) {
 			spin_unlock(&info->lock);
 			error = -ENOSPC;
 			goto failed;
 		}
-		sbinfo->free_blocks--;
-		inode->i_blocks += BLOCKS_PER_PAGE;
-		spin_unlock(&sbinfo->stat_lock);
 
 		if (!filepage) {
 			spin_unlock(&info->lock);
@@ -1053,15 +1078,10 @@
 		SetPageUptodate(filepage);
 	}
 done:
-	if (!*pagep) {
-		if (filepage) {
-			unlock_page(filepage);
-			*pagep = filepage;
-		} else
-			*pagep = ZERO_PAGE(0);
+	if (*pagep != filepage) {
+		unlock_page(filepage);
+		*pagep = filepage;
 	}
-	if (type)
-		*type = majmin;
 	return 0;
 
 failed:
@@ -1187,13 +1207,15 @@
 	struct shmem_inode_info *info;
 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
 
-	spin_lock(&sbinfo->stat_lock);
-	if (!sbinfo->free_inodes) {
+	if (sbinfo) {
+		spin_lock(&sbinfo->stat_lock);
+		if (!sbinfo->free_inodes) {
+			spin_unlock(&sbinfo->stat_lock);
+			return NULL;
+		}
+		sbinfo->free_inodes--;
 		spin_unlock(&sbinfo->stat_lock);
-		return NULL;
 	}
-	sbinfo->free_inodes--;
-	spin_unlock(&sbinfo->stat_lock);
 
 	inode = new_inode(sb);
 	if (inode) {
@@ -1209,6 +1231,8 @@
 		memset(info, 0, (char *)inode - (char *)info);
 		spin_lock_init(&info->lock);
  		mpol_shared_policy_init(&info->policy);
+		INIT_LIST_HEAD(&info->swaplist);
+
 		switch (mode & S_IFMT) {
 		default:
 			init_special_inode(inode, mode, dev);
@@ -1216,9 +1240,6 @@
 		case S_IFREG:
 			inode->i_op = &shmem_inode_operations;
 			inode->i_fop = &shmem_file_operations;
-			spin_lock(&shmem_ilock);
-			list_add_tail(&info->list, &shmem_inodes);
-			spin_unlock(&shmem_ilock);
 			break;
 		case S_IFDIR:
 			inode->i_nlink++;
@@ -1234,32 +1255,32 @@
 	return inode;
 }
 
-static int shmem_set_size(struct shmem_sb_info *info,
+#ifdef CONFIG_TMPFS
+
+static int shmem_set_size(struct shmem_sb_info *sbinfo,
 			  unsigned long max_blocks, unsigned long max_inodes)
 {
 	int error;
 	unsigned long blocks, inodes;
 
-	spin_lock(&info->stat_lock);
-	blocks = info->max_blocks - info->free_blocks;
-	inodes = info->max_inodes - info->free_inodes;
+	spin_lock(&sbinfo->stat_lock);
+	blocks = sbinfo->max_blocks - sbinfo->free_blocks;
+	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
 	error = -EINVAL;
 	if (max_blocks < blocks)
 		goto out;
 	if (max_inodes < inodes)
 		goto out;
 	error = 0;
-	info->max_blocks  = max_blocks;
-	info->free_blocks = max_blocks - blocks;
-	info->max_inodes  = max_inodes;
-	info->free_inodes = max_inodes - inodes;
+	sbinfo->max_blocks  = max_blocks;
+	sbinfo->free_blocks = max_blocks - blocks;
+	sbinfo->max_inodes  = max_inodes;
+	sbinfo->free_inodes = max_inodes - inodes;
 out:
-	spin_unlock(&info->stat_lock);
+	spin_unlock(&sbinfo->stat_lock);
 	return error;
 }
 
-#ifdef CONFIG_TMPFS
-
 static struct inode_operations shmem_symlink_inode_operations;
 static struct inode_operations shmem_symlink_inline_operations;
 
@@ -1417,13 +1438,14 @@
 		if (index == end_index) {
 			nr = i_size & ~PAGE_CACHE_MASK;
 			if (nr <= offset) {
-				page_cache_release(page);
+				if (page)
+					page_cache_release(page);
 				break;
 			}
 		}
 		nr -= offset;
 
-		if (page != ZERO_PAGE(0)) {
+		if (page) {
 			/*
 			 * If users can be writing to this page using arbitrary
 			 * virtual addresses, take care about potential aliasing
@@ -1436,7 +1458,8 @@
 			 */
 			if (!offset)
 				mark_page_accessed(page);
-		}
+		} else
+			page = ZERO_PAGE(0);
 
 		/*
 		 * Ok, we have the page, and it's up-to-date, so
@@ -1511,13 +1534,16 @@
 
 	buf->f_type = TMPFS_MAGIC;
 	buf->f_bsize = PAGE_CACHE_SIZE;
-	spin_lock(&sbinfo->stat_lock);
-	buf->f_blocks = sbinfo->max_blocks;
-	buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
-	buf->f_files = sbinfo->max_inodes;
-	buf->f_ffree = sbinfo->free_inodes;
-	spin_unlock(&sbinfo->stat_lock);
 	buf->f_namelen = NAME_MAX;
+	if (sbinfo) {
+		spin_lock(&sbinfo->stat_lock);
+		buf->f_blocks = sbinfo->max_blocks;
+		buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
+		buf->f_files = sbinfo->max_inodes;
+		buf->f_ffree = sbinfo->free_inodes;
+		spin_unlock(&sbinfo->stat_lock);
+	}
+	/* else leave those fields 0 like simple_statfs */
 	return 0;
 }
 
@@ -1567,6 +1593,22 @@
 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
 {
 	struct inode *inode = old_dentry->d_inode;
+	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
+
+	/*
+	 * No ordinary (disk based) filesystem counts links as inodes;
+	 * but each new link needs a new dentry, pinning lowmem, and
+	 * tmpfs dentries cannot be pruned until they are unlinked.
+	 */
+	if (sbinfo) {
+		spin_lock(&sbinfo->stat_lock);
+		if (!sbinfo->free_inodes) {
+			spin_unlock(&sbinfo->stat_lock);
+			return -ENOSPC;
+		}
+		sbinfo->free_inodes--;
+		spin_unlock(&sbinfo->stat_lock);
+	}
 
 	dir->i_size += BOGO_DIRENT_SIZE;
 	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
@@ -1581,6 +1623,15 @@
 {
 	struct inode *inode = dentry->d_inode;
 
+	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) {
+		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
+		if (sbinfo) {
+			spin_lock(&sbinfo->stat_lock);
+			sbinfo->free_inodes++;
+			spin_unlock(&sbinfo->stat_lock);
+		}
+	}
+
 	dir->i_size -= BOGO_DIRENT_SIZE;
 	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
 	inode->i_nlink--;
@@ -1658,9 +1709,6 @@
 			return error;
 		}
 		inode->i_op = &shmem_symlink_inode_operations;
-		spin_lock(&shmem_ilock);
-		list_add_tail(&info->list, &shmem_inodes);
-		spin_unlock(&shmem_ilock);
 		kaddr = kmap_atomic(page, KM_USER0);
 		memcpy(kaddr, symname, len);
 		kunmap_atomic(kaddr, KM_USER0);
@@ -1789,53 +1837,75 @@
 static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
 {
 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
-	unsigned long max_blocks = sbinfo->max_blocks;
-	unsigned long max_inodes = sbinfo->max_inodes;
+	unsigned long max_blocks = 0;
+	unsigned long max_inodes = 0;
 
+	if (sbinfo) {
+		max_blocks = sbinfo->max_blocks;
+		max_inodes = sbinfo->max_inodes;
+	}
 	if (shmem_parse_options(data, NULL, NULL, NULL, &max_blocks, &max_inodes))
 		return -EINVAL;
+	/* Keep it simple: disallow limited <-> unlimited remount */
+	if ((max_blocks || max_inodes) == !sbinfo)
+		return -EINVAL;
+	/* But allow the pointless unlimited -> unlimited remount */
+	if (!sbinfo)
+		return 0;
 	return shmem_set_size(sbinfo, max_blocks, max_inodes);
 }
 #endif
 
+static void shmem_put_super(struct super_block *sb)
+{
+	kfree(sb->s_fs_info);
+	sb->s_fs_info = NULL;
+}
+
 static int shmem_fill_super(struct super_block *sb,
 			    void *data, int silent)
 {
 	struct inode *inode;
 	struct dentry *root;
-	unsigned long blocks, inodes;
 	int mode   = S_IRWXUGO | S_ISVTX;
 	uid_t uid = current->fsuid;
 	gid_t gid = current->fsgid;
-	struct shmem_sb_info *sbinfo;
 	int err = -ENOMEM;
 
-	sbinfo = kmalloc(sizeof(struct shmem_sb_info), GFP_KERNEL);
-	if (!sbinfo)
-		return -ENOMEM;
-	sb->s_fs_info = sbinfo;
-	memset(sbinfo, 0, sizeof(struct shmem_sb_info));
+#ifdef CONFIG_TMPFS
+	unsigned long blocks = 0;
+	unsigned long inodes = 0;
 
 	/*
 	 * Per default we only allow half of the physical ram per
-	 * tmpfs instance
+	 * tmpfs instance, limiting inodes to one per page of lowmem;
+	 * but the internal instance is left unlimited.
 	 */
-	blocks = inodes = totalram_pages / 2;
-
-#ifdef CONFIG_TMPFS
-	if (shmem_parse_options(data, &mode, &uid, &gid, &blocks, &inodes)) {
-		err = -EINVAL;
-		goto failed;
+	if (!(sb->s_flags & MS_NOUSER)) {
+		blocks = totalram_pages / 2;
+		inodes = totalram_pages - totalhigh_pages;
+		if (inodes > blocks)
+			inodes = blocks;
+
+		if (shmem_parse_options(data, &mode,
+					&uid, &gid, &blocks, &inodes))
+			return -EINVAL;
+	}
+
+	if (blocks || inodes) {
+		struct shmem_sb_info *sbinfo;
+		sbinfo = kmalloc(sizeof(struct shmem_sb_info), GFP_KERNEL);
+		if (!sbinfo)
+			return -ENOMEM;
+		sb->s_fs_info = sbinfo;
+		spin_lock_init(&sbinfo->stat_lock);
+		sbinfo->max_blocks = blocks;
+		sbinfo->free_blocks = blocks;
+		sbinfo->max_inodes = inodes;
+		sbinfo->free_inodes = inodes;
 	}
-#else
-	sb->s_flags |= MS_NOUSER;
 #endif
 
-	spin_lock_init(&sbinfo->stat_lock);
-	sbinfo->max_blocks = blocks;
-	sbinfo->free_blocks = blocks;
-	sbinfo->max_inodes = inodes;
-	sbinfo->free_inodes = inodes;
 	sb->s_maxbytes = SHMEM_MAX_BYTES;
 	sb->s_blocksize = PAGE_CACHE_SIZE;
 	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
@@ -1855,17 +1925,10 @@
 failed_iput:
 	iput(inode);
 failed:
-	kfree(sbinfo);
-	sb->s_fs_info = NULL;
+	shmem_put_super(sb);
 	return err;
 }
 
-static void shmem_put_super(struct super_block *sb)
-{
-	kfree(sb->s_fs_info);
-	sb->s_fs_info = NULL;
-}
-
 static kmem_cache_t *shmem_inode_cachep;
 
 static struct inode *shmem_alloc_inode(struct super_block *sb)
@@ -1897,8 +1960,7 @@
 {
 	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
 				sizeof(struct shmem_inode_info),
-				0, SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT,
-				init_once, NULL);
+				0, 0, init_once, NULL);
 	if (shmem_inode_cachep == NULL)
 		return -ENOMEM;
 	return 0;
@@ -2000,15 +2062,13 @@
 #ifdef CONFIG_TMPFS
 	devfs_mk_dir("shm");
 #endif
-	shm_mnt = kern_mount(&tmpfs_fs_type);
+	shm_mnt = do_kern_mount(tmpfs_fs_type.name, MS_NOUSER,
+				tmpfs_fs_type.name, NULL);
 	if (IS_ERR(shm_mnt)) {
 		error = PTR_ERR(shm_mnt);
 		printk(KERN_ERR "Could not kern_mount tmpfs\n");
 		goto out1;
 	}
-
-	/* The internal instance should not do size checking */
-	shmem_set_size(SHMEM_SB(shm_mnt->mnt_sb), ULONG_MAX, ULONG_MAX);
 	return 0;
 
 out1:
@@ -2039,7 +2099,7 @@
 	if (IS_ERR(shm_mnt))
 		return (void *)shm_mnt;
 
-	if (size > SHMEM_MAX_BYTES)
+	if (size < 0 || size > SHMEM_MAX_BYTES)
 		return ERR_PTR(-EINVAL);
 
 	if (shmem_acct_size(flags, size))
@@ -2073,7 +2133,7 @@
 	file->f_mapping = inode->i_mapping;
 	file->f_op = &shmem_file_operations;
 	file->f_mode = FMODE_WRITE | FMODE_READ;
-	return(file);
+	return file;
 
 close_file:
 	put_filp(file);
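/*
 * Sketch only, not part of the patch: the convention the shmem changes
 * above introduce is that sb->s_fs_info may now legitimately be NULL
 * (the internal MS_NOUSER mount stays unlimited and never allocates a
 * shmem_sb_info), so every block/inode accounting site is guarded.  A
 * new caller would follow the same shape; the ENOSPC handling shown in
 * the real hunks is omitted here for brevity.
 */
static void example_charge_one_block(struct inode *inode)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (sbinfo) {				/* limited mount: account against the superblock */
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_blocks--;
		inode->i_blocks += BLOCKS_PER_PAGE;
		spin_unlock(&sbinfo->stat_lock);
	}
	/* unlimited internal mount: nothing to account */
}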
diff -Nru a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c	2004-09-15 20:29:40 -07:00
+++ b/mm/slab.c	2004-09-15 20:29:40 -07:00
@@ -562,6 +562,22 @@
 	return cachep->array[smp_processor_id()];
 }
 
+static kmem_cache_t * kmem_find_general_cachep (size_t size, int gfpflags)
+{
+	struct cache_sizes *csizep = malloc_sizes;
+
+	/* This function could be moved to the header file, and
+	 * made inline so consumers can quickly determine what
+	 * cache pointer they require.
+	 */
+	for ( ; csizep->cs_size; csizep++) {
+		if (size > csizep->cs_size)
+			continue;
+		break;
+	}
+	return (gfpflags & GFP_DMA) ? csizep->cs_dmacachep : csizep->cs_cachep;
+}
+
 /* Cal the num objs, wastage, and bytes left over for a given slab size. */
 static void cache_estimate (unsigned long gfporder, size_t size, size_t align,
 		 int flags, size_t *left_over, unsigned int *num)
@@ -2554,24 +2570,6 @@
 }
 
 EXPORT_SYMBOL(kmem_cache_size);
-
-kmem_cache_t * kmem_find_general_cachep (size_t size, int gfpflags)
-{
-	struct cache_sizes *csizep = malloc_sizes;
-
-	/* This function could be moved to the header file, and
-	 * made inline so consumers can quickly determine what
-	 * cache pointer they require.
-	 */
-	for ( ; csizep->cs_size; csizep++) {
-		if (size > csizep->cs_size)
-			continue;
-		break;
-	}
-	return (gfpflags & GFP_DMA) ? csizep->cs_dmacachep : csizep->cs_cachep;
-}
-
-EXPORT_SYMBOL(kmem_find_general_cachep);
 
 struct ccupdate_struct {
 	kmem_cache_t *cachep;
diff -Nru a/net/atm/clip.c b/net/atm/clip.c
--- a/net/atm/clip.c	2004-09-15 20:29:40 -07:00
+++ b/net/atm/clip.c	2004-09-15 20:29:40 -07:00
@@ -110,7 +110,8 @@
 				goto out;
 			entry->expires = jiffies-1;
 				/* force resolution or expiration */
-			error = neigh_update(entry->neigh,NULL,NUD_NONE,0,0);
+			error = neigh_update(entry->neigh, NULL, NUD_NONE,
+					     NEIGH_UPDATE_F_ADMIN);
 			if (error)
 				printk(KERN_CRIT "unlink_clip_vcc: "
 				    "neigh_update failed with %d\n",error);
@@ -570,7 +571,8 @@
 		}
 		link_vcc(clip_vcc,entry);
 	}
-	error = neigh_update(neigh,llc_oui,NUD_PERMANENT,1,0);
+	error = neigh_update(neigh, llc_oui, NUD_PERMANENT, 
+			     NEIGH_UPDATE_F_OVERRIDE|NEIGH_UPDATE_F_ADMIN);
 	neigh_release(neigh);
 	return error;
 }
diff -Nru a/net/core/neighbour.c b/net/core/neighbour.c
--- a/net/core/neighbour.c	2004-09-15 20:29:40 -07:00
+++ b/net/core/neighbour.c	2004-09-15 20:29:40 -07:00
@@ -133,7 +133,7 @@
 			if (atomic_read(&n->refcnt) == 1 &&
 			    !(n->nud_state & NUD_PERMANENT) &&
 			    (n->nud_state != NUD_INCOMPLETE ||
-			     jiffies - n->used > n->parms->retrans_time)) {
+			     time_after(jiffies, n->used + n->parms->retrans_time))) {
 				*np	= n->next;
 				n->dead = 1;
 				shrunk	= 1;
@@ -255,7 +255,7 @@
 
 	if (tbl->entries > tbl->gc_thresh3 ||
 	    (tbl->entries > tbl->gc_thresh2 &&
-	     now - tbl->last_flush > 5 * HZ)) {
+	     time_after(now, tbl->last_flush + 5 * HZ))) {
 		if (!neigh_forced_gc(tbl) &&
 		    tbl->entries > tbl->gc_thresh3)
 			goto out;
@@ -542,40 +542,6 @@
 		hh->hh_output = neigh->ops->hh_output;
 }
 
-/*
-   Transitions NUD_STALE <-> NUD_REACHABLE do not occur
-   when fast path is built: we have no timers associated with
-   these states, we do not have time to check state when sending.
-   neigh_periodic_timer check periodically neigh->confirmed
-   time and moves NUD_REACHABLE -> NUD_STALE.
-
-   If a routine wants to know TRUE entry state, it calls
-   neigh_sync before checking state.
-
-   Called with write_locked neigh.
- */
-
-static void neigh_sync(struct neighbour *n)
-{
-	unsigned long now = jiffies;
-	u8 state = n->nud_state;
-
-	if (state & (NUD_NOARP | NUD_PERMANENT))
-		return;
-	if (state & NUD_REACHABLE) {
-		if (now - n->confirmed > n->parms->reachable_time) {
-			n->nud_state = NUD_STALE;
-			neigh_suspect(n);
-		}
-	} else if (state & NUD_VALID) {
-		if (now - n->confirmed < n->parms->reachable_time) {
-			neigh_del_timer(n);
-			n->nud_state = NUD_REACHABLE;
-			neigh_connect(n);
-		}
-	}
-}
-
 static void neigh_periodic_timer(unsigned long arg)
 {
 	struct neigh_table *tbl = (struct neigh_table *)arg;
@@ -589,7 +555,7 @@
 	 *	periodically recompute ReachableTime from random function
 	 */
 
-	if (now - tbl->last_rand > 300 * HZ) {
+	if (time_after(now, tbl->last_rand + 300 * HZ)) {
 		struct neigh_parms *p;
 		tbl->last_rand = now;
 		for (p = &tbl->parms; p; p = p->next)
@@ -612,24 +578,18 @@
 				goto next_elt;
 			}
 
-			if ((long)(n->used - n->confirmed) < 0)
+			if (time_before(n->used, n->confirmed))
 				n->used = n->confirmed;
 
 			if (atomic_read(&n->refcnt) == 1 &&
 			    (state == NUD_FAILED ||
-			     now - n->used > n->parms->gc_staletime)) {
+			     time_after(now, n->used + n->parms->gc_staletime))) {
 				*np = n->next;
 				n->dead = 1;
 				write_unlock(&n->lock);
 				neigh_release(n);
 				continue;
 			}
-
-			if (n->nud_state & NUD_REACHABLE &&
-			    now - n->confirmed > n->parms->reachable_time) {
-				n->nud_state = NUD_STALE;
-				neigh_suspect(n);
-			}
 			write_unlock(&n->lock);
 
 next_elt:
@@ -654,7 +614,7 @@
 
 static void neigh_timer_handler(unsigned long arg)
 {
-	unsigned long now = jiffies;
+	unsigned long now, next;
 	struct neighbour *neigh = (struct neighbour *)arg;
 	unsigned state;
 	int notify = 0;
@@ -662,6 +622,8 @@
 	write_lock(&neigh->lock);
 
 	state = neigh->nud_state;
+	now = jiffies;
+	next = now + HZ;
 
 	if (!(state & NUD_IN_TIMER)) {
 #ifndef CONFIG_SMP
@@ -670,20 +632,42 @@
 		goto out;
 	}
 
-	if ((state & NUD_VALID) &&
-	    now - neigh->confirmed < neigh->parms->reachable_time) {
-		neigh->nud_state = NUD_REACHABLE;
-		NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
-		neigh_connect(neigh);
-		goto out;
-	}
-	if (state == NUD_DELAY) {
-		NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
-		neigh->nud_state = NUD_PROBE;
-		atomic_set(&neigh->probes, 0);
+	if (state & NUD_REACHABLE) {
+		if (time_before_eq(now, 
+				   neigh->confirmed + neigh->parms->reachable_time)) {
+			NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
+			next = neigh->confirmed + neigh->parms->reachable_time;
+		} else if (time_before_eq(now,
+					  neigh->used + neigh->parms->delay_probe_time)) {
+			NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
+			neigh->nud_state = NUD_DELAY;
+			neigh_suspect(neigh);
+			next = now + neigh->parms->delay_probe_time;
+		} else {
+			NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
+			neigh->nud_state = NUD_STALE;
+			neigh_suspect(neigh);
+		}
+	} else if (state & NUD_DELAY) {
+		if (time_before_eq(now, 
+				   neigh->confirmed + neigh->parms->delay_probe_time)) {
+			NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
+			neigh->nud_state = NUD_REACHABLE;
+			neigh_connect(neigh);
+			next = neigh->confirmed + neigh->parms->reachable_time;
+		} else {
+			NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
+			neigh->nud_state = NUD_PROBE;
+			atomic_set(&neigh->probes, 0);
+			next = now + neigh->parms->retrans_time;
+		}
+	} else {
+		/* NUD_PROBE|NUD_INCOMPLETE */
+		next = now + neigh->parms->retrans_time;
 	}
 
-	if (atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
+	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
+	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
 		struct sk_buff *skb;
 
 		neigh->nud_state = NUD_FAILED;
@@ -703,19 +687,24 @@
 			write_lock(&neigh->lock);
 		}
 		skb_queue_purge(&neigh->arp_queue);
-		goto out;
 	}
 
-	neigh->timer.expires = now + neigh->parms->retrans_time;
-	add_timer(&neigh->timer);
-	write_unlock(&neigh->lock);
-
-	neigh->ops->solicit(neigh, skb_peek(&neigh->arp_queue));
-	atomic_inc(&neigh->probes);
-	return;
-
+	if (neigh->nud_state & NUD_IN_TIMER) {
+		neigh_hold(neigh);
+		if (time_before(next, jiffies + HZ/2))
+			next = jiffies + HZ/2;
+		neigh->timer.expires = next;
+		add_timer(&neigh->timer);
+	}
+	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
+		write_unlock(&neigh->lock);
+		neigh->ops->solicit(neigh, skb_peek(&neigh->arp_queue));
+		atomic_inc(&neigh->probes);
+	} else {
 out:
-	write_unlock(&neigh->lock);
+		write_unlock(&neigh->lock);
+	}
+
 #ifdef CONFIG_ARPD
 	if (notify && neigh->parms->app_probes)
 		neigh_app_notify(neigh);
@@ -726,6 +715,7 @@
 int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
 {
 	int rc;
+	unsigned long now;
 
 	write_lock_bh(&neigh->lock);
 
@@ -733,18 +723,15 @@
 	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
 		goto out_unlock_bh;
 
+	now = jiffies;
+	
 	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
 		if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
 			atomic_set(&neigh->probes, neigh->parms->ucast_probes);
 			neigh->nud_state     = NUD_INCOMPLETE;
 			neigh_hold(neigh);
-			neigh->timer.expires = jiffies +
-					       neigh->parms->retrans_time;
+			neigh->timer.expires = now + 1;
 			add_timer(&neigh->timer);
-			write_unlock_bh(&neigh->lock);
-			neigh->ops->solicit(neigh, skb);
-			atomic_inc(&neigh->probes);
-			write_lock_bh(&neigh->lock);
 		} else {
 			neigh->nud_state = NUD_FAILED;
 			write_unlock_bh(&neigh->lock);
@@ -753,6 +740,12 @@
 				kfree_skb(skb);
 			return 1;
 		}
+	} else if (neigh->nud_state & NUD_STALE) {
+		NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
+		neigh_hold(neigh);
+		neigh->nud_state = NUD_DELAY;
+		neigh->timer.expires = jiffies + neigh->parms->delay_probe_time;
+		add_timer(&neigh->timer);
 	}
 
 	if (neigh->nud_state == NUD_INCOMPLETE) {
@@ -767,13 +760,6 @@
 			__skb_queue_tail(&neigh->arp_queue, skb);
 		}
 		rc = 1;
-	} else if (neigh->nud_state == NUD_STALE) {
-		NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
-		neigh_hold(neigh);
-		neigh->nud_state = NUD_DELAY;
-		neigh->timer.expires = jiffies + neigh->parms->delay_probe_time;
-		add_timer(&neigh->timer);
-		rc = 0;
 	}
 out_unlock_bh:
 	write_unlock_bh(&neigh->lock);
@@ -800,14 +786,26 @@
 /* Generic update routine.
    -- lladdr is new lladdr or NULL, if it is not supplied.
    -- new    is new state.
-   -- override == 1 allows to override existing lladdr, if it is different.
-   -- arp == 0 means that the change is administrative.
+   -- flags
+	NEIGH_UPDATE_F_OVERRIDE allows to override existing lladdr,
+				if it is different.
+	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect existing "connected"
+				lladdr instead of overriding it 
+				if it is different.
+				It also allows to retain current state
+				if lladdr is unchanged.
+	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.
+
+	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows to override existing 
+				NTF_ROUTER flag.
+	NEIGH_UPDATE_F_ISROUTER	indicates if the neighbour is known as
+				a router.
 
    Caller MUST hold reference count on the entry.
  */
 
 int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
-		 int override, int arp)
+		 u32 flags)
 {
 	u8 old;
 	int err;
@@ -815,6 +813,7 @@
 	int notify = 0;
 #endif
 	struct net_device *dev;
+	int update_isrouter = 0;
 
 	write_lock_bh(&neigh->lock);
 
@@ -822,7 +821,8 @@
 	old    = neigh->nud_state;
 	err    = -EPERM;
 
-	if (arp && (old & (NUD_NOARP | NUD_PERMANENT)))
+	if (!(flags & NEIGH_UPDATE_F_ADMIN) && 
+	    (old & (NUD_NOARP | NUD_PERMANENT)))
 		goto out;
 
 	if (!(new & NUD_VALID)) {
@@ -847,12 +847,9 @@
 		   - compare new & old
 		   - if they are different, check override flag
 		 */
-		if (old & NUD_VALID) {
-			if (!memcmp(lladdr, neigh->ha, dev->addr_len))
-				lladdr = neigh->ha;
-			else if (!override)
-				goto out;
-		}
+		if ((old & NUD_VALID) && 
+		    !memcmp(lladdr, neigh->ha, dev->addr_len))
+			lladdr = neigh->ha;
 	} else {
 		/* No address is supplied; if we know something,
 		   use it, otherwise discard the request.
@@ -863,8 +860,6 @@
 		lladdr = neigh->ha;
 	}
 
-	neigh_sync(neigh);
-	old = neigh->nud_state;
 	if (new & NUD_CONNECTED)
 		neigh->confirmed = jiffies;
 	neigh->updated = jiffies;
@@ -873,12 +868,37 @@
 	   do not change entry state, if new one is STALE.
 	 */
 	err = 0;
-	if ((old & NUD_VALID) && lladdr == neigh->ha &&
-	    (new == old || (new == NUD_STALE && (old & NUD_CONNECTED))))
-		goto out;
+	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
+	if (old & NUD_VALID) {
+		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
+			update_isrouter = 0;
+			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
+			    (old & NUD_CONNECTED)) {
+				lladdr = neigh->ha;
+				new = NUD_STALE;
+			} else
+				goto out;
+		} else {
+			if (lladdr == neigh->ha && new == NUD_STALE &&
+			    ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
+			     (old & NUD_CONNECTED))
+			    )
+				new = old;
+		}
+	}
+
+	if (new != old) {
+		neigh_del_timer(neigh);
+		if (new & NUD_IN_TIMER) {
+			neigh_hold(neigh);
+			neigh->timer.expires = jiffies + 
+						((new & NUD_REACHABLE) ? 
+						 neigh->parms->reachable_time : 0);
+			add_timer(&neigh->timer);
+		}
+		neigh->nud_state = new;
+	}
 
-	neigh_del_timer(neigh);
-	neigh->nud_state = new;
 	if (lladdr != neigh->ha) {
 		memcpy(&neigh->ha, lladdr, dev->addr_len);
 		neigh_update_hhs(neigh);
@@ -913,6 +933,11 @@
 		skb_queue_purge(&neigh->arp_queue);
 	}
 out:
+	if (update_isrouter) {
+		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
+			(neigh->flags | NTF_ROUTER) :
+			(neigh->flags & ~NTF_ROUTER);
+	}
 	write_unlock_bh(&neigh->lock);
 #ifdef CONFIG_ARPD
 	if (notify && neigh->parms->app_probes)
@@ -928,7 +953,8 @@
 	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
 						 lladdr || !dev->addr_len);
 	if (neigh)
-		neigh_update(neigh, lladdr, NUD_STALE, 1, 1);
+		neigh_update(neigh, lladdr, NUD_STALE, 
+			     NEIGH_UPDATE_F_OVERRIDE);
 	return neigh;
 }
 
@@ -1094,26 +1120,25 @@
 		    struct sk_buff *skb)
 {
 	unsigned long now = jiffies;
-	long sched_next = net_random() % p->proxy_delay;
+	unsigned long sched_next = now + (net_random() % p->proxy_delay);
 
 	if (tbl->proxy_queue.qlen > p->proxy_qlen) {
 		kfree_skb(skb);
 		return;
 	}
 	skb->stamp.tv_sec  = LOCALLY_ENQUEUED;
-	skb->stamp.tv_usec = now + sched_next;
+	skb->stamp.tv_usec = sched_next;
 
 	spin_lock(&tbl->proxy_queue.lock);
 	if (del_timer(&tbl->proxy_timer)) {
-		long tval = tbl->proxy_timer.expires - now;
-		if (tval < sched_next)
-			sched_next = tval;
+		if (time_before(tbl->proxy_timer.expires, sched_next))
+			sched_next = tbl->proxy_timer.expires;
 	}
 	dst_release(skb->dst);
 	skb->dst = NULL;
 	dev_hold(skb->dev);
 	__skb_queue_tail(&tbl->proxy_queue, skb);
-	mod_timer(&tbl->proxy_timer, now + sched_next);
+	mod_timer(&tbl->proxy_timer, sched_next);
 	spin_unlock(&tbl->proxy_queue.lock);
 }
 
@@ -1274,7 +1299,9 @@
 
 		n = neigh_lookup(tbl, RTA_DATA(nda[NDA_DST - 1]), dev);
 		if (n) {
-			err = neigh_update(n, NULL, NUD_FAILED, 1, 0);
+			err = neigh_update(n, NULL, NUD_FAILED, 
+					   NEIGH_UPDATE_F_OVERRIDE|
+					   NEIGH_UPDATE_F_ADMIN);
 			neigh_release(n);
 		}
 		goto out_dev_put;
@@ -1347,7 +1374,8 @@
 						RTA_DATA(nda[NDA_LLADDR - 1]) :
 						NULL,
 					   ndm->ndm_state,
-					   override, 0);
+					   (override ? NEIGH_UPDATE_F_OVERRIDE : 0) |
+					   NEIGH_UPDATE_F_ADMIN);
 		}
 		if (n)
 			neigh_release(n);
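/*
 * Illustration only, not part of the patch: how existing callers map
 * onto the flag-based neigh_update() signature introduced above.  The
 * old boolean pair (override, arp) translates as
 *
 *	neigh_update(n, lladdr, state, 1, 1);		old: override, ARP-driven
 *	neigh_update(n, lladdr, state, NEIGH_UPDATE_F_OVERRIDE);
 *
 *	neigh_update(n, lladdr, state, 1, 0);		old: override, administrative
 *	neigh_update(n, lladdr, state,
 *		     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN);
 *
 * i.e. "arp == 0" becomes NEIGH_UPDATE_F_ADMIN and "override == 1"
 * becomes NEIGH_UPDATE_F_OVERRIDE, while the WEAK_OVERRIDE and ISROUTER
 * bits are new and have no old equivalent.  The jiffies comparisons in
 * this file were also converted to time_after()/time_before(), which
 * stay correct when jiffies wraps around.
 */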
diff -Nru a/net/core/netpoll.c b/net/core/netpoll.c
--- a/net/core/netpoll.c	2004-09-15 20:29:40 -07:00
+++ b/net/core/netpoll.c	2004-09-15 20:29:40 -07:00
@@ -242,9 +242,9 @@
 	iph = (struct iphdr *)skb_push(skb, sizeof(*iph));
 
 	/* iph->version = 4; iph->ihl = 5; */
-	put_unaligned(0x54, (unsigned char *)iph);
+	put_unaligned(0x45, (unsigned char *)iph);
 	iph->tos      = 0;
-	put_unaligned(htonl(ip_len), &(iph->tot_len));
+	put_unaligned(htons(ip_len), &(iph->tot_len));
 	iph->id       = 0;
 	iph->frag_off = 0;
 	iph->ttl      = 64;
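/*
 * Not part of the patch: why the two one-byte fixes above are right.
 * The first octet of an IPv4 header packs the version in the high
 * nibble and the header length (in 32-bit words) in the low nibble, so
 * "version 4, 5-word (20-byte) header" is (4 << 4) | 5 = 0x45; the old
 * 0x54 had the nibbles swapped.  tot_len is a 16-bit field, so it must
 * be byte-swapped with htons(), not htonl().
 */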
diff -Nru a/net/ipv4/arp.c b/net/ipv4/arp.c
--- a/net/ipv4/arp.c	2004-09-15 20:29:40 -07:00
+++ b/net/ipv4/arp.c	2004-09-15 20:29:40 -07:00
@@ -914,7 +914,7 @@
 		if (arp->ar_op != htons(ARPOP_REPLY) ||
 		    skb->pkt_type != PACKET_HOST)
 			state = NUD_STALE;
-		neigh_update(n, sha, state, override, 1);
+		neigh_update(n, sha, state, override ? NEIGH_UPDATE_F_OVERRIDE : 0);
 		neigh_release(n);
 	}
 
@@ -1021,7 +1021,9 @@
 		if (r->arp_flags & ATF_PERM)
 			state = NUD_PERMANENT;
 		err = neigh_update(neigh, (r->arp_flags&ATF_COM) ?
-				   r->arp_ha.sa_data : NULL, state, 1, 0);
+				   r->arp_ha.sa_data : NULL, state, 
+				   NEIGH_UPDATE_F_OVERRIDE|
+				   NEIGH_UPDATE_F_ADMIN);
 		neigh_release(neigh);
 	}
 	return err;
@@ -1101,7 +1103,9 @@
 	neigh = neigh_lookup(&arp_tbl, &ip, dev);
 	if (neigh) {
 		if (neigh->nud_state&~NUD_NOARP)
-			err = neigh_update(neigh, NULL, NUD_FAILED, 1, 0);
+			err = neigh_update(neigh, NULL, NUD_FAILED, 
+					   NEIGH_UPDATE_F_OVERRIDE|
+					   NEIGH_UPDATE_F_ADMIN);
 		neigh_release(neigh);
 	}
 	return err;
diff -Nru a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
--- a/net/ipv4/ip_gre.c	2004-09-15 20:29:40 -07:00
+++ b/net/ipv4/ip_gre.c	2004-09-15 20:29:40 -07:00
@@ -603,13 +603,24 @@
 	if ((tunnel = ipgre_tunnel_lookup(iph->saddr, iph->daddr, key)) != NULL) {
 		secpath_reset(skb);
 
+		skb->protocol = *(u16*)(h + 2);
+		/* WCCP version 1 and 2 protocol decoding.
+		 * - Change protocol to IP
+		 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
+		 */
+		if (flags == 0 &&
+		    skb->protocol == __constant_htons(ETH_P_WCCP)) {
+			skb->protocol = __constant_htons(ETH_P_IP);
+			if ((*(h + offset) & 0xF0) != 0x40) 
+				offset += 4;
+		}
+
 		skb->mac.raw = skb->nh.raw;
 		skb->nh.raw = __pskb_pull(skb, offset);
 		memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
 		if (skb->ip_summed == CHECKSUM_HW)
 			skb->csum = csum_sub(skb->csum,
 					     csum_partial(skb->mac.raw, skb->nh.raw-skb->mac.raw, 0));
-		skb->protocol = *(u16*)(h + 2);
 		skb->pkt_type = PACKET_HOST;
 #ifdef CONFIG_NET_IPGRE_BROADCAST
 		if (MULTICAST(iph->daddr)) {
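/*
 * Not part of the patch: the nibble test added above works because a
 * WCCP version 2 GRE payload carries an extra 4-byte redirect header in
 * front of the encapsulated IP packet, while version 1 does not.  If
 * the byte at the current offset does not begin with the IPv4 version
 * nibble (0x4x), the code assumes that extra header is present and
 * skips the 4 bytes before handing the packet up as ETH_P_IP.
 */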
diff -Nru a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
--- a/net/ipv4/tcp_ipv4.c	2004-09-15 20:29:40 -07:00
+++ b/net/ipv4/tcp_ipv4.c	2004-09-15 20:29:40 -07:00
@@ -1033,11 +1033,7 @@
 
 	switch (type) {
 	case ICMP_SOURCE_QUENCH:
-		/* This is deprecated, but if someone generated it,
-		 * we have no reasons to ignore it.
-		 */
-		if (!sock_owned_by_user(sk))
-			tcp_enter_cwr(tp);
+		/* Just silently ignore these. */
 		goto out;
 	case ICMP_PARAMETERPROB:
 		err = EPROTO;
diff -Nru a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
--- a/net/ipv4/tcp_output.c	2004-09-15 20:29:40 -07:00
+++ b/net/ipv4/tcp_output.c	2004-09-15 20:29:40 -07:00
@@ -991,7 +991,7 @@
 		/* New SKB created, account for it. */
 		new_factor = TCP_SKB_CB(skb)->tso_factor;
 		tcp_dec_pcount_explicit(&tp->packets_out,
-					new_factor - old_factor);
+					old_factor - new_factor);
 		tcp_inc_pcount(&tp->packets_out, skb->next);
 	}
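/*
 * Worked example only, not part of the patch: if a TSO skb covering
 * old_factor = 4 segments is split so that it now covers new_factor = 3
 * and the newly created skb covers the remaining segment, packets_out
 * must first drop by old_factor - new_factor = 1 and is then credited
 * for the new skb, leaving the total at 4.  The reversed expression
 * subtracted a negative amount, inflating packets_out on every split.
 */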
 
diff -Nru a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
--- a/net/ipv6/ip6_fib.c	2004-09-15 20:29:40 -07:00
+++ b/net/ipv6/ip6_fib.c	2004-09-15 20:29:40 -07:00
@@ -49,6 +49,9 @@
 
 struct rt6_statistics	rt6_stats;
 
+extern struct rt6_info *rt6_dflt_pointer;
+extern spinlock_t rt6_dflt_lock;
+
 static kmem_cache_t * fib6_node_kmem;
 
 enum fib_walk_state_t
@@ -1184,6 +1187,10 @@
 	if (rt->rt6i_flags&RTF_EXPIRES && rt->rt6i_expires) {
 		if (time_after(now, rt->rt6i_expires)) {
 			RT6_TRACE("expiring %p\n", rt);
+			spin_lock_bh(&rt6_dflt_lock);
+			if (rt == rt6_dflt_pointer)
+				rt6_dflt_pointer = NULL;
+			spin_unlock_bh(&rt6_dflt_lock);
 			return -1;
 		}
 		gc_args.more++;
@@ -1191,6 +1198,11 @@
 		if (atomic_read(&rt->u.dst.__refcnt) == 0 &&
 		    time_after_eq(now, rt->u.dst.lastuse + gc_args.timeout)) {
 			RT6_TRACE("aging clone %p\n", rt);
+			return -1;
+		} else if ((rt->rt6i_flags & RTF_GATEWAY) &&
+			   (!(rt->rt6i_nexthop->flags & NTF_ROUTER))) {
+			RT6_TRACE("purging route %p via non-router but gateway\n",
+				  rt);
 			return -1;
 		}
 		gc_args.more++;
diff -Nru a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
--- a/net/ipv6/ip6_tunnel.c	2004-09-15 20:29:40 -07:00
+++ b/net/ipv6/ip6_tunnel.c	2004-09-15 20:29:40 -07:00
@@ -542,6 +542,8 @@
 		skb->dev = t->dev;
 		dst_release(skb->dst);
 		skb->dst = NULL;
+		if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
+			ipv6_copy_dscp(ipv6h, skb->nh.ipv6h);
 		ip6ip6_ecn_decapsulate(ipv6h, skb);
 		t->stat.rx_packets++;
 		t->stat.rx_bytes += skb->len;
diff -Nru a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
--- a/net/ipv6/ndisc.c	2004-09-15 20:29:40 -07:00
+++ b/net/ipv6/ndisc.c	2004-09-15 20:29:40 -07:00
@@ -810,8 +810,11 @@
 	 *	update / create cache entry
 	 *	for the source address
 	 */
-	neigh = neigh_event_ns(&nd_tbl, lladdr, saddr, dev);
-
+	neigh = __neigh_lookup(&nd_tbl, saddr, dev, lladdr || !dev->addr_len);
+	if (neigh)
+		neigh_update(neigh, lladdr, NUD_STALE, 
+			     NEIGH_UPDATE_F_WEAK_OVERRIDE|
+			     NEIGH_UPDATE_F_OVERRIDE);
 	if (neigh || !dev->hard_header) {
 		ndisc_send_na(dev, neigh, saddr, &msg->target,
 			      idev->cnf.forwarding, 
@@ -894,24 +897,25 @@
 	neigh = neigh_lookup(&nd_tbl, &msg->target, dev);
 
 	if (neigh) {
-		if (neigh->flags & NTF_ROUTER) {
-			if (msg->icmph.icmp6_router == 0) {
-				/*
-				 *	Change: router to host
-				 */
-				struct rt6_info *rt;
-				rt = rt6_get_dflt_router(saddr, dev);
-				if (rt)
-					ip6_del_rt(rt, NULL, NULL);
-			}
-		} else {
-			if (msg->icmph.icmp6_router)
-				neigh->flags |= NTF_ROUTER;
-		}
+		u8 old_flags = neigh->flags;
 
 		neigh_update(neigh, lladdr,
 			     msg->icmph.icmp6_solicited ? NUD_REACHABLE : NUD_STALE,
-			     msg->icmph.icmp6_override, 1);
+			     NEIGH_UPDATE_F_WEAK_OVERRIDE|
+			     (msg->icmph.icmp6_override ? NEIGH_UPDATE_F_OVERRIDE : 0)|
+			     NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
+			     (msg->icmph.icmp6_router ? NEIGH_UPDATE_F_ISROUTER : 0));
+
+		if ((old_flags & ~neigh->flags) & NTF_ROUTER) {
+			/*
+			 * Change: router to host
+			 */
+			struct rt6_info *rt;
+			rt = rt6_get_dflt_router(saddr, dev);
+			if (rt)
+				ip6_del_rt(rt, NULL, NULL);
+		}
+
 		neigh_release(neigh);
 	}
 }
@@ -1079,7 +1083,11 @@
 				goto out;
 			}
 		}
-		neigh_update(neigh, lladdr, NUD_STALE, 1, 1);
+		neigh_update(neigh, lladdr, NUD_STALE,
+			     NEIGH_UPDATE_F_WEAK_OVERRIDE|
+			     NEIGH_UPDATE_F_OVERRIDE|
+			     NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
+			     NEIGH_UPDATE_F_ISROUTER);
 	}
 
 	if (ndopts.nd_opts_pi) {
@@ -1196,19 +1204,11 @@
 			return;
 		}
 	}
-	/* passed validation tests */
-
-	/*
-	   We install redirect only if nexthop state is valid.
-	 */
 
 	neigh = __neigh_lookup(&nd_tbl, target, skb->dev, 1);
 	if (neigh) {
-		neigh_update(neigh, lladdr, NUD_STALE, 1, 1);
-		if (neigh->nud_state&NUD_VALID)
-			rt6_redirect(dest, &skb->nh.ipv6h->saddr, neigh, on_link);
-		else
-			__neigh_event_send(neigh, NULL);
+		rt6_redirect(dest, &skb->nh.ipv6h->saddr, neigh, lladdr, 
+			     on_link);
 		neigh_release(neigh);
 	}
 	in6_dev_put(in6_dev);
diff -Nru a/net/ipv6/route.c b/net/ipv6/route.c
--- a/net/ipv6/route.c	2004-09-15 20:29:40 -07:00
+++ b/net/ipv6/route.c	2004-09-15 20:29:40 -07:00
@@ -208,8 +208,8 @@
 /*
  *	pointer to the last default router chosen. BH is disabled locally.
  */
-static struct rt6_info *rt6_dflt_pointer;
-static spinlock_t rt6_dflt_lock = SPIN_LOCK_UNLOCKED;
+struct rt6_info *rt6_dflt_pointer;
+spinlock_t rt6_dflt_lock = SPIN_LOCK_UNLOCKED;
 
 /* Default Router Selection (RFC 2461 6.3.6) */
 static struct rt6_info *rt6_best_dflt(struct rt6_info *rt, int oif)
@@ -227,6 +227,10 @@
 		     sprt->rt6i_dev->ifindex == oif))
 			m += 8;
 
+		if ((sprt->rt6i_flags & RTF_EXPIRES) &&
+		    time_after(jiffies, sprt->rt6i_expires))
+			continue;
+
 		if (sprt == rt6_dflt_pointer)
 			m += 4;
 
@@ -1007,7 +1011,7 @@
  *	Handle redirects
  */
 void rt6_redirect(struct in6_addr *dest, struct in6_addr *saddr,
-		  struct neighbour *neigh, int on_link)
+		  struct neighbour *neigh, u8 *lladdr, int on_link)
 {
 	struct rt6_info *rt, *nrt;
 
@@ -1020,22 +1024,13 @@
 	if (neigh->dev != rt->rt6i_dev)
 		goto out;
 
-	/* Redirect received -> path was valid.
-	   Look, redirects are sent only in response to data packets,
-	   so that this nexthop apparently is reachable. --ANK
-	 */
-	dst_confirm(&rt->u.dst);
-
-	/* Duplicate redirect: silently ignore. */
-	if (neigh == rt->u.dst.neighbour)
-		goto out;
-
-	/* Current route is on-link; redirect is always invalid.
-	   
-	   Seems, previous statement is not true. It could
-	   be node, which looks for us as on-link (f.e. proxy ndisc)
-	   But then router serving it might decide, that we should
-	   know truth 8)8) --ANK (980726).
+	/*
+	 * Current route is on-link; redirect is always invalid.
+	 * 
+	 * Seems, previous statement is not true. It could
+	 * be node, which looks for us as on-link (f.e. proxy ndisc)
+	 * But then router serving it might decide, that we should
+	 * know truth 8)8) --ANK (980726).
 	 */
 	if (!(rt->rt6i_flags&RTF_GATEWAY))
 		goto out;
@@ -1047,7 +1042,6 @@
 	 *	is a bit fuzzy and one might need to check all default
 	 *	routers.
 	 */
-
 	if (ipv6_addr_cmp(saddr, &rt->rt6i_gateway)) {
 		if (rt->rt6i_flags & RTF_DEFAULT) {
 			struct rt6_info *rt1;
@@ -1076,6 +1070,24 @@
 	 *	We have finally decided to accept it.
 	 */
 
+	neigh_update(neigh, lladdr, NUD_STALE, 
+		     NEIGH_UPDATE_F_WEAK_OVERRIDE|
+		     NEIGH_UPDATE_F_OVERRIDE|
+		     (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
+				     NEIGH_UPDATE_F_ISROUTER))
+		     );
+
+	/*
+	 * Redirect received -> path was valid.
+	 * Look, redirects are sent only in response to data packets,
+	 * so that this nexthop apparently is reachable. --ANK
+	 */
+	dst_confirm(&rt->u.dst);
+
+	/* Duplicate redirect: silently ignore. */
+	if (neigh == rt->u.dst.neighbour)
+		goto out;
+
 	nrt = ip6_rt_copy(rt);
 	if (nrt == NULL)
 		goto out;
@@ -1253,7 +1265,7 @@
 	rtmsg.rtmsg_type = RTMSG_NEWROUTE;
 	ipv6_addr_copy(&rtmsg.rtmsg_gateway, gwaddr);
 	rtmsg.rtmsg_metric = 1024;
-	rtmsg.rtmsg_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT | RTF_UP;
+	rtmsg.rtmsg_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT | RTF_UP | RTF_EXPIRES;
 
 	rtmsg.rtmsg_ifindex = dev->ifindex;
 
diff -Nru a/net/sched/sch_generic.c b/net/sched/sch_generic.c
--- a/net/sched/sch_generic.c	2004-09-15 20:29:40 -07:00
+++ b/net/sched/sch_generic.c	2004-09-15 20:29:40 -07:00
@@ -148,8 +148,10 @@
 					spin_lock(&dev->queue_lock);
 					return -1;
 				}
-				if (ret == NETDEV_TX_LOCKED && nolock)
+				if (ret == NETDEV_TX_LOCKED && nolock) {
+					spin_lock(&dev->queue_lock);
 					goto collision; 
+				}
 			}
 
 			/* NETDEV_TX_BUSY - we need to requeue */
diff -Nru a/scripts/kconfig/lkc.h b/scripts/kconfig/lkc.h
--- a/scripts/kconfig/lkc.h	2004-09-15 20:29:40 -07:00
+++ b/scripts/kconfig/lkc.h	2004-09-15 20:29:40 -07:00
@@ -59,9 +59,6 @@
 struct file *file_lookup(const char *name);
 int file_write_dep(const char *name);
 
-extern struct menu *current_entry;
-extern struct menu *current_menu;
-
 /* symbol.c */
 void sym_init(void);
 void sym_clear_all_valid(void);
diff -Nru a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c
--- a/scripts/kconfig/mconf.c	2004-09-15 20:29:40 -07:00
+++ b/scripts/kconfig/mconf.c	2004-09-15 20:29:40 -07:00
@@ -18,6 +18,7 @@
 #include <string.h>
 #include <termios.h>
 #include <unistd.h>
+#include <regex.h>
 
 #define LKC_DIRECT_LINK
 #include "lkc.h"
@@ -28,7 +29,7 @@
 	"<Enter> selects submenus --->.  "
 	"Highlighted letters are hotkeys.  "
 	"Pressing <Y> includes, <N> excludes, <M> modularizes features.  "
-	"Press <Esc><Esc> to exit, <?> for Help.  "
+	"Press <Esc><Esc> to exit, <?> for Help, </> for Search.  "
 	"Legend: [*] built-in  [ ] excluded  <M> module  < > module capable",
 radiolist_instructions[] =
 	"Use the arrow keys to navigate this window or "
@@ -88,7 +89,7 @@
 static int indent;
 static struct termios ios_org;
 static int rows = 0, cols = 0;
-struct menu *current_menu;
+static struct menu *current_menu;
 static int child_count;
 static int do_resize;
 static int single_menu_mode;
@@ -102,6 +103,10 @@
 static void show_helptext(const char *title, const char *text);
 static void show_help(struct menu *menu);
 static void show_readme(void);
+static void show_file(const char *filename, const char *title, int r, int c);
+static void show_expr(struct menu *menu, FILE *fp);
+static void search_conf(char *pattern);
+static int regex_match(const char *string, regex_t *re);
 
 static void cprint_init(void);
 static int cprint1(const char *fmt, ...);
@@ -274,6 +279,114 @@
 	return WEXITSTATUS(stat);
 }
 
+static int regex_match(const char *string, regex_t *re)
+{
+	int rc;
+
+	rc = regexec(re, string, (size_t) 0, NULL, 0);
+	if (rc)
+		return 0;
+	return 1;
+}
+
+static void show_expr(struct menu *menu, FILE *fp)
+{
+	bool hit = false;
+	fprintf(fp, "Depends:\n ");
+	if (menu->prompt->visible.expr) {
+		if (!hit)
+			hit = true;
+		expr_fprint(menu->prompt->visible.expr, fp);
+	}
+	if (!hit)
+		fprintf(fp, "None");
+	if (menu->sym) {
+		struct property *prop;
+		hit = false;
+		fprintf(fp, "\nSelects:\n ");
+		for_all_properties(menu->sym, prop, P_SELECT) {
+			if (!hit)
+				hit = true;
+			expr_fprint(prop->expr, fp);
+		}
+		if (!hit)
+			fprintf(fp, "None");
+		hit = false;
+		fprintf(fp, "\nSelected by:\n ");
+		if (menu->sym->rev_dep.expr) {
+			hit = true;
+			expr_fprint(menu->sym->rev_dep.expr, fp);
+		}
+		if (!hit)
+			fprintf(fp, "None");
+	}
+}
+
+static void search_conf(char *pattern)
+{
+	struct symbol *sym = NULL;
+	struct menu *menu[32] = { 0 };
+	struct property *prop = NULL;
+	FILE *fp = NULL;
+	bool hit = false;
+	int i, j, k, l;
+	regex_t re;
+
+	if (regcomp(&re, pattern, REG_EXTENDED|REG_NOSUB))
+		return;
+
+	fp = fopen(".search.tmp", "w");
+	if (fp == NULL) {
+		perror("fopen");
+		return;
+	}
+	for_all_symbols(i, sym) {
+		if (!sym->name)
+			continue;
+		if (!regex_match(sym->name, &re))
+			continue;
+		for_all_prompts(sym, prop) {
+			struct menu *submenu = prop->menu;
+			if (!submenu)
+				continue;
+			j = 0;
+			hit = false;
+			while (submenu) {
+				menu[j++] = submenu;
+				submenu = submenu->parent;
+			}
+			if (j > 0) {
+				if (!hit)
+					hit = true;
+				fprintf(fp, "%s (%s)\n", prop->text, sym->name);
+				fprintf(fp, "Location:\n");
+			}
+			for (k = j-2, l=1; k > 0; k--, l++) {
+				const char *prompt = menu_get_prompt(menu[k]);
+				if (menu[k]->sym)
+					fprintf(fp, "%*c-> %s (%s)\n",
+								l, ' ',
+								prompt,
+								menu[k]->sym->name);
+				else
+					fprintf(fp, "%*c-> %s\n",
+								l, ' ',
+								prompt);
+			}
+			if (hit) {
+				show_expr(menu[0], fp);
+				fprintf(fp, "\n\n\n");
+			}
+		}
+	}
+	if (!hit)
+		fprintf(fp, "No matches found.");
+	regfree(&re);
+	fclose(fp);
+	show_file(".search.tmp", "Search Results", rows, cols);
+	unlink(".search.tmp");
+}
+
 static void build_conf(struct menu *menu)
 {
 	struct symbol *sym;
@@ -463,6 +576,23 @@
 			cprint("    Save Configuration to an Alternate File");
 		}
 		stat = exec_conf();
+		if (stat == 26) {
+			char *pattern;
+
+			if (!strlen(input_buf))
+				continue;
+			pattern = malloc(sizeof(char)*sizeof(input_buf));
+			if (pattern == NULL) {
+				perror("malloc");
+				continue;
+			}
+			for (i = 0; input_buf[i]; i++)
+				pattern[i] = toupper(input_buf[i]);
+			pattern[i] = '\0';
+			search_conf(pattern);
+			free(pattern);
+			continue;
+		}
 		if (stat < 0)
 			continue;
 
@@ -550,17 +680,7 @@
 	fd = creat(".help.tmp", 0777);
 	write(fd, text, strlen(text));
 	close(fd);
-	do {
-		cprint_init();
-		if (title) {
-			cprint("--title");
-			cprint("%s", title);
-		}
-		cprint("--textbox");
-		cprint(".help.tmp");
-		cprint("%d", r);
-		cprint("%d", c);
-	} while (exec_conf() < 0);
+	show_file(".help.tmp", title, r, c);
 	unlink(".help.tmp");
 }
 
@@ -589,13 +709,22 @@
 
 static void show_readme(void)
 {
+	show_file("scripts/README.Menuconfig", NULL, rows, cols);
+}
+
+static void show_file(const char *filename, const char *title, int r, int c)
+{
 	do {
 		cprint_init();
+		if (title) {
+			cprint("--title");
+			cprint("%s", title);
+		}
 		cprint("--textbox");
-		cprint("scripts/README.Menuconfig");
-		cprint("%d", rows);
-		cprint("%d", cols);
-	} while (exec_conf() == -1);
+		cprint("%s", filename);
+		cprint("%d", r);
+		cprint("%d", c);
+	} while (exec_conf() < 0);
 }
 
 static void conf_choice(struct menu *menu)
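/*
 * Userspace sketch only, not part of the patch: the matching scheme the
 * new '/' search uses.  mconf.c upper-cases whatever the user typed,
 * compiles it with REG_EXTENDED|REG_NOSUB and runs regexec() against
 * each symbol name (conventionally upper-case in Kconfig).  The symbol
 * list and pattern below are invented for the example.
 */
#include <ctype.h>
#include <regex.h>
#include <stdio.h>

int main(void)
{
	const char *symbols[] = { "NETDEVICES", "NET_IPGRE", "TMPFS" };
	char pattern[64] = "net_.*gre";		/* what a user might type at the '/' prompt */
	regex_t re;
	size_t i;

	for (i = 0; pattern[i]; i++)		/* same trick as mconf.c: match in upper case */
		pattern[i] = toupper(pattern[i]);

	if (regcomp(&re, pattern, REG_EXTENDED | REG_NOSUB))
		return 1;
	for (i = 0; i < sizeof(symbols) / sizeof(symbols[0]); i++)
		if (regexec(&re, symbols[i], 0, NULL, 0) == 0)
			printf("match: %s\n", symbols[i]);
	regfree(&re);
	return 0;
}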
diff -Nru a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
--- a/scripts/kconfig/menu.c	2004-09-15 20:29:40 -07:00
+++ b/scripts/kconfig/menu.c	2004-09-15 20:29:40 -07:00
@@ -10,7 +10,6 @@
 #include "lkc.h"
 
 struct menu rootmenu;
-struct menu *current_menu, *current_entry;
 static struct menu **last_entry_ptr;
 
 struct file *file_list;
diff -Nru a/scripts/kconfig/zconf.tab.c_shipped b/scripts/kconfig/zconf.tab.c_shipped
--- a/scripts/kconfig/zconf.tab.c_shipped	2004-09-15 20:29:40 -07:00
+++ b/scripts/kconfig/zconf.tab.c_shipped	2004-09-15 20:29:40 -07:00
@@ -175,6 +175,8 @@
 
 struct symbol *symbol_hash[257];
 
+static struct menu *current_menu, *current_entry;
+
 #define YYERROR_VERBOSE
 
 
diff -Nru a/scripts/kconfig/zconf.y b/scripts/kconfig/zconf.y
--- a/scripts/kconfig/zconf.y	2004-09-15 20:29:40 -07:00
+++ b/scripts/kconfig/zconf.y	2004-09-15 20:29:40 -07:00
@@ -25,6 +25,8 @@
 
 struct symbol *symbol_hash[257];
 
+static struct menu *current_menu, *current_entry;
+
 #define YYERROR_VERBOSE
 %}
 %expect 40
diff -Nru a/scripts/lxdialog/menubox.c b/scripts/lxdialog/menubox.c
--- a/scripts/lxdialog/menubox.c	2004-09-15 20:29:40 -07:00
+++ b/scripts/lxdialog/menubox.c	2004-09-15 20:29:40 -07:00
@@ -276,6 +276,15 @@
 
     while (key != ESC) {
 	key = wgetch(menu);
+	if ( key == '/' ) {
+		int ret = dialog_inputbox("Search Configuration Parameter",
+					"Enter Keyword", height, width,
+					(char *) NULL);
+		if (ret == 0) {
+			fprintf(stderr, "%s", dialog_input_result);
+			return 26;
+		}
+	}
 
 	if (key < 256 && isalpha(key)) key = tolower(key);