diff -urN linux-2.6.22.5/arch/arm/kernel/process.c linux-2.6.22.5-android/arch/arm/kernel/process.c --- linux-2.6.22.5/arch/arm/kernel/process.c 2007-08-23 09:23:54.000000000 +1000 +++ linux-2.6.22.5-android/arch/arm/kernel/process.c 2007-11-20 08:46:07.234228489 +1100 @@ -392,6 +392,16 @@ } EXPORT_SYMBOL(dump_thread); +/* + * Capture the user space registers if the task is not running (in user space) + */ +int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs) +{ + struct pt_regs ptregs = *task_pt_regs(tsk); + elf_core_copy_regs(regs, &ptregs); + return 1; +} + /* * Shuffle the argument into the correct register before calling the * thread function. r1 is the thread argument, r2 is the pointer to diff -urN linux-2.6.22.5/arch/arm/kernel/process.c.orig linux-2.6.22.5-android/arch/arm/kernel/process.c.orig --- linux-2.6.22.5/arch/arm/kernel/process.c.orig 1970-01-01 10:00:00.000000000 +1000 +++ linux-2.6.22.5-android/arch/arm/kernel/process.c.orig 2007-11-20 08:21:16.624834710 +1100 @@ -0,0 +1,451 @@ +/* + * linux/arch/arm/kernel/process.c + * + * Copyright (C) 1996-2000 Russell King - Converted to ARM. + * Original Copyright (C) 1995 Linus Torvalds + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +static const char *processor_modes[] = { + "USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" , "UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" , + "UK8_26" , "UK9_26" , "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26", + "USER_32", "FIQ_32" , "IRQ_32" , "SVC_32" , "UK4_32" , "UK5_32" , "UK6_32" , "ABT_32" , + "UK8_32" , "UK9_32" , "UK10_32", "UND_32" , "UK12_32", "UK13_32", "UK14_32", "SYS_32" +}; + +extern void setup_mm_for_reboot(char mode); + +static volatile int hlt_counter; + +#include + +void disable_hlt(void) +{ + hlt_counter++; +} + +EXPORT_SYMBOL(disable_hlt); + +void enable_hlt(void) +{ + hlt_counter--; +} + +EXPORT_SYMBOL(enable_hlt); + +static int __init nohlt_setup(char *__unused) +{ + hlt_counter = 1; + return 1; +} + +static int __init hlt_setup(char *__unused) +{ + hlt_counter = 0; + return 1; +} + +__setup("nohlt", nohlt_setup); +__setup("hlt", hlt_setup); + +void arm_machine_restart(char mode) +{ + /* + * Clean and disable cache, and turn off interrupts + */ + cpu_proc_fin(); + + /* + * Tell the mm system that we are going to reboot - + * we may need it to insert some 1:1 mappings so that + * soft boot works. + */ + setup_mm_for_reboot(mode); + + /* + * Now call the architecture specific reboot code. + */ + arch_reset(mode); + + /* + * Whoops - the architecture was unable to reboot. + * Tell the user! 
+ */ + mdelay(1000); + printk("Reboot failed -- System halted\n"); + while (1); +} + +/* + * Function pointers to optional machine specific functions + */ +void (*pm_idle)(void); +EXPORT_SYMBOL(pm_idle); + +void (*pm_power_off)(void); +EXPORT_SYMBOL(pm_power_off); + +void (*arm_pm_restart)(char str) = arm_machine_restart; +EXPORT_SYMBOL_GPL(arm_pm_restart); + + +/* + * This is our default idle handler. We need to disable + * interrupts here to ensure we don't miss a wakeup call. + */ +static void default_idle(void) +{ + if (hlt_counter) + cpu_relax(); + else { + local_irq_disable(); + if (!need_resched()) { + timer_dyn_reprogram(); + arch_idle(); + } + local_irq_enable(); + } +} + +/* + * The idle thread. We try to conserve power, while trying to keep + * overall latency low. The architecture specific idle is passed + * a value to indicate the level of "idleness" of the system. + */ +void cpu_idle(void) +{ + local_fiq_enable(); + + /* endless idle loop with no priority at all */ + while (1) { + void (*idle)(void) = pm_idle; + +#ifdef CONFIG_HOTPLUG_CPU + if (cpu_is_offline(smp_processor_id())) { + leds_event(led_idle_start); + cpu_die(); + } +#endif + + if (!idle) + idle = default_idle; + leds_event(led_idle_start); + tick_nohz_stop_sched_tick(); + while (!need_resched()) + idle(); + leds_event(led_idle_end); + tick_nohz_restart_sched_tick(); + preempt_enable_no_resched(); + schedule(); + preempt_disable(); + } +} + +static char reboot_mode = 'h'; + +int __init reboot_setup(char *str) +{ + reboot_mode = str[0]; + return 1; +} + +__setup("reboot=", reboot_setup); + +void machine_halt(void) +{ +} + + +void machine_power_off(void) +{ + if (pm_power_off) + pm_power_off(); +} + +void machine_restart(char * __unused) +{ + arm_pm_restart(reboot_mode); +} + +void __show_regs(struct pt_regs *regs) +{ + unsigned long flags; + char buf[64]; + + printk("CPU: %d %s (%s %.*s)\n", + smp_processor_id(), print_tainted(), init_utsname()->release, + 
(int)strcspn(init_utsname()->version, " "), + init_utsname()->version); + print_symbol("PC is at %s\n", instruction_pointer(regs)); + print_symbol("LR is at %s\n", regs->ARM_lr); + printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n" + "sp : %08lx ip : %08lx fp : %08lx\n", + regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr, + regs->ARM_sp, regs->ARM_ip, regs->ARM_fp); + printk("r10: %08lx r9 : %08lx r8 : %08lx\n", + regs->ARM_r10, regs->ARM_r9, + regs->ARM_r8); + printk("r7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n", + regs->ARM_r7, regs->ARM_r6, + regs->ARM_r5, regs->ARM_r4); + printk("r3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n", + regs->ARM_r3, regs->ARM_r2, + regs->ARM_r1, regs->ARM_r0); + + flags = regs->ARM_cpsr; + buf[0] = flags & PSR_N_BIT ? 'N' : 'n'; + buf[1] = flags & PSR_Z_BIT ? 'Z' : 'z'; + buf[2] = flags & PSR_C_BIT ? 'C' : 'c'; + buf[3] = flags & PSR_V_BIT ? 'V' : 'v'; + buf[4] = '\0'; + + printk("Flags: %s IRQs o%s FIQs o%s Mode %s%s Segment %s\n", + buf, interrupts_enabled(regs) ? "n" : "ff", + fast_interrupts_enabled(regs) ? "n" : "ff", + processor_modes[processor_mode(regs)], + thumb_mode(regs) ? " (T)" : "", + get_fs() == get_ds() ? 
"kernel" : "user"); +#ifdef CONFIG_CPU_CP15 + { + unsigned int ctrl; + + buf[0] = '\0'; +#ifdef CONFIG_CPU_CP15_MMU + { + unsigned int transbase, dac; + asm("mrc p15, 0, %0, c2, c0\n\t" + "mrc p15, 0, %1, c3, c0\n" + : "=r" (transbase), "=r" (dac)); + snprintf(buf, sizeof(buf), " Table: %08x DAC: %08x", + transbase, dac); + } +#endif + asm("mrc p15, 0, %0, c1, c0\n" : "=r" (ctrl)); + + printk("Control: %08x%s\n", ctrl, buf); + } +#endif +} + +void show_regs(struct pt_regs * regs) +{ + printk("\n"); + printk("Pid: %d, comm: %20s\n", current->pid, current->comm); + __show_regs(regs); + __backtrace(); +} + +void show_fpregs(struct user_fp *regs) +{ + int i; + + for (i = 0; i < 8; i++) { + unsigned long *p; + char type; + + p = (unsigned long *)(regs->fpregs + i); + + switch (regs->ftype[i]) { + case 1: type = 'f'; break; + case 2: type = 'd'; break; + case 3: type = 'e'; break; + default: type = '?'; break; + } + if (regs->init_flag) + type = '?'; + + printk(" f%d(%c): %08lx %08lx %08lx%c", + i, type, p[0], p[1], p[2], i & 1 ? '\n' : ' '); + } + + + printk("FPSR: %08lx FPCR: %08lx\n", + (unsigned long)regs->fpsr, + (unsigned long)regs->fpcr); +} + +/* + * Free current thread data structures etc.. 
+ */ +void exit_thread(void) +{ +} + +ATOMIC_NOTIFIER_HEAD(thread_notify_head); + +EXPORT_SYMBOL_GPL(thread_notify_head); + +void flush_thread(void) +{ + struct thread_info *thread = current_thread_info(); + struct task_struct *tsk = current; + + memset(thread->used_cp, 0, sizeof(thread->used_cp)); + memset(&tsk->thread.debug, 0, sizeof(struct debug_info)); + memset(&thread->fpstate, 0, sizeof(union fp_state)); + + thread_notify(THREAD_NOTIFY_FLUSH, thread); +} + +void release_thread(struct task_struct *dead_task) +{ + struct thread_info *thread = task_thread_info(dead_task); + + thread_notify(THREAD_NOTIFY_RELEASE, thread); +} + +asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); + +int +copy_thread(int nr, unsigned long clone_flags, unsigned long stack_start, + unsigned long stk_sz, struct task_struct *p, struct pt_regs *regs) +{ + struct thread_info *thread = task_thread_info(p); + struct pt_regs *childregs = task_pt_regs(p); + + *childregs = *regs; + childregs->ARM_r0 = 0; + childregs->ARM_sp = stack_start; + + memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save)); + thread->cpu_context.sp = (unsigned long)childregs; + thread->cpu_context.pc = (unsigned long)ret_from_fork; + + if (clone_flags & CLONE_SETTLS) + thread->tp_value = regs->ARM_r3; + + return 0; +} + +/* + * fill in the fpe structure for a core dump... + */ +int dump_fpu (struct pt_regs *regs, struct user_fp *fp) +{ + struct thread_info *thread = current_thread_info(); + int used_math = thread->used_cp[1] | thread->used_cp[2]; + + if (used_math) + memcpy(fp, &thread->fpstate.soft, sizeof (*fp)); + + return used_math != 0; +} +EXPORT_SYMBOL(dump_fpu); + +/* + * fill in the user structure for a core dump.. 
+ */ +void dump_thread(struct pt_regs * regs, struct user * dump) +{ + struct task_struct *tsk = current; + + dump->magic = CMAGIC; + dump->start_code = tsk->mm->start_code; + dump->start_stack = regs->ARM_sp & ~(PAGE_SIZE - 1); + + dump->u_tsize = (tsk->mm->end_code - tsk->mm->start_code) >> PAGE_SHIFT; + dump->u_dsize = (tsk->mm->brk - tsk->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT; + dump->u_ssize = 0; + + dump->u_debugreg[0] = tsk->thread.debug.bp[0].address; + dump->u_debugreg[1] = tsk->thread.debug.bp[1].address; + dump->u_debugreg[2] = tsk->thread.debug.bp[0].insn.arm; + dump->u_debugreg[3] = tsk->thread.debug.bp[1].insn.arm; + dump->u_debugreg[4] = tsk->thread.debug.nsaved; + + if (dump->start_stack < 0x04000000) + dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT; + + dump->regs = *regs; + dump->u_fpvalid = dump_fpu (regs, &dump->u_fp); +} +EXPORT_SYMBOL(dump_thread); + +/* + * Shuffle the argument into the correct register before calling the + * thread function. r1 is the thread argument, r2 is the pointer to + * the thread function, and r3 points to the exit function. + */ +extern void kernel_thread_helper(void); +asm( ".section .text\n" +" .align\n" +" .type kernel_thread_helper, #function\n" +"kernel_thread_helper:\n" +" mov r0, r1\n" +" mov lr, r3\n" +" mov pc, r2\n" +" .size kernel_thread_helper, . - kernel_thread_helper\n" +" .previous"); + +/* + * Create a kernel thread. 
+ */ +pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) +{ + struct pt_regs regs; + + memset(®s, 0, sizeof(regs)); + + regs.ARM_r1 = (unsigned long)arg; + regs.ARM_r2 = (unsigned long)fn; + regs.ARM_r3 = (unsigned long)do_exit; + regs.ARM_pc = (unsigned long)kernel_thread_helper; + regs.ARM_cpsr = SVC_MODE; + + return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, ®s, 0, NULL, NULL); +} +EXPORT_SYMBOL(kernel_thread); + +unsigned long get_wchan(struct task_struct *p) +{ + unsigned long fp, lr; + unsigned long stack_start, stack_end; + int count = 0; + if (!p || p == current || p->state == TASK_RUNNING) + return 0; + + stack_start = (unsigned long)end_of_stack(p); + stack_end = (unsigned long)task_stack_page(p) + THREAD_SIZE; + + fp = thread_saved_fp(p); + do { + if (fp < stack_start || fp > stack_end) + return 0; + lr = pc_pointer (((unsigned long *)fp)[-1]); + if (!in_sched_functions(lr)) + return lr; + fp = *(unsigned long *) (fp - 12); + } while (count ++ < 16); + return 0; +} diff -urN linux-2.6.22.5/arch/arm/kernel/signal.c linux-2.6.22.5-android/arch/arm/kernel/signal.c --- linux-2.6.22.5/arch/arm/kernel/signal.c 2007-08-23 09:23:54.000000000 +1000 +++ linux-2.6.22.5-android/arch/arm/kernel/signal.c 2007-11-20 08:46:07.234228489 +1100 @@ -534,6 +534,14 @@ static inline void restart_syscall(struct pt_regs *regs) { + if (regs->ARM_ORIG_r0 == -ERESTARTNOHAND || + regs->ARM_ORIG_r0 == -ERESTARTSYS || + regs->ARM_ORIG_r0 == -ERESTARTNOINTR || + regs->ARM_ORIG_r0 == -ERESTART_RESTARTBLOCK) { + /* the syscall cannot be safely restarted, return -EINTR instead */ + regs->ARM_r0 = -EINTR; + return; + } regs->ARM_r0 = regs->ARM_ORIG_r0; regs->ARM_pc -= thumb_mode(regs) ? 
2 : 4; } @@ -650,6 +658,7 @@ */ if (syscall) { if (regs->ARM_r0 == -ERESTART_RESTARTBLOCK) { + regs->ARM_r0 = -EAGAIN; /* prevent multiple restarts */ if (thumb_mode(regs)) { regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE; regs->ARM_pc -= 2; diff -urN linux-2.6.22.5/.config.old linux-2.6.22.5-android/.config.old --- linux-2.6.22.5/.config.old 2007-11-20 08:07:46.281683424 +1100 +++ linux-2.6.22.5-android/.config.old 2007-11-20 08:24:38.605590459 +1100 @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit # Linux kernel version: 2.6.22.5 -# Wed Aug 29 01:48:54 2007 +# Tue Nov 20 08:09:22 2007 # CONFIG_ARM=y CONFIG_SYS_SUPPORTS_APM_EMULATION=y @@ -166,6 +166,7 @@ CONFIG_S3C2410_PM=y CONFIG_S3C2410_GPIO=y CONFIG_S3C2410_CLOCK=y +CONFIG_S3C2410_PWM=y # # S3C2410 Machines @@ -283,6 +284,7 @@ # At least one emulation must be selected # CONFIG_FPE_NWFPE=y +# CONFIG_FPE_NWFPE_XP is not set # CONFIG_FPE_FASTFPE is not set # @@ -1048,6 +1050,7 @@ # CONFIG_SPI_AT25 is not set CONFIG_SPI_SPIDEV=m CONFIG_SPI_SLAVE_JBT6K74=y +# CONFIG_SPI_SLAVE_LIS302DL is not set # # Dallas's 1-wire bus @@ -1120,6 +1123,7 @@ # CONFIG_LEDS_S3C24XX=m CONFIG_LEDS_GTA01=y +# CONFIG_LEDS_GTA02 is not set # # LED Triggers @@ -1200,6 +1204,7 @@ # CONFIG_LOGO_LINUX_MONO is not set # CONFIG_LOGO_LINUX_VGA16 is not set CONFIG_LOGO_LINUX_CLUT224=y +CONFIG_LOGO_OPENMOKO_CLUT224=y # # Sound @@ -1587,6 +1592,7 @@ # CONFIG_EFS_FS is not set CONFIG_YAFFS_FS=y CONFIG_YAFFS_YAFFS1=y +# CONFIG_YAFFS_9BYTE_TAGS is not set # CONFIG_YAFFS_DOES_ECC is not set CONFIG_YAFFS_YAFFS2=y CONFIG_YAFFS_AUTO_YAFFS2=y diff -urN linux-2.6.22.5/drivers/binder/binder.c linux-2.6.22.5-android/drivers/binder/binder.c --- linux-2.6.22.5/drivers/binder/binder.c 1970-01-01 10:00:00.000000000 +1000 +++ linux-2.6.22.5-android/drivers/binder/binder.c 2007-11-20 08:46:07.654250861 +1100 @@ -0,0 +1,691 @@ +/* binder driver + * Copyright (C) 2005 Palmsource, Inc. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + +#include +#include +#include +#include +#include +#include // includes +#include // for 'current' +#include // for vma, etc. +#include +#include +#include +#include "binder_defs.h" +#include "binder_proc.h" +#include "binder_thread.h" +#include "binder_node.h" +#include "binder_transaction.h" +#include "iobuffer.h" + +MODULE_LICENSE("GPL"); // class_* symbols get exported GPL +MODULE_AUTHOR("PalmSource, Inc."); +MODULE_DESCRIPTION("Capability-based IPC"); + +#define BINDER_MINOR 0 +#define BINDER_NUM_DEVS 1 +#define BINDER_NAME "binder" + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) +#define CLASS_SIMPLE class_simple +#define CLASS_SIMPLE_CREATE class_simple_create +#define CLASS_SIMPLE_DEVICE_ADD class_simple_device_add +#define CLASS_SIMPLE_DESTROY class_simple_destroy +#define CLASS_SIMPLE_DEVICE_REMOVE class_simple_device_remove +#else +#define CLASS_SIMPLE class +#define CLASS_SIMPLE_CREATE class_create +#define CLASS_SIMPLE_DEVICE_ADD class_device_create +#define CLASS_SIMPLE_DESTROY class_destroy +#define CLASS_SIMPLE_DEVICE_REMOVE(a) class_device_destroy(binder_class, a) +#endif + +/* + * Prototypes + */ + +struct binder_thread* find_thread(pid_t pid, binder_proc_t *proc, bool remove); + +#if HAVE_UNLOCKED_IOCTL +#define 
USE_UNLOCKED_IOCTL 1 +#else +#define USE_UNLOCKED_IOCTL 0 +#endif +#if USE_UNLOCKED_IOCTL +static long binder_unlocked_ioctl(struct file *, unsigned int, unsigned long); +#else +static int binder_ioctl(struct inode *, struct file *, unsigned int, unsigned long); +#endif +static int binder_open(struct inode *, struct file *); +static int binder_release(struct inode *, struct file *); +static int binder_mmap(struct file *, struct vm_area_struct *); + +/* + * Globals + */ + +struct binder_dev { + struct cdev cdev; +}; + +static int binder_major = 0; +static char const * const binder_name = BINDER_NAME; +static struct binder_dev binder_device; +static struct CLASS_SIMPLE *binder_class; + +static struct file_operations binder_fops = { + .owner = THIS_MODULE, +#if USE_UNLOCKED_IOCTL + .unlocked_ioctl = binder_unlocked_ioctl, +#else + .ioctl = binder_ioctl, +#endif + .mmap = binder_mmap, + .open = binder_open, + .release = binder_release +}; + +static void binder_vma_open(struct vm_area_struct * area); +static void binder_vma_close(struct vm_area_struct * area); +static struct page * binder_vma_nopage(struct vm_area_struct * area, unsigned long address, int *type); + +static struct vm_operations_struct binder_vm_ops = { + .open = binder_vma_open, + .close = binder_vma_close, + .nopage = binder_vma_nopage +}; + +struct kmem_cache *transaction_cache = NULL; +struct kmem_cache *thread_cache = NULL; +struct kmem_cache *node_cache = NULL; +struct kmem_cache *local_mapping_cache = NULL; +struct kmem_cache *reverse_mapping_cache = NULL; +struct kmem_cache *range_map_cache = NULL; + +spinlock_t cmpxchg32_spinner = SPIN_LOCK_UNLOCKED; +static DECLARE_MUTEX(maps_lock); + +/* + * The kernel sizes its process hash table based up on the amount of RAM, with + * a lower limit of 4 bits and an upper limit of 12 bits. We probably don't + * need 8 bits worth of entries on PDAs, but it make it very likely we will + * have chain lengths of one. 
+ */ + +#define PID_HASH_BITS (8) +static int pid_hash_bits = PID_HASH_BITS; +#define hash_proc_id(pid) hash_long(pid, pid_hash_bits) + +static struct hlist_head *pid_table = NULL; + +static inline binder_thread_t * +binder_thread_alloc(pid_t pid, binder_proc_t *proc, int index) +{ + binder_thread_t *thread = binder_thread_init(pid, proc); + if (thread) { + if (proc) { + if(!binder_proc_AddThread(proc, thread)) + return NULL; // binder_proc_AddThread will cause the thread to be deleted if the process is dying + } + hlist_add_head(&(thread->node), pid_table + index); + } + DPRINTF(5, (KERN_WARNING "%s(%u, %p, %d): %p\n", __func__, pid, proc, index, thread)); + return thread; +} + +struct binder_thread * +core_find_thread(pid_t pid, binder_proc_t *proc, bool remove) +{ + binder_thread_t *thread; + struct hlist_node *_p; + const int index = hash_proc_id(pid); + + DPRINTF(5, (KERN_WARNING "%s(%u, %p, %s): index %d\n", __func__, pid, proc, remove ? "TRUE" : "FALSE", index)); + hlist_for_each_entry(thread, _p, pid_table + index, node) { + DPRINTF(5, (KERN_WARNING "thread: %p, thread->m_thid: %u\n", thread, thread->m_thid)); + if (thread->m_thid == pid) { + DPRINTF(5, (KERN_WARNING "found thread %p, proc=%p\n", thread, thread->m_team)); + if (remove) { + thread->attachedToThread = FALSE; + hlist_del(&thread->node); + } else if (proc) { + if (thread->m_team == NULL) { + binder_thread_AttachProcess(thread, proc); + } else { + BND_ASSERT(thread->m_team == proc, "proc changed"); + } + } + return thread; + } + } + + return NULL; +} + +binder_thread_t * +find_thread(pid_t pid, binder_proc_t *proc, bool remove) +{ + binder_thread_t *thread; + + DPRINTF(5, (KERN_WARNING "%s(%u, %p, %s)\n", __func__, pid, proc, remove ? 
"TRUE" : "FALSE")); + thread = core_find_thread(pid, proc, remove); + + /* binder_thread_alloc() fails for -ENOMEM only */ + if (thread == NULL && remove == FALSE) thread = binder_thread_alloc(pid, proc, hash_proc_id(pid)); + return thread; +} + +struct binder_thread * +check_for_thread(pid_t pid, bool create) +{ + binder_thread_t *thread; + int rv; + + rv = down_interruptible(&maps_lock); + if(rv != 0) + return NULL; + if (create) + thread = find_thread(pid, NULL, FALSE); + else + thread = core_find_thread(pid, NULL, FALSE); + if(thread != NULL) + BND_FIRST_ACQUIRE(binder_thread, thread, STRONG, thread); + up(&maps_lock); + + return thread; +} + +binder_thread_t * +attach_child_thread(pid_t child_pid, binder_thread_t *parent) +{ + binder_thread_t *thread; + int rv; + bool failed = FALSE; + + rv = down_interruptible(&maps_lock); + if(rv != 0) + return NULL; + thread = find_thread(child_pid, NULL, FALSE); + if(thread != NULL) { + BND_FIRST_ACQUIRE(binder_thread, thread, STRONG, parent); + // Note: it is important this be done with the lock + // held. See binder_thread_WaitForParent(). + failed = !binder_thread_SetParentThread(thread, parent); + } + up(&maps_lock); + + if (failed) { + forget_thread(thread); + thread = NULL; + } + + return thread; +} + +void +forget_thread(struct binder_thread *thread) +{ + pid_t pid; + bool attached; + int rv; + + rv = down_interruptible(&maps_lock); + if(rv != 0) + return; + pid = thread->m_thid; + attached = thread->attachedToThread; + if(BND_RELEASE(binder_thread, thread, STRONG, thread) == 1) { + // Remove it if not yet accessed by user space... 
+ if (!attached) { + find_thread(pid, NULL, TRUE); + } + } + up(&maps_lock); +} + +#if BND_MEM_DEBUG +typedef struct dbg_mem_header_s { + unsigned long state; + kmem_cache_t *slab; + struct dbg_mem_header_s *next; + struct dbg_mem_header_s *prev; +} dbg_mem_header_t ; +static dbg_mem_header_t *dbg_active_memory; +#endif + +void generic_slab_xtor(void *p, struct kmem_cache *slab, unsigned long flags) +{ +#if BND_MEM_DEBUG + dbg_mem_header_t *h = p; + if(flags & SLAB_CTOR_CONSTRUCTOR) { + h->state = 0; + h->slab = slab; + h->next = dbg_active_memory; + if(h->next) + h->next->prev = h; + h->prev = NULL; + dbg_active_memory = h; + } + else { + BND_ASSERT(h->state == 0 || h->state == 0x22222222, "memory still in use"); + if(h->next) + h->next->prev = h->prev; + if(h->prev) + h->prev->next = h->next; + else + dbg_active_memory = h->next; + } +#endif +} + +void transaction_slab_xtor(void *p, struct kmem_cache *slab, unsigned long flags) +{ + DIPRINTF(10, (KERN_WARNING "%s(%p, %p, %08lx)\n", __func__, p, slab, flags)); + generic_slab_xtor(p, slab, flags); +} + +void thread_slab_xtor(void *p, struct kmem_cache *slab, unsigned long flags) +{ + DIPRINTF(10, (KERN_WARNING "%s(%p, %p, %08lx)\n", __func__, p, slab, flags)); + generic_slab_xtor(p, slab, flags); +} + +void node_slab_xtor(void *p, struct kmem_cache *slab, unsigned long flags) +{ + DIPRINTF(10, (KERN_WARNING "%s(%p, %p, %08lx)\n", __func__, p, slab, flags)); + generic_slab_xtor(p, slab, flags); +} + +void local_mapping_slab_xtor(void *p, struct kmem_cache *slab, unsigned long flags) +{ + DIPRINTF(10, (KERN_WARNING "%s(%p, %p, %08lx)\n", __func__, p, slab, flags)); + generic_slab_xtor(p, slab, flags); +} + +void reverse_mapping_slab_xtor(void *p, struct kmem_cache *slab, unsigned long flags) +{ + DIPRINTF(10, (KERN_WARNING "%s(%p, %p, %08lx)\n", __func__, p, slab, flags)); + generic_slab_xtor(p, slab, flags); +} + +void range_map_slab_xtor(void *p, struct kmem_cache *slab, unsigned long flags) +{ + DIPRINTF(10, 
(KERN_WARNING "%s(%p, %p, %08lx)\n", __func__, p, slab, flags)); + generic_slab_xtor(p, slab, flags); +} + +static int /*__init*/ create_pools(void) +{ + //long cache_flags = /*SLAB_DEBUG_FREE | SLAB_DEBUG_INITIAL | SLAB_RED_ZONE |*/ SLAB_POISON; + //long cache_flags = SLAB_RECLAIM_ACCOUNT | SLAB_NO_REAP; + long cache_flags = 0; +#if BND_MEM_DEBUG + size_t pad = sizeof(dbg_mem_header_t); +#else + size_t pad = 0; +#endif + DPRINTF(4, (KERN_WARNING "%s()\n", __func__)); + + // small object pools + transaction_cache = kmem_cache_create("binder_transaction_t", sizeof(binder_transaction_t)+pad, 0, cache_flags, transaction_slab_xtor); + if (!transaction_cache) return -ENOMEM; + thread_cache = kmem_cache_create("binder_thread_t", sizeof(binder_thread_t)+pad, 0, cache_flags, thread_slab_xtor); + if (!thread_cache) return -ENOMEM; + node_cache = kmem_cache_create("binder_node_t", sizeof(binder_node_t)+pad, 0, cache_flags, node_slab_xtor); + if (!node_cache) return -ENOMEM; + local_mapping_cache = kmem_cache_create("local_mapping_t", sizeof(local_mapping_t)+pad, 0, cache_flags, local_mapping_slab_xtor); + if (!local_mapping_cache) return -ENOMEM; + reverse_mapping_cache = kmem_cache_create("reverse_mapping_t", sizeof(reverse_mapping_t)+pad, 0, cache_flags, reverse_mapping_slab_xtor); + if (!reverse_mapping_cache) return -ENOMEM; + range_map_cache = kmem_cache_create("range_map_t", sizeof(range_map_t)+pad, 0, cache_flags, range_map_slab_xtor); + if (!range_map_cache) return -ENOMEM; + + // hash tables + pid_table = kmalloc(sizeof(void *) << PID_HASH_BITS, GFP_KERNEL); + if (!pid_table) return -ENOMEM; + memset(pid_table, 0, sizeof(void *) << PID_HASH_BITS); + return 0; +} + +static int destroy_pools(void) +{ + int res = 0; +#if BND_MEM_DEBUG + dbg_mem_header_t *m, *mn; +#endif + DPRINTF(4, (KERN_WARNING "%s()\n", __func__)); + + /* + * These can fail if we haven't free'd all of the objects we've allocated. 
+ */ + +#if BND_MEM_DEBUG + + + DPRINTF(4, (KERN_WARNING "%s() dbg_active_memory = %p\n", __func__, dbg_active_memory)); + m = dbg_active_memory; + while(m) { + mn = m->next; + if(m->state == 0x11111111) { + printk(KERN_WARNING "%s() memory still in use: %p slab %p\n", __func__, m + 1, m->slab); + dbg_kmem_cache_free(m->slab, m + 1); + } + m = mn; + } +#endif + + kmem_cache_destroy(transaction_cache); + kmem_cache_destroy(thread_cache); + kmem_cache_destroy(node_cache); + kmem_cache_destroy(local_mapping_cache); + kmem_cache_destroy(reverse_mapping_cache); + kmem_cache_destroy(range_map_cache); + if (pid_table) kfree(pid_table); + return res; +} + +static int __init init_binder(void) +{ + struct class_device *simple; + int result; + dev_t dev = 0; + + result = create_pools(); + if (result) { + goto free_pools; + } + + result = alloc_chrdev_region(&dev, BINDER_MINOR, BINDER_NUM_DEVS, binder_name); + if (result < 0) { + printk(KERN_WARNING "init_binder: alloc_chrdev_region() failed: %d\n", result); + return result; + } + + binder_major = MAJOR(dev); + binder_class = CLASS_SIMPLE_CREATE(THIS_MODULE, "binderipc"); + if (IS_ERR(binder_class)) { + result = PTR_ERR(binder_class); + printk(KERN_WARNING "init_binder: CLASS_SIMPLE_CREATE() failed: %d\n", result); + goto unalloc; + } + + memset(&binder_device, 0, sizeof(binder_device)); // overkill, but we don't care + cdev_init(&binder_device.cdev, &binder_fops); + binder_device.cdev.owner = THIS_MODULE; + result = cdev_add(&binder_device.cdev, dev, BINDER_NUM_DEVS); + if (result < 0) { + printk(KERN_WARNING "init_binder: cdev_add() failed: %d\n", result); + goto unregister_class; + } + + void* mem = kzalloc(sizeof(*simple), GFP_KERNEL); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) + simple = CLASS_SIMPLE_DEVICE_ADD(binder_class, dev, NULL, "%s", BINDER_NAME); +#else + // ARGH API CHANGE!!! 
+ simple = CLASS_SIMPLE_DEVICE_ADD(binder_class, NULL, dev, NULL, "%s", BINDER_NAME); +#endif + if (IS_ERR(simple)) { + result = PTR_ERR(simple); + goto unadd_cdev; + } + + goto exit0; + +unadd_cdev: + cdev_del(&binder_device.cdev); +unregister_class: + CLASS_SIMPLE_DESTROY(binder_class); +unalloc: + unregister_chrdev_region(binder_major, BINDER_NUM_DEVS); +free_pools: + destroy_pools(); +exit0: + return result; +} + +static void __exit cleanup_binder(void) +{ + CLASS_SIMPLE_DEVICE_REMOVE(MKDEV(binder_major, 0)); + cdev_del(&binder_device.cdev); + CLASS_SIMPLE_DESTROY(binder_class); + unregister_chrdev_region(binder_major, BINDER_NUM_DEVS); + destroy_pools(); +} + + +module_init(init_binder); +module_exit(cleanup_binder); + +static int binder_open(struct inode *nodp, struct file *filp) +{ + binder_proc_t *proc; + + //printk(KERN_WARNING "%s(%p %p) (pid %d)\n", __func__, nodp, filp, current->pid); + // We only have one device, so we don't have to dig into the inode for it. + + down(&maps_lock); + proc = new_binder_proc(); + filp->private_data = proc; + up(&maps_lock); + printk(KERN_WARNING "%s(%p %p) (pid %d) got %p\n", __func__, nodp, filp, current->pid, proc); + if(proc == NULL) + return -ENOMEM; + return 0; +} + +static int binder_release(struct inode *nodp, struct file *filp) +{ + binder_proc_t *that; + binder_thread_t *thread; + struct hlist_node *_p, *_pp; + int index; + printk(KERN_WARNING "%s(%p %p) (pid %d) pd %p\n", __func__, nodp, filp, current->pid, filp->private_data); + that = filp->private_data; + if (that) { + filp->private_data = NULL; + + // ensure the process stays around until we can verify termination + index = 1 << pid_hash_bits; + + DPRINTF(5, (KERN_WARNING "%s(%p) freeing threads\n", __func__, that)); + + down(&maps_lock); + while (index--) { + hlist_for_each_entry_safe(thread, _p, _pp, pid_table + index, node) { + if (thread->m_team == that) { + DPRINTF(5, (KERN_WARNING "%s(%p) freeing thread %d\n", __func__, that, thread->m_thid)); + 
hlist_del(&thread->node); + BND_RELEASE(binder_thread, thread, STRONG, that); + //BND_RELEASE(binder_thread, thread, WEAK, that); + } + } + } + DPRINTF(5, (KERN_WARNING "%s(%p) done freeing threads\n", __func__, that)); + up(&maps_lock); + + binder_proc_Die(that, FALSE); + BND_RELEASE(binder_proc, that, STRONG, that); + } + else printk(KERN_WARNING "%s(pid %d): couldn't find binder_proc to Die()\n", __func__, current->pid); + return 0; +} + +#if USE_UNLOCKED_IOCTL +static long binder_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +#else +static int binder_ioctl(struct inode *nodp, struct file *filp, unsigned int cmd, unsigned long arg) +#endif +{ + binder_thread_t *thread; + struct binder_proc *proc; + int rv; + + if (_IOC_TYPE(cmd) != BINDER_IOC_MAGIC) return -ENOTTY; + if (_IOC_NR(cmd) > BINDER_IOC_MAXNR) return -ENOTTY; + + DPRINTF(5, (KERN_WARNING "%s: %08x\n", __func__, cmd)); + + // find the thread tracking structure + rv = down_interruptible(&maps_lock); + if(rv != 0) + return rv; + proc = filp->private_data; + BND_ASSERT(proc != NULL, "ioctl called after release"); + if(proc == NULL || !binder_proc_IsAlive(proc)) + thread = NULL; + else + thread = find_thread(current->pid, proc, cmd == BINDER_THREAD_EXIT); + if(thread != NULL) { + BND_ACQUIRE(binder_thread, thread, WEAK, thread); + thread->attachedToThread = TRUE; + } + + up(&maps_lock); + if(proc == NULL || !binder_proc_IsAlive(proc)) + return -ECONNREFUSED; + if (thread == NULL) + return -ENOMEM; + + //BND_ASSERT(thread->m_team == proc, "bad thread process ptr"); + if(thread->m_team != proc) { + printk( KERN_WARNING "%s: cmd %08x process ptr mismatch, " + "thread has %p, expected %p\n", + __func__, cmd, thread->m_team, proc ); + return -EIO; + } + + rv = binder_thread_Control(thread, cmd, (void*)arg); + BND_RELEASE(binder_thread, thread, WEAK, thread); + return rv; +} + +static int binder_mmap(struct file * filp, struct vm_area_struct * vma) +{ + // FIXME: Unil we see a device with 
ZONE_HIGH memory (currently, greater + // than 896MB RAM) we don't need to worry about alloc_page. + vma->vm_ops = &binder_vm_ops; + vma->vm_flags |= VM_RESERVED | VM_READ | VM_RAND_READ | VM_IO | VM_DONTCOPY | VM_DONTEXPAND; + vma->vm_flags &= ~(VM_SHARED); + vma->vm_private_data = filp->private_data; + binder_vma_open(vma); + return 0; +} + +static void binder_vma_open(struct vm_area_struct * area) +{ + binder_proc_t *that; + DPRINTF(5, (KERN_WARNING "binder_vma_open()\n")); + // Do we have to watch for clone()'d processes and hunt down the + // appropriate binder_proc_t? + + that = area->vm_private_data; + // initialize our free space map + if (that->m_freeMap.rb_node == NULL) { + range_map_t *rm = kmem_cache_alloc(range_map_cache, GFP_KERNEL); + that->m_mmap_start = rm->start = area->vm_start; + rm->end = area->vm_end; + rm->page = NULL; + rm->team = that; + BND_LOCK(that->m_map_pool_lock); + binder_proc_free_map_insert(that, rm); + BND_UNLOCK(that->m_map_pool_lock); + DPRINTF(5, (KERN_WARNING "vma(%08lx, %08lx) for %08x\n", rm->start, rm->end, (unsigned int)that)); + } +#if 0 + else printk(KERN_WARNING " --- didn't reconstruct the initial free-map\n"); +#endif +} + +static void binder_vma_close(struct vm_area_struct * area) +{ + // Uh, what? 
+ DPRINTF(5, (KERN_WARNING "binder_vma_close() for %08x\n", (unsigned int)area->vm_private_data)); +} + +static struct page * binder_vma_nopage(struct vm_area_struct * area, unsigned long address, int *type) +{ + struct page *pageptr = NULL; + // the private data holds a pointer to owning binder_proc + binder_proc_t *bp = (binder_proc_t *)area->vm_private_data; + DPRINTF(5, ("binder_vma_nopage(%p, %08lx)\n", bp, address)); + // make sure this address corresponds to a valid transaction + if (!binder_proc_ValidTransactionAddress(bp, address, &pageptr)) + return NOPAGE_SIGBUS; + // bump the kernel reference counts + get_page(pageptr); + // record the fault type + if (type) *type = VM_FAULT_MINOR; + // return the page + return pageptr; +} + +void my_dump_stack(void) { printk(KERN_WARNING ""); dump_stack(); } + +void soft_yield() +{ + static int i = 0; + i++; + if(i < 10) + return; + i = 0; + yield(); +} + +#if BND_MEM_DEBUG + +#undef kmem_cache_alloc +#undef kmem_cache_free + +void *dbg_kmem_cache_alloc(struct kmem_cache *a, unsigned int b) +{ + dbg_mem_header_t *p; + p = kmem_cache_alloc(a, b); + BND_ASSERT(p != NULL, "memory allocation failed"); + if(p == NULL) + return NULL; + if(p->state != 0x00000000) { + if(p->state != 0x22222222) + DPRINTF(5, (KERN_WARNING "%s: kmem_cache_alloc(%p, %d) BAD PTR %p = 0x%08lx\n", __func__, a, b, p, p->state)); + else + DPRINTF(6, (KERN_WARNING "%s: kmem_cache_alloc(%p, %d) NEW PTR %p = 0x%08lx\n", __func__, a, b, p, p->state)); + } + p->state = 0x11111111; + p++; + DPRINTF(6, (KERN_WARNING "%s: kmem_cache_alloc(%p, %d) returned %p\n", __func__, a, b, p)); + return p; +} + +void dbg_kmem_cache_free(struct kmem_cache *a, void *b) +{ + dbg_mem_header_t *p = b; + DPRINTF(6, (KERN_WARNING "%s: kmem_cache_free(%p, %p)\n", __func__, a, p)); + p--; + if(p->state != 0x11111111) { + printk(KERN_WARNING "%s: kmem_cache_free(%p, %p) BAD ARG 0x%08lx\n", __func__, a, p, p->state); + dump_stack(); + return; + } + + p->state = 0x22222222; + 
kmem_cache_free(a, p); +} + +#endif diff -urN linux-2.6.22.5/drivers/binder/binder_defs.h linux-2.6.22.5-android/drivers/binder/binder_defs.h --- linux-2.6.22.5/drivers/binder/binder_defs.h 1970-01-01 10:00:00.000000000 +1000 +++ linux-2.6.22.5-android/drivers/binder/binder_defs.h 2007-11-20 08:46:07.654250861 +1100 @@ -0,0 +1,340 @@ +/* binder driver + * Copyright (C) 2005 Palmsource, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + +#ifndef BINDER_DEFS_H +#define BINDER_DEFS_H + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#if defined(CONFIG_ARM) +/* Define this if you want to use the linux threads hack on ARM */ +#define USE_LINUXTHREADS +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) +#define assert_spin_locked(x) +#endif + +extern struct kmem_cache *transaction_cache; +extern struct kmem_cache *thread_cache; +extern struct kmem_cache *node_cache; +extern struct kmem_cache *local_mapping_cache; +extern struct kmem_cache *reverse_mapping_cache; +extern struct kmem_cache *range_map_cache; + +#define HASH_BITS 8 +#define HASH_SIZE (1 << HASH_BITS) + +enum ref_count_type { + STRONG = 1, + WEAK = 2 +}; + +/* ------------------------------------------------------------------ */ +/* --------------------- COMPILING 
AS A DRIVER ---------------------- */ +/* ------------------------------------------------------------------ */ + +void soft_yield(void); + +#define STOP_ON_ASSERT //msleep_interruptible(1000*60*60*24*7) + +#define BND_MEM_DEBUG 0 // slab destructors are no longer available + +#if 0 + +#define BINDER_DEBUG 1 +#define VALIDATES_BINDER 0 +#define DIPRINTF(level,a) do { if (level <= 9) printk a; } while(0) +#define DPRINTF(level,a) do { if (level <= 9) { printk a; soft_yield();} } while(0) +#define BND_FAIL(msg) +#define BND_ASSERT(cond, msg) do { if (!(cond)) { printk(KERN_WARNING "BND_ASSERT file %s line %d: %s\n", __FILE__, __LINE__, msg); dump_stack(); STOP_ON_ASSERT;} } while (FALSE) +#define DBTRANSACT(x) printk x +#define DBSHUTDOWN(x) printk x +#define DBSPAWN(x) printk x +#define DBSTACK(x) printk x +#define DBLOCK(x) printk x +#define DBREFS(x) printk x +#define DBREAD(x) printk x +#define DBDEATH(x) printk x + +#else +#define BINDER_DEBUG 0 +#define DIPRINTF(level,a) +#define DPRINTF(level,a) +#define BND_FAIL(msg) +//#define BND_ASSERT(cond, msg) if (!(cond)) printk(KERN_WARNING "BND_ASSERT file %s line %d: %s\n", __FILE__, __LINE__, msg) +#define BND_ASSERT(cond, msg) do { if (!(cond)) { printk(KERN_WARNING "BND_ASSERT file %s line %d: %s\n", __FILE__, __LINE__, msg); dump_stack(); } } while (FALSE) +#define DBTRANSACT(x) +#define DBSHUTDOWN(x) //printk x +#define DBSPAWN(x) +#define DBSTACK(x) +#define DBLOCK(x) +#define DBREFS(x) +#define DBREAD(x) +#define DBDEATH(x) +#endif + +// errors triggered by userspace bugs +#define UPRINTF(a) do { printk a; } while(0) +#define BND_UASSERT(cond, msg) if (!(cond)) printk(KERN_WARNING "BND_UASSERT file %s line %d: %s\n", __FILE__, __LINE__, msg) + +#if BND_MEM_DEBUG +void *dbg_kmem_cache_alloc(struct kmem_cache *a, unsigned int b); +void dbg_kmem_cache_free(struct kmem_cache *a, void *b); + +#define kmem_cache_alloc dbg_kmem_cache_alloc +#define kmem_cache_free dbg_kmem_cache_free +#endif + +struct binder_thread; 
+ +typedef ssize_t status_t; + +//typedef unsigned int bool; +#define FALSE (0) +#define TRUE (~FALSE) + +/* Special function, implemented in binder.c, to try to find + a binder_thread structure for a pid. If 'create' is TRUE, + a new structure will be created for you (unattached to + a process) if it doesn't already exist; otherwise it will + return NULL. Returns with a strong reference held on the + thread. + + *** NOTE: Must not call this while holding a thread or + process lock! */ +struct binder_thread * check_for_thread(pid_t thread_pid, bool create); + +/* Special function, implemented in binder.c, for a parent to + lookup (or pre-create) the state for main thread of a child + process it is spawning. This function calls + binder_thread_SetParentThread() for you on the child thread, + and returns with a strong reference held on the thread. + + *** NOTE: Must not call this while holding a thread or + process lock! */ +struct binder_thread * attach_child_thread(pid_t child_pid, struct binder_thread *parent); + +/* Special function, implemented in binder.c, to remove a + thread structure from the global list. This needs to be + called when using the above two functions to create such + a structure, to remove it from the list when it is no + longer used. A strong reference is removed from the thread + and, if the strong count goes to zero AND the structure has + not yet been accessed by its user space thread, then the + thread structure will be removed from the list. + + *** NOTE: Must not call this while holding a thread or + process lock! */ +void forget_thread(struct binder_thread *thread); + +// Perform an accuire/release on an object. 
+#define BND_ACQUIRE(cname, that, type, id) cname##_Acquire(that, type) +#define BND_ATTEMPT_ACQUIRE(cname, that, type, id) cname##_AttemptAcquire(that, type) +#define BND_FIRST_ACQUIRE(cname, that, type, id) cname##_ForceAcquire(that, type) +#define BND_FORCE_ACQUIRE(cname, that, id) cname##_ForceAcquire(that, STRONG) +#define BND_RELEASE(cname, that, type, id) cname##_Release(that, type) + +// Declare acquire/release methods for a class. +#define BND_DECLARE_ACQUIRE_RELEASE(cname) \ + void cname##_Acquire(cname##_t *that, s32 type); \ + int cname##_ForceAcquire(cname##_t *that, s32 type); \ + int cname##_Release(cname##_t *that, s32 type); \ +/**/ + +// Declare attempt acquire method for a class. +#define BND_DECLARE_ATTEMPT_ACQUIRE(cname) \ + int cname##_AttemptAcquire(cname##_t *that, s32 type); \ +/**/ + +extern void dump_stack(void); +// Implement acquire/release methods for a class. +#define BND_IMPLEMENT_ACQUIRE_RELEASE(cname) \ +void \ +cname##_Acquire(cname##_t *that, s32 type) \ +{ \ + int res; \ + if (type == STRONG) { \ + res = atomic_inc_return(&that->m_primaryRefs); \ + BND_ASSERT(res > 1, "STRONG Acquire without strong ref"); \ + } \ + res = atomic_inc_return(&that->m_secondaryRefs); \ + if (type == STRONG) { \ + BND_ASSERT(res > 1, "STRONG Acquire without weak ref"); \ + } \ + else { \ + BND_ASSERT(res > 1, "WEAK Acquire without weak ref"); \ + } \ + DPRINTF(5, (KERN_WARNING "%s(%p, %s) s:%d w:%d\n", __func__, that, type == STRONG ? "STRONG" : "WEAK", atomic_read(&that->m_primaryRefs), atomic_read(&that->m_secondaryRefs)));\ + /*dump_stack()*/;\ +} \ +int \ +cname##_ForceAcquire(cname##_t *that, s32 type) \ +{ \ + int res; \ + res = atomic_inc_return(&that->m_secondaryRefs); \ + if (type == STRONG) { \ + res = atomic_inc_return(&that->m_primaryRefs); \ + } \ + DPRINTF(5, (KERN_WARNING "%s(%p, %s) s:%d w:%d\n", __func__, that, type == STRONG ? 
"STRONG" : "WEAK", atomic_read(&that->m_primaryRefs), atomic_read(&that->m_secondaryRefs)));\ + return res-1; \ +} \ +int \ +cname##_Release(cname##_t *that, s32 type) \ +{ \ + int rv1=-2, rv2=-2; \ + DPRINTF(5, (KERN_WARNING "%s(%p, %s) s:%d w:%d\n", __func__, that, type == STRONG ? "STRONG" : "WEAK", atomic_read(&that->m_primaryRefs), atomic_read(&that->m_secondaryRefs)));\ + if(type == STRONG) { \ + BND_ASSERT(atomic_read(&that->m_primaryRefs) > 0, "Strong reference underflow");\ + } \ + BND_ASSERT(atomic_read(&that->m_secondaryRefs) > 0, "Weak reference underflow");\ + /*dump_stack()*/;\ + switch (type) { \ + case STRONG: \ + if ((rv1 = atomic_dec_return(&that->m_primaryRefs)) == 0) { \ + cname##_Released(that); \ + } \ + case WEAK: \ + if ((rv2 = atomic_dec_return(&that->m_secondaryRefs)) == 0) {\ + cname##_destroy(that); \ + } \ + } \ + return ((type == STRONG) ? rv1 : rv2) + 1; \ +} \ +/**/ + +// Implement attempt acquire method for a class. +#define BND_IMPLEMENT_ATTEMPT_ACQUIRE(cname) \ +int \ +cname##_AttemptAcquire(cname##_t *that, s32 type) \ +{ \ + int cur; \ + switch (type) { \ + case STRONG: \ + cur = atomic_read(&that->m_primaryRefs); \ + while (cur > 0 && \ + !cmpxchg32( &that->m_primaryRefs.counter, \ + &cur, cur+1)); \ + if (cur <= 0) {\ + DPRINTF(5, (KERN_WARNING "%s(%p, %s) FAILED!\n", __func__, that, type == STRONG ? "STRONG" : "WEAK"));\ + /*dump_stack()*/;\ + return FALSE; \ + }\ + cur = atomic_inc_return(&that->m_secondaryRefs); \ + BND_ASSERT(cur > 1, "ATTEMPT ACQUIRE STONG without WEAK ref"); \ + DPRINTF(5, (KERN_WARNING "%s(%p, %s) s:%d w:%d\n", __func__, that, type == STRONG ? 
"STRONG" : "WEAK", atomic_read(&that->m_primaryRefs), atomic_read(&that->m_secondaryRefs)));\ + /*dump_stack()*/;\ + return TRUE; \ + case WEAK: \ + cur = atomic_read(&that->m_secondaryRefs); \ + while (cur > 0 && \ + !cmpxchg32( &that->m_secondaryRefs.counter, \ + &cur, cur+1)); \ + if (cur <= 0) {\ + DPRINTF(5, (KERN_WARNING "%s(%p, %s) FAILED!\n", __func__, that, type == STRONG ? "STRONG" : "WEAK"));\ + /*dump_stack()*/;\ + return FALSE; \ + }\ + DPRINTF(5, (KERN_WARNING "%s(%p, %s) s:%d w:%d\n", __func__, that, type == STRONG ? "STRONG" : "WEAK", atomic_read(&that->m_primaryRefs), atomic_read(&that->m_secondaryRefs)));\ + /*dump_stack()*/;\ + return TRUE; \ + } \ + return FALSE; \ +} \ +/**/ + +extern spinlock_t cmpxchg32_spinner; + +// Quick hack -- should be checking for x86, not ARM. + +#if defined(CONFIG_ARM) + +static __inline__ int cmpxchg32(volatile int *atom, int *val, int newVal) { + unsigned long flags; + spin_lock_irqsave(&cmpxchg32_spinner, flags); + if (*atom == *val) { + *atom = newVal; + spin_unlock_irqrestore(&cmpxchg32_spinner, flags); + return 1; + } + *val = *atom; + spin_unlock_irqrestore(&cmpxchg32_spinner, flags); + return 0; +}; + +#else + +static __inline__ int compare_and_swap32(volatile int *location, int oldValue, int newValue) +{ + int success; + asm volatile("lock; cmpxchg %%ecx, (%%edx); sete %%al; andl $1, %%eax" + : "=a" (success) : "a" (oldValue), "c" (newValue), "d" (location)); + return success; +} + +static __inline__ bool cmpxchg32(volatile int *atom, int *value, int newValue) +{ + int success = compare_and_swap32(atom, *value, newValue); + if (!success) + *value = *atom; + + return success; +}; + +#endif + +#define BND_LOCK(x) do { down(&(x)); \ + BND_ASSERT(atomic_read(&((x).count)) <= 0, "BND_LOCK() lock still free"); } while (0) +#define BND_UNLOCK(x) do { \ + BND_ASSERT(atomic_read(&((x).count)) <= 0, "BND_UNLOCK() lock already free"); \ + up(&(x)); } while (0) + +#if defined(CONFIG_ARM) +// __cpuc_flush_user_range is 
arm specific, but the generic function need a +// vm_area_struct and will flush the entire page. +#define BND_FLUSH_CACHE(start, end) do { \ + __cpuc_flush_user_range((size_t)start & ~(L1_CACHE_BYTES-1), L1_CACHE_ALIGN((size_t)end), 0); \ + } while(0) +#else +#define BND_FLUSH_CACHE(start, end) +#endif + +#define B_CAN_INTERRUPT (1) + +#define B_INFINITE_TIMEOUT ((~(0ULL))>>1) +#define B_ABSOLUTE_TIMEOUT (1) + +#define B_BAD_THREAD_ID ((pid_t)0) +#define B_REAL_TIME_PRIORITY (10) +#define B_NORMAL_PRIORITY (80) +#define B_LOW_PRIORITY (100) + +#define B_MIN_PRIORITY_VAL (5) +#define B_MAX_PRIORITY_VAL (100) + +#endif // BINDER_DEFS_H diff -urN linux-2.6.22.5/drivers/binder/binder_node.c linux-2.6.22.5-android/drivers/binder/binder_node.c --- linux-2.6.22.5/drivers/binder/binder_node.c 1970-01-01 10:00:00.000000000 +1000 +++ linux-2.6.22.5-android/drivers/binder/binder_node.c 2007-11-20 08:46:07.654250861 +1100 @@ -0,0 +1,140 @@ +/* binder driver + * Copyright (C) 2005 Palmsource, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + +#include "binder_node.h" +#include "binder_proc.h" +#include "binder_transaction.h" + +#define PURGATORY 0 +#if PURGATORY +static DECLARE_MUTEX(sem); +static binder_node_t* head = NULL; +static binder_node_t** tail = &head; +static int count = 0; + +static void my_free_node(binder_node_t *t) +{ + down(&sem); + *tail = t; + tail = (binder_node_t**)&t->m_ptr; + count++; + if (count > 20) { + t = head; + head = (binder_node_t*)head->m_ptr; + kmem_cache_free(node_cache, t); + count--; + } + up(&sem); +} +#define ALLOC_NODE kmem_cache_alloc(node_cache, GFP_KERNEL) +#define FREE_NODE(x) my_free_node(x) +#else +#define ALLOC_NODE kmem_cache_alloc(node_cache, GFP_KERNEL) +#define FREE_NODE(x) kmem_cache_free(node_cache, x) +#endif + +static atomic_t g_count = ATOMIC_INIT(0); + +int +binder_node_GlobalCount() +{ + return atomic_read(&g_count); +} + +BND_IMPLEMENT_ACQUIRE_RELEASE(binder_node); +BND_IMPLEMENT_ATTEMPT_ACQUIRE(binder_node); +// BND_IMPLEMENT_FORCE_ACQUIRE(binder_node); + +/* + * For the process which manages the contexts, we treat ptr == NULL specially. + * In particular, all transactions with a target descriptor of 0 get routed to + * the manager process and the target pointer the process receives gets set to + * NULL. We don't permit any team to send a binder with a NULL ptr, so we can + * never confuse the mappings. 
+ */ +binder_node_t *binder_node_init(binder_proc_t *team, void *ptr, void *cookie) +{ + binder_node_t *that = ALLOC_NODE; + atomic_inc(&g_count); + DPRINTF(5, (KERN_WARNING "%s(team=%p, ptr=%p, cookie=%p): %p\n", __func__, + team, ptr, cookie, that)); + atomic_set(&that->m_primaryRefs, 0); + atomic_set(&that->m_secondaryRefs, 0); + that->m_ptr = ptr; + that->m_cookie = cookie; + that->m_home = team; + if (that->m_home) BND_ACQUIRE(binder_proc, that->m_home, WEAK, that); + return that; +} + +void binder_node_destroy(binder_node_t *that) +{ + atomic_dec(&g_count); + DPRINTF(4, (KERN_WARNING "%s(%p): ptr=%p, cookie=%p\n", __func__, that, + that->m_ptr, that->m_cookie)); + if (that->m_home) { + if (that->m_ptr) { + binder_proc_t* proc = binder_node_AcquireHome(that, that); + if (proc) { + binder_proc_Transact(proc, binder_transaction_CreateRef(tfDecRefs, that->m_ptr, that->m_cookie, proc)); + binder_proc_RemoveLocalMapping(proc, that->m_ptr, that); + BND_RELEASE(binder_proc, proc, STRONG, that); + } + } + BND_RELEASE(binder_proc, that->m_home, WEAK, that); + } + FREE_NODE(that); +} + +void +binder_node_Released(binder_node_t *that) +{ + binder_proc_t* proc = binder_node_AcquireHome(that, that); + DPRINTF(4, (KERN_WARNING "%s(%p): ptr=%p\n", __func__, that, that->m_ptr)); + if (proc) { + DPRINTF(5, (KERN_WARNING " -- m_secondaryRefs=%d\n",atomic_read(&that->m_secondaryRefs))); + binder_proc_Transact(proc, binder_transaction_CreateRef(tfRelease,that->m_ptr,that->m_cookie,proc)); + binder_proc_RemoveLocalStrongRef(proc, that); + BND_RELEASE(binder_proc, proc, STRONG, that); + } +} + +binder_proc_t* +binder_node_AcquireHome(binder_node_t *that, const void *id) +{ + if (that->m_home && BND_ATTEMPT_ACQUIRE(binder_proc, that->m_home, STRONG, id)) { + return that->m_home; + } + return NULL; +} + +status_t +binder_node_Send(binder_node_t *that, struct binder_transaction *t) +{ + binder_proc_t* proc = binder_node_AcquireHome(that, that); + if (proc) { + status_t res = 
binder_proc_Transact(proc, t); + BND_RELEASE(binder_proc, proc, STRONG, that); + return res; + } + + if (t->sender) binder_thread_ReplyDead(t->sender); + binder_transaction_Destroy(t); + return 0; +} + diff -urN linux-2.6.22.5/drivers/binder/binder_node.h linux-2.6.22.5-android/drivers/binder/binder_node.h --- linux-2.6.22.5/drivers/binder/binder_node.h 1970-01-01 10:00:00.000000000 +1000 +++ linux-2.6.22.5-android/drivers/binder/binder_node.h 2007-11-20 08:46:07.654250861 +1100 @@ -0,0 +1,70 @@ +/* binder driver + * Copyright (C) 2005 Palmsource, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + +#ifndef BINDER2_NODE_H +#define BINDER2_NODE_H + +#include "binder_defs.h" +#include "binder_proc.h" + +typedef struct binder_node { + atomic_t m_primaryRefs; + atomic_t m_secondaryRefs; + void * m_ptr; + void * m_cookie; + binder_proc_t * m_home; +} binder_node_t; + +int binder_node_GlobalCount(void); + +binder_node_t * binder_node_init(binder_proc_t *team, void *ptr, void *cookie); +void binder_node_destroy(binder_node_t *that); + +void binder_node_Released(binder_node_t *that); + +// Return a new strong reference on the node's home team, or NULL +// if the team no longer exists. 
Be sure to release the reference +// (via BND_RELEASE(binder_proc, team, STRONG, id)) if the return is non-NULL. +binder_proc_t* binder_node_AcquireHome(binder_node_t *that, const void *id); + +// Dispatch a transaction to the node's process. +status_t binder_node_Send(binder_node_t *that, struct binder_transaction *t); + +BND_DECLARE_ACQUIRE_RELEASE(binder_node); +// BND_DECLARE_FORCE_ACQUIRE(binder_node); + +/* Super-special AttemptAcquire() that also lets you attempt + to acquire a secondary ref. But note that binder_proc_t is + the ONLY one who can attempt a secondary, ONLY while holding + its lock, for the simple reason that binder_node's destructor + unregisters itself from the team. In other words, it's a + dihrty hawck. +*/ +BND_DECLARE_ATTEMPT_ACQUIRE(binder_node); + +/* Send a transaction to this node. */ +// void binder_node_Send(struct binder_transaction *t); +// void * binder_node_Ptr(binder_node_t *that); +// binder_proc_t * binder_node_Home(binder_node_t *that); + +#define binder_node_Ptr(that) ((that)->m_ptr) +#define binder_node_Cookie(that) ((that)->m_cookie) +#define binder_node_IsAlive(that) (binder_proc_IsAlive((that)->m_home)) +#define binder_node_IsRoot(that) ((that)->m_isRoot) + +#endif // BINDER2_NODE_H diff -urN linux-2.6.22.5/drivers/binder/binder_proc.c linux-2.6.22.5-android/drivers/binder/binder_proc.c --- linux-2.6.22.5/drivers/binder/binder_proc.c 1970-01-01 10:00:00.000000000 +1000 +++ linux-2.6.22.5-android/drivers/binder/binder_proc.c 2007-11-20 08:46:07.654250861 +1100 @@ -0,0 +1,2215 @@ +/* binder driver + * Copyright (C) 2005 Palmsource, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + +#include +// #include +#include +#include +#include + +#include "binder_defs.h" +#include "binder_proc.h" +#include "binder_thread.h" +#include "binder_node.h" +#include "binder_transaction.h" +#include "iobuffer.h" + +#define BND_PROC_MAX_IDLE_THREADS 3 + +static inline unsigned long calc_order_from_size(unsigned long size) +{ +#if 0 + unsigned long order = 0; + if (size) { + size -= 1; + size *= 2; + } + size >>= PAGE_SHIFT+1; + while (size) { + order++; + size >>= 1; + } + return order; +#else + return size ? get_order(size) : 0; +#endif +} + +static void binder_proc_init(binder_proc_t *that); +static void binder_proc_spawn_looper(binder_proc_t *that); +static void binder_proc_wakeup_timer(unsigned long); +static void binder_proc_idle_timer(unsigned long); +static void binder_proc_send_death_notification(binder_proc_t *that, death_notification_t *death); +static void binder_proc_death_notification_dec_ref(binder_proc_t *that, death_notification_t *death, bool locked); +static void binder_proc_RemoveThreadFromWaitStack(binder_proc_t *that, binder_thread_t *thread); + +static void set_thread_priority(pid_t thread, int priority) +{ + int nice; + + // The following must match SysThreadChangePriority in libbinder. 
+ if(priority >= 80) + { + // Normal to low priority + // map 80..100 to 0..19 + nice = priority - 80; + if(nice > 19) + nice = 19; + } + else + { + // Normal priority or better + // map 0..79 to -20..-1 + nice = priority-3 - 80; + nice /= 4; + } + //printk("set_thread_priority tid %d pri %d == nice %d\n", thread, priority, nice); + set_user_nice(find_task_by_pid(thread), nice); +} + + +void binder_proc_init(binder_proc_t *that) +{ + int i; + atomic_set(&that->m_primaryRefs, 0); + atomic_set(&that->m_secondaryRefs, 0); + init_MUTEX(&that->m_lock); + spin_lock_init(&that->m_spin_lock); + init_MUTEX(&that->m_map_pool_lock); + that->m_threads = NULL; + INIT_LIST_HEAD(&that->m_waitStack); + that->m_waitStackCount = 0; + that->m_wakeThreadMask = 0; + that->m_wakeupTime = B_INFINITE_TIMEOUT; + that->m_wakeupPriority = 10; + init_timer(&that->m_wakeupTimer); + that->m_wakeupTimer.function = &binder_proc_wakeup_timer; + that->m_wakeupTimer.data = (unsigned long)that; + init_timer(&that->m_idleTimer); + that->m_idleTimer.function = &binder_proc_idle_timer; + that->m_idleTimer.data = (unsigned long)that; + that->m_idleTimeout = 5*HZ; + that->m_replyTimeout = 5*HZ; + //that->m_idleTimeout = 5*60*HZ; + //that->m_replyTimeout = 5*60*HZ; + that->m_syncCount = 0; + that->m_freeCount = 0; + that->m_head = NULL; + that->m_tail = &that->m_head; + that->m_needFree = NULL; + that->m_state = 0; + for (i=0;im_localHash[i] = NULL; + that->m_reverseHash[i] = NULL; + } + that->m_numRemoteStrongRefs = 0; + that->m_rootObject = NULL; + that->m_rootStopsProcess = 0; + that->m_descriptors = NULL; + that->m_descriptorCount = 0; + that->m_waitingThreads = 0; + that->m_nonblockedThreads = 0; + that->m_maxThreads = 5; + //that->m_idlePriority = B_REAL_TIME_PRIORITY; + that->m_idlePriority = B_NORMAL_PRIORITY; + atomic_set(&that->m_loopingThreads, 0); +#if 0 + that->m_spawningThreads = 0; +#endif + that->m_rangeMap = RB_ROOT; + that->m_freeMap = RB_ROOT; + BND_FIRST_ACQUIRE(binder_proc, that, 
STRONG, that); + that->m_eventTransaction = binder_transaction_CreateEmpty(); + binder_transaction_SetEvent(that->m_eventTransaction, TRUE); + that->m_pool = NULL; + that->m_pool_active = 0; + INIT_HLIST_HEAD(&that->m_incoming_death_notifications); + INIT_HLIST_HEAD(&that->m_outgoing_death_notifications); + INIT_HLIST_HEAD(&that->m_pending_death_notifications); + INIT_HLIST_HEAD(&that->m_active_death_notifications); + INIT_HLIST_HEAD(&that->m_deleted_death_notifications); +} + +binder_proc_t * +new_binder_proc() +{ + // allocate a binder_proc_t from the slab allocator + binder_proc_t *that = (binder_proc_t*)kmalloc(sizeof(binder_proc_t), GFP_KERNEL); + BND_ASSERT(that != NULL, "failed to allocate binder_proc"); + if(that == NULL) + return NULL; + binder_proc_init(that); + DPRINTF(2, (KERN_WARNING "************* Creating binder_proc %p *************\n", that)); + return that; +} + +void +binder_proc_destroy(binder_proc_t *that) +{ + local_mapping_t *lm; + reverse_mapping_t *rm; + local_mapping_t *localMappings; + reverse_mapping_t *reverseMappings; + range_map_t *r; + struct rb_node *n; + int i; + bool first; + + DPRINTF(2, (KERN_WARNING "************* Destroying binder_proc %p *************\n", that)); + + BND_ASSERT(that->m_state & btCleaned, "binder_proc_Die wns not done"); + BND_ASSERT(!(that->m_state & btFreed), "already free"); + if(that->m_state & btFreed) + return; + + //DPRINTF(5, (KERN_WARNING "Binder team %p: collecting mappings.\n", that)); + lm = localMappings = NULL; + rm = reverseMappings = NULL; + for (i=0;im_localHash[i]) { + // mark the front of the list + if (!localMappings) lm = localMappings = that->m_localHash[i]; + // or tack this chain on the end + else lm->next = that->m_localHash[i]; + // run to the end of the chain + while (lm->next) lm = lm->next; + // mark this chain handled + that->m_localHash[i] = NULL; + } + if (that->m_reverseHash[i]) { + // ditto for reverse mappings + if (!reverseMappings) rm = reverseMappings = 
that->m_reverseHash[i]; + else rm->next = that->m_reverseHash[i]; + while (rm->next) rm = rm->next; + that->m_reverseHash[i] = NULL; + } + } + + first = TRUE; + while ((lm = localMappings)) { + if (first) { + first = FALSE; + DBSHUTDOWN((KERN_WARNING "Binder team %p: cleaning up local mappings.\n", that)); + } + localMappings = lm->next; + // FIXME: send death notification + kmem_cache_free(local_mapping_cache, lm); + } + + first = TRUE; + while ((rm = reverseMappings)) { + if (first) { + first = FALSE; + DBSHUTDOWN((KERN_WARNING "Binder team %p: cleaning up reverse mappings.\n", that)); + } + reverseMappings = rm->next; + DBSHUTDOWN((KERN_WARNING "Removed reverse mapping from node %p to descriptor %d\n", + rm->node, rm->descriptor+1)); + // FIXME: decrement use count and possibly notify owner. It seems like we do this below. + kmem_cache_free(reverse_mapping_cache, rm); + } + + /* + for (i=0; im_localHash[i] == NULL, "Leaking some local mappings!"); + BND_ASSERT(that->m_reverseHash[i] == NULL, "Leaking some reverse mappings!"); + } + */ + + // Free up any items in the transaction data pool. 
+ BND_LOCK(that->m_map_pool_lock); + n = rb_first(&that->m_rangeMap); + while (n) { + r = rb_entry(n, range_map_t, rm_rb); + n = rb_next(n); + + rb_erase(&r->rm_rb, &that->m_rangeMap); + //__free_pages(r->page, calc_order_from_size(r->end - r->start)); + kmem_cache_free(range_map_cache, r); + } + n = rb_first(&that->m_freeMap); + while (n) { + r = rb_entry(n, range_map_t, rm_rb); + n = rb_next(n); + rb_erase(&r->rm_rb, &that->m_rangeMap); + kmem_cache_free(range_map_cache, r); + } + BND_UNLOCK(that->m_map_pool_lock); + + // free_lock(&that->m_lock); + that->m_state |= btFreed; + kfree(that); +} + +void +binder_proc_SetRootObject(binder_proc_t *that, struct binder_node *node) +{ + BND_LOCK(that->m_lock); + if (that->m_rootObject == NULL) that->m_rootObject = node; + BND_UNLOCK(that->m_lock); +} + +void +binder_proc_Stop(binder_proc_t *that, bool now) +{ + bool goodbye; + + DBLOCK((KERN_WARNING "binder_proc_Stop() going to lock %p in %d\n", that, current->pid)); + BND_LOCK(that->m_lock); + + that->m_rootStopsProcess = TRUE; + goodbye = now || that->m_rootObject == (binder_node_t*)-1; + + BND_UNLOCK(that->m_lock); + + if (goodbye) binder_proc_Die(that, FALSE); +} + +bool +binder_proc_AddThread(binder_proc_t *that, binder_thread_t *t) +{ + BND_FIRST_ACQUIRE(binder_thread, t, STRONG, 0); + BND_LOCK(that->m_lock); + if (binder_proc_IsAlive(that)) { + t->next = that->m_threads; + that->m_threads = t; + BND_UNLOCK(that->m_lock); + } else { + BND_UNLOCK(that->m_lock); + BND_RELEASE(binder_thread, t, STRONG, that); + t = NULL; + } + DBSHUTDOWN((KERN_WARNING "%s(%p): %p\n", __func__, that, t)); + return t != NULL; +} + +void +binder_proc_RemoveThread(binder_proc_t *that, binder_thread_t *t) +{ + binder_thread_t **thread; + DBSHUTDOWN((KERN_WARNING "%s(%p): %p\n", __func__, that, t)); + DBLOCK((KERN_WARNING "RemoveThread() going to lock %p in %d\n", that, current->pid)); + BND_LOCK(that->m_lock); + for (thread = &that->m_threads; *thread && *thread != t; thread = 
&(*thread)->next) + ; + if (*thread) { + *thread = (*thread)->next; + } else { + DPRINTF(5, (KERN_WARNING "binder_team %p: RemoveThread of %d does not exist\n", that, t->m_thid)); + } + + // If this is the last thread, the team is dead. + if (!that->m_threads) binder_proc_Die(that, TRUE); + else BND_UNLOCK(that->m_lock); +} + +void +binder_proc_Released(binder_proc_t *that) +{ + DBSHUTDOWN((KERN_WARNING "%s(%p)\n", __func__, that)); + binder_proc_Die(that, FALSE); +} + +void +binder_proc_Die(binder_proc_t *that, bool locked) +{ + binder_transaction_t *cmd; + binder_node_t *n; + binder_thread_t *thr; + descriptor_t *descriptors; + bool dying; + bool first; + binder_transaction_t *cmdHead; + binder_transaction_t *freeCmdHead; + s32 descriptorCount; + binder_thread_t *threads; + bool acquired; + struct hlist_node *_p, *_p2; + death_notification_t *death; + + DBSHUTDOWN((KERN_WARNING "*****************************************\n")); + DBSHUTDOWN((KERN_WARNING "**** %s(%p, %s)\n", __func__, that, locked ? "locked" : "unlocked")); + + // Make sure our destructor doesn't get called until Die() is done. + BND_ACQUIRE(binder_proc, that, WEAK, that); + + // Make sure that Released() doesn't get called if we are dying + // before all primary references have been removed. 
+ acquired = BND_ATTEMPT_ACQUIRE(binder_proc, that, STRONG, that); + + if (!locked) { + DBLOCK((KERN_WARNING "%s() going to lock %p in %d\n", __func__, that, current->pid)); + BND_LOCK(that->m_lock); + } + dying = that->m_state&btDying; + that->m_state |= btDying; + BND_UNLOCK(that->m_lock); + + if (dying) { + DBSHUTDOWN((KERN_WARNING "racing to kill %p\n", that)); + while (!(that->m_state&btDead)) msleep(10); + BND_RELEASE(binder_proc, that, WEAK, that); + if (acquired) BND_RELEASE(binder_proc, that, STRONG, that); + DBSHUTDOWN((KERN_WARNING "race finished\n")); + return; + } + + /* + DPRINTF(5, (KERN_WARNING "Binder team %p: removing from driver.\n", that)); + remove_team(that->tgid); + delete_sem(that->m_spawnerSem); + that->m_spawnerSem = B_BAD_SEM_ID; + */ + + DBLOCK((KERN_WARNING "%s() #2 going to lock %p in %d\n", __func__, that, current->pid)); + BND_LOCK(that->m_lock); + + while(!hlist_empty(&that->m_outgoing_death_notifications)) { + binder_proc_t *observer_proc; + death = hlist_entry(that->m_outgoing_death_notifications.first, typeof(*death), observed_or_active); + hlist_del(&death->observed_or_active); + DBDEATH((KERN_WARNING "DeathNot %p: removed from proc %p m_outgoing_death_notifications, refcnt=%d\n", + death, that, atomic_read(&death->ref_count))); + death->observed_proc = NULL; + observer_proc = death->observer_proc; + BND_UNLOCK(that->m_lock); + binder_proc_send_death_notification(observer_proc, death); + binder_proc_death_notification_dec_ref(observer_proc, death, FALSE); + BND_LOCK(that->m_lock); + } + + while(!hlist_empty(&that->m_incoming_death_notifications)) { + binder_proc_t *observed_proc; + death = hlist_entry(that->m_incoming_death_notifications.first, typeof(*death), observer); + DBDEATH((KERN_WARNING "DeathNot %p: removing from proc %p m_incoming_death_notifications, refcnt=%d\n", + death, that, atomic_read(&death->ref_count))); + observed_proc = death->observed_proc; + if(observed_proc != NULL) { + if(observed_proc != that) { + // We 
need to grab the observed process' lock since the record + // is on the outgoing list on that process. + BND_UNLOCK(that->m_lock); + BND_LOCK(observed_proc->m_lock); + } + if(death->observed_proc != NULL) { + // If we are removing the record from the outgoing list it may + // have already been removed by the time we get the lock. + hlist_del(&death->observed_or_active); + DBDEATH((KERN_WARNING "DeathNot %p: removed from proc %p observed_or_active, refcnt=%d\n", + death, death->observed_proc, atomic_read(&death->ref_count))); + } + if(observed_proc != that) { + // Reacquire our own process lock. + BND_UNLOCK(observed_proc->m_lock); + BND_LOCK(that->m_lock); + } + if(death->observed_proc != NULL) { + // Release the reference we got from the list before we + // switched the locks back. + death->observed_proc = NULL; + binder_proc_death_notification_dec_ref(that, death, TRUE); + } + } + DBDEATH((KERN_WARNING "DeathNot %p: finishing remove from proc %p m_incoming_death_notifications, refcnt=%d\n", + death, that, atomic_read(&death->ref_count))); + hlist_del(&death->observer); + binder_proc_death_notification_dec_ref(that, death, TRUE); + } + hlist_for_each_entry_safe(death, _p, _p2, &that->m_pending_death_notifications, observed_or_active) { + DBDEATH((KERN_WARNING "DeathNot %p: removing from proc %p m_pending_death_notifications, refcnt=%d\n", + death, that, atomic_read(&death->ref_count))); + hlist_del(&death->observed_or_active); + binder_proc_death_notification_dec_ref(that, death, TRUE); + } + hlist_for_each_entry_safe(death, _p, _p2, &that->m_active_death_notifications, observed_or_active) { + DBDEATH((KERN_WARNING "DeathNot %p: removing from proc %p m_active_death_notifications, refcnt=%d\n", + death, that, atomic_read(&death->ref_count))); + hlist_del(&death->observed_or_active); + binder_proc_death_notification_dec_ref(that, death, TRUE); + } + hlist_for_each_entry_safe(death, _p, _p2, &that->m_deleted_death_notifications, observed_or_active) { + 
DBDEATH((KERN_WARNING "DeathNot %p: removing from proc %p m_deleted_death_notifications and freeing, refcnt=%d\n", + death, that, atomic_read(&death->ref_count))); + hlist_del(&death->observed_or_active); + kfree(death); + } + + // Now collect everything we have to clean up. We don't want to + // do stuff on these until after our own lock is released, to avoid + // various horrible deadlock situations. + + del_timer_sync(&that->m_wakeupTimer); + del_timer_sync(&that->m_idleTimer); + + freeCmdHead = that->m_needFree; + that->m_needFree = NULL; + + cmdHead = that->m_head; + that->m_head = NULL; + that->m_tail = &that->m_head; + cmd = cmdHead; + while (cmd) { + // If a pending transaction is the event transaction, remove + // our global pointer so that nobody else tries to use it. + if (cmd == that->m_eventTransaction) that->m_eventTransaction = NULL; + cmd = cmd->next; + } + + descriptors = that->m_descriptors; + descriptorCount = that->m_descriptorCount; + that->m_descriptors = NULL; + that->m_descriptorCount = 0; + + threads = that->m_threads; + that->m_threads = NULL; + for (thr = threads; thr != NULL; thr = thr->next) BND_ACQUIRE(binder_thread, thr, WEAK, that); + + that->m_state |= btDead; + + BND_UNLOCK(that->m_lock); + + // Now do all the cleanup! + + first = TRUE; + while ((thr = threads)) { + if (first) { + first = FALSE; + DBSHUTDOWN((KERN_WARNING "Binder team %p: removing remaining threads.\n", that)); + } + threads = thr->next; + DBSHUTDOWN((KERN_WARNING "Killing thread %p (%d)\n", thr, binder_thread_Thid(thr))); + binder_thread_Die(thr); + BND_RELEASE(binder_thread, thr, WEAK, that); + } + + first = TRUE; + while ((cmd=freeCmdHead)) { + if (first) { + first = FALSE; + DBSHUTDOWN((KERN_WARNING "Binder team %p: detaching free transactions.\n", that)); + } + DBSHUTDOWN((KERN_WARNING "Detaching transaction %p from thread %p (%d) to thread %p (%d) node %p\n", + cmd, cmd->sender, cmd->sender ? 
binder_thread_Thid(cmd->sender) : -1, + cmd->receiver, cmd->receiver ? binder_thread_Thid(cmd->receiver) : -1, + cmd->target)); + + // XXX The old implementation of this would call ReleaseTeam() + // here to keep the transaction around so that user space could + // hold on to it after replying. For some reason this would + // cause leaks (if the process never got destroyed), and this + // system doesn't use this feature, so now we just destroy it. + freeCmdHead = cmd->next; + binder_transaction_Destroy(cmd); + /* + binder_transaction_ReleaseTeam(cmd); + cmd = cmd->next; + */ + } + + first = TRUE; + while ((cmd = cmdHead)) { + if (first) { + first = FALSE; + DBSHUTDOWN((KERN_WARNING "Binder team %p: cleaning up pending commands.\n", that)); + } + if (cmd->sender) { + DBSHUTDOWN((KERN_WARNING "Returning transaction %p to thread %p (%d)\n", cmd, cmd->sender, binder_thread_Thid(cmd->sender))); + binder_thread_ReplyDead(cmd->sender); + } + cmdHead = cmd->next; + binder_transaction_Destroy(cmd); + } + + first = TRUE; + if (descriptors) { + int i; + for (i=0;im_eventTransaction) binder_transaction_Destroy(that->m_eventTransaction); + that->m_eventTransaction = NULL; + + DBSHUTDOWN((KERN_WARNING "Binder process %p: DEAD!\n", that)); + + BND_ASSERT(that->m_head == NULL, "that->m_head != NULL"); + + that->m_state |= btCleaned; + BND_RELEASE(binder_proc, that, WEAK, that); + if (acquired) BND_RELEASE(binder_proc, that, STRONG, that); + + DBSHUTDOWN((KERN_WARNING "**** %s(%p, %s) done dying!\n", __func__, that, locked ? 
"locked" : "unlocked")); + DBSHUTDOWN((KERN_WARNING "*****************************************\n")); +} + +status_t +binder_proc_RequestDeathNotification(binder_proc_t *that, binder_proc_t *client, void *cookie) +{ + bool already_dead = FALSE; + death_notification_t *death = kmalloc(sizeof(death_notification_t), GFP_KERNEL); + if(death == NULL) + return -ENOMEM; + DBDEATH((KERN_WARNING "DeathNot %p: RequestDeathNotification created proc %p watching proc %p\n", + death, client, that)); + atomic_set(&death->ref_count, 1); + death->observer_proc = client; + death->observed_proc = NULL; + death->cookie = cookie; + BND_LOCK(that->m_lock); + if(binder_proc_IsAlive(that)) { + atomic_inc(&death->ref_count); + death->observed_proc = that; + hlist_add_head(&death->observed_or_active, &that->m_outgoing_death_notifications); + DBDEATH((KERN_WARNING "DeathNot %p: added to proc %p m_outgoing_death_notifications, refcnt=%d\n", + death, that, atomic_read(&death->ref_count))); + } + else { + DBDEATH((KERN_WARNING "DeathNot %p: already dead!\n", death)); + already_dead = TRUE; + } + BND_UNLOCK(that->m_lock); + BND_LOCK(client->m_lock); + if(binder_proc_IsAlive(client)) { + atomic_inc(&death->ref_count); + hlist_add_head(&death->observer, &client->m_incoming_death_notifications); + DBDEATH((KERN_WARNING "DeathNot %p: added to proc %p m_incoming_death_notifications, refcnt=%d\n", + death, client, atomic_read(&death->ref_count))); + } + BND_UNLOCK(client->m_lock); + + if(already_dead) + binder_proc_send_death_notification(client, death); + binder_proc_death_notification_dec_ref(client, death, FALSE); + return 0; +} + +status_t +binder_proc_ClearDeathNotification(binder_proc_t *that, binder_proc_t *client, void *cookie) +{ + struct hlist_node *_p; + death_notification_t *death = NULL; + + BND_LOCK(client->m_lock); + hlist_for_each_entry(death, _p, &client->m_incoming_death_notifications, observer) { + if(death->cookie == cookie) { + hlist_del(&death->observer); + break; + } + } + 
BND_UNLOCK(client->m_lock); + + DBDEATH((KERN_WARNING "DeathNot %p: ClearDeathNotification for cookie %p\n", death, cookie)); + if(death == NULL) + return -ENOENT; + BND_LOCK(that->m_lock); + if(death->observed_proc == that) { + hlist_del(&death->observed_or_active); + binder_proc_death_notification_dec_ref(client, death, FALSE); // this is holding the wrong lock, but we have a second reference + DBDEATH((KERN_WARNING "DeathNot %p: removed from proc %p m_incoming_death_notifications, refcnt=%d\n", + death, client, atomic_read(&death->ref_count))); + death->observed_proc = NULL; + } + else { + DBDEATH((KERN_WARNING "DeathNot %p ClearDeathNotification: already pending or sent!\n", death)); + } + BND_UNLOCK(that->m_lock); + binder_proc_death_notification_dec_ref(client, death, FALSE); // from hlist_del(&death->observer); + return 0; +} + +status_t +binder_proc_DeadBinderDone(binder_proc_t *that, void *cookie) +{ + struct hlist_node *_p; + death_notification_t *death = NULL; + BND_LOCK(that->m_lock); + hlist_for_each_entry(death, _p, &that->m_active_death_notifications, observed_or_active) { + if(death->cookie == cookie) { + DBDEATH((KERN_WARNING "DeathNot %p DeadBinderDone: removing from proc %p m_active_death_notifications, refcnt=%d\n", + death, that, atomic_read(&death->ref_count))); + hlist_del(&death->observed_or_active); + death->observed_proc = NULL; + binder_proc_death_notification_dec_ref(that, death, TRUE); + break; + } + } + BND_UNLOCK(that->m_lock); + DBDEATH((KERN_WARNING "DeathNot %p: DeadBinderDone completed on cookie %p\n", death, cookie)); + if(death == NULL) + return -ENOENT; + return 0; +} + +static void +binder_proc_activate_death_processing_thread(binder_proc_t *that) +{ + binder_thread_t *thread; + + assert_spin_locked(&that->m_spin_lock); + if(!list_empty(&that->m_waitStack)) { + // TODO: pop thread from wait stack here + thread = list_entry(that->m_waitStack.next, binder_thread_t, waitStackEntry); + DBDEATH((KERN_WARNING "Activating death 
processing thread pid %d (proc %p)\n", + thread->m_thid, that)); + binder_proc_RemoveThreadFromWaitStack(that, thread); + thread->wakeReason = WAKE_REASON_PROCESS_DEATH; + BND_ASSERT(thread->nextRequest == NULL, "Thread has a request!"); + binder_thread_Wakeup(thread); + } + else { + BND_ASSERT((that->m_wakeThreadMask & WAKE_THREAD_FOR_PROCESS_DEATH) == 0, "WAKE_THREAD_FOR_PROCESS_DEATH already set"); + that->m_wakeThreadMask |= WAKE_THREAD_FOR_PROCESS_DEATH; + DBSPAWN((KERN_WARNING "%s(%p) empty waitstack\n", __func__, that)); + } +} + +void +binder_proc_send_death_notification(binder_proc_t *that, death_notification_t *death) +{ + unsigned long flags; + bool first; + + DIPRINTF(0, (KERN_WARNING "%s(%p)\n", __func__, that)); + + BND_LOCK(that->m_lock); + + DBDEATH((KERN_WARNING "DeathNot %p: Sending death notification to %p (alive=%d)\n", + death, that, binder_proc_IsAlive(that))); + + if(binder_proc_IsAlive(that)) { + spin_lock_irqsave(&that->m_spin_lock, flags); + first = hlist_empty(&that->m_pending_death_notifications) && hlist_empty(&that->m_deleted_death_notifications); + + atomic_inc(&death->ref_count); + hlist_add_head(&death->observed_or_active, &that->m_pending_death_notifications); + DBDEATH((KERN_WARNING "DeathNot %p: adding to proc %p m_pending_death_notifications, refcnt=%d, first=%d\n", + death, that, atomic_read(&death->ref_count), first)); + death->observed_proc = that; + + if(first) { + binder_proc_activate_death_processing_thread(that); + } + spin_unlock_irqrestore(&that->m_spin_lock, flags); + } + + BND_UNLOCK(that->m_lock); +} + +void +binder_proc_death_notification_dec_ref(binder_proc_t *that, death_notification_t *death, bool locked) +{ + DBDEATH((KERN_WARNING "DeathNot %p: decrementing refcnt, cur=%d\n", + death, atomic_read(&death->ref_count))); + if(atomic_dec_return(&death->ref_count) == 0) { + BND_ASSERT(death->observed_proc == NULL, "freeing death_notification_t with observed_proc still set"); + if(!locked) + BND_LOCK(that->m_lock); + 
if(binder_proc_IsAlive(that)) { + unsigned long flags; + bool first; + spin_lock_irqsave(&that->m_spin_lock, flags); + first = hlist_empty(&that->m_pending_death_notifications) && hlist_empty(&that->m_deleted_death_notifications); +#if BINDER_DEBUG + struct hlist_node *_p, *_p2; + death_notification_t *node; + hlist_for_each_entry_safe(node, _p, _p2, &that->m_outgoing_death_notifications, observed_or_active) { + BND_ASSERT(node != death, "Death ref count reached 0 while still on m_outgoing_death_notifications list"); + } + hlist_for_each_entry_safe(node, _p, _p2, &that->m_incoming_death_notifications, observer) { + BND_ASSERT(node != death, "Death ref count reached 0 while still on m_incoming_death_notifications list"); + } + hlist_for_each_entry_safe(node, _p, _p2, &that->m_pending_death_notifications, observed_or_active) { + BND_ASSERT(node != death, "Death ref count reached 0 while still on m_pending_death_notifications list"); + } + hlist_for_each_entry_safe(node, _p, _p2, &that->m_active_death_notifications, observed_or_active) { + BND_ASSERT(node != death, "Death ref count reached 0 while still on m_active_death_notifications list"); + } + hlist_for_each_entry_safe(node, _p, _p2, &that->m_deleted_death_notifications, observed_or_active) { + BND_ASSERT(node != death, "Death ref count reached 0 while still on m_deleted_death_notifications list"); + } + DBDEATH((KERN_WARNING "DeathNot %p: observer.next=%p, active.next=%p\n", + death, death->observer.next, death->observed_or_active.next)); +#endif + BND_ASSERT(death->observer.next == LIST_POISON1, "death ref count reached 0 while still on observer list"); + BND_ASSERT(death->observed_or_active.next == LIST_POISON1, "death ref count reached 0 while still on observed_or_active list"); + DBDEATH((KERN_WARNING "DeathNot %p: adding to deleted list, first=%d\n", death, first)); + hlist_add_head(&death->observed_or_active, &that->m_deleted_death_notifications); + if(first) + 
binder_proc_activate_death_processing_thread(that); + spin_unlock_irqrestore(&that->m_spin_lock, flags); + } + else { + kfree(death); + } + if(!locked) + BND_UNLOCK(that->m_lock); + } +} + +void +binder_proc_GetPendingDeathNotifications(binder_proc_t *that, binder_thread_t *thread, iobuffer_t *io) +{ + struct hlist_node *_p, *_p2; + death_notification_t *death; + BND_LOCK(that->m_lock); + + hlist_for_each_entry_safe(death, _p, _p2, &that->m_deleted_death_notifications, observed_or_active) { + if(iobuffer_remaining(io) < 8) + goto buffer_full; + DBDEATH((KERN_WARNING "DeathNot %p: GetPending removing from proc %p m_deleted_death_notifications and freeing\n", + death, that)); + hlist_del(&death->observed_or_active); + iobuffer_write_u32(io, brCLEAR_DEATH_NOTIFICATION_DONE); + iobuffer_write_u32(io, (int32_t)death->cookie); + kfree(death); + } + + hlist_for_each_entry_safe(death, _p, _p2, &that->m_pending_death_notifications, observed_or_active) { + if(iobuffer_remaining(io) < 8) + goto buffer_full; + hlist_del(&death->observed_or_active); + iobuffer_write_u32(io, brDEAD_BINDER); + iobuffer_write_u32(io, (int32_t)death->cookie); + hlist_add_head(&death->observed_or_active, &that->m_active_death_notifications); + DBDEATH((KERN_WARNING "DeathNot %p: moved from proc %p m_pending_death_notifications to m_active_death_notifications, refcnt=%d\n", + death, that, atomic_read(&death->ref_count))); + } + thread->wakeReason = WAKE_REASON_NONE; +buffer_full: + BND_UNLOCK(that->m_lock); +} + +status_t +binder_proc_AddToNeedFreeList(binder_proc_t *that, binder_transaction_t *t) +{ + BND_ACQUIRE(binder_proc, that, WEAK, that); + + binder_transaction_ReleaseTarget(t); + + DBLOCK((KERN_WARNING "AddToNeedFreeList() going to lock %p in %d\n", that, current->pid)); + BND_LOCK(that->m_lock); + DPRINTF(2, (KERN_WARNING "AddToNeedFreeList %p for team %p\n",t,that)); + if (!binder_proc_IsAlive(that)) { + // Don't call this with lock held -- it could cause all other + // sorts of things to 
happen. + BND_UNLOCK(that->m_lock); + binder_transaction_ReleaseTeam(t); + BND_LOCK(that->m_lock); + } + t->next = that->m_needFree; + that->m_needFree = t; + that->m_freeCount++; + BND_UNLOCK(that->m_lock); + + BND_RELEASE(binder_proc, that, WEAK, that); + + return 0; +} + +BND_IMPLEMENT_ACQUIRE_RELEASE(binder_proc); +BND_IMPLEMENT_ATTEMPT_ACQUIRE(binder_proc); + +s32 +binder_proc_Node2Descriptor(binder_proc_t *that, binder_node_t *n, bool ref, s32 type) +{ + s32 desc=-2; + reverse_mapping_t **head; + + DPRINTF(4, (KERN_WARNING "%s(%p, %p, %s, %s)\n", __func__, that, n, ref ? "true" : "false", type == STRONG ? "STRONG" : "WEAK")); + BND_LOCK(that->m_lock); + + if (binder_proc_IsAlive(that)) { + u32 bucket = hash_ptr(n, HASH_BITS); + DPRINTF(5, (KERN_WARNING " -- node(%p) mapping to descr bucket %d\n",n,bucket)); + head = &that->m_reverseHash[bucket]; + while (*head && (n < (*head)->node)) head = &(*head)->next; + if (*head && (n == (*head)->node)) { + desc = (*head)->descriptor; + DPRINTF(5, (KERN_WARNING "node(%p) found map to descriptor(%d), strong=%d\n",n,desc+1,that->m_descriptors[desc].priRef)); + if (!ref || type == WEAK || that->m_descriptors[desc].priRef > 0 + || BND_ATTEMPT_ACQUIRE(binder_node, n, STRONG, that)) { + if (ref) { + DPRINTF(5, (KERN_WARNING "Incrementing descriptor %d %s: strong=%d weak=%d in team %p\n", desc+1, type == STRONG ? "STRONG" : "WEAK", that->m_descriptors[desc].priRef, that->m_descriptors[desc].secRef, that)); + if (type == STRONG) that->m_descriptors[desc].priRef++; + else that->m_descriptors[desc].secRef++; + } + DPRINTF(5, (KERN_WARNING "node(%p) mapped to descriptor(%d) in team %p\n",n,desc+1,that)); + } else { + // No longer exists! 
+ desc = -2; + } + } else if (ref && (type != STRONG || BND_ATTEMPT_ACQUIRE(binder_node, n, STRONG, that))) { + reverse_mapping_t *map; + int i; + if (type != STRONG) BND_ACQUIRE(binder_node, n, WEAK, that); + for (i=0;im_descriptorCount;i++) { + if (that->m_descriptors[i].node == NULL) { + that->m_descriptors[i].node = n; + if (type == STRONG) { + that->m_descriptors[i].priRef = 1; + that->m_descriptors[i].secRef = 0; + } else { + that->m_descriptors[i].priRef = 0; + that->m_descriptors[i].secRef = 1; + } + desc = i; + // DPRINTF(5, (KERN_WARNING "Initializing descriptor %d: strong=%d weak=%d in team %p\n", i+1, that->m_descriptors[i].priRef,that->m_descriptors[i].secRef,that)); + DPRINTF(5, (KERN_WARNING "node(%p) mapped to NEW descriptor(%d) in team %p\n",n,desc+1,that)); + break; + } + } + + if (desc < 0) { + int i; + s32 newCount = that->m_descriptorCount*2; + if (!newCount) newCount = 32; + // that->m_descriptors = (descriptor_t*)kernel_realloc(that->m_descriptors,sizeof(descriptor_t)*newCount,"descriptors"); + { + descriptor_t *d = kmalloc(sizeof(descriptor_t)*newCount, GFP_KERNEL); + // FIXME: BeOS code did not deal with allocation failures + memcpy(d, that->m_descriptors, that->m_descriptorCount*sizeof(descriptor_t)); + kfree(that->m_descriptors); + that->m_descriptors = d; + } + for (i=newCount-1;i>=that->m_descriptorCount;i--) that->m_descriptors[i].node = NULL; + desc = that->m_descriptorCount; + DPRINTF(5, (KERN_WARNING "Initializing descriptor %d: strong=%d weak=%d in team %p\n", desc+1, that->m_descriptors[desc].priRef,that->m_descriptors[desc].secRef,that)); + that->m_descriptors[desc].node = n; + if (type == STRONG) { + that->m_descriptors[desc].priRef = 1; + that->m_descriptors[desc].secRef = 0; + } else { + that->m_descriptors[desc].priRef = 0; + that->m_descriptors[desc].secRef = 1; + } + that->m_descriptorCount = newCount; + DPRINTF(5, (KERN_WARNING "node(%p) mapped to NEW descriptor(%d) in team %p\n",n,desc+1,that)); + } + + map = 
(reverse_mapping_t*)kmem_cache_alloc(reverse_mapping_cache, GFP_KERNEL); + map->node = n; + map->descriptor = desc; + map->next = *head; + *head = map; + } + } + + BND_UNLOCK(that->m_lock); + return desc+1; +} + +binder_node_t * +binder_proc_Descriptor2Node(binder_proc_t *that, s32 descriptor, const void* id, s32 type) +{ + binder_node_t *n; + (void)id; + + descriptor--; + + DBLOCK((KERN_WARNING "Descriptor2Node() going to lock %p in %d\n", that, current->pid)); + BND_LOCK(that->m_lock); + + n = NULL; + if (binder_proc_IsAlive(that)) { + if ((descriptor >= 0) && + (descriptor < that->m_descriptorCount) && + (that->m_descriptors[descriptor].node != NULL)) { + if (type == STRONG) { + if (that->m_descriptors[descriptor].priRef > 0) { + n = that->m_descriptors[descriptor].node; + BND_ACQUIRE(binder_node, n, STRONG, id); + } else { + UPRINTF((KERN_WARNING "Descriptor2Node failed primary: desc=%d, max=%d, node=%p, strong=%d\n", + descriptor+1, that->m_descriptorCount, + that->m_descriptors[descriptor].node, + that->m_descriptors[descriptor].priRef)); + } + } else { + if (that->m_descriptors[descriptor].secRef > 0) { + n = that->m_descriptors[descriptor].node; + BND_ACQUIRE(binder_node, n, WEAK, id); + } else { + UPRINTF((KERN_WARNING "Descriptor2Node failed secondary: desc=%d, max=%d, node=%p, weak=%d\n", + descriptor+1, that->m_descriptorCount, + that->m_descriptors[descriptor].node , + that->m_descriptors[descriptor].secRef)); + } + } + } else { + UPRINTF((KERN_WARNING "Descriptor2Node failed: desc=%d, max=%d, node=%p, strong=%d\n", + descriptor+1, that->m_descriptorCount, + (descriptor >= 0 && descriptor < that->m_descriptorCount) ? that->m_descriptors[descriptor].node : NULL, + (descriptor >= 0 && descriptor < that->m_descriptorCount) ? 
that->m_descriptors[descriptor].priRef : 0)); + } + } + + BND_UNLOCK(that->m_lock); + return n; +} + +status_t +binder_proc_Ptr2Node(binder_proc_t *that, void *ptr, void *cookie, binder_node_t **n, iobuffer_t *io, const void* id, s32 type) +{ + u32 bucket; + local_mapping_t **head; + local_mapping_t *newMapping; + (void)id; + + if (ptr == NULL) { + DPRINTF(5, (KERN_WARNING "ptr(%p) mapping to NULL node in team %p\n",ptr,that)); + *n = NULL; + return 0; + } + + DBLOCK((KERN_WARNING "Ptr2Node() going to lock %p in %d\n", that, current->pid)); + BND_LOCK(that->m_lock); + + bucket = hash_ptr(ptr, HASH_BITS); + DPRINTF(9, (KERN_WARNING "ptr(%p) mapping to ptr bucket %u (value %p) in team %p\n",ptr,bucket,that->m_localHash[bucket],that)); + head = &that->m_localHash[bucket]; + while (*head && (ptr < (*head)->ptr)) head = &(*head)->next; + if (*head && (ptr == (*head)->ptr)) { + if ((type == STRONG) && BND_ATTEMPT_ACQUIRE(binder_node, (*head)->node, STRONG, id)) { + *n = (*head)->node; + DPRINTF(4, (KERN_WARNING "%s(%p, %p, %s): %p (OLD)\n", __func__, that, ptr, type == STRONG ? "STRONG" : "WEAK", *n)); + BND_UNLOCK(that->m_lock); + return 0; + } else if (BND_ATTEMPT_ACQUIRE(binder_node, (*head)->node, WEAK, id)) { + if((*head)->next) + BND_ASSERT(io || (*head)->next->ptr != ptr || atomic_read(&((*head)->next->node->m_secondaryRefs)) == 0, "May remove wrong node"); + + *n = (*head)->node; + DPRINTF(4, (KERN_WARNING "%s(%p, %p, %s): %p (OLD)\n", __func__, that, ptr, type == STRONG ? "STRONG" : "WEAK", *n)); + if (type == STRONG) { + /* Other teams have a secondary reference on this node, but no + primary reference. We need to make the node alive again, and + tell the calling team that the driver now has a primary + reference on it. The two calls below will force a new primary + reference on the node, and remove the secondary reference we + just acquired above. 
All the trickery with the secondary reference + is protection against a race condition where another team removes + the last secondary reference on the object, while we are here + trying to add one. + */ + int count; + DPRINTF(9, (KERN_WARNING "Apply a new primary reference to node (%p) in team %p\n",*n,that)); + count = BND_FORCE_ACQUIRE(binder_node, *n, id); + BND_RELEASE(binder_node, *n, WEAK, id); + + BND_ASSERT(io != NULL, "Acquiring new strong reference without io"); + if (count == 0) { + that->m_numRemoteStrongRefs++; + if (io) { + BND_ACQUIRE(binder_node, *n, STRONG, that); // add a second reference to avoid the node being released before the aquire has finished + iobuffer_write_u32(io, brACQUIRE); + iobuffer_write_void(io, ptr); + iobuffer_write_void(io, (*head)->cookie); + DPRINTF(5, (KERN_WARNING " -- wrote brACQUIRE: %p\n", ptr)); + } + } + else { + printk(KERN_WARNING "%s(%p, %p, %s): %p Reaquired strong reference, but someone beat us to it\n", __func__, that, ptr, type == STRONG ? "STRONG" : "WEAK", (*head)->node); + } + } + BND_UNLOCK(that->m_lock); + return 0; + } +#if 1 + else { + DPRINTF(4, (KERN_WARNING "%s(%p, %p, %s): %p (OLD) FAILED AttempAcquire!\n", __func__, that, ptr, type == STRONG ? 
"STRONG" : "WEAK", (*head)->node));
+		}
+#endif
+	}
+
+	{
+		local_mapping_t **thead;
+		thead = &that->m_localHash[hash_ptr(ptr, HASH_BITS)];
+		while (*thead) {
+			if((*thead)->ptr == ptr) {
+				BND_ASSERT(atomic_read(&((*thead)->node->m_primaryRefs)) == 0, "Creating new node when a node with strong refs already exists");
+				BND_ASSERT(atomic_read(&((*thead)->node->m_secondaryRefs)) == 0, "Creating new node when a node with weak refs already exists");
+			}
+			thead = &(*thead)->next;
+		}
+	}
+
+	if (io && (iobuffer_remaining(io) < 8)) {
+		BND_UNLOCK(that->m_lock);
+		return -EINVAL;
+	}
+
+	if (!binder_proc_IsAlive(that)) {
+		BND_UNLOCK(that->m_lock);
+		return -ENOENT;
+	}
+
+	newMapping = (local_mapping_t*)kmem_cache_alloc(local_mapping_cache, GFP_KERNEL);
+	newMapping->ptr = ptr;
+	newMapping->cookie = cookie;
+	newMapping->node = binder_node_init(that,ptr,cookie);
+	*n = newMapping->node;
+	DPRINTF(4, (KERN_WARNING "%s(%p, %p, %s): %p (NEW)\n", __func__, that, ptr, type == STRONG ? "STRONG" : "WEAK", *n));
+	BND_FIRST_ACQUIRE(binder_node, *n, type, id);
+	newMapping->next = *head;
+	*head = newMapping;
+
+	if (io) {
+		if (type == STRONG) {
+			BND_ACQUIRE(binder_node, *n, STRONG, that); // add a second reference to avoid the node being released before the aquire has finished
+			that->m_numRemoteStrongRefs++;
+			iobuffer_write_u32(io, brACQUIRE);
+			iobuffer_write_void(io, ptr);
+			iobuffer_write_void(io, cookie);
+			DPRINTF(5, (KERN_WARNING " -- wrote brACQUIRE: %p\n", ptr));
+		}
+		BND_ACQUIRE(binder_node, *n, WEAK, that); // add a second reference to avoid the node being released before the aquire has finished
+		iobuffer_write_u32(io, brINCREFS);
+		iobuffer_write_void(io, ptr);
+		iobuffer_write_void(io, cookie);
+		DPRINTF(5, (KERN_WARNING " -- wrote brINCREFS: %p\n", ptr));
+	}
+	else {
+		if (type == STRONG)
+			printk(KERN_WARNING "%s() creating new node without brACQUIRE\n", __func__);
+		else
+			printk(KERN_WARNING "%s() creating new node without brINCREFS\n", __func__);
+	}
+
+	
BND_UNLOCK(that->m_lock); + return 0; +} + +bool +binder_proc_RefDescriptor(binder_proc_t *that, s32 descriptor, s32 type) +{ + bool r=FALSE; + + descriptor--; + + DBLOCK((KERN_WARNING "RefDescriptor() going to lock %p in %d\n", that, current->pid)); + BND_LOCK(that->m_lock); + + if (binder_proc_IsAlive(that)) { + descriptor_t *d; + if ((descriptor >= 0) && + (descriptor < that->m_descriptorCount) && + ((d=&that->m_descriptors[descriptor])->node != NULL)) { + r = TRUE; + DPRINTF(5, (KERN_WARNING "Incrementing descriptor %d %s: strong=%d weak=%d in team %p\n", descriptor+1, type == STRONG ? "STRONG" : "WEAK", d->priRef,d->secRef,that)); + if (type == STRONG) { + if (d->priRef > 0) d->priRef++; + else { + UPRINTF((KERN_WARNING "No strong references exist for descriptor: desc=%d, max=%d, node=%p, weak=%d\n", + descriptor+1, that->m_descriptorCount, + (descriptor >= 0 && descriptor < that->m_descriptorCount) ? that->m_descriptors[descriptor].node : NULL, + (descriptor >= 0 && descriptor < that->m_descriptorCount) ? that->m_descriptors[descriptor].secRef : 0)); + r = FALSE; + } + } else if (type == WEAK) { + if (d->secRef > 0) d->secRef++; + else if (d->priRef > 0) { + // Note that we allow the acquisition of a weak reference if only holding + // a strong because for transactions we only increment the strong ref + // count when sending a strong reference... so we need to be able to recover + // weak reference here. + d->secRef++; BND_ACQUIRE(binder_node, d->node, WEAK, that); + } else { + UPRINTF((KERN_WARNING "No weak references exist for descriptor: desc=%d, max=%d, node=%p, strong=%d\n", + descriptor+1, that->m_descriptorCount, + (descriptor >= 0 && descriptor < that->m_descriptorCount) ? that->m_descriptors[descriptor].node : NULL, + (descriptor >= 0 && descriptor < that->m_descriptorCount) ? 
that->m_descriptors[descriptor].priRef : 0)); + r = FALSE; + } + } + } + } + + BND_UNLOCK(that->m_lock); + return r; +} + +bool +binder_proc_UnrefDescriptor(binder_proc_t *that, s32 descriptor, s32 type) +{ + binder_node_t *n = NULL; + bool r=FALSE; + + descriptor--; + + DPRINTF(4, (KERN_WARNING "%s(%p, %d, %s)\n", __func__, that, descriptor, type == STRONG ? "STRONG" : "WEAK")); + + BND_LOCK(that->m_lock); + + if (binder_proc_IsAlive(that)) { + descriptor_t *d; + bool remove = FALSE; + if ((descriptor >= 0) && + (descriptor < that->m_descriptorCount) && + ((d=&that->m_descriptors[descriptor])->node != NULL)) { + r = TRUE; + DPRINTF(5, (KERN_WARNING "Decrementing descriptor %d %s: strong=%d weak=%d in team %p\n", descriptor+1, type == STRONG ? "STRONG" : "WEAK", d->priRef,d->secRef,that)); + if (type == STRONG) { + if (--d->priRef == 0) n = d->node; + } else { + if (--d->secRef == 0) n = d->node; + } + DPRINTF(5, (KERN_WARNING "Descriptor %d is now: strong=%d weak=%d in team %p\n", descriptor+1, d->priRef,d->secRef,that)); + if (n && d->priRef <= 0 && d->secRef <= 0) { + d->node = NULL; + remove = TRUE; + } + } + + if (remove) { + reverse_mapping_t *entry,**head = &that->m_reverseHash[hash_ptr(n, HASH_BITS)]; + while (*head && (n < (*head)->node)) head = &(*head)->next; + if (*head && (n == (*head)->node)) { + entry = *head; + *head = entry->next; + kmem_cache_free(reverse_mapping_cache, entry); + } + } + } + + BND_UNLOCK(that->m_lock); + if (n) BND_RELEASE(binder_node, n, type, that); + return r; +} + +bool +binder_proc_RemoveLocalMapping(binder_proc_t *that, void *ptr, struct binder_node *node) +{ + local_mapping_t *entry=NULL; + + DBLOCK((KERN_WARNING "RemoveLocalMapping() going to lock %p in %d\n", that, current->pid)); + BND_LOCK(that->m_lock); + + if (binder_proc_IsAlive(that)) { + local_mapping_t **head; + DPRINTF(5, (KERN_WARNING "RemoveLocalMapping %p in team %p\n", ptr, that)); + head = &that->m_localHash[hash_ptr(ptr, HASH_BITS)]; + while (*head) { +// 
(KERN_WARNING "RemoveLocalMapping %08x %08x\n",ptr,(*head)->ptr); + if (ptr >= (*head)->ptr && ((*head)->node == node || ptr > (*head)->ptr)) + break; + head = &(*head)->next; + } + +// while (*head && (ptr <= (*head)->ptr)) head = &(*head)->next; + if (*head && (ptr == (*head)->ptr)) { + entry = *head; + *head = entry->next; + } + BND_ASSERT(entry != NULL, "RemoveLocalMapping failed for live process"); + if(entry == NULL) { + head = &that->m_localHash[hash_ptr(ptr, HASH_BITS)]; + while (*head) { + if((*head)->node == node) + break; + head = &(*head)->next; + } + if(*head != NULL) + printk(KERN_WARNING "RemoveLocalMapping failed, but exists in the wrong place, ptr = %p node = %p node->ptr = %p\n", ptr, node, (*head)->ptr); + } + } + + BND_UNLOCK(that->m_lock); + + if (entry) { + kmem_cache_free(local_mapping_cache, entry); +// (KERN_WARNING "RemoveLocalMapping success\n"); + return TRUE; + } + + DPRINTF(0, (KERN_WARNING "RemoveLocalMapping failed for %p in team %p\n", ptr, that)); + return FALSE; +} + +void +binder_proc_RemoveLocalStrongRef(binder_proc_t *that, binder_node_t *node) +{ + bool goodbye; + + DBLOCK((KERN_WARNING "RemoveLocalStrongRef() going to lock %p in %d\n", that, current->pid)); + BND_LOCK(that->m_lock); + + // It is time for this process to go away if: + // (a) This is the last strong reference on it, and + // (b) The process published a root object. (If it didn't publish + // a root object, then someone else is responsible for managing its lifetime.) + goodbye = --that->m_numRemoteStrongRefs == 0 ? (that->m_rootObject != NULL) : FALSE; + + // Oh, and also, if the object being released -is- the root object, well that... + if (that->m_rootObject == node) { + that->m_rootObject = (binder_node_t*)-1; // something we know isn't a valid address. 
+ if (that->m_rootStopsProcess) goodbye = TRUE; + } + + BND_UNLOCK(that->m_lock); + + if (goodbye) binder_proc_Die(that, FALSE); +} + +void +binder_proc_AddLocalStrongRef(binder_proc_t *that, binder_node_t *node) +{ + DBLOCK((KERN_WARNING "AddLocalStrongRef() going to lock %p in %d\n", that, current->pid)); + BND_LOCK(that->m_lock); + that->m_numRemoteStrongRefs++; + BND_UNLOCK(that->m_lock); +} + +bool +binder_proc_AttemptRefDescriptor(binder_proc_t *that, s32 descriptor, binder_node_t **out_target) +{ + binder_node_t *n = NULL; + bool r=FALSE; + + descriptor--; + + DBLOCK((KERN_WARNING "AttemptRefDescriptor() going to lock %p in %d\n", that, current->pid)); + BND_LOCK(that->m_lock); + + if (binder_proc_IsAlive(that)) { + descriptor_t *d; + if ((descriptor >= 0) && + (descriptor < that->m_descriptorCount) && + ((d=&that->m_descriptors[descriptor])->node != NULL)) { + r = TRUE; + DPRINTF(5, (KERN_WARNING "Attempt incrementing descriptor %d primary: strong=%d weak=%d in team %p\n", descriptor+1, d->priRef,d->secRef,that)); + if (d->priRef > 0 || (d->node && BND_ATTEMPT_ACQUIRE(binder_node, d->node, STRONG, that))) { + d->priRef++; + } else { + // If no strong references currently exist, we can't + // succeed. Instead return the node this attempt was + // made on. 
+ r = FALSE; + if ((n=d->node) != NULL) BND_ACQUIRE(binder_node, n, WEAK, that); + } + } + } + + BND_UNLOCK(that->m_lock); + + *out_target = n; + return r; +} + +void +binder_proc_ForceRefNode(binder_proc_t *that, binder_node_t *node, iobuffer_t *io) +{ + bool recovered = FALSE; + const s32 descriptor = binder_proc_Node2Descriptor(that, node, FALSE, STRONG) - 1; + + DBLOCK((KERN_WARNING "ForceRefNode() going to lock %p in %d\n", that, current->pid)); + BND_LOCK(that->m_lock); + + if (binder_proc_IsAlive(that)) { + descriptor_t *d; + if ((descriptor >= 0) && + (descriptor < that->m_descriptorCount) && + ((d=&that->m_descriptors[descriptor])->node != NULL)) { + DPRINTF(5, (KERN_WARNING "Force incrementing descriptor %d: strong=%d weak=%d in team %p\n", descriptor+1, d->priRef, d->secRef,that)); + if (d->priRef == 0) { + if (BND_FORCE_ACQUIRE(binder_node, node, that) == 0) { + recovered = TRUE; + } + } + d->priRef++; + } else { + BND_ASSERT(FALSE, "ForceRefNode() got invalid descriptor!"); + } + } + + BND_UNLOCK(that->m_lock); + + // If this operation recovered a strong reference on the object, we + // need to tell its owning process for proper bookkeeping; + if (recovered) { + binder_proc_t* proc = binder_node_AcquireHome(node, that); + if (proc != NULL) { + binder_proc_AddLocalStrongRef(proc, node); + BND_RELEASE(binder_proc, proc, STRONG, that); + } + } else { + iobuffer_write_u32(io, brRELEASE); + iobuffer_write_void(io, binder_node_Ptr(node)); // binder object token + iobuffer_write_void(io, binder_node_Cookie(node)); // binder object cookie + } +} + +status_t +binder_proc_FreeBuffer(binder_proc_t *that, void *ptr) +{ + binder_transaction_t **p,*t; + DBLOCK((KERN_WARNING "FreeBuffer() going to lock %p in %d\n", that, current->pid)); + BND_LOCK(that->m_lock); + for (p = &that->m_needFree; *p && (binder_transaction_UserData(*p) != ptr); p = &(*p)->next); + if ((t = *p)) *p = t->next; + if (t) that->m_freeCount--; + BND_UNLOCK(that->m_lock); + + if (t) { + 
DPRINTF(5, (KERN_WARNING "FreeBuffer %p in team %p, now have %d\n",ptr,that,that->m_freeCount)); + + binder_transaction_Destroy(t); + return 0; + } else { + BND_ASSERT(!binder_proc_IsAlive(that), "FreeBuffer failed"); + } + return -EINVAL; +} + +static void +binder_proc_RemoveThreadFromWaitStack(binder_proc_t *that, binder_thread_t *thread) +{ + assert_spin_locked(&that->m_spin_lock); + BND_ASSERT(!list_empty(&thread->waitStackEntry), "thread not on waitstack"); + + list_del_init(&thread->waitStackEntry); + that->m_waitStackCount--; + DIPRINTF(0, (KERN_WARNING "%s(%p) popped thread %p from waitStack %d threads left\n", __func__, that, thread, that->m_waitStackCount)); + if(thread->wakeReason == WAKE_REASON_IDLE && that->m_waitStackCount > BND_PROC_MAX_IDLE_THREADS) + mod_timer(&that->m_idleTimer, that->m_idleTimeout + jiffies); + else if(that->m_waitStackCount == BND_PROC_MAX_IDLE_THREADS) + del_timer(&that->m_idleTimer); +} + +static void +binder_proc_DeliverTransacton(binder_proc_t *that, binder_transaction_t *t) +{ + binder_thread_t *thread; + + assert_spin_locked(&that->m_spin_lock); + + if(!list_empty(&that->m_waitStack)) { + // TODO: pop thread from wait stack here + thread = list_entry(that->m_waitStack.next, binder_thread_t, waitStackEntry); + binder_proc_RemoveThreadFromWaitStack(that, thread); + BND_ASSERT(thread->nextRequest == NULL, "Thread already has a request!"); + //DBTRANSACT((KERN_WARNING "Delivering transaction %p to thread %d from thread %d!\n", + // t, binder_thread_Thid(thread), current->pid)); + thread->nextRequest = t; + set_thread_priority(binder_thread_Thid(thread), binder_transaction_Priority(t)); + binder_thread_Wakeup(thread); + } + else { + DBSPAWN((KERN_WARNING "%s(%p) empty waitstack\n", __func__, that)); + *that->m_tail = t; + that->m_tail = &t->next; + } +} + +status_t +binder_proc_Transact(binder_proc_t *that, binder_transaction_t *t) +{ + binder_thread_t *thread; + unsigned long flags; + + DBLOCK((KERN_WARNING "Transact() going 
to lock %p in %d\n", that, current->pid)); + BND_LOCK(that->m_lock); + + DBTRANSACT((KERN_WARNING "Thread %d transacting %p to team %p, vthid=%d\n", + current->pid, t, that, t->sender ? binder_thread_VirtualThid(t->sender) : -1)); + + if (!binder_proc_IsAlive(that)) { + BND_UNLOCK(that->m_lock); + if (t->sender) binder_thread_ReplyDead(t->sender); + binder_transaction_Destroy(t); + return 0; + } + + BND_ASSERT(t->next == NULL, "Transaction not correctly initialized"); + + /* First check if the target team is already waiting on a reply from + this thread. If so, we must reflect this transaction directly + into the thread that is waiting for us. + */ + if (t->sender && binder_thread_VirtualThid(t->sender)) { + for (thread = that->m_threads; + thread && + (binder_thread_VirtualThid(thread) != binder_thread_VirtualThid(t->sender)) && + (binder_thread_Thid(thread) != binder_thread_VirtualThid(t->sender)); + thread = thread->next); + + if (thread) { + /* Make sure this thread starts out at the correct priority. + Its user-space looper will restore the old priority when done. 
*/ + set_thread_priority(binder_thread_Thid(thread), binder_transaction_Priority(t)); + BND_UNLOCK(that->m_lock); + DBTRANSACT((KERN_WARNING "Thread %d reflecting %p!\n", current->pid, t)); + binder_thread_Reflect(thread, t); + return 0; + } + } + + spin_lock_irqsave(&that->m_spin_lock, flags); + /* Enqueue or deliver this transaction */ + binder_proc_DeliverTransacton(that, t); + that->m_syncCount++; + + BND_ASSERT(that->m_syncCount > 0, "Synchronous transaction count is bad!"); + // that->m_syncCount++; + + // DBTRANSACT((KERN_WARNING "Added to team %p queue -- needNewThread=%d, that->m_nonblockedThreads=%d\n", that, needNewThread, that->m_nonblockedThreads)); + + spin_unlock_irqrestore(&that->m_spin_lock, flags); + + if (that->m_nonblockedThreads <= 0) { + DBSPAWN((KERN_WARNING "*** TRANSACT NEEDS TO SPAWN NEW THREAD!\n")); + binder_proc_spawn_looper(that); + } + + BND_UNLOCK(that->m_lock); + + return 0; +} + +status_t +binder_proc_TakeMeOffYourList(binder_proc_t *that) +{ + DBLOCK((KERN_WARNING "binder_proc_TakeMeOffYourList() going to lock %p in %d\n", that, current->pid)); + BND_LOCK(that->m_lock); + that->m_nonblockedThreads--; + DBSPAWN((KERN_WARNING "*** TAKE-ME-OFF-YOUR-LIST %p -- now have %d nonblocked\n", that, that->m_nonblockedThreads)); + BND_ASSERT(that->m_nonblockedThreads >= 0, "Nonblocked thread count is bad!"); + if ((that->m_nonblockedThreads <= 0) && that->m_syncCount) { + /* Spawn a thread if all blocked and synchronous transaction pending */ + DBSPAWN((KERN_WARNING "*** TAKE-ME-OFF-YOUR-LIST NEEDS TO SPAWN NEW THREAD!\n")); + binder_proc_spawn_looper(that); + } + BND_UNLOCK(that->m_lock); + return 0; +} + +status_t +binder_proc_PutMeBackInTheGameCoach(binder_proc_t *that) +{ + DBLOCK((KERN_WARNING "binder_proc_PutMeBackInTheGameCoach() going to lock %p in %d\n", that, current->pid)); + BND_LOCK(that->m_lock); + BND_ASSERT(that->m_nonblockedThreads >= 0, "Nonblocked thread count is bad!"); + that->m_nonblockedThreads++; + 
DBSPAWN((KERN_WARNING "*** PUT-ME-BACK-IN-THE-GAME-COACH %p -- now have %d nonblocked\n", that, that->m_nonblockedThreads)); + BND_UNLOCK(that->m_lock); + return 0; +} + +status_t +binder_proc_WaitForRequest(binder_proc_t *that, binder_thread_t* who, binder_transaction_t **t) +{ + status_t err = 0; + unsigned long flags; + + if(that->m_wakeThreadMask) { + spin_lock_irqsave(&that->m_spin_lock, flags); + if(that->m_wakeThreadMask & WAKE_THREAD_FOR_PROCESS_DEATH) { + that->m_wakeThreadMask &= ~WAKE_THREAD_FOR_PROCESS_DEATH; + who->wakeReason = WAKE_REASON_PROCESS_DEATH; + } + spin_unlock_irqrestore(&that->m_spin_lock, flags); + } + if(who->wakeReason == WAKE_REASON_PROCESS_DEATH) { + BND_LOCK(that->m_lock); + if(hlist_empty(&that->m_pending_death_notifications) && hlist_empty(&that->m_deleted_death_notifications)) { + printk(KERN_WARNING "%s() thread->wakeReason == WAKE_REASON_PROCESS_DEATH with no pending notifications\n", __func__); + who->wakeReason = WAKE_REASON_NONE; + } + BND_UNLOCK(that->m_lock); + return DEATH_NOTIFICATION_READY; + } + + DBLOCK((KERN_WARNING "WaitForRequest() going to lock %p in %d\n", that, binder_thread_Thid(who))); + BND_LOCK(that->m_lock); + + BND_ASSERT(atomic_read(&that->m_lock.count) <= 0, "WaitForRequest() lock still free after BND_LOCK"); + + if (who->m_isSpawned && who->m_firstLoop) { + /* This is a new thread that is waiting for its first time. */ +#if 0 + DPRINTF(0, (KERN_WARNING "*** ENTERING SPAWNED THREAD! Now looping %d, spawning %d\n", + atomic_read(&that->m_loopingThreads), that->m_spawningThreads)); + that->m_spawningThreads--; +#else + DPRINTF(0, (KERN_WARNING "*** ENTERING SPAWNED THREAD! Now looping %d\n", atomic_read(&that->m_loopingThreads))); +#endif + who->m_firstLoop = FALSE; + } else { + /* This is an existing thread that is going to go back to waiting. 
*/ + that->m_waitingThreads++; + } + + BND_ASSERT(who->nextRequest == NULL, "Thread already has a request!"); + BND_ASSERT(list_empty(&who->waitStackEntry), "Thread on wait stack!"); + + /* Look for a pending request to service. Only do this if we are not + yet on the wait stack, or are at the top of the stack -- otherwise, + we need to wait for the thread on top of us to execute. */ + spin_lock_irqsave(&that->m_spin_lock, flags); + if((*t = that->m_head) != NULL) { + DIPRINTF(5, (KERN_WARNING "Processing transaction %p, next is %p\n", *t, (*t)->next)); + that->m_head = (*t)->next; + if (that->m_tail == &(*t)->next) that->m_tail = &that->m_head; + (*t)->next = NULL; + set_thread_priority(binder_thread_Thid(who), binder_transaction_Priority(*t)); + } + else { + /* If there are no pending transactions, unlock the team state and + wait for next thing to do. */ + + // Add to wait stack. + DIPRINTF(5, (KERN_WARNING "Pushing thread %d on to wait stack.\n", binder_thread_Thid(who))); + #if VALIDATES_BINDER + binder_thread_t* pos; + list_for_each_entry(pos, &that->m_waitStack, waitStackEntry) { + DBSTACK((KERN_WARNING "Thread %ld looking through wait stack: %p (%ld)\n", + current, pos, binder_thread_Thid(pos))); + BND_ASSERT(pos != who, "Pushing thread already on wait stack!"); + } + #endif + list_add(&who->waitStackEntry, &that->m_waitStack); + that->m_waitStackCount++; + DIPRINTF(0, (KERN_WARNING "%s(%p) added thread %p to waitStack %d threads now waiting\n", __func__, that, who, that->m_waitStackCount)); + if(that->m_waitStackCount == BND_PROC_MAX_IDLE_THREADS + 1) { + mod_timer(&that->m_idleTimer, that->m_idleTimeout + jiffies); + } + set_thread_priority(binder_thread_Thid(who), that->m_idlePriority); + spin_unlock_irqrestore(&that->m_spin_lock, flags); + + BND_UNLOCK(that->m_lock); + err = binder_thread_AcquireIOSem(who); + DBLOCK((KERN_WARNING "WaitForRequest() #2 going to lock %p in %d\n", that, binder_thread_Thid(who))); + BND_LOCK(that->m_lock); + + //DPRINTF(5, 
(KERN_WARNING "Thread %d: err=0x%08x, wakeupTime=%Ld\n", binder_thread_Thid(who), err, who->wakeupTime)); + + spin_lock_irqsave(&that->m_spin_lock, flags); + if(err != 0) { + // wakeup or idle timer may have released the thread + atomic_set(&who->m_wake_count, 0); + } + if ((*t=who->nextRequest) != NULL) { + /* A request has been delivered directly to us. In this + case the thread has already been removed from the wait + stack. */ + DIPRINTF(1, (KERN_WARNING "Thread %d received transaction %p, err=0x%08x\n", binder_thread_Thid(who), *t, err)); + who->nextRequest = NULL; + err = 0; + + } else { + /* The snooze ended without a transaction being returned. + If the thread ends up returning at this point, we will + need to pop it off the wait stack. Make note of that, + find out what happened, and deal with it. + */ + + DBTRANSACT((KERN_WARNING "Thread %d snooze returned with err=0x%08x\n", + binder_thread_Thid(who), err)); + + switch(who->wakeReason) { + case WAKE_REASON_IDLE: + who->wakeReason = WAKE_REASON_NONE; // the main thread may ignore a request to die + err = -ETIMEDOUT; + DBSPAWN((KERN_WARNING "*** TIME TO DIE! waiting=%d, nonblocked=%d\n", + that->m_waitingThreads, that->m_nonblockedThreads)); + break; + + case WAKE_REASON_PROCESS_DEATH: + // the threads stays in this state until the pending list becomes empty + err = DEATH_NOTIFICATION_READY; + break; + + default: + BND_ASSERT(err < 0 || !binder_proc_IsAlive(that), "thread woke up without a reason"); + /* If this thread is still on the wait stack, remove it. */ + DBTRANSACT((KERN_WARNING "Popping thread %d from wait stack.\n", + binder_thread_Thid(who))); + binder_proc_RemoveThreadFromWaitStack(that, who); + } + } + } + spin_unlock_irqrestore(&that->m_spin_lock, flags); + + //DBTRANSACT(if ((*t) != NULL) (KERN_WARNING "*** EXECUTING TRANSACTION %p FROM %ld IN %ld\n", *t, (*t)->sender ? 
binder_thread_Thid((*t)->sender) : -1, binder_thread_Thid(who))); + + if ((*t) != NULL) { + if (!binder_transaction_IsEvent(*t)) { + /* Removing a synchronous transaction from the queue */ + BND_ASSERT(that->m_syncCount >= 0, "Count of synchronous transactions is bad!"); + that->m_syncCount--; + } else { + BND_ASSERT(*t == that->m_eventTransaction, "Event thread is not the expected instance!"); + + /* Tell caller to process an event. */ + who->returnedEventPriority = binder_transaction_Priority(*t); + err = REQUEST_EVENT_READY; + *t = NULL; + + /* Clear out current event information. */ + that->m_state &= ~btEventInQueue; + } + } else { + if(err == -ERESTARTSYS) { + DBTRANSACT((KERN_WARNING "*** NON-TRANSACTION IN %d! Error=-ERESTARTSYS\n", binder_thread_Thid(who))); + } + else { + DBTRANSACT((KERN_WARNING "*** NON-TRANSACTION IN %d! Error=0x%08x\n", binder_thread_Thid(who), err)); + } + // By default (such as errors) run at normal priority. + set_thread_priority(binder_thread_Thid(who), B_NORMAL_PRIORITY); + } + + #if VALIDATES_BINDER + { + binder_thread_t* pos; + list_for_each_entry(pos, &that->m_waitStack, waitStackEntry) { + DBSTACK((KERN_WARNING "Thread %d looking through wait stack: %p (%d)\n", + current, pos, binder_thread_Thid(pos))); + BND_ASSERT(pos != who, "Thread still on wait stack!"); + } + } + #endif + + that->m_waitingThreads--; + + /* Spawn a new looper thread if there are no more waiting + and we have not yet reached our limit. 
*/ +#if 1 + if ((that->m_waitingThreads <= 0) && (atomic_read(&that->m_loopingThreads) < that->m_maxThreads)) { + DBSPAWN((KERN_WARNING "*** I THINK I WANT TO SPAWN A LOOPER THREAD!\n")); + binder_proc_spawn_looper(that); + } +#endif + + BND_ASSERT(who->nextRequest == NULL, "Thread leaving with a request!"); + BND_ASSERT(list_empty(&who->waitStackEntry), "Thread left on wait stack!"); + + BND_UNLOCK(that->m_lock); + + return err; +} + +void +binder_proc_StartLooper(binder_proc_t *that, bool driver_spawned) +{ + DBLOCK((KERN_WARNING "StartLooper() going to lock %p in %d\n", that, current->pid)); + BND_LOCK(that->m_lock); + /* When the driver spawns a thread, it incremements the non-blocked + count right away. Otherwise, we must do it now. */ + if (!driver_spawned) that->m_nonblockedThreads++; + atomic_inc(&that->m_loopingThreads); + DPRINTF(0, (KERN_WARNING "*** STARTING A LOOPER FOR %p! Now have %d waiting, %d nonblocked.\n", + that, that->m_waitingThreads, that->m_nonblockedThreads)); + BND_UNLOCK(that->m_lock); +} + +void +binder_proc_FinishLooper(binder_proc_t *that, bool driverSpawned) +{ + DBLOCK((KERN_WARNING "FinishLooper() going to lock %p in %d\n", that, current->pid)); + BND_LOCK(that->m_lock); + that->m_nonblockedThreads--; + DBSPAWN((KERN_WARNING "*** FINISHING A LOOPER FOR %p! 
Now have %d waiting, %d nonblocked, %d looping.\n", + that, that->m_waitingThreads, that->m_nonblockedThreads, atomic_read(&that->m_loopingThreads))); + if ((that->m_nonblockedThreads <= 1) && that->m_syncCount && binder_proc_IsAlive(that)) { + /* Spawn a thread if all blocked and synchronous transaction pending */ + DBSPAWN((KERN_WARNING "*** FINISH-LOOPER NEEDS TO SPAWN NEW THREAD!\n")); + binder_proc_spawn_looper(that); + } + BND_UNLOCK(that->m_lock); + + if (driverSpawned) { + atomic_dec(&that->m_loopingThreads); + BND_ASSERT(atomic_read(&that->m_loopingThreads) >= 0, "Looping thread count is bad!"); + } +} + +status_t +binder_proc_SetWakeupTime(binder_proc_t *that, bigtime_t time, s32 priority) +{ + unsigned long flags; + bool earlier; + if (time < 0) time = 0; + // convert to jiffies + do_div(time, TICK_NSEC); + time += get_jiffies_64(); + BND_LOCK(that->m_lock); + DPRINTF(4, (KERN_WARNING "%s(%p, %Ld, %d)\n", __func__, that, time, priority)); + spin_lock_irqsave(&that->m_spin_lock, flags); + if (time != that->m_wakeupTime && !(that->m_state & btEventInQueue)) { + DIPRINTF(9, (KERN_WARNING "-- previously %Ld\n", that->m_wakeupTime)); + earlier = time < that->m_wakeupTime; + that->m_wakeupTime = time; + mod_timer(&that->m_wakeupTimer, time); + } + that->m_wakeupPriority = priority; + spin_unlock_irqrestore(&that->m_spin_lock, flags); + BND_UNLOCK(that->m_lock); + return 0; +} + +status_t +binder_proc_SetIdleTimeout(binder_proc_t *that, bigtime_t timeDelta) +{ + DPRINTF(4, (KERN_WARNING "%s(%p, %Ld)\n", __func__, that, timeDelta)); + that->m_idleTimeout = timeDelta; + return 0; +} + +status_t +binder_proc_SetReplyTimeout(binder_proc_t *that, bigtime_t timeDelta) +{ + DPRINTF(4, (KERN_WARNING "%s(%p, %Ld)\n", __func__, that, timeDelta)); + that->m_replyTimeout = timeDelta; + return 0; +} + +status_t +binder_proc_SetMaxThreads(binder_proc_t *that, s32 num) +{ + DPRINTF(4, (KERN_WARNING "%s(%p, %d)\n", __func__, that, num)); + that->m_maxThreads = num; + return 0; 
+} + +status_t +binder_proc_SetIdlePriority(binder_proc_t *that, s32 pri) +{ + DPRINTF(4, (KERN_WARNING "%s(%p, %d)\n", __func__, that, pri)); + that->m_idlePriority = (pri > B_MIN_PRIORITY_VAL ? + (pri <= B_MAX_PRIORITY_VAL ? pri : B_MAX_PRIORITY_VAL) : + B_MIN_PRIORITY_VAL); + return 0; +} + +#define LARGE_TRANSACTION (64 * 1024) +static range_map_t * binder_proc_free_map_alloc_l(binder_proc_t *that, size_t length) +{ + bool large; + struct rb_node *n; + struct rb_node * (*rbstep)(struct rb_node *); + range_map_t *rm = NULL; + unsigned long avail; + + large = (length > LARGE_TRANSACTION ? TRUE : FALSE); + DPRINTF(5, (KERN_WARNING "%s(%p, %08x) large = %d\n", __func__, that, length, large)); + n = large ? rb_last(&that->m_freeMap) : rb_first(&that->m_freeMap); + rbstep = large ? rb_prev : rb_next; + + while (n) { + rm = rb_entry(n, range_map_t, rm_rb); + avail = rm->end - rm->start; + DPRINTF(5, (KERN_WARNING "%s(%p, %08x) rm = %p [%08lx-%08lx], avail %lu\n", __func__, that, length, rm, rm->start, rm->end, avail)); + if (avail >= length) { + avail -= length; + if (avail) { + range_map_t *newrm = kmem_cache_alloc(range_map_cache, GFP_KERNEL); + // use only part of range + if (large) { + // consume address space from the right + newrm->end = rm->end; + rm->end -= length; + newrm->start = rm->end; + newrm->page = NULL; + } else { + // consume address space from the left + newrm->start = rm->start; + rm->start += length; + newrm->end = rm->start; + } + DPRINTF(5, (KERN_WARNING "%s(%p, %08x) newrm = %p [%08lx-%08lx]\n", __func__, that, length, newrm, newrm->start, newrm->end)); + DPRINTF(5, (KERN_WARNING "%s(%p, %08x) remaining rm = %p [%08lx-%08lx], avail %lu\n", __func__, that, length, rm, rm->start, rm->end, avail)); + newrm->team = that; + rm = newrm; + } else { + // use entire range + rb_erase(n, &that->m_freeMap); + } + break; + } + n = rbstep(n); + rm = NULL; + } + return rm; +} + +range_map_t * binder_proc_free_map_insert(binder_proc_t *that, range_map_t 
*buffer) +{ + struct rb_node ** p = &that->m_freeMap.rb_node; + struct rb_node * parent = NULL; + range_map_t *rm = NULL; + const unsigned long address = buffer->start; + struct rb_node *next; + struct rb_node *prev; + + DPRINTF(0, (KERN_WARNING "%s(%p, %p) %08lx::%08lx\n", __func__, that, buffer, buffer->start, buffer->end)); + + while (*p) + { + parent = *p; + rm = rb_entry(parent, range_map_t, rm_rb); + + if (address < rm->start) + p = &(*p)->rb_left; + else if (address >= rm->end) + p = &(*p)->rb_right; + else { + DPRINTF(0, (KERN_WARNING "%s found buffer already in the free list!\n", __func__)); + return rm; + } + } + + if (rm) { + if (rm->end == buffer->start) { + DPRINTF(9, (KERN_WARNING "%s: buffer merges to the right\n", __func__)); + // merge to the right + rm->end = buffer->end; + kmem_cache_free(range_map_cache, buffer); + // try merge right again (did we fill up a hole?) + next = rb_next(parent); + if (next) { + range_map_t *rm_next = rb_entry(next, range_map_t, rm_rb); + if (rm->end == rm_next->start) { + DPRINTF(9, (KERN_WARNING "%s: buffer merges to the left, too\n", __func__)); + rm->end = rm_next->end; + rb_erase(next, &that->m_freeMap); + kmem_cache_free(range_map_cache, rm_next); + } + } + return NULL; + } else if (buffer->end == rm->start) { + DPRINTF(9, (KERN_WARNING "%s: buffer merges to the left\n", __func__)); + // merge to the left + rm->start = buffer->start; + kmem_cache_free(range_map_cache, buffer); + // try merge left again (did we fill up a hole?) 
+ prev = rb_prev(parent); + if (prev) { + range_map_t *rm_prev = rb_entry(prev, range_map_t, rm_rb); + if (rm_prev->end == rm->start) { + DPRINTF(9, (KERN_WARNING "%s: buffer merges to the right, too\n", __func__)); + rm->start = rm_prev->start; + rb_erase(prev, &that->m_freeMap); + kmem_cache_free(range_map_cache, rm_prev); + } + } + return NULL; + } + } + DPRINTF(9, (KERN_WARNING "%s: buffer stands alone\n", __func__)); + + // default case: insert in the middle of nowhere + rb_link_node(&buffer->rm_rb, parent, p); + rb_insert_color(&buffer->rm_rb, &that->m_freeMap); + + return NULL; +} + +static inline range_map_t * binder_proc_range_map_insert(binder_proc_t *that, range_map_t *buffer) +{ + struct rb_node ** p = &that->m_rangeMap.rb_node; + struct rb_node * parent = NULL; + range_map_t *rm; + const unsigned long address = buffer->start; + + while (*p) + { + parent = *p; + rm = rb_entry(parent, range_map_t, rm_rb); + + if (address < rm->start) + p = &(*p)->rb_left; + else if (address >= rm->end) + p = &(*p)->rb_right; + else { + DPRINTF(1, (KERN_WARNING "%s: %p (%08lx::%08lx) overlaps with " + "existing entry %p (%08lx::%08lx)\n", + __func__, buffer, buffer->start, buffer->end, + rm, rm->start, rm->end)); + return rm; + } + } + + rb_link_node(&buffer->rm_rb, parent, p); + rb_insert_color(&buffer->rm_rb, &that->m_rangeMap); + + return NULL; +} + +static inline range_map_t * binder_proc_range_map_search(binder_proc_t *that, unsigned long address) +{ + struct rb_node * n = that->m_rangeMap.rb_node; + range_map_t *rm; + DPRINTF(0, (KERN_WARNING "%s(%p, %lu)\n", __func__, that, address)); + + while (n) + { + rm = rb_entry(n, range_map_t, rm_rb); + // range_map covers [start, end) + DPRINTF(9, (KERN_WARNING " -- trying %08lx::%08lx\n", rm->start, rm->end)); + if (address < rm->start) + n = n->rb_left; + else if (address >= rm->end) + n = n->rb_right; + else { + DPRINTF(9, (KERN_WARNING " -- found it!\n")); + return rm; + } + } + DPRINTF(0, (KERN_WARNING " -- failed to 
find containing range\n")); + return NULL; +} + +#if 0 +// Remove the buffer containing address from the tree. The caller owns the returned memory. +static inline range_map_t * binder_proc_range_map_remove(binder_proc_t *that, unsigned long address) +{ + range_map_t *rm = binder_proc_range_map_search(that, address); + if (rm) rb_erase(&rm->rm_rb, &that->m_rangeMap); + return rm; +} +#endif + +bool +binder_proc_ValidTransactionAddress(binder_proc_t *that, unsigned long address, struct page **pageptr) +{ + // Find the struct page* containing address in the process specified by + // that. Return FALSE and leave *pageptr unchanged if address doesn't + // represent a valid buffer. + + range_map_t *rm; + + BND_LOCK(that->m_map_pool_lock); + rm = binder_proc_range_map_search(that, address); + BND_UNLOCK(that->m_map_pool_lock); + + if (rm) { + unsigned int index = (address - rm->start) >> PAGE_SHIFT; + *pageptr = rm->page + index; + BND_ASSERT(rm->next == NULL, "binder_proc_ValidTransactionAddress found page in free pool"); + return TRUE; + } + return FALSE; +} + +// Alternatively, 2x number of active threads? 
+#define POOL_THRESHOLD 16 +// POOL_BUFFER_LIMIT should never exceed LARGE_TRANSACTION size, or things will get ugly +#define POOL_BUFFER_LIMIT LARGE_TRANSACTION +range_map_t * +binder_proc_AllocateTransactionBuffer(binder_proc_t *that, size_t size) +{ + // ensure order-sized allocations + unsigned long order = calc_order_from_size(size); + + range_map_t *rm; + unsigned long avail = ~0; + range_map_t **prev; + + BND_LOCK(that->m_map_pool_lock); + + rm = that->m_pool; + prev = &that->m_pool; + + size = (1 << order) << PAGE_SHIFT; + + DPRINTF(0, (KERN_WARNING "%s(%p, %u)\n", __func__, that, size)); + DPRINTF(9, (KERN_WARNING " -- order %lu produces size %u\n", order, size)); + // don't bother checking the pool for large buffers + //if (size < POOL_BUFFER_LIMIT) { + DPRINTF(9, (KERN_WARNING " -- searching the pool\n")); + while (rm && ((avail = rm->end - rm->start) < size)) { + prev = &rm->next; + rm = rm->next; + } + //} + + if (rm && (avail == size)) { + // unlink + *prev = rm->next; + rm->next = NULL; + // un-count + that->m_pool_active--; + DPRINTF(9, (KERN_WARNING " -- reusing transaction buffer\n")); + } else { + DPRINTF(9, (KERN_WARNING " -- allocating a new transaction buffer\n")); + // make a new one + rm = binder_proc_free_map_alloc_l(that, size); + if (rm) { + // allocate RAM for it + rm->page = alloc_pages(GFP_KERNEL | __GFP_REPEAT, order); + if (!rm->page) { + binder_proc_free_map_insert(that, rm); + rm = 0; + DPRINTF(9, (KERN_WARNING " -- allocation failed\n")); + } else { + // add to the valid range maps + rm->next = NULL; + binder_proc_range_map_insert(that, rm); + } + } + } + DPRINTF(9, (KERN_WARNING " -- returning %p\n", rm)); + if (rm) { + DPRINTF(9, (KERN_WARNING " --- %08lx::%08lx\n", rm->start, rm->end)); + } + BND_UNLOCK(that->m_map_pool_lock); + return rm; +} + +void +binder_proc_FreeTransactionBuffer(binder_proc_t *that, range_map_t *buffer) +{ + unsigned long size = buffer->end - buffer->start; + range_map_t *rm; + range_map_t **prev; + + 
BND_LOCK(that->m_map_pool_lock); + + DPRINTF(5, (KERN_WARNING "%s(%p) m_pool_active: %d, size: %lu\n", __func__, that, that->m_pool_active, size)); + //if ((that->m_pool_active < POOL_THRESHOLD) && (size < POOL_BUFFER_LIMIT)) { + DPRINTF(5, (KERN_WARNING "%d putting %p (%08lx::%08lx) back in the pool\n", current->pid, buffer, buffer->start, buffer->end)); + rm = that->m_pool; + prev = &that->m_pool; + while (rm && ((rm->end - rm->start) < size)) { + prev = &rm->next; + rm = rm->next; + } + buffer->next = rm; + *prev = buffer; + that->m_pool_active++; +#if 0 // This is not safe to enable until we find some way to unmap the page from the userspace + } else { + DPRINTF(5, (KERN_WARNING "%d releasing %p (%08lx::%08lx) for later use\n", current->pid, buffer, buffer->start, buffer->end)); + // unmap the range +#if 0 + // FIXME: use unmap_mapping_range() to unmap pages + // FIXME: "as" always turns up NULL, so unmapping doesn't work + struct address_space *as = page_mapping(buffer->page); + DPRINTF(5, (KERN_WARNING " -- address_space: %p\n", as)); + if (as) unmap_mapping_range(as, buffer->start - that->m_mmap_start, buffer->end - buffer->start, 0); +#endif + // remove from the valid range maps + rb_erase(&buffer->rm_rb, &that->m_rangeMap); + // toss this range + __free_pages(buffer->page, calc_order_from_size(size)); + buffer->page = NULL; + // give back the address space + binder_proc_free_map_insert(that, buffer); + } +#endif + BND_UNLOCK(that->m_map_pool_lock); +} + +/* ALWAYS call this with that->m_lock held */ +void binder_proc_spawn_looper(binder_proc_t *that) +{ + DBSPAWN((KERN_WARNING "%s(%p)\n", __func__, that)); +#if 0 + if ((++that->m_spawningThreads == 1) && binder_proc_IsAlive(that)) { + atomic_inc(&that->m_noop_spawner); + DBSPAWN((KERN_WARNING " -- upped m_noop_spawner to %d\n", atomic_read(&that->m_noop_spawner))); + } +#else + if (binder_proc_IsAlive(that) && (test_and_set_bit(SPAWNING_BIT, &that->m_noop_spawner) == 0)) { + set_bit(DO_SPAWN_BIT, 
&that->m_noop_spawner); + DBSPAWN((KERN_WARNING " -- upped m_noop_spawner\n")); + ++that->m_waitingThreads; + ++that->m_nonblockedThreads; + } +#endif + DBSPAWN((KERN_WARNING "%s(%p) finished\n", __func__, that)); +} + +void binder_proc_wakeup_timer(unsigned long data) +{ + unsigned long flags; + binder_proc_t *that = (binder_proc_t *)data; + + DIPRINTF(0, (KERN_WARNING "%s(%p) -- Enqueueing handler transaction\n", __func__, that)); + + spin_lock_irqsave(&that->m_spin_lock, flags); + + BND_ASSERT(that->m_eventTransaction != NULL, "m_eventTransaction == NULL"); + + if(!(that->m_state & btEventInQueue)) { + BND_ASSERT(that->m_eventTransaction->next == NULL, "Event transaction already in queue!"); + binder_transaction_SetPriority(that->m_eventTransaction, (s16)that->m_wakeupPriority); + that->m_wakeupTime = B_INFINITE_TIMEOUT; + that->m_wakeupPriority = B_LOW_PRIORITY; // this value should not be used anywhere + that->m_state |= btEventInQueue; + + binder_proc_DeliverTransacton(that, that->m_eventTransaction); + } + else { + BND_ASSERT(0, "event already in queue"); + } + spin_unlock_irqrestore(&that->m_spin_lock, flags); +} + +void binder_proc_idle_timer(unsigned long data) +{ + unsigned long flags; + binder_proc_t *that = (binder_proc_t *)data; + binder_thread_t *thread; + + DIPRINTF(0, (KERN_WARNING "%s(%p) -- Signal idle thread\n", __func__, that)); + + spin_lock_irqsave(&that->m_spin_lock, flags); + + if(that->m_waitStackCount > BND_PROC_MAX_IDLE_THREADS) { + BND_ASSERT(!list_empty(&that->m_waitStack), "bad m_waitStackCount"); + thread = list_entry(that->m_waitStack.prev, binder_thread_t, waitStackEntry); + thread->wakeReason = WAKE_REASON_IDLE; + binder_proc_RemoveThreadFromWaitStack(that, thread); + binder_thread_Wakeup(thread); + } + else { + DBSPAWN((KERN_WARNING "%s(%p) idle timer ignored\n", __func__, that)); + } + spin_unlock_irqrestore(&that->m_spin_lock, flags); +} + diff -urN linux-2.6.22.5/drivers/binder/binder_proc.h 
linux-2.6.22.5-android/drivers/binder/binder_proc.h --- linux-2.6.22.5/drivers/binder/binder_proc.h 1970-01-01 10:00:00.000000000 +1000 +++ linux-2.6.22.5-android/drivers/binder/binder_proc.h 2007-11-20 08:46:07.654250861 +1100 @@ -0,0 +1,226 @@ +/* binder driver + * Copyright (C) 2005 Palmsource, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + +#ifndef BINDER_PROC_H +#define BINDER_PROC_H + +#include +#include +#include +#include +#include "binder_defs.h" +#include "binder_thread.h" +#include "iobuffer.h" + +// This "error" is returned by WaitForRequest() when a timed event +// is scheduled to happen. 
+enum { + REQUEST_EVENT_READY = 1, + DEATH_NOTIFICATION_READY = 2 +}; + +typedef struct descriptor { + struct binder_node *node; + s32 priRef; + s32 secRef; +} descriptor_t; + +typedef struct reverse_mapping { + struct reverse_mapping *next; + struct binder_node *node; + s32 descriptor; +} reverse_mapping_t; + +typedef struct local_mapping { + struct local_mapping *next; + void *ptr; // Unique token identifying this object (supplied by user space) + void *cookie; // Arbitrary data for user space to associate with the object/token + struct binder_node *node; +} local_mapping_t; + +typedef struct range_map { + unsigned long start; // inclusive + unsigned long end; // non-inclusive + struct page *page; + struct range_map* next; // next in the chain of free buffers + struct rb_node rm_rb; + struct binder_proc *team; +} range_map_t; + +typedef struct death_notification { + atomic_t ref_count; + struct hlist_node observer; + struct hlist_node observed_or_active; + void *cookie; + struct binder_proc *observer_proc; + struct binder_proc *observed_proc; // or NULL if already sent +} death_notification_t; + +enum { + btEventInQueue = 0x00000002, + btDying = 0x00000004, + btDead = 0x00000008, + btCleaned = 0x00000010, + btFreed = 0x00000020 +}; + +enum { + WAKE_THREAD_FOR_PROCESS_DEATH = 1 +}; + +typedef struct binder_proc { + atomic_t m_primaryRefs; + atomic_t m_secondaryRefs; + volatile unsigned long m_noop_spawner; +#define SPAWNING_BIT 0 +#define DO_SPAWN_BIT 1 + struct semaphore m_lock; + spinlock_t m_spin_lock; + struct semaphore m_map_pool_lock; + u32 m_state; + struct binder_thread * m_threads; + struct list_head m_waitStack; + int m_waitStackCount; + u32 m_wakeThreadMask; + bigtime_t m_wakeupTime; + s32 m_wakeupPriority; + struct timer_list m_wakeupTimer; + struct timer_list m_idleTimer; + bigtime_t m_idleTimeout; + bigtime_t m_replyTimeout; + s32 m_syncCount; + s32 m_freeCount; + struct binder_transaction * m_head; + struct binder_transaction ** m_tail; + struct 
binder_transaction * m_needFree; + struct binder_transaction * m_eventTransaction; + local_mapping_t * m_localHash[HASH_SIZE]; + struct binder_node * m_rootObject; // only use for comparison!! + s32 m_rootStopsProcess; + s32 m_numRemoteStrongRefs; + reverse_mapping_t * m_reverseHash[HASH_SIZE]; + descriptor_t * m_descriptors; + s32 m_descriptorCount; + s32 m_nonblockedThreads; + s32 m_waitingThreads; + s32 m_maxThreads; + s32 m_idlePriority; + atomic_t m_loopingThreads; + // s32 m_spawningThreads; + unsigned long m_mmap_start; // inclusive + struct rb_root m_rangeMap; + struct rb_root m_freeMap; + range_map_t *m_pool; + size_t m_pool_active; + struct hlist_head m_incoming_death_notifications; + struct hlist_head m_outgoing_death_notifications; + struct hlist_head m_pending_death_notifications; // ready to be sent to user space + struct hlist_head m_active_death_notifications; // already sent to user space + struct hlist_head m_deleted_death_notifications; +} binder_proc_t; + + +binder_proc_t * new_binder_proc(void); +#if 0 +binder_proc_t * new_binder_proc_with_parent(pid_t id, pid_t mainThid, struct binder_thread *parent); +#endif +void binder_proc_destroy(binder_proc_t *that); + +#define binder_proc_IsAlive(that) ((that->m_state&(btDying|btDead)) == 0) +// bool binder_proc_IsAlive(binder_proc_t *that) const; +void binder_proc_Released(binder_proc_t *that); + +void binder_proc_Die(binder_proc_t *that, bool locked /* = false */); + +BND_DECLARE_ACQUIRE_RELEASE(binder_proc); +BND_DECLARE_ATTEMPT_ACQUIRE(binder_proc); + +void binder_proc_SetRootObject(binder_proc_t *that, struct binder_node *node); + +void binder_proc_Stop(binder_proc_t *that, bool now); + +bool binder_proc_AddThread(binder_proc_t *that, binder_thread_t *t); +void binder_proc_RemoveThread(binder_proc_t *that, struct binder_thread *t); + +status_t binder_proc_WaitForRequest(binder_proc_t *that, struct binder_thread* who, struct binder_transaction **t); + +void 
binder_proc_GetPendingDeathNotifications(binder_proc_t *that, binder_thread_t *thread, iobuffer_t *io); + +/* Call when a thread receives its bcREGISTER_LOOPER command. */ +void binder_proc_StartLooper(binder_proc_t *that, bool driver_spawned); +/* Call when exiting a thread that has been told bcREGISTER_LOOPER. */ +void binder_proc_FinishLooper(binder_proc_t *that, bool driverSpawned); + +status_t binder_proc_SetWakeupTime(binder_proc_t *that, bigtime_t time, s32 priority); +status_t binder_proc_SetIdleTimeout(binder_proc_t *that, bigtime_t timeDelta); +status_t binder_proc_SetReplyTimeout(binder_proc_t *that, bigtime_t timeDelta); +status_t binder_proc_SetMaxThreads(binder_proc_t *that, s32 num); +status_t binder_proc_SetIdlePriority(binder_proc_t *that, s32 pri); + +/* Call to place a transaction into this team's queue. */ +status_t binder_proc_Transact(binder_proc_t *that, struct binder_transaction *t); + +/* Management of transactions that are waiting to be deallocated. + These are safe to call with only a secondary reference on the + team. +*/ +status_t binder_proc_AddToNeedFreeList(binder_proc_t *that, struct binder_transaction *t); +status_t binder_proc_FreeBuffer(binder_proc_t *that, void *p); + +bool binder_proc_RefDescriptor(binder_proc_t *that, s32 descriptor, s32 type); +bool binder_proc_UnrefDescriptor(binder_proc_t *that, s32 descriptor, s32 type); +bool binder_proc_RemoveLocalMapping(binder_proc_t *that, void *ptr, struct binder_node *node); + +/* Called by binder_node when its last strong reference goes away, for the process to + do the appropriate bookkeeping. */ +void binder_proc_RemoveLocalStrongRef(binder_proc_t *that, struct binder_node *node); + +/* Called by binder_proc_ForceRefNode() if it is restoring the first strong reference + back on to the node. */ +void binder_proc_AddLocalStrongRef(binder_proc_t *that, struct binder_node *node); + +/* Attempt to acquire a primary reference on the given descriptor. 
+ The result will be true if this succeeded, in which case you + can just continue with it. If the result is false, then + 'out_target' may be set to the binder_node_t that you are making + the attempt on. You can execute a transaction to the node + to attempt the acquire on it, and -must- release a SECONDARY + reference on the node which this function acquired. */ +bool binder_proc_AttemptRefDescriptor(binder_proc_t *that, s32 descriptor, struct binder_node **out_target); + +/* Forcibly increment the primary reference count of the given node, + in response to a successful binder_proc_AttemptAcquire(). */ +void binder_proc_ForceRefNode(binder_proc_t *that, struct binder_node *node, iobuffer_t *io); + +s32 binder_proc_Node2Descriptor(binder_proc_t *that, struct binder_node *node, bool ref /* = true */, s32 type /* = PRIMARY */); +struct binder_node * binder_proc_Descriptor2Node(binder_proc_t *that, s32 descriptor, const void* id, s32 type /* = PRIMARY */); +status_t binder_proc_Ptr2Node(binder_proc_t *that, void *ptr, void *cookie, struct binder_node **n, iobuffer_t *io, const void* id, s32 type /* = PRIMARY */); + +/* death notifications */ +status_t binder_proc_RequestDeathNotification(binder_proc_t *that, binder_proc_t *client, void *cookie); +status_t binder_proc_ClearDeathNotification(binder_proc_t *that, binder_proc_t *client, void *cookie); +status_t binder_proc_DeadBinderDone(binder_proc_t *that, void *cookie); // called on client proc + +status_t binder_proc_TakeMeOffYourList(binder_proc_t *that); +status_t binder_proc_PutMeBackInTheGameCoach(binder_proc_t *that); + +bool binder_proc_ValidTransactionAddress(binder_proc_t *that, unsigned long address, struct page **pageptr); +range_map_t * binder_proc_AllocateTransactionBuffer(binder_proc_t *that, size_t size); +void binder_proc_FreeTransactionBuffer(binder_proc_t *that, range_map_t *buffer); +range_map_t * binder_proc_free_map_insert(binder_proc_t *that, range_map_t *buffer); +#endif // BINDER_PROC_H diff -urN 
linux-2.6.22.5/drivers/binder/binder_thread.c linux-2.6.22.5-android/drivers/binder/binder_thread.c --- linux-2.6.22.5/drivers/binder/binder_thread.c 1970-01-01 10:00:00.000000000 +1000 +++ linux-2.6.22.5-android/drivers/binder/binder_thread.c 2007-11-20 08:46:07.674251926 +1100 @@ -0,0 +1,1575 @@ +/* binder driver + * Copyright (C) 2005 Palmsource, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + +#include "binder_defs.h" +#include "binder_thread.h" +#include "binder_proc.h" +#include "binder_node.h" +#include "binder_transaction.h" +#include "iobuffer.h" + +#include +#include +#include + +static void binder_thread_Cleanup(binder_thread_t *that); + +static status_t binder_thread_WaitForReply(binder_thread_t *that, iobuffer_t *io); +static status_t binder_thread_WaitForRequest(binder_thread_t *that, iobuffer_t *io); +static status_t binder_thread_ReturnTransaction(binder_thread_t *that, iobuffer_t *io, binder_transaction_t *t); + +// static void binder_thread_WriteReturn(binder_thread_t *that, void *buffer, int size); + +// static void binder_thread_EnqueueTransaction(binder_thread_t *that, binder_transaction_t *t); + +// Set non-zero to do the capable(CAP_SYS_ADMIN) check +#define CHECK_CAPS 0 + +static binder_node_t *gContextManagerNode = NULL; +static 
DECLARE_MUTEX(gContextManagerNodeLock); +static atomic_t g_count = ATOMIC_INIT(0); + +int +binder_thread_GlobalCount() +{ + return atomic_read(&g_count); +} + +binder_thread_t * binder_thread_init(int thid, binder_proc_t *team) +{ + binder_thread_t *that; + + that = (binder_thread_t*)kmem_cache_alloc(thread_cache, GFP_KERNEL); + if (that) { + atomic_inc(&g_count); + that->attachedToThread = FALSE; + that->next = NULL; + INIT_LIST_HEAD(&that->waitStackEntry); + that->pendingChild = NULL; + that->nextRequest = NULL; + that->wakeReason = WAKE_REASON_NONE; + that->virtualThid = 0; + atomic_set(&that->m_primaryRefs, 0); + atomic_set(&that->m_secondaryRefs, 0); + atomic_set(&that->m_wake_count, 0); + that->m_err = 0; + init_MUTEX(&that->m_lock); + init_waitqueue_head(&that->m_wait); + that->m_waitForReply = 0; + that->m_reply = NULL; + that->m_consume = 0; + that->m_thid = thid; + that->m_team = team; + if (team != NULL) + BND_ACQUIRE(binder_proc, that->m_team, WEAK, that); + that->m_pendingReply = NULL; + that->m_pendingRefResolution = NULL; + that->m_teamRefs = 0; + that->m_isSpawned = FALSE; + that->m_isLooping = FALSE; + that->m_firstLoop = TRUE; + that->m_shortAttemptAcquire = FALSE; + that->m_pendingReplyIsRoot = FALSE; + that->m_failedRootReceive = FALSE; + that->m_failedRootReply = FALSE; + DPRINTF(5, (KERN_WARNING "*** CREATING THREAD %p (%p:%d)\n", that, that->m_team, that->m_thid)); + } + DBSHUTDOWN((KERN_WARNING "%s(%u, %p): %p\n", __func__, thid, team, that)); + return that; +} + +void binder_thread_destroy(binder_thread_t *that) +{ + DBSHUTDOWN((KERN_WARNING "binder_thread_destroy(%p, %p):%d\n", that, that->m_team, that->m_thid)); + if (that->m_isLooping && that->m_team && BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)) { + binder_proc_FinishLooper(that->m_team, that->m_isSpawned); + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + } + // We don't care about process, here. 
+ //if (find_thread(that->m_thid, 0, TRUE) != that) { + //DPRINTF(1, (KERN_WARNING "binder_thread_destroy(%p): couldn't find ourselves in the thread hash\n", that)); + //} + + binder_thread_Cleanup(that); + + if (that->m_team) { + BND_RELEASE(binder_proc, that->m_team, WEAK, that); + that->m_team = NULL; + } + + atomic_dec(&g_count); + + // free_lock(&that->m_lock); + kmem_cache_free(thread_cache, that); +} + +void +binder_thread_Released(binder_thread_t *that) +{ + DBSHUTDOWN((KERN_WARNING "%s(%p, %p):%d\n", __func__, that, that->m_team, that->m_thid)); + binder_thread_Die(that); +} + +void +binder_thread_Die(binder_thread_t *that) +{ + DBSHUTDOWN((KERN_WARNING "%s(%p) (%p:%d) in %d\n", __func__, that, that->m_team, binder_thread_Thid(that), current->pid)); + + // Always do this, even if all primary references on the team + // are gone. This is the only way the thread list gets cleaned up. + if (that->m_team != NULL) + binder_proc_RemoveThread(that->m_team, that); + + binder_thread_Cleanup(that); + + /* + * Linux doesn't seem to have an equivalent to delete_sem() + * delete_sem(that->m_ioSem); that->m_ioSem = B_BAD_SEM_ID; + */ + + DBSHUTDOWN((KERN_WARNING "Binder thread %p:%d: DEAD!\n", that->m_team, that->m_thid)); +} + +bool binder_thread_SetParentThread(binder_thread_t *that, binder_thread_t *replyTo) +{ + bool success; + + DPRINTF(4, (KERN_WARNING "binder_thread_SetParentThread(%p, %p)\n", that, replyTo)); + + BND_LOCK(that->m_lock); + if ((success = !that->m_failedRootReply)) { + + BND_ASSERT(!that->m_pendingReply, "Attaching to child thread that already has someone waiting for a reply!"); + that->m_pendingReply = binder_transaction_CreateEmpty(); + binder_transaction_SetRootObject(that->m_pendingReply, TRUE); + that->m_pendingReply->sender = replyTo; + that->m_pendingReplyIsRoot = TRUE; + BND_ACQUIRE(binder_thread, replyTo, WEAK, m_pendingReply); + + // The thread now has the reply info, so allow it to wake up and reply. 
+ binder_thread_Wakeup(that); + } + BND_UNLOCK(that->m_lock); + + return success; +} + +void binder_thread_ReleasePendingChild(binder_thread_t *that) +{ + binder_thread_t *child; + BND_LOCK(that->m_lock); + DPRINTF(4, (KERN_WARNING "binder_thread_ReleasePendingChild(%p): child=%p\n", that, that->pendingChild)); + child = that->pendingChild; + that->pendingChild = NULL; + BND_UNLOCK(that->m_lock); + + if (child) { + forget_thread(child); + } +} + +void binder_thread_AttachProcess(binder_thread_t *that, struct binder_proc *team) +{ + bool attached = FALSE; + + DPRINTF(4, (KERN_WARNING "binder_thread_AttachProcess(%p, %p)\n", that, team)); + + BND_LOCK(that->m_lock); + + BND_ASSERT(!that->m_team, "Child thread is already attached to its process!"); + if (that->m_team == NULL) { + attached = TRUE; + that->m_team = team; + BND_ACQUIRE(binder_proc, team, WEAK, that); + } + + BND_UNLOCK(that->m_lock); + + if (attached) { + if(!binder_proc_AddThread(team, that)) { + BND_ASSERT(0, "attached thread to dying process"); + } + } +} + +void +binder_thread_Cleanup(binder_thread_t *that) +{ + binder_transaction_t *cmd, *pendingRef; + binder_transaction_t *pendingReply; + binder_transaction_t *reply; + binder_node_t *contextManagerNode; + int relCount; + bool first; + + BND_LOCK(that->m_lock); + pendingRef = that->m_pendingRefResolution; + that->m_pendingRefResolution = NULL; + pendingReply = that->m_pendingReply; + that->m_pendingReply = NULL; + reply = that->m_reply; + that->m_reply = NULL; + relCount = that->m_teamRefs; + that->m_teamRefs = 0; + DPRINTF(0, (KERN_WARNING "%s(%p):%p,%d strong: %d, weak: %d\n", __func__, that, that->m_team, that->m_thid, that->m_primaryRefs.counter, that->m_secondaryRefs.counter)); + BND_UNLOCK(that->m_lock); + + while (relCount) { + if (that->m_team) + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + relCount--; + } + + first = TRUE; + while ((cmd = pendingRef)) { + if (first) { + first = FALSE; + DPRINTF(5, (KERN_WARNING "Binder thread 
%p:%d: cleaning up pending ref resolution.\n", that->m_team, that->m_thid)); + } + pendingRef = cmd->next; + DPRINTF(5, (KERN_WARNING "Deleting transaction %p\n", cmd)); + binder_transaction_DestroyNoRefs(cmd); + } + + first = TRUE; + while ((cmd = pendingReply)) { + if (first) { + first = FALSE; + DPRINTF(5, (KERN_WARNING "Binder thread %p:%d: cleaning up pending replies.\n", that->m_team, that->m_thid)); + } + if (cmd->sender) { + DPRINTF(5, (KERN_WARNING "Returning transaction %p to thread %p (%d)\n", + cmd, cmd->sender, binder_thread_Thid(cmd->sender))); + binder_thread_ReplyDead(cmd->sender); + } + pendingReply = cmd->next; + binder_transaction_Destroy(cmd); + } + + first = TRUE; + while ((cmd = reply)) { + if (first) { + first = FALSE; + DPRINTF(5, (KERN_WARNING "Binder thread %p:%d: cleaning up received replies.\n", that->m_team, that->m_thid)); + } + reply = cmd->next; + DPRINTF(5, (KERN_WARNING "Deleting transaction %p\n", cmd)); + binder_transaction_Destroy(cmd); + } + BND_LOCK(gContextManagerNodeLock); + if (gContextManagerNode && (gContextManagerNode->m_home == that->m_team && that->m_team->m_threads == NULL)) { + contextManagerNode = gContextManagerNode; + gContextManagerNode = NULL; + } + else { + contextManagerNode = NULL; + } + BND_UNLOCK(gContextManagerNodeLock); + if(contextManagerNode != NULL) { + DPRINTF(2, (KERN_WARNING "team %08lx is not longer the context manager\n", (unsigned long)that->m_team)); + binder_node_destroy(contextManagerNode); + } + + binder_thread_ReleasePendingChild(that); + + // Make sure this thread returns to user space. 
+ binder_thread_Wakeup(that); +} + +int +binder_thread_Control(binder_thread_t *that, unsigned int cmd, void *buffer) +{ + int result = -EINVAL; + unsigned int size = _IOC_SIZE(cmd); + + //ddprintf("binder -- ioctl %d, size=%d\n", cmd, size); + + DPRINTF(2, (KERN_WARNING "%s(%p, %d, %p): proc=%p\n", __func__, that, cmd, buffer, that->m_team)); + + switch (cmd) { + case BINDER_WRITE_READ: + DPRINTF(2, (KERN_WARNING "BINDER_WRITE_READ: %p:%d\n", that->m_team, that->m_thid)); + if (size >= sizeof(binder_write_read_t)) { + binder_write_read_t bwr; + if (copy_from_user(&bwr, buffer, sizeof(bwr)) == 0) { + DPRINTF(2, (KERN_WARNING " -- write %ld at %08lx\n -- read %ld at %08lx\n", bwr.write_size, bwr.write_buffer, bwr.read_size, bwr.read_buffer)); + if (bwr.write_size > 0) { + result = binder_thread_Write(that, (void *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed); + if (result < 0) { + bwr.read_consumed = 0; + copy_to_user(buffer, &bwr, sizeof(bwr)); + goto getout; + } + } + if (bwr.read_size > 0) { + result = binder_thread_Read(that, (void *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed); + if (result < 0) { + // For ERESTARTSYS, we have to propagate the fact + // that we've already done any writes. + //if (result != -ERESTARTSYS) { + //bwr.read_size = result; // FIXME? 
+ //} + copy_to_user(buffer, &bwr, sizeof(bwr)); + goto getout; + } + } + copy_to_user(buffer, &bwr, sizeof(bwr)); + result = 0; + } + } + break; + case BINDER_SET_WAKEUP_TIME: + if (size >= sizeof(binder_wakeup_time_t) && BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)) { + binder_wakeup_time_t *time = (binder_wakeup_time_t*)buffer; + result = binder_proc_SetWakeupTime(that->m_team, time->time, time->priority); + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + } + break; + case BINDER_SET_IDLE_TIMEOUT: + if (size >= 8 && BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)) { + result = binder_proc_SetIdleTimeout(that->m_team, *((bigtime_t*)buffer)); + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + } + break; + case BINDER_SET_REPLY_TIMEOUT: + if (size >= 8 && BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)) { + result = binder_proc_SetReplyTimeout(that->m_team, *((bigtime_t*)buffer)); + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + } + break; + case BINDER_SET_MAX_THREADS: + if (size >= 4 && BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)) { + result = binder_proc_SetMaxThreads(that->m_team, *((int*)buffer)); + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + } + break; + case BINDER_SET_IDLE_PRIORITY: + if (size >= 4 && BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)) { + result = binder_proc_SetIdlePriority(that->m_team, *((int*)buffer)); + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + } + break; + case BINDER_SET_CONTEXT_MGR: + if (size >= 4 && BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)) { + DPRINTF(2, (KERN_WARNING "bcSET_CONTEXT_MANAGER attempt by %p\n", that->m_team)); + // LOCK + // check for existing context + BND_LOCK(gContextManagerNodeLock); + if (!gContextManagerNode) { + // check for administration rights +#if CHECK_CAPS + if (capable(CAP_SYS_ADMIN)) { +#endif + gContextManagerNode = binder_node_init(that->m_team, NULL, NULL); + 
BND_FIRST_ACQUIRE(binder_node, gContextManagerNode, STRONG, that->m_team); + DPRINTF(2, (KERN_WARNING "making team %08lx context manager\n", (unsigned long)that->m_team)); + result = 0; +#if CHECK_CAPS + } else { + DPRINTF(2, (KERN_WARNING "%p doesn't have CAP_SYS_ADMIN rights\n", that->m_team)); + } +#endif + } else { + DPRINTF(2, (KERN_WARNING "gContextManagerNode already set to %p by %08lx", gContextManagerNode, (unsigned long)that->m_team)); + } + BND_UNLOCK(gContextManagerNodeLock); + // UNLOCK + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + } + break; + case BINDER_THREAD_EXIT: + BND_RELEASE(binder_thread, that, STRONG, 0); + result = 0; + break; + case BINDER_VERSION: + if (size >= sizeof(binder_version_t)) { + binder_version_t *vers = (binder_version_t*)buffer; + vers->protocol_version = BINDER_CURRENT_PROTOCOL_VERSION; + result = 0; + } + break; + default: + break; + } + +getout: + DPRINTF(2, (KERN_WARNING "%s(%p, %d, %p): proc=%p: result=%d\n", __func__, that, cmd, buffer, that->m_team, -result)); + + return result; +} + +int +binder_thread_Write(binder_thread_t *that, void *_buffer, int _size, signed long *consumed) +{ + int result, cmd, target; + binder_node_t *n; + iobuffer_t io; + + DPRINTF(2, (KERN_WARNING "binder_thread_Write(%p, %d)\n", _buffer, _size)); + if (that->m_err) return that->m_err; + if (!binder_proc_IsAlive(that->m_team)) return -ECONNREFUSED; + result = iobuffer_init(&io, (unsigned long)_buffer, _size, *consumed); + if (result) return result; + + while (1) { + if (that->m_consume) { + that->m_consume -= iobuffer_drain(&io, that->m_consume); + iobuffer_mark_consumed(&io); + } + target = -1; + if (iobuffer_read_u32(&io, &cmd)) goto finished; + DPRINTF(5, (KERN_WARNING "cmd: %d\n",cmd)); + switch (cmd) { + case bcINCREFS: { + if (iobuffer_read_u32(&io, &target)) goto finished; + DBREFS((KERN_WARNING "bcINCREFS of %d\n", target)); + if (BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)) { + 
binder_proc_RefDescriptor(that->m_team, target, WEAK); + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + } + iobuffer_mark_consumed(&io); + } break; + case bcINCREFS_DONE: { + void *ptr; + void *cookie; + if (iobuffer_read_void(&io, &ptr)) goto finished; + if (iobuffer_read_void(&io, &cookie)) goto finished; + DBREFS((KERN_WARNING "bcINCREFS_DONE of %p\n", ptr)); + if (BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)) { + if (binder_proc_Ptr2Node(that->m_team, ptr, cookie, &n, NULL, that, WEAK) == 0) { + BND_RELEASE(binder_node, n, WEAK, that->m_team); + BND_RELEASE(binder_node, n, WEAK, that->m_team); + } + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + } + iobuffer_mark_consumed(&io); + } break; + case bcACQUIRE: { + if (iobuffer_read_u32(&io, &target)) goto finished; + DBREFS((KERN_WARNING "bcACQUIRE of %d\n", target)); + if (BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)) { + binder_proc_RefDescriptor(that->m_team, target, STRONG); + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + } + iobuffer_mark_consumed(&io); + } break; + case bcACQUIRE_DONE: { + void *ptr; + void *cookie; + if (iobuffer_read_void(&io, &ptr)) goto finished; + if (iobuffer_read_void(&io, &cookie)) goto finished; + DBREFS((KERN_WARNING "bcACQUIRE_DONE of %p\n", ptr)); + if (BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)) { + if (binder_proc_Ptr2Node(that->m_team, ptr, cookie, &n, NULL, that, STRONG) == 0) { + BND_RELEASE(binder_node, n, STRONG, that->m_team); + BND_RELEASE(binder_node, n, STRONG, that->m_team); + } + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + } + iobuffer_mark_consumed(&io); + } break; + case bcATTEMPT_ACQUIRE: { + int priority; + if (iobuffer_read_u32(&io, &priority)) goto finished; + if (iobuffer_read_u32(&io, &target)) goto finished; + DBREFS((KERN_WARNING "bcATTEMPT_ACQUIRE of %d\n", target)); + if (BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)) { + binder_node_t *node; + if 
(binder_proc_AttemptRefDescriptor(that->m_team, target, &node)) { + DBREFS((KERN_WARNING "Immediate Success!\n")); + BND_ASSERT(!that->m_shortAttemptAcquire, "Already have AttemptAcquire result! (now succeeding)"); + that->m_shortAttemptAcquire = TRUE; + that->m_resultAttemptAcquire = TRUE; + } else if (node) { + binder_transaction_t *t; + // Need to wait for a synchronous acquire attempt + // on the remote node. Note that the transaction has + // special code to understand a tfAttemptAcquire, taking + // ownership of the secondary reference on 'node'. + DBREFS((KERN_WARNING "Sending off to owner!\n")); + t = binder_transaction_CreateRef(tfAttemptAcquire, binder_node_Ptr(node), binder_node_Cookie(node), that->m_team); + binder_transaction_SetPriority(t, (s16)priority); + t->target = node; + binder_transaction_SetInline(t, TRUE); + BND_LOCK(that->m_lock); + t->next = that->m_pendingRefResolution; + that->m_pendingRefResolution = t; + BND_UNLOCK(that->m_lock); + } else { + DBREFS((KERN_WARNING "Immediate Failure!\n")); + BND_ASSERT(!that->m_shortAttemptAcquire, "Already have AttemptAcquire result! (now failing)"); + that->m_shortAttemptAcquire = TRUE; + that->m_resultAttemptAcquire = FALSE; + } + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + } else { + DBREFS((KERN_WARNING "Team Failure!\n")); + BND_ASSERT(!that->m_shortAttemptAcquire, "Already have AttemptAcquire result! 
(now team failing)"); + that->m_shortAttemptAcquire = TRUE; + that->m_resultAttemptAcquire = FALSE; + } + iobuffer_mark_consumed(&io); + } break; + case bcACQUIRE_RESULT: { + int result; + binder_transaction_t *t; + if (iobuffer_read_u32(&io, &result)) goto finished; + iobuffer_mark_consumed(&io); + DBREFS((KERN_WARNING "bcACQUIRE_RESULT: %d\n",result)); + t = binder_transaction_Create(0, 0, 0, 0, NULL); + binder_transaction_SetAcquireReply(t, TRUE); + binder_transaction_SetInline(t, TRUE); + *(int *)t->data = result; + BND_LOCK(that->m_lock); + t->next = that->m_pendingRefResolution; + that->m_pendingRefResolution = t; + BND_UNLOCK(that->m_lock); + } break; + case bcRELEASE: { + if (iobuffer_read_u32(&io, &target)) goto finished; + DBREFS((KERN_WARNING "bcRELEASE of %d\n", target)); + if (BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)) { + binder_proc_UnrefDescriptor(that->m_team, target, STRONG); + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + } + iobuffer_mark_consumed(&io); + } break; + case bcDECREFS: { + if (iobuffer_read_u32(&io, &target)) goto finished; + DBREFS((KERN_WARNING "bcDECREFS of %d\n", target)); + if (BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)) { + binder_proc_UnrefDescriptor(that->m_team, target, WEAK); + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + } + iobuffer_mark_consumed(&io); + } break; + case bcFREE_BUFFER: { + void *ptr; + if (iobuffer_read_void(&io, &ptr)) goto finished; + DPRINTF(5, (KERN_WARNING "bcFREE_BUFFER: %p\n",ptr)); + BND_LOCK(that->m_lock); + if (that->m_pendingReply && that->m_pendingReply->map != NULL && binder_transaction_UserData(that->m_pendingReply) == ptr) { + // Data freed before reply sent. Remember this to free + // the transaction when we finally get its reply. 
+ binder_transaction_SetFreePending(that->m_pendingReply, TRUE); + BND_UNLOCK(that->m_lock); + } else { + BND_UNLOCK(that->m_lock); + binder_proc_FreeBuffer(that->m_team, ptr); + } + iobuffer_mark_consumed(&io); + } break; + case bcRETRIEVE_ROOT_OBJECT: { + int pid; + binder_thread_t *child; + if (iobuffer_read_u32(&io, &pid)) goto finished; + DPRINTF(2, (KERN_WARNING "bcRETRIEVE_ROOT_OBJECT: process %d\n", pid)); + child = attach_child_thread((pid_t)pid, that); + DPRINTF(2, (KERN_WARNING "bcRETRIEVE_ROOT_OBJECT: child binder_thread=%p\n", child)); + + BND_LOCK(that->m_lock); + if (child) { + that->pendingChild = child; + that->m_waitForReply++; + } else { + that->m_failedRootReceive = TRUE; + } + BND_UNLOCK(that->m_lock); + + iobuffer_mark_consumed(&io); + } break; + case bcTRANSACTION: + case bcREPLY: { + binder_transaction_data_t tr; + + if(cmd == bcTRANSACTION) { + DPRINTF(5, (KERN_WARNING "bcTRANSACTION\n")); + } + else { + DPRINTF(5, (KERN_WARNING "bcREPLY\n")); + } + + if (iobuffer_read_raw(&io, &tr, sizeof(tr))) goto finished; + if (tr.flags & tfInline) { + // ddprintf("inline transactions not supported yet\n"); + that->m_consume = tr.data_size - sizeof(tr.data); + iobuffer_mark_consumed(&io); + } else if (BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)) { + binder_transaction_t *t; + iobuffer_mark_consumed(&io); +/* + if (tr.data_size && !is_valid_range(tr.data.ptr.buffer, tr.data_size, PROT_UWR)) { + that->m_err = -EINVAL; + goto finished; + } + if (tr.offsets_size && !is_valid_range(tr.data.ptr.offsets, tr.offsets_size, PROT_UWR)) { + that->m_err = -EINVAL; + goto finished; + } +*/ + t = binder_transaction_Create(tr.code, tr.data_size, tr.data.ptr.buffer, tr.offsets_size, tr.data.ptr.offsets); + binder_transaction_SetUserFlags(t, tr.flags); + binder_transaction_SetPriority(t, (s16)tr.priority); + binder_transaction_SetReply(t, cmd == bcREPLY); + DPRINTF(4, ("Command %s %p: size=%p, first=%p\n", + cmd == bcTRANSACTION ? 
"transaction" : "reply", t, + tr.data_size, tr.data_size > 0 ? (*(u32*)tr.data.ptr.buffer) : 0)); + if (cmd == bcTRANSACTION) { + target = tr.target.handle; + if(target) { + t->target = binder_proc_Descriptor2Node(that->m_team, target, t, STRONG); + BND_ASSERT(t->target, "Failure converting target descriptor to node"); + } + else { + BND_LOCK(gContextManagerNodeLock); + if (gContextManagerNode && BND_ATTEMPT_ACQUIRE(binder_node, gContextManagerNode, STRONG, t)) { + t->target = gContextManagerNode; + } + else { + DPRINTF(0, (KERN_WARNING "Failed to acquire context manager node\n")); + t->target = NULL; + } + BND_UNLOCK(gContextManagerNodeLock); + } + DPRINTF(4, (KERN_WARNING "Transacting %p to %d(%p) in team %p\n", t, target, t->target, t->target ? t->target->m_home : NULL)); + } + + BND_LOCK(that->m_lock); + t->next = that->m_pendingRefResolution; + that->m_pendingRefResolution = t; + if (that->m_pendingReply && binder_transaction_IsRootObject(that->m_pendingReply)) { + BND_ASSERT(binder_transaction_IsRootObject(t), "EXPECTING ROOT REPLY!"); + } else { + BND_ASSERT(!that->m_pendingReply || !binder_transaction_IsRootObject(t), "UNEXPECTED ROOT REPLY!"); + } + BND_UNLOCK(that->m_lock); + + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + } + } break; + case bcREGISTER_LOOPER: { + DPRINTF(5, (KERN_WARNING "bcREGISTER_LOOPER for %p (%p:%d)\n", that, that->m_team, that->m_thid)); + BND_ASSERT(that->m_isSpawned == FALSE, "m_isSpawned in bcREGISTER_LOOPER"); + BND_ASSERT(that->m_isLooping == FALSE, "m_isLooping in bcREGISTER_LOOPER"); + that->m_isSpawned = TRUE; + that->m_isLooping = TRUE; + if (BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)) { + binder_proc_StartLooper(that->m_team, TRUE); + clear_bit(SPAWNING_BIT, &that->m_team->m_noop_spawner); + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + } + iobuffer_mark_consumed(&io); + } break; + case bcENTER_LOOPER: { + DPRINTF(5, (KERN_WARNING "bcENTER_LOOPER for %p (%p:%d)\n", that, 
that->m_team, that->m_thid)); + /* This thread is going to loop, but it's not one of the + driver's own loopers. */ + // ASSERT(that->m_isLooping == FALSE); + that->m_isLooping = TRUE; + if (BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)) { + binder_proc_StartLooper(that->m_team, FALSE); + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + } + iobuffer_mark_consumed(&io); + } break; + case bcEXIT_LOOPER: { + /* End of a looper that is not the driver's own. */ + DBSPAWN((KERN_WARNING "*** THREAD %p:%d RECEIVED bcEXIT_LOOPER\n", that->m_team, that->m_thid)); + if (binder_proc_IsAlive(that->m_team)) { + // ASSERT(that->m_isLooping == TRUE); + that->m_isLooping = FALSE; + if (BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)) { + binder_proc_FinishLooper(that->m_team, FALSE); + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + } + } + iobuffer_mark_consumed(&io); + } break; +#if 0 + case bcCATCH_ROOT_OBJECTS: { + if (BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)) { + if (binder_proc_IsAlive(that->m_team)) { + binder_proc_StartCapturingRootObjects(that->m_team); + } + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + } + iobuffer_mark_consumed(&io); + } break; +#endif + case bcSTOP_PROCESS: { + int now; + if (iobuffer_read_u32(&io, &target)) goto finished; + if (iobuffer_read_u32(&io, &now)) goto finished; + DBREFS((KERN_WARNING "bcSTOP_PROCESS of %d\n", target)); + if (BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)) { + binder_node_t *node = binder_proc_Descriptor2Node(that->m_team, target,that,WEAK); + if (node != NULL) { + binder_proc_t* proc = binder_node_AcquireHome(node, that); + if (proc != NULL) { + binder_proc_Stop(proc, now ? 
TRUE : FALSE); + BND_RELEASE(binder_proc, proc, STRONG, that); + } + BND_RELEASE(binder_node, node, WEAK,that); + } + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + } + iobuffer_mark_consumed(&io); + } break; + case bcSTOP_SELF: { + int now; + DPRINTF(5, (KERN_WARNING "bcSTOP_SELF\n")); + if (iobuffer_read_u32(&io, &now)) goto finished; + if (BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)) { + binder_proc_Stop(that->m_team, now ? TRUE : FALSE); + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + } + iobuffer_mark_consumed(&io); + } break; + case bcREQUEST_DEATH_NOTIFICATION: { + void *cookie; + binder_node_t *node; + if (iobuffer_read_u32(&io, &target)) goto finished; + if (iobuffer_read_void(&io, &cookie)) goto finished; + DPRINTF(5, (KERN_WARNING "bcREQUEST_DEATH_NOTIFICATION of %d w/cookie %p\n", target, cookie)); + node = binder_proc_Descriptor2Node(that->m_team, target, that, WEAK); + if(node != NULL) { + binder_proc_t* proc = binder_node_AcquireHome(node, node); + if (proc != NULL) { + binder_proc_RequestDeathNotification(proc, that->m_team, cookie); + BND_RELEASE(binder_proc, proc, STRONG, node); + } + BND_RELEASE(binder_node, node, WEAK, that); + } + iobuffer_mark_consumed(&io); + } break; + case bcCLEAR_DEATH_NOTIFICATION: { + void *cookie; + binder_node_t *node; + if (iobuffer_read_u32(&io, &target)) goto finished; + if (iobuffer_read_void(&io, &cookie)) goto finished; + DPRINTF(5, (KERN_WARNING "bcCLEAR_DEATH_NOTIFICATION of %d w/cookie %p\n", target, cookie)); + node = binder_proc_Descriptor2Node(that->m_team, target, that, WEAK); + if(node != NULL) { + binder_proc_t* proc = binder_node_AcquireHome(node, node); + if (proc != NULL) { + binder_proc_ClearDeathNotification(proc, that->m_team, cookie); + BND_RELEASE(binder_proc, proc, STRONG, node); + } + BND_RELEASE(binder_node, node, WEAK, that); + } + iobuffer_mark_consumed(&io); + } break; + case bcDEAD_BINDER_DONE: { + void *cookie; + if (iobuffer_read_void(&io, &cookie)) 
goto finished; + DPRINTF(5, (KERN_WARNING "bcDEAD_BINDER_DONE of cookie %p\n", cookie)); + binder_proc_DeadBinderDone(that->m_team, cookie); + iobuffer_mark_consumed(&io); + } break; + default: { + DPRINTF(5, (KERN_WARNING "Bad command %d on binder write().\n", cmd)); + } break; + } + } + +finished: + DPRINTF(5, (KERN_WARNING "binder_thread_Write() finished\n")); + *consumed = iobuffer_consumed(&io); + return 0; +} + +status_t +binder_thread_ReturnTransaction(binder_thread_t *that, iobuffer_t *io, binder_transaction_t *t) +{ + bool acquired; + bool freeImmediately; + binder_transaction_data_t tr; + DPRINTF(0, (KERN_WARNING "%s(%p:%d, %p, %p)\n", __func__, that->m_team, that->m_thid, io, t)); + if (iobuffer_remaining(io) < 18) return -ENOBUFS; + + acquired = BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that); + if (acquired) binder_transaction_ConvertFromNodes(t, that->m_team); + + freeImmediately = FALSE; + + if (binder_transaction_RefFlags(t)) { + DPRINTF(5, (KERN_WARNING " -- binder_transaction_RefFlags()\n")); + switch (binder_transaction_RefFlags(t)) { + case tfAttemptAcquire: { + DPRINTF(5, (KERN_WARNING " --- tfAttemptAcquire\n")); + iobuffer_write_u32(io, brATTEMPT_ACQUIRE); + iobuffer_write_u32(io, binder_transaction_Priority(t)); + } break; + case tfRelease: + DPRINTF(5, (KERN_WARNING " --- tfRelease\n")); + iobuffer_write_u32(io, brRELEASE); + break; + case tfDecRefs: + DPRINTF(5, (KERN_WARNING " --- tfDecRefs\n")); + iobuffer_write_u32(io, brDECREFS); + break; + } + DPRINTF(5, (KERN_WARNING " --- writing data pointer %p\n", t->data_ptr)); + // iobuffer_write_void(io, *((void**)binder_transaction_Data(t))); + iobuffer_write_void(io, t->data_ptr); // binder object token + iobuffer_write_void(io, t->offsets_ptr); // binder object cookie + freeImmediately = binder_transaction_RefFlags(t) != tfAttemptAcquire; + // Take reference on team, so it won't go away until this transaction + // is processed. 
+ if (binder_transaction_TakeTeam(t, that->m_team)) { + BND_LOCK(that->m_lock); + that->m_teamRefs++; + BND_UNLOCK(that->m_lock); + } + } else if (binder_transaction_IsAcquireReply(t)) { + DPRINTF(5, (KERN_WARNING " -- binder_transaction_IsAcquireReply()\n")); + iobuffer_write_u32(io, brACQUIRE_RESULT); + // iobuffer_write_u32(io, *((int*)binder_transaction_Data(t))); + iobuffer_write_u32(io, *(u32*)t->data); + freeImmediately = TRUE; + } else if (binder_transaction_IsDeadReply(t)) { + DPRINTF(5, (KERN_WARNING " -- binder_transaction_IsDeadReply()\n")); + if (that->pendingChild) binder_thread_ReleasePendingChild(that); + iobuffer_write_u32(io, brDEAD_REPLY); + freeImmediately = TRUE; + } else if (binder_transaction_IsFailedReply(t)) { + DPRINTF(5, (KERN_WARNING " -- binder_transaction_IsFailedReply()\n")); + if (that->pendingChild) binder_thread_ReleasePendingChild(that); + iobuffer_write_u32(io, brFAILED_REPLY); + freeImmediately = TRUE; + } else { + DPRINTF(5, (KERN_WARNING " -- else binder_transaction_IsReply(%p): %s\n", t, binder_transaction_IsReply(t) ? "true" : "false")); + if (that->pendingChild) binder_thread_ReleasePendingChild(that); + tr.flags = binder_transaction_UserFlags(t); + tr.priority = binder_transaction_Priority(t); + if (acquired) { + tr.data_size = binder_transaction_DataSize(t); + tr.offsets_size = binder_transaction_OffsetsSize(t); + tr.data.ptr.buffer = binder_transaction_UserData(t); + tr.data.ptr.offsets = binder_transaction_UserOffsets(t); + } else { + tr.data_size = 0; + tr.offsets_size = 0; + tr.data.ptr.buffer = NULL; + tr.data.ptr.offsets = NULL; + } + + DPRINTF(4, ("Response %s %p: size=%d, data=%p, first=%08x\n", + !binder_transaction_IsReply(t) ? "transaction" : "reply", t, + tr.data_size, tr.data.ptr.buffer, + tr.data_size > 0 ? 
(*(u32*)binder_transaction_Data(t)) : 0)); + + DPRINTF(5, (KERN_WARNING "%s(%p:%d, %p, %p) tr-data %p %d tr-offsets %p %d\n", __func__, that->m_team, that->m_thid, io, t, tr.data.ptr.buffer, tr.data_size, tr.data.ptr.offsets, tr.offsets_size)); + + if (binder_transaction_IsReply(t)) { + tr.target.ptr = NULL; + tr.cookie = NULL; tr.code = 0; + iobuffer_write_u32(io, brREPLY); + } else { + if (t->target) { + tr.target.ptr = binder_node_Ptr(t->target); + tr.cookie = binder_node_Cookie(t->target); + } else { + tr.target.ptr = NULL; + tr.cookie = NULL; + } + tr.code = binder_transaction_Code(t); + iobuffer_write_u32(io, brTRANSACTION); + } + iobuffer_write_raw(io, &tr, sizeof(tr)); + } + + if (freeImmediately) { + DPRINTF(0, (KERN_WARNING "binder_thread_ReturnTransaction() delete %p\n",t)); + binder_transaction_Destroy(t); + } else { + t->receiver = that; + BND_ACQUIRE(binder_thread, that, WEAK, t); + if (t->sender) { + /* A synchronous transaction blocks this thread until + the receiver completes. */ + DPRINTF(0, (KERN_WARNING "binder_thread %p:%d (%d): enqueueing transaction %p, pending reply %p\n", that->m_team, that->m_thid, that->virtualThid, t, that->m_pendingReply)); + BND_ASSERT(!binder_transaction_IsFreePending(t), "transaction with free pending!"); + if (that->virtualThid) { + if (t->sender->virtualThid) { + BND_ASSERT(t->sender->virtualThid == that->virtualThid, "Bad virtualThid from sender!"); + } else { + BND_ASSERT(t->sender->m_thid == that->virtualThid, "My virtualThid is different than sender thid!"); + } + } + DPRINTF(5, (KERN_WARNING "t->sender->virtualThid: %d, that->virtualThid: %d\n", t->sender->virtualThid, that->virtualThid)); + if (t->sender->virtualThid) { + BND_ASSERT(that->virtualThid == 0 || that->virtualThid == t->sender->virtualThid, "virtualThid not cleared!"); + that->virtualThid = t->sender->virtualThid; + DPRINTF(0, (KERN_WARNING "Continuing virtualThid: %d\n", that->virtualThid)); + } else { + BND_ASSERT(that->virtualThid == 0 || that->virtualThid == 
t->sender->m_thid, "virtualThid not cleared!"); + that->virtualThid = t->sender->m_thid; + DPRINTF(0, (KERN_WARNING "Starting new virtualThid: %d\n", that->virtualThid)); + } + BND_LOCK(that->m_lock); + DPRINTF(5, (KERN_WARNING "%p:%d(%d) new reply: %p, pending reply: %p\n", that->m_team, that->m_thid, that->virtualThid, t, that->m_pendingReply)); + t->next = that->m_pendingReply; + that->m_pendingReply = t; + BND_UNLOCK(that->m_lock); + } else { + /* A reply transaction just waits until the receiver is done with + its data. */ + DPRINTF(0, (KERN_WARNING "binder_thread: return reply transaction %p\n", t)); + binder_proc_AddToNeedFreeList(that->m_team, t); + } + } + + iobuffer_mark_consumed(io); + + if (acquired) BND_RELEASE(binder_proc, that->m_team, STRONG, that); + + return 0; +} + +status_t +binder_thread_WaitForReply(binder_thread_t *that, iobuffer_t *io) +{ + status_t err; + binder_transaction_t *t = NULL; + if (iobuffer_remaining(io) < 18) return -ENOBUFS; + + if (!BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)) return -ECONNREFUSED; + + if (that->m_isLooping) binder_proc_TakeMeOffYourList(that->m_team); + + // FIXME: implement reply timeouts? + err = wait_event_interruptible(that->m_wait, atomic_read(&that->m_wake_count) > 0); + if(err == 0) + atomic_dec(&that->m_wake_count); + DPRINTF(0, (KERN_WARNING "%p:%d down_interruptible() returned %08x\n", that->m_team, that->m_thid, err)); + + //DBTRANSACT((KERN_WARNING "*** Thread %d received direct %p! wait=%d, isAnyReply=%d\n", current->pid, that->m_reply, that->m_waitForReply, binder_transaction_IsAnyReply(that->m_reply))); + + /* FFB: why don't we check the err here, geh/hackbod? */ + if (that->m_isLooping) binder_proc_PutMeBackInTheGameCoach(that->m_team); + + BND_LOCK(that->m_lock); + if ((t = that->m_reply)) { + status_t result; + /* If this is a reply, handle it. When the binder_proc_t supplies + a reflection, it will take care of adjusting our thread + priority at that point. 
The user-space looper is responsible + for restoring its priority when done handling the reflect. */ + if (binder_transaction_IsAnyReply(t)) that->m_waitForReply--; + that->m_reply = t->next; + BND_UNLOCK(that->m_lock); + result = binder_thread_ReturnTransaction(that, io, t); + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + return result; + } + BND_UNLOCK(that->m_lock); + + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + // We can get here if we need to spawn a looper. + // BND_VALIDATE(err != 0, "Binder replySem released without reply available", return -EINVAL); + return err; +} + +status_t +binder_thread_WaitForRequest(binder_thread_t *that, iobuffer_t *io) +{ + binder_transaction_t *t = NULL; + status_t err; + if (iobuffer_remaining(io) < 18) return -ENOBUFS; + + err = binder_proc_WaitForRequest(that->m_team, that, &t); + if (err == 0 && t != NULL) { + // ASSERT(t); + err = binder_thread_ReturnTransaction(that, io, t); + } + + return err; +} + +static status_t +binder_thread_WaitForParent(binder_thread_t *that) +{ + binder_thread_t *targetThread; + struct task_struct *parentTask; + pid_t childPid; + bigtime_t wakeupTime; + status_t err; + + DPRINTF(5, (KERN_WARNING "%s: on thread %p\n", __func__, that)); + + // We want to support wrappers, where the real child process + // being run may have some additional processes (such as xterms, + // gdb sessions, etc) between it and the parent that started it. + // In that case, the parent won't be talking directly with our + // thread structure but instead with its immediate child, so we + // need to go up and find it. + + targetThread = that; + if (that->m_pendingReply == NULL) { + DPRINTF(5, (KERN_WARNING "%s: PID %d: finding parent who forked us.\n", __func__, that->m_thid)); + // Parent hasn't set this thread up for a reply... figure out + // what is going on. 
+ targetThread = NULL; + parentTask = current; + do { + childPid = parentTask->pid; + parentTask = parentTask->parent; + if (!parentTask) break; + targetThread = check_for_thread(parentTask->pid, FALSE); + DPRINTF(5, (KERN_WARNING "%s: Up to parent PID %d: targetThread=%p\n", __func__, parentTask->pid, targetThread)); + } while (targetThread == NULL); + + // If we found a thread structure, and it is not set up to + // send a root reply, then we hit the parent and it has not + // yet stopped to wait for the reply. So we'll go ahead and + // and create the child thread structure so we can block on + // it until the parent gets it set up. + DPRINTF(5, (KERN_WARNING "%s: Finished search: targetThread=%p, childPid=%d\n", __func__, targetThread, childPid)); + if (targetThread && !targetThread->m_pendingReplyIsRoot) { + targetThread = check_for_thread(childPid, TRUE); + DPRINTF(5, (KERN_WARNING "%s: Created wrapper process thread structure: %p\n", __func__, targetThread)); + } + } + + if (targetThread == NULL) { + printk(KERN_WARNING "%s: Binder: PID %d attempting to send root reply without waiting parent\n", __func__, that->m_thid); + return -EINVAL; + } + + // Now wait for the parent to be blocked waiting for a reply. + // Hard-coded to give the parent 10 seconds to get around to us. + wakeupTime = 10*HZ; + do_div(wakeupTime, TICK_NSEC); + wakeupTime += get_jiffies_64(); + DPRINTF(0, (KERN_WARNING "%s: Process %d is about to snooze on thread %p (%d)\n", __func__, current->pid, targetThread, targetThread->m_thid)); + err = binder_thread_Snooze(targetThread, wakeupTime); + + // Just one more thing to deal with -- if there is a wrapper process, + // then it is the wrapper that has been set up to reply. We need to + // move that state to our own process because we are the one doing + // the reply. 
+ if (targetThread != that) { + binder_transaction_t* reply; + BND_LOCK(targetThread->m_lock); + DPRINTF(1, (KERN_WARNING "%s: Wrapper has pendingReply=%p, isRoot=%d\n", __func__, targetThread->m_pendingReply, targetThread->m_pendingReplyIsRoot)); + reply = targetThread->m_pendingReply; + if (reply) { + targetThread->m_pendingReply = reply->next; + targetThread->m_pendingReplyIsRoot = FALSE; + } + BND_UNLOCK(targetThread->m_lock); + + if (reply) { + BND_LOCK(that->m_lock); + reply->next = that->m_pendingReply; + that->m_pendingReply = reply; + that->m_pendingReplyIsRoot = TRUE; + BND_UNLOCK(that->m_lock); + } + + // The retrieval of the wrapper thread structure caused us + // to take a reference on it. Now release the reference, + // removing the structure from our thread list if appropriate. + forget_thread(targetThread); + } + + if (err != 0 && that->m_pendingReply) { + /* If an error occurred but the pendingReply has + also been given, then our semaphore has also been + released. We don't want to get out of sync. */ + DPRINTF(5, (KERN_WARNING "Thread %d: Re-acquire IO sem!\n", binder_thread_Thid(that))); + // Note: targetThread -is- the correct one to use here, that + // is the one we blocked on. + binder_thread_AcquireIOSem(targetThread); + } + + DPRINTF(0, (KERN_WARNING "%s: Returning: pendingReply=%p, err=%d\n", __func__, that->m_pendingReply, err)); + return that->m_pendingReply ? 
0 : err; +} + +int +binder_thread_Read(binder_thread_t *that, void *buffer, int size, signed long *consumed) +{ + int origRemain; + status_t err = 0; + bool isRoot; + bool isInline; + /* ditch these next two lines under linux, if we can */ + pid_t me = current->pid; + + binder_transaction_t *t,*replyTo; + iobuffer_t io; + bool acquired = FALSE; + + if (me != that->m_thid) return -EINVAL; + + DPRINTF(0, (KERN_WARNING "binder_thread_Read: %08lx (%p:%d)\n", (unsigned long)that, that->m_team, that->m_thid)); + iobuffer_init(&io, (unsigned long)buffer, size, *consumed); + + /* + * Write brNOOP, but don't mark it consumed. We'll replace the brNOOP with + * a brSPAWN_LOOPER if we need to spawn a thread. + * Only do this once, in case the system call gets restarted for some reason. + */ + if (*consumed == 0) iobuffer_write_u32(&io, brNOOP); + + /* Read as much data as possible, until we either have to block + or have filled the buffer. */ + + while (iobuffer_remaining(&io) > 8) { + if (!binder_proc_IsAlive(that->m_team)) { + /* If the team is dead, write a command to say so and exit + right now. Do not pass go, do not collect $200. */ + DPRINTF(0, (KERN_WARNING " binder_proc_IsAlive(%08x): false\n", (unsigned int)that->m_team)); + iobuffer_write_u32(&io, brFINISHED); + iobuffer_mark_consumed(&io); + err = -ECONNREFUSED; + goto finished; + } else if (that->m_shortAttemptAcquire) { + /* Return the result of a short-circuited attempt acquire. */ + DPRINTF(0, (KERN_WARNING "Thread %d already has reply!\n", that->m_thid)); + that->m_shortAttemptAcquire = FALSE; + iobuffer_write_u32(&io, brACQUIRE_RESULT); + iobuffer_write_u32(&io, that->m_resultAttemptAcquire); + iobuffer_mark_consumed(&io); + continue; + } else if (that->m_failedRootReceive) { + // XXX Would be nice to return a little more informative + // error message. + that->m_failedRootReceive = FALSE; + iobuffer_write_u32(&io, brDEAD_REPLY); + goto finished; + } + + /* Look for a queued transaction. 
*/ + BND_LOCK(that->m_lock); + if ((t=that->m_pendingRefResolution) != NULL) { + if (iobuffer_consumed(&io) > 0 && (binder_transaction_MaxIOToNodes(t)+4) > iobuffer_remaining(&io)) { + /* If there is already data in the buffer, and may not be enough + room for what this transaction could generate, then stop now. */ + DPRINTF(0, (KERN_WARNING "Aborting ConvertToNodes: consumed=%d, max=%d, remain=%d\n", iobuffer_consumed(&io), binder_transaction_MaxIOToNodes(t)+4, iobuffer_remaining(&io))); + BND_UNLOCK(that->m_lock); + goto finished; + } + that->m_pendingRefResolution = t->next; + } + BND_UNLOCK(that->m_lock); + + /* If a transaction was found, twiddle it and send it off. */ + if (t != NULL && (acquired || (acquired=BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)))) { + + DPRINTF(5, (KERN_WARNING "Thread %d has pending transaction %p\n", that->m_thid, t)); + + isRoot = (binder_transaction_IsRootObject(t)); + + /* Perform node conversion if not already done. */ + if (!binder_transaction_IsReferenced(t)) { + binder_proc_t *proc = NULL; + int acquiredProc = 0; + + DBREAD((KERN_WARNING "Thread %d performing ref resolution!\n", that->m_thid)); + origRemain = iobuffer_remaining(&io); + err = 0; + if (isRoot) { + // If we are replying with the root object, we first need to block + // until our parent has set us up to have somewhere to reply to. + err = binder_thread_WaitForParent(that); + + BND_LOCK(that->m_lock); + that->m_failedRootReply = that->m_pendingReply == NULL; + if (that->m_failedRootReply) err = -EINVAL; + BND_UNLOCK(that->m_lock); + } + /* + * The moment of truth. In order to convert nodes, we have to + * copy the data. In order to copy the data, we need to know + * the recipient of the transaction. If the transaction has a + * target, the target's team becomes the recipient. If the + * transaction carries a reply, use the pending reply's sending + * team. 
+ */ + if (err == 0) { + if (t->target) { + proc = binder_node_AcquireHome(t->target, that); + acquiredProc = proc != NULL; + } else { + proc = that->m_pendingReply ? binder_thread_Team(that->m_pendingReply->sender) : NULL; + } + err = proc ? 0 : -EINVAL; + } + if (!proc) { + DPRINTF(0, (KERN_WARNING "*#*#*# NO TARGET PROCESS FOR binder_transaction_CopyTransactionData #*#*#*\n")); + DPRINTF(0, (KERN_WARNING "t->target: %p, that->m_pendingReply: %p, m_pendingReply->sender: %p\n", t->target, that->m_pendingReply, that->m_pendingReply ? that->m_pendingReply->sender : NULL)); + } + if (err == 0) + err = binder_transaction_CopyTransactionData(t, proc); + if (err == 0) + err = binder_transaction_ConvertToNodes(t, that->m_team, &io); + /* If we got some error, report error to the caller so they don't wait forever. */ + if (err < 0 && !binder_transaction_IsReply(t)) { + if(proc && binder_proc_IsAlive(proc)) + iobuffer_write_u32(&io, brFAILED_REPLY); + else + iobuffer_write_u32(&io, brDEAD_REPLY); + } + iobuffer_mark_consumed(&io); + + if (acquiredProc) { + BND_RELEASE(binder_proc, proc, STRONG, that); + } + + if (err < 0 || iobuffer_remaining(&io) < 4) { + /* XXX Fail if we run out of room. Do we need to deal with this + better. (It's only a problem if the caller is trying to read in + to a buffer that isn't big enough, in total, for a returned + transaction. */ + DPRINTF(0, (KERN_WARNING "Aborting transaction: err: %08x (or not enough room to return last command)\n", err)); + err = 0; + if(!binder_transaction_IsReply(t)) { + binder_transaction_Destroy(t); + goto finished; + } + binder_transaction_SetFailedReply(t, TRUE); + } + + /* If we aren't sending anything back to the caller, we can + deliver this transaction right away. Otherwise, we must + wait for the caller to process the returned data. This + is due to a race condition between the receiver releasing + its references and the caller acquiring any new references + returned by the driver. 
*/ + if (origRemain != iobuffer_remaining(&io)) { + DBREAD((KERN_WARNING "Transaction acquired references! Keeping.\n")); + BND_LOCK(that->m_lock); + t->next = that->m_pendingRefResolution; + that->m_pendingRefResolution = t; + BND_UNLOCK(that->m_lock); + t = NULL; + } + } +#if 0 + // FFB's broken debug code + else { + DPRINTF(0, (KERN_WARNING "binder_transaction_IsReferenced(%p) true -- sender: %d (vthid: %d)\n", t, t->sender->m_thid, t->sender->virtualThid)); + } +#endif + + /* Send this transaction off to its target. */ + if (t != NULL) { + DBREAD((KERN_WARNING "Thread %d delivering transaction!\n", that->m_thid)); + isInline = binder_transaction_IsInline(t); + if (binder_transaction_IsAnyReply(t)) { + BND_LOCK(that->m_lock); + + replyTo = that->m_pendingReply; + if (replyTo) { + that->m_pendingReply = replyTo->next; + if (!that->m_pendingReply) { + that->virtualThid = 0; + DPRINTF(5, (KERN_WARNING "virtualThid reset to 0, m_waitForReply: %d\n", that->m_waitForReply)); + } else { + DPRINTF(5, (KERN_WARNING "virtualThid: %d, m_pendingReply: %p, m_waitForReply: %d\n", that->virtualThid, that->m_pendingReply, that->m_waitForReply)); + } + BND_UNLOCK(that->m_lock); + + /* If this is a successful bcATTEMPT_ACQUIRE, then take + care of reference counts now. + */ + if (binder_transaction_IsAcquireReply(t) && (*(int*)t->data != 0)) { + binder_proc_ForceRefNode(binder_thread_Team(replyTo->sender), replyTo->target, &io); + } + + if (binder_transaction_IsRootObject(replyTo)) { + BND_ASSERT(binder_transaction_IsRootObject(t), "EXPECTING ROOT REPLY!"); + } else if (binder_transaction_RefFlags(replyTo)&tfAttemptAcquire) { + BND_ASSERT(binder_transaction_IsAcquireReply(t), "EXPECTING ACQUIRE REPLY!"); + } else { + BND_ASSERT(!binder_transaction_IsRootObject(t) && !binder_transaction_IsAcquireReply(t), "EXPECTING REGULAR REPLY!"); + } + + DBTRANSACT((KERN_WARNING "*** Thread %d is replying to %p with %p! 
wait=%d\n", + that->m_thid, replyTo, t, that->m_waitForReply)); + binder_thread_Reply(replyTo->sender, t); + if (binder_transaction_IsInline(replyTo) || binder_transaction_IsRootObject(replyTo)) { + binder_transaction_Destroy(replyTo); + } else { + DPRINTF(0, (KERN_WARNING "binder_thread: finish reply request %p\n", replyTo)); + if (binder_transaction_IsFreePending(replyTo)) { + binder_transaction_Destroy(replyTo); + } else { + binder_proc_AddToNeedFreeList(that->m_team, replyTo); + } + } + } else { + BND_UNLOCK(that->m_lock); + DPRINTF(1, (KERN_WARNING "********** Nowhere for reply to go!!!!!!!!!!!\n")); +#if 0 + BND_ASSERT(binder_transaction_IsRootObject(t) || !binder_proc_IsAlive(that->m_team), "Unexpected reply!"); + if (binder_transaction_IsRootObject(t)) binder_proc_CaptureRootObject(t); + else { + binder_transaction_Destroy(t); + } +#endif + } + } else { + t->sender = that; + BND_ACQUIRE(binder_thread, that, WEAK, t); + that->m_waitForReply++; + DPRINTF(2, (KERN_WARNING "*** Thread %d going to wait for reply to %p! now wait=%d\n", that->m_thid, t, that->m_waitForReply)); + if (t->target) binder_node_Send(t->target, t); + else { + binder_thread_ReplyDead(that); + binder_transaction_Destroy(t); + } + } + if (!isInline) iobuffer_write_u32(&io, brTRANSACTION_COMPLETE); + iobuffer_mark_consumed(&io); + } + + /* Got a transaction but team is going away. Toss it. */ + } else if (t != NULL) { + DPRINTF(0, (KERN_WARNING "Transaction sent to dying team, thread %d.\n", that->m_thid)); + binder_transaction_DestroyNoRefs(t); + + /* If there is data available, return it now instead of + waiting for the next transaction. */ + } else if (iobuffer_consumed(&io) > 0) { + DPRINTF(2, (KERN_WARNING "Thread %d has %d bytes of data to return, won't wait for transaction.\n", that->m_thid, iobuffer_consumed(&io))); + goto finished; + + /* No transaction, but maybe we are waiting for a reply back? 
*/ + } else if (that->m_waitForReply) { + DPRINTF(2, (KERN_WARNING "Thread %d waiting for reply!\n", that->m_thid)); + if ((sizeof(binder_transaction_data_t)+8) > iobuffer_remaining(&io)) { + /* If there isn't enough room in the buffer to return a transaction, + then stop now. */ + DPRINTF(0, (KERN_WARNING "Aborting read: Not enough room to return reply\n")); + goto finished; + } + err = binder_thread_WaitForReply(that, &io); + if (err == -ENOBUFS) err = 0; + goto finished; + + /* We're all out. Just wait for something else to do. */ + } else { + DPRINTF(2, (KERN_WARNING "Thread %d waiting for request, vthid: %d!\n", that->m_thid, that->virtualThid)); + BND_ASSERT(that->virtualThid == 0, "Waiting for transaction with vthid != 0"); + BND_ASSERT(that->m_pendingReply == NULL, "Waiting for transaction with pending reply"); + + if (that->m_teamRefs > 0) { + int relCount; + BND_LOCK(that->m_lock); + relCount = that->m_teamRefs; + that->m_teamRefs = 0; + BND_UNLOCK(that->m_lock); + DPRINTF(3, (KERN_WARNING "Unlocking proc %08x %d times\n", (unsigned int)that->m_team, relCount)); + + while (relCount) { + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + relCount--; + } + } + + err = binder_thread_WaitForRequest(that, &io); + if (err == -ERESTARTSYS) { + goto finished; + } else if (err == -EINTR) { + goto finished; + } else if (err == -ECONNREFUSED) { + goto finished; + } else if (err == -ENOBUFS) { + err = 0; + goto finished; + } else if (err == REQUEST_EVENT_READY) { + iobuffer_write_u32(&io, brEVENT_OCCURRED); + iobuffer_write_u32(&io, that->returnedEventPriority); + iobuffer_mark_consumed(&io); + err = 0; + } else if (err == DEATH_NOTIFICATION_READY) { + binder_proc_GetPendingDeathNotifications(that->m_team, that, &io); + iobuffer_mark_consumed(&io); + err = 0; + } else if (err == -ETIMEDOUT) { + if (that->m_isLooping) { + if ((acquired=BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that))) + binder_proc_FinishLooper(that->m_team, that->m_isSpawned); + 
that->m_isLooping = FALSE; + } + if (that->m_isSpawned) iobuffer_write_u32(&io, brFINISHED); + else iobuffer_write_u32(&io, brOK); + iobuffer_mark_consumed(&io); + err = 0; + } + /* + else if (err == B_BAD_SEM_ID) { + iobuffer_write_u32(&io, brFINISHED); + iobuffer_mark_consumed(&io); + } + */ + else if (err < 0) { + iobuffer_write_u32(&io, brERROR); + iobuffer_write_u32(&io, err); + iobuffer_mark_consumed(&io); + err = 0; + goto finished; + } + } + } + +finished: + if (acquired) BND_RELEASE(binder_proc, that->m_team, STRONG, that); + + // Return number of bytes available, or the last error code + // if there are none. (This is so we can return -EINTR.) + *consumed = iobuffer_consumed(&io); + + if (err != -ERESTARTSYS) { + if (test_and_clear_bit(DO_SPAWN_BIT, &that->m_team->m_noop_spawner)) { + DBSPAWN((KERN_WARNING "Asking %p:%d to brSPAWN_LOOPER\n", that->m_team, that->m_thid)); + // make the brNOOP into a brSPAWN_LOOPER + // *(u32*)buffer = brSPAWN_LOOPER; + // We call the unchecked __put_user() here because the constructor + // for iobuffer already called access_ok(). + __put_user(brSPAWN_LOOPER, (u32*)buffer); + if (iobuffer_consumed(&io) < sizeof(u32)) { + iobuffer_mark_consumed(&io); + *consumed = iobuffer_consumed(&io); + } + } + } + return err; +} + +status_t +binder_thread_Snooze(binder_thread_t *that, bigtime_t timeout) +{ + status_t res = 0; + + DPRINTF(1, (KERN_WARNING "binder_thread_Snooze(%d, %lld)\n", that->m_thid, timeout)); + /* + * I don't know if I got the semantics correct for this. 
+ status_t res = acquire_sem_etc(that->m_ioSem,1,B_CAN_INTERRUPT|B_ABSOLUTE_TIMEOUT,timeout); + */ + + if(signal_pending(current)) { + DPRINTF(1, (KERN_WARNING "binder_thread_Snooze(%d, %lld) signal pending -- ABORT\n", that->m_thid, timeout)); + return -ERESTARTSYS; + } + + timeout -= get_jiffies_64(); + DPRINTF(1, (KERN_WARNING "binder_thread_Snooze(%d, relative %lld)\n", that->m_thid, timeout)); + if (timeout > 0) { +#if 1 + bigtime_t check = timeout; + do_div(check, HZ); + if (check > 10) { + DPRINTF(0, (KERN_WARNING "%s: timeout exceeds 10 seconds at %Ld sec\n", __func__, check)); + return -ETIMEDOUT; + } +#endif + DPRINTF(5, (KERN_WARNING "%s: m_wake_count: %d\n", __func__, atomic_read(&that->m_wake_count))); + res = wait_event_interruptible_timeout(that->m_wait, atomic_read(&that->m_wake_count) > 0, timeout); + if(res > 0) + atomic_dec(&that->m_wake_count); + } + else { + /* Makes system lock up due to busy wait + * bug temporary + * when not using unlocked ioctl + */ + static unsigned int last_yield = 0; + unsigned int now = jiffies; + if ((now - last_yield) > 5*HZ) { + last_yield = now; + //printk(KERN_WARNING "binder_thread_Snooze(%d, %lld) yield wakeup_time thread %lld, team %lld, this %p, team->waitStack %p, team->state %x\n", + // that->m_thid, timeout, that->wakeupTime, that->m_team->m_wakeupTime, that, that->m_team->m_waitStack, that->m_team->m_state); + yield(); + } + } + + //ddprintf("Result of snooze in thread %ld: 0x%08lx\n", that->m_thid, res); + if (res == 0) // timed out + res = -ETIMEDOUT; + else if (res > 0) // acquired, reports time remaining + res = 0; + return res; +} + +status_t +binder_thread_AcquireIOSem(binder_thread_t *that) +{ + int err; + DPRINTF(0, (KERN_WARNING "binder_thread_AcquireIOSem(%d)\n", that->m_thid)); + // while (acquire_sem_etc(that->m_ioSem,1,B_TIMEOUT,0) == -EINTR) ; + //wait_event(that->m_wait, that->m_wake_count > 0); + err = wait_event_interruptible(that->m_wait, atomic_read(&that->m_wake_count) > 0); // this 
should probably not be interruptible, but it allows us to kill the thread + if(err == 0) + atomic_dec(&that->m_wake_count); + return err; +} + +void +binder_thread_Wakeup(binder_thread_t *that) +{ + DIPRINTF(0, (KERN_WARNING "binder_thread_Wakeup(%d)\n", that->m_thid)); + // We use B_DO_NOT_RESCHEDULE here because Wakeup() is usually called + // while the binder_proc_t is locked. If the thread is a real-time + // priority, waking up here will cause pinging between this thread + // and its caller. (We wake up, block on the binder_proc_t, the caller + // continues and unlocks, then we continue.) + // release_sem_etc(that->m_ioSem, 1, B_DO_NOT_RESCHEDULE); + // FIXME: this may not have the do-not-reschedule semantics we want (wake_up_interruptible_sync may work for this) + atomic_add(1, &that->m_wake_count); + wake_up(&that->m_wait); + //wake_up_interruptible_sync(&that->m_wait); +} + +void +binder_thread_Reply(binder_thread_t *that, binder_transaction_t *t) +{ + DBTRANSACT((KERN_WARNING "*** Thread %d (vthid %d) sending to %d (vthid %d)! wait=%d, isReply=%d, isAcquireReply=%d\n", + current->pid, t->sender ? 
t->sender->virtualThid : -1, + that->m_thid, that->virtualThid, that->m_waitForReply, binder_transaction_IsReply(t), binder_transaction_IsAcquireReply(t))); + BND_LOCK(that->m_lock); + if (that->m_team && binder_proc_IsAlive(that->m_team)) { + // BND_VALIDATE(that->m_reply == NULL, "Already have reply!", ddprintf("Current reply: %p, new reply: %p\n", that->m_reply, t)); + BND_ASSERT(that->m_waitForReply > 0, "Not waiting for a reply!"); + t->next = that->m_reply; + that->m_reply = t; + } else { + BND_ASSERT(t != NULL, "binder_thread_Reply() called with NULL transaction!"); + if (t) binder_transaction_Destroy(t); + } + BND_UNLOCK(that->m_lock); + atomic_add(1, &that->m_wake_count); + wake_up(&that->m_wait); +} + +void +binder_thread_ReplyDead(binder_thread_t *that) +{ + binder_transaction_t* t = binder_transaction_CreateEmpty(); + binder_transaction_SetDeadReply(t, TRUE); + binder_thread_Reply(that, t); +} + +BND_IMPLEMENT_ACQUIRE_RELEASE(binder_thread); +BND_IMPLEMENT_ATTEMPT_ACQUIRE(binder_thread); + + diff -urN linux-2.6.22.5/drivers/binder/binder_thread.h linux-2.6.22.5-android/drivers/binder/binder_thread.h --- linux-2.6.22.5/drivers/binder/binder_thread.h 1970-01-01 10:00:00.000000000 +1000 +++ linux-2.6.22.5-android/drivers/binder/binder_thread.h 2007-11-20 08:46:07.674251926 +1100 @@ -0,0 +1,153 @@ +/* binder driver + * Copyright (C) 2005 Palmsource, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + +#ifndef BINDER_THREAD_H +#define BINDER_THREAD_H + +#include "binder_defs.h" +#include + +typedef struct binder_thread { + /* These are protected by binder.c's global lock. */ + struct hlist_node node; + bool attachedToThread; /* Expecting a BINDER_THREAD_EXIT. */ + + /* These are managed by binder_proc_t. Nothing else should + touch them. */ + struct binder_thread * next; /* List of all threads */ + struct list_head waitStackEntry; + struct binder_thread * pendingChild; /* Child for bcREQUEST_ROOT_OBJECT */ + struct binder_transaction * nextRequest; /* Return request to waiting thread */ + enum { + WAKE_REASON_NONE = 0, + WAKE_REASON_IDLE, + WAKE_REASON_PROCESS_DEATH + } wakeReason; + + /* Stupid hack. */ + int returnedEventPriority; + + pid_t virtualThid; /* The thid for the transaction thread group */ + atomic_t m_primaryRefs; + atomic_t m_secondaryRefs; + status_t m_err; + pid_t m_thid; + wait_queue_head_t m_wait; + atomic_t m_wake_count; + int m_waitForReply; + int m_consume; + + struct semaphore m_lock; + struct binder_proc * m_team; // the team we belong to + struct binder_transaction * m_reply; + struct binder_transaction * m_pendingReply; + struct binder_transaction * m_pendingRefResolution; + + /* This is the number of primary references on our team + that must be removed when we continue looping. It is + used to keep the team around while processing final + brRELEASE and brDECREFS commands on objects inside it. */ + int m_teamRefs; + + /* Did the driver spawn this thread? */ + bool m_isSpawned : 1; + + /* Is this thread running as a looper? */ + bool m_isLooping : 1; + + /* For driver spawned threads: first time looping? */ + bool m_firstLoop : 1; + + /* Set if thread has determined an immediate reply for a + bcATTEMPT_ACQUIRE. 
In this case, 'short' is true and + 'result' is whether it succeeded. */ + bool m_shortAttemptAcquire : 1; + bool m_resultAttemptAcquire : 1; + + /* Set if this thread structure has been initialized to + reply with a root object to its parent thread. */ + bool m_pendingReplyIsRoot : 1; + + /*! Set if this thread had an error when trying to + receive a child's root reply, to return the result + at the next Read(). */ + bool m_failedRootReceive : 1; + + /* Set if this thread tried to send a root object, but + timed out. */ + bool m_failedRootReply : 1; +} binder_thread_t; + +int binder_thread_GlobalCount(void); + +binder_thread_t * binder_thread_init(pid_t thid, struct binder_proc *team); +void binder_thread_destroy(binder_thread_t *that); + +void binder_thread_Released(binder_thread_t *that); + +void binder_thread_Die(binder_thread_t *that); + +BND_DECLARE_ACQUIRE_RELEASE(binder_thread); +BND_DECLARE_ATTEMPT_ACQUIRE(binder_thread); + +/* Attach parent thread to this thread. The child is set up as if it had + received a transaction, and the first thing it should do is send a reply + that will go back to the parent. This is for bcRETRIEVE_ROOT_OBJECT. */ +bool binder_thread_SetParentThread(binder_thread_t *that, binder_thread_t *replyTo); + +/* Clear the pendingChild field when we have received the reply. */ +void binder_thread_ReleasePendingChild(binder_thread_t *that); + +/* When binder_thread_SetParentThread() is used to wait for the child thread + to send its root object, we can create a binder_thread structure that is + not attached to a binder_proc. This function is called when the child + thread finally gets into the driver, to get its pre-created thread + structure attached to its new process structure. 
+ */
+void binder_thread_AttachProcess(binder_thread_t *that, struct binder_proc *team);
+
+/* Calls from binder_proc_t to block until new requests arrive */
+status_t binder_thread_Snooze(binder_thread_t *that, bigtime_t wakeupTime);
+status_t binder_thread_AcquireIOSem(binder_thread_t *that);
+void binder_thread_Wakeup(binder_thread_t *that);
+
+/* Returning transactions -- reflections and the final reply */
+void binder_thread_Reply(binder_thread_t *that, struct binder_transaction *t);
+void binder_thread_Reflect(binder_thread_t *that, struct binder_transaction *t); /* NOTE(review): see the Reflect macro below, which shadows this prototype */
+
+/* Reply that the target is no longer with us. */
+void binder_thread_ReplyDead(binder_thread_t *that);
+
+bool binder_thread_AttemptExecution(binder_thread_t *that, struct binder_transaction *t);
+void binder_thread_FinishAsync(binder_thread_t *that, struct binder_transaction *t);
+void binder_thread_Sync(binder_thread_t *that);
+
+#define binder_thread_Thid(that) ((that)->m_thid)
+#define binder_thread_Team(that) ((that)->m_team)
+
+#define binder_thread_VirtualThid(that) ((that)->virtualThid)
+
+#define binder_thread_PrimaryRefCount(that) atomic_read(&(that)->m_primaryRefs)
+#define binder_thread_SecondaryRefCount(that) atomic_read(&(that)->m_secondaryRefs)
+
+int binder_thread_Control(binder_thread_t *that, unsigned int cmd, void *buffer);
+int binder_thread_Write(binder_thread_t *that, void *buffer, int size, signed long *consumed);
+int binder_thread_Read(binder_thread_t *that, void *buffer, int size, signed long *consumed);
+
+#define binder_thread_Reflect(that, t) binder_thread_Reply(that, t) /* NOTE(review): this macro shadows the binder_thread_Reflect() prototype above, so every caller actually invokes binder_thread_Reply(); confirm that a standalone Reflect implementation is intentionally absent */
+
+#endif // BINDER_THREAD_H
diff -urN linux-2.6.22.5/drivers/binder/binder_transaction.c linux-2.6.22.5-android/drivers/binder/binder_transaction.c
--- linux-2.6.22.5/drivers/binder/binder_transaction.c	1970-01-01 10:00:00.000000000 +1000
+++ linux-2.6.22.5-android/drivers/binder/binder_transaction.c	2007-11-20 08:46:07.674251926 +1100
@@ -0,0 +1,541 @@
+/* binder driver
+ * Copyright (C) 2005
Palmsource, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + +#include "binder_defs.h" +#include "binder_transaction.h" +#include "binder_proc.h" +#include "binder_thread.h" +#include "binder_node.h" +#include +#include +#include + +static int binder_transaction_print_size = 32 * 1024; +static int binder_transaction_fail_size = 16 * 1024 * 1024; +module_param_named(warn_transaction_size, binder_transaction_print_size, int, 0644); +module_param_named(max_transaction_size, binder_transaction_fail_size, int, 0644); + +#define PURGATORY 0 +#if PURGATORY +static DECLARE_MUTEX(sem); +static binder_transaction_t* head = NULL; +static binder_transaction_t** tail = &head; +static int count = 0; + +static void my_free_trans(binder_transaction_t *t) +{ + down(&sem); + *tail = t; + tail = &t->next; + count++; + if (count > 20) { + t = head; + head = head->next; + kmem_cache_free(transaction_cache, t); + count--; + } + up(&sem); +} +#define ALLOC_TRANS kmem_cache_alloc(transaction_cache, GFP_KERNEL) +#define FREE_TRANS(x) my_free_trans(x) +#else +#define ALLOC_TRANS kmem_cache_alloc(transaction_cache, GFP_KERNEL) +#define FREE_TRANS(x) kmem_cache_free(transaction_cache, x) +#endif + +void binder_transaction_dtor(binder_transaction_t *that); + +void binder_transaction_Init(binder_transaction_t 
*that); +void binder_transaction_debug_dump(binder_transaction_t *that); + +status_t +binder_transaction_ConvertToNodes(binder_transaction_t *that, binder_proc_t *from, iobuffer_t *io) +{ + DPRINTF(4, (KERN_WARNING "%s(%p, %p, %p)\n", __func__, that, from, io)); + if (binder_transaction_RefFlags(that)) return 0; + + if (that->team != from) { + BND_ACQUIRE(binder_proc, from, WEAK, that); + if (that->team) BND_RELEASE(binder_proc, that->team, WEAK, that); + that->team = from; + } + + if (that->offsets_size > 0) { + u8 *ptr = binder_transaction_Data(that); + const size_t *off = binder_transaction_Offsets(that); //(const size_t*)(ptr + INT_ALIGN(that->data_size)); + const size_t *offEnd = off + (that->offsets_size/sizeof(size_t)); + struct flat_binder_object* flat; + + // This function is called before any references have been acquired. + BND_ASSERT((that->flags&tfReferenced) == 0, "ConvertToNodes() already called!"); + that->flags |= tfReferenced; + + BND_FLUSH_CACHE( binder_transaction_UserData(that), + binder_transaction_UserOffsets(that) + + binder_transaction_OffsetsSize(that) ); + + while (off < offEnd) { + bool strong = TRUE; + BND_ASSERT(*off <= (that->data_size-sizeof(struct flat_binder_object)), "!!! ConvertToNodes: type code pointer out of range."); + flat = (struct flat_binder_object*)(ptr + *off++); + switch (flat->type) { + case kPackedLargeBinderHandleType: + DPRINTF(5,(KERN_WARNING "ConvertToNodes B_BINDER_HANDLE_TYPE %ld\n",flat->handle)); + // Retrieve node and acquire reference. + flat->node = binder_proc_Descriptor2Node(from, flat->handle,that, STRONG); + break; + case kPackedLargeBinderType: + DPRINTF(5,(KERN_WARNING "ConvertToNodes B_BINDER_TYPE %p\n",flat->binder)); + // Lookup node and acquire reference. 
+ if (binder_proc_Ptr2Node(from, flat->binder,flat->cookie,&flat->node,io,that, STRONG) != 0) return -EINVAL; + if (binder_transaction_IsRootObject(that)) { + DPRINTF(5,(KERN_WARNING "Making node %p a root node\n", flat->node)); + binder_proc_SetRootObject(from, flat->node); + } + break; + case kPackedLargeBinderWeakHandleType: + DPRINTF(5,(KERN_WARNING "ConvertToNodes B_BINDER_WEAK_HANDLE_TYPE %ld\n",flat->handle)); + // Retrieve node and acquire reference. + flat->node = binder_proc_Descriptor2Node(from, flat->handle,that,WEAK); + strong = FALSE; + break; + case kPackedLargeBinderWeakType: + DPRINTF(5,(KERN_WARNING "ConvertToNodes B_BINDER_WEAK_TYPE %p\n",flat->binder)); + // Lookup node and acquire reference. + if (binder_proc_Ptr2Node(from, flat->binder,flat->cookie,&flat->node,io,that,WEAK) != 0) return -EINVAL; + strong = FALSE; + break; + default: + BND_ASSERT(FALSE, "Bad binder offset given to transaction!"); + DPRINTF(0, (KERN_WARNING "ConvertToNodes: unknown typecode %08lx, off: %p, offEnd: %p\n", flat->type, off, offEnd)); + BND_FLUSH_CACHE(ptr, offEnd); + return -EINVAL; + } + flat->type = strong ? kPackedLargeBinderNodeType : kPackedLargeBinderWeakNodeType; + } + BND_FLUSH_CACHE(ptr, offEnd); + } + + return 0; +} + +status_t +binder_transaction_ConvertFromNodes(binder_transaction_t *that, binder_proc_t *to) +{ + u8 *ptr; + size_t *off; + size_t *offEnd; + DPRINTF(4, (KERN_WARNING "%s(%p, %p)\n", __func__, that, to)); + if (binder_transaction_RefFlags(that)) return 0; + + if (that->team != to) { + BND_ACQUIRE(binder_proc, to, WEAK, that); + if (that->team) BND_RELEASE(binder_proc, that->team, WEAK, that); + that->team = to; + } + + if (that->offsets_size > 0) { + // This function is called after references have been acquired. 
+ BND_ASSERT((that->flags&tfReferenced) != 0, "ConvertToNodes() not called!"); + + ptr = binder_transaction_Data(that); + off = binder_transaction_Offsets(that); //(const size_t*)(ptr + INT_ALIGN(that->data_size)); + offEnd = off + (that->offsets_size/sizeof(size_t)); + struct flat_binder_object* flat; + + BND_FLUSH_CACHE( binder_transaction_UserData(that), + binder_transaction_UserOffsets(that) + + binder_transaction_OffsetsSize(that) ); + while (off < offEnd) { + flat = (struct flat_binder_object*)(ptr + *off++); + binder_node_t *n = flat->node; + if (flat->type == kPackedLargeBinderNodeType) { + if (!n) { + flat->type = kPackedLargeBinderType; + flat->binder = NULL; + flat->cookie = NULL; + } else if (n->m_home == to) { + flat->type = kPackedLargeBinderType; + flat->binder = binder_node_Ptr(n); + flat->cookie = binder_node_Cookie(n); + // Keep a reference on the node so that it doesn't + // go away until this transaction completes. + } else { + flat->type = kPackedLargeBinderHandleType; + flat->handle = binder_proc_Node2Descriptor(to, n, TRUE, STRONG); + flat->cookie = NULL; + // We now have a reference on the node through the + // target team's descriptor, so remove our own ref. + BND_RELEASE(binder_node, n, STRONG, that); + } + } else if (flat->type == kPackedLargeBinderWeakNodeType) { + if (!n) { + flat->type = kPackedLargeBinderWeakType; + flat->binder = NULL; + flat->cookie = NULL; + } else if (n->m_home == to) { + flat->type = kPackedLargeBinderWeakType; + flat->binder = binder_node_Ptr(n); + flat->cookie = binder_node_Cookie(n); + // Keep a reference on the node so that it doesn't + // go away until this transaction completes. + } else { + flat->type = kPackedLargeBinderWeakHandleType; + flat->handle = binder_proc_Node2Descriptor(to, n, TRUE, WEAK); + flat->cookie = NULL; + // We now have a reference on the node through the + // target team's descriptor, so remove our own ref. 
+ BND_RELEASE(binder_node, n, WEAK, that); + } + } else { + BND_ASSERT(FALSE, "Bad binder offset given to transaction!"); + DPRINTF(0, (KERN_WARNING "ConvertToNodes: unknown typecode %08lx, off: %p, offEnd: %p\n", flat->type, off, offEnd)); + BND_FLUSH_CACHE(ptr, offEnd); + return -EINVAL; + } + } + BND_FLUSH_CACHE(ptr, offEnd); + } + + return 0; +} + +void +binder_transaction_ReleaseTarget(binder_transaction_t *that) +{ + DPRINTF(4, (KERN_WARNING "%s(%p)\n", __func__, that)); + if (that->sender) { + DPRINTF(5, (KERN_WARNING "%s(%p) release sender %p\n", __func__, that, that->sender)); + BND_RELEASE(binder_thread, that->sender, WEAK, that); + that->sender = NULL; + } + if (that->receiver) { + DPRINTF(5, (KERN_WARNING "%s(%p) release receiver %p\n", __func__, that, that->receiver)); + BND_RELEASE(binder_thread, that->receiver, WEAK, that); + that->receiver = NULL; + } + + if (that->target) { + DPRINTF(5, (KERN_WARNING "%s(%p) release target %p\n", __func__, that, that->target)); + BND_RELEASE(binder_node, that->target, binder_transaction_RefFlags(that) == tfAttemptAcquire ? WEAK : STRONG,that); + that->target = NULL; + } + DPRINTF(4, (KERN_WARNING "%s(%p) fini\n", __func__, that)); +} + +void +binder_transaction_ReleaseTeam(binder_transaction_t *that) +{ + DPRINTF(4, (KERN_WARNING "%s(%p), team: %p\n", __func__, that, that->team)); + + if (that->team) { + BND_RELEASE(binder_proc, that->team, binder_transaction_RefFlags(that) ? STRONG : WEAK, that); + that->team = NULL; + } +} + +size_t +binder_transaction_MaxIOToNodes(binder_transaction_t *that) +{ + DPRINTF(4, (KERN_WARNING "%s(%p): %d\n", __func__, that, (that->offsets_size/8)*16)); + // Each offsets entry is 4 bytes, and could result in 24 bytes + // being written. (To be more accurate, we could actually look + // at the offsets and only include the ones that are a + // B_BINDER_TYPE or B_BINDER_WEAK_TYPE.) 
+ return (that->offsets_size/4)*24; +} + +binder_proc_t * +binder_transaction_TakeTeam(binder_transaction_t *that, binder_proc_t * me) +{ + binder_proc_t *ret; + DPRINTF(4, (KERN_WARNING "%s(%p, %p)\n", __func__, that, me)); + if (that->team != me || binder_transaction_RefFlags(that)) return NULL; + + ret = that->team; + that->team = NULL; + return ret; +} + +binder_transaction_t* +binder_transaction_CreateRef(u16 refFlags, void *ptr, void *cookie, binder_proc_t *team) +{ + binder_transaction_t* that = ALLOC_TRANS; + DPRINTF(4, (KERN_WARNING "%s(%04x, %p, %p): %p\n", __func__, refFlags, ptr, team, that)); + if (that) { + binder_transaction_Init(that); + BND_ASSERT((refFlags&(~tfRefTransaction)) == 0 && (refFlags&tfRefTransaction) != 0, + "Bad flags to binder_transaction::create_ref()"); + that->flags |= refFlags; + that->data_ptr = ptr; + that->offsets_ptr = cookie; + if (team) { + that->team = team; + BND_ACQUIRE(binder_proc, that->team, STRONG, that); + } + } + return that; +} + +binder_transaction_t* +binder_transaction_Create(u32 _code, size_t _dataSize, const void *_data, size_t _offsetsSize, const void *_offsetsData) +{ + binder_transaction_t* that = ALLOC_TRANS; + DPRINTF(4, (KERN_WARNING "%s(%08x, %u:%p, %u:%p): %p\n", __func__, _code, _dataSize, _data, _offsetsSize, _offsetsData, that)); + if (that) { + binder_transaction_Init(that); + that->code = _code; + BND_ASSERT(_dataSize == 0 || _data != NULL, "Transaction with dataSize > 0, but NULL data!"); + if (_dataSize && _data) { + that->data_size = _dataSize; + that->data_ptr = _data; + BND_ASSERT(_offsetsSize == 0 || _offsetsData != NULL, "Transaction with offsetsSize > 0, but NULL offsets!"); + if (_offsetsSize && _offsetsData) { + that->offsets_size = _offsetsSize; + that->offsets_ptr = _offsetsData; + } + } + } + return that; +} + +binder_transaction_t* binder_transaction_CreateEmpty(void) +{ + binder_transaction_t* that = ALLOC_TRANS; + DPRINTF(4, (KERN_WARNING "%s(void): %p\n", __func__, that)); + if 
(that) binder_transaction_Init(that); + return that; +} + +void binder_transaction_Destroy(binder_transaction_t *that) +{ + DPRINTF(4, (KERN_WARNING "%s(%p)\n", __func__, that)); + if (that) { + binder_transaction_dtor(that); + } +} + +void binder_transaction_DestroyNoRefs(binder_transaction_t *that) +{ + DPRINTF(4, (KERN_WARNING "%s(%p)\n", __func__, that)); + if (that) { + that->offsets_size = 0; + binder_transaction_dtor(that); + } +} + +void binder_transaction_Init(binder_transaction_t *that) +{ + that->next = NULL; + that->target = NULL; + that->sender = NULL; + that->receiver = NULL; + + that->code = 0; + that->team = NULL; + that->flags = 0; + that->priority = B_NORMAL_PRIORITY; // FIXME? + that->data_size = 0; + that->offsets_size = 0; + that->data_ptr = NULL; + that->offsets_ptr = NULL; + + that->map = NULL; +} + +void +binder_transaction_dtor(binder_transaction_t *that) +{ + binder_proc_t *owner = NULL; + DPRINTF(4, (KERN_WARNING "%s(%p)\n", __func__, that)); + if (that->offsets_size > 0) { + DPRINTF(5, (KERN_WARNING " -- have binders to clean up\n")); + if(that->flags & tfReferenced) { + BND_ASSERT((that->map) != NULL, "binder_transaction_dtor that->map == NULL"); + } + else { + DPRINTF(0, (KERN_WARNING "ConvertToNodes() not called on %p! 
that->map == %p\n", that, that->map)); + BND_ASSERT((that->map) == NULL, "binder_transaction_dtor ConvertToNodes() not called and that->map != NULL"); + } + if (that->team && BND_ATTEMPT_ACQUIRE(binder_proc, that->team, STRONG, that)) owner = that->team; + + DPRINTF(5, (KERN_WARNING " -- that->map == %p\n", that->map)); + if(that->map != NULL) { // avoid crash due to corrupt transaction + u8 *ptr = 0; + const size_t *off; + const size_t *offEnd; + struct flat_binder_object* flat; + + ptr = binder_transaction_Data(that); + off = (const size_t*)binder_transaction_Offsets(that); + offEnd = off + (that->offsets_size/sizeof(size_t)); + + BND_FLUSH_CACHE( binder_transaction_UserData(that), + binder_transaction_UserOffsets(that) + + binder_transaction_OffsetsSize(that) ); + while (off < offEnd) { + DPRINTF(9, (KERN_WARNING "type ptr: %p\n", ptr+*off)); + flat = (struct flat_binder_object*)(ptr + *off++); + DPRINTF(9, (KERN_WARNING " type: %08lx\n", flat->type)); + switch (flat->type) { + case kPackedLargeBinderHandleType: + DPRINTF(9, (KERN_WARNING "Delete binder_transaction B_BINDER_HANDLE_TYPE %ld\n",flat->handle)); + // Only call if there are primary references on the team. + // Otherwise, it has already removed all of its handles. + if (owner) binder_proc_UnrefDescriptor(owner, flat->handle, STRONG); + break; + case kPackedLargeBinderType: + // Only do this if there are primary references on the team. + // The team doesn't go away until all published binders are + // removed; after that, there are no references to remove. 
+ if (owner) { + binder_node_t *n; + if (binder_proc_Ptr2Node(owner, flat->binder,flat->cookie,&n,NULL,that, STRONG) == 0) { + if (n) { + BND_RELEASE(binder_node, n, STRONG,that); // once for the grab we just did + BND_RELEASE(binder_node, n, STRONG,that); // and once for the reference this transaction holds + } + } else { + BND_ASSERT(FALSE, "Can't find node!"); + } + } + break; + case kPackedLargeBinderNodeType: + if (flat->node) BND_RELEASE(binder_node, flat->node, STRONG,that); + break; + case kPackedLargeBinderWeakHandleType: + DPRINTF(9, (KERN_WARNING "Delete binder_transaction B_BINDER_HANDLE_TYPE %ld\n",flat->handle)); + // Only call if there are primary references on the team. + // Otherwise, it has already removed all of its handles. + if (owner) binder_proc_UnrefDescriptor(owner, flat->handle, WEAK); + break; + case kPackedLargeBinderWeakType: + // Only do this if there are primary references on the team. + // The team doesn't go away until all published binders are + // removed; after that, there are no references to remove. + if (owner) { + binder_node_t *n; + if (binder_proc_Ptr2Node(owner, flat->binder,flat->cookie,&n,NULL,that,WEAK) == 0) { + if (n) { + BND_RELEASE(binder_node, n, WEAK,that); // once for the grab we just did + BND_RELEASE(binder_node, n, WEAK,that); // and once for the reference this transaction holds + } + } else { + BND_ASSERT(FALSE, "Can't find node!"); + } + } + break; + case kPackedLargeBinderWeakNodeType: + if (flat->node) BND_RELEASE(binder_node, flat->node, WEAK,that); + break; + } + } + BND_FLUSH_CACHE(ptr, offEnd); + } + } + + // release the RAM and address space in the receiver. 
+ if (that->map) { + binder_proc_t* mapProc = that->map->team; + if (mapProc) { + binder_proc_FreeTransactionBuffer(mapProc, that->map); + BND_RELEASE(binder_proc, mapProc, WEAK, that); + } + else printk(KERN_WARNING "%s(%p) -- no team trying to release map %p\n", __func__, that, that->map); + } + + if (owner) BND_RELEASE(binder_proc, owner, STRONG,that); + + binder_transaction_ReleaseTeam(that); + binder_transaction_ReleaseTarget(that); + + // release the RAM + FREE_TRANS(that); +} + +/* We need the recipient team passed in because we can't always know the + * receiver at this point. */ +status_t +binder_transaction_CopyTransactionData(binder_transaction_t *that, binder_proc_t *recipient) +{ + status_t result = -EINVAL; + size_t tSize = INT_ALIGN(that->data_size) + INT_ALIGN(that->offsets_size); + DPRINTF(0, (KERN_WARNING "%s(%p, %p)\n", __func__, that, recipient)); + // Do we need to ensure that->map contains NULL? What do we do if it doesn't? + if(tSize >= binder_transaction_print_size) { + printk(KERN_WARNING "%s-%d: binder_transaction_CopyTransactionData size %d (%d,%d) to %p, reply=%d\n", + current->comm, current->pid, tSize, that->data_size, that->offsets_size, recipient, binder_transaction_IsReply(that)); + } + if (binder_transaction_IsAcquireReply(that)) { + // No data to copy + result = 0; + } else { + // if (tSize >= sizeof(that->data)) { + if(tSize >= binder_transaction_fail_size) { + printk(KERN_ERR "%s-%d: binder_transaction_CopyTransactionData transaction size too big, size %d (%d,%d) to %p\n", + current->comm, current->pid, tSize, that->data_size, that->offsets_size, recipient); + return result; + } + that->map = binder_proc_AllocateTransactionBuffer(recipient, tSize); + if (that->map) { + BND_ACQUIRE(binder_proc, that->map->team, WEAK, that); + + // locate our kernel-space address + u8 *to = page_address(that->map->page); + size_t not_copied; + // copy the data from user-land + BND_FLUSH_CACHE( binder_transaction_UserData(that), + 
binder_transaction_UserData(that) + tSize ); + not_copied = copy_from_user(to, that->data_ptr, that->data_size); + // and the offsets, too + if ((not_copied == 0) && (that->offsets_size != 0)) { + to += INT_ALIGN(that->data_size); + not_copied = copy_from_user(to, that->offsets_ptr, that->offsets_size); + if (not_copied) { + DPRINTF(0, (KERN_WARNING " -- failed to copy %u of %u bytes of offsets from %p to %p\n", not_copied, that->offsets_size, that->offsets_ptr, to)); + } + } else if (not_copied) { + // BUSTED! + DPRINTF(0, (KERN_WARNING " -- Couldn't copy %u of %u bytes from user-land %p to %p\n", not_copied, that->data_size, that->data_ptr, to)); + } + DPRINTF(4, ("Copied transaction %p: data=%p, size=%p, not_copied=%p\n", + that, binder_transaction_Data(that), + binder_transaction_DataSize(that), + not_copied)); + if (binder_transaction_DataSize(that) > 0) { + DPRINTF(4, ("Copied transaction %p: my_first=%p, user_first=%p\n", + that, + (*(u32*)binder_transaction_Data(that)), + (*(u32*)that->data_ptr))); + } + BND_FLUSH_CACHE( binder_transaction_Data(that), + binder_transaction_Data(that) + tSize ); + result = not_copied ? -EFAULT : 0; + } + else { + DPRINTF(0, (KERN_WARNING "binder_transaction_CopyTransactionData() failed to allocate transaction buffer in %p\n", recipient)); + } + // } else { + // // ignore inlined data for now + // printk(KERN_WARNING "Small transaction in binder_transaction_CopyTransactionData\n"); + // binder_transaction_SetInline(that, TRUE); + // } + } + return result; +} + diff -urN linux-2.6.22.5/drivers/binder/binder_transaction.h linux-2.6.22.5-android/drivers/binder/binder_transaction.h --- linux-2.6.22.5/drivers/binder/binder_transaction.h 1970-01-01 10:00:00.000000000 +1000 +++ linux-2.6.22.5-android/drivers/binder/binder_transaction.h 2007-11-20 08:46:07.674251926 +1100 @@ -0,0 +1,127 @@ +/* binder driver + * Copyright (C) 2005 Palmsource, Inc. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + +#ifndef BINDER_TRANSACTION_H +#define BINDER_TRANSACTION_H + +#include "binder_defs.h" +#include "iobuffer.h" +#include // for page_address() + +enum { + tfUserFlags = 0x000F, + + tfIsReply = 0x0100, + tfIsEvent = 0x0200, + tfIsAcquireReply = 0x0400, + tfIsDeadReply = 0x0800, + tfIsFailedReply = 0x0020, + tfIsFreePending = 0x0040, + + tfAttemptAcquire = 0x1000, + tfRelease = 0x2000, + tfDecRefs = 0x3000, + tfRefTransaction = 0xF000, + + tfReferenced = 0x0080 +}; + +typedef struct binder_transaction { + struct binder_transaction * next; // next in the transaction queue + struct binder_node * target; // the receiving binder + struct binder_thread * sender; // the sending thread + struct binder_thread * receiver; // the receiving thread + + u32 code; + struct binder_proc * team; // do we need this? Won't sender or receiver's m_team do? + u16 flags; + s16 priority; + size_t data_size; + size_t offsets_size; + const void * data_ptr; + const void * offsets_ptr; + + // The pointer to the actual transaction data. The binder offsets appear + // at (mapped address + data_size). 
+ struct range_map * map; + // 12 bytes of inlined transaction data: just enough for one binder (type, ptr/descriptor, offset) + u8 data[12]; +} binder_transaction_t; + +binder_transaction_t* binder_transaction_CreateRef(u16 refFlags, void *ptr, void *cookie, struct binder_proc* team /* = NULL */); +binder_transaction_t* binder_transaction_Create(u32 code, size_t dataSize, const void *data, size_t offsetsSize /* = 0 */, const void *offsetsData /* = NULL */); +binder_transaction_t* binder_transaction_CreateEmpty(void); +void binder_transaction_Destroy(binder_transaction_t *that); +/* Call this to destroy a transaction before you have called + ConvertToNodes() on it. This will avoid releasing references + on any nodes in the transaction, which you haven't yet acquired. */ +void binder_transaction_DestroyNoRefs(binder_transaction_t *that); +/* Converts from user-types to kernel-nodes */ +status_t binder_transaction_ConvertToNodes(binder_transaction_t *that, struct binder_proc *from, iobuffer_t *io); +/* Converts from kernel-nodes to user-types */ +status_t binder_transaction_ConvertFromNodes(binder_transaction_t *that, struct binder_proc *to); +void binder_transaction_ReleaseTarget(binder_transaction_t *that); +void binder_transaction_ReleaseTeam(binder_transaction_t *that); + +/* Return the maximum IO bytes that will be written by + ConvertToNodes(). */ +size_t binder_transaction_MaxIOToNodes(binder_transaction_t *that); + +/* If this transaction has a primary reference on its team, + return it and clear the pointer. You now own the reference. 
*/ +struct binder_proc * binder_transaction_TakeTeam(binder_transaction_t *that, struct binder_proc *me); +status_t binder_transaction_CopyTransactionData(binder_transaction_t *that, struct binder_proc *recipient); + +#define INT_ALIGN(x) (((x)+sizeof(int)-1)&~(sizeof(int)-1)) +#define binder_transaction_Data(that) ((u8*)page_address((that)->map->page)) +#define binder_transaction_UserData(that) ((void*)((that)->map->start)) +#define binder_transaction_DataSize(that) ((that)->data_size) +#define binder_transaction_Offsets(that) ((size_t*)(binder_transaction_Data(that)+INT_ALIGN((that)->data_size))) +#define binder_transaction_UserOffsets(that) ((void*)((that)->map->start + INT_ALIGN((that)->data_size))) +#define binder_transaction_OffsetsSize(that) ((that)->offsets_size) + +#define binder_transaction_UserFlags(that) ((that)->flags & tfUserFlags) +#define binder_transaction_RefFlags(that) ((that)->flags & tfRefTransaction) +#define binder_transaction_IsInline(that) ((that)->flags & tfInline) +#define binder_transaction_IsRootObject(that) ((that)->flags & tfRootObject) +#define binder_transaction_IsReply(that) ((that)->flags & tfIsReply) +#define binder_transaction_IsEvent(that) ((that)->flags & tfIsEvent) +#define binder_transaction_IsAcquireReply(that) ((that)->flags & tfIsAcquireReply) +#define binder_transaction_IsDeadReply(that) ((that)->flags & tfIsDeadReply) +#define binder_transaction_IsFailedReply(that) ((that)->flags & tfIsFailedReply) +#define binder_transaction_IsAnyReply(that) ((that)->flags & (tfIsReply|tfIsAcquireReply|tfIsDeadReply)) +#define binder_transaction_IsFreePending(that) ((that)->flags & tfIsFreePending) +#define binder_transaction_IsReferenced(that) ((that)->flags & tfReferenced) + +#define binder_transaction_SetUserFlags(that, f) { (that)->flags = ((that)->flags&(~tfUserFlags)) | (f&tfUserFlags); } +#define binder_transaction_SetInline(that, f) { if (f) (that)->flags |= tfInline; else (that)->flags &= ~tfInline; } +#define 
binder_transaction_SetRootObject(that, f) { if (f) (that)->flags |= tfRootObject; else (that)->flags &= ~tfRootObject; } +#define binder_transaction_SetReply(that, f) { if (f) (that)->flags |= tfIsReply; else (that)->flags &= ~tfIsReply; } +#define binder_transaction_SetDeadReply(that, f) { if (f) (that)->flags |= tfIsDeadReply; else (that)->flags &= ~tfIsDeadReply; } +#define binder_transaction_SetFailedReply(that, f) { if (f) (that)->flags |= tfIsFailedReply; else (that)->flags &= ~tfIsFailedReply; } +#define binder_transaction_SetEvent(that, f) { if (f) (that)->flags |= tfIsEvent; else (that)->flags &= ~tfIsEvent; } +#define binder_transaction_SetAcquireReply(that, f) { if (f) (that)->flags |= tfIsAcquireReply; else (that)->flags &= ~tfIsAcquireReply; } +#define binder_transaction_SetFreePending(that, f) { if (f) (that)->flags |= tfIsFreePending; else (that)->flags &= ~tfIsFreePending; } + +#define binder_transaction_Code(that) ((that)->code) + +#define binder_transaction_Priority(that) ((that)->priority) +#define binder_transaction_SetPriority(that, pri) { (that)->priority = pri; } + + +#endif diff -urN linux-2.6.22.5/drivers/binder/iobuffer.c linux-2.6.22.5-android/drivers/binder/iobuffer.c --- linux-2.6.22.5/drivers/binder/iobuffer.c 1970-01-01 10:00:00.000000000 +1000 +++ linux-2.6.22.5-android/drivers/binder/iobuffer.c 2007-11-20 08:46:07.674251926 +1100 @@ -0,0 +1,112 @@ +/* binder driver + * Copyright (C) 2005 Palmsource, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + +#include "iobuffer.h" +#include "binder_defs.h" +#include + +int iobuffer_init(iobuffer_t *that, unsigned long base, int size, int consumed) { + // require 4 byte alignment for base + if ((base & 0x3) != 0) printk(KERN_WARNING "iobuffer_init() bad buffer alignment\n"); + if ((base & 0x3) != 0) return -EFAULT; + if (!access_ok(VERIFY_WRITE, base, size)) printk(KERN_WARNING "access_ok(): FALSE\n"); + if (!access_ok(VERIFY_WRITE, base, size)) return -EFAULT; + DPRINTF(9, (KERN_WARNING "iobuffer_init(%p, %08lx, %d)\n", that, base, size)); + that->m_base = base; + that->m_size = size; + that->m_offs = that->m_consumed = consumed; + return 0; +} + +int iobuffer_read_raw(iobuffer_t *that, void *data, int size) +{ + if ((that->m_size-that->m_offs) < size) return -EFAULT; + copy_from_user(data, (void*)(that->m_base+that->m_offs), size); + that->m_offs += size; + return 0; +} + +int iobuffer_read_u32(iobuffer_t *that, u32 *data) +{ + if ((that->m_size-that->m_offs) < sizeof(u32)) return -EFAULT; + copy_from_user(data, (void*)(that->m_base+that->m_offs), sizeof(u32)); + that->m_offs += sizeof(u32); + return 0; +} + +int iobuffer_read_void(iobuffer_t *that, void **data) +{ + if ((that->m_size-that->m_offs) < sizeof(void*)) return -EFAULT; + copy_from_user(data, (void*)(that->m_base+that->m_offs), sizeof(void*)); + that->m_offs += sizeof(void*); + return 0; +} + +int iobuffer_write_raw(iobuffer_t *that, const void *data, int size) +{ + if ((that->m_size-that->m_offs) < size) return -EFAULT; + copy_to_user((void*)(that->m_base+that->m_offs), data, size); + that->m_offs += size; + return 0; +} + +int iobuffer_write_u32(iobuffer_t *that, u32 data) +{ + if ((that->m_size-that->m_offs) < sizeof(u32)) return -EFAULT; + // *((u32*)(that->m_base+that->m_offs)) = 
data; + __put_user(data, ((u32*)(that->m_base+that->m_offs))); + that->m_offs += sizeof(u32); + return 0; +} + +int iobuffer_write_void(iobuffer_t *that, const void *data) +{ + if ((that->m_size-that->m_offs) < sizeof(void *)) return -EFAULT; + // *((void **)(that->m_base+that->m_offs)) = data; + __put_user(data, ((void**)(that->m_base+that->m_offs))); + that->m_offs += sizeof(void*); + return 0; +} + +int iobuffer_drain(iobuffer_t *that, int size) { + if (size > (that->m_size-that->m_offs)) size = that->m_size-that->m_offs; + that->m_offs += size; + return size; +} + +int iobuffer_remaining(iobuffer_t *that) +{ + return that->m_size-that->m_offs; +} + +int iobuffer_consumed(iobuffer_t *that) +{ + return that->m_consumed; +} + +void iobuffer_mark_consumed(iobuffer_t *that) +{ + that->m_consumed = that->m_offs; +} + +void iobuffer_remainder(iobuffer_t *that, void **ptr, int *size) +{ + *ptr = ((uint8_t*)that->m_base)+that->m_offs; + *size = that->m_size - that->m_offs; +} + diff -urN linux-2.6.22.5/drivers/binder/iobuffer.h linux-2.6.22.5-android/drivers/binder/iobuffer.h --- linux-2.6.22.5/drivers/binder/iobuffer.h 1970-01-01 10:00:00.000000000 +1000 +++ linux-2.6.22.5-android/drivers/binder/iobuffer.h 2007-11-20 08:46:07.674251926 +1100 @@ -0,0 +1,44 @@ +/* binder driver + * Copyright (C) 2005 Palmsource, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + +#ifndef _IOBUFFER_H_ +#define _IOBUFFER_H_ + +#include + +typedef struct iobuffer { + unsigned long m_base; + int m_offs; + int m_size; + int m_consumed; +} iobuffer_t; + +extern int iobuffer_init(iobuffer_t *that, unsigned long base, int size, int consumed); +extern int iobuffer_read_raw(iobuffer_t *that, void *data, int size); +extern int iobuffer_read_u32(iobuffer_t *that, u32 *data); +extern int iobuffer_read_void(iobuffer_t *that, void **data); +extern int iobuffer_write_raw(iobuffer_t *that, const void *data, int size); +extern int iobuffer_write_u32(iobuffer_t *that, u32 data); +extern int iobuffer_write_void(iobuffer_t *that, const void *data); +extern int iobuffer_drain(iobuffer_t *that, int size); +extern int iobuffer_remaining(iobuffer_t *that); +extern int iobuffer_consumed(iobuffer_t *that); +extern void iobuffer_mark_consumed(iobuffer_t *that); +extern void iobuffer_remainder(iobuffer_t *that, void **ptr, int *size); + +#endif diff -urN linux-2.6.22.5/drivers/binder/LICENSE linux-2.6.22.5-android/drivers/binder/LICENSE --- linux-2.6.22.5/drivers/binder/LICENSE 1970-01-01 10:00:00.000000000 +1000 +++ linux-2.6.22.5-android/drivers/binder/LICENSE 2007-11-20 08:46:07.654250861 +1100 @@ -0,0 +1,281 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. 
By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Library General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. 
If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. 
You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. 
If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. 
(This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. 
Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. 
+ +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. 
If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. 
+ + END OF TERMS AND CONDITIONS + \ No newline at end of file diff -urN linux-2.6.22.5/drivers/binder/Makefile linux-2.6.22.5-android/drivers/binder/Makefile --- linux-2.6.22.5/drivers/binder/Makefile 1970-01-01 10:00:00.000000000 +1000 +++ linux-2.6.22.5-android/drivers/binder/Makefile 2007-11-20 08:46:07.654250861 +1100 @@ -0,0 +1,4 @@ + +obj-$(CONFIG_BINDER) = binderdev.o + +binderdev-objs := iobuffer.o binder.o binder_node.o binder_proc.o binder_thread.o binder_transaction.o \ No newline at end of file diff -urN linux-2.6.22.5/drivers/binder/tester.c linux-2.6.22.5-android/drivers/binder/tester.c --- linux-2.6.22.5/drivers/binder/tester.c 1970-01-01 10:00:00.000000000 +1000 +++ linux-2.6.22.5-android/drivers/binder/tester.c 2007-11-20 08:46:07.674251926 +1100 @@ -0,0 +1,63 @@ +/* binder driver + * Copyright (C) 2005 Palmsource, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +typedef signed long sl_t; +typedef unsigned long ul_t; + +const sl_t cmd_write_limit = 1024; +const sl_t cmd_read_limit = 1024; + +int main(int argc, char **argv) { + int result; + binder_write_read_t bwr; + sl_t write_count = 0; + uint8_t *write_buf = malloc(cmd_write_limit); + uint8_t *read_buf = malloc(cmd_read_limit); + bwr.write_buffer = (ul_t)write_buf; + bwr.write_size = 0; + bwr.read_size = cmd_read_limit; + bwr.read_buffer = (ul_t)read_buf; + uint8_t *wb = write_buf; + + + int fd = open("/dev/binder", O_RDWR); + if (fd < 0) { + printf("Open failed: %s\n", strerror(errno)); + return -1; + } + *(ul_t*)wb = bcSET_CONTEXT_MANAGER; + bwr.write_size += sizeof(ul_t); + wb += sizeof(ul_t); + *(ul_t*)wb = bcENTER_LOOPER; + bwr.write_size += sizeof(ul_t); + result = ioctl(fd, BINDER_WRITE_READ, &bwr); + printf("ioctl(fd, BINDER_WRITE_READ, &bwr): %08x", result); + if (result < 0) printf(" %08x : %s", errno, strerror(errno)); + printf("\n"); + return 0; +} diff -urN linux-2.6.22.5/drivers/char/Kconfig linux-2.6.22.5-android/drivers/char/Kconfig --- linux-2.6.22.5/drivers/char/Kconfig 2007-11-20 08:03:55.269382152 +1100 +++ linux-2.6.22.5-android/drivers/char/Kconfig 2007-11-20 09:04:01.000000000 +1100 @@ -1093,6 +1093,12 @@ depends on ISA || PCI default y +config BINDER + tristate "OpenBinder IPC Driver" + default n + help + from openbinder.org + source "drivers/s390/char/Kconfig" config TS0710_MUX diff -urN linux-2.6.22.5/drivers/char/Kconfig~ linux-2.6.22.5-android/drivers/char/Kconfig~ --- linux-2.6.22.5/drivers/char/Kconfig~ 1970-01-01 10:00:00.000000000 +1000 +++ linux-2.6.22.5-android/drivers/char/Kconfig~ 2007-11-20 08:46:07.674251926 +1100 @@ 
-0,0 +1,1123 @@ +# +# Character device configuration +# + +menu "Character devices" + +config VT + bool "Virtual terminal" if EMBEDDED + depends on !S390 + select INPUT + default y if !VIOCONS + ---help--- + If you say Y here, you will get support for terminal devices with + display and keyboard devices. These are called "virtual" because you + can run several virtual terminals (also called virtual consoles) on + one physical terminal. This is rather useful, for example one + virtual terminal can collect system messages and warnings, another + one can be used for a text-mode user session, and a third could run + an X session, all in parallel. Switching between virtual terminals + is done with certain key combinations, usually Alt-. + + The setterm command ("man setterm") can be used to change the + properties (such as colors or beeping) of a virtual terminal. The + man page console_codes(4) ("man console_codes") contains the special + character sequences that can be used to change those properties + directly. The fonts used on virtual terminals can be changed with + the setfont ("man setfont") command and the key bindings are defined + with the loadkeys ("man loadkeys") command. + + You need at least one virtual terminal device in order to make use + of your keyboard and monitor. Therefore, only people configuring an + embedded system would want to say N here in order to save some + memory; the only way to log into such a system is then via a serial + or network connection. + + If unsure, say Y, or else you won't be able to do much with your new + shiny Linux system :-) + +config VT_CONSOLE + bool "Support for console on virtual terminal" if EMBEDDED + depends on VT + default y + ---help--- + The system console is the device which receives all kernel messages + and warnings and which allows logins in single user mode. If you + answer Y here, a virtual terminal (the device used to interact with + a physical terminal) can be used as system console. 
This is the most + common mode of operations, so you should say Y here unless you want + the kernel messages be output only to a serial port (in which case + you should say Y to "Console on serial port", below). + + If you do say Y here, by default the currently visible virtual + terminal (/dev/tty0) will be used as system console. You can change + that with a kernel command line option such as "console=tty3" which + would use the third virtual terminal as system console. (Try "man + bootparam" or see the documentation of your boot loader (lilo or + loadlin) about how to pass options to the kernel at boot time.) + + If unsure, say Y. + +config NR_TTY_DEVICES + int "Maximum tty device number" + depends on VT + default 63 + ---help--- + This is the highest numbered device created in /dev. You will actually have + NR_TTY_DEVICES+1 devices in /dev. The default is 63, which will result in + 64 /dev entries. The lowest number you can set is 11, anything below that, + and it will default to 11. 63 is also the upper limit so we don't overrun + the serial consoles. + + +config HW_CONSOLE + bool + depends on VT && !S390 && !UML + default y + +config VT_HW_CONSOLE_BINDING + bool "Support for binding and unbinding console drivers" + depends on HW_CONSOLE + default n + ---help--- + The virtual terminal is the device that interacts with the physical + terminal through console drivers. On these systems, at least one + console driver is loaded. In other configurations, additional console + drivers may be enabled, such as the framebuffer console. If more than + 1 console driver is enabled, setting this to 'y' will allow you to + select the console driver that will serve as the backend for the + virtual terminals. + + See for more + information. For framebuffer console users, please refer to + . 
+ +config SERIAL_NONSTANDARD + bool "Non-standard serial port support" + depends on HAS_IOMEM + ---help--- + Say Y here if you have any non-standard serial boards -- boards + which aren't supported using the standard "dumb" serial driver. + This includes intelligent serial boards such as Cyclades, + Digiboards, etc. These are usually used for systems that need many + serial ports because they serve many terminals or dial-in + connections. + + Note that the answer to this question won't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about non-standard serial boards. + + Most people can say N here. + +config COMPUTONE + tristate "Computone IntelliPort Plus serial support" + depends on SERIAL_NONSTANDARD && (ISA || EISA || PCI) + ---help--- + This driver supports the entire family of Intelliport II/Plus + controllers with the exception of the MicroChannel controllers and + products previous to the Intelliport II. These are multiport cards, + which give you many serial ports. You would need something like this + to connect more than two modems to your Linux box, for instance in + order to become a dial-in server. If you have a card like that, say + Y here and read . + + To compile this driver as modules, choose M here: the + modules will be called ip2 and ip2main. + +config ROCKETPORT + tristate "Comtrol RocketPort support" + depends on SERIAL_NONSTANDARD + help + This driver supports Comtrol RocketPort and RocketModem PCI boards. + These boards provide 2, 4, 8, 16, or 32 high-speed serial ports or + modems. For information about the RocketPort/RocketModem boards + and this driver read . + + To compile this driver as a module, choose M here: the + module will be called rocket. + + If you want to compile this driver into the kernel, say Y here. If + you don't have a Comtrol RocketPort/RocketModem card installed, say N. 
+ +config CYCLADES + tristate "Cyclades async mux support" + depends on SERIAL_NONSTANDARD && (PCI || ISA) + ---help--- + This driver supports Cyclades Z and Y multiserial boards. + You would need something like this to connect more than two modems to + your Linux box, for instance in order to become a dial-in server. + + For information about the Cyclades-Z card, read + . + + To compile this driver as a module, choose M here: the + module will be called cyclades. + + If you haven't heard about it, it's safe to say N. + +config CYZ_INTR + bool "Cyclades-Z interrupt mode operation (EXPERIMENTAL)" + depends on EXPERIMENTAL && CYCLADES + help + The Cyclades-Z family of multiport cards allows 2 (two) driver op + modes: polling and interrupt. In polling mode, the driver will check + the status of the Cyclades-Z ports every certain amount of time + (which is called polling cycle and is configurable). In interrupt + mode, it will use an interrupt line (IRQ) in order to check the + status of the Cyclades-Z ports. The default op mode is polling. If + unsure, say N. + +config DIGIEPCA + tristate "Digiboard Intelligent Async Support" + depends on SERIAL_NONSTANDARD + ---help--- + This is a driver for Digi International's Xx, Xeve, and Xem series + of cards which provide multiple serial ports. You would need + something like this to connect more than two modems to your Linux + box, for instance in order to become a dial-in server. This driver + supports the original PC (ISA) boards as well as PCI, and EISA. If + you have a card like this, say Y here and read the file + . + + To compile this driver as a module, choose M here: the + module will be called epca. + +config ESPSERIAL + tristate "Hayes ESP serial port support" + depends on SERIAL_NONSTANDARD && ISA && ISA_DMA_API + help + This is a driver which supports Hayes ESP serial ports. Both single + port cards and multiport cards are supported. Make sure to read + . 
+ + To compile this driver as a module, choose M here: the + module will be called esp. + + If unsure, say N. + +config MOXA_INTELLIO + tristate "Moxa Intellio support" + depends on SERIAL_NONSTANDARD + help + Say Y here if you have a Moxa Intellio multiport serial card. + + To compile this driver as a module, choose M here: the + module will be called moxa. + +config MOXA_SMARTIO + tristate "Moxa SmartIO support (OBSOLETE)" + depends on SERIAL_NONSTANDARD + help + Say Y here if you have a Moxa SmartIO multiport serial card. + + This driver can also be built as a module ( = code which can be + inserted in and removed from the running kernel whenever you want). + The module will be called mxser. If you want to do that, say M + here. + +config MOXA_SMARTIO_NEW + tristate "Moxa SmartIO support v. 2.0" + depends on SERIAL_NONSTANDARD && (PCI || EISA || ISA) + help + Say Y here if you have a Moxa SmartIO multiport serial card and/or + want to help develop a new version of this driver. + + This is upgraded (1.9.1) driver from original Moxa drivers with + changes finally resulting in PCI probing. + + Use at your own risk. + + This driver can also be built as a module. The module will be called + mxser_new. If you want to do that, say M here. + +config ISI + tristate "Multi-Tech multiport card support (EXPERIMENTAL)" + depends on SERIAL_NONSTANDARD && PCI + select FW_LOADER + help + This is a driver for the Multi-Tech cards which provide several + serial ports. The driver is experimental and can currently only be + built as a module. The module will be called isicom. + If you want to do that, choose M here. + +config SYNCLINK + tristate "Microgate SyncLink card support" + depends on SERIAL_NONSTANDARD && PCI && ISA_DMA_API + help + Provides support for the SyncLink ISA and PCI multiprotocol serial + adapters. These adapters support asynchronous and HDLC bit + synchronous communication up to 10Mbps (PCI adapter). 
+
+ This driver can only be built as a module ( = code which can be
+ inserted in and removed from the running kernel whenever you want).
+ The module will be called synclink. If you want to do that, say M
+ here.
+
+config SYNCLINKMP
+ tristate "SyncLink Multiport support"
+ depends on SERIAL_NONSTANDARD
+ help
+ Enable support for the SyncLink Multiport (2 or 4 ports)
+ serial adapter, running asynchronous and HDLC communications up
+ to 2.048Mbps. Each port is independently selectable for
+ RS-232, V.35, RS-449, RS-530, and X.21.
+
+ This driver may be built as a module ( = code which can be
+ inserted in and removed from the running kernel whenever you want).
+ The module will be called synclinkmp. If you want to do that, say M
+ here.
+
+config SYNCLINK_GT
+ tristate "SyncLink GT/AC support"
+ depends on SERIAL_NONSTANDARD && PCI
+ help
+ Support for SyncLink GT and SyncLink AC families of
+ synchronous and asynchronous serial adapters
+ manufactured by Microgate Systems, Ltd. (www.microgate.com)
+
+config N_HDLC
+ tristate "HDLC line discipline support"
+ depends on SERIAL_NONSTANDARD
+ help
+ Allows synchronous HDLC communications with tty device drivers that
+ support synchronous HDLC such as the Microgate SyncLink adapter.
+
+ This driver can only be built as a module ( = code which can be
+ inserted in and removed from the running kernel whenever you want).
+ The module will be called n_hdlc. If you want to do that, say M
+ here.
+
+config RISCOM8
+ tristate "SDL RISCom/8 card support"
+ depends on SERIAL_NONSTANDARD && BROKEN_ON_SMP
+ help
+ This is a driver for the SDL Communications RISCom/8 multiport card,
+ which gives you many serial ports. You would need something like
+ this to connect more than two modems to your Linux box, for instance
+ in order to become a dial-in server. If you have a card like that,
+ say Y here and read the file .
+ + Also it's possible to say M here and compile this driver as kernel + loadable module; the module will be called riscom8. + +config SPECIALIX + tristate "Specialix IO8+ card support" + depends on SERIAL_NONSTANDARD + help + This is a driver for the Specialix IO8+ multiport card (both the + ISA and the PCI version) which gives you many serial ports. You + would need something like this to connect more than two modems to + your Linux box, for instance in order to become a dial-in server. + + If you have a card like that, say Y here and read the file + . Also it's possible to say M here + and compile this driver as kernel loadable module which will be + called specialix. + +config SPECIALIX_RTSCTS + bool "Specialix DTR/RTS pin is RTS" + depends on SPECIALIX + help + The Specialix IO8+ card can only support either RTS or DTR. If you + say N here, the driver will use the pin as "DTR" when the tty is in + software handshake mode. If you say Y here or hardware handshake is + on, it will always be RTS. Read the file + for more information. + +config SX + tristate "Specialix SX (and SI) card support" + depends on SERIAL_NONSTANDARD && (PCI || EISA || ISA) + help + This is a driver for the SX and SI multiport serial cards. + Please read the file for details. + + This driver can only be built as a module ( = code which can be + inserted in and removed from the running kernel whenever you want). + The module will be called sx. If you want to do that, say M here. + +config RIO + tristate "Specialix RIO system support" + depends on SERIAL_NONSTANDARD + help + This is a driver for the Specialix RIO, a smart serial card which + drives an outboard box that can support up to 128 ports. Product + information is at . + There are both ISA and PCI versions. + +config RIO_OLDPCI + bool "Support really old RIO/PCI cards" + depends on RIO + help + Older RIO PCI cards need some initialization-time configuration to + determine the IRQ and some control addresses. 
If you have a RIO and + this doesn't seem to work, try setting this to Y. + +config STALDRV + bool "Stallion multiport serial support" + depends on SERIAL_NONSTANDARD + help + Stallion cards give you many serial ports. You would need something + like this to connect more than two modems to your Linux box, for + instance in order to become a dial-in server. If you say Y here, + you will be asked for your specific card model in the next + questions. Make sure to read in + this case. If you have never heard about all this, it's safe to + say N. + +config STALLION + tristate "Stallion EasyIO or EC8/32 support" + depends on STALDRV && BROKEN_ON_SMP + help + If you have an EasyIO or EasyConnection 8/32 multiport Stallion + card, then this is for you; say Y. Make sure to read + . + + To compile this driver as a module, choose M here: the + module will be called stallion. + +config ISTALLION + tristate "Stallion EC8/64, ONboard, Brumby support" + depends on STALDRV && BROKEN_ON_SMP + help + If you have an EasyConnection 8/64, ONboard, Brumby or Stallion + serial multiport card, say Y here. Make sure to read + . + + To compile this driver as a module, choose M here: the + module will be called istallion. + +config AU1000_UART + bool "Enable Au1000 UART Support" + depends on SERIAL_NONSTANDARD && MIPS + help + If you have an Alchemy AU1000 processor (MIPS based) and you want + to use serial ports, say Y. Otherwise, say N. + +config AU1000_SERIAL_CONSOLE + bool "Enable Au1000 serial console" + depends on AU1000_UART + help + If you have an Alchemy AU1000 processor (MIPS based) and you want + to use a console on a serial port, say Y. Otherwise, say N. + +config SERIAL_DEC + bool "DECstation serial support" + depends on MACH_DECSTATION + default y + help + This selects whether you want to be asked about drivers for + DECstation serial ports. 
+ + Note that the answer to this question won't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about DECstation serial ports. + +config SERIAL_DEC_CONSOLE + bool "Support for console on a DECstation serial port" + depends on SERIAL_DEC + default y + help + If you say Y here, it will be possible to use a serial port as the + system console (the system console is the device which receives all + kernel messages and warnings and which allows logins in single user + mode). Note that the firmware uses ttyS0 as the serial console on + the Maxine and ttyS2 on the others. + + If unsure, say Y. + +config ZS + bool "Z85C30 Serial Support" + depends on SERIAL_DEC + default y + help + Documentation on the Zilog 85C350 serial communications controller + is downloadable at + +config A2232 + tristate "Commodore A2232 serial support (EXPERIMENTAL)" + depends on EXPERIMENTAL && ZORRO && BROKEN_ON_SMP + ---help--- + This option supports the 2232 7-port serial card shipped with the + Amiga 2000 and other Zorro-bus machines, dating from 1989. At + a max of 19,200 bps, the ports are served by a 6551 ACIA UART chip + each, plus a 8520 CIA, and a master 6502 CPU and buffer as well. The + ports were connected with 8 pin DIN connectors on the card bracket, + for which 8 pin to DB25 adapters were supplied. The card also had + jumpers internally to toggle various pinning configurations. + + This driver can be built as a module; but then "generic_serial" + will also be built as a module. This has to be loaded before + "ser_a2232". If you want to do this, answer M here. + +config SGI_SNSC + bool "SGI Altix system controller communication support" + depends on (IA64_SGI_SN2 || IA64_GENERIC) + help + If you have an SGI Altix and you want to enable system + controller communication from user space (you want this!), + say Y. Otherwise, say N. 
+ +config SGI_TIOCX + bool "SGI TIO CX driver support" + depends on (IA64_SGI_SN2 || IA64_GENERIC) + help + If you have an SGI Altix and you have fpga devices attached + to your TIO, say Y here, otherwise say N. + +config SGI_MBCS + tristate "SGI FPGA Core Services driver support" + depends on SGI_TIOCX + help + If you have an SGI Altix with an attached SABrick + say Y or M here, otherwise say N. + +source "drivers/serial/Kconfig" + +config UNIX98_PTYS + bool "Unix98 PTY support" if EMBEDDED + default y + ---help--- + A pseudo terminal (PTY) is a software device consisting of two + halves: a master and a slave. The slave device behaves identical to + a physical terminal; the master device is used by a process to + read data from and write data to the slave, thereby emulating a + terminal. Typical programs for the master side are telnet servers + and xterms. + + Linux has traditionally used the BSD-like names /dev/ptyxx for + masters and /dev/ttyxx for slaves of pseudo terminals. This scheme + has a number of problems. The GNU C library glibc 2.1 and later, + however, supports the Unix98 naming standard: in order to acquire a + pseudo terminal, a process opens /dev/ptmx; the number of the pseudo + terminal is then made available to the process and the pseudo + terminal slave can be accessed as /dev/pts/. What was + traditionally /dev/ttyp2 will then be /dev/pts/2, for example. + + All modern Linux systems use the Unix98 ptys. Say Y unless + you're on an embedded system and want to conserve memory. + +config LEGACY_PTYS + bool "Legacy (BSD) PTY support" + default y + ---help--- + A pseudo terminal (PTY) is a software device consisting of two + halves: a master and a slave. The slave device behaves identical to + a physical terminal; the master device is used by a process to + read data from and write data to the slave, thereby emulating a + terminal. Typical programs for the master side are telnet servers + and xterms. 
+ + Linux has traditionally used the BSD-like names /dev/ptyxx + for masters and /dev/ttyxx for slaves of pseudo + terminals. This scheme has a number of problems, including + security. This option enables these legacy devices; on most + systems, it is safe to say N. + + +config LEGACY_PTY_COUNT + int "Maximum number of legacy PTY in use" + depends on LEGACY_PTYS + range 1 256 + default "256" + ---help--- + The maximum number of legacy PTYs that can be used at any one time. + The default is 256, and should be more than enough. Embedded + systems may want to reduce this to save memory. + + When not in use, each legacy PTY occupies 12 bytes on 32-bit + architectures and 24 bytes on 64-bit architectures. + +config BRIQ_PANEL + tristate 'Total Impact briQ front panel driver' + depends on PPC_CHRP + ---help--- + The briQ is a small footprint CHRP computer with a frontpanel VFD, a + tristate led and two switches. It is the size of a CDROM drive. + + If you have such one and want anything showing on the VFD then you + must answer Y here. + + To compile this driver as a module, choose M here: the + module will be called briq_panel. + + It's safe to say N here. + +config PRINTER + tristate "Parallel printer support" + depends on PARPORT + ---help--- + If you intend to attach a printer to the parallel port of your Linux + box (as opposed to using a serial printer; if the connector at the + printer has 9 or 25 holes ["female"], then it's serial), say Y. + Also read the Printing-HOWTO, available from + . + + It is possible to share one parallel port among several devices + (e.g. printer and ZIP drive) and it is safe to compile the + corresponding drivers into the kernel. + + To compile this driver as a module, choose M here and read + . The module will be called lp. + + If you have several parallel ports, you can specify which ports to + use with the "lp" kernel command line option. 
(Try "man bootparam" + or see the documentation of your boot loader (lilo or loadlin) about + how to pass options to the kernel at boot time.) The syntax of the + "lp" command line option can be found in . + + If you have more than 8 printers, you need to increase the LP_NO + macro in lp.c and the PARPORT_MAX macro in parport.h. + +config LP_CONSOLE + bool "Support for console on line printer" + depends on PRINTER + ---help--- + If you want kernel messages to be printed out as they occur, you + can have a console on the printer. This option adds support for + doing that; to actually get it to happen you need to pass the + option "console=lp0" to the kernel at boot time. + + If the printer is out of paper (or off, or unplugged, or too + busy..) the kernel will stall until the printer is ready again. + By defining CONSOLE_LP_STRICT to 0 (at your own risk) you + can make the kernel continue when this happens, + but it'll lose the kernel messages. + + If unsure, say N. + +config PPDEV + tristate "Support for user-space parallel port device drivers" + depends on PARPORT + ---help--- + Saying Y to this adds support for /dev/parport device nodes. This + is needed for programs that want portable access to the parallel + port, for instance deviceid (which displays Plug-and-Play device + IDs). + + This is the parallel port equivalent of SCSI generic support (sg). + It is safe to say N to this -- it is not needed for normal printing + or parallel port CD-ROM/disk support. + + To compile this driver as a module, choose M here: the + module will be called ppdev. + + If unsure, say N. + +config TIPAR + tristate "Texas Instruments parallel link cable support" + depends on PARPORT + ---help--- + If you own a Texas Instruments graphing calculator and use a + parallel link cable, then you might be interested in this driver. + + If you enable this driver, you will be able to communicate with + your calculator through a set of device nodes under /dev. 
The + main advantage of this driver is that you don't have to be root + to use this precise link cable (depending on the permissions on + the device nodes, though). + + To compile this driver as a module, choose M here: the + module will be called tipar. + + If you don't know what a parallel link cable is or what a Texas + Instruments graphing calculator is, then you probably don't need this + driver. + + If unsure, say N. + +config HVC_DRIVER + bool + help + Users of pSeries machines that want to utilize the hvc console front-end + module for their backend console driver should select this option. + It will automatically be selected if one of the back-end console drivers + is selected. + + +config HVC_CONSOLE + bool "pSeries Hypervisor Virtual Console support" + depends on PPC_PSERIES + select HVC_DRIVER + help + pSeries machines when partitioned support a hypervisor virtual + console. This driver allows each pSeries partition to have a console + which is accessed via the HMC. + +config HVC_ISERIES + bool "iSeries Hypervisor Virtual Console support" + depends on PPC_ISERIES + default y + select HVC_DRIVER + help + iSeries machines support a hypervisor virtual console. + +config HVC_RTAS + bool "IBM RTAS Console support" + depends on PPC_RTAS + select HVC_DRIVER + help + IBM Console device driver which makes use of RTAS + +config HVC_BEAT + bool "Toshiba's Beat Hypervisor Console support" + depends on PPC_CELLEB + select HVC_DRIVER + help + Toshiba's Cell Reference Set Beat Console device driver + +config HVCS + tristate "IBM Hypervisor Virtual Console Server support" + depends on PPC_PSERIES + help + Partitionable IBM Power5 ppc64 machines allow hosting of + firmware virtual consoles from one Linux partition by + another Linux partition. This driver allows console data + from Linux partitions to be accessed through TTY device + interfaces in the device tree of a Linux partition running + this driver. 
+ + To compile this driver as a module, choose M here: the + module will be called hvcs.ko. Additionally, this module + will depend on arch specific APIs exported from hvcserver.ko + which will also be compiled when this driver is built as a + module. + +source "drivers/char/ipmi/Kconfig" + +source "drivers/char/watchdog/Kconfig" + +config DS1620 + tristate "NetWinder thermometer support" + depends on ARCH_NETWINDER + help + Say Y here to include support for the thermal management hardware + found in the NetWinder. This driver allows the user to control the + temperature set points and to read the current temperature. + + It is also possible to say M here to build it as a module (ds1620) + It is recommended to be used on a NetWinder, but it is not a + necessity. + +config NWBUTTON + tristate "NetWinder Button" + depends on ARCH_NETWINDER + ---help--- + If you say Y here and create a character device node /dev/nwbutton + with major and minor numbers 10 and 158 ("man mknod"), then every + time the orange button is pressed a number of times, the number of + times the button was pressed will be written to that device. + + This is most useful for applications, as yet unwritten, which + perform actions based on how many times the button is pressed in a + row. + + Do not hold the button down for too long, as the driver does not + alter the behaviour of the hardware reset circuitry attached to the + button; it will still execute a hard reset if the button is held + down for longer than approximately five seconds. + + To compile this driver as a module, choose M here: the + module will be called nwbutton. + + Most people will answer Y to this question and "Reboot Using Button" + below to be able to initiate a system shutdown from the button. + +config NWBUTTON_REBOOT + bool "Reboot Using Button" + depends on NWBUTTON + help + If you say Y here, then you will be able to initiate a system + shutdown and reboot by pressing the orange button a number of times. 
+ The number of presses to initiate the shutdown is two by default, + but this can be altered by modifying the value of NUM_PRESSES_REBOOT + in nwbutton.h and recompiling the driver or, if you compile the + driver as a module, you can specify the number of presses at load + time with "insmod button reboot_count=". + +config NWFLASH + tristate "NetWinder flash support" + depends on ARCH_NETWINDER + ---help--- + If you say Y here and create a character device /dev/flash with + major 10 and minor 160 you can manipulate the flash ROM containing + the NetWinder firmware. Be careful as accidentally overwriting the + flash contents can render your computer unbootable. On no account + allow random users access to this device. :-) + + To compile this driver as a module, choose M here: the + module will be called nwflash. + + If you're not sure, say N. + +source "drivers/char/hw_random/Kconfig" + +config NVRAM + tristate "/dev/nvram support" + depends on ATARI || X86 || ARM || GENERIC_NVRAM + ---help--- + If you say Y here and create a character special file /dev/nvram + with major number 10 and minor number 144 using mknod ("man mknod"), + you get read and write access to the extra bytes of non-volatile + memory in the real time clock (RTC), which is contained in every PC + and most Ataris. The actual number of bytes varies, depending on the + nvram in the system, but is usually 114 (128-14 for the RTC). + + This memory is conventionally called "CMOS RAM" on PCs and "NVRAM" + on Ataris. /dev/nvram may be used to view settings there, or to + change them (with some utility). It could also be used to frequently + save a few bits of very important data that may not be lost over + power-off and for which writing to disk is too insecure. Note + however that most NVRAM space in a PC belongs to the BIOS and you + should NEVER idly tamper with it. See Ralf Brown's interrupt list + for a guide to the use of CMOS bytes by your BIOS. 
+ + On Atari machines, /dev/nvram is always configured and does not need + to be selected. + + To compile this driver as a module, choose M here: the + module will be called nvram. + +config RTC + tristate "Enhanced Real Time Clock Support" + depends on !PPC && !PARISC && !IA64 && !M68K && (!SPARC || PCI) && !FRV && !ARM && !SUPERH && !S390 + ---help--- + If you say Y here and create a character special file /dev/rtc with + major number 10 and minor number 135 using mknod ("man mknod"), you + will get access to the real time clock (or hardware clock) built + into your computer. + + Every PC has such a clock built in. It can be used to generate + signals from as low as 1Hz up to 8192Hz, and can also be used + as a 24 hour alarm. It reports status information via the file + /proc/driver/rtc and its behaviour is set by various ioctls on + /dev/rtc. + + If you run Linux on a multiprocessor machine and said Y to + "Symmetric Multi Processing" above, you should say Y here to read + and set the RTC in an SMP compatible fashion. + + If you think you have a use for such a device (such as periodic data + sampling), then say Y here, and read + for details. + + To compile this driver as a module, choose M here: the + module will be called rtc. + +config SGI_DS1286 + tristate "SGI DS1286 RTC support" + depends on SGI_IP22 + help + If you say Y here and create a character special file /dev/rtc with + major number 10 and minor number 135 using mknod ("man mknod"), you + will get access to the real time clock built into your computer. + Every SGI has such a clock built in. It reports status information + via the file /proc/rtc and its behaviour is set by various ioctls on + /dev/rtc. + +config SGI_IP27_RTC + bool "SGI M48T35 RTC support" + depends on SGI_IP27 + help + If you say Y here and create a character special file /dev/rtc with + major number 10 and minor number 135 using mknod ("man mknod"), you + will get access to the real time clock built into your computer. 
+ Every SGI has such a clock built in. It reports status information + via the file /proc/rtc and its behaviour is set by various ioctls on + /dev/rtc. + +config GEN_RTC + tristate "Generic /dev/rtc emulation" + depends on RTC!=y && !IA64 && !ARM && !M32R && !SPARC && !FRV && !S390 && !SUPERH + ---help--- + If you say Y here and create a character special file /dev/rtc with + major number 10 and minor number 135 using mknod ("man mknod"), you + will get access to the real time clock (or hardware clock) built + into your computer. + + It reports status information via the file /proc/driver/rtc and its + behaviour is set by various ioctls on /dev/rtc. If you enable the + "extended RTC operation" below it will also provide an emulation + for RTC_UIE which is required by some programs and may improve + precision in some cases. + + To compile this driver as a module, choose M here: the + module will be called genrtc. + +config GEN_RTC_X + bool "Extended RTC operation" + depends on GEN_RTC + help + Provides an emulation for RTC_UIE which is required by some programs + and may improve precision of the generic RTC support in some cases. + +config EFI_RTC + bool "EFI Real Time Clock Services" + depends on IA64 + +config DS1302 + tristate "DS1302 RTC support" + depends on M32R && (PLAT_M32700UT || PLAT_OPSPUT) + help + If you say Y here and create a character special file /dev/rtc with + major number 121 and minor number 0 using mknod ("man mknod"), you + will get access to the real time clock (or hardware clock) built + into your computer. + +config COBALT_LCD + bool "Support for Cobalt LCD" + depends on MIPS_COBALT + help + This option enables support for the LCD display and buttons found + on Cobalt systems through a misc device. + +config DTLK + tristate "Double Talk PC internal speech card support" + depends on ISA + help + This driver is for the DoubleTalk PC, a speech synthesizer + manufactured by RC Systems (). It is also + called the `internal DoubleTalk'. 
+ + To compile this driver as a module, choose M here: the + module will be called dtlk. + +config R3964 + tristate "Siemens R3964 line discipline" + ---help--- + This driver allows synchronous communication with devices using the + Siemens R3964 packet protocol. Unless you are dealing with special + hardware like PLCs, you are unlikely to need this. + + To compile this driver as a module, choose M here: the + module will be called n_r3964. + + If unsure, say N. + +config APPLICOM + tristate "Applicom intelligent fieldbus card support" + depends on PCI + ---help--- + This driver provides the kernel-side support for the intelligent + fieldbus cards made by Applicom International. More information + about these cards can be found on the WWW at the address + , or by email from David Woodhouse + . + + To compile this driver as a module, choose M here: the + module will be called applicom. + + If unsure, say N. + +config SONYPI + tristate "Sony Vaio Programmable I/O Control Device support (EXPERIMENTAL)" + depends on EXPERIMENTAL && X86 && PCI && INPUT && !64BIT + ---help--- + This driver enables access to the Sony Programmable I/O Control + Device which can be found in many (all ?) Sony Vaio laptops. + + If you have one of those laptops, read + , and say Y or M here. + + To compile this driver as a module, choose M here: the + module will be called sonypi. + +config GPIO_TB0219 + tristate "TANBAC TB0219 GPIO support" + depends on TANBAC_TB022X + select GPIO_VR41XX + +source "drivers/char/agp/Kconfig" + +source "drivers/char/drm/Kconfig" + +source "drivers/char/pcmcia/Kconfig" + +config MWAVE + tristate "ACP Modem (Mwave) support" + depends on X86 + select SERIAL_8250 + ---help--- + The ACP modem (Mwave) for Linux is a WinModem. It is composed of a + kernel driver and a user level application. Together these components + support direct attachment to public switched telephone networks (PSTNs) + and support selected world wide countries. 
+ + This version of the ACP Modem driver supports the IBM Thinkpad 600E, + 600, and 770 that include on board ACP modem hardware. + + The modem also supports the standard communications port interface + (ttySx) and is compatible with the Hayes AT Command Set. + + The user level application needed to use this driver can be found at + the IBM Linux Technology Center (LTC) web site: + . + + If you own one of the above IBM Thinkpads which has the Mwave chipset + in it, say Y. + + To compile this driver as a module, choose M here: the + module will be called mwave. + +config SCx200_GPIO + tristate "NatSemi SCx200 GPIO Support" + depends on SCx200 + select NSC_GPIO + help + Give userspace access to the GPIO pins on the National + Semiconductor SCx200 processors. + + If compiled as a module, it will be called scx200_gpio. + +config PC8736x_GPIO + tristate "NatSemi PC8736x GPIO Support" + depends on X86 + default SCx200_GPIO # mostly N + select NSC_GPIO # needed for support routines + help + Give userspace access to the GPIO pins on the National + Semiconductor PC-8736x (x=[03456]) SuperIO chip. The chip + has multiple functional units, inc several managed by + hwmon/pc87360 driver. Tested with PC-87366 + + If compiled as a module, it will be called pc8736x_gpio. + +config NSC_GPIO + tristate "NatSemi Base GPIO Support" + depends on X86_32 + # selected by SCx200_GPIO and PC8736x_GPIO + # what about 2 selectors differing: m != y + help + Common support used (and needed) by scx200_gpio and + pc8736x_gpio drivers. If those drivers are built as + modules, this one will be too, named nsc_gpio + +config CS5535_GPIO + tristate "AMD CS5535/CS5536 GPIO (Geode Companion Device)" + depends on X86_32 + help + Give userspace access to the GPIO pins on the AMD CS5535 and + CS5536 Geode companion devices. + + If compiled as a module, it will be called cs5535_gpio. 
+ +config GPIO_VR41XX + tristate "NEC VR4100 series General-purpose I/O Unit support" + depends on CPU_VR41XX + +config RAW_DRIVER + tristate "RAW driver (/dev/raw/rawN) (OBSOLETE)" + depends on BLOCK + help + The raw driver permits block devices to be bound to /dev/raw/rawN. + Once bound, I/O against /dev/raw/rawN uses efficient zero-copy I/O. + See the raw(8) manpage for more details. + + The raw driver is deprecated and will be removed soon. + Applications should simply open the device (eg /dev/hda1) + with the O_DIRECT flag. + +config MAX_RAW_DEVS + int "Maximum number of RAW devices to support (1-8192)" + depends on RAW_DRIVER + default "256" + help + The maximum number of RAW devices that are supported. + Default is 256. Increase this number in case you need lots of + raw devices. + +config HPET + bool "HPET - High Precision Event Timer" if (X86 || IA64) + default n + depends on ACPI + help + If you say Y here, you will have a miscdevice named "/dev/hpet/". Each + open selects one of the timers supported by the HPET. The timers are + non-periodic and/or periodic. + +config HPET_RTC_IRQ + bool "HPET Control RTC IRQ" if !HPET_EMULATE_RTC + default n + depends on HPET + help + If you say Y here, you will disable RTC_IRQ in drivers/char/rtc.c. It + is assumed the platform called hpet_alloc with the RTC IRQ values for + the HPET timers. + +config HPET_MMAP + bool "Allow mmap of HPET" + default y + depends on HPET + help + If you say Y here, user applications will be able to mmap + the HPET registers. + + In some hardware implementations, the page containing HPET + registers may also contain other things that shouldn't be + exposed to the user. If this applies to your hardware, + say N here. + +config HANGCHECK_TIMER + tristate "Hangcheck timer" + depends on X86 || IA64 || PPC64 || S390 + help + The hangcheck-timer module detects when the system has gone + out to lunch past a certain margin. It can reboot the system + or merely print a warning. 
+
+config MMTIMER
+ tristate "MMTIMER Memory mapped RTC for SGI Altix"
+ depends on IA64_GENERIC || IA64_SGI_SN2
+ default y
+ help
+ The mmtimer device allows direct userspace access to the
+ Altix system timer.
+
+source "drivers/char/tpm/Kconfig"
+
+config TELCLOCK
+ tristate "Telecom clock driver for ATCA SBC"
+ depends on EXPERIMENTAL && X86
+ default n
+ help
+ The telecom clock device is specific to the MPCBL0010 and MPCBL0050
+ ATCA computers and allows direct userspace access to the
+ configuration of the telecom clock configuration settings. This
+ device is used for hardware synchronization across the ATCA backplane
+ fabric. Upon loading, the driver exports a sysfs directory,
+ /sys/devices/platform/telco_clock, with a number of files for
+ controlling the behavior of this hardware.
+
+config DEVPORT
+ bool
+ depends on !M68K
+ depends on ISA || PCI
+ default y
+
+config GOLDFISH_TTY
+ tristate "Goldfish TTY Driver"
+ default n
+ help
+ TTY driver for Goldfish Virtual Platform.
+
+config BINDER
+ tristate "OpenBinder IPC Driver"
+ default n
+ help
+ Driver for the OpenBinder inter-process communication (IPC)
+ mechanism; see openbinder.org for details.
+
+source "drivers/s390/char/Kconfig"
+
+config TS0710_MUX
+ tristate "GSM TS 07.10 Multiplex driver"
+ help
+ This implements the GSM 07.10 multiplex protocol.
+
+config TS0710_MUX_USB_MOTO
+ tristate "Motorola USB support for TS 07.10 Multiplex driver"
+ depends on TS0710_MUX && USB
+ help
+ This adds support for TS 07.10 over USB, as found in Motorola
+ Smartphones.
+ +endmenu + diff -urN linux-2.6.22.5/drivers/char/Kconfig.orig linux-2.6.22.5-android/drivers/char/Kconfig.orig --- linux-2.6.22.5/drivers/char/Kconfig.orig 1970-01-01 10:00:00.000000000 +1000 +++ linux-2.6.22.5-android/drivers/char/Kconfig.orig 2007-11-20 08:22:16.098001727 +1100 @@ -0,0 +1,1111 @@ +# +# Character device configuration +# + +menu "Character devices" + +config VT + bool "Virtual terminal" if EMBEDDED + depends on !S390 + select INPUT + default y if !VIOCONS + ---help--- + If you say Y here, you will get support for terminal devices with + display and keyboard devices. These are called "virtual" because you + can run several virtual terminals (also called virtual consoles) on + one physical terminal. This is rather useful, for example one + virtual terminal can collect system messages and warnings, another + one can be used for a text-mode user session, and a third could run + an X session, all in parallel. Switching between virtual terminals + is done with certain key combinations, usually Alt-. + + The setterm command ("man setterm") can be used to change the + properties (such as colors or beeping) of a virtual terminal. The + man page console_codes(4) ("man console_codes") contains the special + character sequences that can be used to change those properties + directly. The fonts used on virtual terminals can be changed with + the setfont ("man setfont") command and the key bindings are defined + with the loadkeys ("man loadkeys") command. + + You need at least one virtual terminal device in order to make use + of your keyboard and monitor. Therefore, only people configuring an + embedded system would want to say N here in order to save some + memory; the only way to log into such a system is then via a serial + or network connection. 
+ + If unsure, say Y, or else you won't be able to do much with your new + shiny Linux system :-) + +config VT_CONSOLE + bool "Support for console on virtual terminal" if EMBEDDED + depends on VT + default y + ---help--- + The system console is the device which receives all kernel messages + and warnings and which allows logins in single user mode. If you + answer Y here, a virtual terminal (the device used to interact with + a physical terminal) can be used as system console. This is the most + common mode of operations, so you should say Y here unless you want + the kernel messages be output only to a serial port (in which case + you should say Y to "Console on serial port", below). + + If you do say Y here, by default the currently visible virtual + terminal (/dev/tty0) will be used as system console. You can change + that with a kernel command line option such as "console=tty3" which + would use the third virtual terminal as system console. (Try "man + bootparam" or see the documentation of your boot loader (lilo or + loadlin) about how to pass options to the kernel at boot time.) + + If unsure, say Y. + +config NR_TTY_DEVICES + int "Maximum tty device number" + depends on VT + default 63 + ---help--- + This is the highest numbered device created in /dev. You will actually have + NR_TTY_DEVICES+1 devices in /dev. The default is 63, which will result in + 64 /dev entries. The lowest number you can set is 11, anything below that, + and it will default to 11. 63 is also the upper limit so we don't overrun + the serial consoles. + + +config HW_CONSOLE + bool + depends on VT && !S390 && !UML + default y + +config VT_HW_CONSOLE_BINDING + bool "Support for binding and unbinding console drivers" + depends on HW_CONSOLE + default n + ---help--- + The virtual terminal is the device that interacts with the physical + terminal through console drivers. On these systems, at least one + console driver is loaded. 
In other configurations, additional console + drivers may be enabled, such as the framebuffer console. If more than + 1 console driver is enabled, setting this to 'y' will allow you to + select the console driver that will serve as the backend for the + virtual terminals. + + See for more + information. For framebuffer console users, please refer to + . + +config SERIAL_NONSTANDARD + bool "Non-standard serial port support" + depends on HAS_IOMEM + ---help--- + Say Y here if you have any non-standard serial boards -- boards + which aren't supported using the standard "dumb" serial driver. + This includes intelligent serial boards such as Cyclades, + Digiboards, etc. These are usually used for systems that need many + serial ports because they serve many terminals or dial-in + connections. + + Note that the answer to this question won't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about non-standard serial boards. + + Most people can say N here. + +config COMPUTONE + tristate "Computone IntelliPort Plus serial support" + depends on SERIAL_NONSTANDARD && (ISA || EISA || PCI) + ---help--- + This driver supports the entire family of Intelliport II/Plus + controllers with the exception of the MicroChannel controllers and + products previous to the Intelliport II. These are multiport cards, + which give you many serial ports. You would need something like this + to connect more than two modems to your Linux box, for instance in + order to become a dial-in server. If you have a card like that, say + Y here and read . + + To compile this driver as modules, choose M here: the + modules will be called ip2 and ip2main. + +config ROCKETPORT + tristate "Comtrol RocketPort support" + depends on SERIAL_NONSTANDARD + help + This driver supports Comtrol RocketPort and RocketModem PCI boards. + These boards provide 2, 4, 8, 16, or 32 high-speed serial ports or + modems. 
For information about the RocketPort/RocketModem boards + and this driver read . + + To compile this driver as a module, choose M here: the + module will be called rocket. + + If you want to compile this driver into the kernel, say Y here. If + you don't have a Comtrol RocketPort/RocketModem card installed, say N. + +config CYCLADES + tristate "Cyclades async mux support" + depends on SERIAL_NONSTANDARD && (PCI || ISA) + ---help--- + This driver supports Cyclades Z and Y multiserial boards. + You would need something like this to connect more than two modems to + your Linux box, for instance in order to become a dial-in server. + + For information about the Cyclades-Z card, read + . + + To compile this driver as a module, choose M here: the + module will be called cyclades. + + If you haven't heard about it, it's safe to say N. + +config CYZ_INTR + bool "Cyclades-Z interrupt mode operation (EXPERIMENTAL)" + depends on EXPERIMENTAL && CYCLADES + help + The Cyclades-Z family of multiport cards allows 2 (two) driver op + modes: polling and interrupt. In polling mode, the driver will check + the status of the Cyclades-Z ports every certain amount of time + (which is called polling cycle and is configurable). In interrupt + mode, it will use an interrupt line (IRQ) in order to check the + status of the Cyclades-Z ports. The default op mode is polling. If + unsure, say N. + +config DIGIEPCA + tristate "Digiboard Intelligent Async Support" + depends on SERIAL_NONSTANDARD + ---help--- + This is a driver for Digi International's Xx, Xeve, and Xem series + of cards which provide multiple serial ports. You would need + something like this to connect more than two modems to your Linux + box, for instance in order to become a dial-in server. This driver + supports the original PC (ISA) boards as well as PCI, and EISA. If + you have a card like this, say Y here and read the file + . + + To compile this driver as a module, choose M here: the + module will be called epca. 
+ +config ESPSERIAL + tristate "Hayes ESP serial port support" + depends on SERIAL_NONSTANDARD && ISA && ISA_DMA_API + help + This is a driver which supports Hayes ESP serial ports. Both single + port cards and multiport cards are supported. Make sure to read + . + + To compile this driver as a module, choose M here: the + module will be called esp. + + If unsure, say N. + +config MOXA_INTELLIO + tristate "Moxa Intellio support" + depends on SERIAL_NONSTANDARD + help + Say Y here if you have a Moxa Intellio multiport serial card. + + To compile this driver as a module, choose M here: the + module will be called moxa. + +config MOXA_SMARTIO + tristate "Moxa SmartIO support (OBSOLETE)" + depends on SERIAL_NONSTANDARD + help + Say Y here if you have a Moxa SmartIO multiport serial card. + + This driver can also be built as a module ( = code which can be + inserted in and removed from the running kernel whenever you want). + The module will be called mxser. If you want to do that, say M + here. + +config MOXA_SMARTIO_NEW + tristate "Moxa SmartIO support v. 2.0" + depends on SERIAL_NONSTANDARD && (PCI || EISA || ISA) + help + Say Y here if you have a Moxa SmartIO multiport serial card and/or + want to help develop a new version of this driver. + + This is upgraded (1.9.1) driver from original Moxa drivers with + changes finally resulting in PCI probing. + + Use at your own risk. + + This driver can also be built as a module. The module will be called + mxser_new. If you want to do that, say M here. + +config ISI + tristate "Multi-Tech multiport card support (EXPERIMENTAL)" + depends on SERIAL_NONSTANDARD && PCI + select FW_LOADER + help + This is a driver for the Multi-Tech cards which provide several + serial ports. The driver is experimental and can currently only be + built as a module. The module will be called isicom. + If you want to do that, choose M here. 
+ +config SYNCLINK + tristate "Microgate SyncLink card support" + depends on SERIAL_NONSTANDARD && PCI && ISA_DMA_API + help + Provides support for the SyncLink ISA and PCI multiprotocol serial + adapters. These adapters support asynchronous and HDLC bit + synchronous communication up to 10Mbps (PCI adapter). + + This driver can only be built as a module ( = code which can be + inserted in and removed from the running kernel whenever you want). + The module will be called synclink. If you want to do that, say M + here. + +config SYNCLINKMP + tristate "SyncLink Multiport support" + depends on SERIAL_NONSTANDARD + help + Enable support for the SyncLink Multiport (2 or 4 ports) + serial adapter, running asynchronous and HDLC communications up + to 2.048Mbps. Each ports is independently selectable for + RS-232, V.35, RS-449, RS-530, and X.21 + + This driver may be built as a module ( = code which can be + inserted in and removed from the running kernel whenever you want). + The module will be called synclinkmp. If you want to do that, say M + here. + +config SYNCLINK_GT + tristate "SyncLink GT/AC support" + depends on SERIAL_NONSTANDARD && PCI + help + Support for SyncLink GT and SyncLink AC families of + synchronous and asynchronous serial adapters + manufactured by Microgate Systems, Ltd. (www.microgate.com) + +config N_HDLC + tristate "HDLC line discipline support" + depends on SERIAL_NONSTANDARD + help + Allows synchronous HDLC communications with tty device drivers that + support synchronous HDLC such as the Microgate SyncLink adapter. + + This driver can only be built as a module ( = code which can be + inserted in and removed from the running kernel whenever you want). + The module will be called n_hdlc. If you want to do that, say M + here. + +config RISCOM8 + tristate "SDL RISCom/8 card support" + depends on SERIAL_NONSTANDARD && BROKEN_ON_SMP + help + This is a driver for the SDL Communications RISCom/8 multiport card, + which gives you many serial ports. 
You would need something like + this to connect more than two modems to your Linux box, for instance + in order to become a dial-in server. If you have a card like that, + say Y here and read the file . + + Also it's possible to say M here and compile this driver as kernel + loadable module; the module will be called riscom8. + +config SPECIALIX + tristate "Specialix IO8+ card support" + depends on SERIAL_NONSTANDARD + help + This is a driver for the Specialix IO8+ multiport card (both the + ISA and the PCI version) which gives you many serial ports. You + would need something like this to connect more than two modems to + your Linux box, for instance in order to become a dial-in server. + + If you have a card like that, say Y here and read the file + . Also it's possible to say M here + and compile this driver as kernel loadable module which will be + called specialix. + +config SPECIALIX_RTSCTS + bool "Specialix DTR/RTS pin is RTS" + depends on SPECIALIX + help + The Specialix IO8+ card can only support either RTS or DTR. If you + say N here, the driver will use the pin as "DTR" when the tty is in + software handshake mode. If you say Y here or hardware handshake is + on, it will always be RTS. Read the file + for more information. + +config SX + tristate "Specialix SX (and SI) card support" + depends on SERIAL_NONSTANDARD && (PCI || EISA || ISA) + help + This is a driver for the SX and SI multiport serial cards. + Please read the file for details. + + This driver can only be built as a module ( = code which can be + inserted in and removed from the running kernel whenever you want). + The module will be called sx. If you want to do that, say M here. + +config RIO + tristate "Specialix RIO system support" + depends on SERIAL_NONSTANDARD + help + This is a driver for the Specialix RIO, a smart serial card which + drives an outboard box that can support up to 128 ports. Product + information is at . + There are both ISA and PCI versions. 
+ +config RIO_OLDPCI + bool "Support really old RIO/PCI cards" + depends on RIO + help + Older RIO PCI cards need some initialization-time configuration to + determine the IRQ and some control addresses. If you have a RIO and + this doesn't seem to work, try setting this to Y. + +config STALDRV + bool "Stallion multiport serial support" + depends on SERIAL_NONSTANDARD + help + Stallion cards give you many serial ports. You would need something + like this to connect more than two modems to your Linux box, for + instance in order to become a dial-in server. If you say Y here, + you will be asked for your specific card model in the next + questions. Make sure to read in + this case. If you have never heard about all this, it's safe to + say N. + +config STALLION + tristate "Stallion EasyIO or EC8/32 support" + depends on STALDRV && BROKEN_ON_SMP + help + If you have an EasyIO or EasyConnection 8/32 multiport Stallion + card, then this is for you; say Y. Make sure to read + . + + To compile this driver as a module, choose M here: the + module will be called stallion. + +config ISTALLION + tristate "Stallion EC8/64, ONboard, Brumby support" + depends on STALDRV && BROKEN_ON_SMP + help + If you have an EasyConnection 8/64, ONboard, Brumby or Stallion + serial multiport card, say Y here. Make sure to read + . + + To compile this driver as a module, choose M here: the + module will be called istallion. + +config AU1000_UART + bool "Enable Au1000 UART Support" + depends on SERIAL_NONSTANDARD && MIPS + help + If you have an Alchemy AU1000 processor (MIPS based) and you want + to use serial ports, say Y. Otherwise, say N. + +config AU1000_SERIAL_CONSOLE + bool "Enable Au1000 serial console" + depends on AU1000_UART + help + If you have an Alchemy AU1000 processor (MIPS based) and you want + to use a console on a serial port, say Y. Otherwise, say N. 
+ +config SERIAL_DEC + bool "DECstation serial support" + depends on MACH_DECSTATION + default y + help + This selects whether you want to be asked about drivers for + DECstation serial ports. + + Note that the answer to this question won't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about DECstation serial ports. + +config SERIAL_DEC_CONSOLE + bool "Support for console on a DECstation serial port" + depends on SERIAL_DEC + default y + help + If you say Y here, it will be possible to use a serial port as the + system console (the system console is the device which receives all + kernel messages and warnings and which allows logins in single user + mode). Note that the firmware uses ttyS0 as the serial console on + the Maxine and ttyS2 on the others. + + If unsure, say Y. + +config ZS + bool "Z85C30 Serial Support" + depends on SERIAL_DEC + default y + help + Documentation on the Zilog 85C350 serial communications controller + is downloadable at + +config A2232 + tristate "Commodore A2232 serial support (EXPERIMENTAL)" + depends on EXPERIMENTAL && ZORRO && BROKEN_ON_SMP + ---help--- + This option supports the 2232 7-port serial card shipped with the + Amiga 2000 and other Zorro-bus machines, dating from 1989. At + a max of 19,200 bps, the ports are served by a 6551 ACIA UART chip + each, plus a 8520 CIA, and a master 6502 CPU and buffer as well. The + ports were connected with 8 pin DIN connectors on the card bracket, + for which 8 pin to DB25 adapters were supplied. The card also had + jumpers internally to toggle various pinning configurations. + + This driver can be built as a module; but then "generic_serial" + will also be built as a module. This has to be loaded before + "ser_a2232". If you want to do this, answer M here. 
+ +config SGI_SNSC + bool "SGI Altix system controller communication support" + depends on (IA64_SGI_SN2 || IA64_GENERIC) + help + If you have an SGI Altix and you want to enable system + controller communication from user space (you want this!), + say Y. Otherwise, say N. + +config SGI_TIOCX + bool "SGI TIO CX driver support" + depends on (IA64_SGI_SN2 || IA64_GENERIC) + help + If you have an SGI Altix and you have fpga devices attached + to your TIO, say Y here, otherwise say N. + +config SGI_MBCS + tristate "SGI FPGA Core Services driver support" + depends on SGI_TIOCX + help + If you have an SGI Altix with an attached SABrick + say Y or M here, otherwise say N. + +source "drivers/serial/Kconfig" + +config UNIX98_PTYS + bool "Unix98 PTY support" if EMBEDDED + default y + ---help--- + A pseudo terminal (PTY) is a software device consisting of two + halves: a master and a slave. The slave device behaves identical to + a physical terminal; the master device is used by a process to + read data from and write data to the slave, thereby emulating a + terminal. Typical programs for the master side are telnet servers + and xterms. + + Linux has traditionally used the BSD-like names /dev/ptyxx for + masters and /dev/ttyxx for slaves of pseudo terminals. This scheme + has a number of problems. The GNU C library glibc 2.1 and later, + however, supports the Unix98 naming standard: in order to acquire a + pseudo terminal, a process opens /dev/ptmx; the number of the pseudo + terminal is then made available to the process and the pseudo + terminal slave can be accessed as /dev/pts/. What was + traditionally /dev/ttyp2 will then be /dev/pts/2, for example. + + All modern Linux systems use the Unix98 ptys. Say Y unless + you're on an embedded system and want to conserve memory. + +config LEGACY_PTYS + bool "Legacy (BSD) PTY support" + default y + ---help--- + A pseudo terminal (PTY) is a software device consisting of two + halves: a master and a slave. 
The slave device behaves identical to + a physical terminal; the master device is used by a process to + read data from and write data to the slave, thereby emulating a + terminal. Typical programs for the master side are telnet servers + and xterms. + + Linux has traditionally used the BSD-like names /dev/ptyxx + for masters and /dev/ttyxx for slaves of pseudo + terminals. This scheme has a number of problems, including + security. This option enables these legacy devices; on most + systems, it is safe to say N. + + +config LEGACY_PTY_COUNT + int "Maximum number of legacy PTY in use" + depends on LEGACY_PTYS + range 1 256 + default "256" + ---help--- + The maximum number of legacy PTYs that can be used at any one time. + The default is 256, and should be more than enough. Embedded + systems may want to reduce this to save memory. + + When not in use, each legacy PTY occupies 12 bytes on 32-bit + architectures and 24 bytes on 64-bit architectures. + +config BRIQ_PANEL + tristate 'Total Impact briQ front panel driver' + depends on PPC_CHRP + ---help--- + The briQ is a small footprint CHRP computer with a frontpanel VFD, a + tristate led and two switches. It is the size of a CDROM drive. + + If you have such one and want anything showing on the VFD then you + must answer Y here. + + To compile this driver as a module, choose M here: the + module will be called briq_panel. + + It's safe to say N here. + +config PRINTER + tristate "Parallel printer support" + depends on PARPORT + ---help--- + If you intend to attach a printer to the parallel port of your Linux + box (as opposed to using a serial printer; if the connector at the + printer has 9 or 25 holes ["female"], then it's serial), say Y. + Also read the Printing-HOWTO, available from + . + + It is possible to share one parallel port among several devices + (e.g. printer and ZIP drive) and it is safe to compile the + corresponding drivers into the kernel. 
+ + To compile this driver as a module, choose M here and read + . The module will be called lp. + + If you have several parallel ports, you can specify which ports to + use with the "lp" kernel command line option. (Try "man bootparam" + or see the documentation of your boot loader (lilo or loadlin) about + how to pass options to the kernel at boot time.) The syntax of the + "lp" command line option can be found in . + + If you have more than 8 printers, you need to increase the LP_NO + macro in lp.c and the PARPORT_MAX macro in parport.h. + +config LP_CONSOLE + bool "Support for console on line printer" + depends on PRINTER + ---help--- + If you want kernel messages to be printed out as they occur, you + can have a console on the printer. This option adds support for + doing that; to actually get it to happen you need to pass the + option "console=lp0" to the kernel at boot time. + + If the printer is out of paper (or off, or unplugged, or too + busy..) the kernel will stall until the printer is ready again. + By defining CONSOLE_LP_STRICT to 0 (at your own risk) you + can make the kernel continue when this happens, + but it'll lose the kernel messages. + + If unsure, say N. + +config PPDEV + tristate "Support for user-space parallel port device drivers" + depends on PARPORT + ---help--- + Saying Y to this adds support for /dev/parport device nodes. This + is needed for programs that want portable access to the parallel + port, for instance deviceid (which displays Plug-and-Play device + IDs). + + This is the parallel port equivalent of SCSI generic support (sg). + It is safe to say N to this -- it is not needed for normal printing + or parallel port CD-ROM/disk support. + + To compile this driver as a module, choose M here: the + module will be called ppdev. + + If unsure, say N. 
+ +config TIPAR + tristate "Texas Instruments parallel link cable support" + depends on PARPORT + ---help--- + If you own a Texas Instruments graphing calculator and use a + parallel link cable, then you might be interested in this driver. + + If you enable this driver, you will be able to communicate with + your calculator through a set of device nodes under /dev. The + main advantage of this driver is that you don't have to be root + to use this precise link cable (depending on the permissions on + the device nodes, though). + + To compile this driver as a module, choose M here: the + module will be called tipar. + + If you don't know what a parallel link cable is or what a Texas + Instruments graphing calculator is, then you probably don't need this + driver. + + If unsure, say N. + +config HVC_DRIVER + bool + help + Users of pSeries machines that want to utilize the hvc console front-end + module for their backend console driver should select this option. + It will automatically be selected if one of the back-end console drivers + is selected. + + +config HVC_CONSOLE + bool "pSeries Hypervisor Virtual Console support" + depends on PPC_PSERIES + select HVC_DRIVER + help + pSeries machines when partitioned support a hypervisor virtual + console. This driver allows each pSeries partition to have a console + which is accessed via the HMC. + +config HVC_ISERIES + bool "iSeries Hypervisor Virtual Console support" + depends on PPC_ISERIES + default y + select HVC_DRIVER + help + iSeries machines support a hypervisor virtual console. 
+ +config HVC_RTAS + bool "IBM RTAS Console support" + depends on PPC_RTAS + select HVC_DRIVER + help + IBM Console device driver which makes use of RTAS + +config HVC_BEAT + bool "Toshiba's Beat Hypervisor Console support" + depends on PPC_CELLEB + select HVC_DRIVER + help + Toshiba's Cell Reference Set Beat Console device driver + +config HVCS + tristate "IBM Hypervisor Virtual Console Server support" + depends on PPC_PSERIES + help + Partitionable IBM Power5 ppc64 machines allow hosting of + firmware virtual consoles from one Linux partition by + another Linux partition. This driver allows console data + from Linux partitions to be accessed through TTY device + interfaces in the device tree of a Linux partition running + this driver. + + To compile this driver as a module, choose M here: the + module will be called hvcs.ko. Additionally, this module + will depend on arch specific APIs exported from hvcserver.ko + which will also be compiled when this driver is built as a + module. + +source "drivers/char/ipmi/Kconfig" + +source "drivers/char/watchdog/Kconfig" + +config DS1620 + tristate "NetWinder thermometer support" + depends on ARCH_NETWINDER + help + Say Y here to include support for the thermal management hardware + found in the NetWinder. This driver allows the user to control the + temperature set points and to read the current temperature. + + It is also possible to say M here to build it as a module (ds1620) + It is recommended to be used on a NetWinder, but it is not a + necessity. + +config NWBUTTON + tristate "NetWinder Button" + depends on ARCH_NETWINDER + ---help--- + If you say Y here and create a character device node /dev/nwbutton + with major and minor numbers 10 and 158 ("man mknod"), then every + time the orange button is pressed a number of times, the number of + times the button was pressed will be written to that device. 
+ + This is most useful for applications, as yet unwritten, which + perform actions based on how many times the button is pressed in a + row. + + Do not hold the button down for too long, as the driver does not + alter the behaviour of the hardware reset circuitry attached to the + button; it will still execute a hard reset if the button is held + down for longer than approximately five seconds. + + To compile this driver as a module, choose M here: the + module will be called nwbutton. + + Most people will answer Y to this question and "Reboot Using Button" + below to be able to initiate a system shutdown from the button. + +config NWBUTTON_REBOOT + bool "Reboot Using Button" + depends on NWBUTTON + help + If you say Y here, then you will be able to initiate a system + shutdown and reboot by pressing the orange button a number of times. + The number of presses to initiate the shutdown is two by default, + but this can be altered by modifying the value of NUM_PRESSES_REBOOT + in nwbutton.h and recompiling the driver or, if you compile the + driver as a module, you can specify the number of presses at load + time with "insmod button reboot_count=". + +config NWFLASH + tristate "NetWinder flash support" + depends on ARCH_NETWINDER + ---help--- + If you say Y here and create a character device /dev/flash with + major 10 and minor 160 you can manipulate the flash ROM containing + the NetWinder firmware. Be careful as accidentally overwriting the + flash contents can render your computer unbootable. On no account + allow random users access to this device. :-) + + To compile this driver as a module, choose M here: the + module will be called nwflash. + + If you're not sure, say N. 
+ +source "drivers/char/hw_random/Kconfig" + +config NVRAM + tristate "/dev/nvram support" + depends on ATARI || X86 || ARM || GENERIC_NVRAM + ---help--- + If you say Y here and create a character special file /dev/nvram + with major number 10 and minor number 144 using mknod ("man mknod"), + you get read and write access to the extra bytes of non-volatile + memory in the real time clock (RTC), which is contained in every PC + and most Ataris. The actual number of bytes varies, depending on the + nvram in the system, but is usually 114 (128-14 for the RTC). + + This memory is conventionally called "CMOS RAM" on PCs and "NVRAM" + on Ataris. /dev/nvram may be used to view settings there, or to + change them (with some utility). It could also be used to frequently + save a few bits of very important data that may not be lost over + power-off and for which writing to disk is too insecure. Note + however that most NVRAM space in a PC belongs to the BIOS and you + should NEVER idly tamper with it. See Ralf Brown's interrupt list + for a guide to the use of CMOS bytes by your BIOS. + + On Atari machines, /dev/nvram is always configured and does not need + to be selected. + + To compile this driver as a module, choose M here: the + module will be called nvram. + +config RTC + tristate "Enhanced Real Time Clock Support" + depends on !PPC && !PARISC && !IA64 && !M68K && (!SPARC || PCI) && !FRV && !ARM && !SUPERH && !S390 + ---help--- + If you say Y here and create a character special file /dev/rtc with + major number 10 and minor number 135 using mknod ("man mknod"), you + will get access to the real time clock (or hardware clock) built + into your computer. + + Every PC has such a clock built in. It can be used to generate + signals from as low as 1Hz up to 8192Hz, and can also be used + as a 24 hour alarm. It reports status information via the file + /proc/driver/rtc and its behaviour is set by various ioctls on + /dev/rtc. 
+ + If you run Linux on a multiprocessor machine and said Y to + "Symmetric Multi Processing" above, you should say Y here to read + and set the RTC in an SMP compatible fashion. + + If you think you have a use for such a device (such as periodic data + sampling), then say Y here, and read + for details. + + To compile this driver as a module, choose M here: the + module will be called rtc. + +config SGI_DS1286 + tristate "SGI DS1286 RTC support" + depends on SGI_IP22 + help + If you say Y here and create a character special file /dev/rtc with + major number 10 and minor number 135 using mknod ("man mknod"), you + will get access to the real time clock built into your computer. + Every SGI has such a clock built in. It reports status information + via the file /proc/rtc and its behaviour is set by various ioctls on + /dev/rtc. + +config SGI_IP27_RTC + bool "SGI M48T35 RTC support" + depends on SGI_IP27 + help + If you say Y here and create a character special file /dev/rtc with + major number 10 and minor number 135 using mknod ("man mknod"), you + will get access to the real time clock built into your computer. + Every SGI has such a clock built in. It reports status information + via the file /proc/rtc and its behaviour is set by various ioctls on + /dev/rtc. + +config GEN_RTC + tristate "Generic /dev/rtc emulation" + depends on RTC!=y && !IA64 && !ARM && !M32R && !SPARC && !FRV && !S390 && !SUPERH + ---help--- + If you say Y here and create a character special file /dev/rtc with + major number 10 and minor number 135 using mknod ("man mknod"), you + will get access to the real time clock (or hardware clock) built + into your computer. + + It reports status information via the file /proc/driver/rtc and its + behaviour is set by various ioctls on /dev/rtc. If you enable the + "extended RTC operation" below it will also provide an emulation + for RTC_UIE which is required by some programs and may improve + precision in some cases. 
+ + To compile this driver as a module, choose M here: the + module will be called genrtc. + +config GEN_RTC_X + bool "Extended RTC operation" + depends on GEN_RTC + help + Provides an emulation for RTC_UIE which is required by some programs + and may improve precision of the generic RTC support in some cases. + +config EFI_RTC + bool "EFI Real Time Clock Services" + depends on IA64 + +config DS1302 + tristate "DS1302 RTC support" + depends on M32R && (PLAT_M32700UT || PLAT_OPSPUT) + help + If you say Y here and create a character special file /dev/rtc with + major number 121 and minor number 0 using mknod ("man mknod"), you + will get access to the real time clock (or hardware clock) built + into your computer. + +config COBALT_LCD + bool "Support for Cobalt LCD" + depends on MIPS_COBALT + help + This option enables support for the LCD display and buttons found + on Cobalt systems through a misc device. + +config DTLK + tristate "Double Talk PC internal speech card support" + depends on ISA + help + This driver is for the DoubleTalk PC, a speech synthesizer + manufactured by RC Systems (). It is also + called the `internal DoubleTalk'. + + To compile this driver as a module, choose M here: the + module will be called dtlk. + +config R3964 + tristate "Siemens R3964 line discipline" + ---help--- + This driver allows synchronous communication with devices using the + Siemens R3964 packet protocol. Unless you are dealing with special + hardware like PLCs, you are unlikely to need this. + + To compile this driver as a module, choose M here: the + module will be called n_r3964. + + If unsure, say N. + +config APPLICOM + tristate "Applicom intelligent fieldbus card support" + depends on PCI + ---help--- + This driver provides the kernel-side support for the intelligent + fieldbus cards made by Applicom International. More information + about these cards can be found on the WWW at the address + , or by email from David Woodhouse + . 
+ + To compile this driver as a module, choose M here: the + module will be called applicom. + + If unsure, say N. + +config SONYPI + tristate "Sony Vaio Programmable I/O Control Device support (EXPERIMENTAL)" + depends on EXPERIMENTAL && X86 && PCI && INPUT && !64BIT + ---help--- + This driver enables access to the Sony Programmable I/O Control + Device which can be found in many (all ?) Sony Vaio laptops. + + If you have one of those laptops, read + , and say Y or M here. + + To compile this driver as a module, choose M here: the + module will be called sonypi. + +config GPIO_TB0219 + tristate "TANBAC TB0219 GPIO support" + depends on TANBAC_TB022X + select GPIO_VR41XX + +source "drivers/char/agp/Kconfig" + +source "drivers/char/drm/Kconfig" + +source "drivers/char/pcmcia/Kconfig" + +config MWAVE + tristate "ACP Modem (Mwave) support" + depends on X86 + select SERIAL_8250 + ---help--- + The ACP modem (Mwave) for Linux is a WinModem. It is composed of a + kernel driver and a user level application. Together these components + support direct attachment to public switched telephone networks (PSTNs) + and support selected world wide countries. + + This version of the ACP Modem driver supports the IBM Thinkpad 600E, + 600, and 770 that include on board ACP modem hardware. + + The modem also supports the standard communications port interface + (ttySx) and is compatible with the Hayes AT Command Set. + + The user level application needed to use this driver can be found at + the IBM Linux Technology Center (LTC) web site: + . + + If you own one of the above IBM Thinkpads which has the Mwave chipset + in it, say Y. + + To compile this driver as a module, choose M here: the + module will be called mwave. + +config SCx200_GPIO + tristate "NatSemi SCx200 GPIO Support" + depends on SCx200 + select NSC_GPIO + help + Give userspace access to the GPIO pins on the National + Semiconductor SCx200 processors. + + If compiled as a module, it will be called scx200_gpio. 
+ +config PC8736x_GPIO + tristate "NatSemi PC8736x GPIO Support" + depends on X86 + default SCx200_GPIO # mostly N + select NSC_GPIO # needed for support routines + help + Give userspace access to the GPIO pins on the National + Semiconductor PC-8736x (x=[03456]) SuperIO chip. The chip + has multiple functional units, inc several managed by + hwmon/pc87360 driver. Tested with PC-87366 + + If compiled as a module, it will be called pc8736x_gpio. + +config NSC_GPIO + tristate "NatSemi Base GPIO Support" + depends on X86_32 + # selected by SCx200_GPIO and PC8736x_GPIO + # what about 2 selectors differing: m != y + help + Common support used (and needed) by scx200_gpio and + pc8736x_gpio drivers. If those drivers are built as + modules, this one will be too, named nsc_gpio + +config CS5535_GPIO + tristate "AMD CS5535/CS5536 GPIO (Geode Companion Device)" + depends on X86_32 + help + Give userspace access to the GPIO pins on the AMD CS5535 and + CS5536 Geode companion devices. + + If compiled as a module, it will be called cs5535_gpio. + +config GPIO_VR41XX + tristate "NEC VR4100 series General-purpose I/O Unit support" + depends on CPU_VR41XX + +config RAW_DRIVER + tristate "RAW driver (/dev/raw/rawN) (OBSOLETE)" + depends on BLOCK + help + The raw driver permits block devices to be bound to /dev/raw/rawN. + Once bound, I/O against /dev/raw/rawN uses efficient zero-copy I/O. + See the raw(8) manpage for more details. + + The raw driver is deprecated and will be removed soon. + Applications should simply open the device (eg /dev/hda1) + with the O_DIRECT flag. + +config MAX_RAW_DEVS + int "Maximum number of RAW devices to support (1-8192)" + depends on RAW_DRIVER + default "256" + help + The maximum number of RAW devices that are supported. + Default is 256. Increase this number in case you need lots of + raw devices. 
+ +config HPET + bool "HPET - High Precision Event Timer" if (X86 || IA64) + default n + depends on ACPI + help + If you say Y here, you will have a miscdevice named "/dev/hpet/". Each + open selects one of the timers supported by the HPET. The timers are + non-periodic and/or periodic. + +config HPET_RTC_IRQ + bool "HPET Control RTC IRQ" if !HPET_EMULATE_RTC + default n + depends on HPET + help + If you say Y here, you will disable RTC_IRQ in drivers/char/rtc.c. It + is assumed the platform called hpet_alloc with the RTC IRQ values for + the HPET timers. + +config HPET_MMAP + bool "Allow mmap of HPET" + default y + depends on HPET + help + If you say Y here, user applications will be able to mmap + the HPET registers. + + In some hardware implementations, the page containing HPET + registers may also contain other things that shouldn't be + exposed to the user. If this applies to your hardware, + say N here. + +config HANGCHECK_TIMER + tristate "Hangcheck timer" + depends on X86 || IA64 || PPC64 || S390 + help + The hangcheck-timer module detects when the system has gone + out to lunch past a certain margin. It can reboot the system + or merely print a warning. + +config MMTIMER + tristate "MMTIMER Memory mapped RTC for SGI Altix" + depends on IA64_GENERIC || IA64_SGI_SN2 + default y + help + The mmtimer device allows direct userspace access to the + Altix system timer. + +source "drivers/char/tpm/Kconfig" + +config TELCLOCK + tristate "Telecom clock driver for ATCA SBC" + depends on EXPERIMENTAL && X86 + default n + help + The telecom clock device is specific to the MPCBL0010 and MPCBL0050 + ATCA computers and allows direct userspace access to the + configuration of the telecom clock configuration settings. This + device is used for hardware synchronization across the ATCA backplane + fabric. Upon loading, the driver exports a sysfs directory, + /sys/devices/platform/telco_clock, with a number of files for + controlling the behavior of this hardware. 
+ +config DEVPORT + bool + depends on !M68K + depends on ISA || PCI + default y + +source "drivers/s390/char/Kconfig" + +config TS0710_MUX + tristate "GSM TS 07.10 Multiplex driver" + help + This implements the GSM 07.10 multiplex protocol. + +config TS0710_MUX_USB_MOTO + tristate "Motorola USB support for TS 07.10 Multiplex driver" + depends on TS0710_MUX && USB + help + This addrs support for the TS07.10 over USB, as found in Motorola + Smartphones. + +endmenu + diff -urN linux-2.6.22.5/drivers/input/evdev.c linux-2.6.22.5-android/drivers/input/evdev.c --- linux-2.6.22.5/drivers/input/evdev.c 2007-08-23 09:23:54.000000000 +1000 +++ linux-2.6.22.5-android/drivers/input/evdev.c 2007-11-20 08:57:07.000000000 +1100 @@ -20,6 +20,9 @@ #include #include #include +#ifdef CONFIG_ANDROID_POWER +#include +#endif struct evdev { int exist; @@ -39,10 +42,23 @@ struct fasync_struct *fasync; struct evdev *evdev; struct list_head node; +#ifdef CONFIG_ANDROID_POWER + android_suspend_lock_t suspend_lock; +#endif }; static struct evdev *evdev_table[EVDEV_MINORS]; +#ifdef CONFIG_ANDROID_POWER +static void do_gettimeofday_monotonic(struct timeval *tv) +{ + struct timespec ts; + ktime_get_ts(&ts); + tv->tv_sec = ts.tv_sec; + tv->tv_usec = ts.tv_nsec / 1000; +} +#endif + static void evdev_event(struct input_handle *handle, unsigned int type, unsigned int code, int value) { struct evdev *evdev = handle->private; @@ -51,7 +67,12 @@ if (evdev->grab) { client = evdev->grab; +#ifdef CONFIG_ANDROID_POWER + android_lock_suspend_auto_expire(&client->suspend_lock, 5 * HZ); + do_gettimeofday_monotonic(&client->buffer[client->head].time); +#else do_gettimeofday(&client->buffer[client->head].time); +#endif client->buffer[client->head].type = type; client->buffer[client->head].code = code; client->buffer[client->head].value = value; @@ -61,7 +82,12 @@ } else list_for_each_entry(client, &evdev->client_list, node) { +#ifdef CONFIG_ANDROID_POWER + android_lock_suspend_auto_expire(&client->suspend_lock, 
5 * HZ); + do_gettimeofday_monotonic(&client->buffer[client->head].time); +#else do_gettimeofday(&client->buffer[client->head].time); +#endif client->buffer[client->head].type = type; client->buffer[client->head].code = code; client->buffer[client->head].value = value; @@ -121,6 +147,10 @@ evdev_free(evdev); } +#ifdef CONFIG_ANDROID_POWER + android_uninit_suspend_lock(&client->suspend_lock); +#endif + return 0; } @@ -143,6 +173,10 @@ if (!client) return -ENOMEM; +#ifdef CONFIG_ANDROID_POWER + client->suspend_lock.name = "evdev"; + android_init_suspend_lock(&client->suspend_lock); +#endif client->evdev = evdev; list_add_tail(&client->node, &evdev->client_list); @@ -306,6 +340,10 @@ return -EFAULT; client->tail = (client->tail + 1) & (EVDEV_BUFFER_SIZE - 1); +#ifdef CONFIG_ANDROID_POWER + if(client->head == client->tail) + android_unlock_suspend(&client->suspend_lock); +#endif retval += evdev_event_size(); } diff -urN linux-2.6.22.5/drivers/input/evdev.c.orig linux-2.6.22.5-android/drivers/input/evdev.c.orig --- linux-2.6.22.5/drivers/input/evdev.c.orig 1970-01-01 10:00:00.000000000 +1000 +++ linux-2.6.22.5-android/drivers/input/evdev.c.orig 2007-11-20 08:22:22.898363855 +1100 @@ -0,0 +1,741 @@ +/* + * Event char devices, giving access to raw input device events. + * + * Copyright (c) 1999-2002 Vojtech Pavlik + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ */ + +#define EVDEV_MINOR_BASE 64 +#define EVDEV_MINORS 32 +#define EVDEV_BUFFER_SIZE 64 + +#include +#include +#include +#include +#include +#include +#include +#include + +struct evdev { + int exist; + int open; + int minor; + char name[16]; + struct input_handle handle; + wait_queue_head_t wait; + struct evdev_client *grab; + struct list_head client_list; +}; + +struct evdev_client { + struct input_event buffer[EVDEV_BUFFER_SIZE]; + int head; + int tail; + struct fasync_struct *fasync; + struct evdev *evdev; + struct list_head node; +}; + +static struct evdev *evdev_table[EVDEV_MINORS]; + +static void evdev_event(struct input_handle *handle, unsigned int type, unsigned int code, int value) +{ + struct evdev *evdev = handle->private; + struct evdev_client *client; + + if (evdev->grab) { + client = evdev->grab; + + do_gettimeofday(&client->buffer[client->head].time); + client->buffer[client->head].type = type; + client->buffer[client->head].code = code; + client->buffer[client->head].value = value; + client->head = (client->head + 1) & (EVDEV_BUFFER_SIZE - 1); + + kill_fasync(&client->fasync, SIGIO, POLL_IN); + } else + list_for_each_entry(client, &evdev->client_list, node) { + + do_gettimeofday(&client->buffer[client->head].time); + client->buffer[client->head].type = type; + client->buffer[client->head].code = code; + client->buffer[client->head].value = value; + client->head = (client->head + 1) & (EVDEV_BUFFER_SIZE - 1); + + kill_fasync(&client->fasync, SIGIO, POLL_IN); + } + + wake_up_interruptible(&evdev->wait); +} + +static int evdev_fasync(int fd, struct file *file, int on) +{ + struct evdev_client *client = file->private_data; + int retval; + + retval = fasync_helper(fd, file, on, &client->fasync); + + return retval < 0 ? 
retval : 0; +} + +static int evdev_flush(struct file *file, fl_owner_t id) +{ + struct evdev_client *client = file->private_data; + struct evdev *evdev = client->evdev; + + if (!evdev->exist) + return -ENODEV; + + return input_flush_device(&evdev->handle, file); +} + +static void evdev_free(struct evdev *evdev) +{ + evdev_table[evdev->minor] = NULL; + kfree(evdev); +} + +static int evdev_release(struct inode *inode, struct file *file) +{ + struct evdev_client *client = file->private_data; + struct evdev *evdev = client->evdev; + + if (evdev->grab == client) { + input_release_device(&evdev->handle); + evdev->grab = NULL; + } + + evdev_fasync(-1, file, 0); + list_del(&client->node); + kfree(client); + + if (!--evdev->open) { + if (evdev->exist) + input_close_device(&evdev->handle); + else + evdev_free(evdev); + } + + return 0; +} + +static int evdev_open(struct inode *inode, struct file *file) +{ + struct evdev_client *client; + struct evdev *evdev; + int i = iminor(inode) - EVDEV_MINOR_BASE; + int error; + + if (i >= EVDEV_MINORS) + return -ENODEV; + + evdev = evdev_table[i]; + + if (!evdev || !evdev->exist) + return -ENODEV; + + client = kzalloc(sizeof(struct evdev_client), GFP_KERNEL); + if (!client) + return -ENOMEM; + + client->evdev = evdev; + list_add_tail(&client->node, &evdev->client_list); + + if (!evdev->open++ && evdev->exist) { + error = input_open_device(&evdev->handle); + if (error) { + list_del(&client->node); + kfree(client); + return error; + } + } + + file->private_data = client; + return 0; +} + +#ifdef CONFIG_COMPAT + +struct input_event_compat { + struct compat_timeval time; + __u16 type; + __u16 code; + __s32 value; +}; + +/* Note to the author of this code: did it ever occur to + you why the ifdefs are needed? Think about it again. 
-AK */ +#ifdef CONFIG_X86_64 +# define COMPAT_TEST is_compat_task() +#elif defined(CONFIG_IA64) +# define COMPAT_TEST IS_IA32_PROCESS(task_pt_regs(current)) +#elif defined(CONFIG_S390) +# define COMPAT_TEST test_thread_flag(TIF_31BIT) +#elif defined(CONFIG_MIPS) +# define COMPAT_TEST (current->thread.mflags & MF_32BIT_ADDR) +#else +# define COMPAT_TEST test_thread_flag(TIF_32BIT) +#endif + +static inline size_t evdev_event_size(void) +{ + return COMPAT_TEST ? + sizeof(struct input_event_compat) : sizeof(struct input_event); +} + +static int evdev_event_from_user(const char __user *buffer, struct input_event *event) +{ + if (COMPAT_TEST) { + struct input_event_compat compat_event; + + if (copy_from_user(&compat_event, buffer, sizeof(struct input_event_compat))) + return -EFAULT; + + event->time.tv_sec = compat_event.time.tv_sec; + event->time.tv_usec = compat_event.time.tv_usec; + event->type = compat_event.type; + event->code = compat_event.code; + event->value = compat_event.value; + + } else { + if (copy_from_user(event, buffer, sizeof(struct input_event))) + return -EFAULT; + } + + return 0; +} + +static int evdev_event_to_user(char __user *buffer, const struct input_event *event) +{ + if (COMPAT_TEST) { + struct input_event_compat compat_event; + + compat_event.time.tv_sec = event->time.tv_sec; + compat_event.time.tv_usec = event->time.tv_usec; + compat_event.type = event->type; + compat_event.code = event->code; + compat_event.value = event->value; + + if (copy_to_user(buffer, &compat_event, sizeof(struct input_event_compat))) + return -EFAULT; + + } else { + if (copy_to_user(buffer, event, sizeof(struct input_event))) + return -EFAULT; + } + + return 0; +} + +#else + +static inline size_t evdev_event_size(void) +{ + return sizeof(struct input_event); +} + +static int evdev_event_from_user(const char __user *buffer, struct input_event *event) +{ + if (copy_from_user(event, buffer, sizeof(struct input_event))) + return -EFAULT; + + return 0; +} + +static int 
evdev_event_to_user(char __user *buffer, const struct input_event *event) +{ + if (copy_to_user(buffer, event, sizeof(struct input_event))) + return -EFAULT; + + return 0; +} + +#endif /* CONFIG_COMPAT */ + +static ssize_t evdev_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) +{ + struct evdev_client *client = file->private_data; + struct evdev *evdev = client->evdev; + struct input_event event; + int retval = 0; + + if (!evdev->exist) + return -ENODEV; + + while (retval < count) { + + if (evdev_event_from_user(buffer + retval, &event)) + return -EFAULT; + input_inject_event(&evdev->handle, event.type, event.code, event.value); + retval += evdev_event_size(); + } + + return retval; +} + +static ssize_t evdev_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) +{ + struct evdev_client *client = file->private_data; + struct evdev *evdev = client->evdev; + int retval; + + if (count < evdev_event_size()) + return -EINVAL; + + if (client->head == client->tail && evdev->exist && (file->f_flags & O_NONBLOCK)) + return -EAGAIN; + + retval = wait_event_interruptible(evdev->wait, + client->head != client->tail || !evdev->exist); + if (retval) + return retval; + + if (!evdev->exist) + return -ENODEV; + + while (client->head != client->tail && retval + evdev_event_size() <= count) { + + struct input_event *event = (struct input_event *) client->buffer + client->tail; + + if (evdev_event_to_user(buffer + retval, event)) + return -EFAULT; + + client->tail = (client->tail + 1) & (EVDEV_BUFFER_SIZE - 1); + retval += evdev_event_size(); + } + + return retval; +} + +/* No kernel lock - fine */ +static unsigned int evdev_poll(struct file *file, poll_table *wait) +{ + struct evdev_client *client = file->private_data; + struct evdev *evdev = client->evdev; + + poll_wait(file, &evdev->wait, wait); + return ((client->head == client->tail) ? 0 : (POLLIN | POLLRDNORM)) | + (evdev->exist ? 
0 : (POLLHUP | POLLERR)); +} + +#ifdef CONFIG_COMPAT + +#define BITS_PER_LONG_COMPAT (sizeof(compat_long_t) * 8) +#define NBITS_COMPAT(x) ((((x) - 1) / BITS_PER_LONG_COMPAT) + 1) + +#ifdef __BIG_ENDIAN +static int bits_to_user(unsigned long *bits, unsigned int maxbit, + unsigned int maxlen, void __user *p, int compat) +{ + int len, i; + + if (compat) { + len = NBITS_COMPAT(maxbit) * sizeof(compat_long_t); + if (len > maxlen) + len = maxlen; + + for (i = 0; i < len / sizeof(compat_long_t); i++) + if (copy_to_user((compat_long_t __user *) p + i, + (compat_long_t *) bits + + i + 1 - ((i % 2) << 1), + sizeof(compat_long_t))) + return -EFAULT; + } else { + len = NBITS(maxbit) * sizeof(long); + if (len > maxlen) + len = maxlen; + + if (copy_to_user(p, bits, len)) + return -EFAULT; + } + + return len; +} +#else +static int bits_to_user(unsigned long *bits, unsigned int maxbit, + unsigned int maxlen, void __user *p, int compat) +{ + int len = compat ? + NBITS_COMPAT(maxbit) * sizeof(compat_long_t) : + NBITS(maxbit) * sizeof(long); + + if (len > maxlen) + len = maxlen; + + return copy_to_user(p, bits, len) ? -EFAULT : len; +} +#endif /* __BIG_ENDIAN */ + +#else + +static int bits_to_user(unsigned long *bits, unsigned int maxbit, + unsigned int maxlen, void __user *p, int compat) +{ + int len = NBITS(maxbit) * sizeof(long); + + if (len > maxlen) + len = maxlen; + + return copy_to_user(p, bits, len) ? -EFAULT : len; +} + +#endif /* CONFIG_COMPAT */ + +static int str_to_user(const char *str, unsigned int maxlen, void __user *p) +{ + int len; + + if (!str) + return -ENOENT; + + len = strlen(str) + 1; + if (len > maxlen) + len = maxlen; + + return copy_to_user(p, str, len) ? 
-EFAULT : len; +} + +static long evdev_ioctl_handler(struct file *file, unsigned int cmd, + void __user *p, int compat_mode) +{ + struct evdev_client *client = file->private_data; + struct evdev *evdev = client->evdev; + struct input_dev *dev = evdev->handle.dev; + struct input_absinfo abs; + struct ff_effect effect; + int __user *ip = (int __user *)p; + int i, t, u, v; + int error; + + if (!evdev->exist) + return -ENODEV; + + switch (cmd) { + + case EVIOCGVERSION: + return put_user(EV_VERSION, ip); + + case EVIOCGID: + if (copy_to_user(p, &dev->id, sizeof(struct input_id))) + return -EFAULT; + return 0; + + case EVIOCGREP: + if (!test_bit(EV_REP, dev->evbit)) + return -ENOSYS; + if (put_user(dev->rep[REP_DELAY], ip)) + return -EFAULT; + if (put_user(dev->rep[REP_PERIOD], ip + 1)) + return -EFAULT; + return 0; + + case EVIOCSREP: + if (!test_bit(EV_REP, dev->evbit)) + return -ENOSYS; + if (get_user(u, ip)) + return -EFAULT; + if (get_user(v, ip + 1)) + return -EFAULT; + + input_inject_event(&evdev->handle, EV_REP, REP_DELAY, u); + input_inject_event(&evdev->handle, EV_REP, REP_PERIOD, v); + + return 0; + + case EVIOCGKEYCODE: + if (get_user(t, ip)) + return -EFAULT; + + error = dev->getkeycode(dev, t, &v); + if (error) + return error; + + if (put_user(v, ip + 1)) + return -EFAULT; + + return 0; + + case EVIOCSKEYCODE: + if (get_user(t, ip) || get_user(v, ip + 1)) + return -EFAULT; + + return dev->setkeycode(dev, t, v); + + case EVIOCSFF: + if (copy_from_user(&effect, p, sizeof(effect))) + return -EFAULT; + + error = input_ff_upload(dev, &effect, file); + + if (put_user(effect.id, &(((struct ff_effect __user *)p)->id))) + return -EFAULT; + + return error; + + case EVIOCRMFF: + return input_ff_erase(dev, (int)(unsigned long) p, file); + + case EVIOCGEFFECTS: + i = test_bit(EV_FF, dev->evbit) ? 
dev->ff->max_effects : 0; + if (put_user(i, ip)) + return -EFAULT; + return 0; + + case EVIOCGRAB: + if (p) { + if (evdev->grab) + return -EBUSY; + if (input_grab_device(&evdev->handle)) + return -EBUSY; + evdev->grab = client; + return 0; + } else { + if (evdev->grab != client) + return -EINVAL; + input_release_device(&evdev->handle); + evdev->grab = NULL; + return 0; + } + + default: + + if (_IOC_TYPE(cmd) != 'E') + return -EINVAL; + + if (_IOC_DIR(cmd) == _IOC_READ) { + + if ((_IOC_NR(cmd) & ~EV_MAX) == _IOC_NR(EVIOCGBIT(0,0))) { + + unsigned long *bits; + int len; + + switch (_IOC_NR(cmd) & EV_MAX) { + case 0: bits = dev->evbit; len = EV_MAX; break; + case EV_KEY: bits = dev->keybit; len = KEY_MAX; break; + case EV_REL: bits = dev->relbit; len = REL_MAX; break; + case EV_ABS: bits = dev->absbit; len = ABS_MAX; break; + case EV_MSC: bits = dev->mscbit; len = MSC_MAX; break; + case EV_LED: bits = dev->ledbit; len = LED_MAX; break; + case EV_SND: bits = dev->sndbit; len = SND_MAX; break; + case EV_FF: bits = dev->ffbit; len = FF_MAX; break; + case EV_SW: bits = dev->swbit; len = SW_MAX; break; + default: return -EINVAL; + } + return bits_to_user(bits, len, _IOC_SIZE(cmd), p, compat_mode); + } + + if (_IOC_NR(cmd) == _IOC_NR(EVIOCGKEY(0))) + return bits_to_user(dev->key, KEY_MAX, _IOC_SIZE(cmd), + p, compat_mode); + + if (_IOC_NR(cmd) == _IOC_NR(EVIOCGLED(0))) + return bits_to_user(dev->led, LED_MAX, _IOC_SIZE(cmd), + p, compat_mode); + + if (_IOC_NR(cmd) == _IOC_NR(EVIOCGSND(0))) + return bits_to_user(dev->snd, SND_MAX, _IOC_SIZE(cmd), + p, compat_mode); + + if (_IOC_NR(cmd) == _IOC_NR(EVIOCGSW(0))) + return bits_to_user(dev->sw, SW_MAX, _IOC_SIZE(cmd), + p, compat_mode); + + if (_IOC_NR(cmd) == _IOC_NR(EVIOCGNAME(0))) + return str_to_user(dev->name, _IOC_SIZE(cmd), p); + + if (_IOC_NR(cmd) == _IOC_NR(EVIOCGPHYS(0))) + return str_to_user(dev->phys, _IOC_SIZE(cmd), p); + + if (_IOC_NR(cmd) == _IOC_NR(EVIOCGUNIQ(0))) + return str_to_user(dev->uniq, _IOC_SIZE(cmd), 
p); + + if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCGABS(0))) { + + t = _IOC_NR(cmd) & ABS_MAX; + + abs.value = dev->abs[t]; + abs.minimum = dev->absmin[t]; + abs.maximum = dev->absmax[t]; + abs.fuzz = dev->absfuzz[t]; + abs.flat = dev->absflat[t]; + + if (copy_to_user(p, &abs, sizeof(struct input_absinfo))) + return -EFAULT; + + return 0; + } + + } + + if (_IOC_DIR(cmd) == _IOC_WRITE) { + + if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCSABS(0))) { + + t = _IOC_NR(cmd) & ABS_MAX; + + if (copy_from_user(&abs, p, sizeof(struct input_absinfo))) + return -EFAULT; + + dev->abs[t] = abs.value; + dev->absmin[t] = abs.minimum; + dev->absmax[t] = abs.maximum; + dev->absfuzz[t] = abs.fuzz; + dev->absflat[t] = abs.flat; + + return 0; + } + } + } + return -EINVAL; +} + +static long evdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + return evdev_ioctl_handler(file, cmd, (void __user *)arg, 0); +} + +#ifdef CONFIG_COMPAT +static long evdev_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) +{ + return evdev_ioctl_handler(file, cmd, compat_ptr(arg), 1); +} +#endif + +static const struct file_operations evdev_fops = { + .owner = THIS_MODULE, + .read = evdev_read, + .write = evdev_write, + .poll = evdev_poll, + .open = evdev_open, + .release = evdev_release, + .unlocked_ioctl = evdev_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = evdev_ioctl_compat, +#endif + .fasync = evdev_fasync, + .flush = evdev_flush +}; + +static int evdev_connect(struct input_handler *handler, struct input_dev *dev, + const struct input_device_id *id) +{ + struct evdev *evdev; + struct class_device *cdev; + dev_t devt; + int minor; + int error; + + for (minor = 0; minor < EVDEV_MINORS && evdev_table[minor]; minor++); + if (minor == EVDEV_MINORS) { + printk(KERN_ERR "evdev: no more free evdev devices\n"); + return -ENFILE; + } + + evdev = kzalloc(sizeof(struct evdev), GFP_KERNEL); + if (!evdev) + return -ENOMEM; + + INIT_LIST_HEAD(&evdev->client_list); + 
init_waitqueue_head(&evdev->wait); + + evdev->exist = 1; + evdev->minor = minor; + evdev->handle.dev = dev; + evdev->handle.name = evdev->name; + evdev->handle.handler = handler; + evdev->handle.private = evdev; + sprintf(evdev->name, "event%d", minor); + + evdev_table[minor] = evdev; + + devt = MKDEV(INPUT_MAJOR, EVDEV_MINOR_BASE + minor), + + cdev = class_device_create(&input_class, &dev->cdev, devt, + dev->cdev.dev, evdev->name); + if (IS_ERR(cdev)) { + error = PTR_ERR(cdev); + goto err_free_evdev; + } + + /* temporary symlink to keep userspace happy */ + error = sysfs_create_link(&input_class.subsys.kobj, + &cdev->kobj, evdev->name); + if (error) + goto err_cdev_destroy; + + error = input_register_handle(&evdev->handle); + if (error) + goto err_remove_link; + + return 0; + + err_remove_link: + sysfs_remove_link(&input_class.subsys.kobj, evdev->name); + err_cdev_destroy: + class_device_destroy(&input_class, devt); + err_free_evdev: + kfree(evdev); + evdev_table[minor] = NULL; + return error; +} + +static void evdev_disconnect(struct input_handle *handle) +{ + struct evdev *evdev = handle->private; + struct evdev_client *client; + + input_unregister_handle(handle); + + sysfs_remove_link(&input_class.subsys.kobj, evdev->name); + class_device_destroy(&input_class, + MKDEV(INPUT_MAJOR, EVDEV_MINOR_BASE + evdev->minor)); + evdev->exist = 0; + + if (evdev->open) { + input_flush_device(handle, NULL); + input_close_device(handle); + list_for_each_entry(client, &evdev->client_list, node) + kill_fasync(&client->fasync, SIGIO, POLL_HUP); + wake_up_interruptible(&evdev->wait); + } else + evdev_free(evdev); +} + +static const struct input_device_id evdev_ids[] = { + { .driver_info = 1 }, /* Matches all devices */ + { }, /* Terminating zero entry */ +}; + +MODULE_DEVICE_TABLE(input, evdev_ids); + +static struct input_handler evdev_handler = { + .event = evdev_event, + .connect = evdev_connect, + .disconnect = evdev_disconnect, + .fops = &evdev_fops, + .minor = 
EVDEV_MINOR_BASE, + .name = "evdev", + .id_table = evdev_ids, +}; + +static int __init evdev_init(void) +{ + return input_register_handler(&evdev_handler); +} + +static void __exit evdev_exit(void) +{ + input_unregister_handler(&evdev_handler); +} + +module_init(evdev_init); +module_exit(evdev_exit); + +MODULE_AUTHOR("Vojtech Pavlik "); +MODULE_DESCRIPTION("Input driver event char devices"); +MODULE_LICENSE("GPL"); diff -urN linux-2.6.22.5/drivers/misc/Kconfig linux-2.6.22.5-android/drivers/misc/Kconfig --- linux-2.6.22.5/drivers/misc/Kconfig 2007-08-23 09:23:54.000000000 +1000 +++ linux-2.6.22.5-android/drivers/misc/Kconfig 2007-11-20 09:03:43.000000000 +1100 @@ -187,5 +187,9 @@ If you are not sure, say Y here. +config LOW_MEMORY_KILLER + tristate "Low Memory Killer" + ---help--- + Register processes to be killed when memory is low. endmenu diff -urN linux-2.6.22.5/drivers/misc/Kconfig~ linux-2.6.22.5-android/drivers/misc/Kconfig~ --- linux-2.6.22.5/drivers/misc/Kconfig~ 1970-01-01 10:00:00.000000000 +1000 +++ linux-2.6.22.5-android/drivers/misc/Kconfig~ 2007-11-20 08:46:07.724254589 +1100 @@ -0,0 +1,200 @@ +# +# Misc strange devices +# + +menu "Misc devices" + +config IBM_ASM + tristate "Device driver for IBM RSA service processor" + depends on X86 && PCI && EXPERIMENTAL + ---help--- + This option enables device driver support for in-band access to the + IBM RSA (Condor) service processor in eServer xSeries systems. + The ibmasm device driver allows user space application to access + ASM (Advanced Systems Management) functions on the service + processor. The driver is meant to be used in conjunction with + a user space API. + The ibmasm driver also enables the OS to use the UART on the + service processor board as a regular serial port. To make use of + this feature serial driver support (CONFIG_SERIAL_8250) must be + enabled. + + WARNING: This software may not be supported or function + correctly on your IBM server. 
Please consult the IBM ServerProven + website for + information on the specific driver level and support statement + for your IBM server. + +config PHANTOM + tristate "Sensable PHANToM" + depends on PCI + help + Say Y here if you want to build a driver for Sensable PHANToM device. + + If you choose to build module, its name will be phantom. If unsure, + say N here. + + + If unsure, say N. + +config SGI_IOC4 + tristate "SGI IOC4 Base IO support" + depends on PCI + ---help--- + This option enables basic support for the IOC4 chip on certain + SGI IO controller cards (IO9, IO10, and PCI-RT). This option + does not enable any specific functions on such a card, but provides + necessary infrastructure for other drivers to utilize. + + If you have an SGI Altix with an IOC4-based card say Y. + Otherwise say N. + +config TIFM_CORE + tristate "TI Flash Media interface support (EXPERIMENTAL)" + depends on EXPERIMENTAL && PCI + help + If you want support for Texas Instruments(R) Flash Media adapters + you should select this option and then also choose an appropriate + host adapter, such as 'TI Flash Media PCI74xx/PCI76xx host adapter + support', if you have a TI PCI74xx compatible card reader, for + example. + You will also have to select some flash card format drivers. MMC/SD + cards are supported via 'MMC/SD Card support: TI Flash Media MMC/SD + Interface support (MMC_TIFM_SD)'. + + To compile this driver as a module, choose M here: the module will + be called tifm_core. + +config TIFM_7XX1 + tristate "TI Flash Media PCI74xx/PCI76xx host adapter support (EXPERIMENTAL)" + depends on PCI && TIFM_CORE && EXPERIMENTAL + default TIFM_CORE + help + This option enables support for Texas Instruments(R) PCI74xx and + PCI76xx families of Flash Media adapters, found in many laptops. + To make actual use of the device, you will have to select some + flash card format drivers, as outlined in the TIFM_CORE Help. 
+ + To compile this driver as a module, choose M here: the module will + be called tifm_7xx1. + +config ASUS_LAPTOP + tristate "Asus Laptop Extras (EXPERIMENTAL)" + depends on X86 + depends on ACPI + depends on EXPERIMENTAL && !ACPI_ASUS + depends on LEDS_CLASS + depends on BACKLIGHT_CLASS_DEVICE + ---help--- + This is the new Linux driver for Asus laptops. It may also support some + MEDION, JVC or VICTOR laptops. It makes all the extra buttons generate + standard ACPI events that go through /proc/acpi/events. It also adds + support for video output switching, LCD backlight control, Bluetooth and + Wlan control, and most importantly, allows you to blink those fancy LEDs. + + For more information and a userspace daemon for handling the extra + buttons see . + + If you have an ACPI-compatible ASUS laptop, say Y or M here. + +config MSI_LAPTOP + tristate "MSI Laptop Extras" + depends on X86 + depends on ACPI_EC + depends on BACKLIGHT_CLASS_DEVICE + ---help--- + This is a driver for laptops built by MSI (MICRO-STAR + INTERNATIONAL): + + MSI MegaBook S270 (MS-1013) + Cytron/TCM/Medion/Tchibo MD96100/SAM2000 + + It adds support for Bluetooth, WLAN and LCD brightness control. + + More information about this driver is available at + . + + If you have an MSI S270 laptop, say Y or M here. + +config SONY_LAPTOP + tristate "Sony Laptop Extras" + depends on X86 && ACPI + select BACKLIGHT_CLASS_DEVICE + ---help--- + This mini-driver drives the SNC and SPIC devices present in the ACPI + BIOS of the Sony Vaio laptops. + + It gives access to some extra laptop functionalities like Bluetooth, + screen brightness control, Fn keys and allows powering on/off some + devices. + + Read for more information. + +config SONYPI_COMPAT + bool "Sonypi compatibility" + depends on SONY_LAPTOP + ---help--- + Build the sonypi driver compatibility code into the sony-laptop driver. 
+ +config THINKPAD_ACPI + tristate "ThinkPad ACPI Laptop Extras" + depends on X86 && ACPI + select BACKLIGHT_CLASS_DEVICE + select HWMON + ---help--- + This is a driver for the IBM and Lenovo ThinkPad laptops. It adds + support for Fn-Fx key combinations, Bluetooth control, video + output switching, ThinkLight control, UltraBay eject and more. + For more information about this driver see + and . + + This driver was formely known as ibm-acpi. + + If you have an IBM or Lenovo ThinkPad laptop, say Y or M here. + +config THINKPAD_ACPI_DEBUG + bool "Verbose debug mode" + depends on THINKPAD_ACPI + default n + ---help--- + Enables extra debugging information, at the expense of a slightly + increase in driver size. + + If you are not sure, say N here. + +config THINKPAD_ACPI_DOCK + bool "Legacy Docking Station Support" + depends on THINKPAD_ACPI + depends on ACPI_DOCK=n + default n + ---help--- + Allows the thinkpad_acpi driver to handle docking station events. + This support was made obsolete by the generic ACPI docking station + support (CONFIG_ACPI_DOCK). It will allow locking and removing the + laptop from the docking station, but will not properly connect PCI + devices. + + If you are not sure, say N here. + +config THINKPAD_ACPI_BAY + bool "Legacy Removable Bay Support" + depends on THINKPAD_ACPI + default y + ---help--- + Allows the thinkpad_acpi driver to handle removable bays. It will + eletrically disable the device in the bay, and also generate + notifications when the bay lever is ejected or inserted. + + If you are not sure, say Y here. + +config LOW_MEMORY_KILLER + tristate "Low Memory Killer" + ---help--- + Register processes to be killed when memory is low. + +config QEMU_TRACE + tristate "Virtual Device for QEMU tracing" + ---help--- + This is a virtual device for QEMU tracing. 
+ +endmenu diff -urN linux-2.6.22.5/drivers/misc/Kconfig.orig linux-2.6.22.5-android/drivers/misc/Kconfig.orig --- linux-2.6.22.5/drivers/misc/Kconfig.orig 1970-01-01 10:00:00.000000000 +1000 +++ linux-2.6.22.5-android/drivers/misc/Kconfig.orig 2007-11-20 08:22:34.618987994 +1100 @@ -0,0 +1,191 @@ +# +# Misc strange devices +# + +menu "Misc devices" + +config IBM_ASM + tristate "Device driver for IBM RSA service processor" + depends on X86 && PCI && EXPERIMENTAL + ---help--- + This option enables device driver support for in-band access to the + IBM RSA (Condor) service processor in eServer xSeries systems. + The ibmasm device driver allows user space application to access + ASM (Advanced Systems Management) functions on the service + processor. The driver is meant to be used in conjunction with + a user space API. + The ibmasm driver also enables the OS to use the UART on the + service processor board as a regular serial port. To make use of + this feature serial driver support (CONFIG_SERIAL_8250) must be + enabled. + + WARNING: This software may not be supported or function + correctly on your IBM server. Please consult the IBM ServerProven + website for + information on the specific driver level and support statement + for your IBM server. + +config PHANTOM + tristate "Sensable PHANToM" + depends on PCI + help + Say Y here if you want to build a driver for Sensable PHANToM device. + + If you choose to build module, its name will be phantom. If unsure, + say N here. + + + If unsure, say N. + +config SGI_IOC4 + tristate "SGI IOC4 Base IO support" + depends on PCI + ---help--- + This option enables basic support for the IOC4 chip on certain + SGI IO controller cards (IO9, IO10, and PCI-RT). This option + does not enable any specific functions on such a card, but provides + necessary infrastructure for other drivers to utilize. + + If you have an SGI Altix with an IOC4-based card say Y. + Otherwise say N. 
+ +config TIFM_CORE + tristate "TI Flash Media interface support (EXPERIMENTAL)" + depends on EXPERIMENTAL && PCI + help + If you want support for Texas Instruments(R) Flash Media adapters + you should select this option and then also choose an appropriate + host adapter, such as 'TI Flash Media PCI74xx/PCI76xx host adapter + support', if you have a TI PCI74xx compatible card reader, for + example. + You will also have to select some flash card format drivers. MMC/SD + cards are supported via 'MMC/SD Card support: TI Flash Media MMC/SD + Interface support (MMC_TIFM_SD)'. + + To compile this driver as a module, choose M here: the module will + be called tifm_core. + +config TIFM_7XX1 + tristate "TI Flash Media PCI74xx/PCI76xx host adapter support (EXPERIMENTAL)" + depends on PCI && TIFM_CORE && EXPERIMENTAL + default TIFM_CORE + help + This option enables support for Texas Instruments(R) PCI74xx and + PCI76xx families of Flash Media adapters, found in many laptops. + To make actual use of the device, you will have to select some + flash card format drivers, as outlined in the TIFM_CORE Help. + + To compile this driver as a module, choose M here: the module will + be called tifm_7xx1. + +config ASUS_LAPTOP + tristate "Asus Laptop Extras (EXPERIMENTAL)" + depends on X86 + depends on ACPI + depends on EXPERIMENTAL && !ACPI_ASUS + depends on LEDS_CLASS + depends on BACKLIGHT_CLASS_DEVICE + ---help--- + This is the new Linux driver for Asus laptops. It may also support some + MEDION, JVC or VICTOR laptops. It makes all the extra buttons generate + standard ACPI events that go through /proc/acpi/events. It also adds + support for video output switching, LCD backlight control, Bluetooth and + Wlan control, and most importantly, allows you to blink those fancy LEDs. + + For more information and a userspace daemon for handling the extra + buttons see . + + If you have an ACPI-compatible ASUS laptop, say Y or M here. 
+ +config MSI_LAPTOP + tristate "MSI Laptop Extras" + depends on X86 + depends on ACPI_EC + depends on BACKLIGHT_CLASS_DEVICE + ---help--- + This is a driver for laptops built by MSI (MICRO-STAR + INTERNATIONAL): + + MSI MegaBook S270 (MS-1013) + Cytron/TCM/Medion/Tchibo MD96100/SAM2000 + + It adds support for Bluetooth, WLAN and LCD brightness control. + + More information about this driver is available at + . + + If you have an MSI S270 laptop, say Y or M here. + +config SONY_LAPTOP + tristate "Sony Laptop Extras" + depends on X86 && ACPI + select BACKLIGHT_CLASS_DEVICE + ---help--- + This mini-driver drives the SNC and SPIC devices present in the ACPI + BIOS of the Sony Vaio laptops. + + It gives access to some extra laptop functionalities like Bluetooth, + screen brightness control, Fn keys and allows powering on/off some + devices. + + Read for more information. + +config SONYPI_COMPAT + bool "Sonypi compatibility" + depends on SONY_LAPTOP + ---help--- + Build the sonypi driver compatibility code into the sony-laptop driver. + +config THINKPAD_ACPI + tristate "ThinkPad ACPI Laptop Extras" + depends on X86 && ACPI + select BACKLIGHT_CLASS_DEVICE + select HWMON + ---help--- + This is a driver for the IBM and Lenovo ThinkPad laptops. It adds + support for Fn-Fx key combinations, Bluetooth control, video + output switching, ThinkLight control, UltraBay eject and more. + For more information about this driver see + and . + + This driver was formely known as ibm-acpi. + + If you have an IBM or Lenovo ThinkPad laptop, say Y or M here. + +config THINKPAD_ACPI_DEBUG + bool "Verbose debug mode" + depends on THINKPAD_ACPI + default n + ---help--- + Enables extra debugging information, at the expense of a slightly + increase in driver size. + + If you are not sure, say N here. 
+ +config THINKPAD_ACPI_DOCK + bool "Legacy Docking Station Support" + depends on THINKPAD_ACPI + depends on ACPI_DOCK=n + default n + ---help--- + Allows the thinkpad_acpi driver to handle docking station events. + This support was made obsolete by the generic ACPI docking station + support (CONFIG_ACPI_DOCK). It will allow locking and removing the + laptop from the docking station, but will not properly connect PCI + devices. + + If you are not sure, say N here. + +config THINKPAD_ACPI_BAY + bool "Legacy Removable Bay Support" + depends on THINKPAD_ACPI + default y + ---help--- + Allows the thinkpad_acpi driver to handle removable bays. It will + eletrically disable the device in the bay, and also generate + notifications when the bay lever is ejected or inserted. + + If you are not sure, say Y here. + + +endmenu diff -urN linux-2.6.22.5/drivers/misc/lowmemorykiller/lowmemorykiller.c linux-2.6.22.5-android/drivers/misc/lowmemorykiller/lowmemorykiller.c --- linux-2.6.22.5/drivers/misc/lowmemorykiller/lowmemorykiller.c 1970-01-01 10:00:00.000000000 +1000 +++ linux-2.6.22.5-android/drivers/misc/lowmemorykiller/lowmemorykiller.c 2007-11-20 08:46:07.724254589 +1100 @@ -0,0 +1,119 @@ +/* drivers/misc/lowmemorykiller/lowmemorykiller.c +** +** Copyright (C) 2007 Google, Inc. +** +** This software is licensed under the terms of the GNU General Public +** License version 2, as published by the Free Software Foundation, and +** may be copied, distributed, and modified under those terms. +** +** This program is distributed in the hope that it will be useful, +** but WITHOUT ANY WARRANTY; without even the implied warranty of +** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +** GNU General Public License for more details. 
+** +*/ + +#include +#include +#include +#include +#include + +static int lowmem_shrink(int nr_to_scan, gfp_t gfp_mask); + +static struct shrinker lowmem_shrinker = { + .shrink = lowmem_shrink, + .seeks = DEFAULT_SEEKS * 16 +}; +static uint32_t lowmem_debug_level = 2; +static int lowmem_adj[6] = { + 0, + 1, + 6, + 12, +}; +static int lowmem_adj_size = 4; +static size_t lowmem_minfree[6] = { + 3*512, // 6MB + 2*1024, // 8MB + 4*1024, // 16MB + 16*1024, // 64MB +}; +static int lowmem_minfree_size = 4; + +#define lowmem_print(level, x...) do { if(lowmem_debug_level >= (level)) printk(x); } while(0) + +module_param_named(cost, lowmem_shrinker.seeks, int, S_IRUGO | S_IWUSR); +module_param_array_named(adj, lowmem_adj, int, &lowmem_adj_size, S_IRUGO | S_IWUSR); +module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size, S_IRUGO | S_IWUSR); +module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR); + +static int lowmem_shrink(int nr_to_scan, gfp_t gfp_mask) +{ + struct task_struct *p; + struct task_struct *selected = NULL; + int rem = 0; + int tasksize; + int i; + int min_adj = OOM_ADJUST_MAX + 1; + int selected_tasksize = 0; + int array_size = ARRAY_SIZE(lowmem_adj); + int other_free = global_page_state(NR_FREE_PAGES) + global_page_state(NR_FILE_PAGES); + if(lowmem_adj_size < array_size) + array_size = lowmem_adj_size; + if(lowmem_minfree_size < array_size) + array_size = lowmem_minfree_size; + for(i = 0; i < array_size; i++) { + if(other_free < lowmem_minfree[i]) { + min_adj = lowmem_adj[i]; + break; + } + } + if(nr_to_scan > 0) + lowmem_print(3, "lowmem_shrink %d, %x, ofree %d, ma %d\n", nr_to_scan, gfp_mask, other_free, min_adj); + read_lock(&tasklist_lock); + for_each_process(p) { + if(p->oomkilladj >= 0 && p->mm) { + tasksize = get_mm_rss(p->mm); + if(nr_to_scan > 0 && tasksize > 0 && p->oomkilladj >= min_adj) { + if(selected == NULL || + p->oomkilladj > selected->oomkilladj || + (p->oomkilladj == selected->oomkilladj && + 
tasksize > selected_tasksize)) { + selected = p; + selected_tasksize = tasksize; + lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n", + p->pid, p->comm, p->oomkilladj, tasksize); + } + } + rem += tasksize; + } + } + if(selected != NULL) { + lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n", + selected->pid, selected->comm, + selected->oomkilladj, selected_tasksize); + force_sig(SIGKILL, selected); + rem -= selected_tasksize; + } + lowmem_print(4, "lowmem_shrink %d, %x, return %d\n", nr_to_scan, gfp_mask, rem); + read_unlock(&tasklist_lock); + return rem; +} + +static int __init lowmem_init(void) +{ + register_shrinker(&lowmem_shrinker); + return 0; +} + +static void __exit lowmem_exit(void) +{ + unregister_shrinker(&lowmem_shrinker); +} + +module_init(lowmem_init); +module_exit(lowmem_exit); + +MODULE_LICENSE("GPL"); + diff -urN linux-2.6.22.5/drivers/misc/lowmemorykiller/Makefile linux-2.6.22.5-android/drivers/misc/lowmemorykiller/Makefile --- linux-2.6.22.5/drivers/misc/lowmemorykiller/Makefile 1970-01-01 10:00:00.000000000 +1000 +++ linux-2.6.22.5-android/drivers/misc/lowmemorykiller/Makefile 2007-11-20 08:46:07.724254589 +1100 @@ -0,0 +1 @@ +obj-$(CONFIG_LOW_MEMORY_KILLER) := lowmemorykiller.o diff -urN linux-2.6.22.5/drivers/misc/Makefile linux-2.6.22.5-android/drivers/misc/Makefile --- linux-2.6.22.5/drivers/misc/Makefile 2007-08-23 09:23:54.000000000 +1000 +++ linux-2.6.22.5-android/drivers/misc/Makefile 2007-11-20 08:50:19.000000000 +1100 @@ -14,3 +14,4 @@ obj-$(CONFIG_SGI_IOC4) += ioc4.o obj-$(CONFIG_SONY_LAPTOP) += sony-laptop.o obj-$(CONFIG_THINKPAD_ACPI) += thinkpad_acpi.o +obj-$(CONFIG_LOW_MEMORY_KILLER) += lowmemorykiller/ diff -urN linux-2.6.22.5/drivers/misc/Makefile.orig linux-2.6.22.5-android/drivers/misc/Makefile.orig --- linux-2.6.22.5/drivers/misc/Makefile.orig 1970-01-01 10:00:00.000000000 +1000 +++ linux-2.6.22.5-android/drivers/misc/Makefile.orig 2007-11-20 08:22:34.618987994 +1100 @@ -0,0 +1,16 @@ +# +# Makefile for 
misc devices that really don't fit anywhere else. +# +obj- := misc.o # Dummy rule to force built-in.o to be made + +obj-$(CONFIG_IBM_ASM) += ibmasm/ +obj-$(CONFIG_HDPU_FEATURES) += hdpuftrs/ +obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o +obj-$(CONFIG_ASUS_LAPTOP) += asus-laptop.o +obj-$(CONFIG_LKDTM) += lkdtm.o +obj-$(CONFIG_TIFM_CORE) += tifm_core.o +obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o +obj-$(CONFIG_PHANTOM) += phantom.o +obj-$(CONFIG_SGI_IOC4) += ioc4.o +obj-$(CONFIG_SONY_LAPTOP) += sony-laptop.o +obj-$(CONFIG_THINKPAD_ACPI) += thinkpad_acpi.o diff -urN linux-2.6.22.5/drivers/usb/gadget/Makefile linux-2.6.22.5-android/drivers/usb/gadget/Makefile --- linux-2.6.22.5/drivers/usb/gadget/Makefile 2007-11-20 08:03:54.859360319 +1100 +++ linux-2.6.22.5-android/drivers/usb/gadget/Makefile 2007-11-20 08:46:07.724254589 +1100 @@ -22,6 +22,9 @@ g_file_storage-objs := file_storage.o usbstring.o config.o \ epautoconf.o +# needed for drivers/android/android_gadget.c +obj-$(CONFIG_ANDROID_GADGET) += config.o epautoconf.o usbstring.o + ifeq ($(CONFIG_USB_ETH_RNDIS),y) g_ether-objs += rndis.o endif diff -urN linux-2.6.22.5/drivers/usb/gadget/Makefile.orig linux-2.6.22.5-android/drivers/usb/gadget/Makefile.orig --- linux-2.6.22.5/drivers/usb/gadget/Makefile.orig 1970-01-01 10:00:00.000000000 +1000 +++ linux-2.6.22.5-android/drivers/usb/gadget/Makefile.orig 2007-11-20 08:23:04.310569113 +1100 @@ -0,0 +1,35 @@ +# +# USB peripheral controller drivers +# +obj-$(CONFIG_USB_DUMMY_HCD) += dummy_hcd.o +obj-$(CONFIG_USB_NET2280) += net2280.o +obj-$(CONFIG_USB_PXA2XX) += pxa2xx_udc.o +obj-$(CONFIG_USB_GOKU) += goku_udc.o +obj-$(CONFIG_USB_OMAP) += omap_udc.o +obj-$(CONFIG_USB_LH7A40X) += lh7a40x_udc.o +obj-$(CONFIG_USB_S3C2410) += s3c2410_udc.o +obj-$(CONFIG_USB_AT91) += at91_udc.o +obj-$(CONFIG_USB_FSL_USB2) += fsl_usb2_udc.o + +# +# USB gadget drivers +# +g_zero-objs := zero.o usbstring.o config.o epautoconf.o +g_ether-objs := ether.o usbstring.o config.o epautoconf.o +g_serial-objs := 
serial.o usbstring.o config.o epautoconf.o +g_midi-objs := gmidi.o usbstring.o config.o epautoconf.o +gadgetfs-objs := inode.o +g_file_storage-objs := file_storage.o usbstring.o config.o \ + epautoconf.o + +ifeq ($(CONFIG_USB_ETH_RNDIS),y) + g_ether-objs += rndis.o +endif + +obj-$(CONFIG_USB_ZERO) += g_zero.o +obj-$(CONFIG_USB_ETH) += g_ether.o +obj-$(CONFIG_USB_GADGETFS) += gadgetfs.o +obj-$(CONFIG_USB_FILE_STORAGE) += g_file_storage.o +obj-$(CONFIG_USB_G_SERIAL) += g_serial.o +obj-$(CONFIG_USB_MIDI_GADGET) += g_midi.o + diff -urN linux-2.6.22.5/fs/inotify_user.c linux-2.6.22.5-android/fs/inotify_user.c --- linux-2.6.22.5/fs/inotify_user.c 2007-08-23 09:23:54.000000000 +1000 +++ linux-2.6.22.5-android/fs/inotify_user.c 2007-11-20 08:46:07.724254589 +1100 @@ -31,6 +31,9 @@ #include #include #include +#ifdef CONFIG_ANDROID_POWER +#include +#endif #include @@ -81,6 +84,9 @@ unsigned int queue_size; /* size of the queue (bytes) */ unsigned int event_count; /* number of pending events */ unsigned int max_events; /* maximum number of events */ +#ifdef CONFIG_ANDROID_POWER + android_suspend_lock_t suspend_lock; +#endif }; /* @@ -157,6 +163,9 @@ if (atomic_dec_and_test(&dev->count)) { atomic_dec(&dev->user->inotify_devs); free_uid(dev->user); +#ifdef CONFIG_ANDROID_POWER + android_uninit_suspend_lock(&dev->suspend_lock); +#endif kfree(dev); } } @@ -301,6 +310,9 @@ dev->queue_size += sizeof(struct inotify_event) + kevent->event.len; list_add_tail(&kevent->list, &dev->events); wake_up_interruptible(&dev->wq); +#ifdef CONFIG_ANDROID_POWER + android_lock_suspend_auto_expire(&dev->suspend_lock, 5 * HZ); +#endif out: mutex_unlock(&dev->ev_mutex); @@ -318,6 +330,10 @@ dev->event_count--; dev->queue_size -= sizeof(struct inotify_event) + kevent->event.len; +#ifdef CONFIG_ANDROID_POWER + if(dev->event_count == 0) + android_unlock_suspend(&dev->suspend_lock); +#endif kfree(kevent->name); kmem_cache_free(event_cachep, kevent); @@ -594,6 +610,10 @@ dev->max_events = 
inotify_max_queued_events; dev->user = user; atomic_set(&dev->count, 0); +#ifdef CONFIG_ANDROID_POWER + dev->suspend_lock.name = "inotify"; + android_init_suspend_lock(&dev->suspend_lock); +#endif get_inotify_dev(dev); atomic_inc(&user->inotify_devs); diff -urN linux-2.6.22.5/include/asm/elf.h linux-2.6.22.5-android/include/asm/elf.h --- linux-2.6.22.5/include/asm/elf.h 2007-08-23 09:23:54.000000000 +1000 +++ linux-2.6.22.5-android/include/asm/elf.h 2007-11-20 08:46:07.724254589 +1100 @@ -80,6 +80,11 @@ #define ELF_PLATFORM (elf_platform) extern char elf_platform[]; + +struct task_struct; + +extern int dump_task_regs (struct task_struct *, elf_gregset_t *); + #endif /* @@ -134,6 +139,9 @@ } \ } while (0) +#define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs) + + #endif #endif diff -urN linux-2.6.22.5/include/asm/elf.h.orig linux-2.6.22.5-android/include/asm/elf.h.orig --- linux-2.6.22.5/include/asm/elf.h.orig 1970-01-01 10:00:00.000000000 +1000 +++ linux-2.6.22.5-android/include/asm/elf.h.orig 2007-11-20 08:23:30.771978221 +1100 @@ -0,0 +1,139 @@ +#ifndef __ASMARM_ELF_H +#define __ASMARM_ELF_H + +#ifndef __ASSEMBLY__ +/* + * ELF register definitions.. + */ +#include +#include + +typedef unsigned long elf_greg_t; +typedef unsigned long elf_freg_t[3]; + +#define ELF_NGREG (sizeof (struct pt_regs) / sizeof(elf_greg_t)) +typedef elf_greg_t elf_gregset_t[ELF_NGREG]; + +typedef struct user_fp elf_fpregset_t; +#endif + +#define EM_ARM 40 +#define EF_ARM_APCS26 0x08 +#define EF_ARM_SOFT_FLOAT 0x200 +#define EF_ARM_EABI_MASK 0xFF000000 + +#define R_ARM_NONE 0 +#define R_ARM_PC24 1 +#define R_ARM_ABS32 2 +#define R_ARM_CALL 28 +#define R_ARM_JUMP24 29 + +/* + * These are used to set parameters in the core dumps. 
+ */ +#define ELF_CLASS ELFCLASS32 +#ifdef __ARMEB__ +#define ELF_DATA ELFDATA2MSB +#else +#define ELF_DATA ELFDATA2LSB +#endif +#define ELF_ARCH EM_ARM + +/* + * HWCAP flags - for elf_hwcap (in kernel) and AT_HWCAP + */ +#define HWCAP_SWP 1 +#define HWCAP_HALF 2 +#define HWCAP_THUMB 4 +#define HWCAP_26BIT 8 /* Play it safe */ +#define HWCAP_FAST_MULT 16 +#define HWCAP_FPA 32 +#define HWCAP_VFP 64 +#define HWCAP_EDSP 128 +#define HWCAP_JAVA 256 +#define HWCAP_IWMMXT 512 +#define HWCAP_CRUNCH 1024 + +#ifdef __KERNEL__ +#ifndef __ASSEMBLY__ +/* + * This yields a mask that user programs can use to figure out what + * instruction set this cpu supports. + */ +#define ELF_HWCAP (elf_hwcap) +extern unsigned int elf_hwcap; + +/* + * This yields a string that ld.so will use to load implementation + * specific libraries for optimization. This is more specific in + * intent than poking at uname or /proc/cpuinfo. + * + * For now we just provide a fairly general string that describes the + * processor family. This could be made more specific later if someone + * implemented optimisations that require it. 26-bit CPUs give you + * "v1l" for ARM2 (no SWP) and "v2l" for anything else (ARM1 isn't + * supported). 32-bit CPUs give you "v3[lb]" for anything based on an + * ARM6 or ARM7 core and "armv4[lb]" for anything based on a StrongARM-1 + * core. + */ +#define ELF_PLATFORM_SIZE 8 +#define ELF_PLATFORM (elf_platform) + +extern char elf_platform[]; +#endif + +/* + * This is used to ensure we don't load something for the wrong architecture. + */ +#define elf_check_arch(x) ((x)->e_machine == EM_ARM && ELF_PROC_OK(x)) + +/* + * 32-bit code is always OK. Some cpus can do 26-bit, some can't. 
+ */ +#define ELF_PROC_OK(x) (ELF_THUMB_OK(x) && ELF_26BIT_OK(x)) + +#define ELF_THUMB_OK(x) \ + ((elf_hwcap & HWCAP_THUMB && ((x)->e_entry & 1) == 1) || \ + ((x)->e_entry & 3) == 0) + +#define ELF_26BIT_OK(x) \ + ((elf_hwcap & HWCAP_26BIT && (x)->e_flags & EF_ARM_APCS26) || \ + ((x)->e_flags & EF_ARM_APCS26) == 0) + +#define USE_ELF_CORE_DUMP +#define ELF_EXEC_PAGESIZE 4096 + +/* This is the location that an ET_DYN program is loaded if exec'ed. Typical + use of this is to invoke "./ld.so someprog" to test out a new version of + the loader. We need to make sure that it is out of the way of the program + that it will "exec", and that there is sufficient room for the brk. */ + +#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) + +/* When the program starts, a1 contains a pointer to a function to be + registered with atexit, as per the SVR4 ABI. A value of 0 means we + have no such handler. */ +#define ELF_PLAT_INIT(_r, load_addr) (_r)->ARM_r0 = 0 + +/* + * Since the FPA coprocessor uses CP1 and CP2, and iWMMXt uses CP0 + * and CP1, we only enable access to the iWMMXt coprocessor if the + * binary is EABI or softfloat (and thus, guaranteed not to use + * FPA instructions.) 
+ */ +#define SET_PERSONALITY(ex, ibcs2) \ + do { \ + if ((ex).e_flags & EF_ARM_APCS26) { \ + set_personality(PER_LINUX); \ + } else { \ + set_personality(PER_LINUX_32BIT); \ + if (elf_hwcap & HWCAP_IWMMXT && (ex).e_flags & (EF_ARM_EABI_MASK | EF_ARM_SOFT_FLOAT)) \ + set_thread_flag(TIF_USING_IWMMXT); \ + else \ + clear_thread_flag(TIF_USING_IWMMXT); \ + } \ + } while (0) + +#endif + +#endif diff -urN linux-2.6.22.5/include/asm-arm/elf.h linux-2.6.22.5-android/include/asm-arm/elf.h --- linux-2.6.22.5/include/asm-arm/elf.h 2007-08-23 09:23:54.000000000 +1000 +++ linux-2.6.22.5-android/include/asm-arm/elf.h 2007-11-20 08:46:07.724254589 +1100 @@ -80,6 +80,11 @@ #define ELF_PLATFORM (elf_platform) extern char elf_platform[]; + +struct task_struct; + +extern int dump_task_regs (struct task_struct *, elf_gregset_t *); + #endif /* @@ -134,6 +139,9 @@ } \ } while (0) +#define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs) + + #endif #endif diff -urN linux-2.6.22.5/include/asm-arm/elf.h.orig linux-2.6.22.5-android/include/asm-arm/elf.h.orig --- linux-2.6.22.5/include/asm-arm/elf.h.orig 1970-01-01 10:00:00.000000000 +1000 +++ linux-2.6.22.5-android/include/asm-arm/elf.h.orig 2007-11-20 08:23:30.771978221 +1100 @@ -0,0 +1,139 @@ +#ifndef __ASMARM_ELF_H +#define __ASMARM_ELF_H + +#ifndef __ASSEMBLY__ +/* + * ELF register definitions.. + */ +#include +#include + +typedef unsigned long elf_greg_t; +typedef unsigned long elf_freg_t[3]; + +#define ELF_NGREG (sizeof (struct pt_regs) / sizeof(elf_greg_t)) +typedef elf_greg_t elf_gregset_t[ELF_NGREG]; + +typedef struct user_fp elf_fpregset_t; +#endif + +#define EM_ARM 40 +#define EF_ARM_APCS26 0x08 +#define EF_ARM_SOFT_FLOAT 0x200 +#define EF_ARM_EABI_MASK 0xFF000000 + +#define R_ARM_NONE 0 +#define R_ARM_PC24 1 +#define R_ARM_ABS32 2 +#define R_ARM_CALL 28 +#define R_ARM_JUMP24 29 + +/* + * These are used to set parameters in the core dumps. 
+ */ +#define ELF_CLASS ELFCLASS32 +#ifdef __ARMEB__ +#define ELF_DATA ELFDATA2MSB +#else +#define ELF_DATA ELFDATA2LSB +#endif +#define ELF_ARCH EM_ARM + +/* + * HWCAP flags - for elf_hwcap (in kernel) and AT_HWCAP + */ +#define HWCAP_SWP 1 +#define HWCAP_HALF 2 +#define HWCAP_THUMB 4 +#define HWCAP_26BIT 8 /* Play it safe */ +#define HWCAP_FAST_MULT 16 +#define HWCAP_FPA 32 +#define HWCAP_VFP 64 +#define HWCAP_EDSP 128 +#define HWCAP_JAVA 256 +#define HWCAP_IWMMXT 512 +#define HWCAP_CRUNCH 1024 + +#ifdef __KERNEL__ +#ifndef __ASSEMBLY__ +/* + * This yields a mask that user programs can use to figure out what + * instruction set this cpu supports. + */ +#define ELF_HWCAP (elf_hwcap) +extern unsigned int elf_hwcap; + +/* + * This yields a string that ld.so will use to load implementation + * specific libraries for optimization. This is more specific in + * intent than poking at uname or /proc/cpuinfo. + * + * For now we just provide a fairly general string that describes the + * processor family. This could be made more specific later if someone + * implemented optimisations that require it. 26-bit CPUs give you + * "v1l" for ARM2 (no SWP) and "v2l" for anything else (ARM1 isn't + * supported). 32-bit CPUs give you "v3[lb]" for anything based on an + * ARM6 or ARM7 core and "armv4[lb]" for anything based on a StrongARM-1 + * core. + */ +#define ELF_PLATFORM_SIZE 8 +#define ELF_PLATFORM (elf_platform) + +extern char elf_platform[]; +#endif + +/* + * This is used to ensure we don't load something for the wrong architecture. + */ +#define elf_check_arch(x) ((x)->e_machine == EM_ARM && ELF_PROC_OK(x)) + +/* + * 32-bit code is always OK. Some cpus can do 26-bit, some can't. 
+ */ +#define ELF_PROC_OK(x) (ELF_THUMB_OK(x) && ELF_26BIT_OK(x)) + +#define ELF_THUMB_OK(x) \ + ((elf_hwcap & HWCAP_THUMB && ((x)->e_entry & 1) == 1) || \ + ((x)->e_entry & 3) == 0) + +#define ELF_26BIT_OK(x) \ + ((elf_hwcap & HWCAP_26BIT && (x)->e_flags & EF_ARM_APCS26) || \ + ((x)->e_flags & EF_ARM_APCS26) == 0) + +#define USE_ELF_CORE_DUMP +#define ELF_EXEC_PAGESIZE 4096 + +/* This is the location that an ET_DYN program is loaded if exec'ed. Typical + use of this is to invoke "./ld.so someprog" to test out a new version of + the loader. We need to make sure that it is out of the way of the program + that it will "exec", and that there is sufficient room for the brk. */ + +#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) + +/* When the program starts, a1 contains a pointer to a function to be + registered with atexit, as per the SVR4 ABI. A value of 0 means we + have no such handler. */ +#define ELF_PLAT_INIT(_r, load_addr) (_r)->ARM_r0 = 0 + +/* + * Since the FPA coprocessor uses CP1 and CP2, and iWMMXt uses CP0 + * and CP1, we only enable access to the iWMMXt coprocessor if the + * binary is EABI or softfloat (and thus, guaranteed not to use + * FPA instructions.) + */ +#define SET_PERSONALITY(ex, ibcs2) \ + do { \ + if ((ex).e_flags & EF_ARM_APCS26) { \ + set_personality(PER_LINUX); \ + } else { \ + set_personality(PER_LINUX_32BIT); \ + if (elf_hwcap & HWCAP_IWMMXT && (ex).e_flags & (EF_ARM_EABI_MASK | EF_ARM_SOFT_FLOAT)) \ + set_thread_flag(TIF_USING_IWMMXT); \ + else \ + clear_thread_flag(TIF_USING_IWMMXT); \ + } \ + } while (0) + +#endif + +#endif diff -urN linux-2.6.22.5/include/linux/android_alarm.h linux-2.6.22.5-android/include/linux/android_alarm.h --- linux-2.6.22.5/include/linux/android_alarm.h 1970-01-01 10:00:00.000000000 +1000 +++ linux-2.6.22.5-android/include/linux/android_alarm.h 2007-11-20 08:46:07.734255122 +1100 @@ -0,0 +1,55 @@ +/* include/linux/android_alarm.h +** +** Copyright (C) 2006-2007 Google, Inc. 
+** +** This software is licensed under the terms of the GNU General Public +** License version 2, as published by the Free Software Foundation, and +** may be copied, distributed, and modified under those terms. +** +** This program is distributed in the hope that it will be useful, +** but WITHOUT ANY WARRANTY; without even the implied warranty of +** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +** GNU General Public License for more details. +** +*/ + +#ifndef _LINUX_ANDROID_ALARM_H +#define _LINUX_ANDROID_ALARM_H + +#include +#include + +typedef enum { + // return code bit numbers or set alarm arg + ANDROID_ALARM_RTC_WAKEUP, + ANDROID_ALARM_RTC, + ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP, + ANDROID_ALARM_ELAPSED_REALTIME, + ANDROID_ALARM_SYSTEMTIME, + // + ANDROID_ALARM_TYPE_COUNT, + + // return code bit numbers +// ANDROID_ALARM_TIME_CHANGE = 16 +} android_alarm_type_t; + +typedef enum { + ANDROID_ALARM_RTC_WAKEUP_MASK = 1U << ANDROID_ALARM_RTC_WAKEUP, + ANDROID_ALARM_RTC_MASK = 1U << ANDROID_ALARM_RTC, + ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK = 1U << ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP, + ANDROID_ALARM_ELAPSED_REALTIME_MASK = 1U << ANDROID_ALARM_ELAPSED_REALTIME, + ANDROID_ALARM_SYSTEMTIME_MASK = 1U << ANDROID_ALARM_SYSTEMTIME, + ANDROID_ALARM_TIME_CHANGE_MASK = 1U << 16 +} android_alarm_return_flags_t; + +#define ANDROID_ALARM_CLEAR(type) _IO('a', 0 | ((type) << 4)) // diasable alarm +#define ANDROID_ALARM_WAIT _IO('a', 1) // ack last alarm and wait for next +#define ANDROID_ALARM_SET(type) _IOW('a', 2 | ((type) << 4), struct timespec) // set alarm +#define ANDROID_ALARM_SET_AND_WAIT(type) _IOW('a', 3 | ((type) << 4), struct timespec) +#define ANDROID_ALARM_GET_TIME(type) _IOW('a', 4 | ((type) << 4), struct timespec) +#define ANDROID_ALARM_SET_RTC _IOW('a', 5, struct timespec) + +#define ANDROID_ALARM_BASE_CMD(cmd) (cmd & ~(_IOC(0, 0, 0xf0, 0))) +#define ANDROID_ALARM_IOCTL_TO_TYPE(cmd) (_IOC_NR(cmd) >> 4) + +#endif diff -urN 
linux-2.6.22.5/include/linux/android_gadget.h linux-2.6.22.5-android/include/linux/android_gadget.h --- linux-2.6.22.5/include/linux/android_gadget.h 1970-01-01 10:00:00.000000000 +1000 +++ linux-2.6.22.5-android/include/linux/android_gadget.h 2007-11-20 08:46:07.734255122 +1100 @@ -0,0 +1,88 @@ +/* include/linux/android_gadget.h +** +** Copyright (C) 2006-2007 Google, Inc. +** +** This software is licensed under the terms of the GNU General Public +** License version 2, as published by the Free Software Foundation, and +** may be copied, distributed, and modified under those terms. +** +** This program is distributed in the hope that it will be useful, +** but WITHOUT ANY WARRANTY; without even the implied warranty of +** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +** GNU General Public License for more details. +** +*/ + +#ifndef _LINUX_ANDROID_GADGET_H +#define _LINUX_ANDROID_GADGET_H + + +/* used for ANDROID_GADGET_OPEN_FILE */ +struct android_gadget_file_open { + const char* path; + uint32_t path_length; // strlen(path) + uint32_t flags; + uint32_t mode; +}; + + +/* + * Used for optimized file copy ioctls ANDROID_GADGET_READ_TO_FILE and ANDROID_GADGET_WRITE_FROM_FILE. + * File's seek position must be set to the proper location before calling ioctl. 
+*/ +struct android_gadget_file_copy { + uint32_t fd; + uint32_t length; + uint64_t offset; +}; + + +/* Set number of UMS devices, for USB_BULK_GET_MAX_LUN_REQUEST SETUP request */ +#define ANDROID_GADGET_SET_UMS_DEVICE_COUNT _IO('g',1) + +/* Sets USB configuration settings */ +#define ANDROID_GADGET_SET_MANUFACTURER_NAME _IO('g',2) +#define ANDROID_GADGET_SET_PRODUCT_NAME _IO('g',3) +#define ANDROID_GADGET_SET_SERIAL _IO('g',4) +#define ANDROID_GADGET_SET_VENDOR_ID _IO('g',5) +#define ANDROID_GADGET_SET_PRODUCT_ID _IO('g',6) +#define ANDROID_GADGET_SET_COMPOSITE_PRODUCT_ID _IO('g',19) + +/* Enables ADB interface */ +#define ANDROID_GADGET_ENABLE_ADB _IO('g',7) + +/* Enables mass storage interface */ +#define ANDROID_GADGET_ENABLE_UMS _IO('g',8) + +/* Enables kernel debug interface */ +#define ANDROID_GADGET_ENABLE_KDBG _IO('g',18) + +/* Enables MTP support */ +#define ANDROID_GADGET_ENABLE_MTP _IO('g',21) + +/* + * Enable or disable USB, depending on boolean argument. + * Do this after all other configuation is complete + */ +#define ANDROID_GADGET_ENABLE_USB _IO('g',9) + +/* used to open a file within the kernel. 
ioctl returns fd or error */ +#define ANDROID_GADGET_OPEN_FILE _IO('g',10) + +/* used to close a file opened with ANDROID_GADGET_OPEN_FILE */ +#define ANDROID_GADGET_CLOSE_FILE _IO('g',11) + +/* used to copy data from USB to a local file */ +#define ANDROID_GADGET_READ_TO_FILE _IO('g',12) + +/* used to copy data from a local file to USB */ +#define ANDROID_GADGET_WRITE_FROM_FILE _IO('g',13) + +/* Used to query current state of android_gadget */ +#define ANDROID_GADGET_IS_CONNECTED _IO('g',14) +#define ANDROID_GADGET_IS_ADB_ENABLED _IO('g',15) +#define ANDROID_GADGET_IS_UMS_ENABLED _IO('g',16) +#define ANDROID_GADGET_IS_CONFIGURED _IO('g',17) +#define ANDROID_GADGET_IS_KDBG_ENABLED _IO('g',20) + +#endif /* _LINUX_ANDROID_GADGET_H */ diff -urN linux-2.6.22.5/include/linux/android_power.h linux-2.6.22.5-android/include/linux/android_power.h --- linux-2.6.22.5/include/linux/android_power.h 1970-01-01 10:00:00.000000000 +1000 +++ linux-2.6.22.5-android/include/linux/android_power.h 2007-11-20 08:46:07.734255122 +1100 @@ -0,0 +1,89 @@ +/* include/linux/android_power.h +** +** Copyright (C) 2007 Google, Inc. +** +** This software is licensed under the terms of the GNU General Public +** License version 2, as published by the Free Software Foundation, and +** may be copied, distributed, and modified under those terms. +** +** This program is distributed in the hope that it will be useful, +** but WITHOUT ANY WARRANTY; without even the implied warranty of +** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +** GNU General Public License for more details. 
+** +*/ + +#ifndef _LINUX_ANDROID_POWER_H +#define _LINUX_ANDROID_POWER_H + +#include +#include + +typedef struct +{ + struct list_head link; + int flags; + const char *name; + int expires; +#ifdef CONFIG_ANDROID_POWER_STAT + struct { + int count; + int expire_count; + ktime_t total_time; + ktime_t max_time; + ktime_t last_time; + } stat; +#endif +} android_suspend_lock_t; + +#if 0 // none of these flags are implemented +#define ANDROID_SUSPEND_LOCK_FLAG_COUNTED (1U << 0) +#define ANDROID_SUSPEND_LOCK_FLAG_USER_READABLE (1U << 1) +#define ANDROID_SUSPEND_LOCK_FLAG_USER_SET (1U << 2) +#define ANDROID_SUSPEND_LOCK_FLAG_USER_CLEAR (1U << 3) +#define ANDROID_SUSPEND_LOCK_FLAG_USER_INC (1U << 4) +#define ANDROID_SUSPEND_LOCK_FLAG_USER_DEC (1U << 5) +#define ANDROID_SUSPEND_LOCK_FLAG_USER_VISIBLE_MASK (0x1fU << 1) +#endif +#define ANDROID_SUSPEND_LOCK_AUTO_EXPIRE (1U << 6) +#define ANDROID_SUSPEND_LOCK_ACTIVE (1U << 7) + + +typedef struct android_early_suspend android_early_suspend_t; +struct android_early_suspend +{ + struct list_head link; + int level; + void (*suspend)(android_early_suspend_t *h); + void (*resume)(android_early_suspend_t *h); +}; + +typedef enum { + ANDROID_CHARGING_STATE_UNKNOWN, + ANDROID_CHARGING_STATE_DISCHARGE, + ANDROID_CHARGING_STATE_MAINTAIN, // or trickle + ANDROID_CHARGING_STATE_SLOW, + ANDROID_CHARGING_STATE_NORMAL, + ANDROID_CHARGING_STATE_FAST, + ANDROID_CHARGING_STATE_OVERHEAT +} android_charging_state_t; + +//android_suspend_lock_t *android_allocate_suspend_lock(const char *debug_name); +//void android_free_suspend_lock(android_suspend_lock_t *lock); +int android_init_suspend_lock(android_suspend_lock_t *lock); +void android_uninit_suspend_lock(android_suspend_lock_t *lock); +void android_lock_suspend(android_suspend_lock_t *lock); +void android_lock_suspend_auto_expire(android_suspend_lock_t *lock, int timeout); +void android_unlock_suspend(android_suspend_lock_t *lock); +void android_power_wakeup(int notification); /* notification = 
0: normal wakeup, notification = 1: temporary wakeup */ + +int android_power_is_driver_suspended(void); + +void android_register_early_suspend(android_early_suspend_t *handler); +void android_unregister_early_suspend(android_early_suspend_t *handler); + +void android_power_set_battery_level(int level); // level 0-100 +void android_power_set_charging_state(android_charging_state_t state); + +#endif + diff -urN linux-2.6.22.5/include/linux/binder_module.h linux-2.6.22.5-android/include/linux/binder_module.h --- linux-2.6.22.5/include/linux/binder_module.h 1970-01-01 10:00:00.000000000 +1000 +++ linux-2.6.22.5-android/include/linux/binder_module.h 2007-11-20 08:46:07.734255122 +1100 @@ -0,0 +1,413 @@ +/* + * Copyright (c) 2005 Palmsource, Inc. + * + * This software is licensed as described in the file LICENSE, which + * you should have received as part of this distribution. The terms + * are also available at http://www.openbinder.org/license.html. + * + * This software consists of voluntary contributions made by many + * individuals. For the exact contribution history, see the revision + * history and logs, available at http://www.openbinder.org + */ + +#ifndef _BINDER_MODULE_H_ +#define _BINDER_MODULE_H_ + +#include + +#if TARGET_HOST == TARGET_HOST_PALMOS +#include +#include +#endif + +#ifdef __cplusplus +#if _SUPPORTS_NAMESPACE +namespace openbinder { +namespace support { +#endif +#endif + +// These are pre-packed type constants for the object type codes. +enum { + kPackedLargeBinderType = B_PACK_LARGE_TYPE(B_BINDER_TYPE), + kPackedLargeBinderWeakType = B_PACK_LARGE_TYPE(B_BINDER_WEAK_TYPE), + kPackedLargeBinderHandleType = B_PACK_LARGE_TYPE(B_BINDER_HANDLE_TYPE), + kPackedLargeBinderWeakHandleType = B_PACK_LARGE_TYPE(B_BINDER_WEAK_HANDLE_TYPE), + kPackedLargeBinderNodeType = B_PACK_LARGE_TYPE(B_BINDER_NODE_TYPE), + kPackedLargeBinderWeakNodeType = B_PACK_LARGE_TYPE(B_BINDER_WEAK_NODE_TYPE), +}; + +// Internal data structure used by driver. 
+struct binder_node; + +// This is the flattened representation of a Binder object for transfer +// between processes. The 'offsets' supplied as part of a binder transaction +// contains offsets into the data where these structures occur. The Binder +// driver takes care of re-writing the structure type and data as it moves +// between processes. +// +// Note that this is very intentionally designed to be the same as a user-space +// large_flat_data structure holding 8 bytes. The IPC mechanism requires that +// this structure be at least 8 bytes large. +typedef struct flat_binder_object +{ + // 8 bytes for large_flat_header. + unsigned long type; + unsigned long length; + + // 8 bytes of data. + union { + void* binder; // local object + signed long handle; // remote object + struct binder_node* node; // driver node + }; + void* cookie; // extra data associated with local object +} flat_binder_object_t; + +/* + * On 64-bit platforms where user code may run in 32-bits the driver must + * translate the buffer (and local binder) addresses appropriately. + */ + +typedef struct binder_write_read { + signed long write_size; // bytes to write + signed long write_consumed; // bytes consumed by driver (for ERESTARTSYS) + unsigned long write_buffer; + signed long read_size; // bytes to read + signed long read_consumed; // bytes consumed by driver (for ERESTARTSYS) + unsigned long read_buffer; +} binder_write_read_t; + +// Use with BINDER_VERSION, driver fills in fields. +typedef struct binder_version { + signed long protocol_version; // driver protocol version -- increment with incompatible change +} binder_version_t; + +// This is the current protocol version. 
+#define BINDER_CURRENT_PROTOCOL_VERSION 5 + +#define BINDER_IOC_MAGIC 'b' +#define BINDER_WRITE_READ _IOWR(BINDER_IOC_MAGIC, 1, binder_write_read_t) +#define BINDER_SET_WAKEUP_TIME _IOW(BINDER_IOC_MAGIC, 2, binder_wakeup_time_t) +#define BINDER_SET_IDLE_TIMEOUT _IOW(BINDER_IOC_MAGIC, 3, bigtime_t) +#define BINDER_SET_REPLY_TIMEOUT _IOW(BINDER_IOC_MAGIC, 4, bigtime_t) +#define BINDER_SET_MAX_THREADS _IOW(BINDER_IOC_MAGIC, 5, size_t) +#define BINDER_SET_IDLE_PRIORITY _IOW(BINDER_IOC_MAGIC, 6, int) +#define BINDER_SET_CONTEXT_MGR _IOW(BINDER_IOC_MAGIC, 7, int) +#define BINDER_THREAD_EXIT _IOW(BINDER_IOC_MAGIC, 8, int) +#define BINDER_VERSION _IOWR(BINDER_IOC_MAGIC, 9, binder_version_t) +#define BINDER_IOC_MAXNR 9 + +// NOTE: Two special error codes you should check for when calling +// in to the driver are: +// +// EINTR -- The operation has been interrupted. This should be +// handled by retrying the ioctl() until a different error code +// is returned. +// +// ECONNREFUSED -- The driver is no longer accepting operations +// from your process. That is, the process is being destroyed. +// You should handle this by exiting from your process. Note +// that once this error code is returned, all further calls to +// the driver from any thread will return this same code. + +typedef int64_t bigtime_t; + +enum transaction_flags { + tfInline = 0x01, // not yet implemented + tfSynchronous = 0x02, // obsolete + tfRootObject = 0x04, // contents are the component's root object + tfStatusCode = 0x08 // contents are a 32-bit status code +}; + +typedef struct binder_transaction_data +{ + // The first two are only used for bcTRANSACTION and brTRANSACTION, + // identifying the target and contents of the transaction. 
+ union { + unsigned long handle; // target descriptor of command transaction + void *ptr; // target descriptor of return transaction + } target; + void* cookie; // target object cookie + unsigned int code; // transaction command + + // General information about the transaction. + unsigned int flags; + int priority; // requested/current thread priority + size_t data_size; // number of bytes of data + size_t offsets_size; // number of bytes of flat_binder_object offsets + + // If this transaction is inline, the data immediately + // follows here; otherwise, it ends with a pointer to + // the data buffer. + union { + struct { + const void *buffer; // transaction data + const void *offsets; // offsets to flat_binder_object structs + } ptr; + uint8_t buf[8]; + } data; +} binder_transaction_data_t; + +typedef struct binder_wakeup_time +{ + bigtime_t time; + int priority; +} binder_wakeup_time_t; + +enum BinderDriverReturnProtocol { + brERROR = -1, + /* + int: error code + */ + + brOK = 0, + brTIMEOUT, + brWAKEUP, + /* No parameters! */ + + brTRANSACTION, + brREPLY, + /* + binder_transaction_data: the received command. + */ + + brACQUIRE_RESULT, + /* + int: 0 if the last bcATTEMPT_ACQUIRE was not successful. + Else the remote object has acquired a primary reference. + */ + + brDEAD_REPLY, + /* + The target of the last transaction (either a bcTRANSACTION or + a bcATTEMPT_ACQUIRE) is no longer with us. No parameters. + */ + + brTRANSACTION_COMPLETE, + /* + No parameters... always refers to the last transaction requested + (including replies). Note that this will be sent even for asynchronous + transactions. + */ + + brINCREFS, + brACQUIRE, + brRELEASE, + brDECREFS, + /* + void *: ptr to binder + void *: cookie for binder + */ + + brATTEMPT_ACQUIRE, + /* + int: priority + void *: ptr to binder + void *: cookie for binder + */ + + brEVENT_OCCURRED, + /* + This is returned when the bcSET_NEXT_EVENT_TIME has elapsed. 
+ At this point the next event time is set to B_INFINITE_TIMEOUT, + so you must send another bcSET_NEXT_EVENT_TIME command if you + have another event pending. + */ + + brNOOP, + /* + * No parameters. Do nothing and examine the next command. It exists + * primarily so that we can replace it with a brSPAWN_LOOPER command. + */ + + brSPAWN_LOOPER, + /* + * No parameters. The driver has determined that a process has no threads + * waiting to service incoming transactions. When a process receives this + * command, it must spawn a new service thread and register it via + * bcENTER_LOOPER. + */ + + brFINISHED, + + brDEAD_BINDER, + /* + void *: cookie + */ + brCLEAR_DEATH_NOTIFICATION_DONE, + /* + void *: cookie + */ + + brFAILED_REPLY + /* + The last transaction (either a bcTRANSACTION or + a bcATTEMPT_ACQUIRE) failed (e.g. out of memory). No parameters. + */ +}; + +enum BinderDriverCommandProtocol { + bcNOOP = 0, + /* No parameters! */ + + bcTRANSACTION, + bcREPLY, + /* + binder_transaction_data: the sent command. + */ + + bcACQUIRE_RESULT, + /* + int: 0 if the last brATTEMPT_ACQUIRE was not successful. + Else you have acquired a primary reference on the object. + */ + + bcFREE_BUFFER, + /* + void *: ptr to transaction data received on a read + */ + + bcTRANSACTION_COMPLETE, + /* + No parameters... send when finishing an asynchronous transaction. + */ + + bcINCREFS, + bcACQUIRE, + bcRELEASE, + bcDECREFS, + /* + int: descriptor + */ + + bcINCREFS_DONE, + bcACQUIRE_DONE, + /* + void *: ptr to binder + void *: cookie for binder + */ + + bcATTEMPT_ACQUIRE, + /* + int: priority + int: descriptor + */ + + bcRETRIEVE_ROOT_OBJECT, + /* + int: process ID + */ + + bcSET_THREAD_ENTRY, + /* + void *: thread entry function for new threads created to handle tasks + void *: argument passed to those threads + */ + + bcREGISTER_LOOPER, + /* + No parameters. + Register a spawned looper thread with the device. 
This must be + called by the function that is supplied in bcSET_THREAD_ENTRY as + part of its initialization with the binder. + */ + + bcENTER_LOOPER, + bcEXIT_LOOPER, + /* + No parameters. + These two commands are sent as an application-level thread + enters and exits the binder loop, respectively. They are + used so the binder can have an accurate count of the number + of looping threads it has available. + */ + + bcSYNC, + /* + No parameters. + Upon receiving this command, the driver waits until all + pending asynchronous transactions have completed. + */ + +#if 0 + bcCATCH_ROOT_OBJECTS, + /* + No parameters. + Call this to have your team start catching root objects + published by other teams that are spawned outside of the binder. + When this happens, you will receive a brTRANSACTION with the + tfRootObject flag set. (Note that this is distinct from receiving + normal root objects, which are a brREPLY.) + */ +#endif + + bcSTOP_PROCESS, + /* + int: descriptor of process's root object + int: 1 to stop immediately, 0 when root object is released + */ + + bcSTOP_SELF, + /* + int: 1 to stop immediately, 0 when root object is released + */ + + bcREQUEST_DEATH_NOTIFICATION, + /* + void *: ptr to binder + void *: cookie + */ + + bcCLEAR_DEATH_NOTIFICATION, + /* + void *: ptr to binder + void *: cookie + */ + bcDEAD_BINDER_DONE + /* + void *: cookie + */ +}; + +#if 0 +/* Parameters for BINDER_READ_WRITE ioctl. */ +#if BINDER_DEBUG_LIB + +struct binder_write_read +{ + ssize_t write_size; + const void* write_buffer; + ssize_t read_size; + void* read_buffer; +}; + + +/* Below are calls to access the binder when debugging the driver from + user space by compiling it as libbinderdbg and linking libbe2 with it. 
*/ + +extern int open_binder(int teamID=0); +extern status_t close_binder(int desc); +extern status_t ioctl_binder(int desc, int cmd, void *data, int len); +extern ssize_t read_binder(int desc, void *data, size_t numBytes); +extern ssize_t write_binder(int desc, void *data, size_t numBytes); + +#else + +#include +inline int open_binder(int ) { return open("/dev/misc/binder2",O_RDWR|O_CLOEXEC); }; +inline status_t close_binder(int desc) { return close(desc); }; +inline status_t ioctl_binder(int desc, int cmd, void *data, int len) { return ioctl(desc,cmd,data,len); }; +inline ssize_t read_binder(int desc, void *data, size_t numBytes) { return read(desc,data,numBytes); }; +inline ssize_t write_binder(int desc, void *data, size_t numBytes) { return write(desc,data,numBytes); }; + +#endif +#endif + +#ifdef __cplusplus +#if _SUPPORTS_NAMESPACE +} } // namespace openbinder::support +#endif +#endif + +#endif // _BINDER_MODULE_H_ diff -urN linux-2.6.22.5/include/linux/binder_type_constants.h linux-2.6.22.5-android/include/linux/binder_type_constants.h --- linux-2.6.22.5/include/linux/binder_type_constants.h 1970-01-01 10:00:00.000000000 +1000 +++ linux-2.6.22.5-android/include/linux/binder_type_constants.h 2007-11-20 08:46:07.734255122 +1100 @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2005 Palmsource, Inc. + * + * This software is licensed as described in the file LICENSE, which + * you should have received as part of this distribution. The terms + * are also available at http://www.openbinder.org/license.html. + * + * This software consists of voluntary contributions made by many + * individuals. For the exact contribution history, see the revision + * history and logs, available at http://www.openbinder.org + */ + +#ifndef _SUPPORT_TYPECONSTANTS_H +#define _SUPPORT_TYPECONSTANTS_H + +/*! @file support/TypeConstants.h + @ingroup CoreSupportUtilities + @brief Format and standard definitions of SValue type codes. 
+*/ + +#ifdef __cplusplus +#if _SUPPORTS_NAMESPACE +namespace openbinder { +namespace support { +#endif +#endif + +/*! @addtogroup CoreSupportUtilities + @{ +*/ + +/*-------------------------------------------------------------*/ +/*----- Data Types --------------------------------------------*/ + +/*! @name Type Code Definitions + Type codes are 32-bit integers. The upper 24 bits are the + the code, and the lower 8 bits are metadata. The code is + constructed as 3 characters. Codes containing only the + characters a-z and _, and codes whose last letter is not + alphabetic (a-zA-Z), are reserved for use by the system. + Type codes that end with the character '*' contain + pointers to external objects. + Type codes that end with the character '#' are in a special + namespace reserved for SDimth units. */ +//@{ + +//! Type code manipulation. +enum { + B_TYPE_CODE_MASK = 0x7f7f7f00, // Usable bits for the type code + B_TYPE_CODE_SHIFT = 8, // Where code appears. + + B_TYPE_LENGTH_MASK = 0x00000007, // Usable bits for the data length + B_TYPE_LENGTH_MAX = 0x00000004, // Largest length that can be encoded in type + B_TYPE_LENGTH_LARGE = 0x00000005, // Value when length is > 4 bytes + B_TYPE_LENGTH_MAP = 0x00000007, // For use by SValue + + B_TYPE_BYTEORDER_MASK = 0x80000080, // Bits used to check byte order + B_TYPE_BYTEORDER_NORMAL = 0x00000080, // This bit is set if the byte order is okay + B_TYPE_BYTEORDER_SWAPPED = 0x80000000 // This bit is set if the byte order is swapped +}; + +//! Pack a small (size <= B_TYPE_LENGTH_MAX) type code from its constituent parts. +#define B_PACK_SMALL_TYPE(code, length) (((code)&B_TYPE_CODE_MASK) | (length) | B_TYPE_BYTEORDER_NORMAL) +//! Pack a large (size > B_TYPE_LENGTH_MAX) type code from its constituent parts. +#define B_PACK_LARGE_TYPE(code) (((code)&B_TYPE_CODE_MASK) | B_TYPE_LENGTH_LARGE | B_TYPE_BYTEORDER_NORMAL) +//! Retrieve type information from a packed type code. 
+#define B_UNPACK_TYPE_CODE(type) ((type)&B_TYPE_CODE_MASK) +//! Retrieve size information from a packaed type code. +#define B_UNPACK_TYPE_LENGTH(type) ((type)&B_TYPE_LENGTH_MASK) + +//! Build a valid code for a type code. +/*! Ensures only correct bits are used, and shifts value into correct location. */ +#define B_TYPE_CODE(code) (((code)< +** +** This software is licensed under the terms of the GNU General Public +** License version 2, as published by the Free Software Foundation, and +** may be copied, distributed, and modified under those terms. +** +** This program is distributed in the hope that it will be useful, +** but WITHOUT ANY WARRANTY; without even the implied warranty of +** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +** GNU General Public License for more details. +** +*/ + +#ifndef _LINUX_LOGGER_H +#define _LINUX_LOGGER_H + +#include +#include + +struct logger_entry { + __u16 len; /* length of the payload */ + __u16 __pad; /* no matter what, we get 2 bytes of padding */ + __s32 pid; /* generating process's pid */ + __s32 tid; /* generating process's tid */ + __s32 sec; /* seconds since Epoch */ + __s32 nsec; /* nanoseconds */ + char msg[0]; /* the entry's payload */ +}; + +#define LOGGER_LOG_RADIO "log_radio" /* radio-related messages */ +#define LOGGER_LOG_EVENTS "log_events" /* system/hardware events */ +#define LOGGER_LOG_MAIN "log_main" /* everything else */ + +#define LOGGER_ENTRY_MAX_LEN (4*1024) +#define LOGGER_ENTRY_MAX_PAYLOAD \ + (LOGGER_ENTRY_MAX_LEN - sizeof(struct logger_entry)) + +#define __LOGGERIO 0xAE + +#define LOGGER_GET_LOG_BUF_SIZE _IO(__LOGGERIO, 1) /* size of log */ +#define LOGGER_GET_LOG_LEN _IO(__LOGGERIO, 2) /* used log len */ +#define LOGGER_GET_NEXT_ENTRY_LEN _IO(__LOGGERIO, 3) /* next entry len */ +#define LOGGER_FLUSH_LOG _IO(__LOGGERIO, 4) /* flush log */ + +#endif /* _LINUX_LOGGER_H */ diff -urN linux-2.6.22.5/init/Kconfig linux-2.6.22.5-android/init/Kconfig --- linux-2.6.22.5/init/Kconfig 
2007-08-23 09:23:54.000000000 +1000 +++ linux-2.6.22.5-android/init/Kconfig 2007-11-20 08:46:07.734255122 +1100 @@ -361,6 +361,12 @@ config SYSCTL bool +config PANIC_TIMEOUT + int "Default panic timeout" + default 0 + help + Set default panic timeout. + menuconfig EMBEDDED bool "Configure standard kernel features (for small systems)" help diff -urN linux-2.6.22.5/init/Kconfig.orig linux-2.6.22.5-android/init/Kconfig.orig --- linux-2.6.22.5/init/Kconfig.orig 1970-01-01 10:00:00.000000000 +1000 +++ linux-2.6.22.5-android/init/Kconfig.orig 2007-11-20 08:23:55.433291472 +1100 @@ -0,0 +1,691 @@ +config DEFCONFIG_LIST + string + depends on !UML + option defconfig_list + default "/lib/modules/$UNAME_RELEASE/.config" + default "/etc/kernel-config" + default "/boot/config-$UNAME_RELEASE" + default "arch/$ARCH/defconfig" + +menu "Code maturity level options" + +config EXPERIMENTAL + bool "Prompt for development and/or incomplete code/drivers" + ---help--- + Some of the various things that Linux supports (such as network + drivers, file systems, network protocols, etc.) can be in a state + of development where the functionality, stability, or the level of + testing is not yet high enough for general use. This is usually + known as the "alpha-test" phase among developers. If a feature is + currently in alpha-test, then the developers usually discourage + uninformed widespread use of this feature by the general public to + avoid "Why doesn't this work?" type mail messages. However, active + testing and use of these systems is welcomed. Just be aware that it + may not meet the normal level of reliability or it may fail to work + in some special cases. Detailed bug reports from people familiar + with the kernel internals are usually welcomed by the developers + (before submitting bug reports, please read the documents + , , , + , and + in the kernel source). + + This option will also make obsoleted drivers available. 
These are + drivers that have been replaced by something else, and/or are + scheduled to be removed in a future kernel release. + + Unless you intend to help test and develop a feature or driver that + falls into this category, or you have a situation that requires + using these features, you should probably say N here, which will + cause the configurator to present you with fewer choices. If + you say Y here, you will be offered the choice of using features or + drivers that are currently considered to be in the alpha-test phase. + +config BROKEN + bool + +config BROKEN_ON_SMP + bool + depends on BROKEN || !SMP + default y + +config LOCK_KERNEL + bool + depends on SMP || PREEMPT + default y + +config INIT_ENV_ARG_LIMIT + int + default 32 if !UML + default 128 if UML + help + Maximum of each of the number of arguments and environment + variables passed to init from the kernel command line. + +endmenu + +menu "General setup" + +config LOCALVERSION + string "Local version - append to kernel release" + help + Append an extra string to the end of your kernel version. + This will show up when you type uname, for example. + The string you set here will be appended after the contents of + any files with a filename matching localversion* in your + object and source tree, in that order. Your total string can + be a maximum of 64 characters. + +config LOCALVERSION_AUTO + bool "Automatically append version information to the version string" + default y + help + This will try to automatically determine if the current tree is a + release tree by looking for git tags that belong to the current + top of tree revision. + + A string of the format -gxxxxxxxx will be added to the localversion + if a git-based tree is found. The string generated by this will be + appended after any matching localversion* files, and after the value + set in CONFIG_LOCALVERSION. 
+ + (The actual string used here is the first eight characters produced + by running the command: + + $ git rev-parse --verify HEAD + + which is done within the script "scripts/setlocalversion".) + +config SWAP + bool "Support for paging of anonymous memory (swap)" + depends on MMU && BLOCK + default y + help + This option allows you to choose whether you want to have support + for so called swap devices or swap files in your kernel that are + used to provide more virtual memory than the actual RAM present + in your computer. If unsure say Y. + +config SYSVIPC + bool "System V IPC" + ---help--- + Inter Process Communication is a suite of library functions and + system calls which let processes (running programs) synchronize and + exchange information. It is generally considered to be a good thing, + and some programs won't run unless you say Y here. In particular, if + you want to run the DOS emulator dosemu under Linux (read the + DOSEMU-HOWTO, available from ), + you'll need to say Y here. + + You can find documentation about IPC with "info ipc" and also in + section 6.4 of the Linux Programmer's Guide, available from + . + +config IPC_NS + bool "IPC Namespaces" + depends on SYSVIPC + default n + help + Support ipc namespaces. This allows containers, i.e. virtual + environments, to use ipc namespaces to provide different ipc + objects for different servers. If unsure, say N. + +config SYSVIPC_SYSCTL + bool + depends on SYSVIPC + depends on SYSCTL + default y + +config POSIX_MQUEUE + bool "POSIX Message Queues" + depends on NET && EXPERIMENTAL + ---help--- + POSIX variant of message queues is a part of IPC. In POSIX message + queues every message has a priority which decides about succession + of receiving it by a process. If you want to compile and run + programs written e.g. for Solaris with use of its POSIX message + queues (functions mq_*) say Y here. 
+ + POSIX message queues are visible as a filesystem called 'mqueue' + and can be mounted somewhere if you want to do filesystem + operations on message queues. + + If unsure, say Y. + +config BSD_PROCESS_ACCT + bool "BSD Process Accounting" + help + If you say Y here, a user level program will be able to instruct the + kernel (via a special system call) to write process accounting + information to a file: whenever a process exits, information about + that process will be appended to the file by the kernel. The + information includes things such as creation time, owning user, + command name, memory usage, controlling terminal etc. (the complete + list is in the struct acct in ). It is + up to the user level program to do useful things with this + information. This is generally a good idea, so say Y. + +config BSD_PROCESS_ACCT_V3 + bool "BSD Process Accounting version 3 file format" + depends on BSD_PROCESS_ACCT + default n + help + If you say Y here, the process accounting information is written + in a new file format that also logs the process IDs of each + process and it's parent. Note that this file format is incompatible + with previous v0/v1/v2 file formats, so you will need updated tools + for processing it. A preliminary version of these tools is available + at . + +config TASKSTATS + bool "Export task/process statistics through netlink (EXPERIMENTAL)" + depends on NET + default n + help + Export selected statistics for tasks/processes through the + generic netlink interface. Unlike BSD process accounting, the + statistics are available during the lifetime of tasks/processes as + responses to commands. Like BSD accounting, they are sent to user + space on task exit. + + Say N if unsure. + +config TASK_DELAY_ACCT + bool "Enable per-task delay accounting (EXPERIMENTAL)" + depends on TASKSTATS + help + Collect information on time spent by a task waiting for system + resources like cpu, synchronous block I/O completion and swapping + in pages. 
Such statistics can help in setting a task's priorities + relative to other tasks for cpu, io, rss limits etc. + + Say N if unsure. + +config TASK_XACCT + bool "Enable extended accounting over taskstats (EXPERIMENTAL)" + depends on TASKSTATS + help + Collect extended task accounting data and send the data + to userland for processing over the taskstats interface. + + Say N if unsure. + +config TASK_IO_ACCOUNTING + bool "Enable per-task storage I/O accounting (EXPERIMENTAL)" + depends on TASK_XACCT + help + Collect information on the number of bytes of storage I/O which this + task has caused. + + Say N if unsure. + +config UTS_NS + bool "UTS Namespaces" + default n + help + Support uts namespaces. This allows containers, i.e. + vservers, to use uts namespaces to provide different + uts info for different servers. If unsure, say N. + +config AUDIT + bool "Auditing support" + depends on NET + help + Enable auditing infrastructure that can be used with another + kernel subsystem, such as SELinux (which requires this for + logging of avc messages output). Does not do system-call + auditing without CONFIG_AUDITSYSCALL. + +config AUDITSYSCALL + bool "Enable system-call auditing support" + depends on AUDIT && (X86 || PPC || PPC64 || S390 || IA64 || UML || SPARC64) + default y if SECURITY_SELINUX + help + Enable low-overhead system-call auditing infrastructure that + can be used independently or with another kernel subsystem, + such as SELinux. To use audit's filesystem watch feature, please + ensure that INOTIFY is configured. + +config IKCONFIG + tristate "Kernel .config support" + ---help--- + This option enables the complete Linux kernel ".config" file + contents to be saved in the kernel. It provides documentation + of which kernel options are used in a running kernel or in an + on-disk kernel. 
This information can be extracted from the kernel + image file with the script scripts/extract-ikconfig and used as + input to rebuild the current kernel or to build another kernel. + It can also be extracted from a running kernel by reading + /proc/config.gz if enabled (below). + +config IKCONFIG_PROC + bool "Enable access to .config through /proc/config.gz" + depends on IKCONFIG && PROC_FS + ---help--- + This option enables access to the kernel configuration file + through /proc/config.gz. + +config LOG_BUF_SHIFT + int "Kernel log buffer size (16 => 64KB, 17 => 128KB)" + range 12 21 + default 17 if S390 || LOCKDEP + default 16 if X86_NUMAQ || IA64 + default 15 if SMP + default 14 + help + Select kernel log buffer size as a power of 2. + Defaults and Examples: + 17 => 128 KB for S/390 + 16 => 64 KB for x86 NUMAQ or IA-64 + 15 => 32 KB for SMP + 14 => 16 KB for uniprocessor + 13 => 8 KB + 12 => 4 KB + +config CPUSETS + bool "Cpuset support" + depends on SMP + help + This option will let you create and manage CPUSETs which + allow dynamically partitioning a system into sets of CPUs and + Memory Nodes and assigning tasks to run only within those sets. + This is primarily useful on large SMP or NUMA systems. + + Say N if unsure. + +config SYSFS_DEPRECATED + bool "Create deprecated sysfs files" + default y + help + This option creates deprecated symlinks such as the + "device"-link, the :-link, and the + "bus"-link. It may also add deprecated key in the + uevent environment. + None of these features or values should be used today, as + they export driver core implementation details to userspace + or export properties which can't be kept stable across kernel + releases. + + If enabled, this option will also move any device structures + that belong to a class, back into the /sys/class hierarchy, in + order to support older versions of udev. + + If you are using a distro that was released in 2006 or later, + it should be safe to say N here. 
+ +config RELAY + bool "Kernel->user space relay support (formerly relayfs)" + help + This option enables support for relay interface support in + certain file systems (such as debugfs). + It is designed to provide an efficient mechanism for tools and + facilities to relay large amounts of data from kernel space to + user space. + + If unsure, say N. + +config BLK_DEV_INITRD + bool "Initial RAM filesystem and RAM disk (initramfs/initrd) support" + depends on BROKEN || !FRV + help + The initial RAM filesystem is a ramfs which is loaded by the + boot loader (loadlin or lilo) and that is mounted as root + before the normal boot procedure. It is typically used to + load modules needed to mount the "real" root file system, + etc. See for details. + + If RAM disk support (BLK_DEV_RAM) is also included, this + also enables initial RAM disk (initrd) support and adds + 15 Kbytes (more on some other architectures) to the kernel size. + + If unsure say Y. + +if BLK_DEV_INITRD + +source "usr/Kconfig" + +endif + +config CC_OPTIMIZE_FOR_SIZE + bool "Optimize for size (Look out for broken compilers!)" + default y + depends on ARM || H8300 || EXPERIMENTAL + help + Enabling this option will pass "-Os" instead of "-O2" to gcc + resulting in a smaller kernel. + + WARNING: some versions of gcc may generate incorrect code with this + option. If problems are observed, a gcc upgrade may be needed. + + If unsure, say N. + +config SYSCTL + bool + +menuconfig EMBEDDED + bool "Configure standard kernel features (for small systems)" + help + This option allows certain base kernel options and settings + to be disabled or tweaked. This is for specialized + environments which can tolerate a "non-standard" kernel. + Only use this if you really know what you are doing. 
+ +config UID16 + bool "Enable 16-bit UID system calls" if EMBEDDED + depends on ARM || BFIN || CRIS || FRV || H8300 || X86_32 || M68K || (S390 && !64BIT) || SUPERH || SPARC32 || (SPARC64 && SPARC32_COMPAT) || UML || (X86_64 && IA32_EMULATION) + default y + help + This enables the legacy 16-bit UID syscall wrappers. + +config SYSCTL_SYSCALL + bool "Sysctl syscall support" if EMBEDDED + default y + select SYSCTL + ---help--- + sys_sysctl uses binary paths that have been found challenging + to properly maintain and use. The interface in /proc/sys + using paths with ascii names is now the primary path to this + information. + + Almost nothing using the binary sysctl interface so if you are + trying to save some space it is probably safe to disable this, + making your kernel marginally smaller. + + If unsure say Y here. + +config KALLSYMS + bool "Load all symbols for debugging/ksymoops" if EMBEDDED + default y + help + Say Y here to let the kernel print out symbolic crash information and + symbolic stack backtraces. This increases the size of the kernel + somewhat, as all symbols have to be loaded into the kernel image. + +config KALLSYMS_ALL + bool "Include all symbols in kallsyms" + depends on DEBUG_KERNEL && KALLSYMS + help + Normally kallsyms only contains the symbols of functions, for nicer + OOPS messages. Some debuggers can use kallsyms for other + symbols too: say Y here to include all symbols, if you need them + and you don't care about adding 300k to the size of your kernel. + + Say N. + +config KALLSYMS_EXTRA_PASS + bool "Do an extra kallsyms pass" + depends on KALLSYMS + help + If kallsyms is not working correctly, the build will fail with + inconsistent kallsyms data. If that occurs, log a bug report and + turn on KALLSYMS_EXTRA_PASS which should result in a stable build. + Always say N here unless you find a bug in kallsyms, which must be + reported. KALLSYMS_EXTRA_PASS is only a temporary workaround while + you wait for kallsyms to be fixed. 
+ + +config HOTPLUG + bool "Support for hot-pluggable devices" if EMBEDDED + default y + help + This option is provided for the case where no hotplug or uevent + capabilities is wanted by the kernel. You should only consider + disabling this option for embedded systems that do not use modules, a + dynamic /dev tree, or dynamic device discovery. Just say Y. + +config PRINTK + default y + bool "Enable support for printk" if EMBEDDED + help + This option enables normal printk support. Removing it + eliminates most of the message strings from the kernel image + and makes the kernel more or less silent. As this makes it + very difficult to diagnose system problems, saying N here is + strongly discouraged. + +config BUG + bool "BUG() support" if EMBEDDED + default y + help + Disabling this option eliminates support for BUG and WARN, reducing + the size of your kernel image and potentially quietly ignoring + numerous fatal conditions. You should only consider disabling this + option for embedded systems with no facilities for reporting errors. + Just say Y. + +config ELF_CORE + default y + bool "Enable ELF core dumps" if EMBEDDED + help + Enable support for generating core dumps. Disabling saves about 4k. + +config BASE_FULL + default y + bool "Enable full-sized data structures for core" if EMBEDDED + help + Disabling this option reduces the size of miscellaneous core + kernel data structures. This saves memory on small machines, + but may reduce performance. + +config FUTEX + bool "Enable futex support" if EMBEDDED + default y + select RT_MUTEXES + help + Disabling this option will cause the kernel to be built without + support for "fast userspace mutexes". The resulting kernel may not + run glibc-based applications correctly. + +config ANON_INODES + bool "Enable anonymous inode source" if EMBEDDED + default y + help + Anonymous inode source for pseudo-files like epoll, signalfd, + timerfd and eventfd. + + If unsure, say Y. 
+ +config EPOLL + bool "Enable eventpoll support" if EMBEDDED + default y + depends on ANON_INODES + help + Disabling this option will cause the kernel to be built without + support for epoll family of system calls. + +config SIGNALFD + bool "Enable signalfd() system call" if EMBEDDED + depends on ANON_INODES + default y + help + Enable the signalfd() system call that allows to receive signals + on a file descriptor. + + If unsure, say Y. + +config TIMERFD + bool "Enable timerfd() system call" if EMBEDDED + depends on ANON_INODES + default y + help + Enable the timerfd() system call that allows to receive timer + events on a file descriptor. + + If unsure, say Y. + +config EVENTFD + bool "Enable eventfd() system call" if EMBEDDED + depends on ANON_INODES + default y + help + Enable the eventfd() system call that allows to receive both + kernel notification (ie. KAIO) or userspace notifications. + + If unsure, say Y. + +config SHMEM + bool "Use full shmem filesystem" if EMBEDDED + default y + depends on MMU + help + The shmem is an internal filesystem used to manage shared memory. + It is backed by swap and manages resource limits. It is also exported + to userspace as tmpfs if TMPFS is enabled. Disabling this + option replaces shmem and tmpfs with the much simpler ramfs code, + which may be appropriate on small systems without swap. + +config VM_EVENT_COUNTERS + default y + bool "Enable VM event counters for /proc/vmstat" if EMBEDDED + help + VM event counters are needed for event counts to be shown. + This option allows the disabling of the VM event counters + on EMBEDDED systems. /proc/vmstat will only show page counts + if VM event counters are disabled. + +config SLUB_DEBUG + default y + bool "Enable SLUB debugging support" if EMBEDDED + depends on SLUB + help + SLUB has extensive debug support features. Disabling these can + result in significant savings in code size. This also disables + SLUB sysfs support. 
/sys/slab will not exist and there will be + no support for cache validation etc. + +choice + prompt "Choose SLAB allocator" + default SLAB + help + This option allows to select a slab allocator. + +config SLAB + bool "SLAB" + help + The regular slab allocator that is established and known to work + well in all environments. It organizes cache hot objects in + per cpu and per node queues. SLAB is the default choice for + a slab allocator. + +config SLUB + bool "SLUB (Unqueued Allocator)" + help + SLUB is a slab allocator that minimizes cache line usage + instead of managing queues of cached objects (SLAB approach). + Per cpu caching is realized using slabs of objects instead + of queues of objects. SLUB can use memory efficiently + and has enhanced diagnostics. + +config SLOB + depends on EMBEDDED && !SPARSEMEM + bool "SLOB (Simple Allocator)" + help + SLOB replaces the SLAB allocator with a drastically simpler + allocator. SLOB is more space efficient than SLAB but does not + scale well (single lock for all operations) and is also highly + susceptible to fragmentation. SLUB can accomplish a higher object + density. It is usually better to use SLUB instead of SLOB. + +endchoice + +endmenu # General setup + +config RT_MUTEXES + boolean + select PLIST + +config TINY_SHMEM + default !SHMEM + bool + +config BASE_SMALL + int + default 0 if BASE_FULL + default 1 if !BASE_FULL + +menu "Loadable module support" + +config MODULES + bool "Enable loadable module support" + help + Kernel modules are small pieces of compiled code which can + be inserted in the running kernel, rather than being + permanently built into the kernel. You use the "modprobe" + tool to add (and sometimes remove) them. If you say Y here, + many parts of the kernel can be built as modules (by + answering M instead of Y where indicated): this is most + useful for infrequently used options which are not required + for booting. 
For more information, see the man pages for + modprobe, lsmod, modinfo, insmod and rmmod. + + If you say Y here, you will need to run "make + modules_install" to put the modules under /lib/modules/ + where modprobe can find them (you may need to be root to do + this). + + If unsure, say Y. + +config MODULE_UNLOAD + bool "Module unloading" + depends on MODULES + help + Without this option you will not be able to unload any + modules (note that some modules may not be unloadable + anyway), which makes your kernel slightly smaller and + simpler. If unsure, say Y. + +config MODULE_FORCE_UNLOAD + bool "Forced module unloading" + depends on MODULE_UNLOAD && EXPERIMENTAL + help + This option allows you to force a module to unload, even if the + kernel believes it is unsafe: the kernel will remove the module + without waiting for anyone to stop using it (using the -f option to + rmmod). This is mainly for kernel developers and desperate users. + If unsure, say N. + +config MODVERSIONS + bool "Module versioning support" + depends on MODULES + help + Usually, you have to use modules compiled with your kernel. + Saying Y here makes it sometimes possible to use modules + compiled for different kernels, by adding enough information + to the modules to (hopefully) spot any changes which would + make them incompatible with the kernel you are running. If + unsure, say N. + +config MODULE_SRCVERSION_ALL + bool "Source checksum for all modules" + depends on MODULES + help + Modules which contain a MODULE_VERSION get an extra "srcversion" + field inserted into their modinfo section, which contains a + sum of the source files which made it. This helps maintainers + see exactly which source was used to build a module (since + others sometimes change the module source without updating + the version). With this option, such a "srcversion" field + will be created for all modules. If unsure, say N. 
+ +config KMOD + bool "Automatic kernel module loading" + depends on MODULES + help + Normally when you have selected some parts of the kernel to + be created as kernel modules, you must load them (using the + "modprobe" command) before you can use them. If you say Y + here, some parts of the kernel will be able to load modules + automatically: when a part of the kernel needs a module, it + runs modprobe with the appropriate arguments, thereby + loading the module if it is available. If unsure, say Y. + +config STOP_MACHINE + bool + default y + depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU + help + Need stop_machine() primitive. +endmenu + +menu "Block layer" +source "block/Kconfig" +endmenu diff -urN linux-2.6.22.5/kernel/futex.c linux-2.6.22.5-android/kernel/futex.c --- linux-2.6.22.5/kernel/futex.c 2007-08-23 09:23:54.000000000 +1000 +++ linux-2.6.22.5-android/kernel/futex.c 2007-11-20 08:46:07.734255122 +1100 @@ -1274,8 +1274,8 @@ restart->fn = futex_wait_restart; restart->arg0 = (unsigned long)uaddr; restart->arg1 = (unsigned long)val; - restart->arg2 = (unsigned long)abs_time; - restart->arg3 = 0; + restart->arg2 = abs_time->tv64 & 0xFFFFFFFF; + restart->arg3 = abs_time->tv64 >> 32; if (fshared) restart->arg3 |= ARG3_SHARED; return -ERESTART_RESTARTBLOCK; @@ -1295,13 +1295,14 @@ { u32 __user *uaddr = (u32 __user *)restart->arg0; u32 val = (u32)restart->arg1; - ktime_t *abs_time = (ktime_t *)restart->arg2; + ktime_t abs_time; struct rw_semaphore *fshared = NULL; + abs_time.tv64 = ((u64)restart->arg3 << 32) | (u64) restart->arg2; restart->fn = do_no_restart_syscall; if (restart->arg3 & ARG3_SHARED) fshared = ¤t->mm->mmap_sem; - return (long)futex_wait(uaddr, fshared, val, abs_time); + return (long)futex_wait(uaddr, fshared, val, &abs_time); } diff -urN linux-2.6.22.5/kernel/futex.c.orig linux-2.6.22.5-android/kernel/futex.c.orig --- linux-2.6.22.5/kernel/futex.c.orig 1970-01-01 10:00:00.000000000 +1000 +++ linux-2.6.22.5-android/kernel/futex.c.orig 
2007-11-20 08:23:56.293337270 +1100 @@ -0,0 +1,2105 @@ +/* + * Fast Userspace Mutexes (which I call "Futexes!"). + * (C) Rusty Russell, IBM 2002 + * + * Generalized futexes, futex requeueing, misc fixes by Ingo Molnar + * (C) Copyright 2003 Red Hat Inc, All Rights Reserved + * + * Removed page pinning, fix privately mapped COW pages and other cleanups + * (C) Copyright 2003, 2004 Jamie Lokier + * + * Robust futex support started by Ingo Molnar + * (C) Copyright 2006 Red Hat Inc, All Rights Reserved + * Thanks to Thomas Gleixner for suggestions, analysis and fixes. + * + * PI-futex support started by Ingo Molnar and Thomas Gleixner + * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar + * Copyright (C) 2006 Timesys Corp., Thomas Gleixner + * + * PRIVATE futexes by Eric Dumazet + * Copyright (C) 2007 Eric Dumazet + * + * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly + * enough at me, Linus for the original (flawed) idea, Matthew + * Kirkwood for proof-of-concept implementation. + * + * "The futexes are also cursed." + * "But they come in a choice of three flavours!" + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rtmutex_common.h" + +#define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8) + +/* + * Priority Inheritance state: + */ +struct futex_pi_state { + /* + * list of 'owned' pi_state instances - these have to be + * cleaned up in do_exit() if the task exits prematurely: + */ + struct list_head list; + + /* + * The PI object: + */ + struct rt_mutex pi_mutex; + + struct task_struct *owner; + atomic_t refcount; + + union futex_key key; +}; + +/* + * We use this hashed waitqueue instead of a normal wait_queue_t, so + * we can wake only the relevant ones (hashed queues may be shared). + * + * A futex_q has a woken state, just like tasks have TASK_RUNNING. + * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0. + * The order of wakup is always to make the first condition true, then + * wake up q->waiters, then make the second condition true. + */ +struct futex_q { + struct plist_node list; + wait_queue_head_t waiters; + + /* Which hash list lock to use: */ + spinlock_t *lock_ptr; + + /* Key which the futex is hashed on: */ + union futex_key key; + + /* For fd, sigio sent using these: */ + int fd; + struct file *filp; + + /* Optional priority inheritance state: */ + struct futex_pi_state *pi_state; + struct task_struct *task; +}; + +/* + * Split the global futex_lock into every hash list lock. 
+ */ +struct futex_hash_bucket { + spinlock_t lock; + struct plist_head chain; +}; + +static struct futex_hash_bucket futex_queues[1<both.word, + (sizeof(key->both.word)+sizeof(key->both.ptr))/4, + key->both.offset); + return &futex_queues[hash & ((1 << FUTEX_HASHBITS)-1)]; +} + +/* + * Return 1 if two futex_keys are equal, 0 otherwise. + */ +static inline int match_futex(union futex_key *key1, union futex_key *key2) +{ + return (key1->both.word == key2->both.word + && key1->both.ptr == key2->both.ptr + && key1->both.offset == key2->both.offset); +} + +/** + * get_futex_key - Get parameters which are the keys for a futex. + * @uaddr: virtual address of the futex + * @shared: NULL for a PROCESS_PRIVATE futex, + * ¤t->mm->mmap_sem for a PROCESS_SHARED futex + * @key: address where result is stored. + * + * Returns a negative error code or 0 + * The key words are stored in *key on success. + * + * For shared mappings, it's (page->index, vma->vm_file->f_path.dentry->d_inode, + * offset_within_page). For private mappings, it's (uaddr, current->mm). + * We can usually work out the index without swapping in the page. + * + * fshared is NULL for PROCESS_PRIVATE futexes + * For other futexes, it points to ¤t->mm->mmap_sem and + * caller must have taken the reader lock. but NOT any spinlocks. + */ +int get_futex_key(u32 __user *uaddr, struct rw_semaphore *fshared, + union futex_key *key) +{ + unsigned long address = (unsigned long)uaddr; + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + struct page *page; + int err; + + /* + * The futex address must be "naturally" aligned. + */ + key->both.offset = address % PAGE_SIZE; + if (unlikely((address % sizeof(u32)) != 0)) + return -EINVAL; + address -= key->both.offset; + + /* + * PROCESS_PRIVATE futexes are fast. + * As the mm cannot disappear under us and the 'key' only needs + * virtual address, we dont even have to find the underlying vma. 
+ * Note : We do have to check 'uaddr' is a valid user address, + * but access_ok() should be faster than find_vma() + */ + if (!fshared) { + if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))) + return -EFAULT; + key->private.mm = mm; + key->private.address = address; + return 0; + } + /* + * The futex is hashed differently depending on whether + * it's in a shared or private mapping. So check vma first. + */ + vma = find_extend_vma(mm, address); + if (unlikely(!vma)) + return -EFAULT; + + /* + * Permissions. + */ + if (unlikely((vma->vm_flags & (VM_IO|VM_READ)) != VM_READ)) + return (vma->vm_flags & VM_IO) ? -EPERM : -EACCES; + + /* + * Private mappings are handled in a simple way. + * + * NOTE: When userspace waits on a MAP_SHARED mapping, even if + * it's a read-only handle, it's expected that futexes attach to + * the object not the particular process. Therefore we use + * VM_MAYSHARE here, not VM_SHARED which is restricted to shared + * mappings of _writable_ handles. + */ + if (likely(!(vma->vm_flags & VM_MAYSHARE))) { + key->both.offset |= FUT_OFF_MMSHARED; /* reference taken on mm */ + key->private.mm = mm; + key->private.address = address; + return 0; + } + + /* + * Linear file mappings are also simple. + */ + key->shared.inode = vma->vm_file->f_path.dentry->d_inode; + key->both.offset |= FUT_OFF_INODE; /* inode-based key. */ + if (likely(!(vma->vm_flags & VM_NONLINEAR))) { + key->shared.pgoff = (((address - vma->vm_start) >> PAGE_SHIFT) + + vma->vm_pgoff); + return 0; + } + + /* + * We could walk the page table to read the non-linear + * pte, and get the page index without fetching the page + * from swap. But that's a lot of code to duplicate here + * for a rare case, so we simply fetch the page. 
+ */ + err = get_user_pages(current, mm, address, 1, 0, 0, &page, NULL); + if (err >= 0) { + key->shared.pgoff = + page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); + put_page(page); + return 0; + } + return err; +} +EXPORT_SYMBOL_GPL(get_futex_key); + +/* + * Take a reference to the resource addressed by a key. + * Can be called while holding spinlocks. + * + */ +inline void get_futex_key_refs(union futex_key *key) +{ + if (key->both.ptr == 0) + return; + switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) { + case FUT_OFF_INODE: + atomic_inc(&key->shared.inode->i_count); + break; + case FUT_OFF_MMSHARED: + atomic_inc(&key->private.mm->mm_count); + break; + } +} +EXPORT_SYMBOL_GPL(get_futex_key_refs); + +/* + * Drop a reference to the resource addressed by a key. + * The hash bucket spinlock must not be held. + */ +void drop_futex_key_refs(union futex_key *key) +{ + if (key->both.ptr == 0) + return; + switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) { + case FUT_OFF_INODE: + iput(key->shared.inode); + break; + case FUT_OFF_MMSHARED: + mmdrop(key->private.mm); + break; + } +} +EXPORT_SYMBOL_GPL(drop_futex_key_refs); + +static inline int get_futex_value_locked(u32 *dest, u32 __user *from) +{ + int ret; + + pagefault_disable(); + ret = __copy_from_user_inatomic(dest, from, sizeof(u32)); + pagefault_enable(); + + return ret ? -EFAULT : 0; +} + +/* + * Fault handling. 
+ * if fshared is non NULL, current->mm->mmap_sem is already held + */ +static int futex_handle_fault(unsigned long address, + struct rw_semaphore *fshared, int attempt) +{ + struct vm_area_struct * vma; + struct mm_struct *mm = current->mm; + int ret = -EFAULT; + + if (attempt > 2) + return ret; + + if (!fshared) + down_read(&mm->mmap_sem); + vma = find_vma(mm, address); + if (vma && address >= vma->vm_start && + (vma->vm_flags & VM_WRITE)) { + switch (handle_mm_fault(mm, vma, address, 1)) { + case VM_FAULT_MINOR: + ret = 0; + current->min_flt++; + break; + case VM_FAULT_MAJOR: + ret = 0; + current->maj_flt++; + break; + } + } + if (!fshared) + up_read(&mm->mmap_sem); + return ret; +} + +/* + * PI code: + */ +static int refill_pi_state_cache(void) +{ + struct futex_pi_state *pi_state; + + if (likely(current->pi_state_cache)) + return 0; + + pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL); + + if (!pi_state) + return -ENOMEM; + + INIT_LIST_HEAD(&pi_state->list); + /* pi_mutex gets initialized later */ + pi_state->owner = NULL; + atomic_set(&pi_state->refcount, 1); + + current->pi_state_cache = pi_state; + + return 0; +} + +static struct futex_pi_state * alloc_pi_state(void) +{ + struct futex_pi_state *pi_state = current->pi_state_cache; + + WARN_ON(!pi_state); + current->pi_state_cache = NULL; + + return pi_state; +} + +static void free_pi_state(struct futex_pi_state *pi_state) +{ + if (!atomic_dec_and_test(&pi_state->refcount)) + return; + + /* + * If pi_state->owner is NULL, the owner is most probably dying + * and has cleaned up the pi_state already + */ + if (pi_state->owner) { + spin_lock_irq(&pi_state->owner->pi_lock); + list_del_init(&pi_state->list); + spin_unlock_irq(&pi_state->owner->pi_lock); + + rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner); + } + + if (current->pi_state_cache) + kfree(pi_state); + else { + /* + * pi_state->list is already empty. + * clear pi_state->owner. + * refcount is at 0 - put it back to 1. 
+ */ + pi_state->owner = NULL; + atomic_set(&pi_state->refcount, 1); + current->pi_state_cache = pi_state; + } +} + +/* + * Look up the task based on what TID userspace gave us. + * We dont trust it. + */ +static struct task_struct * futex_find_get_task(pid_t pid) +{ + struct task_struct *p; + + rcu_read_lock(); + p = find_task_by_pid(pid); + + if (!p || ((current->euid != p->euid) && (current->euid != p->uid))) + p = ERR_PTR(-ESRCH); + else + get_task_struct(p); + + rcu_read_unlock(); + + return p; +} + +/* + * This task is holding PI mutexes at exit time => bad. + * Kernel cleans up PI-state, but userspace is likely hosed. + * (Robust-futex cleanup is separate and might save the day for userspace.) + */ +void exit_pi_state_list(struct task_struct *curr) +{ + struct list_head *next, *head = &curr->pi_state_list; + struct futex_pi_state *pi_state; + struct futex_hash_bucket *hb; + union futex_key key; + + /* + * We are a ZOMBIE and nobody can enqueue itself on + * pi_state_list anymore, but we have to be careful + * versus waiters unqueueing themselves: + */ + spin_lock_irq(&curr->pi_lock); + while (!list_empty(head)) { + + next = head->next; + pi_state = list_entry(next, struct futex_pi_state, list); + key = pi_state->key; + hb = hash_futex(&key); + spin_unlock_irq(&curr->pi_lock); + + spin_lock(&hb->lock); + + spin_lock_irq(&curr->pi_lock); + /* + * We dropped the pi-lock, so re-check whether this + * task still owns the PI-state: + */ + if (head->next != next) { + spin_unlock(&hb->lock); + continue; + } + + WARN_ON(pi_state->owner != curr); + WARN_ON(list_empty(&pi_state->list)); + list_del_init(&pi_state->list); + pi_state->owner = NULL; + spin_unlock_irq(&curr->pi_lock); + + rt_mutex_unlock(&pi_state->pi_mutex); + + spin_unlock(&hb->lock); + + spin_lock_irq(&curr->pi_lock); + } + spin_unlock_irq(&curr->pi_lock); +} + +static int +lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, + union futex_key *key, struct futex_pi_state **ps) +{ + struct 
futex_pi_state *pi_state = NULL; + struct futex_q *this, *next; + struct plist_head *head; + struct task_struct *p; + pid_t pid = uval & FUTEX_TID_MASK; + + head = &hb->chain; + + plist_for_each_entry_safe(this, next, head, list) { + if (match_futex(&this->key, key)) { + /* + * Another waiter already exists - bump up + * the refcount and return its pi_state: + */ + pi_state = this->pi_state; + /* + * Userspace might have messed up non PI and PI futexes + */ + if (unlikely(!pi_state)) + return -EINVAL; + + WARN_ON(!atomic_read(&pi_state->refcount)); + WARN_ON(pid && pi_state->owner && + pi_state->owner->pid != pid); + + atomic_inc(&pi_state->refcount); + *ps = pi_state; + + return 0; + } + } + + /* + * We are the first waiter - try to look up the real owner and attach + * the new pi_state to it, but bail out when TID = 0 + */ + if (!pid) + return -ESRCH; + p = futex_find_get_task(pid); + if (IS_ERR(p)) + return PTR_ERR(p); + + /* + * We need to look at the task state flags to figure out, + * whether the task is exiting. To protect against the do_exit + * change of the task flags, we do this protected by + * p->pi_lock: + */ + spin_lock_irq(&p->pi_lock); + if (unlikely(p->flags & PF_EXITING)) { + /* + * The task is on the way out. When PF_EXITPIDONE is + * set, we know that the task has finished the + * cleanup: + */ + int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN; + + spin_unlock_irq(&p->pi_lock); + put_task_struct(p); + return ret; + } + + pi_state = alloc_pi_state(); + + /* + * Initialize the pi_mutex in locked state and make 'p' + * the owner of it: + */ + rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p); + + /* Store the key for possible exit cleanups: */ + pi_state->key = *key; + + WARN_ON(!list_empty(&pi_state->list)); + list_add(&pi_state->list, &p->pi_state_list); + pi_state->owner = p; + spin_unlock_irq(&p->pi_lock); + + put_task_struct(p); + + *ps = pi_state; + + return 0; +} + +/* + * The hash bucket lock must be held when this is called. 
+ * Afterwards, the futex_q must not be accessed. + */ +static void wake_futex(struct futex_q *q) +{ + plist_del(&q->list, &q->list.plist); + if (q->filp) + send_sigio(&q->filp->f_owner, q->fd, POLL_IN); + /* + * The lock in wake_up_all() is a crucial memory barrier after the + * plist_del() and also before assigning to q->lock_ptr. + */ + wake_up_all(&q->waiters); + /* + * The waiting task can free the futex_q as soon as this is written, + * without taking any locks. This must come last. + * + * A memory barrier is required here to prevent the following store + * to lock_ptr from getting ahead of the wakeup. Clearing the lock + * at the end of wake_up_all() does not prevent this store from + * moving. + */ + smp_wmb(); + q->lock_ptr = NULL; +} + +static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this) +{ + struct task_struct *new_owner; + struct futex_pi_state *pi_state = this->pi_state; + u32 curval, newval; + + if (!pi_state) + return -EINVAL; + + spin_lock(&pi_state->pi_mutex.wait_lock); + new_owner = rt_mutex_next_owner(&pi_state->pi_mutex); + + /* + * This happens when we have stolen the lock and the original + * pending owner did not enqueue itself back on the rt_mutex. + * Thats not a tragedy. We know that way, that a lock waiter + * is on the fly. We make the futex_q waiter the pending owner. + */ + if (!new_owner) + new_owner = this->task; + + /* + * We pass it to the next owner. (The WAITERS bit is always + * kept enabled while there is PI state around. We must also + * preserve the owner died bit.) 
+ */ + if (!(uval & FUTEX_OWNER_DIED)) { + int ret = 0; + + newval = FUTEX_WAITERS | new_owner->pid; + + pagefault_disable(); + curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval); + pagefault_enable(); + + if (curval == -EFAULT) + ret = -EFAULT; + if (curval != uval) + ret = -EINVAL; + if (ret) { + spin_unlock(&pi_state->pi_mutex.wait_lock); + return ret; + } + } + + spin_lock_irq(&pi_state->owner->pi_lock); + WARN_ON(list_empty(&pi_state->list)); + list_del_init(&pi_state->list); + spin_unlock_irq(&pi_state->owner->pi_lock); + + spin_lock_irq(&new_owner->pi_lock); + WARN_ON(!list_empty(&pi_state->list)); + list_add(&pi_state->list, &new_owner->pi_state_list); + pi_state->owner = new_owner; + spin_unlock_irq(&new_owner->pi_lock); + + spin_unlock(&pi_state->pi_mutex.wait_lock); + rt_mutex_unlock(&pi_state->pi_mutex); + + return 0; +} + +static int unlock_futex_pi(u32 __user *uaddr, u32 uval) +{ + u32 oldval; + + /* + * There is no waiter, so we unlock the futex. The owner died + * bit has not to be preserved here. 
We are the owner: + */ + pagefault_disable(); + oldval = futex_atomic_cmpxchg_inatomic(uaddr, uval, 0); + pagefault_enable(); + + if (oldval == -EFAULT) + return oldval; + if (oldval != uval) + return -EAGAIN; + + return 0; +} + +/* + * Express the locking dependencies for lockdep: + */ +static inline void +double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2) +{ + if (hb1 <= hb2) { + spin_lock(&hb1->lock); + if (hb1 < hb2) + spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING); + } else { /* hb1 > hb2 */ + spin_lock(&hb2->lock); + spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING); + } +} + +/* + * Wake up all waiters hashed on the physical page that is mapped + * to this virtual address: + */ +static int futex_wake(u32 __user *uaddr, struct rw_semaphore *fshared, + int nr_wake) +{ + struct futex_hash_bucket *hb; + struct futex_q *this, *next; + struct plist_head *head; + union futex_key key; + int ret; + + if (fshared) + down_read(fshared); + + ret = get_futex_key(uaddr, fshared, &key); + if (unlikely(ret != 0)) + goto out; + + hb = hash_futex(&key); + spin_lock(&hb->lock); + head = &hb->chain; + + plist_for_each_entry_safe(this, next, head, list) { + if (match_futex (&this->key, &key)) { + if (this->pi_state) { + ret = -EINVAL; + break; + } + wake_futex(this); + if (++ret >= nr_wake) + break; + } + } + + spin_unlock(&hb->lock); +out: + if (fshared) + up_read(fshared); + return ret; +} + +/* + * Wake up all waiters hashed on the physical page that is mapped + * to this virtual address: + */ +static int +futex_wake_op(u32 __user *uaddr1, struct rw_semaphore *fshared, + u32 __user *uaddr2, + int nr_wake, int nr_wake2, int op) +{ + union futex_key key1, key2; + struct futex_hash_bucket *hb1, *hb2; + struct plist_head *head; + struct futex_q *this, *next; + int ret, op_ret, attempt = 0; + +retryfull: + if (fshared) + down_read(fshared); + + ret = get_futex_key(uaddr1, fshared, &key1); + if (unlikely(ret != 0)) + goto out; + ret = 
get_futex_key(uaddr2, fshared, &key2); + if (unlikely(ret != 0)) + goto out; + + hb1 = hash_futex(&key1); + hb2 = hash_futex(&key2); + +retry: + double_lock_hb(hb1, hb2); + + op_ret = futex_atomic_op_inuser(op, uaddr2); + if (unlikely(op_ret < 0)) { + u32 dummy; + + spin_unlock(&hb1->lock); + if (hb1 != hb2) + spin_unlock(&hb2->lock); + +#ifndef CONFIG_MMU + /* + * we don't get EFAULT from MMU faults if we don't have an MMU, + * but we might get them from range checking + */ + ret = op_ret; + goto out; +#endif + + if (unlikely(op_ret != -EFAULT)) { + ret = op_ret; + goto out; + } + + /* + * futex_atomic_op_inuser needs to both read and write + * *(int __user *)uaddr2, but we can't modify it + * non-atomically. Therefore, if get_user below is not + * enough, we need to handle the fault ourselves, while + * still holding the mmap_sem. + */ + if (attempt++) { + ret = futex_handle_fault((unsigned long)uaddr2, + fshared, attempt); + if (ret) + goto out; + goto retry; + } + + /* + * If we would have faulted, release mmap_sem, + * fault it in and start all over again. + */ + if (fshared) + up_read(fshared); + + ret = get_user(dummy, uaddr2); + if (ret) + return ret; + + goto retryfull; + } + + head = &hb1->chain; + + plist_for_each_entry_safe(this, next, head, list) { + if (match_futex (&this->key, &key1)) { + wake_futex(this); + if (++ret >= nr_wake) + break; + } + } + + if (op_ret > 0) { + head = &hb2->chain; + + op_ret = 0; + plist_for_each_entry_safe(this, next, head, list) { + if (match_futex (&this->key, &key2)) { + wake_futex(this); + if (++op_ret >= nr_wake2) + break; + } + } + ret += op_ret; + } + + spin_unlock(&hb1->lock); + if (hb1 != hb2) + spin_unlock(&hb2->lock); +out: + if (fshared) + up_read(fshared); + return ret; +} + +/* + * Requeue all waiters hashed on one physical page to another + * physical page. 
+ */ +static int futex_requeue(u32 __user *uaddr1, struct rw_semaphore *fshared, + u32 __user *uaddr2, + int nr_wake, int nr_requeue, u32 *cmpval) +{ + union futex_key key1, key2; + struct futex_hash_bucket *hb1, *hb2; + struct plist_head *head1; + struct futex_q *this, *next; + int ret, drop_count = 0; + + retry: + if (fshared) + down_read(fshared); + + ret = get_futex_key(uaddr1, fshared, &key1); + if (unlikely(ret != 0)) + goto out; + ret = get_futex_key(uaddr2, fshared, &key2); + if (unlikely(ret != 0)) + goto out; + + hb1 = hash_futex(&key1); + hb2 = hash_futex(&key2); + + double_lock_hb(hb1, hb2); + + if (likely(cmpval != NULL)) { + u32 curval; + + ret = get_futex_value_locked(&curval, uaddr1); + + if (unlikely(ret)) { + spin_unlock(&hb1->lock); + if (hb1 != hb2) + spin_unlock(&hb2->lock); + + /* + * If we would have faulted, release mmap_sem, fault + * it in and start all over again. + */ + if (fshared) + up_read(fshared); + + ret = get_user(curval, uaddr1); + + if (!ret) + goto retry; + + return ret; + } + if (curval != *cmpval) { + ret = -EAGAIN; + goto out_unlock; + } + } + + head1 = &hb1->chain; + plist_for_each_entry_safe(this, next, head1, list) { + if (!match_futex (&this->key, &key1)) + continue; + if (++ret <= nr_wake) { + wake_futex(this); + } else { + /* + * If key1 and key2 hash to the same bucket, no need to + * requeue. + */ + if (likely(head1 != &hb2->chain)) { + plist_del(&this->list, &hb1->chain); + plist_add(&this->list, &hb2->chain); + this->lock_ptr = &hb2->lock; +#ifdef CONFIG_DEBUG_PI_LIST + this->list.plist.lock = &hb2->lock; +#endif + } + this->key = key2; + get_futex_key_refs(&key2); + drop_count++; + + if (ret - nr_wake >= nr_requeue) + break; + } + } + +out_unlock: + spin_unlock(&hb1->lock); + if (hb1 != hb2) + spin_unlock(&hb2->lock); + + /* drop_futex_key_refs() must be called outside the spinlocks. 
*/ + while (--drop_count >= 0) + drop_futex_key_refs(&key1); + +out: + if (fshared) + up_read(fshared); + return ret; +} + +/* The key must be already stored in q->key. */ +static inline struct futex_hash_bucket * +queue_lock(struct futex_q *q, int fd, struct file *filp) +{ + struct futex_hash_bucket *hb; + + q->fd = fd; + q->filp = filp; + + init_waitqueue_head(&q->waiters); + + get_futex_key_refs(&q->key); + hb = hash_futex(&q->key); + q->lock_ptr = &hb->lock; + + spin_lock(&hb->lock); + return hb; +} + +static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb) +{ + int prio; + + /* + * The priority used to register this element is + * - either the real thread-priority for the real-time threads + * (i.e. threads with a priority lower than MAX_RT_PRIO) + * - or MAX_RT_PRIO for non-RT threads. + * Thus, all RT-threads are woken first in priority order, and + * the others are woken last, in FIFO order. + */ + prio = min(current->normal_prio, MAX_RT_PRIO); + + plist_node_init(&q->list, prio); +#ifdef CONFIG_DEBUG_PI_LIST + q->list.plist.lock = &hb->lock; +#endif + plist_add(&q->list, &hb->chain); + q->task = current; + spin_unlock(&hb->lock); +} + +static inline void +queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb) +{ + spin_unlock(&hb->lock); + drop_futex_key_refs(&q->key); +} + +/* + * queue_me and unqueue_me must be called as a pair, each + * exactly once. They are called with the hashed spinlock held. + */ + +/* The key must be already stored in q->key. */ +static void queue_me(struct futex_q *q, int fd, struct file *filp) +{ + struct futex_hash_bucket *hb; + + hb = queue_lock(q, fd, filp); + __queue_me(q, hb); +} + +/* Return 1 if we were still queued (ie. 0 means we were woken) */ +static int unqueue_me(struct futex_q *q) +{ + spinlock_t *lock_ptr; + int ret = 0; + + /* In the common case we don't take the spinlock, which is nice. 
*/ + retry: + lock_ptr = q->lock_ptr; + barrier(); + if (lock_ptr != 0) { + spin_lock(lock_ptr); + /* + * q->lock_ptr can change between reading it and + * spin_lock(), causing us to take the wrong lock. This + * corrects the race condition. + * + * Reasoning goes like this: if we have the wrong lock, + * q->lock_ptr must have changed (maybe several times) + * between reading it and the spin_lock(). It can + * change again after the spin_lock() but only if it was + * already changed before the spin_lock(). It cannot, + * however, change back to the original value. Therefore + * we can detect whether we acquired the correct lock. + */ + if (unlikely(lock_ptr != q->lock_ptr)) { + spin_unlock(lock_ptr); + goto retry; + } + WARN_ON(plist_node_empty(&q->list)); + plist_del(&q->list, &q->list.plist); + + BUG_ON(q->pi_state); + + spin_unlock(lock_ptr); + ret = 1; + } + + drop_futex_key_refs(&q->key); + return ret; +} + +/* + * PI futexes can not be requeued and must remove themself from the + * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry + * and dropped here. + */ +static void unqueue_me_pi(struct futex_q *q) +{ + WARN_ON(plist_node_empty(&q->list)); + plist_del(&q->list, &q->list.plist); + + BUG_ON(!q->pi_state); + free_pi_state(q->pi_state); + q->pi_state = NULL; + + spin_unlock(q->lock_ptr); + + drop_futex_key_refs(&q->key); +} + +/* + * Fixup the pi_state owner with current. + * + * Must be called with hash bucket lock held and mm->sem held for non + * private futexes. + */ +static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, + struct task_struct *curr) +{ + u32 newtid = curr->pid | FUTEX_WAITERS; + struct futex_pi_state *pi_state = q->pi_state; + u32 uval, curval, newval; + int ret; + + /* Owner died? 
*/ + if (pi_state->owner != NULL) { + spin_lock_irq(&pi_state->owner->pi_lock); + WARN_ON(list_empty(&pi_state->list)); + list_del_init(&pi_state->list); + spin_unlock_irq(&pi_state->owner->pi_lock); + } else + newtid |= FUTEX_OWNER_DIED; + + pi_state->owner = curr; + + spin_lock_irq(&curr->pi_lock); + WARN_ON(!list_empty(&pi_state->list)); + list_add(&pi_state->list, &curr->pi_state_list); + spin_unlock_irq(&curr->pi_lock); + + /* + * We own it, so we have to replace the pending owner + * TID. This must be atomic as we have preserve the + * owner died bit here. + */ + ret = get_futex_value_locked(&uval, uaddr); + + while (!ret) { + newval = (uval & FUTEX_OWNER_DIED) | newtid; + + pagefault_disable(); + curval = futex_atomic_cmpxchg_inatomic(uaddr, + uval, newval); + pagefault_enable(); + + if (curval == -EFAULT) + ret = -EFAULT; + if (curval == uval) + break; + uval = curval; + } + return ret; +} + +/* + * In case we must use restart_block to restart a futex_wait, + * we encode in the 'arg3' shared capability + */ +#define ARG3_SHARED 1 + +static long futex_wait_restart(struct restart_block *restart); +static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared, + u32 val, ktime_t *abs_time) +{ + struct task_struct *curr = current; + DECLARE_WAITQUEUE(wait, curr); + struct futex_hash_bucket *hb; + struct futex_q q; + u32 uval; + int ret; + struct hrtimer_sleeper t; + int rem = 0; + + q.pi_state = NULL; + retry: + if (fshared) + down_read(fshared); + + ret = get_futex_key(uaddr, fshared, &q.key); + if (unlikely(ret != 0)) + goto out_release_sem; + + hb = queue_lock(&q, -1, NULL); + + /* + * Access the page AFTER the futex is queued. + * Order is important: + * + * Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val); + * Userspace waker: if (cond(var)) { var = new; futex_wake(&var); } + * + * The basic logical guarantee of a futex is that it blocks ONLY + * if cond(var) is known to be true at the time of blocking, for + * any cond. 
If we queued after testing *uaddr, that would open + * a race condition where we could block indefinitely with + * cond(var) false, which would violate the guarantee. + * + * A consequence is that futex_wait() can return zero and absorb + * a wakeup when *uaddr != val on entry to the syscall. This is + * rare, but normal. + * + * for shared futexes, we hold the mmap semaphore, so the mapping + * cannot have changed since we looked it up in get_futex_key. + */ + ret = get_futex_value_locked(&uval, uaddr); + + if (unlikely(ret)) { + queue_unlock(&q, hb); + + /* + * If we would have faulted, release mmap_sem, fault it in and + * start all over again. + */ + if (fshared) + up_read(fshared); + + ret = get_user(uval, uaddr); + + if (!ret) + goto retry; + return ret; + } + ret = -EWOULDBLOCK; + if (uval != val) + goto out_unlock_release_sem; + + /* Only actually queue if *uaddr contained val. */ + __queue_me(&q, hb); + + /* + * Now the futex is queued and we have checked the data, we + * don't want to hold mmap_sem while we sleep. + */ + if (fshared) + up_read(fshared); + + /* + * There might have been scheduling since the queue_me(), as we + * cannot hold a spinlock across the get_user() in case it + * faults, and we cannot just set TASK_INTERRUPTIBLE state when + * queueing ourselves into the futex hash. This code thus has to + * rely on the futex_wake() code removing us from hash when it + * wakes us up. + */ + + /* add_wait_queue is the barrier after __set_current_state. */ + __set_current_state(TASK_INTERRUPTIBLE); + add_wait_queue(&q.waiters, &wait); + /* + * !plist_node_empty() is safe here without any lock. + * q.lock_ptr != 0 is not safe, because of ordering against wakeup. 
 + */
+ if (likely(!plist_node_empty(&q.list))) {
+ if (!abs_time)
+ schedule();
+ else {
+ hrtimer_init(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ hrtimer_init_sleeper(&t, current);
+ t.timer.expires = *abs_time;
+
+ hrtimer_start(&t.timer, t.timer.expires, HRTIMER_MODE_ABS);
+
+ /*
+ * the timer could have already expired, in which
+ * case current would be flagged for rescheduling.
+ * Don't bother calling schedule.
+ */
+ if (likely(t.task))
+ schedule();
+
+ hrtimer_cancel(&t.timer);
+
+ /* Flag if a timeout occured */
+ rem = (t.task == NULL);
+ }
+ }
+ __set_current_state(TASK_RUNNING);
+
+ /*
+ * NOTE: we don't remove ourselves from the waitqueue because
+ * we are the only user of it.
+ */
+
+ /* If we were woken (and unqueued), we succeeded, whatever. */
+ if (!unqueue_me(&q))
+ return 0;
+ if (rem)
+ return -ETIMEDOUT;
+
+ /*
+ * We expect signal_pending(current), but another thread may
+ * have handled it for us already.
+ */
+ if (!abs_time)
+ return -ERESTARTSYS;
+ else {
+ struct restart_block *restart;
+ restart = &current_thread_info()->restart_block;
+ restart->fn = futex_wait_restart;
+ restart->arg0 = (unsigned long)uaddr;
+ restart->arg1 = (unsigned long)val;
+ restart->arg2 = (unsigned long)abs_time;
+ restart->arg3 = 0;
+ if (fshared)
+ restart->arg3 |= ARG3_SHARED;
+ return -ERESTART_RESTARTBLOCK;
+ }
+
+ out_unlock_release_sem:
+ queue_unlock(&q, hb);
+
+ out_release_sem:
+ if (fshared)
+ up_read(fshared);
+ return ret;
+}
+
+
+static long futex_wait_restart(struct restart_block *restart)
+{
+ u32 __user *uaddr = (u32 __user *)restart->arg0;
+ u32 val = (u32)restart->arg1;
+ ktime_t *abs_time = (ktime_t *)restart->arg2;
+ struct rw_semaphore *fshared = NULL;
+
+ restart->fn = do_no_restart_syscall;
+ if (restart->arg3 & ARG3_SHARED)
+ fshared = &current->mm->mmap_sem;
+ return (long)futex_wait(uaddr, fshared, val, abs_time);
+}
+
+
+/*
+ * Userspace tried a 0 -> TID atomic transition of the futex value
+ * and failed. 
The kernel side here does the whole locking operation: + * if there are waiters then it will block, it does PI, etc. (Due to + * races the kernel might see a 0 value of the futex too.) + */ +static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared, + int detect, ktime_t *time, int trylock) +{ + struct hrtimer_sleeper timeout, *to = NULL; + struct task_struct *curr = current; + struct futex_hash_bucket *hb; + u32 uval, newval, curval; + struct futex_q q; + int ret, lock_taken, ownerdied = 0, attempt = 0; + + if (refill_pi_state_cache()) + return -ENOMEM; + + if (time) { + to = &timeout; + hrtimer_init(&to->timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); + hrtimer_init_sleeper(to, current); + to->timer.expires = *time; + } + + q.pi_state = NULL; + retry: + if (fshared) + down_read(fshared); + + ret = get_futex_key(uaddr, fshared, &q.key); + if (unlikely(ret != 0)) + goto out_release_sem; + + retry_unlocked: + hb = queue_lock(&q, -1, NULL); + + retry_locked: + ret = lock_taken = 0; + + /* + * To avoid races, we attempt to take the lock here again + * (by doing a 0 -> TID atomic cmpxchg), while holding all + * the locks. It will most likely not succeed. + */ + newval = current->pid; + + pagefault_disable(); + curval = futex_atomic_cmpxchg_inatomic(uaddr, 0, newval); + pagefault_enable(); + + if (unlikely(curval == -EFAULT)) + goto uaddr_faulted; + + /* + * Detect deadlocks. In case of REQUEUE_PI this is a valid + * situation and we return success to user space. + */ + if (unlikely((curval & FUTEX_TID_MASK) == current->pid)) { + ret = -EDEADLK; + goto out_unlock_release_sem; + } + + /* + * Surprise - we got the lock. Just return to userspace: + */ + if (unlikely(!curval)) + goto out_unlock_release_sem; + + uval = curval; + + /* + * Set the WAITERS flag, so the owner will know it has someone + * to wake at next unlock + */ + newval = curval | FUTEX_WAITERS; + + /* + * There are two cases, where a futex might have no owner (the + * owner TID is 0): OWNER_DIED. 
We take over the futex in this + * case. We also do an unconditional take over, when the owner + * of the futex died. + * + * This is safe as we are protected by the hash bucket lock ! + */ + if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) { + /* Keep the OWNER_DIED bit */ + newval = (curval & ~FUTEX_TID_MASK) | current->pid; + ownerdied = 0; + lock_taken = 1; + } + + pagefault_disable(); + curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval); + pagefault_enable(); + + if (unlikely(curval == -EFAULT)) + goto uaddr_faulted; + if (unlikely(curval != uval)) + goto retry_locked; + + /* + * We took the lock due to owner died take over. + */ + if (unlikely(lock_taken)) + goto out_unlock_release_sem; + + /* + * We dont have the lock. Look up the PI state (or create it if + * we are the first waiter): + */ + ret = lookup_pi_state(uval, hb, &q.key, &q.pi_state); + + if (unlikely(ret)) { + switch (ret) { + + case -EAGAIN: + /* + * Task is exiting and we just wait for the + * exit to complete. + */ + queue_unlock(&q, hb); + if (fshared) + up_read(fshared); + cond_resched(); + goto retry; + + case -ESRCH: + /* + * No owner found for this futex. Check if the + * OWNER_DIED bit is set to figure out whether + * this is a robust futex or not. + */ + if (get_futex_value_locked(&curval, uaddr)) + goto uaddr_faulted; + + /* + * We simply start over in case of a robust + * futex. The code above will take the futex + * and return happy. + */ + if (curval & FUTEX_OWNER_DIED) { + ownerdied = 1; + goto retry_locked; + } + default: + goto out_unlock_release_sem; + } + } + + /* + * Only actually queue now that the atomic ops are done: + */ + __queue_me(&q, hb); + + /* + * Now the futex is queued and we have checked the data, we + * don't want to hold mmap_sem while we sleep. 
+ */ + if (fshared) + up_read(fshared); + + WARN_ON(!q.pi_state); + /* + * Block on the PI mutex: + */ + if (!trylock) + ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1); + else { + ret = rt_mutex_trylock(&q.pi_state->pi_mutex); + /* Fixup the trylock return value: */ + ret = ret ? 0 : -EWOULDBLOCK; + } + + if (fshared) + down_read(fshared); + spin_lock(q.lock_ptr); + + if (!ret) { + /* + * Got the lock. We might not be the anticipated owner + * if we did a lock-steal - fix up the PI-state in + * that case: + */ + if (q.pi_state->owner != curr) + ret = fixup_pi_state_owner(uaddr, &q, curr); + } else { + /* + * Catch the rare case, where the lock was released + * when we were on the way back before we locked the + * hash bucket. + */ + if (q.pi_state->owner == curr && + rt_mutex_trylock(&q.pi_state->pi_mutex)) { + ret = 0; + } else { + /* + * Paranoia check. If we did not take the lock + * in the trylock above, then we should not be + * the owner of the rtmutex, neither the real + * nor the pending one: + */ + if (rt_mutex_owner(&q.pi_state->pi_mutex) == curr) + printk(KERN_ERR "futex_lock_pi: ret = %d " + "pi-mutex: %p pi-state %p\n", ret, + q.pi_state->pi_mutex.owner, + q.pi_state->owner); + } + } + + /* Unqueue and drop the lock */ + unqueue_me_pi(&q); + if (fshared) + up_read(fshared); + + return ret != -EINTR ? ret : -ERESTARTNOINTR; + + out_unlock_release_sem: + queue_unlock(&q, hb); + + out_release_sem: + if (fshared) + up_read(fshared); + return ret; + + uaddr_faulted: + /* + * We have to r/w *(int __user *)uaddr, but we can't modify it + * non-atomically. Therefore, if get_user below is not + * enough, we need to handle the fault ourselves, while + * still holding the mmap_sem. + * + * ... and hb->lock. 
:-) --ANK + */ + queue_unlock(&q, hb); + + if (attempt++) { + ret = futex_handle_fault((unsigned long)uaddr, fshared, + attempt); + if (ret) + goto out_release_sem; + goto retry_unlocked; + } + + if (fshared) + up_read(fshared); + + ret = get_user(uval, uaddr); + if (!ret && (uval != -EFAULT)) + goto retry; + + return ret; +} + +/* + * Userspace attempted a TID -> 0 atomic transition, and failed. + * This is the in-kernel slowpath: we look up the PI state (if any), + * and do the rt-mutex unlock. + */ +static int futex_unlock_pi(u32 __user *uaddr, struct rw_semaphore *fshared) +{ + struct futex_hash_bucket *hb; + struct futex_q *this, *next; + u32 uval; + struct plist_head *head; + union futex_key key; + int ret, attempt = 0; + +retry: + if (get_user(uval, uaddr)) + return -EFAULT; + /* + * We release only a lock we actually own: + */ + if ((uval & FUTEX_TID_MASK) != current->pid) + return -EPERM; + /* + * First take all the futex related locks: + */ + if (fshared) + down_read(fshared); + + ret = get_futex_key(uaddr, fshared, &key); + if (unlikely(ret != 0)) + goto out; + + hb = hash_futex(&key); +retry_unlocked: + spin_lock(&hb->lock); + + /* + * To avoid races, try to do the TID -> 0 atomic transition + * again. 
If it succeeds then we can return without waking + * anyone else up: + */ + if (!(uval & FUTEX_OWNER_DIED)) { + pagefault_disable(); + uval = futex_atomic_cmpxchg_inatomic(uaddr, current->pid, 0); + pagefault_enable(); + } + + if (unlikely(uval == -EFAULT)) + goto pi_faulted; + /* + * Rare case: we managed to release the lock atomically, + * no need to wake anyone else up: + */ + if (unlikely(uval == current->pid)) + goto out_unlock; + + /* + * Ok, other tasks may need to be woken up - check waiters + * and do the wakeup if necessary: + */ + head = &hb->chain; + + plist_for_each_entry_safe(this, next, head, list) { + if (!match_futex (&this->key, &key)) + continue; + ret = wake_futex_pi(uaddr, uval, this); + /* + * The atomic access to the futex value + * generated a pagefault, so retry the + * user-access and the wakeup: + */ + if (ret == -EFAULT) + goto pi_faulted; + goto out_unlock; + } + /* + * No waiters - kernel unlocks the futex: + */ + if (!(uval & FUTEX_OWNER_DIED)) { + ret = unlock_futex_pi(uaddr, uval); + if (ret == -EFAULT) + goto pi_faulted; + } + +out_unlock: + spin_unlock(&hb->lock); +out: + if (fshared) + up_read(fshared); + + return ret; + +pi_faulted: + /* + * We have to r/w *(int __user *)uaddr, but we can't modify it + * non-atomically. Therefore, if get_user below is not + * enough, we need to handle the fault ourselves, while + * still holding the mmap_sem. + * + * ... and hb->lock. 
--ANK + */ + spin_unlock(&hb->lock); + + if (attempt++) { + ret = futex_handle_fault((unsigned long)uaddr, fshared, + attempt); + if (ret) + goto out; + goto retry_unlocked; + } + + if (fshared) + up_read(fshared); + + ret = get_user(uval, uaddr); + if (!ret && (uval != -EFAULT)) + goto retry; + + return ret; +} + +static int futex_close(struct inode *inode, struct file *filp) +{ + struct futex_q *q = filp->private_data; + + unqueue_me(q); + kfree(q); + + return 0; +} + +/* This is one-shot: once it's gone off you need a new fd */ +static unsigned int futex_poll(struct file *filp, + struct poll_table_struct *wait) +{ + struct futex_q *q = filp->private_data; + int ret = 0; + + poll_wait(filp, &q->waiters, wait); + + /* + * plist_node_empty() is safe here without any lock. + * q->lock_ptr != 0 is not safe, because of ordering against wakeup. + */ + if (plist_node_empty(&q->list)) + ret = POLLIN | POLLRDNORM; + + return ret; +} + +static const struct file_operations futex_fops = { + .release = futex_close, + .poll = futex_poll, +}; + +/* + * Signal allows caller to avoid the race which would occur if they + * set the sigio stuff up afterwards. 
 + */
+static int futex_fd(u32 __user *uaddr, int signal)
+{
+ struct futex_q *q;
+ struct file *filp;
+ int ret, err;
+ struct rw_semaphore *fshared;
+ static unsigned long printk_interval;
+
+ if (printk_timed_ratelimit(&printk_interval, 60 * 60 * 1000)) {
+ printk(KERN_WARNING "Process `%s' used FUTEX_FD, which "
+ "will be removed from the kernel in June 2007\n",
+ current->comm);
+ }
+
+ ret = -EINVAL;
+ if (!valid_signal(signal))
+ goto out;
+
+ ret = get_unused_fd();
+ if (ret < 0)
+ goto out;
+ filp = get_empty_filp();
+ if (!filp) {
+ put_unused_fd(ret);
+ ret = -ENFILE;
+ goto out;
+ }
+ filp->f_op = &futex_fops;
+ filp->f_path.mnt = mntget(futex_mnt);
+ filp->f_path.dentry = dget(futex_mnt->mnt_root);
+ filp->f_mapping = filp->f_path.dentry->d_inode->i_mapping;
+
+ if (signal) {
+ err = __f_setown(filp, task_pid(current), PIDTYPE_PID, 1);
+ if (err < 0) {
+ goto error;
+ }
+ filp->f_owner.signum = signal;
+ }
+
+ q = kmalloc(sizeof(*q), GFP_KERNEL);
+ if (!q) {
+ err = -ENOMEM;
+ goto error;
+ }
+ q->pi_state = NULL;
+
+ fshared = &current->mm->mmap_sem;
+ down_read(fshared);
+ err = get_futex_key(uaddr, fshared, &q->key);
+
+ if (unlikely(err != 0)) {
+ up_read(fshared);
+ kfree(q);
+ goto error;
+ }
+
+ /*
+ * queue_me() must be called before releasing mmap_sem, because
+ * key->shared.inode needs to be referenced while holding it.
+ */
+ filp->private_data = q;
+
+ queue_me(q, ret, filp);
+ up_read(fshared);
+
+ /* Now we map fd to filp, so userspace can access it */
+ fd_install(ret, filp);
+out:
+ return ret;
+error:
+ put_unused_fd(ret);
+ put_filp(filp);
+ ret = err;
+ goto out;
+}
+
+/*
+ * Support for robust futexes: the kernel cleans up held futexes at
+ * thread exit time.
+ *
+ * Implementation: user-space maintains a per-thread list of locks it
+ * is holding. Upon do_exit(), the kernel carefully walks this list,
+ * and marks all locks that are owned by this thread with the
+ * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). 
The list is + * always manipulated with the lock held, so the list is private and + * per-thread. Userspace also maintains a per-thread 'list_op_pending' + * field, to allow the kernel to clean up if the thread dies after + * acquiring the lock, but just before it could have added itself to + * the list. There can only be one such pending lock. + */ + +/** + * sys_set_robust_list - set the robust-futex list head of a task + * @head: pointer to the list-head + * @len: length of the list-head, as userspace expects + */ +asmlinkage long +sys_set_robust_list(struct robust_list_head __user *head, + size_t len) +{ + /* + * The kernel knows only one size for now: + */ + if (unlikely(len != sizeof(*head))) + return -EINVAL; + + current->robust_list = head; + + return 0; +} + +/** + * sys_get_robust_list - get the robust-futex list head of a task + * @pid: pid of the process [zero for current task] + * @head_ptr: pointer to a list-head pointer, the kernel fills it in + * @len_ptr: pointer to a length field, the kernel fills in the header size + */ +asmlinkage long +sys_get_robust_list(int pid, struct robust_list_head __user * __user *head_ptr, + size_t __user *len_ptr) +{ + struct robust_list_head __user *head; + unsigned long ret; + + if (!pid) + head = current->robust_list; + else { + struct task_struct *p; + + ret = -ESRCH; + rcu_read_lock(); + p = find_task_by_pid(pid); + if (!p) + goto err_unlock; + ret = -EPERM; + if ((current->euid != p->euid) && (current->euid != p->uid) && + !capable(CAP_SYS_PTRACE)) + goto err_unlock; + head = p->robust_list; + rcu_read_unlock(); + } + + if (put_user(sizeof(*head), len_ptr)) + return -EFAULT; + return put_user(head, head_ptr); + +err_unlock: + rcu_read_unlock(); + + return ret; +} + +/* + * Process a futex-list entry, check whether it's owned by the + * dying task, and do notification if so: + */ +int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi) +{ + u32 uval, nval, mval; + +retry: + if (get_user(uval, 
uaddr)) + return -1; + + if ((uval & FUTEX_TID_MASK) == curr->pid) { + /* + * Ok, this dying thread is truly holding a futex + * of interest. Set the OWNER_DIED bit atomically + * via cmpxchg, and if the value had FUTEX_WAITERS + * set, wake up a waiter (if any). (We have to do a + * futex_wake() even if OWNER_DIED is already set - + * to handle the rare but possible case of recursive + * thread-death.) The rest of the cleanup is done in + * userspace. + */ + mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED; + nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval); + + if (nval == -EFAULT) + return -1; + + if (nval != uval) + goto retry; + + /* + * Wake robust non-PI futexes here. The wakeup of + * PI futexes happens in exit_pi_state(): + */ + if (!pi) { + if (uval & FUTEX_WAITERS) + futex_wake(uaddr, &curr->mm->mmap_sem, 1); + } + } + return 0; +} + +/* + * Fetch a robust-list pointer. Bit 0 signals PI futexes: + */ +static inline int fetch_robust_entry(struct robust_list __user **entry, + struct robust_list __user * __user *head, + int *pi) +{ + unsigned long uentry; + + if (get_user(uentry, (unsigned long __user *)head)) + return -EFAULT; + + *entry = (void __user *)(uentry & ~1UL); + *pi = uentry & 1; + + return 0; +} + +/* + * Walk curr->robust_list (very carefully, it's a userspace list!) + * and mark any locks found there dead, and notify any waiters. + * + * We silently return on any sign of list-walking problem. 
 + */
+void exit_robust_list(struct task_struct *curr)
+{
+ struct robust_list_head __user *head = curr->robust_list;
+ struct robust_list __user *entry, *pending;
+ unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
+ unsigned long futex_offset;
+
+ /*
+ * Fetch the list head (which was registered earlier, via
+ * sys_set_robust_list()):
+ */
+ if (fetch_robust_entry(&entry, &head->list.next, &pi))
+ return;
+ /*
+ * Fetch the relative futex offset:
+ */
+ if (get_user(futex_offset, &head->futex_offset))
+ return;
+ /*
+ * Fetch any possibly pending lock-add first, and handle it
+ * if it exists:
+ */
+ if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
+ return;
+
+ if (pending)
+ handle_futex_death((void __user *)pending + futex_offset,
+ curr, pip);
+
+ while (entry != &head->list) {
+ /*
+ * A pending lock might already be on the list, so
+ * don't process it twice:
+ */
+ if (entry != pending)
+ if (handle_futex_death((void __user *)entry + futex_offset,
+ curr, pi))
+ return;
+ /*
+ * Fetch the next entry in the list:
+ */
+ if (fetch_robust_entry(&entry, &entry->next, &pi))
+ return;
+ /*
+ * Avoid excessively long or circular lists:
+ */
+ if (!--limit)
+ break;
+
+ cond_resched();
+ }
+}
+
+long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
+ u32 __user *uaddr2, u32 val2, u32 val3)
+{
+ int ret;
+ int cmd = op & FUTEX_CMD_MASK;
+ struct rw_semaphore *fshared = NULL;
+
+ if (!(op & FUTEX_PRIVATE_FLAG))
+ fshared = &current->mm->mmap_sem;
+
+ switch (cmd) {
+ case FUTEX_WAIT:
+ ret = futex_wait(uaddr, fshared, val, timeout);
+ break;
+ case FUTEX_WAKE:
+ ret = futex_wake(uaddr, fshared, val);
+ break;
+ case FUTEX_FD:
+ /* non-zero val means F_SETOWN(getpid()) & F_SETSIG(val) */
+ ret = futex_fd(uaddr, val);
+ break;
+ case FUTEX_REQUEUE:
+ ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, NULL);
+ break;
+ case FUTEX_CMP_REQUEUE:
+ ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3);
+ break;
+ case FUTEX_WAKE_OP:
+ ret 
= futex_wake_op(uaddr, fshared, uaddr2, val, val2, val3); + break; + case FUTEX_LOCK_PI: + ret = futex_lock_pi(uaddr, fshared, val, timeout, 0); + break; + case FUTEX_UNLOCK_PI: + ret = futex_unlock_pi(uaddr, fshared); + break; + case FUTEX_TRYLOCK_PI: + ret = futex_lock_pi(uaddr, fshared, 0, timeout, 1); + break; + default: + ret = -ENOSYS; + } + return ret; +} + + +asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val, + struct timespec __user *utime, u32 __user *uaddr2, + u32 val3) +{ + struct timespec ts; + ktime_t t, *tp = NULL; + u32 val2 = 0; + int cmd = op & FUTEX_CMD_MASK; + + if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI)) { + if (copy_from_user(&ts, utime, sizeof(ts)) != 0) + return -EFAULT; + if (!timespec_valid(&ts)) + return -EINVAL; + + t = timespec_to_ktime(ts); + if (cmd == FUTEX_WAIT) + t = ktime_add(ktime_get(), t); + tp = &t; + } + /* + * requeue parameter in 'utime' if cmd == FUTEX_REQUEUE. + * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP. 
+ */ + if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE || + cmd == FUTEX_WAKE_OP) + val2 = (u32) (unsigned long) utime; + + return do_futex(uaddr, op, val, tp, uaddr2, val2, val3); +} + +static int futexfs_get_sb(struct file_system_type *fs_type, + int flags, const char *dev_name, void *data, + struct vfsmount *mnt) +{ + return get_sb_pseudo(fs_type, "futex", NULL, 0xBAD1DEA, mnt); +} + +static struct file_system_type futex_fs_type = { + .name = "futexfs", + .get_sb = futexfs_get_sb, + .kill_sb = kill_anon_super, +}; + +static int __init init(void) +{ + int i = register_filesystem(&futex_fs_type); + + if (i) + return i; + + futex_mnt = kern_mount(&futex_fs_type); + if (IS_ERR(futex_mnt)) { + unregister_filesystem(&futex_fs_type); + return PTR_ERR(futex_mnt); + } + + for (i = 0; i < ARRAY_SIZE(futex_queues); i++) { + plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock); + spin_lock_init(&futex_queues[i].lock); + } + return 0; +} +__initcall(init); diff -urN linux-2.6.22.5/kernel/hrtimer.c linux-2.6.22.5-android/kernel/hrtimer.c --- linux-2.6.22.5/kernel/hrtimer.c 2007-08-23 09:23:54.000000000 +1000 +++ linux-2.6.22.5-android/kernel/hrtimer.c 2007-11-20 08:46:07.744255655 +1100 @@ -1124,8 +1124,14 @@ * If the timer was rearmed on another CPU, reprogram * the event device. 
*/ - if (timer->base->first == &timer->node) - hrtimer_reprogram(timer, timer->base); + if (timer->base->first == &timer->node) { + if(hrtimer_reprogram(timer, timer->base)) { + __remove_hrtimer(timer, timer->base, + HRTIMER_STATE_PENDING, 0); + list_add_tail(&timer->cb_entry, + &cpu_base->cb_pending); + } + } } } spin_unlock_irq(&cpu_base->lock); diff -urN linux-2.6.22.5/kernel/hrtimer.c.orig linux-2.6.22.5-android/kernel/hrtimer.c.orig --- linux-2.6.22.5/kernel/hrtimer.c.orig 1970-01-01 10:00:00.000000000 +1000 +++ linux-2.6.22.5-android/kernel/hrtimer.c.orig 2007-11-20 08:23:56.293337270 +1100 @@ -0,0 +1,1446 @@ +/* + * linux/kernel/hrtimer.c + * + * Copyright(C) 2005-2006, Thomas Gleixner + * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar + * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner + * + * High-resolution kernel timers + * + * In contrast to the low-resolution timeout API implemented in + * kernel/timer.c, hrtimers provide finer resolution and accuracy + * depending on system configuration and capabilities. + * + * These timers are currently used for: + * - itimers + * - POSIX timers + * - nanosleep + * - precise in-kernel timing + * + * Started by: Thomas Gleixner and Ingo Molnar + * + * Credits: + * based on kernel/timer.c + * + * Help, testing, suggestions, bugfixes, improvements were + * provided by: + * + * George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel + * et. al. 
+ * + * For licencing details see kernel-base/COPYING + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +/** + * ktime_get - get the monotonic time in ktime_t format + * + * returns the time in ktime_t format + */ +ktime_t ktime_get(void) +{ + struct timespec now; + + ktime_get_ts(&now); + + return timespec_to_ktime(now); +} +EXPORT_SYMBOL_GPL(ktime_get); + +/** + * ktime_get_real - get the real (wall-) time in ktime_t format + * + * returns the time in ktime_t format + */ +ktime_t ktime_get_real(void) +{ + struct timespec now; + + getnstimeofday(&now); + + return timespec_to_ktime(now); +} + +EXPORT_SYMBOL_GPL(ktime_get_real); + +/* + * The timer bases: + * + * Note: If we want to add new timer bases, we have to skip the two + * clock ids captured by the cpu-timers. We do this by holding empty + * entries rather than doing math adjustment of the clock ids. + * This ensures that we capture erroneous accesses to these clock ids + * rather than moving them into the range of valid clock id's. + */ +DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) = +{ + + .clock_base = + { + { + .index = CLOCK_REALTIME, + .get_time = &ktime_get_real, + .resolution = KTIME_LOW_RES, + }, + { + .index = CLOCK_MONOTONIC, + .get_time = &ktime_get, + .resolution = KTIME_LOW_RES, + }, + } +}; + +/** + * ktime_get_ts - get the monotonic clock in timespec format + * @ts: pointer to timespec variable + * + * The function calculates the monotonic clock from the realtime + * clock and the wall_to_monotonic offset and stores the result + * in normalized timespec format in the variable pointed to by @ts. 
+ */ +void ktime_get_ts(struct timespec *ts) +{ + struct timespec tomono; + unsigned long seq; + + do { + seq = read_seqbegin(&xtime_lock); + getnstimeofday(ts); + tomono = wall_to_monotonic; + + } while (read_seqretry(&xtime_lock, seq)); + + set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec, + ts->tv_nsec + tomono.tv_nsec); +} +EXPORT_SYMBOL_GPL(ktime_get_ts); + +/* + * Get the coarse grained time at the softirq based on xtime and + * wall_to_monotonic. + */ +static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base) +{ + ktime_t xtim, tomono; + struct timespec xts, tom; + unsigned long seq; + + do { + seq = read_seqbegin(&xtime_lock); +#ifdef CONFIG_NO_HZ + getnstimeofday(&xts); +#else + xts = xtime; +#endif + tom = wall_to_monotonic; + } while (read_seqretry(&xtime_lock, seq)); + + xtim = timespec_to_ktime(xts); + tomono = timespec_to_ktime(tom); + base->clock_base[CLOCK_REALTIME].softirq_time = xtim; + base->clock_base[CLOCK_MONOTONIC].softirq_time = + ktime_add(xtim, tomono); +} + +/* + * Helper function to check, whether the timer is running the callback + * function + */ +static inline int hrtimer_callback_running(struct hrtimer *timer) +{ + return timer->state & HRTIMER_STATE_CALLBACK; +} + +/* + * Functions and macros which are different for UP/SMP systems are kept in a + * single place + */ +#ifdef CONFIG_SMP + +/* + * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock + * means that all timers which are tied to this base via timer->base are + * locked, and the base itself is locked too. + * + * So __run_timers/migrate_timers can safely modify all timers which could + * be found on the lists/queues. + * + * When the timer's base is locked, and the timer removed from list, it is + * possible to set timer->base = NULL and drop the lock: the timer remains + * locked. 
+ */ +static +struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer, + unsigned long *flags) +{ + struct hrtimer_clock_base *base; + + for (;;) { + base = timer->base; + if (likely(base != NULL)) { + spin_lock_irqsave(&base->cpu_base->lock, *flags); + if (likely(base == timer->base)) + return base; + /* The timer has migrated to another CPU: */ + spin_unlock_irqrestore(&base->cpu_base->lock, *flags); + } + cpu_relax(); + } +} + +/* + * Switch the timer base to the current CPU when possible. + */ +static inline struct hrtimer_clock_base * +switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base) +{ + struct hrtimer_clock_base *new_base; + struct hrtimer_cpu_base *new_cpu_base; + + new_cpu_base = &__get_cpu_var(hrtimer_bases); + new_base = &new_cpu_base->clock_base[base->index]; + + if (base != new_base) { + /* + * We are trying to schedule the timer on the local CPU. + * However we can't change timer's base while it is running, + * so we keep it on the same CPU. No hassle vs. reprogramming + * the event source in the high resolution case. The softirq + * code will take care of this when the timer function has + * completed. There is no conflict as we hold the lock until + * the timer is enqueued. 
+ */ + if (unlikely(hrtimer_callback_running(timer))) + return base; + + /* See the comment in lock_timer_base() */ + timer->base = NULL; + spin_unlock(&base->cpu_base->lock); + spin_lock(&new_base->cpu_base->lock); + timer->base = new_base; + } + return new_base; +} + +#else /* CONFIG_SMP */ + +static inline struct hrtimer_clock_base * +lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags) +{ + struct hrtimer_clock_base *base = timer->base; + + spin_lock_irqsave(&base->cpu_base->lock, *flags); + + return base; +} + +# define switch_hrtimer_base(t, b) (b) + +#endif /* !CONFIG_SMP */ + +/* + * Functions for the union type storage format of ktime_t which are + * too large for inlining: + */ +#if BITS_PER_LONG < 64 +# ifndef CONFIG_KTIME_SCALAR +/** + * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable + * @kt: addend + * @nsec: the scalar nsec value to add + * + * Returns the sum of kt and nsec in ktime_t format + */ +ktime_t ktime_add_ns(const ktime_t kt, u64 nsec) +{ + ktime_t tmp; + + if (likely(nsec < NSEC_PER_SEC)) { + tmp.tv64 = nsec; + } else { + unsigned long rem = do_div(nsec, NSEC_PER_SEC); + + tmp = ktime_set((long)nsec, rem); + } + + return ktime_add(kt, tmp); +} + +EXPORT_SYMBOL_GPL(ktime_add_ns); +# endif /* !CONFIG_KTIME_SCALAR */ + +/* + * Divide a ktime value by a nanosecond value + */ +unsigned long ktime_divns(const ktime_t kt, s64 div) +{ + u64 dclc, inc, dns; + int sft = 0; + + dclc = dns = ktime_to_ns(kt); + inc = div; + /* Make sure the divisor is less than 2^32: */ + while (div >> 32) { + sft++; + div >>= 1; + } + dclc >>= sft; + do_div(dclc, (unsigned long) div); + + return (unsigned long) dclc; +} +#endif /* BITS_PER_LONG >= 64 */ + +/* High resolution timer related functions */ +#ifdef CONFIG_HIGH_RES_TIMERS + +/* + * High resolution timer enabled ? 
+ */ +static int hrtimer_hres_enabled __read_mostly = 1; + +/* + * Enable / Disable high resolution mode + */ +static int __init setup_hrtimer_hres(char *str) +{ + if (!strcmp(str, "off")) + hrtimer_hres_enabled = 0; + else if (!strcmp(str, "on")) + hrtimer_hres_enabled = 1; + else + return 0; + return 1; +} + +__setup("highres=", setup_hrtimer_hres); + +/* + * hrtimer_high_res_enabled - query, if the highres mode is enabled + */ +static inline int hrtimer_is_hres_enabled(void) +{ + return hrtimer_hres_enabled; +} + +/* + * Is the high resolution mode active ? + */ +static inline int hrtimer_hres_active(void) +{ + return __get_cpu_var(hrtimer_bases).hres_active; +} + +/* + * Reprogram the event source with checking both queues for the + * next event + * Called with interrupts disabled and base->lock held + */ +static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base) +{ + int i; + struct hrtimer_clock_base *base = cpu_base->clock_base; + ktime_t expires; + + cpu_base->expires_next.tv64 = KTIME_MAX; + + for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) { + struct hrtimer *timer; + + if (!base->first) + continue; + timer = rb_entry(base->first, struct hrtimer, node); + expires = ktime_sub(timer->expires, base->offset); + if (expires.tv64 < cpu_base->expires_next.tv64) + cpu_base->expires_next = expires; + } + + if (cpu_base->expires_next.tv64 != KTIME_MAX) + tick_program_event(cpu_base->expires_next, 1); +} + +/* + * Shared reprogramming for clock_realtime and clock_monotonic + * + * When a timer is enqueued and expires earlier than the already enqueued + * timers, we have to check, whether it expires earlier than the timer for + * which the clock event device was armed. 
+ * + * Called with interrupts disabled and base->cpu_base.lock held + */ +static int hrtimer_reprogram(struct hrtimer *timer, + struct hrtimer_clock_base *base) +{ + ktime_t *expires_next = &__get_cpu_var(hrtimer_bases).expires_next; + ktime_t expires = ktime_sub(timer->expires, base->offset); + int res; + + /* + * When the callback is running, we do not reprogram the clock event + * device. The timer callback is either running on a different CPU or + * the callback is executed in the hrtimer_interupt context. The + * reprogramming is handled either by the softirq, which called the + * callback or at the end of the hrtimer_interrupt. + */ + if (hrtimer_callback_running(timer)) + return 0; + + if (expires.tv64 >= expires_next->tv64) + return 0; + + /* + * Clockevents returns -ETIME, when the event was in the past. + */ + res = tick_program_event(expires, 0); + if (!IS_ERR_VALUE(res)) + *expires_next = expires; + return res; +} + + +/* + * Retrigger next event is called after clock was set + * + * Called with interrupts disabled via on_each_cpu() + */ +static void retrigger_next_event(void *arg) +{ + struct hrtimer_cpu_base *base; + struct timespec realtime_offset; + unsigned long seq; + + if (!hrtimer_hres_active()) + return; + + do { + seq = read_seqbegin(&xtime_lock); + set_normalized_timespec(&realtime_offset, + -wall_to_monotonic.tv_sec, + -wall_to_monotonic.tv_nsec); + } while (read_seqretry(&xtime_lock, seq)); + + base = &__get_cpu_var(hrtimer_bases); + + /* Adjust CLOCK_REALTIME offset */ + spin_lock(&base->lock); + base->clock_base[CLOCK_REALTIME].offset = + timespec_to_ktime(realtime_offset); + + hrtimer_force_reprogram(base); + spin_unlock(&base->lock); +} + +/* + * Clock realtime was set + * + * Change the offset of the realtime clock vs. the monotonic + * clock. + * + * We might have to reprogram the high resolution timer interrupt. On + * SMP we call the architecture specific code to retrigger _all_ high + * resolution timer interrupts. 
On UP we just disable interrupts and + * call the high resolution interrupt code. + */ +void clock_was_set(void) +{ + /* Retrigger the CPU local events everywhere */ + on_each_cpu(retrigger_next_event, NULL, 0, 1); +} + +/* + * During resume we might have to reprogram the high resolution timer + * interrupt (on the local CPU): + */ +void hres_timers_resume(void) +{ + WARN_ON_ONCE(num_online_cpus() > 1); + + /* Retrigger the CPU local events: */ + retrigger_next_event(NULL); +} + +/* + * Check, whether the timer is on the callback pending list + */ +static inline int hrtimer_cb_pending(const struct hrtimer *timer) +{ + return timer->state & HRTIMER_STATE_PENDING; +} + +/* + * Remove a timer from the callback pending list + */ +static inline void hrtimer_remove_cb_pending(struct hrtimer *timer) +{ + list_del_init(&timer->cb_entry); +} + +/* + * Initialize the high resolution related parts of cpu_base + */ +static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) +{ + base->expires_next.tv64 = KTIME_MAX; + base->hres_active = 0; + INIT_LIST_HEAD(&base->cb_pending); +} + +/* + * Initialize the high resolution related parts of a hrtimer + */ +static inline void hrtimer_init_timer_hres(struct hrtimer *timer) +{ + INIT_LIST_HEAD(&timer->cb_entry); +} + +/* + * When High resolution timers are active, try to reprogram. Note, that in case + * the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no expiry + * check happens. The timer gets enqueued into the rbtree. The reprogramming + * and expiry check is done in the hrtimer_interrupt or in the softirq. + */ +static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, + struct hrtimer_clock_base *base) +{ + if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) { + + /* Timer is expired, act upon the callback mode */ + switch(timer->cb_mode) { + case HRTIMER_CB_IRQSAFE_NO_RESTART: + /* + * We can call the callback from here. 
No restart + * happens, so no danger of recursion + */ + BUG_ON(timer->function(timer) != HRTIMER_NORESTART); + return 1; + case HRTIMER_CB_IRQSAFE_NO_SOFTIRQ: + /* + * This is solely for the sched tick emulation with + * dynamic tick support to ensure that we do not + * restart the tick right on the edge and end up with + * the tick timer in the softirq ! The calling site + * takes care of this. + */ + return 1; + case HRTIMER_CB_IRQSAFE: + case HRTIMER_CB_SOFTIRQ: + /* + * Move everything else into the softirq pending list ! + */ + list_add_tail(&timer->cb_entry, + &base->cpu_base->cb_pending); + timer->state = HRTIMER_STATE_PENDING; + raise_softirq(HRTIMER_SOFTIRQ); + return 1; + default: + BUG(); + } + } + return 0; +} + +/* + * Switch to high resolution mode + */ +static int hrtimer_switch_to_hres(void) +{ + struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases); + unsigned long flags; + + if (base->hres_active) + return 1; + + local_irq_save(flags); + + if (tick_init_highres()) { + local_irq_restore(flags); + return 0; + } + base->hres_active = 1; + base->clock_base[CLOCK_REALTIME].resolution = KTIME_HIGH_RES; + base->clock_base[CLOCK_MONOTONIC].resolution = KTIME_HIGH_RES; + + tick_setup_sched_timer(); + + /* "Retrigger" the interrupt to get things going */ + retrigger_next_event(NULL); + local_irq_restore(flags); + printk(KERN_INFO "Switched to high resolution mode on CPU %d\n", + smp_processor_id()); + return 1; +} + +#else + +static inline int hrtimer_hres_active(void) { return 0; } +static inline int hrtimer_is_hres_enabled(void) { return 0; } +static inline int hrtimer_switch_to_hres(void) { return 0; } +static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base) { } +static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, + struct hrtimer_clock_base *base) +{ + return 0; +} +static inline int hrtimer_cb_pending(struct hrtimer *timer) { return 0; } +static inline void hrtimer_remove_cb_pending(struct hrtimer *timer) { } 
+static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { } +static inline void hrtimer_init_timer_hres(struct hrtimer *timer) { } + +#endif /* CONFIG_HIGH_RES_TIMERS */ + +#ifdef CONFIG_TIMER_STATS +void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer, void *addr) +{ + if (timer->start_site) + return; + + timer->start_site = addr; + memcpy(timer->start_comm, current->comm, TASK_COMM_LEN); + timer->start_pid = current->pid; +} +#endif + +/* + * Counterpart to lock_timer_base above: + */ +static inline +void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags) +{ + spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags); +} + +/** + * hrtimer_forward - forward the timer expiry + * @timer: hrtimer to forward + * @now: forward past this time + * @interval: the interval to forward + * + * Forward the timer expiry so it will expire in the future. + * Returns the number of overruns. + */ +unsigned long +hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval) +{ + unsigned long orun = 1; + ktime_t delta; + + delta = ktime_sub(now, timer->expires); + + if (delta.tv64 < 0) + return 0; + + if (interval.tv64 < timer->base->resolution.tv64) + interval.tv64 = timer->base->resolution.tv64; + + if (unlikely(delta.tv64 >= interval.tv64)) { + s64 incr = ktime_to_ns(interval); + + orun = ktime_divns(delta, incr); + timer->expires = ktime_add_ns(timer->expires, incr * orun); + if (timer->expires.tv64 > now.tv64) + return orun; + /* + * This (and the ktime_add() below) is the + * correction for exact: + */ + orun++; + } + timer->expires = ktime_add(timer->expires, interval); + /* + * Make sure, that the result did not wrap with a very large + * interval. + */ + if (timer->expires.tv64 < 0) + timer->expires = ktime_set(KTIME_SEC_MAX, 0); + + return orun; +} +EXPORT_SYMBOL_GPL(hrtimer_forward); + +/* + * enqueue_hrtimer - internal function to (re)start a timer + * + * The timer is inserted in expiry order. 
Insertion into the + * red black tree is O(log(n)). Must hold the base lock. + */ +static void enqueue_hrtimer(struct hrtimer *timer, + struct hrtimer_clock_base *base, int reprogram) +{ + struct rb_node **link = &base->active.rb_node; + struct rb_node *parent = NULL; + struct hrtimer *entry; + + /* + * Find the right place in the rbtree: + */ + while (*link) { + parent = *link; + entry = rb_entry(parent, struct hrtimer, node); + /* + * We dont care about collisions. Nodes with + * the same expiry time stay together. + */ + if (timer->expires.tv64 < entry->expires.tv64) + link = &(*link)->rb_left; + else + link = &(*link)->rb_right; + } + + /* + * Insert the timer to the rbtree and check whether it + * replaces the first pending timer + */ + if (!base->first || timer->expires.tv64 < + rb_entry(base->first, struct hrtimer, node)->expires.tv64) { + /* + * Reprogram the clock event device. When the timer is already + * expired hrtimer_enqueue_reprogram has either called the + * callback or added it to the pending list and raised the + * softirq. + * + * This is a NOP for !HIGHRES + */ + if (reprogram && hrtimer_enqueue_reprogram(timer, base)) + return; + + base->first = &timer->node; + } + + rb_link_node(&timer->node, parent, link); + rb_insert_color(&timer->node, &base->active); + /* + * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the + * state of a possibly running callback. + */ + timer->state |= HRTIMER_STATE_ENQUEUED; +} + +/* + * __remove_hrtimer - internal function to remove a timer + * + * Caller must hold the base lock. + * + * High resolution timer mode reprograms the clock event device when the + * timer is the one which expires next. The caller can disable this by setting + * reprogram to zero. This is useful, when the context does a reprogramming + * anyway (e.g. timer interrupt) + */ +static void __remove_hrtimer(struct hrtimer *timer, + struct hrtimer_clock_base *base, + unsigned long newstate, int reprogram) +{ + /* High res. 
callback list. NOP for !HIGHRES */ + if (hrtimer_cb_pending(timer)) + hrtimer_remove_cb_pending(timer); + else { + /* + * Remove the timer from the rbtree and replace the + * first entry pointer if necessary. + */ + if (base->first == &timer->node) { + base->first = rb_next(&timer->node); + /* Reprogram the clock event device. if enabled */ + if (reprogram && hrtimer_hres_active()) + hrtimer_force_reprogram(base->cpu_base); + } + rb_erase(&timer->node, &base->active); + } + timer->state = newstate; +} + +/* + * remove hrtimer, called with base lock held + */ +static inline int +remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base) +{ + if (hrtimer_is_queued(timer)) { + int reprogram; + + /* + * Remove the timer and force reprogramming when high + * resolution mode is active and the timer is on the current + * CPU. If we remove a timer on another CPU, reprogramming is + * skipped. The interrupt event on this CPU is fired and + * reprogramming happens in the interrupt handler. This is a + * rare case and less expensive than a smp call. 
+ */ + timer_stats_hrtimer_clear_start_info(timer); + reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases); + __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, + reprogram); + return 1; + } + return 0; +} + +/** + * hrtimer_start - (re)start an relative timer on the current CPU + * @timer: the timer to be added + * @tim: expiry time + * @mode: expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL) + * + * Returns: + * 0 on success + * 1 when the timer was active + */ +int +hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode) +{ + struct hrtimer_clock_base *base, *new_base; + unsigned long flags; + int ret; + + base = lock_hrtimer_base(timer, &flags); + + /* Remove an active timer from the queue: */ + ret = remove_hrtimer(timer, base); + + /* Switch the timer base, if necessary: */ + new_base = switch_hrtimer_base(timer, base); + + if (mode == HRTIMER_MODE_REL) { + tim = ktime_add(tim, new_base->get_time()); + /* + * CONFIG_TIME_LOW_RES is a temporary way for architectures + * to signal that they simply return xtime in + * do_gettimeoffset(). In this case we want to round up by + * resolution when starting a relative timer, to avoid short + * timeouts. This will go away with the GTOD framework. + */ +#ifdef CONFIG_TIME_LOW_RES + tim = ktime_add(tim, base->resolution); +#endif + } + timer->expires = tim; + + timer_stats_hrtimer_set_start_info(timer); + + /* + * Only allow reprogramming if the new base is on this CPU. 
+ * (it might still be on another CPU if the timer was pending) + */ + enqueue_hrtimer(timer, new_base, + new_base->cpu_base == &__get_cpu_var(hrtimer_bases)); + + unlock_hrtimer_base(timer, &flags); + + return ret; +} +EXPORT_SYMBOL_GPL(hrtimer_start); + +/** + * hrtimer_try_to_cancel - try to deactivate a timer + * @timer: hrtimer to stop + * + * Returns: + * 0 when the timer was not active + * 1 when the timer was active + * -1 when the timer is currently excuting the callback function and + * cannot be stopped + */ +int hrtimer_try_to_cancel(struct hrtimer *timer) +{ + struct hrtimer_clock_base *base; + unsigned long flags; + int ret = -1; + + base = lock_hrtimer_base(timer, &flags); + + if (!hrtimer_callback_running(timer)) + ret = remove_hrtimer(timer, base); + + unlock_hrtimer_base(timer, &flags); + + return ret; + +} +EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel); + +/** + * hrtimer_cancel - cancel a timer and wait for the handler to finish. + * @timer: the timer to be cancelled + * + * Returns: + * 0 when the timer was not active + * 1 when the timer was active + */ +int hrtimer_cancel(struct hrtimer *timer) +{ + for (;;) { + int ret = hrtimer_try_to_cancel(timer); + + if (ret >= 0) + return ret; + cpu_relax(); + } +} +EXPORT_SYMBOL_GPL(hrtimer_cancel); + +/** + * hrtimer_get_remaining - get remaining time for the timer + * @timer: the timer to read + */ +ktime_t hrtimer_get_remaining(const struct hrtimer *timer) +{ + struct hrtimer_clock_base *base; + unsigned long flags; + ktime_t rem; + + base = lock_hrtimer_base(timer, &flags); + rem = ktime_sub(timer->expires, base->get_time()); + unlock_hrtimer_base(timer, &flags); + + return rem; +} +EXPORT_SYMBOL_GPL(hrtimer_get_remaining); + +#if defined(CONFIG_NO_IDLE_HZ) || defined(CONFIG_NO_HZ) +/** + * hrtimer_get_next_event - get the time until next expiry event + * + * Returns the delta to the next expiry event or KTIME_MAX if no timer + * is pending. 
+ */ +ktime_t hrtimer_get_next_event(void) +{ + struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); + struct hrtimer_clock_base *base = cpu_base->clock_base; + ktime_t delta, mindelta = { .tv64 = KTIME_MAX }; + unsigned long flags; + int i; + + spin_lock_irqsave(&cpu_base->lock, flags); + + if (!hrtimer_hres_active()) { + for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) { + struct hrtimer *timer; + + if (!base->first) + continue; + + timer = rb_entry(base->first, struct hrtimer, node); + delta.tv64 = timer->expires.tv64; + delta = ktime_sub(delta, base->get_time()); + if (delta.tv64 < mindelta.tv64) + mindelta.tv64 = delta.tv64; + } + } + + spin_unlock_irqrestore(&cpu_base->lock, flags); + + if (mindelta.tv64 < 0) + mindelta.tv64 = 0; + return mindelta; +} +#endif + +/** + * hrtimer_init - initialize a timer to the given clock + * @timer: the timer to be initialized + * @clock_id: the clock to be used + * @mode: timer mode abs/rel + */ +void hrtimer_init(struct hrtimer *timer, clockid_t clock_id, + enum hrtimer_mode mode) +{ + struct hrtimer_cpu_base *cpu_base; + + memset(timer, 0, sizeof(struct hrtimer)); + + cpu_base = &__raw_get_cpu_var(hrtimer_bases); + + if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS) + clock_id = CLOCK_MONOTONIC; + + timer->base = &cpu_base->clock_base[clock_id]; + hrtimer_init_timer_hres(timer); + +#ifdef CONFIG_TIMER_STATS + timer->start_site = NULL; + timer->start_pid = -1; + memset(timer->start_comm, 0, TASK_COMM_LEN); +#endif +} +EXPORT_SYMBOL_GPL(hrtimer_init); + +/** + * hrtimer_get_res - get the timer resolution for a clock + * @which_clock: which clock to query + * @tp: pointer to timespec variable to store the resolution + * + * Store the resolution of the clock selected by @which_clock in the + * variable pointed to by @tp. 
+ */ +int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp) +{ + struct hrtimer_cpu_base *cpu_base; + + cpu_base = &__raw_get_cpu_var(hrtimer_bases); + *tp = ktime_to_timespec(cpu_base->clock_base[which_clock].resolution); + + return 0; +} +EXPORT_SYMBOL_GPL(hrtimer_get_res); + +#ifdef CONFIG_HIGH_RES_TIMERS + +/* + * High resolution timer interrupt + * Called with interrupts disabled + */ +void hrtimer_interrupt(struct clock_event_device *dev) +{ + struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); + struct hrtimer_clock_base *base; + ktime_t expires_next, now; + int i, raise = 0; + + BUG_ON(!cpu_base->hres_active); + cpu_base->nr_events++; + dev->next_event.tv64 = KTIME_MAX; + + retry: + now = ktime_get(); + + expires_next.tv64 = KTIME_MAX; + + base = cpu_base->clock_base; + + for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { + ktime_t basenow; + struct rb_node *node; + + spin_lock(&cpu_base->lock); + + basenow = ktime_add(now, base->offset); + + while ((node = base->first)) { + struct hrtimer *timer; + + timer = rb_entry(node, struct hrtimer, node); + + if (basenow.tv64 < timer->expires.tv64) { + ktime_t expires; + + expires = ktime_sub(timer->expires, + base->offset); + if (expires.tv64 < expires_next.tv64) + expires_next = expires; + break; + } + + /* Move softirq callbacks to the pending list */ + if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) { + __remove_hrtimer(timer, base, + HRTIMER_STATE_PENDING, 0); + list_add_tail(&timer->cb_entry, + &base->cpu_base->cb_pending); + raise = 1; + continue; + } + + __remove_hrtimer(timer, base, + HRTIMER_STATE_CALLBACK, 0); + timer_stats_account_hrtimer(timer); + + /* + * Note: We clear the CALLBACK bit after + * enqueue_hrtimer to avoid reprogramming of + * the event hardware. This happens at the end + * of this function anyway. 
+ */ + if (timer->function(timer) != HRTIMER_NORESTART) { + BUG_ON(timer->state != HRTIMER_STATE_CALLBACK); + enqueue_hrtimer(timer, base, 0); + } + timer->state &= ~HRTIMER_STATE_CALLBACK; + } + spin_unlock(&cpu_base->lock); + base++; + } + + cpu_base->expires_next = expires_next; + + /* Reprogramming necessary ? */ + if (expires_next.tv64 != KTIME_MAX) { + if (tick_program_event(expires_next, 0)) + goto retry; + } + + /* Raise softirq ? */ + if (raise) + raise_softirq(HRTIMER_SOFTIRQ); +} + +static void run_hrtimer_softirq(struct softirq_action *h) +{ + struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); + + spin_lock_irq(&cpu_base->lock); + + while (!list_empty(&cpu_base->cb_pending)) { + enum hrtimer_restart (*fn)(struct hrtimer *); + struct hrtimer *timer; + int restart; + + timer = list_entry(cpu_base->cb_pending.next, + struct hrtimer, cb_entry); + + timer_stats_account_hrtimer(timer); + + fn = timer->function; + __remove_hrtimer(timer, timer->base, HRTIMER_STATE_CALLBACK, 0); + spin_unlock_irq(&cpu_base->lock); + + restart = fn(timer); + + spin_lock_irq(&cpu_base->lock); + + timer->state &= ~HRTIMER_STATE_CALLBACK; + if (restart == HRTIMER_RESTART) { + BUG_ON(hrtimer_active(timer)); + /* + * Enqueue the timer, allow reprogramming of the event + * device + */ + enqueue_hrtimer(timer, timer->base, 1); + } else if (hrtimer_active(timer)) { + /* + * If the timer was rearmed on another CPU, reprogram + * the event device. 
+ */ + if (timer->base->first == &timer->node) + hrtimer_reprogram(timer, timer->base); + } + } + spin_unlock_irq(&cpu_base->lock); +} + +#endif /* CONFIG_HIGH_RES_TIMERS */ + +/* + * Expire the per base hrtimer-queue: + */ +static inline void run_hrtimer_queue(struct hrtimer_cpu_base *cpu_base, + int index) +{ + struct rb_node *node; + struct hrtimer_clock_base *base = &cpu_base->clock_base[index]; + + if (!base->first) + return; + + if (base->get_softirq_time) + base->softirq_time = base->get_softirq_time(); + + spin_lock_irq(&cpu_base->lock); + + while ((node = base->first)) { + struct hrtimer *timer; + enum hrtimer_restart (*fn)(struct hrtimer *); + int restart; + + timer = rb_entry(node, struct hrtimer, node); + if (base->softirq_time.tv64 <= timer->expires.tv64) + break; + +#ifdef CONFIG_HIGH_RES_TIMERS + WARN_ON_ONCE(timer->cb_mode == HRTIMER_CB_IRQSAFE_NO_SOFTIRQ); +#endif + timer_stats_account_hrtimer(timer); + + fn = timer->function; + __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0); + spin_unlock_irq(&cpu_base->lock); + + restart = fn(timer); + + spin_lock_irq(&cpu_base->lock); + + timer->state &= ~HRTIMER_STATE_CALLBACK; + if (restart != HRTIMER_NORESTART) { + BUG_ON(hrtimer_active(timer)); + enqueue_hrtimer(timer, base, 0); + } + } + spin_unlock_irq(&cpu_base->lock); +} + +/* + * Called from timer softirq every jiffy, expire hrtimers: + * + * For HRT its the fall back code to run the softirq in the timer + * softirq context in case the hrtimer initialization failed or has + * not been done yet. + */ +void hrtimer_run_queues(void) +{ + struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); + int i; + + if (hrtimer_hres_active()) + return; + + /* + * This _is_ ugly: We have to check in the softirq context, + * whether we can switch to highres and / or nohz mode. The + * clocksource switch happens in the timer interrupt with + * xtime_lock held. 
Notification from there only sets the + * check bit in the tick_oneshot code, otherwise we might + * deadlock vs. xtime_lock. + */ + if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) + if (hrtimer_switch_to_hres()) + return; + + hrtimer_get_softirq_time(cpu_base); + + for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) + run_hrtimer_queue(cpu_base, i); +} + +/* + * Sleep related functions: + */ +static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer) +{ + struct hrtimer_sleeper *t = + container_of(timer, struct hrtimer_sleeper, timer); + struct task_struct *task = t->task; + + t->task = NULL; + if (task) + wake_up_process(task); + + return HRTIMER_NORESTART; +} + +void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task) +{ + sl->timer.function = hrtimer_wakeup; + sl->task = task; +#ifdef CONFIG_HIGH_RES_TIMERS + sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_RESTART; +#endif +} + +static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode) +{ + hrtimer_init_sleeper(t, current); + + do { + set_current_state(TASK_INTERRUPTIBLE); + hrtimer_start(&t->timer, t->timer.expires, mode); + + if (likely(t->task)) + schedule(); + + hrtimer_cancel(&t->timer); + mode = HRTIMER_MODE_ABS; + + } while (t->task && !signal_pending(current)); + + return t->task == NULL; +} + +long __sched hrtimer_nanosleep_restart(struct restart_block *restart) +{ + struct hrtimer_sleeper t; + struct timespec __user *rmtp; + struct timespec tu; + ktime_t time; + + restart->fn = do_no_restart_syscall; + + hrtimer_init(&t.timer, restart->arg0, HRTIMER_MODE_ABS); + t.timer.expires.tv64 = ((u64)restart->arg3 << 32) | (u64) restart->arg2; + + if (do_nanosleep(&t, HRTIMER_MODE_ABS)) + return 0; + + rmtp = (struct timespec __user *) restart->arg1; + if (rmtp) { + time = ktime_sub(t.timer.expires, t.timer.base->get_time()); + if (time.tv64 <= 0) + return 0; + tu = ktime_to_timespec(time); + if (copy_to_user(rmtp, &tu, sizeof(tu))) + return -EFAULT; + } + + 
restart->fn = hrtimer_nanosleep_restart; + + /* The other values in restart are already filled in */ + return -ERESTART_RESTARTBLOCK; +} + +long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, + const enum hrtimer_mode mode, const clockid_t clockid) +{ + struct restart_block *restart; + struct hrtimer_sleeper t; + struct timespec tu; + ktime_t rem; + + hrtimer_init(&t.timer, clockid, mode); + t.timer.expires = timespec_to_ktime(*rqtp); + if (do_nanosleep(&t, mode)) + return 0; + + /* Absolute timers do not update the rmtp value and restart: */ + if (mode == HRTIMER_MODE_ABS) + return -ERESTARTNOHAND; + + if (rmtp) { + rem = ktime_sub(t.timer.expires, t.timer.base->get_time()); + if (rem.tv64 <= 0) + return 0; + tu = ktime_to_timespec(rem); + if (copy_to_user(rmtp, &tu, sizeof(tu))) + return -EFAULT; + } + + restart = ¤t_thread_info()->restart_block; + restart->fn = hrtimer_nanosleep_restart; + restart->arg0 = (unsigned long) t.timer.base->index; + restart->arg1 = (unsigned long) rmtp; + restart->arg2 = t.timer.expires.tv64 & 0xFFFFFFFF; + restart->arg3 = t.timer.expires.tv64 >> 32; + + return -ERESTART_RESTARTBLOCK; +} + +asmlinkage long +sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp) +{ + struct timespec tu; + + if (copy_from_user(&tu, rqtp, sizeof(tu))) + return -EFAULT; + + if (!timespec_valid(&tu)) + return -EINVAL; + + return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC); +} + +/* + * Functions related to boot-time initialization: + */ +static void __devinit init_hrtimers_cpu(int cpu) +{ + struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu); + int i; + + spin_lock_init(&cpu_base->lock); + lockdep_set_class(&cpu_base->lock, &cpu_base->lock_key); + + for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) + cpu_base->clock_base[i].cpu_base = cpu_base; + + hrtimer_init_hres(cpu_base); +} + +#ifdef CONFIG_HOTPLUG_CPU + +static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base, + 
struct hrtimer_clock_base *new_base) +{ + struct hrtimer *timer; + struct rb_node *node; + + while ((node = rb_first(&old_base->active))) { + timer = rb_entry(node, struct hrtimer, node); + BUG_ON(hrtimer_callback_running(timer)); + __remove_hrtimer(timer, old_base, HRTIMER_STATE_INACTIVE, 0); + timer->base = new_base; + /* + * Enqueue the timer. Allow reprogramming of the event device + */ + enqueue_hrtimer(timer, new_base, 1); + } +} + +static void migrate_hrtimers(int cpu) +{ + struct hrtimer_cpu_base *old_base, *new_base; + int i; + + BUG_ON(cpu_online(cpu)); + old_base = &per_cpu(hrtimer_bases, cpu); + new_base = &get_cpu_var(hrtimer_bases); + + tick_cancel_sched_timer(cpu); + + local_irq_disable(); + double_spin_lock(&new_base->lock, &old_base->lock, + smp_processor_id() < cpu); + + for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { + migrate_hrtimer_list(&old_base->clock_base[i], + &new_base->clock_base[i]); + } + + double_spin_unlock(&new_base->lock, &old_base->lock, + smp_processor_id() < cpu); + local_irq_enable(); + put_cpu_var(hrtimer_bases); +} +#endif /* CONFIG_HOTPLUG_CPU */ + +static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self, + unsigned long action, void *hcpu) +{ + long cpu = (long)hcpu; + + switch (action) { + + case CPU_UP_PREPARE: + case CPU_UP_PREPARE_FROZEN: + init_hrtimers_cpu(cpu); + break; + +#ifdef CONFIG_HOTPLUG_CPU + case CPU_DEAD: + case CPU_DEAD_FROZEN: + clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &cpu); + migrate_hrtimers(cpu); + break; +#endif + + default: + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block __cpuinitdata hrtimers_nb = { + .notifier_call = hrtimer_cpu_notify, +}; + +void __init hrtimers_init(void) +{ + hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE, + (void *)(long)smp_processor_id()); + register_cpu_notifier(&hrtimers_nb); +#ifdef CONFIG_HIGH_RES_TIMERS + open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq, NULL); +#endif +} + diff -urN linux-2.6.22.5/kernel/panic.c 
linux-2.6.22.5-android/kernel/panic.c --- linux-2.6.22.5/kernel/panic.c 2007-08-23 09:23:54.000000000 +1000 +++ linux-2.6.22.5-android/kernel/panic.c 2007-11-20 08:46:07.764256720 +1100 @@ -26,7 +26,10 @@ static int pause_on_oops_flag; static DEFINE_SPINLOCK(pause_on_oops_lock); -int panic_timeout; +#ifndef CONFIG_PANIC_TIMEOUT +#define CONFIG_PANIC_TIMEOUT 0 +#endif +int panic_timeout = CONFIG_PANIC_TIMEOUT; ATOMIC_NOTIFIER_HEAD(panic_notifier_list); diff -urN linux-2.6.22.5/kernel/power/console.c linux-2.6.22.5-android/kernel/power/console.c --- linux-2.6.22.5/kernel/power/console.c 2007-08-23 09:23:54.000000000 +1000 +++ linux-2.6.22.5-android/kernel/power/console.c 2007-11-20 08:46:07.774257253 +1100 @@ -52,6 +52,12 @@ acquire_console_sem(); set_console(orig_fgconsole); release_console_sem(); + + if (vt_waitactive(orig_fgconsole)) { + pr_debug("Resume: Can't switch VCs."); + return; + } + kmsg_redirect = orig_kmsg; return; } diff -urN linux-2.6.22.5/kernel/power/process.c linux-2.6.22.5-android/kernel/power/process.c --- linux-2.6.22.5/kernel/power/process.c 2007-08-23 09:23:54.000000000 +1000 +++ linux-2.6.22.5-android/kernel/power/process.c 2007-11-20 08:46:07.774257253 +1100 @@ -13,6 +13,9 @@ #include #include #include +#ifdef CONFIG_ANDROID_POWER +#include +#endif /* * Timeout for stopping processes diff -urN linux-2.6.22.5/kernel/power/process.c.orig linux-2.6.22.5-android/kernel/power/process.c.orig --- linux-2.6.22.5/kernel/power/process.c.orig 1970-01-01 10:00:00.000000000 +1000 +++ linux-2.6.22.5-android/kernel/power/process.c.orig 2007-11-20 08:23:56.383342063 +1100 @@ -0,0 +1,230 @@ +/* + * drivers/power/process.c - Functions for starting/stopping processes on + * suspend transitions. + * + * Originally from swsusp. 
+ */ + + +#undef DEBUG + +#include <linux/interrupt.h> +#include <linux/suspend.h> +#include <linux/module.h> +#include <linux/syscalls.h> +#include <linux/freezer.h> + +/* + * Timeout for stopping processes + */ +#define TIMEOUT (20 * HZ) + +#define FREEZER_KERNEL_THREADS 0 +#define FREEZER_USER_SPACE 1 + +static inline int freezeable(struct task_struct * p) +{ + if ((p == current) || + (p->flags & PF_NOFREEZE) || + (p->exit_state != 0)) + return 0; + return 1; +} + +/* + * freezing is complete, mark current process as frozen + */ +static inline void frozen_process(void) +{ + if (!unlikely(current->flags & PF_NOFREEZE)) { + current->flags |= PF_FROZEN; + wmb(); + } + clear_tsk_thread_flag(current, TIF_FREEZE); +} + +/* Refrigerator is place where frozen processes are stored :-). */ +void refrigerator(void) +{ + /* Hmm, should we be allowed to suspend when there are realtime + processes around? */ + long save; + + task_lock(current); + if (freezing(current)) { + frozen_process(); + task_unlock(current); + } else { + task_unlock(current); + return; + } + save = current->state; + pr_debug("%s entered refrigerator\n", current->comm); + + spin_lock_irq(&current->sighand->siglock); + recalc_sigpending(); /* We sent fake signal, clean it up */ + spin_unlock_irq(&current->sighand->siglock); + + for (;;) { + set_current_state(TASK_UNINTERRUPTIBLE); + if (!frozen(current)) + break; + schedule(); + } + pr_debug("%s left refrigerator\n", current->comm); + current->state = save; +} + +static inline void freeze_process(struct task_struct *p) +{ + unsigned long flags; + + if (!freezing(p)) { + rmb(); + if (!frozen(p)) { + if (p->state == TASK_STOPPED) + force_sig_specific(SIGSTOP, p); + + freeze(p); + spin_lock_irqsave(&p->sighand->siglock, flags); + signal_wake_up(p, p->state == TASK_STOPPED); + spin_unlock_irqrestore(&p->sighand->siglock, flags); + } + } +} + +static void cancel_freezing(struct task_struct *p) +{ + unsigned long flags; + + if (freezing(p)) { + pr_debug(" clean up: %s\n", p->comm); + do_not_freeze(p); + spin_lock_irqsave(&p->sighand->siglock, flags); + 
recalc_sigpending_and_wake(p); + spin_unlock_irqrestore(&p->sighand->siglock, flags); + } +} + +static inline int is_user_space(struct task_struct *p) +{ + return p->mm && !(p->flags & PF_BORROWED_MM); +} + +static unsigned int try_to_freeze_tasks(int freeze_user_space) +{ + struct task_struct *g, *p; + unsigned long end_time; + unsigned int todo; + + end_time = jiffies + TIMEOUT; + do { + todo = 0; + read_lock(&tasklist_lock); + do_each_thread(g, p) { + if (!freezeable(p)) + continue; + + if (frozen(p)) + continue; + + if (p->state == TASK_TRACED && frozen(p->parent)) { + cancel_freezing(p); + continue; + } + if (freeze_user_space && !is_user_space(p)) + continue; + + freeze_process(p); + if (!freezer_should_skip(p)) + todo++; + } while_each_thread(g, p); + read_unlock(&tasklist_lock); + yield(); /* Yield is okay here */ + if (todo && time_after(jiffies, end_time)) + break; + } while (todo); + + if (todo) { + /* This does not unfreeze processes that are already frozen + * (we have slightly ugly calling convention in that respect, + * and caller must call thaw_processes() if something fails), + * but it cleans up leftover PF_FREEZE requests. + */ + printk("\n"); + printk(KERN_ERR "Stopping %s timed out after %d seconds " + "(%d tasks refusing to freeze):\n", + freeze_user_space ? "user space processes" : + "kernel threads", + TIMEOUT / HZ, todo); + read_lock(&tasklist_lock); + do_each_thread(g, p) { + if (freeze_user_space && !is_user_space(p)) + continue; + + task_lock(p); + if (freezeable(p) && !frozen(p) && + !freezer_should_skip(p)) + printk(KERN_ERR " %s\n", p->comm); + + cancel_freezing(p); + task_unlock(p); + } while_each_thread(g, p); + read_unlock(&tasklist_lock); + } + + return todo; +} + +/** + * freeze_processes - tell processes to enter the refrigerator + * + * Returns 0 on success, or the number of processes that didn't freeze, + * although they were told to. 
+ */ +int freeze_processes(void) +{ + unsigned int nr_unfrozen; + + printk("Stopping tasks ... "); + nr_unfrozen = try_to_freeze_tasks(FREEZER_USER_SPACE); + if (nr_unfrozen) + return nr_unfrozen; + + sys_sync(); + nr_unfrozen = try_to_freeze_tasks(FREEZER_KERNEL_THREADS); + if (nr_unfrozen) + return nr_unfrozen; + + printk("done.\n"); + BUG_ON(in_atomic()); + return 0; +} + +static void thaw_tasks(int thaw_user_space) +{ + struct task_struct *g, *p; + + read_lock(&tasklist_lock); + do_each_thread(g, p) { + if (!freezeable(p)) + continue; + + if (is_user_space(p) == !thaw_user_space) + continue; + + thaw_process(p); + } while_each_thread(g, p); + read_unlock(&tasklist_lock); +} + +void thaw_processes(void) +{ + printk("Restarting tasks ... "); + thaw_tasks(FREEZER_KERNEL_THREADS); + thaw_tasks(FREEZER_USER_SPACE); + schedule(); + printk("done.\n"); +} + +EXPORT_SYMBOL(refrigerator);