diff -Nru linux-2.6.23/arch/arm/kernel/process.c kernel.android/arch/arm/kernel/process.c --- linux-2.6.23/arch/arm/kernel/process.c 2007-10-10 06:31:38.000000000 +1000 +++ kernel.android/arch/arm/kernel/process.c 2007-11-12 07:49:02.000000000 +1100 @@ -396,6 +396,16 @@ } EXPORT_SYMBOL(dump_thread); +/* + * Capture the user space registers if the task is not running (in user space) + */ +int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs) +{ + struct pt_regs ptregs = *task_pt_regs(tsk); + elf_core_copy_regs(regs, &ptregs); + return 1; +} + /* * Shuffle the argument into the correct register before calling the * thread function. r1 is the thread argument, r2 is the pointer to diff -Nru linux-2.6.23/arch/arm/kernel/signal.c kernel.android/arch/arm/kernel/signal.c --- linux-2.6.23/arch/arm/kernel/signal.c 2007-10-10 06:31:38.000000000 +1000 +++ kernel.android/arch/arm/kernel/signal.c 2007-11-12 07:49:02.000000000 +1100 @@ -534,6 +534,14 @@ static inline void restart_syscall(struct pt_regs *regs) { + if (regs->ARM_ORIG_r0 == -ERESTARTNOHAND || + regs->ARM_ORIG_r0 == -ERESTARTSYS || + regs->ARM_ORIG_r0 == -ERESTARTNOINTR || + regs->ARM_ORIG_r0 == -ERESTART_RESTARTBLOCK) { + /* the syscall cannot be safely restarted, return -EINTR instead */ + regs->ARM_r0 = -EINTR; + return; + } regs->ARM_r0 = regs->ARM_ORIG_r0; regs->ARM_pc -= thumb_mode(regs) ? 2 : 4; } @@ -650,6 +658,7 @@ */ if (syscall) { if (regs->ARM_r0 == -ERESTART_RESTARTBLOCK) { + regs->ARM_r0 = -EAGAIN; /* prevent multiple restarts */ if (thumb_mode(regs)) { regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE; regs->ARM_pc -= 2; diff -Nru linux-2.6.23/drivers/binder/LICENSE kernel.android/drivers/binder/LICENSE --- linux-2.6.23/drivers/binder/LICENSE 1970-01-01 10:00:00.000000000 +1000 +++ kernel.android/drivers/binder/LICENSE 2007-11-12 07:49:02.000000000 +1100 @@ -0,0 +1,281 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. 
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Library General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. 
+ + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. 
You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. 
If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. 
(This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. 
Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. 
+ +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. 
If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. 
+ + END OF TERMS AND CONDITIONS + \ No newline at end of file diff -Nru linux-2.6.23/drivers/binder/Makefile kernel.android/drivers/binder/Makefile --- linux-2.6.23/drivers/binder/Makefile 1970-01-01 10:00:00.000000000 +1000 +++ kernel.android/drivers/binder/Makefile 2007-11-12 07:49:02.000000000 +1100 @@ -0,0 +1,4 @@ + +obj-$(CONFIG_BINDER) = binderdev.o + +binderdev-objs := iobuffer.o binder.o binder_node.o binder_proc.o binder_thread.o binder_transaction.o \ No newline at end of file diff -Nru linux-2.6.23/drivers/binder/binder.c kernel.android/drivers/binder/binder.c --- linux-2.6.23/drivers/binder/binder.c 1970-01-01 10:00:00.000000000 +1000 +++ kernel.android/drivers/binder/binder.c 2007-11-12 07:49:02.000000000 +1100 @@ -0,0 +1,691 @@ +/* binder driver + * Copyright (C) 2005 Palmsource, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + +#include +#include +#include +#include +#include +#include // includes +#include // for 'current' +#include // for vma, etc. 
+#include +#include +#include +#include "binder_defs.h" +#include "binder_proc.h" +#include "binder_thread.h" +#include "binder_node.h" +#include "binder_transaction.h" +#include "iobuffer.h" + +MODULE_LICENSE("GPL"); // class_* symbols get exported GPL +MODULE_AUTHOR("PalmSource, Inc."); +MODULE_DESCRIPTION("Capability-based IPC"); + +#define BINDER_MINOR 0 +#define BINDER_NUM_DEVS 1 +#define BINDER_NAME "binder" + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) +#define CLASS_SIMPLE class_simple +#define CLASS_SIMPLE_CREATE class_simple_create +#define CLASS_SIMPLE_DEVICE_ADD class_simple_device_add +#define CLASS_SIMPLE_DESTROY class_simple_destroy +#define CLASS_SIMPLE_DEVICE_REMOVE class_simple_device_remove +#else +#define CLASS_SIMPLE class +#define CLASS_SIMPLE_CREATE class_create +#define CLASS_SIMPLE_DEVICE_ADD class_device_create +#define CLASS_SIMPLE_DESTROY class_destroy +#define CLASS_SIMPLE_DEVICE_REMOVE(a) class_device_destroy(binder_class, a) +#endif + +/* + * Prototypes + */ + +struct binder_thread* find_thread(pid_t pid, binder_proc_t *proc, bool remove); + +#if HAVE_UNLOCKED_IOCTL +#define USE_UNLOCKED_IOCTL 1 +#else +#define USE_UNLOCKED_IOCTL 0 +#endif +#if USE_UNLOCKED_IOCTL +static long binder_unlocked_ioctl(struct file *, unsigned int, unsigned long); +#else +static int binder_ioctl(struct inode *, struct file *, unsigned int, unsigned long); +#endif +static int binder_open(struct inode *, struct file *); +static int binder_release(struct inode *, struct file *); +static int binder_mmap(struct file *, struct vm_area_struct *); + +/* + * Globals + */ + +struct binder_dev { + struct cdev cdev; +}; + +static int binder_major = 0; +static char const * const binder_name = BINDER_NAME; +static struct binder_dev binder_device; +static struct CLASS_SIMPLE *binder_class; + +static struct file_operations binder_fops = { + .owner = THIS_MODULE, +#if USE_UNLOCKED_IOCTL + .unlocked_ioctl = binder_unlocked_ioctl, +#else + .ioctl = binder_ioctl, +#endif 
+ .mmap = binder_mmap, + .open = binder_open, + .release = binder_release +}; + +static void binder_vma_open(struct vm_area_struct * area); +static void binder_vma_close(struct vm_area_struct * area); +static struct page * binder_vma_nopage(struct vm_area_struct * area, unsigned long address, int *type); + +static struct vm_operations_struct binder_vm_ops = { + .open = binder_vma_open, + .close = binder_vma_close, + .nopage = binder_vma_nopage +}; + +struct kmem_cache *transaction_cache = NULL; +struct kmem_cache *thread_cache = NULL; +struct kmem_cache *node_cache = NULL; +struct kmem_cache *local_mapping_cache = NULL; +struct kmem_cache *reverse_mapping_cache = NULL; +struct kmem_cache *range_map_cache = NULL; + +spinlock_t cmpxchg32_spinner = SPIN_LOCK_UNLOCKED; +static DECLARE_MUTEX(maps_lock); + +/* + * The kernel sizes its process hash table based up on the amount of RAM, with + * a lower limit of 4 bits and an upper limit of 12 bits. We probably don't + * need 8 bits worth of entries on PDAs, but it make it very likely we will + * have chain lengths of one. 
+ */ + +#define PID_HASH_BITS (8) +static int pid_hash_bits = PID_HASH_BITS; +#define hash_proc_id(pid) hash_long(pid, pid_hash_bits) + +static struct hlist_head *pid_table = NULL; + +static inline binder_thread_t * +binder_thread_alloc(pid_t pid, binder_proc_t *proc, int index) +{ + binder_thread_t *thread = binder_thread_init(pid, proc); + if (thread) { + if (proc) { + if(!binder_proc_AddThread(proc, thread)) + return NULL; // binder_proc_AddThread will cause the thread to be deleted if the process is dying + } + hlist_add_head(&(thread->node), pid_table + index); + } + DPRINTF(5, (KERN_WARNING "%s(%u, %p, %d): %p\n", __func__, pid, proc, index, thread)); + return thread; +} + +struct binder_thread * +core_find_thread(pid_t pid, binder_proc_t *proc, bool remove) +{ + binder_thread_t *thread; + struct hlist_node *_p; + const int index = hash_proc_id(pid); + + DPRINTF(5, (KERN_WARNING "%s(%u, %p, %s): index %d\n", __func__, pid, proc, remove ? "TRUE" : "FALSE", index)); + hlist_for_each_entry(thread, _p, pid_table + index, node) { + DPRINTF(5, (KERN_WARNING "thread: %p, thread->m_thid: %u\n", thread, thread->m_thid)); + if (thread->m_thid == pid) { + DPRINTF(5, (KERN_WARNING "found thread %p, proc=%p\n", thread, thread->m_team)); + if (remove) { + thread->attachedToThread = FALSE; + hlist_del(&thread->node); + } else if (proc) { + if (thread->m_team == NULL) { + binder_thread_AttachProcess(thread, proc); + } else { + BND_ASSERT(thread->m_team == proc, "proc changed"); + } + } + return thread; + } + } + + return NULL; +} + +binder_thread_t * +find_thread(pid_t pid, binder_proc_t *proc, bool remove) +{ + binder_thread_t *thread; + + DPRINTF(5, (KERN_WARNING "%s(%u, %p, %s)\n", __func__, pid, proc, remove ? 
"TRUE" : "FALSE")); + thread = core_find_thread(pid, proc, remove); + + /* binder_thread_alloc() fails for -ENOMEM only */ + if (thread == NULL && remove == FALSE) thread = binder_thread_alloc(pid, proc, hash_proc_id(pid)); + return thread; +} + +struct binder_thread * +check_for_thread(pid_t pid, bool create) +{ + binder_thread_t *thread; + int rv; + + rv = down_interruptible(&maps_lock); + if(rv != 0) + return NULL; + if (create) + thread = find_thread(pid, NULL, FALSE); + else + thread = core_find_thread(pid, NULL, FALSE); + if(thread != NULL) + BND_FIRST_ACQUIRE(binder_thread, thread, STRONG, thread); + up(&maps_lock); + + return thread; +} + +binder_thread_t * +attach_child_thread(pid_t child_pid, binder_thread_t *parent) +{ + binder_thread_t *thread; + int rv; + bool failed = FALSE; + + rv = down_interruptible(&maps_lock); + if(rv != 0) + return NULL; + thread = find_thread(child_pid, NULL, FALSE); + if(thread != NULL) { + BND_FIRST_ACQUIRE(binder_thread, thread, STRONG, parent); + // Note: it is important this be done with the lock + // held. See binder_thread_WaitForParent(). + failed = !binder_thread_SetParentThread(thread, parent); + } + up(&maps_lock); + + if (failed) { + forget_thread(thread); + thread = NULL; + } + + return thread; +} + +void +forget_thread(struct binder_thread *thread) +{ + pid_t pid; + bool attached; + int rv; + + rv = down_interruptible(&maps_lock); + if(rv != 0) + return; + pid = thread->m_thid; + attached = thread->attachedToThread; + if(BND_RELEASE(binder_thread, thread, STRONG, thread) == 1) { + // Remove it if not yet accessed by user space... 
+ if (!attached) { + find_thread(pid, NULL, TRUE); + } + } + up(&maps_lock); +} + +#if BND_MEM_DEBUG +typedef struct dbg_mem_header_s { + unsigned long state; + kmem_cache_t *slab; + struct dbg_mem_header_s *next; + struct dbg_mem_header_s *prev; +} dbg_mem_header_t ; +static dbg_mem_header_t *dbg_active_memory; +#endif + +void generic_slab_xtor(void *p, struct kmem_cache *slab, unsigned long flags) +{ +#if BND_MEM_DEBUG + dbg_mem_header_t *h = p; + if(flags & SLAB_CTOR_CONSTRUCTOR) { + h->state = 0; + h->slab = slab; + h->next = dbg_active_memory; + if(h->next) + h->next->prev = h; + h->prev = NULL; + dbg_active_memory = h; + } + else { + BND_ASSERT(h->state == 0 || h->state == 0x22222222, "memory still in use"); + if(h->next) + h->next->prev = h->prev; + if(h->prev) + h->prev->next = h->next; + else + dbg_active_memory = h->next; + } +#endif +} + +void transaction_slab_xtor(void *p, struct kmem_cache *slab, unsigned long flags) +{ + DIPRINTF(10, (KERN_WARNING "%s(%p, %p, %08lx)\n", __func__, p, slab, flags)); + generic_slab_xtor(p, slab, flags); +} + +void thread_slab_xtor(void *p, struct kmem_cache *slab, unsigned long flags) +{ + DIPRINTF(10, (KERN_WARNING "%s(%p, %p, %08lx)\n", __func__, p, slab, flags)); + generic_slab_xtor(p, slab, flags); +} + +void node_slab_xtor(void *p, struct kmem_cache *slab, unsigned long flags) +{ + DIPRINTF(10, (KERN_WARNING "%s(%p, %p, %08lx)\n", __func__, p, slab, flags)); + generic_slab_xtor(p, slab, flags); +} + +void local_mapping_slab_xtor(void *p, struct kmem_cache *slab, unsigned long flags) +{ + DIPRINTF(10, (KERN_WARNING "%s(%p, %p, %08lx)\n", __func__, p, slab, flags)); + generic_slab_xtor(p, slab, flags); +} + +void reverse_mapping_slab_xtor(void *p, struct kmem_cache *slab, unsigned long flags) +{ + DIPRINTF(10, (KERN_WARNING "%s(%p, %p, %08lx)\n", __func__, p, slab, flags)); + generic_slab_xtor(p, slab, flags); +} + +void range_map_slab_xtor(void *p, struct kmem_cache *slab, unsigned long flags) +{ + DIPRINTF(10, 
(KERN_WARNING "%s(%p, %p, %08lx)\n", __func__, p, slab, flags)); + generic_slab_xtor(p, slab, flags); +} + +static int /*__init*/ create_pools(void) +{ + //long cache_flags = /*SLAB_DEBUG_FREE | SLAB_DEBUG_INITIAL | SLAB_RED_ZONE |*/ SLAB_POISON; + //long cache_flags = SLAB_RECLAIM_ACCOUNT | SLAB_NO_REAP; + long cache_flags = 0; +#if BND_MEM_DEBUG + size_t pad = sizeof(dbg_mem_header_t); +#else + size_t pad = 0; +#endif + DPRINTF(4, (KERN_WARNING "%s()\n", __func__)); + + // small object pools + transaction_cache = kmem_cache_create("binder_transaction_t", sizeof(binder_transaction_t)+pad, 0, cache_flags, transaction_slab_xtor); + if (!transaction_cache) return -ENOMEM; + thread_cache = kmem_cache_create("binder_thread_t", sizeof(binder_thread_t)+pad, 0, cache_flags, thread_slab_xtor); + if (!thread_cache) return -ENOMEM; + node_cache = kmem_cache_create("binder_node_t", sizeof(binder_node_t)+pad, 0, cache_flags, node_slab_xtor); + if (!node_cache) return -ENOMEM; + local_mapping_cache = kmem_cache_create("local_mapping_t", sizeof(local_mapping_t)+pad, 0, cache_flags, local_mapping_slab_xtor); + if (!local_mapping_cache) return -ENOMEM; + reverse_mapping_cache = kmem_cache_create("reverse_mapping_t", sizeof(reverse_mapping_t)+pad, 0, cache_flags, reverse_mapping_slab_xtor); + if (!reverse_mapping_cache) return -ENOMEM; + range_map_cache = kmem_cache_create("range_map_t", sizeof(range_map_t)+pad, 0, cache_flags, range_map_slab_xtor); + if (!range_map_cache) return -ENOMEM; + + // hash tables + pid_table = kmalloc(sizeof(void *) << PID_HASH_BITS, GFP_KERNEL); + if (!pid_table) return -ENOMEM; + memset(pid_table, 0, sizeof(void *) << PID_HASH_BITS); + return 0; +} + +static int destroy_pools(void) +{ + int res = 0; +#if BND_MEM_DEBUG + dbg_mem_header_t *m, *mn; +#endif + DPRINTF(4, (KERN_WARNING "%s()\n", __func__)); + + /* + * These can fail if we haven't free'd all of the objects we've allocated. 
+ */ + +#if BND_MEM_DEBUG + + + DPRINTF(4, (KERN_WARNING "%s() dbg_active_memory = %p\n", __func__, dbg_active_memory)); + m = dbg_active_memory; + while(m) { + mn = m->next; + if(m->state == 0x11111111) { + printk(KERN_WARNING "%s() memory still in use: %p slab %p\n", __func__, m + 1, m->slab); + dbg_kmem_cache_free(m->slab, m + 1); + } + m = mn; + } +#endif + + kmem_cache_destroy(transaction_cache); + kmem_cache_destroy(thread_cache); + kmem_cache_destroy(node_cache); + kmem_cache_destroy(local_mapping_cache); + kmem_cache_destroy(reverse_mapping_cache); + kmem_cache_destroy(range_map_cache); + if (pid_table) kfree(pid_table); + return res; +} + +static int __init init_binder(void) +{ + struct class_device *simple; + int result; + dev_t dev = 0; + + result = create_pools(); + if (result) { + goto free_pools; + } + + result = alloc_chrdev_region(&dev, BINDER_MINOR, BINDER_NUM_DEVS, binder_name); + if (result < 0) { + printk(KERN_WARNING "init_binder: alloc_chrdev_region() failed: %d\n", result); + return result; + } + + binder_major = MAJOR(dev); + binder_class = CLASS_SIMPLE_CREATE(THIS_MODULE, "binderipc"); + if (IS_ERR(binder_class)) { + result = PTR_ERR(binder_class); + printk(KERN_WARNING "init_binder: CLASS_SIMPLE_CREATE() failed: %d\n", result); + goto unalloc; + } + + memset(&binder_device, 0, sizeof(binder_device)); // overkill, but we don't care + cdev_init(&binder_device.cdev, &binder_fops); + binder_device.cdev.owner = THIS_MODULE; + result = cdev_add(&binder_device.cdev, dev, BINDER_NUM_DEVS); + if (result < 0) { + printk(KERN_WARNING "init_binder: cdev_add() failed: %d\n", result); + goto unregister_class; + } + + void* mem = kzalloc(sizeof(*simple), GFP_KERNEL); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) + simple = CLASS_SIMPLE_DEVICE_ADD(binder_class, dev, NULL, "%s", BINDER_NAME); +#else + // ARGH API CHANGE!!! 
+ simple = CLASS_SIMPLE_DEVICE_ADD(binder_class, NULL, dev, NULL, "%s", BINDER_NAME); +#endif + if (IS_ERR(simple)) { + result = PTR_ERR(simple); + goto unadd_cdev; + } + + goto exit0; + +unadd_cdev: + cdev_del(&binder_device.cdev); +unregister_class: + CLASS_SIMPLE_DESTROY(binder_class); +unalloc: + unregister_chrdev_region(binder_major, BINDER_NUM_DEVS); +free_pools: + destroy_pools(); +exit0: + return result; +} + +static void __exit cleanup_binder(void) +{ + CLASS_SIMPLE_DEVICE_REMOVE(MKDEV(binder_major, 0)); + cdev_del(&binder_device.cdev); + CLASS_SIMPLE_DESTROY(binder_class); + unregister_chrdev_region(binder_major, BINDER_NUM_DEVS); + destroy_pools(); +} + + +module_init(init_binder); +module_exit(cleanup_binder); + +static int binder_open(struct inode *nodp, struct file *filp) +{ + binder_proc_t *proc; + + //printk(KERN_WARNING "%s(%p %p) (pid %d)\n", __func__, nodp, filp, current->pid); + // We only have one device, so we don't have to dig into the inode for it. + + down(&maps_lock); + proc = new_binder_proc(); + filp->private_data = proc; + up(&maps_lock); + printk(KERN_WARNING "%s(%p %p) (pid %d) got %p\n", __func__, nodp, filp, current->pid, proc); + if(proc == NULL) + return -ENOMEM; + return 0; +} + +static int binder_release(struct inode *nodp, struct file *filp) +{ + binder_proc_t *that; + binder_thread_t *thread; + struct hlist_node *_p, *_pp; + int index; + printk(KERN_WARNING "%s(%p %p) (pid %d) pd %p\n", __func__, nodp, filp, current->pid, filp->private_data); + that = filp->private_data; + if (that) { + filp->private_data = NULL; + + // ensure the process stays around until we can verify termination + index = 1 << pid_hash_bits; + + DPRINTF(5, (KERN_WARNING "%s(%p) freeing threads\n", __func__, that)); + + down(&maps_lock); + while (index--) { + hlist_for_each_entry_safe(thread, _p, _pp, pid_table + index, node) { + if (thread->m_team == that) { + DPRINTF(5, (KERN_WARNING "%s(%p) freeing thread %d\n", __func__, that, thread->m_thid)); + 
hlist_del(&thread->node); + BND_RELEASE(binder_thread, thread, STRONG, that); + //BND_RELEASE(binder_thread, thread, WEAK, that); + } + } + } + DPRINTF(5, (KERN_WARNING "%s(%p) done freeing threads\n", __func__, that)); + up(&maps_lock); + + binder_proc_Die(that, FALSE); + BND_RELEASE(binder_proc, that, STRONG, that); + } + else printk(KERN_WARNING "%s(pid %d): couldn't find binder_proc to Die()\n", __func__, current->pid); + return 0; +} + +#if USE_UNLOCKED_IOCTL +static long binder_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +#else +static int binder_ioctl(struct inode *nodp, struct file *filp, unsigned int cmd, unsigned long arg) +#endif +{ + binder_thread_t *thread; + struct binder_proc *proc; + int rv; + + if (_IOC_TYPE(cmd) != BINDER_IOC_MAGIC) return -ENOTTY; + if (_IOC_NR(cmd) > BINDER_IOC_MAXNR) return -ENOTTY; + + DPRINTF(5, (KERN_WARNING "%s: %08x\n", __func__, cmd)); + + // find the thread tracking structure + rv = down_interruptible(&maps_lock); + if(rv != 0) + return rv; + proc = filp->private_data; + BND_ASSERT(proc != NULL, "ioctl called after release"); + if(proc == NULL || !binder_proc_IsAlive(proc)) + thread = NULL; + else + thread = find_thread(current->pid, proc, cmd == BINDER_THREAD_EXIT); + if(thread != NULL) { + BND_ACQUIRE(binder_thread, thread, WEAK, thread); + thread->attachedToThread = TRUE; + } + + up(&maps_lock); + if(proc == NULL || !binder_proc_IsAlive(proc)) + return -ECONNREFUSED; + if (thread == NULL) + return -ENOMEM; + + //BND_ASSERT(thread->m_team == proc, "bad thread process ptr"); + if(thread->m_team != proc) { + printk( KERN_WARNING "%s: cmd %08x process ptr mismatch, " + "thread has %p, expected %p\n", + __func__, cmd, thread->m_team, proc ); + return -EIO; + } + + rv = binder_thread_Control(thread, cmd, (void*)arg); + BND_RELEASE(binder_thread, thread, WEAK, thread); + return rv; +} + +static int binder_mmap(struct file * filp, struct vm_area_struct * vma) +{ + // FIXME: Unil we see a device with 
ZONE_HIGH memory (currently, greater + // than 896MB RAM) we don't need to worry about alloc_page. + vma->vm_ops = &binder_vm_ops; + vma->vm_flags |= VM_RESERVED | VM_READ | VM_RAND_READ | VM_IO | VM_DONTCOPY | VM_DONTEXPAND; + vma->vm_flags &= ~(VM_SHARED); + vma->vm_private_data = filp->private_data; + binder_vma_open(vma); + return 0; +} + +static void binder_vma_open(struct vm_area_struct * area) +{ + binder_proc_t *that; + DPRINTF(5, (KERN_WARNING "binder_vma_open()\n")); + // Do we have to watch for clone()'d processes and hunt down the + // appropriate binder_proc_t? + + that = area->vm_private_data; + // initialize our free space map + if (that->m_freeMap.rb_node == NULL) { + range_map_t *rm = kmem_cache_alloc(range_map_cache, GFP_KERNEL); + that->m_mmap_start = rm->start = area->vm_start; + rm->end = area->vm_end; + rm->page = NULL; + rm->team = that; + BND_LOCK(that->m_map_pool_lock); + binder_proc_free_map_insert(that, rm); + BND_UNLOCK(that->m_map_pool_lock); + DPRINTF(5, (KERN_WARNING "vma(%08lx, %08lx) for %08x\n", rm->start, rm->end, (unsigned int)that)); + } +#if 0 + else printk(KERN_WARNING " --- didn't reconstruct the initial free-map\n"); +#endif +} + +static void binder_vma_close(struct vm_area_struct * area) +{ + // Uh, what? 
+ DPRINTF(5, (KERN_WARNING "binder_vma_close() for %08x\n", (unsigned int)area->vm_private_data)); +} + +static struct page * binder_vma_nopage(struct vm_area_struct * area, unsigned long address, int *type) +{ + struct page *pageptr = NULL; + // the private data holds a pointer to owning binder_proc + binder_proc_t *bp = (binder_proc_t *)area->vm_private_data; + DPRINTF(5, ("binder_vma_nopage(%p, %08lx)\n", bp, address)); + // make sure this address corresponds to a valid transaction + if (!binder_proc_ValidTransactionAddress(bp, address, &pageptr)) + return NOPAGE_SIGBUS; + // bump the kernel reference counts + get_page(pageptr); + // record the fault type + if (type) *type = VM_FAULT_MINOR; + // return the page + return pageptr; +} + +void my_dump_stack(void) { printk(KERN_WARNING ""); dump_stack(); } + +void soft_yield() +{ + static int i = 0; + i++; + if(i < 10) + return; + i = 0; + yield(); +} + +#if BND_MEM_DEBUG + +#undef kmem_cache_alloc +#undef kmem_cache_free + +void *dbg_kmem_cache_alloc(struct kmem_cache *a, unsigned int b) +{ + dbg_mem_header_t *p; + p = kmem_cache_alloc(a, b); + BND_ASSERT(p != NULL, "memory allocation failed"); + if(p == NULL) + return NULL; + if(p->state != 0x00000000) { + if(p->state != 0x22222222) + DPRINTF(5, (KERN_WARNING "%s: kmem_cache_alloc(%p, %d) BAD PTR %p = 0x%08lx\n", __func__, a, b, p, p->state)); + else + DPRINTF(6, (KERN_WARNING "%s: kmem_cache_alloc(%p, %d) NEW PTR %p = 0x%08lx\n", __func__, a, b, p, p->state)); + } + p->state = 0x11111111; + p++; + DPRINTF(6, (KERN_WARNING "%s: kmem_cache_alloc(%p, %d) returned %p\n", __func__, a, b, p)); + return p; +} + +void dbg_kmem_cache_free(struct kmem_cache *a, void *b) +{ + dbg_mem_header_t *p = b; + DPRINTF(6, (KERN_WARNING "%s: kmem_cache_free(%p, %p)\n", __func__, a, p)); + p--; + if(p->state != 0x11111111) { + printk(KERN_WARNING "%s: kmem_cache_free(%p, %p) BAD ARG 0x%08lx\n", __func__, a, p, p->state); + dump_stack(); + return; + } + + p->state = 0x22222222; + 
kmem_cache_free(a, p); +} + +#endif diff -Nru linux-2.6.23/drivers/binder/binder_defs.h kernel.android/drivers/binder/binder_defs.h --- linux-2.6.23/drivers/binder/binder_defs.h 1970-01-01 10:00:00.000000000 +1000 +++ kernel.android/drivers/binder/binder_defs.h 2007-11-12 07:49:02.000000000 +1100 @@ -0,0 +1,340 @@ +/* binder driver + * Copyright (C) 2005 Palmsource, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + +#ifndef BINDER_DEFS_H +#define BINDER_DEFS_H + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#if defined(CONFIG_ARM) +/* Define this if you want to use the linux threads hack on ARM */ +#define USE_LINUXTHREADS +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) +#define assert_spin_locked(x) +#endif + +extern struct kmem_cache *transaction_cache; +extern struct kmem_cache *thread_cache; +extern struct kmem_cache *node_cache; +extern struct kmem_cache *local_mapping_cache; +extern struct kmem_cache *reverse_mapping_cache; +extern struct kmem_cache *range_map_cache; + +#define HASH_BITS 8 +#define HASH_SIZE (1 << HASH_BITS) + +enum ref_count_type { + STRONG = 1, + WEAK = 2 +}; + +/* ------------------------------------------------------------------ */ +/* --------------------- COMPILING AS A DRIVER 
---------------------- */ +/* ------------------------------------------------------------------ */ + +void soft_yield(void); + +#define STOP_ON_ASSERT //msleep_interruptible(1000*60*60*24*7) + +#define BND_MEM_DEBUG 0 // slab destructors are no longer available + +#if 0 + +#define BINDER_DEBUG 1 +#define VALIDATES_BINDER 0 +#define DIPRINTF(level,a) do { if (level <= 9) printk a; } while(0) +#define DPRINTF(level,a) do { if (level <= 9) { printk a; soft_yield();} } while(0) +#define BND_FAIL(msg) +#define BND_ASSERT(cond, msg) do { if (!(cond)) { printk(KERN_WARNING "BND_ASSERT file %s line %d: %s\n", __FILE__, __LINE__, msg); dump_stack(); STOP_ON_ASSERT;} } while (FALSE) +#define DBTRANSACT(x) printk x +#define DBSHUTDOWN(x) printk x +#define DBSPAWN(x) printk x +#define DBSTACK(x) printk x +#define DBLOCK(x) printk x +#define DBREFS(x) printk x +#define DBREAD(x) printk x +#define DBDEATH(x) printk x + +#else +#define BINDER_DEBUG 0 +#define DIPRINTF(level,a) +#define DPRINTF(level,a) +#define BND_FAIL(msg) +//#define BND_ASSERT(cond, msg) if (!(cond)) printk(KERN_WARNING "BND_ASSERT file %s line %d: %s\n", __FILE__, __LINE__, msg) +#define BND_ASSERT(cond, msg) do { if (!(cond)) { printk(KERN_WARNING "BND_ASSERT file %s line %d: %s\n", __FILE__, __LINE__, msg); dump_stack(); } } while (FALSE) +#define DBTRANSACT(x) +#define DBSHUTDOWN(x) //printk x +#define DBSPAWN(x) +#define DBSTACK(x) +#define DBLOCK(x) +#define DBREFS(x) +#define DBREAD(x) +#define DBDEATH(x) +#endif + +// errors triggered by userspace bugs +#define UPRINTF(a) do { printk a; } while(0) +#define BND_UASSERT(cond, msg) if (!(cond)) printk(KERN_WARNING "BND_UASSERT file %s line %d: %s\n", __FILE__, __LINE__, msg) + +#if BND_MEM_DEBUG +void *dbg_kmem_cache_alloc(struct kmem_cache *a, unsigned int b); +void dbg_kmem_cache_free(struct kmem_cache *a, void *b); + +#define kmem_cache_alloc dbg_kmem_cache_alloc +#define kmem_cache_free dbg_kmem_cache_free +#endif + +struct binder_thread; + +typedef 
ssize_t status_t; + +//typedef unsigned int bool; +#define FALSE (0) +#define TRUE (~FALSE) + +/* Special function, implemented in binder.c, to try to find + a binder_thread structure for a pid. If 'create' is TRUE, + a new structure will be created for you (unattached to + a process) if it doesn't already exist; otherwise it will + return NULL. Returns with a strong reference held on the + thread. + + *** NOTE: Must not call this while holding a thread or + process lock! */ +struct binder_thread * check_for_thread(pid_t thread_pid, bool create); + +/* Special function, implemented in binder.c, for a parent to + lookup (or pre-create) the state for main thread of a child + process it is spawning. This function calls + binder_thread_SetParentThread() for you on the child thread, + and returns with a strong reference held on the thread. + + *** NOTE: Must not call this while holding a thread or + process lock! */ +struct binder_thread * attach_child_thread(pid_t child_pid, struct binder_thread *parent); + +/* Special function, implemented in binder.c, to remove a + thread structure from the global list. This needs to be + called when using the above two functions to create such + a structure, to remove it from the list when it is no + longer used. A strong reference is removed from the thread + and, if the strong count goes to zero AND the structure has + not yet been accessed by its user space thread, then the + thread structure will be removed from the list. + + *** NOTE: Must not call this while holding a thread or + process lock! */ +void forget_thread(struct binder_thread *thread); + +// Perform an accuire/release on an object. 
+#define BND_ACQUIRE(cname, that, type, id) cname##_Acquire(that, type) +#define BND_ATTEMPT_ACQUIRE(cname, that, type, id) cname##_AttemptAcquire(that, type) +#define BND_FIRST_ACQUIRE(cname, that, type, id) cname##_ForceAcquire(that, type) +#define BND_FORCE_ACQUIRE(cname, that, id) cname##_ForceAcquire(that, STRONG) +#define BND_RELEASE(cname, that, type, id) cname##_Release(that, type) + +// Declare acquire/release methods for a class. +#define BND_DECLARE_ACQUIRE_RELEASE(cname) \ + void cname##_Acquire(cname##_t *that, s32 type); \ + int cname##_ForceAcquire(cname##_t *that, s32 type); \ + int cname##_Release(cname##_t *that, s32 type); \ +/**/ + +// Declare attempt acquire method for a class. +#define BND_DECLARE_ATTEMPT_ACQUIRE(cname) \ + int cname##_AttemptAcquire(cname##_t *that, s32 type); \ +/**/ + +extern void dump_stack(void); +// Implement acquire/release methods for a class. +#define BND_IMPLEMENT_ACQUIRE_RELEASE(cname) \ +void \ +cname##_Acquire(cname##_t *that, s32 type) \ +{ \ + int res; \ + if (type == STRONG) { \ + res = atomic_inc_return(&that->m_primaryRefs); \ + BND_ASSERT(res > 1, "STRONG Acquire without strong ref"); \ + } \ + res = atomic_inc_return(&that->m_secondaryRefs); \ + if (type == STRONG) { \ + BND_ASSERT(res > 1, "STRONG Acquire without weak ref"); \ + } \ + else { \ + BND_ASSERT(res > 1, "WEAK Acquire without weak ref"); \ + } \ + DPRINTF(5, (KERN_WARNING "%s(%p, %s) s:%d w:%d\n", __func__, that, type == STRONG ? "STRONG" : "WEAK", atomic_read(&that->m_primaryRefs), atomic_read(&that->m_secondaryRefs)));\ + /*dump_stack()*/;\ +} \ +int \ +cname##_ForceAcquire(cname##_t *that, s32 type) \ +{ \ + int res; \ + res = atomic_inc_return(&that->m_secondaryRefs); \ + if (type == STRONG) { \ + res = atomic_inc_return(&that->m_primaryRefs); \ + } \ + DPRINTF(5, (KERN_WARNING "%s(%p, %s) s:%d w:%d\n", __func__, that, type == STRONG ? 
"STRONG" : "WEAK", atomic_read(&that->m_primaryRefs), atomic_read(&that->m_secondaryRefs)));\ + return res-1; \ +} \ +int \ +cname##_Release(cname##_t *that, s32 type) \ +{ \ + int rv1=-2, rv2=-2; \ + DPRINTF(5, (KERN_WARNING "%s(%p, %s) s:%d w:%d\n", __func__, that, type == STRONG ? "STRONG" : "WEAK", atomic_read(&that->m_primaryRefs), atomic_read(&that->m_secondaryRefs)));\ + if(type == STRONG) { \ + BND_ASSERT(atomic_read(&that->m_primaryRefs) > 0, "Strong reference underflow");\ + } \ + BND_ASSERT(atomic_read(&that->m_secondaryRefs) > 0, "Weak reference underflow");\ + /*dump_stack()*/;\ + switch (type) { \ + case STRONG: \ + if ((rv1 = atomic_dec_return(&that->m_primaryRefs)) == 0) { \ + cname##_Released(that); \ + } \ + case WEAK: \ + if ((rv2 = atomic_dec_return(&that->m_secondaryRefs)) == 0) {\ + cname##_destroy(that); \ + } \ + } \ + return ((type == STRONG) ? rv1 : rv2) + 1; \ +} \ +/**/ + +// Implement attempt acquire method for a class. +#define BND_IMPLEMENT_ATTEMPT_ACQUIRE(cname) \ +int \ +cname##_AttemptAcquire(cname##_t *that, s32 type) \ +{ \ + int cur; \ + switch (type) { \ + case STRONG: \ + cur = atomic_read(&that->m_primaryRefs); \ + while (cur > 0 && \ + !cmpxchg32( &that->m_primaryRefs.counter, \ + &cur, cur+1)); \ + if (cur <= 0) {\ + DPRINTF(5, (KERN_WARNING "%s(%p, %s) FAILED!\n", __func__, that, type == STRONG ? "STRONG" : "WEAK"));\ + /*dump_stack()*/;\ + return FALSE; \ + }\ + cur = atomic_inc_return(&that->m_secondaryRefs); \ + BND_ASSERT(cur > 1, "ATTEMPT ACQUIRE STONG without WEAK ref"); \ + DPRINTF(5, (KERN_WARNING "%s(%p, %s) s:%d w:%d\n", __func__, that, type == STRONG ? 
"STRONG" : "WEAK", atomic_read(&that->m_primaryRefs), atomic_read(&that->m_secondaryRefs)));\ + /*dump_stack()*/;\ + return TRUE; \ + case WEAK: \ + cur = atomic_read(&that->m_secondaryRefs); \ + while (cur > 0 && \ + !cmpxchg32( &that->m_secondaryRefs.counter, \ + &cur, cur+1)); \ + if (cur <= 0) {\ + DPRINTF(5, (KERN_WARNING "%s(%p, %s) FAILED!\n", __func__, that, type == STRONG ? "STRONG" : "WEAK"));\ + /*dump_stack()*/;\ + return FALSE; \ + }\ + DPRINTF(5, (KERN_WARNING "%s(%p, %s) s:%d w:%d\n", __func__, that, type == STRONG ? "STRONG" : "WEAK", atomic_read(&that->m_primaryRefs), atomic_read(&that->m_secondaryRefs)));\ + /*dump_stack()*/;\ + return TRUE; \ + } \ + return FALSE; \ +} \ +/**/ + +extern spinlock_t cmpxchg32_spinner; + +// Quick hack -- should be checking for x86, not ARM. + +#if defined(CONFIG_ARM) + +static __inline__ int cmpxchg32(volatile int *atom, int *val, int newVal) { + unsigned long flags; + spin_lock_irqsave(&cmpxchg32_spinner, flags); + if (*atom == *val) { + *atom = newVal; + spin_unlock_irqrestore(&cmpxchg32_spinner, flags); + return 1; + } + *val = *atom; + spin_unlock_irqrestore(&cmpxchg32_spinner, flags); + return 0; +}; + +#else + +static __inline__ int compare_and_swap32(volatile int *location, int oldValue, int newValue) +{ + int success; + asm volatile("lock; cmpxchg %%ecx, (%%edx); sete %%al; andl $1, %%eax" + : "=a" (success) : "a" (oldValue), "c" (newValue), "d" (location)); + return success; +} + +static __inline__ bool cmpxchg32(volatile int *atom, int *value, int newValue) +{ + int success = compare_and_swap32(atom, *value, newValue); + if (!success) + *value = *atom; + + return success; +}; + +#endif + +#define BND_LOCK(x) do { down(&(x)); \ + BND_ASSERT(atomic_read(&((x).count)) <= 0, "BND_LOCK() lock still free"); } while (0) +#define BND_UNLOCK(x) do { \ + BND_ASSERT(atomic_read(&((x).count)) <= 0, "BND_UNLOCK() lock already free"); \ + up(&(x)); } while (0) + +#if defined(CONFIG_ARM) +// __cpuc_flush_user_range is 
arm specific, but the generic function need a +// vm_area_struct and will flush the entire page. +#define BND_FLUSH_CACHE(start, end) do { \ + __cpuc_flush_user_range((size_t)start & ~(L1_CACHE_BYTES-1), L1_CACHE_ALIGN((size_t)end), 0); \ + } while(0) +#else +#define BND_FLUSH_CACHE(start, end) +#endif + +#define B_CAN_INTERRUPT (1) + +#define B_INFINITE_TIMEOUT ((~(0ULL))>>1) +#define B_ABSOLUTE_TIMEOUT (1) + +#define B_BAD_THREAD_ID ((pid_t)0) +#define B_REAL_TIME_PRIORITY (10) +#define B_NORMAL_PRIORITY (80) +#define B_LOW_PRIORITY (100) + +#define B_MIN_PRIORITY_VAL (5) +#define B_MAX_PRIORITY_VAL (100) + +#endif // BINDER_DEFS_H diff -Nru linux-2.6.23/drivers/binder/binder_node.c kernel.android/drivers/binder/binder_node.c --- linux-2.6.23/drivers/binder/binder_node.c 1970-01-01 10:00:00.000000000 +1000 +++ kernel.android/drivers/binder/binder_node.c 2007-11-12 07:49:02.000000000 +1100 @@ -0,0 +1,140 @@ +/* binder driver + * Copyright (C) 2005 Palmsource, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + +#include "binder_node.h" +#include "binder_proc.h" +#include "binder_transaction.h" + +#define PURGATORY 0 +#if PURGATORY +static DECLARE_MUTEX(sem); +static binder_node_t* head = NULL; +static binder_node_t** tail = &head; +static int count = 0; + +static void my_free_node(binder_node_t *t) +{ + down(&sem); + *tail = t; + tail = (binder_node_t**)&t->m_ptr; + count++; + if (count > 20) { + t = head; + head = (binder_node_t*)head->m_ptr; + kmem_cache_free(node_cache, t); + count--; + } + up(&sem); +} +#define ALLOC_NODE kmem_cache_alloc(node_cache, GFP_KERNEL) +#define FREE_NODE(x) my_free_node(x) +#else +#define ALLOC_NODE kmem_cache_alloc(node_cache, GFP_KERNEL) +#define FREE_NODE(x) kmem_cache_free(node_cache, x) +#endif + +static atomic_t g_count = ATOMIC_INIT(0); + +int +binder_node_GlobalCount() +{ + return atomic_read(&g_count); +} + +BND_IMPLEMENT_ACQUIRE_RELEASE(binder_node); +BND_IMPLEMENT_ATTEMPT_ACQUIRE(binder_node); +// BND_IMPLEMENT_FORCE_ACQUIRE(binder_node); + +/* + * For the process which manages the contexts, we treat ptr == NULL specially. + * In particular, all transactions with a target descriptor of 0 get routed to + * the manager process and the target pointer the process receives gets set to + * NULL. We don't permit any team to send a binder with a NULL ptr, so we can + * never confuse the mappings. 
+ */ +binder_node_t *binder_node_init(binder_proc_t *team, void *ptr, void *cookie) +{ + binder_node_t *that = ALLOC_NODE; + atomic_inc(&g_count); + DPRINTF(5, (KERN_WARNING "%s(team=%p, ptr=%p, cookie=%p): %p\n", __func__, + team, ptr, cookie, that)); + atomic_set(&that->m_primaryRefs, 0); + atomic_set(&that->m_secondaryRefs, 0); + that->m_ptr = ptr; + that->m_cookie = cookie; + that->m_home = team; + if (that->m_home) BND_ACQUIRE(binder_proc, that->m_home, WEAK, that); + return that; +} + +void binder_node_destroy(binder_node_t *that) +{ + atomic_dec(&g_count); + DPRINTF(4, (KERN_WARNING "%s(%p): ptr=%p, cookie=%p\n", __func__, that, + that->m_ptr, that->m_cookie)); + if (that->m_home) { + if (that->m_ptr) { + binder_proc_t* proc = binder_node_AcquireHome(that, that); + if (proc) { + binder_proc_Transact(proc, binder_transaction_CreateRef(tfDecRefs, that->m_ptr, that->m_cookie, proc)); + binder_proc_RemoveLocalMapping(proc, that->m_ptr, that); + BND_RELEASE(binder_proc, proc, STRONG, that); + } + } + BND_RELEASE(binder_proc, that->m_home, WEAK, that); + } + FREE_NODE(that); +} + +void +binder_node_Released(binder_node_t *that) +{ + binder_proc_t* proc = binder_node_AcquireHome(that, that); + DPRINTF(4, (KERN_WARNING "%s(%p): ptr=%p\n", __func__, that, that->m_ptr)); + if (proc) { + DPRINTF(5, (KERN_WARNING " -- m_secondaryRefs=%d\n",atomic_read(&that->m_secondaryRefs))); + binder_proc_Transact(proc, binder_transaction_CreateRef(tfRelease,that->m_ptr,that->m_cookie,proc)); + binder_proc_RemoveLocalStrongRef(proc, that); + BND_RELEASE(binder_proc, proc, STRONG, that); + } +} + +binder_proc_t* +binder_node_AcquireHome(binder_node_t *that, const void *id) +{ + if (that->m_home && BND_ATTEMPT_ACQUIRE(binder_proc, that->m_home, STRONG, id)) { + return that->m_home; + } + return NULL; +} + +status_t +binder_node_Send(binder_node_t *that, struct binder_transaction *t) +{ + binder_proc_t* proc = binder_node_AcquireHome(that, that); + if (proc) { + status_t res = 
binder_proc_Transact(proc, t); + BND_RELEASE(binder_proc, proc, STRONG, that); + return res; + } + + if (t->sender) binder_thread_ReplyDead(t->sender); + binder_transaction_Destroy(t); + return 0; +} + diff -Nru linux-2.6.23/drivers/binder/binder_node.h kernel.android/drivers/binder/binder_node.h --- linux-2.6.23/drivers/binder/binder_node.h 1970-01-01 10:00:00.000000000 +1000 +++ kernel.android/drivers/binder/binder_node.h 2007-11-12 07:49:02.000000000 +1100 @@ -0,0 +1,70 @@ +/* binder driver + * Copyright (C) 2005 Palmsource, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + +#ifndef BINDER2_NODE_H +#define BINDER2_NODE_H + +#include "binder_defs.h" +#include "binder_proc.h" + +typedef struct binder_node { + atomic_t m_primaryRefs; + atomic_t m_secondaryRefs; + void * m_ptr; + void * m_cookie; + binder_proc_t * m_home; +} binder_node_t; + +int binder_node_GlobalCount(void); + +binder_node_t * binder_node_init(binder_proc_t *team, void *ptr, void *cookie); +void binder_node_destroy(binder_node_t *that); + +void binder_node_Released(binder_node_t *that); + +// Return a new strong reference on the node's home team, or NULL +// if the team no longer exists. Be sure to release the reference +// (via BND_RELEASE(binder_proc, team, STRONG, id)) if the return is non-NULL. 
+binder_proc_t* binder_node_AcquireHome(binder_node_t *that, const void *id); + +// Dispatch a transaction to the node's process. +status_t binder_node_Send(binder_node_t *that, struct binder_transaction *t); + +BND_DECLARE_ACQUIRE_RELEASE(binder_node); +// BND_DECLARE_FORCE_ACQUIRE(binder_node); + +/* Super-special AttemptAcquire() that also lets you attempt + to acquire a secondary ref. But note that binder_proc_t is + the ONLY one who can attempt a secondary, ONLY while holding + its lock, for the simple reason that binder_node's destructor + unregisters itself from the team. In other words, it's a + dihrty hawck. +*/ +BND_DECLARE_ATTEMPT_ACQUIRE(binder_node); + +/* Send a transaction to this node. */ +// void binder_node_Send(struct binder_transaction *t); +// void * binder_node_Ptr(binder_node_t *that); +// binder_proc_t * binder_node_Home(binder_node_t *that); + +#define binder_node_Ptr(that) ((that)->m_ptr) +#define binder_node_Cookie(that) ((that)->m_cookie) +#define binder_node_IsAlive(that) (binder_proc_IsAlive((that)->m_home)) +#define binder_node_IsRoot(that) ((that)->m_isRoot) + +#endif // BINDER2_NODE_H diff -Nru linux-2.6.23/drivers/binder/binder_proc.c kernel.android/drivers/binder/binder_proc.c --- linux-2.6.23/drivers/binder/binder_proc.c 1970-01-01 10:00:00.000000000 +1000 +++ kernel.android/drivers/binder/binder_proc.c 2007-11-12 07:49:02.000000000 +1100 @@ -0,0 +1,2215 @@ +/* binder driver + * Copyright (C) 2005 Palmsource, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + +#include +// #include +#include +#include +#include + +#include "binder_defs.h" +#include "binder_proc.h" +#include "binder_thread.h" +#include "binder_node.h" +#include "binder_transaction.h" +#include "iobuffer.h" + +#define BND_PROC_MAX_IDLE_THREADS 3 + +static inline unsigned long calc_order_from_size(unsigned long size) +{ +#if 0 + unsigned long order = 0; + if (size) { + size -= 1; + size *= 2; + } + size >>= PAGE_SHIFT+1; + while (size) { + order++; + size >>= 1; + } + return order; +#else + return size ? get_order(size) : 0; +#endif +} + +static void binder_proc_init(binder_proc_t *that); +static void binder_proc_spawn_looper(binder_proc_t *that); +static void binder_proc_wakeup_timer(unsigned long); +static void binder_proc_idle_timer(unsigned long); +static void binder_proc_send_death_notification(binder_proc_t *that, death_notification_t *death); +static void binder_proc_death_notification_dec_ref(binder_proc_t *that, death_notification_t *death, bool locked); +static void binder_proc_RemoveThreadFromWaitStack(binder_proc_t *that, binder_thread_t *thread); + +static void set_thread_priority(pid_t thread, int priority) +{ + int nice; + + // The following must match SysThreadChangePriority in libbinder. 
+ if(priority >= 80) + { + // Normal to low priority + // map 80..100 to 0..19 + nice = priority - 80; + if(nice > 19) + nice = 19; + } + else + { + // Normal priority or better + // map 0..79 to -20..-1 + nice = priority-3 - 80; + nice /= 4; + } + //printk("set_thread_priority tid %d pri %d == nice %d\n", thread, priority, nice); + set_user_nice(find_task_by_pid(thread), nice); +} + + +void binder_proc_init(binder_proc_t *that) +{ + int i; + atomic_set(&that->m_primaryRefs, 0); + atomic_set(&that->m_secondaryRefs, 0); + init_MUTEX(&that->m_lock); + spin_lock_init(&that->m_spin_lock); + init_MUTEX(&that->m_map_pool_lock); + that->m_threads = NULL; + INIT_LIST_HEAD(&that->m_waitStack); + that->m_waitStackCount = 0; + that->m_wakeThreadMask = 0; + that->m_wakeupTime = B_INFINITE_TIMEOUT; + that->m_wakeupPriority = 10; + init_timer(&that->m_wakeupTimer); + that->m_wakeupTimer.function = &binder_proc_wakeup_timer; + that->m_wakeupTimer.data = (unsigned long)that; + init_timer(&that->m_idleTimer); + that->m_idleTimer.function = &binder_proc_idle_timer; + that->m_idleTimer.data = (unsigned long)that; + that->m_idleTimeout = 5*HZ; + that->m_replyTimeout = 5*HZ; + //that->m_idleTimeout = 5*60*HZ; + //that->m_replyTimeout = 5*60*HZ; + that->m_syncCount = 0; + that->m_freeCount = 0; + that->m_head = NULL; + that->m_tail = &that->m_head; + that->m_needFree = NULL; + that->m_state = 0; + for (i=0;im_localHash[i] = NULL; + that->m_reverseHash[i] = NULL; + } + that->m_numRemoteStrongRefs = 0; + that->m_rootObject = NULL; + that->m_rootStopsProcess = 0; + that->m_descriptors = NULL; + that->m_descriptorCount = 0; + that->m_waitingThreads = 0; + that->m_nonblockedThreads = 0; + that->m_maxThreads = 5; + //that->m_idlePriority = B_REAL_TIME_PRIORITY; + that->m_idlePriority = B_NORMAL_PRIORITY; + atomic_set(&that->m_loopingThreads, 0); +#if 0 + that->m_spawningThreads = 0; +#endif + that->m_rangeMap = RB_ROOT; + that->m_freeMap = RB_ROOT; + BND_FIRST_ACQUIRE(binder_proc, that, 
STRONG, that); + that->m_eventTransaction = binder_transaction_CreateEmpty(); + binder_transaction_SetEvent(that->m_eventTransaction, TRUE); + that->m_pool = NULL; + that->m_pool_active = 0; + INIT_HLIST_HEAD(&that->m_incoming_death_notifications); + INIT_HLIST_HEAD(&that->m_outgoing_death_notifications); + INIT_HLIST_HEAD(&that->m_pending_death_notifications); + INIT_HLIST_HEAD(&that->m_active_death_notifications); + INIT_HLIST_HEAD(&that->m_deleted_death_notifications); +} + +binder_proc_t * +new_binder_proc() +{ + // allocate a binder_proc_t from the slab allocator + binder_proc_t *that = (binder_proc_t*)kmalloc(sizeof(binder_proc_t), GFP_KERNEL); + BND_ASSERT(that != NULL, "failed to allocate binder_proc"); + if(that == NULL) + return NULL; + binder_proc_init(that); + DPRINTF(2, (KERN_WARNING "************* Creating binder_proc %p *************\n", that)); + return that; +} + +void +binder_proc_destroy(binder_proc_t *that) +{ + local_mapping_t *lm; + reverse_mapping_t *rm; + local_mapping_t *localMappings; + reverse_mapping_t *reverseMappings; + range_map_t *r; + struct rb_node *n; + int i; + bool first; + + DPRINTF(2, (KERN_WARNING "************* Destroying binder_proc %p *************\n", that)); + + BND_ASSERT(that->m_state & btCleaned, "binder_proc_Die wns not done"); + BND_ASSERT(!(that->m_state & btFreed), "already free"); + if(that->m_state & btFreed) + return; + + //DPRINTF(5, (KERN_WARNING "Binder team %p: collecting mappings.\n", that)); + lm = localMappings = NULL; + rm = reverseMappings = NULL; + for (i=0;im_localHash[i]) { + // mark the front of the list + if (!localMappings) lm = localMappings = that->m_localHash[i]; + // or tack this chain on the end + else lm->next = that->m_localHash[i]; + // run to the end of the chain + while (lm->next) lm = lm->next; + // mark this chain handled + that->m_localHash[i] = NULL; + } + if (that->m_reverseHash[i]) { + // ditto for reverse mappings + if (!reverseMappings) rm = reverseMappings = 
that->m_reverseHash[i]; + else rm->next = that->m_reverseHash[i]; + while (rm->next) rm = rm->next; + that->m_reverseHash[i] = NULL; + } + } + + first = TRUE; + while ((lm = localMappings)) { + if (first) { + first = FALSE; + DBSHUTDOWN((KERN_WARNING "Binder team %p: cleaning up local mappings.\n", that)); + } + localMappings = lm->next; + // FIXME: send death notification + kmem_cache_free(local_mapping_cache, lm); + } + + first = TRUE; + while ((rm = reverseMappings)) { + if (first) { + first = FALSE; + DBSHUTDOWN((KERN_WARNING "Binder team %p: cleaning up reverse mappings.\n", that)); + } + reverseMappings = rm->next; + DBSHUTDOWN((KERN_WARNING "Removed reverse mapping from node %p to descriptor %d\n", + rm->node, rm->descriptor+1)); + // FIXME: decrement use count and possibly notify owner. It seems like we do this below. + kmem_cache_free(reverse_mapping_cache, rm); + } + + /* + for (i=0; im_localHash[i] == NULL, "Leaking some local mappings!"); + BND_ASSERT(that->m_reverseHash[i] == NULL, "Leaking some reverse mappings!"); + } + */ + + // Free up any items in the transaction data pool. 
+ BND_LOCK(that->m_map_pool_lock); + n = rb_first(&that->m_rangeMap); + while (n) { + r = rb_entry(n, range_map_t, rm_rb); + n = rb_next(n); + + rb_erase(&r->rm_rb, &that->m_rangeMap); + //__free_pages(r->page, calc_order_from_size(r->end - r->start)); + kmem_cache_free(range_map_cache, r); + } + n = rb_first(&that->m_freeMap); + while (n) { + r = rb_entry(n, range_map_t, rm_rb); + n = rb_next(n); + rb_erase(&r->rm_rb, &that->m_rangeMap); + kmem_cache_free(range_map_cache, r); + } + BND_UNLOCK(that->m_map_pool_lock); + + // free_lock(&that->m_lock); + that->m_state |= btFreed; + kfree(that); +} + +void +binder_proc_SetRootObject(binder_proc_t *that, struct binder_node *node) +{ + BND_LOCK(that->m_lock); + if (that->m_rootObject == NULL) that->m_rootObject = node; + BND_UNLOCK(that->m_lock); +} + +void +binder_proc_Stop(binder_proc_t *that, bool now) +{ + bool goodbye; + + DBLOCK((KERN_WARNING "binder_proc_Stop() going to lock %p in %d\n", that, current->pid)); + BND_LOCK(that->m_lock); + + that->m_rootStopsProcess = TRUE; + goodbye = now || that->m_rootObject == (binder_node_t*)-1; + + BND_UNLOCK(that->m_lock); + + if (goodbye) binder_proc_Die(that, FALSE); +} + +bool +binder_proc_AddThread(binder_proc_t *that, binder_thread_t *t) +{ + BND_FIRST_ACQUIRE(binder_thread, t, STRONG, 0); + BND_LOCK(that->m_lock); + if (binder_proc_IsAlive(that)) { + t->next = that->m_threads; + that->m_threads = t; + BND_UNLOCK(that->m_lock); + } else { + BND_UNLOCK(that->m_lock); + BND_RELEASE(binder_thread, t, STRONG, that); + t = NULL; + } + DBSHUTDOWN((KERN_WARNING "%s(%p): %p\n", __func__, that, t)); + return t != NULL; +} + +void +binder_proc_RemoveThread(binder_proc_t *that, binder_thread_t *t) +{ + binder_thread_t **thread; + DBSHUTDOWN((KERN_WARNING "%s(%p): %p\n", __func__, that, t)); + DBLOCK((KERN_WARNING "RemoveThread() going to lock %p in %d\n", that, current->pid)); + BND_LOCK(that->m_lock); + for (thread = &that->m_threads; *thread && *thread != t; thread = 
&(*thread)->next) + ; + if (*thread) { + *thread = (*thread)->next; + } else { + DPRINTF(5, (KERN_WARNING "binder_team %p: RemoveThread of %d does not exist\n", that, t->m_thid)); + } + + // If this is the last thread, the team is dead. + if (!that->m_threads) binder_proc_Die(that, TRUE); + else BND_UNLOCK(that->m_lock); +} + +void +binder_proc_Released(binder_proc_t *that) +{ + DBSHUTDOWN((KERN_WARNING "%s(%p)\n", __func__, that)); + binder_proc_Die(that, FALSE); +} + +void +binder_proc_Die(binder_proc_t *that, bool locked) +{ + binder_transaction_t *cmd; + binder_node_t *n; + binder_thread_t *thr; + descriptor_t *descriptors; + bool dying; + bool first; + binder_transaction_t *cmdHead; + binder_transaction_t *freeCmdHead; + s32 descriptorCount; + binder_thread_t *threads; + bool acquired; + struct hlist_node *_p, *_p2; + death_notification_t *death; + + DBSHUTDOWN((KERN_WARNING "*****************************************\n")); + DBSHUTDOWN((KERN_WARNING "**** %s(%p, %s)\n", __func__, that, locked ? "locked" : "unlocked")); + + // Make sure our destructor doesn't get called until Die() is done. + BND_ACQUIRE(binder_proc, that, WEAK, that); + + // Make sure that Released() doesn't get called if we are dying + // before all primary references have been removed. 
+ acquired = BND_ATTEMPT_ACQUIRE(binder_proc, that, STRONG, that); + + if (!locked) { + DBLOCK((KERN_WARNING "%s() going to lock %p in %d\n", __func__, that, current->pid)); + BND_LOCK(that->m_lock); + } + dying = that->m_state&btDying; + that->m_state |= btDying; + BND_UNLOCK(that->m_lock); + + if (dying) { + DBSHUTDOWN((KERN_WARNING "racing to kill %p\n", that)); + while (!(that->m_state&btDead)) msleep(10); + BND_RELEASE(binder_proc, that, WEAK, that); + if (acquired) BND_RELEASE(binder_proc, that, STRONG, that); + DBSHUTDOWN((KERN_WARNING "race finished\n")); + return; + } + + /* + DPRINTF(5, (KERN_WARNING "Binder team %p: removing from driver.\n", that)); + remove_team(that->tgid); + delete_sem(that->m_spawnerSem); + that->m_spawnerSem = B_BAD_SEM_ID; + */ + + DBLOCK((KERN_WARNING "%s() #2 going to lock %p in %d\n", __func__, that, current->pid)); + BND_LOCK(that->m_lock); + + while(!hlist_empty(&that->m_outgoing_death_notifications)) { + binder_proc_t *observer_proc; + death = hlist_entry(that->m_outgoing_death_notifications.first, typeof(*death), observed_or_active); + hlist_del(&death->observed_or_active); + DBDEATH((KERN_WARNING "DeathNot %p: removed from proc %p m_outgoing_death_notifications, refcnt=%d\n", + death, that, atomic_read(&death->ref_count))); + death->observed_proc = NULL; + observer_proc = death->observer_proc; + BND_UNLOCK(that->m_lock); + binder_proc_send_death_notification(observer_proc, death); + binder_proc_death_notification_dec_ref(observer_proc, death, FALSE); + BND_LOCK(that->m_lock); + } + + while(!hlist_empty(&that->m_incoming_death_notifications)) { + binder_proc_t *observed_proc; + death = hlist_entry(that->m_incoming_death_notifications.first, typeof(*death), observer); + DBDEATH((KERN_WARNING "DeathNot %p: removing from proc %p m_incoming_death_notifications, refcnt=%d\n", + death, that, atomic_read(&death->ref_count))); + observed_proc = death->observed_proc; + if(observed_proc != NULL) { + if(observed_proc != that) { + // We 
need to grab the observed process' lock since the record + // is on the outgoing list on that process. + BND_UNLOCK(that->m_lock); + BND_LOCK(observed_proc->m_lock); + } + if(death->observed_proc != NULL) { + // If we are removing the record from the outgoing list it may + // have already been removed by the time we get the lock. + hlist_del(&death->observed_or_active); + DBDEATH((KERN_WARNING "DeathNot %p: removed from proc %p observed_or_active, refcnt=%d\n", + death, death->observed_proc, atomic_read(&death->ref_count))); + } + if(observed_proc != that) { + // Reacquire our own process lock. + BND_UNLOCK(observed_proc->m_lock); + BND_LOCK(that->m_lock); + } + if(death->observed_proc != NULL) { + // Release the reference we got from the list before we + // switched the locks back. + death->observed_proc = NULL; + binder_proc_death_notification_dec_ref(that, death, TRUE); + } + } + DBDEATH((KERN_WARNING "DeathNot %p: finishing remove from proc %p m_incoming_death_notifications, refcnt=%d\n", + death, that, atomic_read(&death->ref_count))); + hlist_del(&death->observer); + binder_proc_death_notification_dec_ref(that, death, TRUE); + } + hlist_for_each_entry_safe(death, _p, _p2, &that->m_pending_death_notifications, observed_or_active) { + DBDEATH((KERN_WARNING "DeathNot %p: removing from proc %p m_pending_death_notifications, refcnt=%d\n", + death, that, atomic_read(&death->ref_count))); + hlist_del(&death->observed_or_active); + binder_proc_death_notification_dec_ref(that, death, TRUE); + } + hlist_for_each_entry_safe(death, _p, _p2, &that->m_active_death_notifications, observed_or_active) { + DBDEATH((KERN_WARNING "DeathNot %p: removing from proc %p m_active_death_notifications, refcnt=%d\n", + death, that, atomic_read(&death->ref_count))); + hlist_del(&death->observed_or_active); + binder_proc_death_notification_dec_ref(that, death, TRUE); + } + hlist_for_each_entry_safe(death, _p, _p2, &that->m_deleted_death_notifications, observed_or_active) { + 
DBDEATH((KERN_WARNING "DeathNot %p: removing from proc %p m_deleted_death_notifications and freeing, refcnt=%d\n", + death, that, atomic_read(&death->ref_count))); + hlist_del(&death->observed_or_active); + kfree(death); + } + + // Now collect everything we have to clean up. We don't want to + // do stuff on these until after our own lock is released, to avoid + // various horrible deadlock situations. + + del_timer_sync(&that->m_wakeupTimer); + del_timer_sync(&that->m_idleTimer); + + freeCmdHead = that->m_needFree; + that->m_needFree = NULL; + + cmdHead = that->m_head; + that->m_head = NULL; + that->m_tail = &that->m_head; + cmd = cmdHead; + while (cmd) { + // If a pending transaction is the event transaction, remove + // our global pointer so that nobody else tries to use it. + if (cmd == that->m_eventTransaction) that->m_eventTransaction = NULL; + cmd = cmd->next; + } + + descriptors = that->m_descriptors; + descriptorCount = that->m_descriptorCount; + that->m_descriptors = NULL; + that->m_descriptorCount = 0; + + threads = that->m_threads; + that->m_threads = NULL; + for (thr = threads; thr != NULL; thr = thr->next) BND_ACQUIRE(binder_thread, thr, WEAK, that); + + that->m_state |= btDead; + + BND_UNLOCK(that->m_lock); + + // Now do all the cleanup! + + first = TRUE; + while ((thr = threads)) { + if (first) { + first = FALSE; + DBSHUTDOWN((KERN_WARNING "Binder team %p: removing remaining threads.\n", that)); + } + threads = thr->next; + DBSHUTDOWN((KERN_WARNING "Killing thread %p (%d)\n", thr, binder_thread_Thid(thr))); + binder_thread_Die(thr); + BND_RELEASE(binder_thread, thr, WEAK, that); + } + + first = TRUE; + while ((cmd=freeCmdHead)) { + if (first) { + first = FALSE; + DBSHUTDOWN((KERN_WARNING "Binder team %p: detaching free transactions.\n", that)); + } + DBSHUTDOWN((KERN_WARNING "Detaching transaction %p from thread %p (%d) to thread %p (%d) node %p\n", + cmd, cmd->sender, cmd->sender ? 
binder_thread_Thid(cmd->sender) : -1, + cmd->receiver, cmd->receiver ? binder_thread_Thid(cmd->receiver) : -1, + cmd->target)); + + // XXX The old implementation of this would call ReleaseTeam() + // here to keep the transaction around so that user space could + // hold on to it after replying. For some reason this would + // cause leaks (if the process never got destroyed), and this + // system doesn't use this feature, so now we just destroy it. + freeCmdHead = cmd->next; + binder_transaction_Destroy(cmd); + /* + binder_transaction_ReleaseTeam(cmd); + cmd = cmd->next; + */ + } + + first = TRUE; + while ((cmd = cmdHead)) { + if (first) { + first = FALSE; + DBSHUTDOWN((KERN_WARNING "Binder team %p: cleaning up pending commands.\n", that)); + } + if (cmd->sender) { + DBSHUTDOWN((KERN_WARNING "Returning transaction %p to thread %p (%d)\n", cmd, cmd->sender, binder_thread_Thid(cmd->sender))); + binder_thread_ReplyDead(cmd->sender); + } + cmdHead = cmd->next; + binder_transaction_Destroy(cmd); + } + + first = TRUE; + if (descriptors) { + int i; + for (i=0;im_eventTransaction) binder_transaction_Destroy(that->m_eventTransaction); + that->m_eventTransaction = NULL; + + DBSHUTDOWN((KERN_WARNING "Binder process %p: DEAD!\n", that)); + + BND_ASSERT(that->m_head == NULL, "that->m_head != NULL"); + + that->m_state |= btCleaned; + BND_RELEASE(binder_proc, that, WEAK, that); + if (acquired) BND_RELEASE(binder_proc, that, STRONG, that); + + DBSHUTDOWN((KERN_WARNING "**** %s(%p, %s) done dying!\n", __func__, that, locked ? 
"locked" : "unlocked")); + DBSHUTDOWN((KERN_WARNING "*****************************************\n")); +} + +status_t +binder_proc_RequestDeathNotification(binder_proc_t *that, binder_proc_t *client, void *cookie) +{ + bool already_dead = FALSE; + death_notification_t *death = kmalloc(sizeof(death_notification_t), GFP_KERNEL); + if(death == NULL) + return -ENOMEM; + DBDEATH((KERN_WARNING "DeathNot %p: RequestDeathNotification created proc %p watching proc %p\n", + death, client, that)); + atomic_set(&death->ref_count, 1); + death->observer_proc = client; + death->observed_proc = NULL; + death->cookie = cookie; + BND_LOCK(that->m_lock); + if(binder_proc_IsAlive(that)) { + atomic_inc(&death->ref_count); + death->observed_proc = that; + hlist_add_head(&death->observed_or_active, &that->m_outgoing_death_notifications); + DBDEATH((KERN_WARNING "DeathNot %p: added to proc %p m_outgoing_death_notifications, refcnt=%d\n", + death, that, atomic_read(&death->ref_count))); + } + else { + DBDEATH((KERN_WARNING "DeathNot %p: already dead!\n", death)); + already_dead = TRUE; + } + BND_UNLOCK(that->m_lock); + BND_LOCK(client->m_lock); + if(binder_proc_IsAlive(client)) { + atomic_inc(&death->ref_count); + hlist_add_head(&death->observer, &client->m_incoming_death_notifications); + DBDEATH((KERN_WARNING "DeathNot %p: added to proc %p m_incoming_death_notifications, refcnt=%d\n", + death, client, atomic_read(&death->ref_count))); + } + BND_UNLOCK(client->m_lock); + + if(already_dead) + binder_proc_send_death_notification(client, death); + binder_proc_death_notification_dec_ref(client, death, FALSE); + return 0; +} + +status_t +binder_proc_ClearDeathNotification(binder_proc_t *that, binder_proc_t *client, void *cookie) +{ + struct hlist_node *_p; + death_notification_t *death = NULL; + + BND_LOCK(client->m_lock); + hlist_for_each_entry(death, _p, &client->m_incoming_death_notifications, observer) { + if(death->cookie == cookie) { + hlist_del(&death->observer); + break; + } + } + 
BND_UNLOCK(client->m_lock); + + DBDEATH((KERN_WARNING "DeathNot %p: ClearDeathNotification for cookie %p\n", death, cookie)); + if(death == NULL) + return -ENOENT; + BND_LOCK(that->m_lock); + if(death->observed_proc == that) { + hlist_del(&death->observed_or_active); + binder_proc_death_notification_dec_ref(client, death, FALSE); // this is holding the wrong lock, but we have a second reference + DBDEATH((KERN_WARNING "DeathNot %p: removed from proc %p m_incoming_death_notifications, refcnt=%d\n", + death, client, atomic_read(&death->ref_count))); + death->observed_proc = NULL; + } + else { + DBDEATH((KERN_WARNING "DeathNot %p ClearDeathNotification: already pending or sent!\n", death)); + } + BND_UNLOCK(that->m_lock); + binder_proc_death_notification_dec_ref(client, death, FALSE); // from hlist_del(&death->observer); + return 0; +} + +status_t +binder_proc_DeadBinderDone(binder_proc_t *that, void *cookie) +{ + struct hlist_node *_p; + death_notification_t *death = NULL; + BND_LOCK(that->m_lock); + hlist_for_each_entry(death, _p, &that->m_active_death_notifications, observed_or_active) { + if(death->cookie == cookie) { + DBDEATH((KERN_WARNING "DeathNot %p DeadBinderDone: removing from proc %p m_active_death_notifications, refcnt=%d\n", + death, that, atomic_read(&death->ref_count))); + hlist_del(&death->observed_or_active); + death->observed_proc = NULL; + binder_proc_death_notification_dec_ref(that, death, TRUE); + break; + } + } + BND_UNLOCK(that->m_lock); + DBDEATH((KERN_WARNING "DeathNot %p: DeadBinderDone completed on cookie %p\n", death, cookie)); + if(death == NULL) + return -ENOENT; + return 0; +} + +static void +binder_proc_activate_death_processing_thread(binder_proc_t *that) +{ + binder_thread_t *thread; + + assert_spin_locked(&that->m_spin_lock); + if(!list_empty(&that->m_waitStack)) { + // TODO: pop thread from wait stack here + thread = list_entry(that->m_waitStack.next, binder_thread_t, waitStackEntry); + DBDEATH((KERN_WARNING "Activating death 
processing thread pid %d (proc %p)\n", + thread->m_thid, that)); + binder_proc_RemoveThreadFromWaitStack(that, thread); + thread->wakeReason = WAKE_REASON_PROCESS_DEATH; + BND_ASSERT(thread->nextRequest == NULL, "Thread has a request!"); + binder_thread_Wakeup(thread); + } + else { + BND_ASSERT((that->m_wakeThreadMask & WAKE_THREAD_FOR_PROCESS_DEATH) == 0, "WAKE_THREAD_FOR_PROCESS_DEATH already set"); + that->m_wakeThreadMask |= WAKE_THREAD_FOR_PROCESS_DEATH; + DBSPAWN((KERN_WARNING "%s(%p) empty waitstack\n", __func__, that)); + } +} + +void +binder_proc_send_death_notification(binder_proc_t *that, death_notification_t *death) +{ + unsigned long flags; + bool first; + + DIPRINTF(0, (KERN_WARNING "%s(%p)\n", __func__, that)); + + BND_LOCK(that->m_lock); + + DBDEATH((KERN_WARNING "DeathNot %p: Sending death notification to %p (alive=%d)\n", + death, that, binder_proc_IsAlive(that))); + + if(binder_proc_IsAlive(that)) { + spin_lock_irqsave(&that->m_spin_lock, flags); + first = hlist_empty(&that->m_pending_death_notifications) && hlist_empty(&that->m_deleted_death_notifications); + + atomic_inc(&death->ref_count); + hlist_add_head(&death->observed_or_active, &that->m_pending_death_notifications); + DBDEATH((KERN_WARNING "DeathNot %p: adding to proc %p m_pending_death_notifications, refcnt=%d, first=%d\n", + death, that, atomic_read(&death->ref_count), first)); + death->observed_proc = that; + + if(first) { + binder_proc_activate_death_processing_thread(that); + } + spin_unlock_irqrestore(&that->m_spin_lock, flags); + } + + BND_UNLOCK(that->m_lock); +} + +void +binder_proc_death_notification_dec_ref(binder_proc_t *that, death_notification_t *death, bool locked) +{ + DBDEATH((KERN_WARNING "DeathNot %p: decrementing refcnt, cur=%d\n", + death, atomic_read(&death->ref_count))); + if(atomic_dec_return(&death->ref_count) == 0) { + BND_ASSERT(death->observed_proc == NULL, "freeing death_notification_t with observed_proc still set"); + if(!locked) + BND_LOCK(that->m_lock); + 
if(binder_proc_IsAlive(that)) { + unsigned long flags; + bool first; + spin_lock_irqsave(&that->m_spin_lock, flags); + first = hlist_empty(&that->m_pending_death_notifications) && hlist_empty(&that->m_deleted_death_notifications); +#if BINDER_DEBUG + struct hlist_node *_p, *_p2; + death_notification_t *node; + hlist_for_each_entry_safe(node, _p, _p2, &that->m_outgoing_death_notifications, observed_or_active) { + BND_ASSERT(node != death, "Death ref count reached 0 while still on m_outgoing_death_notifications list"); + } + hlist_for_each_entry_safe(node, _p, _p2, &that->m_incoming_death_notifications, observer) { + BND_ASSERT(node != death, "Death ref count reached 0 while still on m_incoming_death_notifications list"); + } + hlist_for_each_entry_safe(node, _p, _p2, &that->m_pending_death_notifications, observed_or_active) { + BND_ASSERT(node != death, "Death ref count reached 0 while still on m_pending_death_notifications list"); + } + hlist_for_each_entry_safe(node, _p, _p2, &that->m_active_death_notifications, observed_or_active) { + BND_ASSERT(node != death, "Death ref count reached 0 while still on m_active_death_notifications list"); + } + hlist_for_each_entry_safe(node, _p, _p2, &that->m_deleted_death_notifications, observed_or_active) { + BND_ASSERT(node != death, "Death ref count reached 0 while still on m_deleted_death_notifications list"); + } + DBDEATH((KERN_WARNING "DeathNot %p: observer.next=%p, active.next=%p\n", + death, death->observer.next, death->observed_or_active.next)); +#endif + BND_ASSERT(death->observer.next == LIST_POISON1, "death ref count reached 0 while still on observer list"); + BND_ASSERT(death->observed_or_active.next == LIST_POISON1, "death ref count reached 0 while still on observed_or_active list"); + DBDEATH((KERN_WARNING "DeathNot %p: adding to deleted list, first=%d\n", death, first)); + hlist_add_head(&death->observed_or_active, &that->m_deleted_death_notifications); + if(first) + 
binder_proc_activate_death_processing_thread(that); + spin_unlock_irqrestore(&that->m_spin_lock, flags); + } + else { + kfree(death); + } + if(!locked) + BND_UNLOCK(that->m_lock); + } +} + +void +binder_proc_GetPendingDeathNotifications(binder_proc_t *that, binder_thread_t *thread, iobuffer_t *io) +{ + struct hlist_node *_p, *_p2; + death_notification_t *death; + BND_LOCK(that->m_lock); + + hlist_for_each_entry_safe(death, _p, _p2, &that->m_deleted_death_notifications, observed_or_active) { + if(iobuffer_remaining(io) < 8) + goto buffer_full; + DBDEATH((KERN_WARNING "DeathNot %p: GetPending removing from proc %p m_deleted_death_notifications and freeing\n", + death, that)); + hlist_del(&death->observed_or_active); + iobuffer_write_u32(io, brCLEAR_DEATH_NOTIFICATION_DONE); + iobuffer_write_u32(io, (int32_t)death->cookie); + kfree(death); + } + + hlist_for_each_entry_safe(death, _p, _p2, &that->m_pending_death_notifications, observed_or_active) { + if(iobuffer_remaining(io) < 8) + goto buffer_full; + hlist_del(&death->observed_or_active); + iobuffer_write_u32(io, brDEAD_BINDER); + iobuffer_write_u32(io, (int32_t)death->cookie); + hlist_add_head(&death->observed_or_active, &that->m_active_death_notifications); + DBDEATH((KERN_WARNING "DeathNot %p: moved from proc %p m_pending_death_notifications to m_active_death_notifications, refcnt=%d\n", + death, that, atomic_read(&death->ref_count))); + } + thread->wakeReason = WAKE_REASON_NONE; +buffer_full: + BND_UNLOCK(that->m_lock); +} + +status_t +binder_proc_AddToNeedFreeList(binder_proc_t *that, binder_transaction_t *t) +{ + BND_ACQUIRE(binder_proc, that, WEAK, that); + + binder_transaction_ReleaseTarget(t); + + DBLOCK((KERN_WARNING "AddToNeedFreeList() going to lock %p in %d\n", that, current->pid)); + BND_LOCK(that->m_lock); + DPRINTF(2, (KERN_WARNING "AddToNeedFreeList %p for team %p\n",t,that)); + if (!binder_proc_IsAlive(that)) { + // Don't call this with lock held -- it could cause all other + // sorts of things to 
happen. + BND_UNLOCK(that->m_lock); + binder_transaction_ReleaseTeam(t); + BND_LOCK(that->m_lock); + } + t->next = that->m_needFree; + that->m_needFree = t; + that->m_freeCount++; + BND_UNLOCK(that->m_lock); + + BND_RELEASE(binder_proc, that, WEAK, that); + + return 0; +} + +BND_IMPLEMENT_ACQUIRE_RELEASE(binder_proc); +BND_IMPLEMENT_ATTEMPT_ACQUIRE(binder_proc); + +s32 +binder_proc_Node2Descriptor(binder_proc_t *that, binder_node_t *n, bool ref, s32 type) +{ + s32 desc=-2; + reverse_mapping_t **head; + + DPRINTF(4, (KERN_WARNING "%s(%p, %p, %s, %s)\n", __func__, that, n, ref ? "true" : "false", type == STRONG ? "STRONG" : "WEAK")); + BND_LOCK(that->m_lock); + + if (binder_proc_IsAlive(that)) { + u32 bucket = hash_ptr(n, HASH_BITS); + DPRINTF(5, (KERN_WARNING " -- node(%p) mapping to descr bucket %d\n",n,bucket)); + head = &that->m_reverseHash[bucket]; + while (*head && (n < (*head)->node)) head = &(*head)->next; + if (*head && (n == (*head)->node)) { + desc = (*head)->descriptor; + DPRINTF(5, (KERN_WARNING "node(%p) found map to descriptor(%d), strong=%d\n",n,desc+1,that->m_descriptors[desc].priRef)); + if (!ref || type == WEAK || that->m_descriptors[desc].priRef > 0 + || BND_ATTEMPT_ACQUIRE(binder_node, n, STRONG, that)) { + if (ref) { + DPRINTF(5, (KERN_WARNING "Incrementing descriptor %d %s: strong=%d weak=%d in team %p\n", desc+1, type == STRONG ? "STRONG" : "WEAK", that->m_descriptors[desc].priRef, that->m_descriptors[desc].secRef, that)); + if (type == STRONG) that->m_descriptors[desc].priRef++; + else that->m_descriptors[desc].secRef++; + } + DPRINTF(5, (KERN_WARNING "node(%p) mapped to descriptor(%d) in team %p\n",n,desc+1,that)); + } else { + // No longer exists! 
+ desc = -2; + } + } else if (ref && (type != STRONG || BND_ATTEMPT_ACQUIRE(binder_node, n, STRONG, that))) { + reverse_mapping_t *map; + int i; + if (type != STRONG) BND_ACQUIRE(binder_node, n, WEAK, that); + for (i=0;im_descriptorCount;i++) { + if (that->m_descriptors[i].node == NULL) { + that->m_descriptors[i].node = n; + if (type == STRONG) { + that->m_descriptors[i].priRef = 1; + that->m_descriptors[i].secRef = 0; + } else { + that->m_descriptors[i].priRef = 0; + that->m_descriptors[i].secRef = 1; + } + desc = i; + // DPRINTF(5, (KERN_WARNING "Initializing descriptor %d: strong=%d weak=%d in team %p\n", i+1, that->m_descriptors[i].priRef,that->m_descriptors[i].secRef,that)); + DPRINTF(5, (KERN_WARNING "node(%p) mapped to NEW descriptor(%d) in team %p\n",n,desc+1,that)); + break; + } + } + + if (desc < 0) { + int i; + s32 newCount = that->m_descriptorCount*2; + if (!newCount) newCount = 32; + // that->m_descriptors = (descriptor_t*)kernel_realloc(that->m_descriptors,sizeof(descriptor_t)*newCount,"descriptors"); + { + descriptor_t *d = kmalloc(sizeof(descriptor_t)*newCount, GFP_KERNEL); + // FIXME: BeOS code did not deal with allocation failures + memcpy(d, that->m_descriptors, that->m_descriptorCount*sizeof(descriptor_t)); + kfree(that->m_descriptors); + that->m_descriptors = d; + } + for (i=newCount-1;i>=that->m_descriptorCount;i--) that->m_descriptors[i].node = NULL; + desc = that->m_descriptorCount; + DPRINTF(5, (KERN_WARNING "Initializing descriptor %d: strong=%d weak=%d in team %p\n", desc+1, that->m_descriptors[desc].priRef,that->m_descriptors[desc].secRef,that)); + that->m_descriptors[desc].node = n; + if (type == STRONG) { + that->m_descriptors[desc].priRef = 1; + that->m_descriptors[desc].secRef = 0; + } else { + that->m_descriptors[desc].priRef = 0; + that->m_descriptors[desc].secRef = 1; + } + that->m_descriptorCount = newCount; + DPRINTF(5, (KERN_WARNING "node(%p) mapped to NEW descriptor(%d) in team %p\n",n,desc+1,that)); + } + + map = 
(reverse_mapping_t*)kmem_cache_alloc(reverse_mapping_cache, GFP_KERNEL); + map->node = n; + map->descriptor = desc; + map->next = *head; + *head = map; + } + } + + BND_UNLOCK(that->m_lock); + return desc+1; +} + +binder_node_t * +binder_proc_Descriptor2Node(binder_proc_t *that, s32 descriptor, const void* id, s32 type) +{ + binder_node_t *n; + (void)id; + + descriptor--; + + DBLOCK((KERN_WARNING "Descriptor2Node() going to lock %p in %d\n", that, current->pid)); + BND_LOCK(that->m_lock); + + n = NULL; + if (binder_proc_IsAlive(that)) { + if ((descriptor >= 0) && + (descriptor < that->m_descriptorCount) && + (that->m_descriptors[descriptor].node != NULL)) { + if (type == STRONG) { + if (that->m_descriptors[descriptor].priRef > 0) { + n = that->m_descriptors[descriptor].node; + BND_ACQUIRE(binder_node, n, STRONG, id); + } else { + UPRINTF((KERN_WARNING "Descriptor2Node failed primary: desc=%d, max=%d, node=%p, strong=%d\n", + descriptor+1, that->m_descriptorCount, + that->m_descriptors[descriptor].node, + that->m_descriptors[descriptor].priRef)); + } + } else { + if (that->m_descriptors[descriptor].secRef > 0) { + n = that->m_descriptors[descriptor].node; + BND_ACQUIRE(binder_node, n, WEAK, id); + } else { + UPRINTF((KERN_WARNING "Descriptor2Node failed secondary: desc=%d, max=%d, node=%p, weak=%d\n", + descriptor+1, that->m_descriptorCount, + that->m_descriptors[descriptor].node , + that->m_descriptors[descriptor].secRef)); + } + } + } else { + UPRINTF((KERN_WARNING "Descriptor2Node failed: desc=%d, max=%d, node=%p, strong=%d\n", + descriptor+1, that->m_descriptorCount, + (descriptor >= 0 && descriptor < that->m_descriptorCount) ? that->m_descriptors[descriptor].node : NULL, + (descriptor >= 0 && descriptor < that->m_descriptorCount) ? 
that->m_descriptors[descriptor].priRef : 0)); + } + } + + BND_UNLOCK(that->m_lock); + return n; +} + +status_t +binder_proc_Ptr2Node(binder_proc_t *that, void *ptr, void *cookie, binder_node_t **n, iobuffer_t *io, const void* id, s32 type) +{ + u32 bucket; + local_mapping_t **head; + local_mapping_t *newMapping; + (void)id; + + if (ptr == NULL) { + DPRINTF(5, (KERN_WARNING "ptr(%p) mapping to NULL node in team %p\n",ptr,that)); + *n = NULL; + return 0; + } + + DBLOCK((KERN_WARNING "Ptr2Node() going to lock %p in %d\n", that, current->pid)); + BND_LOCK(that->m_lock); + + bucket = hash_ptr(ptr, HASH_BITS); + DPRINTF(9, (KERN_WARNING "ptr(%p) mapping to ptr bucket %u (value %p) in team %p\n",ptr,bucket,that->m_localHash[bucket],that)); + head = &that->m_localHash[bucket]; + while (*head && (ptr < (*head)->ptr)) head = &(*head)->next; + if (*head && (ptr == (*head)->ptr)) { + if ((type == STRONG) && BND_ATTEMPT_ACQUIRE(binder_node, (*head)->node, STRONG, id)) { + *n = (*head)->node; + DPRINTF(4, (KERN_WARNING "%s(%p, %p, %s): %p (OLD)\n", __func__, that, ptr, type == STRONG ? "STRONG" : "WEAK", *n)); + BND_UNLOCK(that->m_lock); + return 0; + } else if (BND_ATTEMPT_ACQUIRE(binder_node, (*head)->node, WEAK, id)) { + if((*head)->next) + BND_ASSERT(io || (*head)->next->ptr != ptr || atomic_read(&((*head)->next->node->m_secondaryRefs)) == 0, "May remove wrong node"); + + *n = (*head)->node; + DPRINTF(4, (KERN_WARNING "%s(%p, %p, %s): %p (OLD)\n", __func__, that, ptr, type == STRONG ? "STRONG" : "WEAK", *n)); + if (type == STRONG) { + /* Other teams have a secondary reference on this node, but no + primary reference. We need to make the node alive again, and + tell the calling team that the driver now has a primary + reference on it. The two calls below will force a new primary + reference on the node, and remove the secondary reference we + just acquired above. 
All the trickery with the secondary reference + is protection against a race condition where another team removes + the last secondary reference on the object, while we are here + trying to add one. + */ + int count; + DPRINTF(9, (KERN_WARNING "Apply a new primary reference to node (%p) in team %p\n",*n,that)); + count = BND_FORCE_ACQUIRE(binder_node, *n, id); + BND_RELEASE(binder_node, *n, WEAK, id); + + BND_ASSERT(io != NULL, "Acquiring new strong reference without io"); + if (count == 0) { + that->m_numRemoteStrongRefs++; + if (io) { + BND_ACQUIRE(binder_node, *n, STRONG, that); // add a second reference to avoid the node being released before the aquire has finished + iobuffer_write_u32(io, brACQUIRE); + iobuffer_write_void(io, ptr); + iobuffer_write_void(io, (*head)->cookie); + DPRINTF(5, (KERN_WARNING " -- wrote brACQUIRE: %p\n", ptr)); + } + } + else { + printk(KERN_WARNING "%s(%p, %p, %s): %p Reaquired strong reference, but someone beat us to it\n", __func__, that, ptr, type == STRONG ? "STRONG" : "WEAK", (*head)->node); + } + } + BND_UNLOCK(that->m_lock); + return 0; + } +#if 1 + else { + DPRINTF(4, (KERN_WARNING "%s(%p, %p, %s): %p (OLD) FAILED AttempAcquire!\n", __func__, that, ptr, type == STRONG ? 
"STRONG" : "WEAK", (*head)->node)); + } +#endif + } + + { + local_mapping_t **thead; + thead = &that->m_localHash[hash_ptr(ptr, HASH_BITS)]; + while (*thead) { + if((*thead)->ptr == ptr) { + BND_ASSERT(atomic_read(&((*head)->node->m_primaryRefs)) == 0, "Creating new node when a node with strong refs already exists"); + BND_ASSERT(atomic_read(&((*head)->node->m_secondaryRefs)) == 0, "Creating new node when a node with weak refs already exists"); + } + thead = &(*thead)->next; + } + } + + if (io && (iobuffer_remaining(io) < 8)) { + BND_UNLOCK(that->m_lock); + return -EINVAL; + } + + if (!binder_proc_IsAlive(that)) { + BND_UNLOCK(that->m_lock); + return -ENOENT; + } + + newMapping = (local_mapping_t*)kmem_cache_alloc(local_mapping_cache, GFP_KERNEL); + newMapping->ptr = ptr; + newMapping->cookie = cookie; + newMapping->node = binder_node_init(that,ptr,cookie); + *n = newMapping->node; + DPRINTF(4, (KERN_WARNING "%s(%p, %p, %s): %p (NEW)\n", __func__, that, ptr, type == STRONG ? "STRONG" : "WEAK", *n)); + BND_FIRST_ACQUIRE(binder_node, *n, type, id); + newMapping->next = *head; + *head = newMapping; + + if (io) { + if (type == STRONG) { + BND_ACQUIRE(binder_node, *n, STRONG, that); // add a second reference to avoid the node being released before the aquire has finished + that->m_numRemoteStrongRefs++; + iobuffer_write_u32(io, brACQUIRE); + iobuffer_write_void(io, ptr); + iobuffer_write_void(io, cookie); + DPRINTF(5, (KERN_WARNING " -- wrote brACQUIRE: %p\n", ptr)); + } + BND_ACQUIRE(binder_node, *n, WEAK, that); // add a second reference to avoid the node being released before the aquire has finished + iobuffer_write_u32(io, brINCREFS); + iobuffer_write_void(io, ptr); + iobuffer_write_void(io, cookie); + DPRINTF(5, (KERN_WARNING " -- wrote brINCREFS: %p\n", ptr)); + } + else { + if (type == STRONG) + printk(KERN_WARNING "%s() creating new node without brACQUIRE\n", __func__); + else + printk(KERN_WARNING "%s() creating new node without brINCREFS\n", __func__); + } + + 
BND_UNLOCK(that->m_lock); + return 0; +} + +bool +binder_proc_RefDescriptor(binder_proc_t *that, s32 descriptor, s32 type) +{ + bool r=FALSE; + + descriptor--; + + DBLOCK((KERN_WARNING "RefDescriptor() going to lock %p in %d\n", that, current->pid)); + BND_LOCK(that->m_lock); + + if (binder_proc_IsAlive(that)) { + descriptor_t *d; + if ((descriptor >= 0) && + (descriptor < that->m_descriptorCount) && + ((d=&that->m_descriptors[descriptor])->node != NULL)) { + r = TRUE; + DPRINTF(5, (KERN_WARNING "Incrementing descriptor %d %s: strong=%d weak=%d in team %p\n", descriptor+1, type == STRONG ? "STRONG" : "WEAK", d->priRef,d->secRef,that)); + if (type == STRONG) { + if (d->priRef > 0) d->priRef++; + else { + UPRINTF((KERN_WARNING "No strong references exist for descriptor: desc=%d, max=%d, node=%p, weak=%d\n", + descriptor+1, that->m_descriptorCount, + (descriptor >= 0 && descriptor < that->m_descriptorCount) ? that->m_descriptors[descriptor].node : NULL, + (descriptor >= 0 && descriptor < that->m_descriptorCount) ? that->m_descriptors[descriptor].secRef : 0)); + r = FALSE; + } + } else if (type == WEAK) { + if (d->secRef > 0) d->secRef++; + else if (d->priRef > 0) { + // Note that we allow the acquisition of a weak reference if only holding + // a strong because for transactions we only increment the strong ref + // count when sending a strong reference... so we need to be able to recover + // weak reference here. + d->secRef++; BND_ACQUIRE(binder_node, d->node, WEAK, that); + } else { + UPRINTF((KERN_WARNING "No weak references exist for descriptor: desc=%d, max=%d, node=%p, strong=%d\n", + descriptor+1, that->m_descriptorCount, + (descriptor >= 0 && descriptor < that->m_descriptorCount) ? that->m_descriptors[descriptor].node : NULL, + (descriptor >= 0 && descriptor < that->m_descriptorCount) ? 
that->m_descriptors[descriptor].priRef : 0)); + r = FALSE; + } + } + } + } + + BND_UNLOCK(that->m_lock); + return r; +} + +bool +binder_proc_UnrefDescriptor(binder_proc_t *that, s32 descriptor, s32 type) +{ + binder_node_t *n = NULL; + bool r=FALSE; + + descriptor--; + + DPRINTF(4, (KERN_WARNING "%s(%p, %d, %s)\n", __func__, that, descriptor, type == STRONG ? "STRONG" : "WEAK")); + + BND_LOCK(that->m_lock); + + if (binder_proc_IsAlive(that)) { + descriptor_t *d; + bool remove = FALSE; + if ((descriptor >= 0) && + (descriptor < that->m_descriptorCount) && + ((d=&that->m_descriptors[descriptor])->node != NULL)) { + r = TRUE; + DPRINTF(5, (KERN_WARNING "Decrementing descriptor %d %s: strong=%d weak=%d in team %p\n", descriptor+1, type == STRONG ? "STRONG" : "WEAK", d->priRef,d->secRef,that)); + if (type == STRONG) { + if (--d->priRef == 0) n = d->node; + } else { + if (--d->secRef == 0) n = d->node; + } + DPRINTF(5, (KERN_WARNING "Descriptor %d is now: strong=%d weak=%d in team %p\n", descriptor+1, d->priRef,d->secRef,that)); + if (n && d->priRef <= 0 && d->secRef <= 0) { + d->node = NULL; + remove = TRUE; + } + } + + if (remove) { + reverse_mapping_t *entry,**head = &that->m_reverseHash[hash_ptr(n, HASH_BITS)]; + while (*head && (n < (*head)->node)) head = &(*head)->next; + if (*head && (n == (*head)->node)) { + entry = *head; + *head = entry->next; + kmem_cache_free(reverse_mapping_cache, entry); + } + } + } + + BND_UNLOCK(that->m_lock); + if (n) BND_RELEASE(binder_node, n, type, that); + return r; +} + +bool +binder_proc_RemoveLocalMapping(binder_proc_t *that, void *ptr, struct binder_node *node) +{ + local_mapping_t *entry=NULL; + + DBLOCK((KERN_WARNING "RemoveLocalMapping() going to lock %p in %d\n", that, current->pid)); + BND_LOCK(that->m_lock); + + if (binder_proc_IsAlive(that)) { + local_mapping_t **head; + DPRINTF(5, (KERN_WARNING "RemoveLocalMapping %p in team %p\n", ptr, that)); + head = &that->m_localHash[hash_ptr(ptr, HASH_BITS)]; + while (*head) { +// 
(KERN_WARNING "RemoveLocalMapping %08x %08x\n",ptr,(*head)->ptr); + if (ptr >= (*head)->ptr && ((*head)->node == node || ptr > (*head)->ptr)) + break; + head = &(*head)->next; + } + +// while (*head && (ptr <= (*head)->ptr)) head = &(*head)->next; + if (*head && (ptr == (*head)->ptr)) { + entry = *head; + *head = entry->next; + } + BND_ASSERT(entry != NULL, "RemoveLocalMapping failed for live process"); + if(entry == NULL) { + head = &that->m_localHash[hash_ptr(ptr, HASH_BITS)]; + while (*head) { + if((*head)->node == node) + break; + head = &(*head)->next; + } + if(*head != NULL) + printk(KERN_WARNING "RemoveLocalMapping failed, but exists in the wrong place, ptr = %p node = %p node->ptr = %p\n", ptr, node, (*head)->ptr); + } + } + + BND_UNLOCK(that->m_lock); + + if (entry) { + kmem_cache_free(local_mapping_cache, entry); +// (KERN_WARNING "RemoveLocalMapping success\n"); + return TRUE; + } + + DPRINTF(0, (KERN_WARNING "RemoveLocalMapping failed for %p in team %p\n", ptr, that)); + return FALSE; +} + +void +binder_proc_RemoveLocalStrongRef(binder_proc_t *that, binder_node_t *node) +{ + bool goodbye; + + DBLOCK((KERN_WARNING "RemoveLocalStrongRef() going to lock %p in %d\n", that, current->pid)); + BND_LOCK(that->m_lock); + + // It is time for this process to go away if: + // (a) This is the last strong reference on it, and + // (b) The process published a root object. (If it didn't publish + // a root object, then someone else is responsible for managing its lifetime.) + goodbye = --that->m_numRemoteStrongRefs == 0 ? (that->m_rootObject != NULL) : FALSE; + + // Oh, and also, if the object being released -is- the root object, well that... + if (that->m_rootObject == node) { + that->m_rootObject = (binder_node_t*)-1; // something we know isn't a valid address. 
+ if (that->m_rootStopsProcess) goodbye = TRUE; + } + + BND_UNLOCK(that->m_lock); + + if (goodbye) binder_proc_Die(that, FALSE); +} + +void +binder_proc_AddLocalStrongRef(binder_proc_t *that, binder_node_t *node) +{ + DBLOCK((KERN_WARNING "AddLocalStrongRef() going to lock %p in %d\n", that, current->pid)); + BND_LOCK(that->m_lock); + that->m_numRemoteStrongRefs++; + BND_UNLOCK(that->m_lock); +} + +bool +binder_proc_AttemptRefDescriptor(binder_proc_t *that, s32 descriptor, binder_node_t **out_target) +{ + binder_node_t *n = NULL; + bool r=FALSE; + + descriptor--; + + DBLOCK((KERN_WARNING "AttemptRefDescriptor() going to lock %p in %d\n", that, current->pid)); + BND_LOCK(that->m_lock); + + if (binder_proc_IsAlive(that)) { + descriptor_t *d; + if ((descriptor >= 0) && + (descriptor < that->m_descriptorCount) && + ((d=&that->m_descriptors[descriptor])->node != NULL)) { + r = TRUE; + DPRINTF(5, (KERN_WARNING "Attempt incrementing descriptor %d primary: strong=%d weak=%d in team %p\n", descriptor+1, d->priRef,d->secRef,that)); + if (d->priRef > 0 || (d->node && BND_ATTEMPT_ACQUIRE(binder_node, d->node, STRONG, that))) { + d->priRef++; + } else { + // If no strong references currently exist, we can't + // succeed. Instead return the node this attempt was + // made on. 
+ r = FALSE; + if ((n=d->node) != NULL) BND_ACQUIRE(binder_node, n, WEAK, that); + } + } + } + + BND_UNLOCK(that->m_lock); + + *out_target = n; + return r; +} + +void +binder_proc_ForceRefNode(binder_proc_t *that, binder_node_t *node, iobuffer_t *io) +{ + bool recovered = FALSE; + const s32 descriptor = binder_proc_Node2Descriptor(that, node, FALSE, STRONG) - 1; + + DBLOCK((KERN_WARNING "ForceRefNode() going to lock %p in %d\n", that, current->pid)); + BND_LOCK(that->m_lock); + + if (binder_proc_IsAlive(that)) { + descriptor_t *d; + if ((descriptor >= 0) && + (descriptor < that->m_descriptorCount) && + ((d=&that->m_descriptors[descriptor])->node != NULL)) { + DPRINTF(5, (KERN_WARNING "Force incrementing descriptor %d: strong=%d weak=%d in team %p\n", descriptor+1, d->priRef, d->secRef,that)); + if (d->priRef == 0) { + if (BND_FORCE_ACQUIRE(binder_node, node, that) == 0) { + recovered = TRUE; + } + } + d->priRef++; + } else { + BND_ASSERT(FALSE, "ForceRefNode() got invalid descriptor!"); + } + } + + BND_UNLOCK(that->m_lock); + + // If this operation recovered a strong reference on the object, we + // need to tell its owning process for proper bookkeeping; + if (recovered) { + binder_proc_t* proc = binder_node_AcquireHome(node, that); + if (proc != NULL) { + binder_proc_AddLocalStrongRef(proc, node); + BND_RELEASE(binder_proc, proc, STRONG, that); + } + } else { + iobuffer_write_u32(io, brRELEASE); + iobuffer_write_void(io, binder_node_Ptr(node)); // binder object token + iobuffer_write_void(io, binder_node_Cookie(node)); // binder object cookie + } +} + +status_t +binder_proc_FreeBuffer(binder_proc_t *that, void *ptr) +{ + binder_transaction_t **p,*t; + DBLOCK((KERN_WARNING "FreeBuffer() going to lock %p in %d\n", that, current->pid)); + BND_LOCK(that->m_lock); + for (p = &that->m_needFree; *p && (binder_transaction_UserData(*p) != ptr); p = &(*p)->next); + if ((t = *p)) *p = t->next; + if (t) that->m_freeCount--; + BND_UNLOCK(that->m_lock); + + if (t) { + 
DPRINTF(5, (KERN_WARNING "FreeBuffer %p in team %p, now have %d\n",ptr,that,that->m_freeCount)); + + binder_transaction_Destroy(t); + return 0; + } else { + BND_ASSERT(!binder_proc_IsAlive(that), "FreeBuffer failed"); + } + return -EINVAL; +} + +static void +binder_proc_RemoveThreadFromWaitStack(binder_proc_t *that, binder_thread_t *thread) +{ + assert_spin_locked(&that->m_spin_lock); + BND_ASSERT(!list_empty(&thread->waitStackEntry), "thread not on waitstack"); + + list_del_init(&thread->waitStackEntry); + that->m_waitStackCount--; + DIPRINTF(0, (KERN_WARNING "%s(%p) popped thread %p from waitStack %d threads left\n", __func__, that, thread, that->m_waitStackCount)); + if(thread->wakeReason == WAKE_REASON_IDLE && that->m_waitStackCount > BND_PROC_MAX_IDLE_THREADS) + mod_timer(&that->m_idleTimer, that->m_idleTimeout + jiffies); + else if(that->m_waitStackCount == BND_PROC_MAX_IDLE_THREADS) + del_timer(&that->m_idleTimer); +} + +static void +binder_proc_DeliverTransacton(binder_proc_t *that, binder_transaction_t *t) +{ + binder_thread_t *thread; + + assert_spin_locked(&that->m_spin_lock); + + if(!list_empty(&that->m_waitStack)) { + // TODO: pop thread from wait stack here + thread = list_entry(that->m_waitStack.next, binder_thread_t, waitStackEntry); + binder_proc_RemoveThreadFromWaitStack(that, thread); + BND_ASSERT(thread->nextRequest == NULL, "Thread already has a request!"); + //DBTRANSACT((KERN_WARNING "Delivering transaction %p to thread %d from thread %d!\n", + // t, binder_thread_Thid(thread), current->pid)); + thread->nextRequest = t; + set_thread_priority(binder_thread_Thid(thread), binder_transaction_Priority(t)); + binder_thread_Wakeup(thread); + } + else { + DBSPAWN((KERN_WARNING "%s(%p) empty waitstack\n", __func__, that)); + *that->m_tail = t; + that->m_tail = &t->next; + } +} + +status_t +binder_proc_Transact(binder_proc_t *that, binder_transaction_t *t) +{ + binder_thread_t *thread; + unsigned long flags; + + DBLOCK((KERN_WARNING "Transact() going 
to lock %p in %d\n", that, current->pid)); + BND_LOCK(that->m_lock); + + DBTRANSACT((KERN_WARNING "Thread %d transacting %p to team %p, vthid=%d\n", + current->pid, t, that, t->sender ? binder_thread_VirtualThid(t->sender) : -1)); + + if (!binder_proc_IsAlive(that)) { + BND_UNLOCK(that->m_lock); + if (t->sender) binder_thread_ReplyDead(t->sender); + binder_transaction_Destroy(t); + return 0; + } + + BND_ASSERT(t->next == NULL, "Transaction not correctly initialized"); + + /* First check if the target team is already waiting on a reply from + this thread. If so, we must reflect this transaction directly + into the thread that is waiting for us. + */ + if (t->sender && binder_thread_VirtualThid(t->sender)) { + for (thread = that->m_threads; + thread && + (binder_thread_VirtualThid(thread) != binder_thread_VirtualThid(t->sender)) && + (binder_thread_Thid(thread) != binder_thread_VirtualThid(t->sender)); + thread = thread->next); + + if (thread) { + /* Make sure this thread starts out at the correct priority. + Its user-space looper will restore the old priority when done. 
*/ + set_thread_priority(binder_thread_Thid(thread), binder_transaction_Priority(t)); + BND_UNLOCK(that->m_lock); + DBTRANSACT((KERN_WARNING "Thread %d reflecting %p!\n", current->pid, t)); + binder_thread_Reflect(thread, t); + return 0; + } + } + + spin_lock_irqsave(&that->m_spin_lock, flags); + /* Enqueue or deliver this transaction */ + binder_proc_DeliverTransacton(that, t); + that->m_syncCount++; + + BND_ASSERT(that->m_syncCount > 0, "Synchronous transaction count is bad!"); + // that->m_syncCount++; + + // DBTRANSACT((KERN_WARNING "Added to team %p queue -- needNewThread=%d, that->m_nonblockedThreads=%d\n", that, needNewThread, that->m_nonblockedThreads)); + + spin_unlock_irqrestore(&that->m_spin_lock, flags); + + if (that->m_nonblockedThreads <= 0) { + DBSPAWN((KERN_WARNING "*** TRANSACT NEEDS TO SPAWN NEW THREAD!\n")); + binder_proc_spawn_looper(that); + } + + BND_UNLOCK(that->m_lock); + + return 0; +} + +status_t +binder_proc_TakeMeOffYourList(binder_proc_t *that) +{ + DBLOCK((KERN_WARNING "binder_proc_TakeMeOffYourList() going to lock %p in %d\n", that, current->pid)); + BND_LOCK(that->m_lock); + that->m_nonblockedThreads--; + DBSPAWN((KERN_WARNING "*** TAKE-ME-OFF-YOUR-LIST %p -- now have %d nonblocked\n", that, that->m_nonblockedThreads)); + BND_ASSERT(that->m_nonblockedThreads >= 0, "Nonblocked thread count is bad!"); + if ((that->m_nonblockedThreads <= 0) && that->m_syncCount) { + /* Spawn a thread if all blocked and synchronous transaction pending */ + DBSPAWN((KERN_WARNING "*** TAKE-ME-OFF-YOUR-LIST NEEDS TO SPAWN NEW THREAD!\n")); + binder_proc_spawn_looper(that); + } + BND_UNLOCK(that->m_lock); + return 0; +} + +status_t +binder_proc_PutMeBackInTheGameCoach(binder_proc_t *that) +{ + DBLOCK((KERN_WARNING "binder_proc_PutMeBackInTheGameCoach() going to lock %p in %d\n", that, current->pid)); + BND_LOCK(that->m_lock); + BND_ASSERT(that->m_nonblockedThreads >= 0, "Nonblocked thread count is bad!"); + that->m_nonblockedThreads++; + 
DBSPAWN((KERN_WARNING "*** PUT-ME-BACK-IN-THE-GAME-COACH %p -- now have %d nonblocked\n", that, that->m_nonblockedThreads)); + BND_UNLOCK(that->m_lock); + return 0; +} + +status_t +binder_proc_WaitForRequest(binder_proc_t *that, binder_thread_t* who, binder_transaction_t **t) +{ + status_t err = 0; + unsigned long flags; + + if(that->m_wakeThreadMask) { + spin_lock_irqsave(&that->m_spin_lock, flags); + if(that->m_wakeThreadMask & WAKE_THREAD_FOR_PROCESS_DEATH) { + that->m_wakeThreadMask &= ~WAKE_THREAD_FOR_PROCESS_DEATH; + who->wakeReason = WAKE_REASON_PROCESS_DEATH; + } + spin_unlock_irqrestore(&that->m_spin_lock, flags); + } + if(who->wakeReason == WAKE_REASON_PROCESS_DEATH) { + BND_LOCK(that->m_lock); + if(hlist_empty(&that->m_pending_death_notifications) && hlist_empty(&that->m_deleted_death_notifications)) { + printk(KERN_WARNING "%s() thread->wakeReason == WAKE_REASON_PROCESS_DEATH with no pending notifications\n", __func__); + who->wakeReason = WAKE_REASON_NONE; + } + BND_UNLOCK(that->m_lock); + return DEATH_NOTIFICATION_READY; + } + + DBLOCK((KERN_WARNING "WaitForRequest() going to lock %p in %d\n", that, binder_thread_Thid(who))); + BND_LOCK(that->m_lock); + + BND_ASSERT(atomic_read(&that->m_lock.count) <= 0, "WaitForRequest() lock still free after BND_LOCK"); + + if (who->m_isSpawned && who->m_firstLoop) { + /* This is a new thread that is waiting for its first time. */ +#if 0 + DPRINTF(0, (KERN_WARNING "*** ENTERING SPAWNED THREAD! Now looping %d, spawning %d\n", + atomic_read(&that->m_loopingThreads), that->m_spawningThreads)); + that->m_spawningThreads--; +#else + DPRINTF(0, (KERN_WARNING "*** ENTERING SPAWNED THREAD! Now looping %d\n", atomic_read(&that->m_loopingThreads))); +#endif + who->m_firstLoop = FALSE; + } else { + /* This is an existing thread that is going to go back to waiting. 
*/ + that->m_waitingThreads++; + } + + BND_ASSERT(who->nextRequest == NULL, "Thread already has a request!"); + BND_ASSERT(list_empty(&who->waitStackEntry), "Thread on wait stack!"); + + /* Look for a pending request to service. Only do this if we are not + yet on the wait stack, or are at the top of the stack -- otherwise, + we need to wait for the thread on top of us to execute. */ + spin_lock_irqsave(&that->m_spin_lock, flags); + if((*t = that->m_head) != NULL) { + DIPRINTF(5, (KERN_WARNING "Processing transaction %p, next is %p\n", *t, (*t)->next)); + that->m_head = (*t)->next; + if (that->m_tail == &(*t)->next) that->m_tail = &that->m_head; + (*t)->next = NULL; + set_thread_priority(binder_thread_Thid(who), binder_transaction_Priority(*t)); + } + else { + /* If there are no pending transactions, unlock the team state and + wait for next thing to do. */ + + // Add to wait stack. + DIPRINTF(5, (KERN_WARNING "Pushing thread %d on to wait stack.\n", binder_thread_Thid(who))); + #if VALIDATES_BINDER + binder_thread_t* pos; + list_for_each_entry(pos, &that->m_waitStack, waitStackEntry) { + DBSTACK((KERN_WARNING "Thread %ld looking through wait stack: %p (%ld)\n", + current, pos, binder_thread_Thid(pos))); + BND_ASSERT(pos != who, "Pushing thread already on wait stack!"); + } + #endif + list_add(&who->waitStackEntry, &that->m_waitStack); + that->m_waitStackCount++; + DIPRINTF(0, (KERN_WARNING "%s(%p) added thread %p to waitStack %d threads now waiting\n", __func__, that, who, that->m_waitStackCount)); + if(that->m_waitStackCount == BND_PROC_MAX_IDLE_THREADS + 1) { + mod_timer(&that->m_idleTimer, that->m_idleTimeout + jiffies); + } + set_thread_priority(binder_thread_Thid(who), that->m_idlePriority); + spin_unlock_irqrestore(&that->m_spin_lock, flags); + + BND_UNLOCK(that->m_lock); + err = binder_thread_AcquireIOSem(who); + DBLOCK((KERN_WARNING "WaitForRequest() #2 going to lock %p in %d\n", that, binder_thread_Thid(who))); + BND_LOCK(that->m_lock); + + //DPRINTF(5, 
(KERN_WARNING "Thread %d: err=0x%08x, wakeupTime=%Ld\n", binder_thread_Thid(who), err, who->wakeupTime)); + + spin_lock_irqsave(&that->m_spin_lock, flags); + if(err != 0) { + // wakeup or idle timer may have released the thread + atomic_set(&who->m_wake_count, 0); + } + if ((*t=who->nextRequest) != NULL) { + /* A request has been delivered directly to us. In this + case the thread has already been removed from the wait + stack. */ + DIPRINTF(1, (KERN_WARNING "Thread %d received transaction %p, err=0x%08x\n", binder_thread_Thid(who), *t, err)); + who->nextRequest = NULL; + err = 0; + + } else { + /* The snooze ended without a transaction being returned. + If the thread ends up returning at this point, we will + need to pop it off the wait stack. Make note of that, + find out what happened, and deal with it. + */ + + DBTRANSACT((KERN_WARNING "Thread %d snooze returned with err=0x%08x\n", + binder_thread_Thid(who), err)); + + switch(who->wakeReason) { + case WAKE_REASON_IDLE: + who->wakeReason = WAKE_REASON_NONE; // the main thread may ignore a request to die + err = -ETIMEDOUT; + DBSPAWN((KERN_WARNING "*** TIME TO DIE! waiting=%d, nonblocked=%d\n", + that->m_waitingThreads, that->m_nonblockedThreads)); + break; + + case WAKE_REASON_PROCESS_DEATH: + // the thread stays in this state until the pending list becomes empty + err = DEATH_NOTIFICATION_READY; + break; + + default: + BND_ASSERT(err < 0 || !binder_proc_IsAlive(that), "thread woke up without a reason"); + /* If this thread is still on the wait stack, remove it. */ + DBTRANSACT((KERN_WARNING "Popping thread %d from wait stack.\n", + binder_thread_Thid(who))); + binder_proc_RemoveThreadFromWaitStack(that, who); + } + } + } + spin_unlock_irqrestore(&that->m_spin_lock, flags); + + //DBTRANSACT(if ((*t) != NULL) (KERN_WARNING "*** EXECUTING TRANSACTION %p FROM %ld IN %ld\n", *t, (*t)->sender ? 
binder_thread_Thid((*t)->sender) : -1, binder_thread_Thid(who))); + + if ((*t) != NULL) { + if (!binder_transaction_IsEvent(*t)) { + /* Removing a synchronous transaction from the queue */ + BND_ASSERT(that->m_syncCount >= 0, "Count of synchronous transactions is bad!"); + that->m_syncCount--; + } else { + BND_ASSERT(*t == that->m_eventTransaction, "Event thread is not the expected instance!"); + + /* Tell caller to process an event. */ + who->returnedEventPriority = binder_transaction_Priority(*t); + err = REQUEST_EVENT_READY; + *t = NULL; + + /* Clear out current event information. */ + that->m_state &= ~btEventInQueue; + } + } else { + if(err == -ERESTARTSYS) { + DBTRANSACT((KERN_WARNING "*** NON-TRANSACTION IN %d! Error=-ERESTARTSYS\n", binder_thread_Thid(who))); + } + else { + DBTRANSACT((KERN_WARNING "*** NON-TRANSACTION IN %d! Error=0x%08x\n", binder_thread_Thid(who), err)); + } + // By default (such as errors) run at normal priority. + set_thread_priority(binder_thread_Thid(who), B_NORMAL_PRIORITY); + } + + #if VALIDATES_BINDER + { + binder_thread_t* pos; + list_for_each_entry(pos, &that->m_waitStack, waitStackEntry) { + DBSTACK((KERN_WARNING "Thread %d looking through wait stack: %p (%d)\n", + current, pos, binder_thread_Thid(pos))); + BND_ASSERT(pos != who, "Thread still on wait stack!"); + } + } + #endif + + that->m_waitingThreads--; + + /* Spawn a new looper thread if there are no more waiting + and we have not yet reached our limit. 
*/ +#if 1 + if ((that->m_waitingThreads <= 0) && (atomic_read(&that->m_loopingThreads) < that->m_maxThreads)) { + DBSPAWN((KERN_WARNING "*** I THINK I WANT TO SPAWN A LOOPER THREAD!\n")); + binder_proc_spawn_looper(that); + } +#endif + + BND_ASSERT(who->nextRequest == NULL, "Thread leaving with a request!"); + BND_ASSERT(list_empty(&who->waitStackEntry), "Thread left on wait stack!"); + + BND_UNLOCK(that->m_lock); + + return err; +} + +void +binder_proc_StartLooper(binder_proc_t *that, bool driver_spawned) +{ + DBLOCK((KERN_WARNING "StartLooper() going to lock %p in %d\n", that, current->pid)); + BND_LOCK(that->m_lock); + /* When the driver spawns a thread, it increments the non-blocked + count right away. Otherwise, we must do it now. */ + if (!driver_spawned) that->m_nonblockedThreads++; + atomic_inc(&that->m_loopingThreads); + DPRINTF(0, (KERN_WARNING "*** STARTING A LOOPER FOR %p! Now have %d waiting, %d nonblocked.\n", + that, that->m_waitingThreads, that->m_nonblockedThreads)); + BND_UNLOCK(that->m_lock); +} + +void +binder_proc_FinishLooper(binder_proc_t *that, bool driverSpawned) +{ + DBLOCK((KERN_WARNING "FinishLooper() going to lock %p in %d\n", that, current->pid)); + BND_LOCK(that->m_lock); + that->m_nonblockedThreads--; + DBSPAWN((KERN_WARNING "*** FINISHING A LOOPER FOR %p! 
Now have %d waiting, %d nonblocked, %d looping.\n", + that, that->m_waitingThreads, that->m_nonblockedThreads, atomic_read(&that->m_loopingThreads))); + if ((that->m_nonblockedThreads <= 1) && that->m_syncCount && binder_proc_IsAlive(that)) { + /* Spawn a thread if all blocked and synchronous transaction pending */ + DBSPAWN((KERN_WARNING "*** FINISH-LOOPER NEEDS TO SPAWN NEW THREAD!\n")); + binder_proc_spawn_looper(that); + } + BND_UNLOCK(that->m_lock); + + if (driverSpawned) { + atomic_dec(&that->m_loopingThreads); + BND_ASSERT(atomic_read(&that->m_loopingThreads) >= 0, "Looping thread count is bad!"); + } +} + +status_t +binder_proc_SetWakeupTime(binder_proc_t *that, bigtime_t time, s32 priority) +{ + unsigned long flags; + bool earlier; + if (time < 0) time = 0; + // convert to jiffies + do_div(time, TICK_NSEC); + time += get_jiffies_64(); + BND_LOCK(that->m_lock); + DPRINTF(4, (KERN_WARNING "%s(%p, %Ld, %d)\n", __func__, that, time, priority)); + spin_lock_irqsave(&that->m_spin_lock, flags); + if (time != that->m_wakeupTime && !(that->m_state & btEventInQueue)) { + DIPRINTF(9, (KERN_WARNING "-- previously %Ld\n", that->m_wakeupTime)); + earlier = time < that->m_wakeupTime; + that->m_wakeupTime = time; + mod_timer(&that->m_wakeupTimer, time); + } + that->m_wakeupPriority = priority; + spin_unlock_irqrestore(&that->m_spin_lock, flags); + BND_UNLOCK(that->m_lock); + return 0; +} + +status_t +binder_proc_SetIdleTimeout(binder_proc_t *that, bigtime_t timeDelta) +{ + DPRINTF(4, (KERN_WARNING "%s(%p, %Ld)\n", __func__, that, timeDelta)); + that->m_idleTimeout = timeDelta; + return 0; +} + +status_t +binder_proc_SetReplyTimeout(binder_proc_t *that, bigtime_t timeDelta) +{ + DPRINTF(4, (KERN_WARNING "%s(%p, %Ld)\n", __func__, that, timeDelta)); + that->m_replyTimeout = timeDelta; + return 0; +} + +status_t +binder_proc_SetMaxThreads(binder_proc_t *that, s32 num) +{ + DPRINTF(4, (KERN_WARNING "%s(%p, %d)\n", __func__, that, num)); + that->m_maxThreads = num; + return 0; 
+} + +status_t +binder_proc_SetIdlePriority(binder_proc_t *that, s32 pri) +{ + DPRINTF(4, (KERN_WARNING "%s(%p, %d)\n", __func__, that, pri)); + that->m_idlePriority = (pri > B_MIN_PRIORITY_VAL ? + (pri <= B_MAX_PRIORITY_VAL ? pri : B_MAX_PRIORITY_VAL) : + B_MIN_PRIORITY_VAL); + return 0; +} + +#define LARGE_TRANSACTION (64 * 1024) +static range_map_t * binder_proc_free_map_alloc_l(binder_proc_t *that, size_t length) +{ + bool large; + struct rb_node *n; + struct rb_node * (*rbstep)(struct rb_node *); + range_map_t *rm = NULL; + unsigned long avail; + + large = (length > LARGE_TRANSACTION ? TRUE : FALSE); + DPRINTF(5, (KERN_WARNING "%s(%p, %08x) large = %d\n", __func__, that, length, large)); + n = large ? rb_last(&that->m_freeMap) : rb_first(&that->m_freeMap); + rbstep = large ? rb_prev : rb_next; + + while (n) { + rm = rb_entry(n, range_map_t, rm_rb); + avail = rm->end - rm->start; + DPRINTF(5, (KERN_WARNING "%s(%p, %08x) rm = %p [%08lx-%08lx], avail %lu\n", __func__, that, length, rm, rm->start, rm->end, avail)); + if (avail >= length) { + avail -= length; + if (avail) { + range_map_t *newrm = kmem_cache_alloc(range_map_cache, GFP_KERNEL); + // use only part of range + if (large) { + // consume address space from the right + newrm->end = rm->end; + rm->end -= length; + newrm->start = rm->end; + newrm->page = NULL; + } else { + // consume address space from the left + newrm->start = rm->start; + rm->start += length; + newrm->end = rm->start; + } + DPRINTF(5, (KERN_WARNING "%s(%p, %08x) newrm = %p [%08lx-%08lx]\n", __func__, that, length, newrm, newrm->start, newrm->end)); + DPRINTF(5, (KERN_WARNING "%s(%p, %08x) remaining rm = %p [%08lx-%08lx], avail %lu\n", __func__, that, length, rm, rm->start, rm->end, avail)); + newrm->team = that; + rm = newrm; + } else { + // use entire range + rb_erase(n, &that->m_freeMap); + } + break; + } + n = rbstep(n); + rm = NULL; + } + return rm; +} + +range_map_t * binder_proc_free_map_insert(binder_proc_t *that, range_map_t 
*buffer) +{ + struct rb_node ** p = &that->m_freeMap.rb_node; + struct rb_node * parent = NULL; + range_map_t *rm = NULL; + const unsigned long address = buffer->start; + struct rb_node *next; + struct rb_node *prev; + + DPRINTF(0, (KERN_WARNING "%s(%p, %p) %08lx::%08lx\n", __func__, that, buffer, buffer->start, buffer->end)); + + while (*p) + { + parent = *p; + rm = rb_entry(parent, range_map_t, rm_rb); + + if (address < rm->start) + p = &(*p)->rb_left; + else if (address >= rm->end) + p = &(*p)->rb_right; + else { + DPRINTF(0, (KERN_WARNING "%s found buffer already in the free list!\n", __func__)); + return rm; + } + } + + if (rm) { + if (rm->end == buffer->start) { + DPRINTF(9, (KERN_WARNING "%s: buffer merges to the right\n", __func__)); + // merge to the right + rm->end = buffer->end; + kmem_cache_free(range_map_cache, buffer); + // try merge right again (did we fill up a hole?) + next = rb_next(parent); + if (next) { + range_map_t *rm_next = rb_entry(next, range_map_t, rm_rb); + if (rm->end == rm_next->start) { + DPRINTF(9, (KERN_WARNING "%s: buffer merges to the left, too\n", __func__)); + rm->end = rm_next->end; + rb_erase(next, &that->m_freeMap); + kmem_cache_free(range_map_cache, rm_next); + } + } + return NULL; + } else if (buffer->end == rm->start) { + DPRINTF(9, (KERN_WARNING "%s: buffer merges to the left\n", __func__)); + // merge to the left + rm->start = buffer->start; + kmem_cache_free(range_map_cache, buffer); + // try merge left again (did we fill up a hole?) 
+ prev = rb_prev(parent); + if (prev) { + range_map_t *rm_prev = rb_entry(prev, range_map_t, rm_rb); + if (rm_prev->end == rm->start) { + DPRINTF(9, (KERN_WARNING "%s: buffer merges to the right, too\n", __func__)); + rm->start = rm_prev->start; + rb_erase(prev, &that->m_freeMap); + kmem_cache_free(range_map_cache, rm_prev); + } + } + return NULL; + } + } + DPRINTF(9, (KERN_WARNING "%s: buffer stands alone\n", __func__)); + + // default case: insert in the middle of nowhere + rb_link_node(&buffer->rm_rb, parent, p); + rb_insert_color(&buffer->rm_rb, &that->m_freeMap); + + return NULL; +} + +static inline range_map_t * binder_proc_range_map_insert(binder_proc_t *that, range_map_t *buffer) +{ + struct rb_node ** p = &that->m_rangeMap.rb_node; + struct rb_node * parent = NULL; + range_map_t *rm; + const unsigned long address = buffer->start; + + while (*p) + { + parent = *p; + rm = rb_entry(parent, range_map_t, rm_rb); + + if (address < rm->start) + p = &(*p)->rb_left; + else if (address >= rm->end) + p = &(*p)->rb_right; + else { + DPRINTF(1, (KERN_WARNING "%s: %p (%08lx::%08lx) overlaps with " + "existing entry %p (%08lx::%08lx)\n", + __func__, buffer, buffer->start, buffer->end, + rm, rm->start, rm->end)); + return rm; + } + } + + rb_link_node(&buffer->rm_rb, parent, p); + rb_insert_color(&buffer->rm_rb, &that->m_rangeMap); + + return NULL; +} + +static inline range_map_t * binder_proc_range_map_search(binder_proc_t *that, unsigned long address) +{ + struct rb_node * n = that->m_rangeMap.rb_node; + range_map_t *rm; + DPRINTF(0, (KERN_WARNING "%s(%p, %lu)\n", __func__, that, address)); + + while (n) + { + rm = rb_entry(n, range_map_t, rm_rb); + // range_map covers [start, end) + DPRINTF(9, (KERN_WARNING " -- trying %08lx::%08lx\n", rm->start, rm->end)); + if (address < rm->start) + n = n->rb_left; + else if (address >= rm->end) + n = n->rb_right; + else { + DPRINTF(9, (KERN_WARNING " -- found it!\n")); + return rm; + } + } + DPRINTF(0, (KERN_WARNING " -- failed to 
find containing range\n")); + return NULL; +} + +#if 0 +// Remove the buffer containing address from the tree. The caller owns the returned memory. +static inline range_map_t * binder_proc_range_map_remove(binder_proc_t *that, unsigned long address) +{ + range_map_t *rm = binder_proc_range_map_search(that, address); + if (rm) rb_erase(&rm->rm_rb, &that->m_rangeMap); + return rm; +} +#endif + +bool +binder_proc_ValidTransactionAddress(binder_proc_t *that, unsigned long address, struct page **pageptr) +{ + // Find the struct page* containing address in the process specified by + // that. Return FALSE and leave *pageptr unchanged if address doesn't + // represent a valid buffer. + + range_map_t *rm; + + BND_LOCK(that->m_map_pool_lock); + rm = binder_proc_range_map_search(that, address); + BND_UNLOCK(that->m_map_pool_lock); + + if (rm) { + unsigned int index = (address - rm->start) >> PAGE_SHIFT; + *pageptr = rm->page + index; + BND_ASSERT(rm->next == NULL, "binder_proc_ValidTransactionAddress found page in free pool"); + return TRUE; + } + return FALSE; +} + +// Alternatively, 2x number of active threads? 
+#define POOL_THRESHOLD 16 +// POOL_BUFFER_LIMIT should never exceed LARGE_TRANSACTION size, or things will get ugly +#define POOL_BUFFER_LIMIT LARGE_TRANSACTION +range_map_t * +binder_proc_AllocateTransactionBuffer(binder_proc_t *that, size_t size) +{ + // ensure order-sized allocations + unsigned long order = calc_order_from_size(size); + + range_map_t *rm; + unsigned long avail = ~0; + range_map_t **prev; + + BND_LOCK(that->m_map_pool_lock); + + rm = that->m_pool; + prev = &that->m_pool; + + size = (1 << order) << PAGE_SHIFT; + + DPRINTF(0, (KERN_WARNING "%s(%p, %u)\n", __func__, that, size)); + DPRINTF(9, (KERN_WARNING " -- order %lu produces size %u\n", order, size)); + // don't bother checking the pool for large buffers + //if (size < POOL_BUFFER_LIMIT) { + DPRINTF(9, (KERN_WARNING " -- searching the pool\n")); + while (rm && ((avail = rm->end - rm->start) < size)) { + prev = &rm->next; + rm = rm->next; + } + //} + + if (rm && (avail == size)) { + // unlink + *prev = rm->next; + rm->next = NULL; + // un-count + that->m_pool_active--; + DPRINTF(9, (KERN_WARNING " -- reusing transaction buffer\n")); + } else { + DPRINTF(9, (KERN_WARNING " -- allocating a new transaction buffer\n")); + // make a new one + rm = binder_proc_free_map_alloc_l(that, size); + if (rm) { + // allocate RAM for it + rm->page = alloc_pages(GFP_KERNEL | __GFP_REPEAT, order); + if (!rm->page) { + binder_proc_free_map_insert(that, rm); + rm = 0; + DPRINTF(9, (KERN_WARNING " -- allocation failed\n")); + } else { + // add to the valid range maps + rm->next = NULL; + binder_proc_range_map_insert(that, rm); + } + } + } + DPRINTF(9, (KERN_WARNING " -- returning %p\n", rm)); + if (rm) { + DPRINTF(9, (KERN_WARNING " --- %08lx::%08lx\n", rm->start, rm->end)); + } + BND_UNLOCK(that->m_map_pool_lock); + return rm; +} + +void +binder_proc_FreeTransactionBuffer(binder_proc_t *that, range_map_t *buffer) +{ + unsigned long size = buffer->end - buffer->start; + range_map_t *rm; + range_map_t **prev; + + 
BND_LOCK(that->m_map_pool_lock); + + DPRINTF(5, (KERN_WARNING "%s(%p) m_pool_active: %d, size: %lu\n", __func__, that, that->m_pool_active, size)); + //if ((that->m_pool_active < POOL_THRESHOLD) && (size < POOL_BUFFER_LIMIT)) { + DPRINTF(5, (KERN_WARNING "%d putting %p (%08lx::%08lx) back in the pool\n", current->pid, buffer, buffer->start, buffer->end)); + rm = that->m_pool; + prev = &that->m_pool; + while (rm && ((rm->end - rm->start) < size)) { + prev = &rm->next; + rm = rm->next; + } + buffer->next = rm; + *prev = buffer; + that->m_pool_active++; +#if 0 // This is not safe to enable until we find some way to unmap the page from the userspace + } else { + DPRINTF(5, (KERN_WARNING "%d releasing %p (%08lx::%08lx) for later use\n", current->pid, buffer, buffer->start, buffer->end)); + // unmap the range +#if 0 + // FIXME: use unmap_mapping_range() to unmap pages + // FIXME: "as" always turns up NULL, so unmapping doesn't work + struct address_space *as = page_mapping(buffer->page); + DPRINTF(5, (KERN_WARNING " -- address_space: %p\n", as)); + if (as) unmap_mapping_range(as, buffer->start - that->m_mmap_start, buffer->end - buffer->start, 0); +#endif + // remove from the valid range maps + rb_erase(&buffer->rm_rb, &that->m_rangeMap); + // toss this range + __free_pages(buffer->page, calc_order_from_size(size)); + buffer->page = NULL; + // give back the address space + binder_proc_free_map_insert(that, buffer); + } +#endif + BND_UNLOCK(that->m_map_pool_lock); +} + +/* ALWAYS call this with that->m_lock held */ +void binder_proc_spawn_looper(binder_proc_t *that) +{ + DBSPAWN((KERN_WARNING "%s(%p)\n", __func__, that)); +#if 0 + if ((++that->m_spawningThreads == 1) && binder_proc_IsAlive(that)) { + atomic_inc(&that->m_noop_spawner); + DBSPAWN((KERN_WARNING " -- upped m_noop_spawner to %d\n", atomic_read(&that->m_noop_spawner))); + } +#else + if (binder_proc_IsAlive(that) && (test_and_set_bit(SPAWNING_BIT, &that->m_noop_spawner) == 0)) { + set_bit(DO_SPAWN_BIT, 
&that->m_noop_spawner); + DBSPAWN((KERN_WARNING " -- upped m_noop_spawner\n")); + ++that->m_waitingThreads; + ++that->m_nonblockedThreads; + } +#endif + DBSPAWN((KERN_WARNING "%s(%p) finished\n", __func__, that)); +} + +void binder_proc_wakeup_timer(unsigned long data) +{ + unsigned long flags; + binder_proc_t *that = (binder_proc_t *)data; + + DIPRINTF(0, (KERN_WARNING "%s(%p) -- Enqueueing handler transaction\n", __func__, that)); + + spin_lock_irqsave(&that->m_spin_lock, flags); + + BND_ASSERT(that->m_eventTransaction != NULL, "m_eventTransaction == NULL"); + + if(!(that->m_state & btEventInQueue)) { + BND_ASSERT(that->m_eventTransaction->next == NULL, "Event transaction already in queue!"); + binder_transaction_SetPriority(that->m_eventTransaction, (s16)that->m_wakeupPriority); + that->m_wakeupTime = B_INFINITE_TIMEOUT; + that->m_wakeupPriority = B_LOW_PRIORITY; // this value should not be used anywhere + that->m_state |= btEventInQueue; + + binder_proc_DeliverTransacton(that, that->m_eventTransaction); + } + else { + BND_ASSERT(0, "event already in queue"); + } + spin_unlock_irqrestore(&that->m_spin_lock, flags); +} + +void binder_proc_idle_timer(unsigned long data) +{ + unsigned long flags; + binder_proc_t *that = (binder_proc_t *)data; + binder_thread_t *thread; + + DIPRINTF(0, (KERN_WARNING "%s(%p) -- Signal idle thread\n", __func__, that)); + + spin_lock_irqsave(&that->m_spin_lock, flags); + + if(that->m_waitStackCount > BND_PROC_MAX_IDLE_THREADS) { + BND_ASSERT(!list_empty(&that->m_waitStack), "bad m_waitStackCount"); + thread = list_entry(that->m_waitStack.prev, binder_thread_t, waitStackEntry); + thread->wakeReason = WAKE_REASON_IDLE; + binder_proc_RemoveThreadFromWaitStack(that, thread); + binder_thread_Wakeup(thread); + } + else { + DBSPAWN((KERN_WARNING "%s(%p) idle timer ignored\n", __func__, that)); + } + spin_unlock_irqrestore(&that->m_spin_lock, flags); +} + diff -Nru linux-2.6.23/drivers/binder/binder_proc.h 
kernel.android/drivers/binder/binder_proc.h --- linux-2.6.23/drivers/binder/binder_proc.h 1970-01-01 10:00:00.000000000 +1000 +++ kernel.android/drivers/binder/binder_proc.h 2007-11-12 07:49:02.000000000 +1100 @@ -0,0 +1,226 @@ +/* binder driver + * Copyright (C) 2005 Palmsource, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + +#ifndef BINDER_PROC_H +#define BINDER_PROC_H + +#include +#include +#include +#include +#include "binder_defs.h" +#include "binder_thread.h" +#include "iobuffer.h" + +// This "error" is returned by WaitForRequest() when a timed event +// is scheduled to happen. 
+enum { + REQUEST_EVENT_READY = 1, + DEATH_NOTIFICATION_READY = 2 +}; + +typedef struct descriptor { + struct binder_node *node; + s32 priRef; + s32 secRef; +} descriptor_t; + +typedef struct reverse_mapping { + struct reverse_mapping *next; + struct binder_node *node; + s32 descriptor; +} reverse_mapping_t; + +typedef struct local_mapping { + struct local_mapping *next; + void *ptr; // Unique token identifying this object (supplied by user space) + void *cookie; // Arbitrary data for user space to associate with the object/token + struct binder_node *node; +} local_mapping_t; + +typedef struct range_map { + unsigned long start; // inclusive + unsigned long end; // non-inclusive + struct page *page; + struct range_map* next; // next in the chain of free buffers + struct rb_node rm_rb; + struct binder_proc *team; +} range_map_t; + +typedef struct death_notification { + atomic_t ref_count; + struct hlist_node observer; + struct hlist_node observed_or_active; + void *cookie; + struct binder_proc *observer_proc; + struct binder_proc *observed_proc; // or NULL if already sent +} death_notification_t; + +enum { + btEventInQueue = 0x00000002, + btDying = 0x00000004, + btDead = 0x00000008, + btCleaned = 0x00000010, + btFreed = 0x00000020 +}; + +enum { + WAKE_THREAD_FOR_PROCESS_DEATH = 1 +}; + +typedef struct binder_proc { + atomic_t m_primaryRefs; + atomic_t m_secondaryRefs; + volatile unsigned long m_noop_spawner; +#define SPAWNING_BIT 0 +#define DO_SPAWN_BIT 1 + struct semaphore m_lock; + spinlock_t m_spin_lock; + struct semaphore m_map_pool_lock; + u32 m_state; + struct binder_thread * m_threads; + struct list_head m_waitStack; + int m_waitStackCount; + u32 m_wakeThreadMask; + bigtime_t m_wakeupTime; + s32 m_wakeupPriority; + struct timer_list m_wakeupTimer; + struct timer_list m_idleTimer; + bigtime_t m_idleTimeout; + bigtime_t m_replyTimeout; + s32 m_syncCount; + s32 m_freeCount; + struct binder_transaction * m_head; + struct binder_transaction ** m_tail; + struct 
binder_transaction * m_needFree; + struct binder_transaction * m_eventTransaction; + local_mapping_t * m_localHash[HASH_SIZE]; + struct binder_node * m_rootObject; // only use for comparison!! + s32 m_rootStopsProcess; + s32 m_numRemoteStrongRefs; + reverse_mapping_t * m_reverseHash[HASH_SIZE]; + descriptor_t * m_descriptors; + s32 m_descriptorCount; + s32 m_nonblockedThreads; + s32 m_waitingThreads; + s32 m_maxThreads; + s32 m_idlePriority; + atomic_t m_loopingThreads; + // s32 m_spawningThreads; + unsigned long m_mmap_start; // inclusive + struct rb_root m_rangeMap; + struct rb_root m_freeMap; + range_map_t *m_pool; + size_t m_pool_active; + struct hlist_head m_incoming_death_notifications; + struct hlist_head m_outgoing_death_notifications; + struct hlist_head m_pending_death_notifications; // ready to be sent to user space + struct hlist_head m_active_death_notifications; // already sent to user space + struct hlist_head m_deleted_death_notifications; +} binder_proc_t; + + +binder_proc_t * new_binder_proc(void); +#if 0 +binder_proc_t * new_binder_proc_with_parent(pid_t id, pid_t mainThid, struct binder_thread *parent); +#endif +void binder_proc_destroy(binder_proc_t *that); + +#define binder_proc_IsAlive(that) ((that->m_state&(btDying|btDead)) == 0) +// bool binder_proc_IsAlive(binder_proc_t *that) const; +void binder_proc_Released(binder_proc_t *that); + +void binder_proc_Die(binder_proc_t *that, bool locked /* = false */); + +BND_DECLARE_ACQUIRE_RELEASE(binder_proc); +BND_DECLARE_ATTEMPT_ACQUIRE(binder_proc); + +void binder_proc_SetRootObject(binder_proc_t *that, struct binder_node *node); + +void binder_proc_Stop(binder_proc_t *that, bool now); + +bool binder_proc_AddThread(binder_proc_t *that, binder_thread_t *t); +void binder_proc_RemoveThread(binder_proc_t *that, struct binder_thread *t); + +status_t binder_proc_WaitForRequest(binder_proc_t *that, struct binder_thread* who, struct binder_transaction **t); + +void 
binder_proc_GetPendingDeathNotifications(binder_proc_t *that, binder_thread_t *thread, iobuffer_t *io); + +/* Call when a thread receives its bcREGISTER_LOOPER command. */ +void binder_proc_StartLooper(binder_proc_t *that, bool driver_spawned); +/* Call when exiting a thread who has been told bcREGISTER_LOOPER. */ +void binder_proc_FinishLooper(binder_proc_t *that, bool driverSpawned); + +status_t binder_proc_SetWakeupTime(binder_proc_t *that, bigtime_t time, s32 priority); +status_t binder_proc_SetIdleTimeout(binder_proc_t *that, bigtime_t timeDelta); +status_t binder_proc_SetReplyTimeout(binder_proc_t *that, bigtime_t timeDelta); +status_t binder_proc_SetMaxThreads(binder_proc_t *that, s32 num); +status_t binder_proc_SetIdlePriority(binder_proc_t *that, s32 pri); + +/* Call to place a transaction in to this team's queue. */ +status_t binder_proc_Transact(binder_proc_t *that, struct binder_transaction *t); + +/* Management of transactions that are waiting to be deallocated. + These are safe to call with only a secondary reference on the + team. +*/ +status_t binder_proc_AddToNeedFreeList(binder_proc_t *that, struct binder_transaction *t); +status_t binder_proc_FreeBuffer(binder_proc_t *that, void *p); + +bool binder_proc_RefDescriptor(binder_proc_t *that, s32 descriptor, s32 type); +bool binder_proc_UnrefDescriptor(binder_proc_t *that, s32 descriptor, s32 type); +bool binder_proc_RemoveLocalMapping(binder_proc_t *that, void *ptr, struct binder_node *node); + +/* Called by binder_node when its last strong reference goes away, for the process to + do the appropriate bookkeeping. */ +void binder_proc_RemoveLocalStrongRef(binder_proc_t *that, struct binder_node *node); + +/* Called by binder_proc_ForceRefNode() if it is restoring the first strong reference + back on to the node. */ +void binder_proc_AddLocalStrongRef(binder_proc_t *that, struct binder_node *node); + +/* Attempt to acquire a primary reference on the given descriptor. 
+ The result will be true if this succeeded, in which case you + can just continue with it. If the result is false, then + 'out_target' may be set to the binder_node_t the you are making + the attempt on. You can execute a transaction to the node + to attempt the acquire on it, and -must- release a SECONDARY + reference on the node which this function acquired. */ +bool binder_proc_AttemptRefDescriptor(binder_proc_t *that, s32 descriptor, struct binder_node **out_target); + +/* Forcibly increment the primary reference count of the given, + in response to a successful binder_proc_AttemptAcquire(). */ +void binder_proc_ForceRefNode(binder_proc_t *that, struct binder_node *node, iobuffer_t *io); + +s32 binder_proc_Node2Descriptor(binder_proc_t *that, struct binder_node *node, bool ref /* = true */, s32 type /* = PRIMARY */); +struct binder_node * binder_proc_Descriptor2Node(binder_proc_t *that, s32 descriptor, const void* id, s32 type /* = PRIMARY */); +status_t binder_proc_Ptr2Node(binder_proc_t *that, void *ptr, void *cookie, struct binder_node **n, iobuffer_t *io, const void* id, s32 type /* = PRIMARY */); + +/* death notifications */ +status_t binder_proc_RequestDeathNotification(binder_proc_t *that, binder_proc_t *client, void *cookie); +status_t binder_proc_ClearDeathNotification(binder_proc_t *that, binder_proc_t *client, void *cookie); +status_t binder_proc_DeadBinderDone(binder_proc_t *that, void *cookie); // called on client proc + +status_t binder_proc_TakeMeOffYourList(binder_proc_t *that); +status_t binder_proc_PutMeBackInTheGameCoach(binder_proc_t *that); + +bool binder_proc_ValidTransactionAddress(binder_proc_t *that, unsigned long address, struct page **pageptr); +range_map_t * binder_proc_AllocateTransactionBuffer(binder_proc_t *that, size_t size); +void binder_proc_FreeTransactionBuffer(binder_proc_t *that, range_map_t *buffer); +range_map_t * binder_proc_free_map_insert(binder_proc_t *that, range_map_t *buffer); +#endif // BINDER_PROC_H diff -Nru 
linux-2.6.23/drivers/binder/binder_thread.c kernel.android/drivers/binder/binder_thread.c --- linux-2.6.23/drivers/binder/binder_thread.c 1970-01-01 10:00:00.000000000 +1000 +++ kernel.android/drivers/binder/binder_thread.c 2007-11-12 07:49:02.000000000 +1100 @@ -0,0 +1,1575 @@ +/* binder driver + * Copyright (C) 2005 Palmsource, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + +#include "binder_defs.h" +#include "binder_thread.h" +#include "binder_proc.h" +#include "binder_node.h" +#include "binder_transaction.h" +#include "iobuffer.h" + +#include +#include +#include + +static void binder_thread_Cleanup(binder_thread_t *that); + +static status_t binder_thread_WaitForReply(binder_thread_t *that, iobuffer_t *io); +static status_t binder_thread_WaitForRequest(binder_thread_t *that, iobuffer_t *io); +static status_t binder_thread_ReturnTransaction(binder_thread_t *that, iobuffer_t *io, binder_transaction_t *t); + +// static void binder_thread_WriteReturn(binder_thread_t *that, void *buffer, int size); + +// static void binder_thread_EnqueueTransaction(binder_thread_t *that, binder_transaction_t *t); + +// Set non-zero to do the capable(CAP_SYS_ADMIN) check +#define CHECK_CAPS 0 + +static binder_node_t *gContextManagerNode = NULL; +static DECLARE_MUTEX(gContextManagerNodeLock); +static 
atomic_t g_count = ATOMIC_INIT(0); + +int +binder_thread_GlobalCount() +{ + return atomic_read(&g_count); +} + +binder_thread_t * binder_thread_init(int thid, binder_proc_t *team) +{ + binder_thread_t *that; + + that = (binder_thread_t*)kmem_cache_alloc(thread_cache, GFP_KERNEL); + if (that) { + atomic_inc(&g_count); + that->attachedToThread = FALSE; + that->next = NULL; + INIT_LIST_HEAD(&that->waitStackEntry); + that->pendingChild = NULL; + that->nextRequest = NULL; + that->wakeReason = WAKE_REASON_NONE; + that->virtualThid = 0; + atomic_set(&that->m_primaryRefs, 0); + atomic_set(&that->m_secondaryRefs, 0); + atomic_set(&that->m_wake_count, 0); + that->m_err = 0; + init_MUTEX(&that->m_lock); + init_waitqueue_head(&that->m_wait); + that->m_waitForReply = 0; + that->m_reply = NULL; + that->m_consume = 0; + that->m_thid = thid; + that->m_team = team; + if (team != NULL) + BND_ACQUIRE(binder_proc, that->m_team, WEAK, that); + that->m_pendingReply = NULL; + that->m_pendingRefResolution = NULL; + that->m_teamRefs = 0; + that->m_isSpawned = FALSE; + that->m_isLooping = FALSE; + that->m_firstLoop = TRUE; + that->m_shortAttemptAcquire = FALSE; + that->m_pendingReplyIsRoot = FALSE; + that->m_failedRootReceive = FALSE; + that->m_failedRootReply = FALSE; + DPRINTF(5, (KERN_WARNING "*** CREATING THREAD %p (%p:%d)\n", that, that->m_team, that->m_thid)); + } + DBSHUTDOWN((KERN_WARNING "%s(%u, %p): %p\n", __func__, thid, team, that)); + return that; +} + +void binder_thread_destroy(binder_thread_t *that) +{ + DBSHUTDOWN((KERN_WARNING "binder_thread_destroy(%p, %p):%d\n", that, that->m_team, that->m_thid)); + if (that->m_isLooping && that->m_team && BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)) { + binder_proc_FinishLooper(that->m_team, that->m_isSpawned); + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + } + // We don't care about process, here. 
+ //if (find_thread(that->m_thid, 0, TRUE) != that) { + //DPRINTF(1, (KERN_WARNING "binder_thread_destroy(%p): couldn't find ourselves in the thread hash\n", that)); + //} + + binder_thread_Cleanup(that); + + if (that->m_team) { + BND_RELEASE(binder_proc, that->m_team, WEAK, that); + that->m_team = NULL; + } + + atomic_dec(&g_count); + + // free_lock(&that->m_lock); + kmem_cache_free(thread_cache, that); +} + +void +binder_thread_Released(binder_thread_t *that) +{ + DBSHUTDOWN((KERN_WARNING "%s(%p, %p):%d\n", __func__, that, that->m_team, that->m_thid)); + binder_thread_Die(that); +} + +void +binder_thread_Die(binder_thread_t *that) +{ + DBSHUTDOWN((KERN_WARNING "%s(%p) (%p:%d) in %d\n", __func__, that, that->m_team, binder_thread_Thid(that), current->pid)); + + // Always do this, even if all primary references on the team + // are gone. This is the only way the thread list gets cleaned up. + if (that->m_team != NULL) + binder_proc_RemoveThread(that->m_team, that); + + binder_thread_Cleanup(that); + + /* + * Linux doesn't seem to have an equivalent to delet_sem() + * delete_sem(that->m_ioSem); that->m_ioSem = B_BAD_SEM_ID; + */ + + DBSHUTDOWN((KERN_WARNING "Binder thread %p:%d: DEAD!\n", that->m_team, that->m_thid)); +} + +bool binder_thread_SetParentThread(binder_thread_t *that, binder_thread_t *replyTo) +{ + bool success; + + DPRINTF(4, (KERN_WARNING "binder_thread_SetParentThread(%p, %p)\n", that, replyTo)); + + BND_LOCK(that->m_lock); + if ((success = !that->m_failedRootReply)) { + + BND_ASSERT(!that->m_pendingReply, "Attaching to child thread that already has someone waiting for a reply!"); + that->m_pendingReply = binder_transaction_CreateEmpty(); + binder_transaction_SetRootObject(that->m_pendingReply, TRUE); + that->m_pendingReply->sender = replyTo; + that->m_pendingReplyIsRoot = TRUE; + BND_ACQUIRE(binder_thread, replyTo, WEAK, m_pendingReply); + + // The thread now has the reply info, so allow it to wake up and reply. 
+ binder_thread_Wakeup(that); + } + BND_UNLOCK(that->m_lock); + + return success; +} + +void binder_thread_ReleasePendingChild(binder_thread_t *that) +{ + binder_thread_t *child; + BND_LOCK(that->m_lock); + DPRINTF(4, (KERN_WARNING "binder_thread_ReleasePendingChild(%p): child=%p\n", that, that->pendingChild)); + child = that->pendingChild; + that->pendingChild = NULL; + BND_UNLOCK(that->m_lock); + + if (child) { + forget_thread(child); + } +} + +void binder_thread_AttachProcess(binder_thread_t *that, struct binder_proc *team) +{ + bool attached = FALSE; + + DPRINTF(4, (KERN_WARNING "binder_thread_AttachProcess(%p, %p)\n", that, team)); + + BND_LOCK(that->m_lock); + + BND_ASSERT(!that->m_team, "Child thread is already attached to its process!"); + if (that->m_team == NULL) { + attached = TRUE; + that->m_team = team; + BND_ACQUIRE(binder_proc, team, WEAK, that); + } + + BND_UNLOCK(that->m_lock); + + if (attached) { + if(!binder_proc_AddThread(team, that)) { + BND_ASSERT(0, "attached thread to dying process"); + } + } +} + +void +binder_thread_Cleanup(binder_thread_t *that) +{ + binder_transaction_t *cmd, *pendingRef; + binder_transaction_t *pendingReply; + binder_transaction_t *reply; + binder_node_t *contextManagerNode; + int relCount; + bool first; + + BND_LOCK(that->m_lock); + pendingRef = that->m_pendingRefResolution; + that->m_pendingRefResolution = NULL; + pendingReply = that->m_pendingReply; + that->m_pendingReply = NULL; + reply = that->m_reply; + that->m_reply = NULL; + relCount = that->m_teamRefs; + that->m_teamRefs = 0; + DPRINTF(0, (KERN_WARNING "%s(%p):%p,%d strong: %d, weak: %d\n", __func__, that, that->m_team, that->m_thid, that->m_primaryRefs.counter, that->m_secondaryRefs.counter)); + BND_UNLOCK(that->m_lock); + + while (relCount) { + if (that->m_team) + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + relCount--; + } + + first = TRUE; + while ((cmd = pendingRef)) { + if (first) { + first = FALSE; + DPRINTF(5, (KERN_WARNING "Binder thread 
%p:%d: cleaning up pending ref resolution.\n", that->m_team, that->m_thid)); + } + pendingRef = cmd->next; + DPRINTF(5, (KERN_WARNING "Deleting transaction %p\n", cmd)); + binder_transaction_DestroyNoRefs(cmd); + } + + first = TRUE; + while ((cmd = pendingReply)) { + if (first) { + first = FALSE; + DPRINTF(5, (KERN_WARNING "Binder thread %p:%d: cleaning up pending replies.\n", that->m_team, that->m_thid)); + } + if (cmd->sender) { + DPRINTF(5, (KERN_WARNING "Returning transaction %p to thread %p (%d)\n", + cmd, cmd->sender, binder_thread_Thid(cmd->sender))); + binder_thread_ReplyDead(cmd->sender); + } + pendingReply = cmd->next; + binder_transaction_Destroy(cmd); + } + + first = TRUE; + while ((cmd = reply)) { + if (first) { + first = FALSE; + DPRINTF(5, (KERN_WARNING "Binder thread %p:%d: cleaning up received replies.\n", that->m_team, that->m_thid)); + } + reply = cmd->next; + DPRINTF(5, (KERN_WARNING "Deleting transaction %p\n", cmd)); + binder_transaction_Destroy(cmd); + } + BND_LOCK(gContextManagerNodeLock); + if (gContextManagerNode && (gContextManagerNode->m_home == that->m_team && that->m_team->m_threads == NULL)) { + contextManagerNode = gContextManagerNode; + gContextManagerNode = NULL; + } + else { + contextManagerNode = NULL; + } + BND_UNLOCK(gContextManagerNodeLock); + if(contextManagerNode != NULL) { + DPRINTF(2, (KERN_WARNING "team %08lx is not longer the context manager\n", (unsigned long)that->m_team)); + binder_node_destroy(contextManagerNode); + } + + binder_thread_ReleasePendingChild(that); + + // Make sure this thread returns to user space. 
+ binder_thread_Wakeup(that); +} + +int +binder_thread_Control(binder_thread_t *that, unsigned int cmd, void *buffer) +{ + int result = -EINVAL; + unsigned int size = _IOC_SIZE(cmd); + + //ddprintf("binder -- ioctl %d, size=%d\n", cmd, size); + + DPRINTF(2, (KERN_WARNING "%s(%p, %d, %p): proc=%p\n", __func__, that, cmd, buffer, that->m_team)); + + switch (cmd) { + case BINDER_WRITE_READ: + DPRINTF(2, (KERN_WARNING "BINDER_WRITE_READ: %p:%d\n", that->m_team, that->m_thid)); + if (size >= sizeof(binder_write_read_t)) { + binder_write_read_t bwr; + if (copy_from_user(&bwr, buffer, sizeof(bwr)) == 0) { + DPRINTF(2, (KERN_WARNING " -- write %ld at %08lx\n -- read %ld at %08lx\n", bwr.write_size, bwr.write_buffer, bwr.read_size, bwr.read_buffer)); + if (bwr.write_size > 0) { + result = binder_thread_Write(that, (void *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed); + if (result < 0) { + bwr.read_consumed = 0; + copy_to_user(buffer, &bwr, sizeof(bwr)); + goto getout; + } + } + if (bwr.read_size > 0) { + result = binder_thread_Read(that, (void *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed); + if (result < 0) { + // For ERESTARTSYS, we have to propagate the fact + // that we've already done any writes. + //if (result != -ERESTARTSYS) { + //bwr.read_size = result; // FIXME? 
+ //} + copy_to_user(buffer, &bwr, sizeof(bwr)); + goto getout; + } + } + copy_to_user(buffer, &bwr, sizeof(bwr)); + result = 0; + } + } + break; + case BINDER_SET_WAKEUP_TIME: + if (size >= sizeof(binder_wakeup_time_t) && BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)) { + binder_wakeup_time_t *time = (binder_wakeup_time_t*)buffer; + result = binder_proc_SetWakeupTime(that->m_team, time->time, time->priority); + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + } + break; + case BINDER_SET_IDLE_TIMEOUT: + if (size >= 8 && BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)) { + result = binder_proc_SetIdleTimeout(that->m_team, *((bigtime_t*)buffer)); + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + } + break; + case BINDER_SET_REPLY_TIMEOUT: + if (size >= 8 && BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)) { + result = binder_proc_SetReplyTimeout(that->m_team, *((bigtime_t*)buffer)); + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + } + break; + case BINDER_SET_MAX_THREADS: + if (size >= 4 && BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)) { + result = binder_proc_SetMaxThreads(that->m_team, *((int*)buffer)); + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + } + break; + case BINDER_SET_IDLE_PRIORITY: + if (size >= 4 && BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)) { + result = binder_proc_SetIdlePriority(that->m_team, *((int*)buffer)); + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + } + break; + case BINDER_SET_CONTEXT_MGR: + if (size >= 4 && BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)) { + DPRINTF(2, (KERN_WARNING "bcSET_CONTEXT_MANAGER attempt by %p\n", that->m_team)); + // LOCK + // check for existing context + BND_LOCK(gContextManagerNodeLock); + if (!gContextManagerNode) { + // check for administration rights +#if CHECK_CAPS + if (capable(CAP_SYS_ADMIN)) { +#endif + gContextManagerNode = binder_node_init(that->m_team, NULL, NULL); + 
BND_FIRST_ACQUIRE(binder_node, gContextManagerNode, STRONG, that->m_team); + DPRINTF(2, (KERN_WARNING "making team %08lx context manager\n", (unsigned long)that->m_team)); + result = 0; +#if CHECK_CAPS + } else { + DPRINTF(2, (KERN_WARNING "%p doesn't have CAP_SYS_ADMIN rights\n", that->m_team)); + } +#endif + } else { + DPRINTF(2, (KERN_WARNING "gContextManagerNode already set to %p by %08lx", gContextManagerNode, (unsigned long)that->m_team)); + } + BND_UNLOCK(gContextManagerNodeLock); + // UNLOCK + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + } + break; + case BINDER_THREAD_EXIT: + BND_RELEASE(binder_thread, that, STRONG, 0); + result = 0; + break; + case BINDER_VERSION: + if (size >= sizeof(binder_version_t)) { + binder_version_t *vers = (binder_version_t*)buffer; + vers->protocol_version = BINDER_CURRENT_PROTOCOL_VERSION; + result = 0; + } + break; + default: + break; + } + +getout: + DPRINTF(2, (KERN_WARNING "%s(%p, %d, %p): proc=%p: result=%d\n", __func__, that, cmd, buffer, that->m_team, -result)); + + return result; +} + +int +binder_thread_Write(binder_thread_t *that, void *_buffer, int _size, signed long *consumed) +{ + int result, cmd, target; + binder_node_t *n; + iobuffer_t io; + + DPRINTF(2, (KERN_WARNING "binder_thread_Write(%p, %d)\n", _buffer, _size)); + if (that->m_err) return that->m_err; + if (!binder_proc_IsAlive(that->m_team)) return -ECONNREFUSED; + result = iobuffer_init(&io, (unsigned long)_buffer, _size, *consumed); + if (result) return result; + + while (1) { + if (that->m_consume) { + that->m_consume -= iobuffer_drain(&io, that->m_consume); + iobuffer_mark_consumed(&io); + } + target = -1; + if (iobuffer_read_u32(&io, &cmd)) goto finished; + DPRINTF(5, (KERN_WARNING "cmd: %d\n",cmd)); + switch (cmd) { + case bcINCREFS: { + if (iobuffer_read_u32(&io, &target)) goto finished; + DBREFS((KERN_WARNING "bcINCREFS of %d\n", target)); + if (BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)) { + 
binder_proc_RefDescriptor(that->m_team, target, WEAK); + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + } + iobuffer_mark_consumed(&io); + } break; + case bcINCREFS_DONE: { + void *ptr; + void *cookie; + if (iobuffer_read_void(&io, &ptr)) goto finished; + if (iobuffer_read_void(&io, &cookie)) goto finished; + DBREFS((KERN_WARNING "bcINCREFS_DONE of %p\n", ptr)); + if (BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)) { + if (binder_proc_Ptr2Node(that->m_team, ptr, cookie, &n, NULL, that, WEAK) == 0) { + BND_RELEASE(binder_node, n, WEAK, that->m_team); + BND_RELEASE(binder_node, n, WEAK, that->m_team); + } + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + } + iobuffer_mark_consumed(&io); + } break; + case bcACQUIRE: { + if (iobuffer_read_u32(&io, &target)) goto finished; + DBREFS((KERN_WARNING "bcACQUIRE of %d\n", target)); + if (BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)) { + binder_proc_RefDescriptor(that->m_team, target, STRONG); + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + } + iobuffer_mark_consumed(&io); + } break; + case bcACQUIRE_DONE: { + void *ptr; + void *cookie; + if (iobuffer_read_void(&io, &ptr)) goto finished; + if (iobuffer_read_void(&io, &cookie)) goto finished; + DBREFS((KERN_WARNING "bcACQUIRE_DONE of %p\n", ptr)); + if (BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)) { + if (binder_proc_Ptr2Node(that->m_team, ptr, cookie, &n, NULL, that, STRONG) == 0) { + BND_RELEASE(binder_node, n, STRONG, that->m_team); + BND_RELEASE(binder_node, n, STRONG, that->m_team); + } + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + } + iobuffer_mark_consumed(&io); + } break; + case bcATTEMPT_ACQUIRE: { + int priority; + if (iobuffer_read_u32(&io, &priority)) goto finished; + if (iobuffer_read_u32(&io, &target)) goto finished; + DBREFS((KERN_WARNING "bcATTEMPT_ACQUIRE of %d\n", target)); + if (BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)) { + binder_node_t *node; + if 
(binder_proc_AttemptRefDescriptor(that->m_team, target, &node)) { + DBREFS((KERN_WARNING "Immediate Success!\n")); + BND_ASSERT(!that->m_shortAttemptAcquire, "Already have AttemptAcquire result! (now succeeding)"); + that->m_shortAttemptAcquire = TRUE; + that->m_resultAttemptAcquire = TRUE; + } else if (node) { + binder_transaction_t *t; + // Need to wait for a synchronous acquire attempt + // on the remote node. Note that the transaction has + // special code to understand a tfAttemptAcquire, taking + // ownership of the secondary reference on 'node'. + DBREFS((KERN_WARNING "Sending off to owner!\n")); + t = binder_transaction_CreateRef(tfAttemptAcquire, binder_node_Ptr(node), binder_node_Cookie(node), that->m_team); + binder_transaction_SetPriority(t, (s16)priority); + t->target = node; + binder_transaction_SetInline(t, TRUE); + BND_LOCK(that->m_lock); + t->next = that->m_pendingRefResolution; + that->m_pendingRefResolution = t; + BND_UNLOCK(that->m_lock); + } else { + DBREFS((KERN_WARNING "Immediate Failure!\n")); + BND_ASSERT(!that->m_shortAttemptAcquire, "Already have AttemptAcquire result! (now failing)"); + that->m_shortAttemptAcquire = TRUE; + that->m_resultAttemptAcquire = FALSE; + } + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + } else { + DBREFS((KERN_WARNING "Team Failure!\n")); + BND_ASSERT(!that->m_shortAttemptAcquire, "Already have AttemptAcquire result! 
(now team failing)"); + that->m_shortAttemptAcquire = TRUE; + that->m_resultAttemptAcquire = FALSE; + } + iobuffer_mark_consumed(&io); + } break; + case bcACQUIRE_RESULT: { + int result; + binder_transaction_t *t; + if (iobuffer_read_u32(&io, &result)) goto finished; + iobuffer_mark_consumed(&io); + DBREFS((KERN_WARNING "bcACQUIRE_RESULT: %d\n",result)); + t = binder_transaction_Create(0, 0, 0, 0, NULL); + binder_transaction_SetAcquireReply(t, TRUE); + binder_transaction_SetInline(t, TRUE); + *(int *)t->data = result; + BND_LOCK(that->m_lock); + t->next = that->m_pendingRefResolution; + that->m_pendingRefResolution = t; + BND_UNLOCK(that->m_lock); + } break; + case bcRELEASE: { + if (iobuffer_read_u32(&io, &target)) goto finished; + DBREFS((KERN_WARNING "bcRELEASE of %d\n", target)); + if (BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)) { + binder_proc_UnrefDescriptor(that->m_team, target, STRONG); + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + } + iobuffer_mark_consumed(&io); + } break; + case bcDECREFS: { + if (iobuffer_read_u32(&io, &target)) goto finished; + DBREFS((KERN_WARNING "bcDECREFS of %d\n", target)); + if (BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)) { + binder_proc_UnrefDescriptor(that->m_team, target, WEAK); + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + } + iobuffer_mark_consumed(&io); + } break; + case bcFREE_BUFFER: { + void *ptr; + if (iobuffer_read_void(&io, &ptr)) goto finished; + DPRINTF(5, (KERN_WARNING "bcFREE_BUFFER: %p\n",ptr)); + BND_LOCK(that->m_lock); + if (that->m_pendingReply && that->m_pendingReply->map != NULL && binder_transaction_UserData(that->m_pendingReply) == ptr) { + // Data freed before reply sent. Remember this to free + // the transaction when we finally get its reply. 
+ binder_transaction_SetFreePending(that->m_pendingReply, TRUE); + BND_UNLOCK(that->m_lock); + } else { + BND_UNLOCK(that->m_lock); + binder_proc_FreeBuffer(that->m_team, ptr); + } + iobuffer_mark_consumed(&io); + } break; + case bcRETRIEVE_ROOT_OBJECT: { + int pid; + binder_thread_t *child; + if (iobuffer_read_u32(&io, &pid)) goto finished; + DPRINTF(2, (KERN_WARNING "bcRETRIEVE_ROOT_OBJECT: process %d\n", pid)); + child = attach_child_thread((pid_t)pid, that); + DPRINTF(2, (KERN_WARNING "bcRETRIEVE_ROOT_OBJECT: child binder_thread=%p\n", child)); + + BND_LOCK(that->m_lock); + if (child) { + that->pendingChild = child; + that->m_waitForReply++; + } else { + that->m_failedRootReceive = TRUE; + } + BND_UNLOCK(that->m_lock); + + iobuffer_mark_consumed(&io); + } break; + case bcTRANSACTION: + case bcREPLY: { + binder_transaction_data_t tr; + + if(cmd == bcTRANSACTION) { + DPRINTF(5, (KERN_WARNING "bcTRANSACTION\n")); + } + else { + DPRINTF(5, (KERN_WARNING "bcREPLY\n")); + } + + if (iobuffer_read_raw(&io, &tr, sizeof(tr))) goto finished; + if (tr.flags & tfInline) { + // ddprintf("inline transactions not supported yet\n"); + that->m_consume = tr.data_size - sizeof(tr.data); + iobuffer_mark_consumed(&io); + } else if (BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)) { + binder_transaction_t *t; + iobuffer_mark_consumed(&io); +/* + if (tr.data_size && !is_valid_range(tr.data.ptr.buffer, tr.data_size, PROT_UWR)) { + that->m_err = -EINVAL; + goto finished; + } + if (tr.offsets_size && !is_valid_range(tr.data.ptr.offsets, tr.offsets_size, PROT_UWR)) { + that->m_err = -EINVAL; + goto finished; + } +*/ + t = binder_transaction_Create(tr.code, tr.data_size, tr.data.ptr.buffer, tr.offsets_size, tr.data.ptr.offsets); + binder_transaction_SetUserFlags(t, tr.flags); + binder_transaction_SetPriority(t, (s16)tr.priority); + binder_transaction_SetReply(t, cmd == bcREPLY); + DPRINTF(4, ("Command %s %p: size=%p, first=%p\n", + cmd == bcTRANSACTION ? 
"transaction" : "reply", t, + tr.data_size, tr.data_size > 0 ? (*(u32*)tr.data.ptr.buffer) : 0)); + if (cmd == bcTRANSACTION) { + target = tr.target.handle; + if(target) { + t->target = binder_proc_Descriptor2Node(that->m_team, target, t, STRONG); + BND_ASSERT(t->target, "Failure converting target descriptor to node"); + } + else { + BND_LOCK(gContextManagerNodeLock); + if (gContextManagerNode && BND_ATTEMPT_ACQUIRE(binder_node, gContextManagerNode, STRONG, t)) { + t->target = gContextManagerNode; + } + else { + DPRINTF(0, (KERN_WARNING "Failed to acquire context manager node\n")); + t->target = NULL; + } + BND_UNLOCK(gContextManagerNodeLock); + } + DPRINTF(4, (KERN_WARNING "Transacting %p to %d(%p) in team %p\n", t, target, t->target, t->target ? t->target->m_home : NULL)); + } + + BND_LOCK(that->m_lock); + t->next = that->m_pendingRefResolution; + that->m_pendingRefResolution = t; + if (that->m_pendingReply && binder_transaction_IsRootObject(that->m_pendingReply)) { + BND_ASSERT(binder_transaction_IsRootObject(t), "EXPECTING ROOT REPLY!"); + } else { + BND_ASSERT(!that->m_pendingReply || !binder_transaction_IsRootObject(t), "UNEXPECTED ROOT REPLY!"); + } + BND_UNLOCK(that->m_lock); + + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + } + } break; + case bcREGISTER_LOOPER: { + DPRINTF(5, (KERN_WARNING "bcREGISTER_LOOPER for %p (%p:%d)\n", that, that->m_team, that->m_thid)); + BND_ASSERT(that->m_isSpawned == FALSE, "m_isSpawned in bcREGISTER_LOOPER"); + BND_ASSERT(that->m_isLooping == FALSE, "m_isLooping in bcREGISTER_LOOPER"); + that->m_isSpawned = TRUE; + that->m_isLooping = TRUE; + if (BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)) { + binder_proc_StartLooper(that->m_team, TRUE); + clear_bit(SPAWNING_BIT, &that->m_team->m_noop_spawner); + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + } + iobuffer_mark_consumed(&io); + } break; + case bcENTER_LOOPER: { + DPRINTF(5, (KERN_WARNING "bcENTER_LOOPER for %p (%p:%d)\n", that, 
that->m_team, that->m_thid)); + /* This thread is going to loop, but it's not one of the + driver's own loopers. */ + // ASSERT(that->m_isLooping == FALSE); + that->m_isLooping = TRUE; + if (BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)) { + binder_proc_StartLooper(that->m_team, FALSE); + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + } + iobuffer_mark_consumed(&io); + } break; + case bcEXIT_LOOPER: { + /* End of a looper that is not the driver's own. */ + DBSPAWN((KERN_WARNING "*** THREAD %p:%d RECEIVED bcEXIT_LOOPER\n", that->m_team, that->m_thid)); + if (binder_proc_IsAlive(that->m_team)) { + // ASSERT(that->m_isLooping == TRUE); + that->m_isLooping = FALSE; + if (BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)) { + binder_proc_FinishLooper(that->m_team, FALSE); + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + } + } + iobuffer_mark_consumed(&io); + } break; +#if 0 + case bcCATCH_ROOT_OBJECTS: { + if (BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)) { + if (binder_proc_IsAlive(that->m_team)) { + binder_proc_StartCapturingRootObjects(that->m_team); + } + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + } + iobuffer_mark_consumed(&io); + } break; +#endif + case bcSTOP_PROCESS: { + int now; + if (iobuffer_read_u32(&io, &target)) goto finished; + if (iobuffer_read_u32(&io, &now)) goto finished; + DBREFS((KERN_WARNING "bcSTOP_PROCESS of %d\n", target)); + if (BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)) { + binder_node_t *node = binder_proc_Descriptor2Node(that->m_team, target,that,WEAK); + if (node != NULL) { + binder_proc_t* proc = binder_node_AcquireHome(node, that); + if (proc != NULL) { + binder_proc_Stop(proc, now ? 
TRUE : FALSE); + BND_RELEASE(binder_proc, proc, STRONG, that); + } + BND_RELEASE(binder_node, node, WEAK,that); + } + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + } + iobuffer_mark_consumed(&io); + } break; + case bcSTOP_SELF: { + DPRINTF(5, (KERN_WARNING "bcSTOP_SELF\n")); + int now; + if (iobuffer_read_u32(&io, &now)) goto finished; + if (BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)) { + binder_proc_Stop(that->m_team, now ? TRUE : FALSE); + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + } + iobuffer_mark_consumed(&io); + } break; + case bcREQUEST_DEATH_NOTIFICATION: { + void *cookie; + binder_node_t *node; + if (iobuffer_read_u32(&io, &target)) goto finished; + if (iobuffer_read_void(&io, &cookie)) goto finished; + DPRINTF(5, (KERN_WARNING "bcREQUEST_DEATH_NOTIFICATION of %d w/cookie %p\n", target, cookie)); + node = binder_proc_Descriptor2Node(that->m_team, target, that, WEAK); + if(node != NULL) { + binder_proc_t* proc = binder_node_AcquireHome(node, node); + if (proc != NULL) { + binder_proc_RequestDeathNotification(proc, that->m_team, cookie); + BND_RELEASE(binder_proc, proc, STRONG, node); + } + BND_RELEASE(binder_node, node, WEAK, that); + } + iobuffer_mark_consumed(&io); + } break; + case bcCLEAR_DEATH_NOTIFICATION: { + void *cookie; + binder_node_t *node; + if (iobuffer_read_u32(&io, &target)) goto finished; + if (iobuffer_read_void(&io, &cookie)) goto finished; + DPRINTF(5, (KERN_WARNING "bcCLEAR_DEATH_NOTIFICATION of %d w/cookie %p\n", target, cookie)); + node = binder_proc_Descriptor2Node(that->m_team, target, that, WEAK); + if(node != NULL) { + binder_proc_t* proc = binder_node_AcquireHome(node, node); + if (proc != NULL) { + binder_proc_ClearDeathNotification(proc, that->m_team, cookie); + BND_RELEASE(binder_proc, proc, STRONG, node); + } + BND_RELEASE(binder_node, node, WEAK, that); + } + iobuffer_mark_consumed(&io); + } break; + case bcDEAD_BINDER_DONE: { + void *cookie; + if (iobuffer_read_void(&io, &cookie)) 
goto finished; + DPRINTF(5, (KERN_WARNING "bcDEAD_BINDER_DONE of cookie %p\n", cookie)); + binder_proc_DeadBinderDone(that->m_team, cookie); + iobuffer_mark_consumed(&io); + } break; + default: { + DPRINTF(5, (KERN_WARNING "Bad command %d on binder write().\n", cmd)); + } break; + } + } + +finished: + DPRINTF(5, (KERN_WARNING "binder_thread_Write() finished\n")); + *consumed = iobuffer_consumed(&io); + return 0; +} + +status_t +binder_thread_ReturnTransaction(binder_thread_t *that, iobuffer_t *io, binder_transaction_t *t) +{ + bool acquired; + bool freeImmediately; + binder_transaction_data_t tr; + DPRINTF(0, (KERN_WARNING "%s(%p:%d, %p, %p)\n", __func__, that->m_team, that->m_thid, io, t)); + if (iobuffer_remaining(io) < 18) return -ENOBUFS; + + acquired = BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that); + if (acquired) binder_transaction_ConvertFromNodes(t, that->m_team); + + freeImmediately = FALSE; + + if (binder_transaction_RefFlags(t)) { + DPRINTF(5, (KERN_WARNING " -- binder_transaction_RefFlags()\n")); + switch (binder_transaction_RefFlags(t)) { + case tfAttemptAcquire: { + DPRINTF(5, (KERN_WARNING " --- tfAttemptAcquire\n")); + iobuffer_write_u32(io, brATTEMPT_ACQUIRE); + iobuffer_write_u32(io, binder_transaction_Priority(t)); + } break; + case tfRelease: + DPRINTF(5, (KERN_WARNING " --- tfRelease\n")); + iobuffer_write_u32(io, brRELEASE); + break; + case tfDecRefs: + DPRINTF(5, (KERN_WARNING " --- tfDecRefs\n")); + iobuffer_write_u32(io, brDECREFS); + break; + } + DPRINTF(5, (KERN_WARNING " --- writing data pointer %p\n", t->data_ptr)); + // iobuffer_write_void(io, *((void**)binder_transaction_Data(t))); + iobuffer_write_void(io, t->data_ptr); // binder object token + iobuffer_write_void(io, t->offsets_ptr); // binder object cookie + freeImmediately = binder_transaction_RefFlags(t) != tfAttemptAcquire; + // Take reference on team, so it won't go away until this transaction + // is processed. 
+ if (binder_transaction_TakeTeam(t, that->m_team)) { + BND_LOCK(that->m_lock); + that->m_teamRefs++; + BND_UNLOCK(that->m_lock); + } + } else if (binder_transaction_IsAcquireReply(t)) { + DPRINTF(5, (KERN_WARNING " -- binder_transaction_IsAcquireReply()\n")); + iobuffer_write_u32(io, brACQUIRE_RESULT); + // iobuffer_write_u32(io, *((int*)binder_transaction_Data(t))); + iobuffer_write_u32(io, *(u32*)t->data); + freeImmediately = TRUE; + } else if (binder_transaction_IsDeadReply(t)) { + DPRINTF(5, (KERN_WARNING " -- binder_transaction_IsDeadReply()\n")); + if (that->pendingChild) binder_thread_ReleasePendingChild(that); + iobuffer_write_u32(io, brDEAD_REPLY); + freeImmediately = TRUE; + } else if (binder_transaction_IsFailedReply(t)) { + DPRINTF(5, (KERN_WARNING " -- binder_transaction_IsFailedReply()\n")); + if (that->pendingChild) binder_thread_ReleasePendingChild(that); + iobuffer_write_u32(io, brFAILED_REPLY); + freeImmediately = TRUE; + } else { + DPRINTF(5, (KERN_WARNING " -- else binder_transaction_IsReply(%p): %s\n", t, binder_transaction_IsReply(t) ? "true" : "false")); + if (that->pendingChild) binder_thread_ReleasePendingChild(that); + tr.flags = binder_transaction_UserFlags(t); + tr.priority = binder_transaction_Priority(t); + if (acquired) { + tr.data_size = binder_transaction_DataSize(t); + tr.offsets_size = binder_transaction_OffsetsSize(t); + tr.data.ptr.buffer = binder_transaction_UserData(t); + tr.data.ptr.offsets = binder_transaction_UserOffsets(t); + } else { + tr.data_size = 0; + tr.offsets_size = 0; + tr.data.ptr.buffer = NULL; + tr.data.ptr.offsets = NULL; + } + + DPRINTF(4, ("Response %s %p: size=%p, data=%p, first=%p\n", + !binder_transaction_IsReply(t) == bcTRANSACTION ? "transaction" : "reply", t, + tr.data_size, tr.data.ptr.buffer, + tr.data_size > 0 ? 
(*(u32*)binder_transaction_Data(t)) : 0)); + + DPRINTF(5, (KERN_WARNING "%s(%p:%d, %p, %p) tr-data %p %d tr-offsets %p %d\n", __func__, that->m_team, that->m_thid, io, t, tr.data.ptr.buffer, tr.data_size, tr.data.ptr.offsets, tr.offsets_size)); + + if (binder_transaction_IsReply(t)) { + tr.target.ptr = NULL; + tr.code = 0; + iobuffer_write_u32(io, brREPLY); + } else { + if (t->target) { + tr.target.ptr = binder_node_Ptr(t->target); + tr.cookie = binder_node_Cookie(t->target); + } else { + tr.target.ptr = NULL; + tr.cookie = NULL; + } + tr.code = binder_transaction_Code(t); + iobuffer_write_u32(io, brTRANSACTION); + } + iobuffer_write_raw(io, &tr, sizeof(tr)); + } + + if (freeImmediately) { + DPRINTF(0, (KERN_WARNING "binder_thread_ReturnTransaction() delete %p\n",t)); + binder_transaction_Destroy(t); + } else { + t->receiver = that; + BND_ACQUIRE(binder_thread, that, WEAK, t); + if (t->sender) { + /* A synchronous transaction blocks this thread until + the receiver completes. */ + DPRINTF(0, (KERN_WARNING "binder_thread %p:%d (%d): enqueueing transaction %p, pending reply %p\n", that->m_team, that->m_thid, that->virtualThid, t, that->m_pendingReply)); + BND_ASSERT(!binder_transaction_IsFreePending(t), "transaction with free pending!"); + if (that->virtualThid) { + if (t->sender->virtualThid) { + BND_ASSERT(t->sender->virtualThid == that->virtualThid, "Bad virtualThid from sender!"); + } else { + BND_ASSERT(t->sender->m_thid == that->virtualThid, "My virtualThid is different than sender thid!"); + } + } + DPRINTF(5, (KERN_WARNING "t->sender->virtualThid: %d, that->virtualThid: %d\n", t->sender->virtualThid, that->virtualThid)); + if (t->sender->virtualThid) { + BND_ASSERT(that->virtualThid == 0 || that->virtualThid == t->sender->virtualThid, "virtualThid not cleared!"); + that->virtualThid = t->sender->virtualThid; + DPRINTF(0, (KERN_WARNING "Continuing virtualThid: %d\n", that->virtualThid)); + } else { + BND_ASSERT(that->virtualThid == 0 || that->virtualThid == 
t->sender->m_thid, "virtualThid not cleared!"); + that->virtualThid = t->sender->m_thid; + DPRINTF(0, (KERN_WARNING "Starting new virtualThid: %d\n", that->virtualThid)); + } + BND_LOCK(that->m_lock); + DPRINTF(5, (KERN_WARNING "%p:%d(%d) new reply: %p, pending reply: %p\n", that->m_team, that->m_thid, that->virtualThid, t, that->m_pendingReply)); + t->next = that->m_pendingReply; + that->m_pendingReply = t; + BND_UNLOCK(that->m_lock); + } else { + /* A reply transaction just waits until the receiver is done with + its data. */ + DPRINTF(0, (KERN_WARNING "binder_thread: return reply transaction %p\n", t)); + binder_proc_AddToNeedFreeList(that->m_team, t); + } + } + + iobuffer_mark_consumed(io); + + if (acquired) BND_RELEASE(binder_proc, that->m_team, STRONG, that); + + return 0; +} + +status_t +binder_thread_WaitForReply(binder_thread_t *that, iobuffer_t *io) +{ + status_t err; + binder_transaction_t *t = NULL; + if (iobuffer_remaining(io) < 18) return -ENOBUFS; + + if (!BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)) return -ECONNREFUSED; + + if (that->m_isLooping) binder_proc_TakeMeOffYourList(that->m_team); + + // FIXME: implement reply timeouts? + err = wait_event_interruptible(that->m_wait, atomic_read(&that->m_wake_count) > 0); + if(err == 0) + atomic_dec(&that->m_wake_count); + DPRINTF(0, (KERN_WARNING "%p:%d down_interruptible() returned %08x\n", that->m_team, that->m_thid, err)); + + //DBTRANSACT((KERN_WARNING "*** Thread %d received direct %p! wait=%d, isAnyReply=%d\n", current->pid, that->m_reply, that->m_waitForReply, binder_transaction_IsAnyReply(that->m_reply))); + + /* FFB: why don't we check the err here, geh/hackbod? */ + if (that->m_isLooping) binder_proc_PutMeBackInTheGameCoach(that->m_team); + + BND_LOCK(that->m_lock); + if ((t = that->m_reply)) { + status_t result; + /* If this is a reply, handle it. When the binder_proc_t supplies + a reflection, it will take care of adjusting our thread + priority at that point. 
The user-space looper is responsible + for restoring its priority when done handling the reflect. */ + if (binder_transaction_IsAnyReply(t)) that->m_waitForReply--; + that->m_reply = t->next; + BND_UNLOCK(that->m_lock); + result = binder_thread_ReturnTransaction(that, io, t); + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + return result; + } + BND_UNLOCK(that->m_lock); + + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + // We can get here if we need to spawn a looper. + // BND_VALIDATE(err != 0, "Binder replySem released without reply available", return -EINVAL); + return err; +} + +status_t +binder_thread_WaitForRequest(binder_thread_t *that, iobuffer_t *io) +{ + binder_transaction_t *t = NULL; + status_t err; + if (iobuffer_remaining(io) < 18) return -ENOBUFS; + + err = binder_proc_WaitForRequest(that->m_team, that, &t); + if (err == 0 && t != NULL) { + // ASSERT(t); + err = binder_thread_ReturnTransaction(that, io, t); + } + + return err; +} + +static status_t +binder_thread_WaitForParent(binder_thread_t *that) +{ + binder_thread_t *targetThread; + struct task_struct *parentTask; + pid_t childPid; + bigtime_t wakeupTime; + status_t err; + + DPRINTF(5, (KERN_WARNING "%s: on thread %p\n", __func__, that)); + + // We want to support wrappers, where the real child process + // being run may have some additional processes (such as xterms, + // gdb sessions, etc) between it and the parent that started it. + // In that case, the parent won't be talking directly with our + // thread structure but instead with its immediate child, so we + // need to go up and find it. + + targetThread = that; + if (that->m_pendingReply == NULL) { + DPRINTF(5, (KERN_WARNING "%s: PID %d: finding parent who forked us.\n", __func__, that->m_thid)); + // Parent hasn't set this thread up for a reply... figure out + // what is going on. 
+ targetThread = NULL; + parentTask = current; + do { + childPid = parentTask->pid; + parentTask = parentTask->parent; + if (!parentTask) break; + targetThread = check_for_thread(parentTask->pid, FALSE); + DPRINTF(5, (KERN_WARNING "%s: Up to parent PID %d: targetThread=%p\n", __func__, parentTask->pid, targetThread)); + } while (targetThread == NULL); + + // If we found a thread structure, and it is not set up to + // send a root reply, then we hit the parent and it has not + // yet stopped to wait for the reply. So we'll go ahead and + // and create the child thread structure so we can block on + // it until the parent gets it set up. + DPRINTF(5, (KERN_WARNING "%s: Finished search: targetThread=%p, childPid=%d\n", __func__, targetThread, childPid)); + if (targetThread && !targetThread->m_pendingReplyIsRoot) { + targetThread = check_for_thread(childPid, TRUE); + DPRINTF(5, (KERN_WARNING "%s: Created wrapper process thread structure: %p\n", __func__, targetThread)); + } + } + + if (targetThread == NULL) { + printk(KERN_WARNING "%s: Binder: PID %d attempting to send root reply without waiting parent\n", __func__, that->m_thid); + return -EINVAL; + } + + // Now wait for the parent to be blocked waiting for a reply. + // Hard-coded to give the parent 10 seconds to get around to us. + wakeupTime = 10*HZ; + do_div(wakeupTime, TICK_NSEC); + wakeupTime += get_jiffies_64(); + DPRINTF(0, (KERN_WARNING "%s: Process %d is about to snooze on thread %p (%d)\n", __func__, current->pid, targetThread, targetThread->m_thid)); + err = binder_thread_Snooze(targetThread, wakeupTime); + + // Just one more thing to deal with -- if there is a wrapper process, + // then it is the wrapper that has been set up to reply. We need to + // move that state to our own process because we are the one doing + // the reply. 
+ if (targetThread != that) { + binder_transaction_t* reply; + BND_LOCK(targetThread->m_lock); + DPRINTF(1, (KERN_WARNING "%s: Wrapper has pendingReply=%p, isRoot=%d\n", __func__, targetThread->m_pendingReply, targetThread->m_pendingReplyIsRoot)); + reply = targetThread->m_pendingReply; + if (reply) { + targetThread->m_pendingReply = reply->next; + targetThread->m_pendingReplyIsRoot = FALSE; + } + BND_UNLOCK(targetThread->m_lock); + + if (reply) { + BND_LOCK(that->m_lock); + reply->next = that->m_pendingReply; + that->m_pendingReply = reply; + that->m_pendingReplyIsRoot = TRUE; + BND_UNLOCK(that->m_lock); + } + + // The retrieval of the wrapper thread structure caused us + // to take a reference on it. Now release the reference, + // removing the structure from our thread list if appropriate. + forget_thread(targetThread); + } + + if (err != 0 && that->m_pendingReply) { + /* If an error occurred but the pendingReply has + also been given, then our semaphore has also been + released. We don't want to get out of sync. */ + DPRINTF(5, (KERN_WARNING "Thread %d: Re-acquire IO sem!\n", binder_thread_Thid(that))); + // Note: targetThread -is- the correct one to use here, that + // is the one we blocked on. + binder_thread_AcquireIOSem(targetThread); + } + + DPRINTF(0, (KERN_WARNING "%s: Returning: pendingReply=%p, err=%d\n", __func__, that->m_pendingReply, err)); + return that->m_pendingReply ? 
0 : err; +} + +int +binder_thread_Read(binder_thread_t *that, void *buffer, int size, signed long *consumed) +{ + int origRemain; + status_t err = 0; + bool isRoot; + bool isInline; + /* ditch these next two lines under linux, if we can */ + pid_t me = current->pid; + + binder_transaction_t *t,*replyTo; + iobuffer_t io; + bool acquired = FALSE; + + if (me != that->m_thid) return -EINVAL; + + DPRINTF(0, (KERN_WARNING "binder_thread_Read: %08lx (%p:%d)\n", (unsigned long)that, that->m_team, that->m_thid)); + iobuffer_init(&io, (unsigned long)buffer, size, *consumed); + + /* + * Write brNOOP, but don't mark it consumed. We'll replace the brNOOP with + * a brSPAWN_LOOPER if we need to spawn a thread. + * Only do this once, in case the system call gets restarted for some reason. + */ + if (*consumed == 0) iobuffer_write_u32(&io, brNOOP); + + /* Read as much data as possible, until we either have to block + or have filled the buffer. */ + + while (iobuffer_remaining(&io) > 8) { + if (!binder_proc_IsAlive(that->m_team)) { + /* If the team is dead, write a command to say so and exit + right now. Do not pass go, do not collect $200. */ + DPRINTF(0, (KERN_WARNING " binder_proc_IsAlive(%08x): false\n", (unsigned int)that->m_team)); + iobuffer_write_u32(&io, brFINISHED); + iobuffer_mark_consumed(&io); + err = -ECONNREFUSED; + goto finished; + } else if (that->m_shortAttemptAcquire) { + /* Return the result of a short-circuited attempt acquire. */ + DPRINTF(0, (KERN_WARNING "Thread %d already has reply!\n", that->m_thid)); + that->m_shortAttemptAcquire = FALSE; + iobuffer_write_u32(&io, brACQUIRE_RESULT); + iobuffer_write_u32(&io, that->m_resultAttemptAcquire); + iobuffer_mark_consumed(&io); + continue; + } else if (that->m_failedRootReceive) { + // XXX Would be nice to return a little more informative + // error message. + that->m_failedRootReceive = FALSE; + iobuffer_write_u32(&io, brDEAD_REPLY); + goto finished; + } + + /* Look for a queued transaction. 
*/ + BND_LOCK(that->m_lock); + if ((t=that->m_pendingRefResolution) != NULL) { + if (iobuffer_consumed(&io) > 0 && (binder_transaction_MaxIOToNodes(t)+4) > iobuffer_remaining(&io)) { + /* If there is already data in the buffer, and may not be enough + room for what this transaction could generate, then stop now. */ + DPRINTF(0, (KERN_WARNING "Aborting ConvertToNodes: consumed=%d, max=%d, remain=%d\n", iobuffer_consumed(&io), binder_transaction_MaxIOToNodes(t)+4, iobuffer_remaining(&io))); + BND_UNLOCK(that->m_lock); + goto finished; + } + that->m_pendingRefResolution = t->next; + } + BND_UNLOCK(that->m_lock); + + /* If a transaction was found, twiddle it and send it off. */ + if (t != NULL && (acquired || (acquired=BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that)))) { + + DPRINTF(5, (KERN_WARNING "Thread %d has pending transaction %p\n", that->m_thid, t)); + + isRoot = (binder_transaction_IsRootObject(t)); + + /* Perform node conversion if not already done. */ + if (!binder_transaction_IsReferenced(t)) { + binder_proc_t *proc = NULL; + int acquiredProc = 0; + + DBREAD((KERN_WARNING "Thread %d performing ref resolution!\n", that->m_thid)); + origRemain = iobuffer_remaining(&io); + err = 0; + if (isRoot) { + // If we are replying with the root object, we first need to block + // until our parent has set us up to have somewhere to reply to. + err = binder_thread_WaitForParent(that); + + BND_LOCK(that->m_lock); + that->m_failedRootReply = that->m_pendingReply == NULL; + if (that->m_failedRootReply) err = -EINVAL; + BND_UNLOCK(that->m_lock); + } + /* + * The moment of truth. In order to convert nodes, we have to + * copy the data. In order to copy the data, we need to know + * the recipient of the transaction. If the transaction has a + * target, the target's team becomes the recipient. If the + * transaction carries a reply, use the pending reply's sending + * team. 
+ */ + if (err == 0) { + if (t->target) { + proc = binder_node_AcquireHome(t->target, that); + acquiredProc = proc != NULL; + } else { + proc = that->m_pendingReply ? binder_thread_Team(that->m_pendingReply->sender) : NULL; + } + err = proc ? 0 : -EINVAL; + } + if (!proc) { + DPRINTF(0, (KERN_WARNING "*#*#*# NO TARGET PROCESS FOR binder_transaction_CopyTransactionData #*#*#*\n")); + DPRINTF(0, (KERN_WARNING "t->target: %p, that->m_pendingReply: %p, m_pendingReply->sender: %p\n", t->target, that->m_pendingReply, that->m_pendingReply ? that->m_pendingReply->sender : NULL)); + } + if (err == 0) + err = binder_transaction_CopyTransactionData(t, proc); + if (err == 0) + err = binder_transaction_ConvertToNodes(t, that->m_team, &io); + /* If we got some error, report error to the caller so they don't wait forever. */ + if (err < 0 && !binder_transaction_IsReply(t)) { + if(proc && binder_proc_IsAlive(proc)) + iobuffer_write_u32(&io, brFAILED_REPLY); + else + iobuffer_write_u32(&io, brDEAD_REPLY); + } + iobuffer_mark_consumed(&io); + + if (acquiredProc) { + BND_RELEASE(binder_proc, proc, STRONG, that); + } + + if (err < 0 || iobuffer_remaining(&io) < 4) { + /* XXX Fail if we run out of room. Do we need to deal with this + better. (It's only a problem if the caller is trying to read in + to a buffer that isn't big enough, in total, for a returned + transaction. */ + DPRINTF(0, (KERN_WARNING "Aborting transaction: err: %08x (or not enough room to return last command)\n", err)); + err = 0; + if(!binder_transaction_IsReply(t)) { + binder_transaction_Destroy(t); + goto finished; + } + binder_transaction_SetFailedReply(t, TRUE); + } + + /* If we aren't sending anything back to the caller, we can + deliver this transaction right away. Otherwise, we must + wait for the caller to process the returned data. This + is due to a race condition between the receiver releasing + its references and the caller acquiring any new references + returned by the driver. 
*/ + if (origRemain != iobuffer_remaining(&io)) { + DBREAD((KERN_WARNING "Transaction acquired references! Keeping.\n")); + BND_LOCK(that->m_lock); + t->next = that->m_pendingRefResolution; + that->m_pendingRefResolution = t; + BND_UNLOCK(that->m_lock); + t = NULL; + } + } +#if 0 + // FFB's broken debug code + else { + DPRINTF(0, (KERN_WARNING "binder_transaction_IsReferenced(%p) true -- sender: %d (vthid: %d)\n", t, t->sender->m_thid, t->sender->virtualThid)); + } +#endif + + /* Send this transaction off to its target. */ + if (t != NULL) { + DBREAD((KERN_WARNING "Thread %d delivering transaction!\n", that->m_thid)); + isInline = binder_transaction_IsInline(t); + if (binder_transaction_IsAnyReply(t)) { + BND_LOCK(that->m_lock); + + replyTo = that->m_pendingReply; + if (replyTo) { + that->m_pendingReply = replyTo->next; + if (!that->m_pendingReply) { + that->virtualThid = 0; + DPRINTF(5, (KERN_WARNING "virtualThid reset to 0, m_waitForReply: %d\n", that->m_waitForReply)); + } else { + DPRINTF(5, (KERN_WARNING "virtualThid: %d, m_pendingReply: %p, m_waitForReply: %d\n", that->virtualThid, that->m_pendingReply, that->m_waitForReply)); + } + BND_UNLOCK(that->m_lock); + + /* If this is a successful bcATTEMPT_ACQUIRE, then take + care of reference counts now. + */ + if (binder_transaction_IsAcquireReply(t) && (*(int*)t->data != 0)) { + binder_proc_ForceRefNode(binder_thread_Team(replyTo->sender), replyTo->target, &io); + } + + if (binder_transaction_IsRootObject(replyTo)) { + BND_ASSERT(binder_transaction_IsRootObject(t), "EXPECTING ROOT REPLY!"); + } else if (binder_transaction_RefFlags(replyTo)&tfAttemptAcquire) { + BND_ASSERT(binder_transaction_IsAcquireReply(t), "EXPECTING ACQUIRE REPLY!"); + } else { + BND_ASSERT(!binder_transaction_IsRootObject(t) && !binder_transaction_IsAcquireReply(t), "EXPECTING REGULAR REPLY!"); + } + + DBTRANSACT((KERN_WARNING "*** Thread %d is replying to %p with %p! 
wait=%d\n", + that->m_thid, replyTo, t, that->m_waitForReply)); + binder_thread_Reply(replyTo->sender, t); + if (binder_transaction_IsInline(replyTo) || binder_transaction_IsRootObject(replyTo)) { + binder_transaction_Destroy(replyTo); + } else { + DPRINTF(0, (KERN_WARNING "binder_thread: finish reply request %p\n", replyTo)); + if (binder_transaction_IsFreePending(replyTo)) { + binder_transaction_Destroy(replyTo); + } else { + binder_proc_AddToNeedFreeList(that->m_team, replyTo); + } + } + } else { + BND_UNLOCK(that->m_lock); + DPRINTF(1, (KERN_WARNING "********** Nowhere for reply to go!!!!!!!!!!!\n")); +#if 0 + BND_ASSERT(binder_transaction_IsRootObject(t) || !binder_proc_IsAlive(that->m_team), "Unexpected reply!"); + if (binder_transaction_IsRootObject(t)) binder_proc_CaptureRootObject(t); + else { + binder_transaction_Destroy(t); + } +#endif + } + } else { + t->sender = that; + BND_ACQUIRE(binder_thread, that, WEAK, t); + that->m_waitForReply++; + DPRINTF(2, (KERN_WARNING "*** Thread %d going to wait for reply to %p! now wait=%d\n", that->m_thid, t, that->m_waitForReply)); + if (t->target) binder_node_Send(t->target, t); + else { + binder_thread_ReplyDead(that); + binder_transaction_Destroy(t); + } + } + if (!isInline) iobuffer_write_u32(&io, brTRANSACTION_COMPLETE); + iobuffer_mark_consumed(&io); + } + + /* Got a transaction but team is going away. Toss it. */ + } else if (t != NULL) { + DPRINTF(0, (KERN_WARNING "Transaction sent to dying team, thread %d.\n", that->m_thid)); + binder_transaction_DestroyNoRefs(t); + + /* If there is data available, return it now instead of + waiting for the next transaction. */ + } else if (iobuffer_consumed(&io) > 0) { + DPRINTF(2, (KERN_WARNING "Thread %d has %d bytes of data to return, won't wait for transaction.\n", that->m_thid, iobuffer_consumed(&io))); + goto finished; + + /* No transaction, but maybe we are waiting for a reply back? 
*/ + } else if (that->m_waitForReply) { + DPRINTF(2, (KERN_WARNING "Thread %d waiting for reply!\n", that->m_thid)); + if ((sizeof(binder_transaction_data_t)+8) > iobuffer_remaining(&io)) { + /* If there isn't enough room in the buffer to return a transaction, + then stop now. */ + DPRINTF(0, (KERN_WARNING "Aborting read: Not enough room to return reply\n")); + goto finished; + } + err = binder_thread_WaitForReply(that, &io); + if (err == -ENOBUFS) err = 0; + goto finished; + + /* We're all out. Just wait for something else to do. */ + } else { + DPRINTF(2, (KERN_WARNING "Thread %d waiting for request, vthid: %d!\n", that->m_thid, that->virtualThid)); + BND_ASSERT(that->virtualThid == 0, "Waiting for transaction with vthid != 0"); + BND_ASSERT(that->m_pendingReply == NULL, "Waiting for transaction with pending reply"); + + if (that->m_teamRefs > 0) { + int relCount; + BND_LOCK(that->m_lock); + relCount = that->m_teamRefs; + that->m_teamRefs = 0; + BND_UNLOCK(that->m_lock); + DPRINTF(3, (KERN_WARNING "Unlocking proc %08x %d times\n", (unsigned int)that->m_team, relCount)); + + while (relCount) { + BND_RELEASE(binder_proc, that->m_team, STRONG, that); + relCount--; + } + } + + err = binder_thread_WaitForRequest(that, &io); + if (err == -ERESTARTSYS) { + goto finished; + } else if (err == -EINTR) { + goto finished; + } else if (err == -ECONNREFUSED) { + goto finished; + } else if (err == -ENOBUFS) { + err = 0; + goto finished; + } else if (err == REQUEST_EVENT_READY) { + iobuffer_write_u32(&io, brEVENT_OCCURRED); + iobuffer_write_u32(&io, that->returnedEventPriority); + iobuffer_mark_consumed(&io); + err = 0; + } else if (err == DEATH_NOTIFICATION_READY) { + binder_proc_GetPendingDeathNotifications(that->m_team, that, &io); + iobuffer_mark_consumed(&io); + err = 0; + } else if (err == -ETIMEDOUT) { + if (that->m_isLooping) { + if ((acquired=BND_ATTEMPT_ACQUIRE(binder_proc, that->m_team, STRONG, that))) + binder_proc_FinishLooper(that->m_team, that->m_isSpawned); + 
that->m_isLooping = FALSE; + } + if (that->m_isSpawned) iobuffer_write_u32(&io, brFINISHED); + else iobuffer_write_u32(&io, brOK); + iobuffer_mark_consumed(&io); + err = 0; + } + /* + else if (err == B_BAD_SEM_ID) { + iobuffer_write_u32(&io, brFINISHED); + iobuffer_mark_consumed(&io); + } + */ + else if (err < 0) { + iobuffer_write_u32(&io, brERROR); + iobuffer_write_u32(&io, err); + iobuffer_mark_consumed(&io); + err = 0; + goto finished; + } + } + } + +finished: + if (acquired) BND_RELEASE(binder_proc, that->m_team, STRONG, that); + + // Return number of bytes available, or the last error code + // if there are none. (This is so we can return -EINTR.) + *consumed = iobuffer_consumed(&io); + + if (err != -ERESTARTSYS) { + if (test_and_clear_bit(DO_SPAWN_BIT, &that->m_team->m_noop_spawner)) { + DBSPAWN((KERN_WARNING "Asking %p:%d to brSPAWN_LOOPER\n", that->m_team, that->m_thid)); + // make the brNOOP into a brSPAWN_LOOPER + // *(u32*)buffer = brSPAWN_LOOPER; + // We call the unchecked __put_user() here because the constructor + // for iobuffer already called access_ok(). + __put_user(brSPAWN_LOOPER, (u32*)buffer); + if (iobuffer_consumed(&io) < sizeof(u32)) { + iobuffer_mark_consumed(&io); + *consumed = iobuffer_consumed(&io); + } + } + } + return err; +} + +status_t +binder_thread_Snooze(binder_thread_t *that, bigtime_t timeout) +{ + status_t res = 0; + + DPRINTF(1, (KERN_WARNING "binder_thread_Snooze(%d, %lld)\n", that->m_thid, timeout)); + /* + * I don't know if I got the semantics correct for this. 
+ status_t res = acquire_sem_etc(that->m_ioSem,1,B_CAN_INTERRUPT|B_ABSOLUTE_TIMEOUT,timeout); + */ + + if(signal_pending(current)) { + DPRINTF(1, (KERN_WARNING "binder_thread_Snooze(%d, %lld) signal pending -- ABORT\n", that->m_thid, timeout)); + return -ERESTARTSYS; + } + + timeout -= get_jiffies_64(); + DPRINTF(1, (KERN_WARNING "binder_thread_Snooze(%d, relative %lld)\n", that->m_thid, timeout)); + if (timeout > 0) { +#if 1 + bigtime_t check = timeout; + do_div(check, HZ); + if (check > 10) { + DPRINTF(0, (KERN_WARNING "%s: timeout exceeds 10 seconds at %Ld sec\n", __func__, check)); + return -ETIMEDOUT; + } +#endif + DPRINTF(5, (KERN_WARNING "%s: m_wake_count: %d\n", __func__, atomic_read(&that->m_wake_count))); + res = wait_event_interruptible_timeout(that->m_wait, atomic_read(&that->m_wake_count) > 0, timeout); + if(res > 0) + atomic_dec(&that->m_wake_count); + } + else { + /* Makes system lock up due to busy wait + * bug temporary + * when not using unlocked ioctl + */ + static unsigned int last_yield = 0; + unsigned int now = jiffies; + if ((now - last_yield) > 5*HZ) { + last_yield = now; + //printk(KERN_WARNING "binder_thread_Snooze(%d, %lld) yield wakeup_time thread %lld, team %lld, this %p, team->waitStack %p, team->state %x\n", + // that->m_thid, timeout, that->wakeupTime, that->m_team->m_wakeupTime, that, that->m_team->m_waitStack, that->m_team->m_state); + yield(); + } + } + + //ddprintf("Result of snooze in thread %ld: 0x%08lx\n", that->m_thid, res); + if (res == 0) // timed out + res = -ETIMEDOUT; + else if (res > 0) // acquired, reports time remaining + res = 0; + return res; +} + +status_t +binder_thread_AcquireIOSem(binder_thread_t *that) +{ + int err; + DPRINTF(0, (KERN_WARNING "binder_thread_AcquireIOSem(%d)\n", that->m_thid)); + // while (acquire_sem_etc(that->m_ioSem,1,B_TIMEOUT,0) == -EINTR) ; + //wait_event(that->m_wait, that->m_wake_count > 0); + err = wait_event_interruptible(that->m_wait, atomic_read(&that->m_wake_count) > 0); // this 
should probably not be interruptible, but it allows us to kill the thread + if(err == 0) + atomic_dec(&that->m_wake_count); + return err; +} + +void +binder_thread_Wakeup(binder_thread_t *that) +{ + DIPRINTF(0, (KERN_WARNING "binder_thread_Wakeup(%d)\n", that->m_thid)); + // We use B_DO_NOT_RESCHEDULE here because Wakeup() is usually called + // while the binder_proc_t is locked. If the thread is a real-time + // priority, waking up here will cause pinging between this thread + // and its caller. (We wake up, block on the binder_proc_t, the caller + // continues and unlocks, then we continue.) + // release_sem_etc(that->m_ioSem, 1, B_DO_NOT_RESCHEDULE); + // FIXME: this may not have the do-not-reschedule semantics we want (wake_up_interruptible_sync may work for this) + atomic_add(1, &that->m_wake_count); + wake_up(&that->m_wait); + //wake_up_interruptible_sync(&that->m_wait); +} + +void +binder_thread_Reply(binder_thread_t *that, binder_transaction_t *t) +{ + DBTRANSACT((KERN_WARNING "*** Thread %d (vthid %d) sending to %d (vthid %d)! wait=%d, isReply=%d, isAcquireReply=%d\n", + current->pid, t->sender ? 
t->sender->virtualThid : -1, + that->m_thid, that->virtualThid, that->m_waitForReply, binder_transaction_IsReply(t), binder_transaction_IsAcquireReply(t))); + BND_LOCK(that->m_lock); + if (that->m_team && binder_proc_IsAlive(that->m_team)) { + // BND_VALIDATE(that->m_reply == NULL, "Already have reply!", ddprintf("Current reply: %p, new reply: %p\n", that->m_reply, t)); + BND_ASSERT(that->m_waitForReply > 0, "Not waiting for a reply!"); + t->next = that->m_reply; + that->m_reply = t; + } else { + BND_ASSERT(t != NULL, "binder_thread_Reply() called with NULL transaction!"); + if (t) binder_transaction_Destroy(t); + } + BND_UNLOCK(that->m_lock); + atomic_add(1, &that->m_wake_count); + wake_up(&that->m_wait); +} + +void +binder_thread_ReplyDead(binder_thread_t *that) +{ + binder_transaction_t* t = binder_transaction_CreateEmpty(); + binder_transaction_SetDeadReply(t, TRUE); + binder_thread_Reply(that, t); +} + +BND_IMPLEMENT_ACQUIRE_RELEASE(binder_thread); +BND_IMPLEMENT_ATTEMPT_ACQUIRE(binder_thread); + + diff -Nru linux-2.6.23/drivers/binder/binder_thread.h kernel.android/drivers/binder/binder_thread.h --- linux-2.6.23/drivers/binder/binder_thread.h 1970-01-01 10:00:00.000000000 +1000 +++ kernel.android/drivers/binder/binder_thread.h 2007-11-12 07:49:02.000000000 +1100 @@ -0,0 +1,153 @@ +/* binder driver + * Copyright (C) 2005 Palmsource, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + +#ifndef BINDER_THREAD_H +#define BINDER_THREAD_H + +#include "binder_defs.h" +#include + +typedef struct binder_thread { + /* These are protected by binder.c's global lock. */ + struct hlist_node node; + bool attachedToThread; /* Expecting a BINDER_THREAD_EXIT. */ + + /* These are managed by binder_proc_t. Nothing else should + touch them. */ + struct binder_thread * next; /* List of all threads */ + struct list_head waitStackEntry; + struct binder_thread * pendingChild; /* Child for bcREQUEST_ROOT_OBJECT */ + struct binder_transaction * nextRequest; /* Return request to waiting thread */ + enum { + WAKE_REASON_NONE = 0, + WAKE_REASON_IDLE, + WAKE_REASON_PROCESS_DEATH + } wakeReason; + + /* Stupid hack. */ + int returnedEventPriority; + + pid_t virtualThid; /* The thid for the transaction thread group */ + atomic_t m_primaryRefs; + atomic_t m_secondaryRefs; + status_t m_err; + pid_t m_thid; + wait_queue_head_t m_wait; + atomic_t m_wake_count; + int m_waitForReply; + int m_consume; + + struct semaphore m_lock; + struct binder_proc * m_team; // the team we belong to + struct binder_transaction * m_reply; + struct binder_transaction * m_pendingReply; + struct binder_transaction * m_pendingRefResolution; + + /* This is the number of primary references on our team + that must be removed when we continue looping. It is + used to keep the team around while processing final + brRELEASE and brDECREFS commands on objects inside it. */ + int m_teamRefs; + + /* Did the driver spawn this thread? */ + bool m_isSpawned : 1; + + /* Is this thread running as a looper? */ + bool m_isLooping : 1; + + /* For driver spawned threads: first time looping? */ + bool m_firstLoop : 1; + + /* Set if thread has determined an immediate reply for a + bcATTEMPT_ACQUIRE. 
In this case, 'short' is true and + 'result' is whether it succeeded. */ + bool m_shortAttemptAcquire : 1; + bool m_resultAttemptAcquire : 1; + + /* Set if this thread structure has been initialized to + reply with a root object to its parent thread. */ + bool m_pendingReplyIsRoot : 1; + + /*! Set if this thread had an error when trying to + receive a child's root reply, to return the result + at the next Read(). */ + bool m_failedRootReceive : 1; + + /* Set if this thread tried to send a root object, but + timed out. */ + bool m_failedRootReply : 1; +} binder_thread_t; + +int binder_thread_GlobalCount(void); + +binder_thread_t * binder_thread_init(pid_t thid, struct binder_proc *team); +void binder_thread_destroy(binder_thread_t *that); + +void binder_thread_Released(binder_thread_t *that); + +void binder_thread_Die(binder_thread_t *that); + +BND_DECLARE_ACQUIRE_RELEASE(binder_thread); +BND_DECLARE_ATTEMPT_ACQUIRE(binder_thread); + +/* Attach parent thread to this thread. The child is set up as if it had + received a transaction, and the first thing it should do is send a reply + that will go back to the parent. This is for bcRETRIEVE_ROOT_OBJECT. */ +bool binder_thread_SetParentThread(binder_thread_t *that, binder_thread_t *replyTo); + +/* Clear the pendingChild field when we have received the reply. */ +void binder_thread_ReleasePendingChild(binder_thread_t *that); + +/* When binder_thread_SetParentThread() is used to wait for the child thread + to send its root object, we can create a binder_thread structure that is + not attached to a binder_proc. This function is called when the child + thread finally gets into the driver, to get its pre-created thread + structure attached to its new process structure. 
*/ +void binder_thread_AttachProcess(binder_thread_t *that, struct binder_proc *team); + +/* Calls from binder_proc_t to block until new requests arrive */ +status_t binder_thread_Snooze(binder_thread_t *that, bigtime_t wakeupTime); +status_t binder_thread_AcquireIOSem(binder_thread_t *that); +void binder_thread_Wakeup(binder_thread_t *that); + +/* Returning transactions -- reflections and the final reply */ +void binder_thread_Reply(binder_thread_t *that, struct binder_transaction *t); +void binder_thread_Reflect(binder_thread_t *that, struct binder_transaction *t); + +/* Reply that the target is no longer with us. */ +void binder_thread_ReplyDead(binder_thread_t *that); + +bool binder_thread_AttemptExecution(binder_thread_t *that, struct binder_transaction *t); +void binder_thread_FinishAsync(binder_thread_t *that, struct binder_transaction *t); +void binder_thread_Sync(binder_thread_t *that); + +#define binder_thread_Thid(that) ((that)->m_thid) +#define binder_thread_Team(that) ((that)->m_team) + +#define binder_thread_VirtualThid(that) ((that)->virtualThid) + +#define binder_thread_PrimaryRefCount(that) atomic_read(&(that)->m_primaryRefs) +#define binder_thread_SecondaryRefCount(that) atomic_read(&(that)->m_secondaryRefs) + +int binder_thread_Control(binder_thread_t *that, unsigned int cmd, void *buffer); +int binder_thread_Write(binder_thread_t *that, void *buffer, int size, signed long *consumed); +int binder_thread_Read(binder_thread_t *that, void *buffer, int size, signed long *consumed); + +#define binder_thread_Reflect(that, t) binder_thread_Reply(that, t) + +#endif // BINDER_THREAD_H diff -Nru linux-2.6.23/drivers/binder/binder_transaction.c kernel.android/drivers/binder/binder_transaction.c --- linux-2.6.23/drivers/binder/binder_transaction.c 1970-01-01 10:00:00.000000000 +1000 +++ kernel.android/drivers/binder/binder_transaction.c 2007-11-12 07:49:02.000000000 +1100 @@ -0,0 +1,541 @@ +/* binder driver + * Copyright (C) 2005 Palmsource, Inc. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + +#include "binder_defs.h" +#include "binder_transaction.h" +#include "binder_proc.h" +#include "binder_thread.h" +#include "binder_node.h" +#include +#include +#include + +static int binder_transaction_print_size = 32 * 1024; +static int binder_transaction_fail_size = 16 * 1024 * 1024; +module_param_named(warn_transaction_size, binder_transaction_print_size, int, 0644); +module_param_named(max_transaction_size, binder_transaction_fail_size, int, 0644); + +#define PURGATORY 0 +#if PURGATORY +static DECLARE_MUTEX(sem); +static binder_transaction_t* head = NULL; +static binder_transaction_t** tail = &head; +static int count = 0; + +static void my_free_trans(binder_transaction_t *t) +{ + down(&sem); + *tail = t; + tail = &t->next; + count++; + if (count > 20) { + t = head; + head = head->next; + kmem_cache_free(transaction_cache, t); + count--; + } + up(&sem); +} +#define ALLOC_TRANS kmem_cache_alloc(transaction_cache, GFP_KERNEL) +#define FREE_TRANS(x) my_free_trans(x) +#else +#define ALLOC_TRANS kmem_cache_alloc(transaction_cache, GFP_KERNEL) +#define FREE_TRANS(x) kmem_cache_free(transaction_cache, x) +#endif + +void binder_transaction_dtor(binder_transaction_t *that); + +void binder_transaction_Init(binder_transaction_t *that); +void 
binder_transaction_debug_dump(binder_transaction_t *that); + +status_t +binder_transaction_ConvertToNodes(binder_transaction_t *that, binder_proc_t *from, iobuffer_t *io) +{ + DPRINTF(4, (KERN_WARNING "%s(%p, %p, %p)\n", __func__, that, from, io)); + if (binder_transaction_RefFlags(that)) return 0; + + if (that->team != from) { + BND_ACQUIRE(binder_proc, from, WEAK, that); + if (that->team) BND_RELEASE(binder_proc, that->team, WEAK, that); + that->team = from; + } + + if (that->offsets_size > 0) { + u8 *ptr = binder_transaction_Data(that); + const size_t *off = binder_transaction_Offsets(that); //(const size_t*)(ptr + INT_ALIGN(that->data_size)); + const size_t *offEnd = off + (that->offsets_size/sizeof(size_t)); + struct flat_binder_object* flat; + + // This function is called before any references have been acquired. + BND_ASSERT((that->flags&tfReferenced) == 0, "ConvertToNodes() already called!"); + that->flags |= tfReferenced; + + BND_FLUSH_CACHE( binder_transaction_UserData(that), + binder_transaction_UserOffsets(that) + + binder_transaction_OffsetsSize(that) ); + + while (off < offEnd) { + bool strong = TRUE; + BND_ASSERT(*off <= (that->data_size-sizeof(struct flat_binder_object)), "!!! ConvertToNodes: type code pointer out of range."); + flat = (struct flat_binder_object*)(ptr + *off++); + switch (flat->type) { + case kPackedLargeBinderHandleType: + DPRINTF(5,(KERN_WARNING "ConvertToNodes B_BINDER_HANDLE_TYPE %ld\n",flat->handle)); + // Retrieve node and acquire reference. + flat->node = binder_proc_Descriptor2Node(from, flat->handle,that, STRONG); + break; + case kPackedLargeBinderType: + DPRINTF(5,(KERN_WARNING "ConvertToNodes B_BINDER_TYPE %p\n",flat->binder)); + // Lookup node and acquire reference. 
+ if (binder_proc_Ptr2Node(from, flat->binder,flat->cookie,&flat->node,io,that, STRONG) != 0) return -EINVAL; + if (binder_transaction_IsRootObject(that)) { + DPRINTF(5,(KERN_WARNING "Making node %p a root node\n", flat->node)); + binder_proc_SetRootObject(from, flat->node); + } + break; + case kPackedLargeBinderWeakHandleType: + DPRINTF(5,(KERN_WARNING "ConvertToNodes B_BINDER_WEAK_HANDLE_TYPE %ld\n",flat->handle)); + // Retrieve node and acquire reference. + flat->node = binder_proc_Descriptor2Node(from, flat->handle,that,WEAK); + strong = FALSE; + break; + case kPackedLargeBinderWeakType: + DPRINTF(5,(KERN_WARNING "ConvertToNodes B_BINDER_WEAK_TYPE %p\n",flat->binder)); + // Lookup node and acquire reference. + if (binder_proc_Ptr2Node(from, flat->binder,flat->cookie,&flat->node,io,that,WEAK) != 0) return -EINVAL; + strong = FALSE; + break; + default: + BND_ASSERT(FALSE, "Bad binder offset given to transaction!"); + DPRINTF(0, (KERN_WARNING "ConvertToNodes: unknown typecode %08lx, off: %p, offEnd: %p\n", flat->type, off, offEnd)); + BND_FLUSH_CACHE(ptr, offEnd); + return -EINVAL; + } + flat->type = strong ? kPackedLargeBinderNodeType : kPackedLargeBinderWeakNodeType; + } + BND_FLUSH_CACHE(ptr, offEnd); + } + + return 0; +} + +status_t +binder_transaction_ConvertFromNodes(binder_transaction_t *that, binder_proc_t *to) +{ + u8 *ptr; + size_t *off; + size_t *offEnd; + DPRINTF(4, (KERN_WARNING "%s(%p, %p)\n", __func__, that, to)); + if (binder_transaction_RefFlags(that)) return 0; + + if (that->team != to) { + BND_ACQUIRE(binder_proc, to, WEAK, that); + if (that->team) BND_RELEASE(binder_proc, that->team, WEAK, that); + that->team = to; + } + + if (that->offsets_size > 0) { + // This function is called after references have been acquired. 
+ BND_ASSERT((that->flags&tfReferenced) != 0, "ConvertToNodes() not called!"); + + ptr = binder_transaction_Data(that); + off = binder_transaction_Offsets(that); //(const size_t*)(ptr + INT_ALIGN(that->data_size)); + offEnd = off + (that->offsets_size/sizeof(size_t)); + struct flat_binder_object* flat; + + BND_FLUSH_CACHE( binder_transaction_UserData(that), + binder_transaction_UserOffsets(that) + + binder_transaction_OffsetsSize(that) ); + while (off < offEnd) { + flat = (struct flat_binder_object*)(ptr + *off++); + binder_node_t *n = flat->node; + if (flat->type == kPackedLargeBinderNodeType) { + if (!n) { + flat->type = kPackedLargeBinderType; + flat->binder = NULL; + flat->cookie = NULL; + } else if (n->m_home == to) { + flat->type = kPackedLargeBinderType; + flat->binder = binder_node_Ptr(n); + flat->cookie = binder_node_Cookie(n); + // Keep a reference on the node so that it doesn't + // go away until this transaction completes. + } else { + flat->type = kPackedLargeBinderHandleType; + flat->handle = binder_proc_Node2Descriptor(to, n, TRUE, STRONG); + flat->cookie = NULL; + // We now have a reference on the node through the + // target team's descriptor, so remove our own ref. + BND_RELEASE(binder_node, n, STRONG, that); + } + } else if (flat->type == kPackedLargeBinderWeakNodeType) { + if (!n) { + flat->type = kPackedLargeBinderWeakType; + flat->binder = NULL; + flat->cookie = NULL; + } else if (n->m_home == to) { + flat->type = kPackedLargeBinderWeakType; + flat->binder = binder_node_Ptr(n); + flat->cookie = binder_node_Cookie(n); + // Keep a reference on the node so that it doesn't + // go away until this transaction completes. + } else { + flat->type = kPackedLargeBinderWeakHandleType; + flat->handle = binder_proc_Node2Descriptor(to, n, TRUE, WEAK); + flat->cookie = NULL; + // We now have a reference on the node through the + // target team's descriptor, so remove our own ref. 
+ BND_RELEASE(binder_node, n, WEAK, that); + } + } else { + BND_ASSERT(FALSE, "Bad binder offset given to transaction!"); + DPRINTF(0, (KERN_WARNING "ConvertToNodes: unknown typecode %08lx, off: %p, offEnd: %p\n", flat->type, off, offEnd)); + BND_FLUSH_CACHE(ptr, offEnd); + return -EINVAL; + } + } + BND_FLUSH_CACHE(ptr, offEnd); + } + + return 0; +} + +void +binder_transaction_ReleaseTarget(binder_transaction_t *that) +{ + DPRINTF(4, (KERN_WARNING "%s(%p)\n", __func__, that)); + if (that->sender) { + DPRINTF(5, (KERN_WARNING "%s(%p) release sender %p\n", __func__, that, that->sender)); + BND_RELEASE(binder_thread, that->sender, WEAK, that); + that->sender = NULL; + } + if (that->receiver) { + DPRINTF(5, (KERN_WARNING "%s(%p) release receiver %p\n", __func__, that, that->receiver)); + BND_RELEASE(binder_thread, that->receiver, WEAK, that); + that->receiver = NULL; + } + + if (that->target) { + DPRINTF(5, (KERN_WARNING "%s(%p) release target %p\n", __func__, that, that->target)); + BND_RELEASE(binder_node, that->target, binder_transaction_RefFlags(that) == tfAttemptAcquire ? WEAK : STRONG,that); + that->target = NULL; + } + DPRINTF(4, (KERN_WARNING "%s(%p) fini\n", __func__, that)); +} + +void +binder_transaction_ReleaseTeam(binder_transaction_t *that) +{ + DPRINTF(4, (KERN_WARNING "%s(%p), team: %p\n", __func__, that, that->team)); + + if (that->team) { + BND_RELEASE(binder_proc, that->team, binder_transaction_RefFlags(that) ? STRONG : WEAK, that); + that->team = NULL; + } +} + +size_t +binder_transaction_MaxIOToNodes(binder_transaction_t *that) +{ + DPRINTF(4, (KERN_WARNING "%s(%p): %d\n", __func__, that, (that->offsets_size/8)*16)); + // Each offsets entry is 4 bytes, and could result in 24 bytes + // being written. (To be more accurate, we could actually look + // at the offsets and only include the ones that are a + // B_BINDER_TYPE or B_BINDER_WEAK_TYPE.) 
+ return (that->offsets_size/4)*24; +} + +binder_proc_t * +binder_transaction_TakeTeam(binder_transaction_t *that, binder_proc_t * me) +{ + binder_proc_t *ret; + DPRINTF(4, (KERN_WARNING "%s(%p, %p)\n", __func__, that, me)); + if (that->team != me || binder_transaction_RefFlags(that)) return NULL; + + ret = that->team; + that->team = NULL; + return ret; +} + +binder_transaction_t* +binder_transaction_CreateRef(u16 refFlags, void *ptr, void *cookie, binder_proc_t *team) +{ + binder_transaction_t* that = ALLOC_TRANS; + DPRINTF(4, (KERN_WARNING "%s(%04x, %p, %p): %p\n", __func__, refFlags, ptr, team, that)); + if (that) { + binder_transaction_Init(that); + BND_ASSERT((refFlags&(~tfRefTransaction)) == 0 && (refFlags&tfRefTransaction) != 0, + "Bad flags to binder_transaction::create_ref()"); + that->flags |= refFlags; + that->data_ptr = ptr; + that->offsets_ptr = cookie; + if (team) { + that->team = team; + BND_ACQUIRE(binder_proc, that->team, STRONG, that); + } + } + return that; +} + +binder_transaction_t* +binder_transaction_Create(u32 _code, size_t _dataSize, const void *_data, size_t _offsetsSize, const void *_offsetsData) +{ + binder_transaction_t* that = ALLOC_TRANS; + DPRINTF(4, (KERN_WARNING "%s(%08x, %u:%p, %u:%p): %p\n", __func__, _code, _dataSize, _data, _offsetsSize, _offsetsData, that)); + if (that) { + binder_transaction_Init(that); + that->code = _code; + BND_ASSERT(_dataSize == 0 || _data != NULL, "Transaction with dataSize > 0, but NULL data!"); + if (_dataSize && _data) { + that->data_size = _dataSize; + that->data_ptr = _data; + BND_ASSERT(_offsetsSize == 0 || _offsetsData != NULL, "Transaction with offsetsSize > 0, but NULL offsets!"); + if (_offsetsSize && _offsetsData) { + that->offsets_size = _offsetsSize; + that->offsets_ptr = _offsetsData; + } + } + } + return that; +} + +binder_transaction_t* binder_transaction_CreateEmpty(void) +{ + binder_transaction_t* that = ALLOC_TRANS; + DPRINTF(4, (KERN_WARNING "%s(void): %p\n", __func__, that)); + if 
(that) binder_transaction_Init(that); + return that; +} + +void binder_transaction_Destroy(binder_transaction_t *that) +{ + DPRINTF(4, (KERN_WARNING "%s(%p)\n", __func__, that)); + if (that) { + binder_transaction_dtor(that); + } +} + +void binder_transaction_DestroyNoRefs(binder_transaction_t *that) +{ + DPRINTF(4, (KERN_WARNING "%s(%p)\n", __func__, that)); + if (that) { + that->offsets_size = 0; + binder_transaction_dtor(that); + } +} + +void binder_transaction_Init(binder_transaction_t *that) +{ + that->next = NULL; + that->target = NULL; + that->sender = NULL; + that->receiver = NULL; + + that->code = 0; + that->team = NULL; + that->flags = 0; + that->priority = B_NORMAL_PRIORITY; // FIXME? + that->data_size = 0; + that->offsets_size = 0; + that->data_ptr = NULL; + that->offsets_ptr = NULL; + + that->map = NULL; +} + +void +binder_transaction_dtor(binder_transaction_t *that) +{ + binder_proc_t *owner = NULL; + DPRINTF(4, (KERN_WARNING "%s(%p)\n", __func__, that)); + if (that->offsets_size > 0) { + DPRINTF(5, (KERN_WARNING " -- have binders to clean up\n")); + if(that->flags & tfReferenced) { + BND_ASSERT((that->map) != NULL, "binder_transaction_dtor that->map == NULL"); + } + else { + DPRINTF(0, (KERN_WARNING "ConvertToNodes() not called on %p! 
that->map == %p\n", that, that->map)); + BND_ASSERT((that->map) == NULL, "binder_transaction_dtor ConvertToNodes() not called and that->map != NULL"); + } + if (that->team && BND_ATTEMPT_ACQUIRE(binder_proc, that->team, STRONG, that)) owner = that->team; + + DPRINTF(5, (KERN_WARNING " -- that->map == %p\n", that->map)); + if(that->map != NULL) { // avoid crash due to corrupt transaction + u8 *ptr = 0; + const size_t *off; + const size_t *offEnd; + struct flat_binder_object* flat; + + ptr = binder_transaction_Data(that); + off = (const size_t*)binder_transaction_Offsets(that); + offEnd = off + (that->offsets_size/sizeof(size_t)); + + BND_FLUSH_CACHE( binder_transaction_UserData(that), + binder_transaction_UserOffsets(that) + + binder_transaction_OffsetsSize(that) ); + while (off < offEnd) { + DPRINTF(9, (KERN_WARNING "type ptr: %p\n", ptr+*off)); + flat = (struct flat_binder_object*)(ptr + *off++); + DPRINTF(9, (KERN_WARNING " type: %08lx\n", flat->type)); + switch (flat->type) { + case kPackedLargeBinderHandleType: + DPRINTF(9, (KERN_WARNING "Delete binder_transaction B_BINDER_HANDLE_TYPE %ld\n",flat->handle)); + // Only call if there are primary references on the team. + // Otherwise, it has already removed all of its handles. + if (owner) binder_proc_UnrefDescriptor(owner, flat->handle, STRONG); + break; + case kPackedLargeBinderType: + // Only do this if there are primary references on the team. + // The team doesn't go away until all published binders are + // removed; after that, there are no references to remove. 
+ if (owner) { + binder_node_t *n; + if (binder_proc_Ptr2Node(owner, flat->binder,flat->cookie,&n,NULL,that, STRONG) == 0) { + if (n) { + BND_RELEASE(binder_node, n, STRONG,that); // once for the grab we just did + BND_RELEASE(binder_node, n, STRONG,that); // and once for the reference this transaction holds + } + } else { + BND_ASSERT(FALSE, "Can't find node!"); + } + } + break; + case kPackedLargeBinderNodeType: + if (flat->node) BND_RELEASE(binder_node, flat->node, STRONG,that); + break; + case kPackedLargeBinderWeakHandleType: + DPRINTF(9, (KERN_WARNING "Delete binder_transaction B_BINDER_HANDLE_TYPE %ld\n",flat->handle)); + // Only call if there are primary references on the team. + // Otherwise, it has already removed all of its handles. + if (owner) binder_proc_UnrefDescriptor(owner, flat->handle, WEAK); + break; + case kPackedLargeBinderWeakType: + // Only do this if there are primary references on the team. + // The team doesn't go away until all published binders are + // removed; after that, there are no references to remove. + if (owner) { + binder_node_t *n; + if (binder_proc_Ptr2Node(owner, flat->binder,flat->cookie,&n,NULL,that,WEAK) == 0) { + if (n) { + BND_RELEASE(binder_node, n, WEAK,that); // once for the grab we just did + BND_RELEASE(binder_node, n, WEAK,that); // and once for the reference this transaction holds + } + } else { + BND_ASSERT(FALSE, "Can't find node!"); + } + } + break; + case kPackedLargeBinderWeakNodeType: + if (flat->node) BND_RELEASE(binder_node, flat->node, WEAK,that); + break; + } + } + BND_FLUSH_CACHE(ptr, offEnd); + } + } + + // release the RAM and address space in the receiver. 
+ if (that->map) { + binder_proc_t* mapProc = that->map->team; + if (mapProc) { + binder_proc_FreeTransactionBuffer(mapProc, that->map); + BND_RELEASE(binder_proc, mapProc, WEAK, that); + } + else printk(KERN_WARNING "%s(%p) -- no team trying to release map %p\n", __func__, that, that->map); + } + + if (owner) BND_RELEASE(binder_proc, owner, STRONG,that); + + binder_transaction_ReleaseTeam(that); + binder_transaction_ReleaseTarget(that); + + // release the RAM + FREE_TRANS(that); +} + +/* We need the recipient team passed in because we can't always know the + * receiver at this point. */ +status_t +binder_transaction_CopyTransactionData(binder_transaction_t *that, binder_proc_t *recipient) +{ + status_t result = -EINVAL; + size_t tSize = INT_ALIGN(that->data_size) + INT_ALIGN(that->offsets_size); + DPRINTF(0, (KERN_WARNING "%s(%p, %p)\n", __func__, that, recipient)); + // Do we need to ensure that->map contains NULL? What do we do if it doesn't? + if(tSize >= binder_transaction_print_size) { + printk(KERN_WARNING "%s-%d: binder_transaction_CopyTransactionData size %d (%d,%d) to %p, reply=%d\n", + current->comm, current->pid, tSize, that->data_size, that->offsets_size, recipient, binder_transaction_IsReply(that)); + } + if (binder_transaction_IsAcquireReply(that)) { + // No data to copy + result = 0; + } else { + // if (tSize >= sizeof(that->data)) { + if(tSize >= binder_transaction_fail_size) { + printk(KERN_ERR "%s-%d: binder_transaction_CopyTransactionData transaction size too big, size %d (%d,%d) to %p\n", + current->comm, current->pid, tSize, that->data_size, that->offsets_size, recipient); + return result; + } + that->map = binder_proc_AllocateTransactionBuffer(recipient, tSize); + if (that->map) { + BND_ACQUIRE(binder_proc, that->map->team, WEAK, that); + + // locate our kernel-space address + u8 *to = page_address(that->map->page); + size_t not_copied; + // copy the data from user-land + BND_FLUSH_CACHE( binder_transaction_UserData(that), + 
binder_transaction_UserData(that) + tSize ); + not_copied = copy_from_user(to, that->data_ptr, that->data_size); + // and the offsets, too + if ((not_copied == 0) && (that->offsets_size != 0)) { + to += INT_ALIGN(that->data_size); + not_copied = copy_from_user(to, that->offsets_ptr, that->offsets_size); + if (not_copied) { + DPRINTF(0, (KERN_WARNING " -- failed to copy %u of %u bytes of offsets from %p to %p\n", not_copied, that->offsets_size, that->offsets_ptr, to)); + } + } else if (not_copied) { + // BUSTED! + DPRINTF(0, (KERN_WARNING " -- Couldn't copy %u of %u bytes from user-land %p to %p\n", not_copied, that->data_size, that->data_ptr, to)); + } + DPRINTF(4, ("Copied transaction %p: data=%p, size=%p, not_copied=%p\n", + that, binder_transaction_Data(that), + binder_transaction_DataSize(that), + not_copied)); + if (binder_transaction_DataSize(that) > 0) { + DPRINTF(4, ("Copied transaction %p: my_first=%p, user_first=%p\n", + that, + (*(u32*)binder_transaction_Data(that)), + (*(u32*)that->data_ptr))); + } + BND_FLUSH_CACHE( binder_transaction_Data(that), + binder_transaction_Data(that) + tSize ); + result = not_copied ? -EFAULT : 0; + } + else { + DPRINTF(0, (KERN_WARNING "binder_transaction_CopyTransactionData() failed to allocate transaction buffer in %p\n", recipient)); + } + // } else { + // // ignore inlined data for now + // printk(KERN_WARNING "Small transaction in binder_transaction_CopyTransactionData\n"); + // binder_transaction_SetInline(that, TRUE); + // } + } + return result; +} + diff -Nru linux-2.6.23/drivers/binder/binder_transaction.h kernel.android/drivers/binder/binder_transaction.h --- linux-2.6.23/drivers/binder/binder_transaction.h 1970-01-01 10:00:00.000000000 +1000 +++ kernel.android/drivers/binder/binder_transaction.h 2007-11-12 07:49:02.000000000 +1100 @@ -0,0 +1,127 @@ +/* binder driver + * Copyright (C) 2005 Palmsource, Inc. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + +#ifndef BINDER_TRANSACTION_H +#define BINDER_TRANSACTION_H + +#include "binder_defs.h" +#include "iobuffer.h" +#include // for page_address() + +enum { + tfUserFlags = 0x000F, + + tfIsReply = 0x0100, + tfIsEvent = 0x0200, + tfIsAcquireReply = 0x0400, + tfIsDeadReply = 0x0800, + tfIsFailedReply = 0x0020, + tfIsFreePending = 0x0040, + + tfAttemptAcquire = 0x1000, + tfRelease = 0x2000, + tfDecRefs = 0x3000, + tfRefTransaction = 0xF000, + + tfReferenced = 0x0080 +}; + +typedef struct binder_transaction { + struct binder_transaction * next; // next in the transaction queue + struct binder_node * target; // the receiving binder + struct binder_thread * sender; // the sending thread + struct binder_thread * receiver; // the receiving thread + + u32 code; + struct binder_proc * team; // do we need this? Won't sender or receiver's m_team do? + u16 flags; + s16 priority; + size_t data_size; + size_t offsets_size; + const void * data_ptr; + const void * offsets_ptr; + + // The pointer to the actual transaction data. The binder offsets appear + // at (mapped address + data_size). 
+ struct range_map * map; + // 12 bytes of inlined transaction data: just enough for one binder (type, ptr/descriptor, offset) + u8 data[12]; +} binder_transaction_t; + +binder_transaction_t* binder_transaction_CreateRef(u16 refFlags, void *ptr, void *cookie, struct binder_proc* team /* = NULL */); +binder_transaction_t* binder_transaction_Create(u32 code, size_t dataSize, const void *data, size_t offsetsSize /* = 0 */, const void *offsetsData /* = NULL */); +binder_transaction_t* binder_transaction_CreateEmpty(void); +void binder_transaction_Destroy(binder_transaction_t *that); +/* Call this to destroy a transaction before you have called + ConvertToNodes() on it. This will avoid releasing references + on any nodes in the transaction, which you haven't yet acquired. */ +void binder_transaction_DestroyNoRefs(binder_transaction_t *that); +/* Converts from user-types to kernel-nodes */ +status_t binder_transaction_ConvertToNodes(binder_transaction_t *that, struct binder_proc *from, iobuffer_t *io); +/* Converts from kernel-nodes to user-types */ +status_t binder_transaction_ConvertFromNodes(binder_transaction_t *that, struct binder_proc *to); +void binder_transaction_ReleaseTarget(binder_transaction_t *that); +void binder_transaction_ReleaseTeam(binder_transaction_t *that); + +/* Return the maximum IO bytes that will be written by + ConvertToNodes(). */ +size_t binder_transaction_MaxIOToNodes(binder_transaction_t *that); + +/* If this transaction has a primary reference on its team, + return it and clear the pointer. You now own the reference. 
*/ +struct binder_proc * binder_transaction_TakeTeam(binder_transaction_t *that, struct binder_proc *me); +status_t binder_transaction_CopyTransactionData(binder_transaction_t *that, struct binder_proc *recipient); + +#define INT_ALIGN(x) (((x)+sizeof(int)-1)&~(sizeof(int)-1)) +#define binder_transaction_Data(that) ((u8*)page_address((that)->map->page)) +#define binder_transaction_UserData(that) ((void*)((that)->map->start)) +#define binder_transaction_DataSize(that) ((that)->data_size) +#define binder_transaction_Offsets(that) ((size_t*)(binder_transaction_Data(that)+INT_ALIGN((that)->data_size))) +#define binder_transaction_UserOffsets(that) ((void*)((that)->map->start + INT_ALIGN((that)->data_size))) +#define binder_transaction_OffsetsSize(that) ((that)->offsets_size) + +#define binder_transaction_UserFlags(that) ((that)->flags & tfUserFlags) +#define binder_transaction_RefFlags(that) ((that)->flags & tfRefTransaction) +#define binder_transaction_IsInline(that) ((that)->flags & tfInline) +#define binder_transaction_IsRootObject(that) ((that)->flags & tfRootObject) +#define binder_transaction_IsReply(that) ((that)->flags & tfIsReply) +#define binder_transaction_IsEvent(that) ((that)->flags & tfIsEvent) +#define binder_transaction_IsAcquireReply(that) ((that)->flags & tfIsAcquireReply) +#define binder_transaction_IsDeadReply(that) ((that)->flags & tfIsDeadReply) +#define binder_transaction_IsFailedReply(that) ((that)->flags & tfIsFailedReply) +#define binder_transaction_IsAnyReply(that) ((that)->flags & (tfIsReply|tfIsAcquireReply|tfIsDeadReply)) +#define binder_transaction_IsFreePending(that) ((that)->flags & tfIsFreePending) +#define binder_transaction_IsReferenced(that) ((that)->flags & tfReferenced) + +#define binder_transaction_SetUserFlags(that, f) { (that)->flags = ((that)->flags&(~tfUserFlags)) | (f&tfUserFlags); } +#define binder_transaction_SetInline(that, f) { if (f) (that)->flags |= tfInline; else (that)->flags &= ~tfInline; } +#define 
binder_transaction_SetRootObject(that, f) { if (f) (that)->flags |= tfRootObject; else (that)->flags &= ~tfRootObject; } +#define binder_transaction_SetReply(that, f) { if (f) (that)->flags |= tfIsReply; else (that)->flags &= ~tfIsReply; } +#define binder_transaction_SetDeadReply(that, f) { if (f) (that)->flags |= tfIsDeadReply; else (that)->flags &= ~tfIsDeadReply; } +#define binder_transaction_SetFailedReply(that, f) { if (f) (that)->flags |= tfIsFailedReply; else (that)->flags &= ~tfIsFailedReply; } +#define binder_transaction_SetEvent(that, f) { if (f) (that)->flags |= tfIsEvent; else (that)->flags &= ~tfIsEvent; } +#define binder_transaction_SetAcquireReply(that, f) { if (f) (that)->flags |= tfIsAcquireReply; else (that)->flags &= ~tfIsAcquireReply; } +#define binder_transaction_SetFreePending(that, f) { if (f) (that)->flags |= tfIsFreePending; else (that)->flags &= ~tfIsFreePending; } + +#define binder_transaction_Code(that) ((that)->code) + +#define binder_transaction_Priority(that) ((that)->priority) +#define binder_transaction_SetPriority(that, pri) { (that)->priority = pri; } + + +#endif diff -Nru linux-2.6.23/drivers/binder/iobuffer.c kernel.android/drivers/binder/iobuffer.c --- linux-2.6.23/drivers/binder/iobuffer.c 1970-01-01 10:00:00.000000000 +1000 +++ kernel.android/drivers/binder/iobuffer.c 2007-11-12 07:49:02.000000000 +1100 @@ -0,0 +1,112 @@ +/* binder driver + * Copyright (C) 2005 Palmsource, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + +#include "iobuffer.h" +#include "binder_defs.h" +#include + +int iobuffer_init(iobuffer_t *that, unsigned long base, int size, int consumed) { + // require 4 byte alignment for base + if ((base & 0x3) != 0) printk(KERN_WARNING "iobuffer_init() bad buffer alignment\n"); + if ((base & 0x3) != 0) return -EFAULT; + if (!access_ok(VERIFY_WRITE, base, size)) printk(KERN_WARNING "access_ok(): FALSE\n"); + if (!access_ok(VERIFY_WRITE, base, size)) return -EFAULT; + DPRINTF(9, (KERN_WARNING "iobuffer_init(%p, %08lx, %d)\n", that, base, size)); + that->m_base = base; + that->m_size = size; + that->m_offs = that->m_consumed = consumed; + return 0; +} + +int iobuffer_read_raw(iobuffer_t *that, void *data, int size) +{ + if ((that->m_size-that->m_offs) < size) return -EFAULT; + copy_from_user(data, (void*)(that->m_base+that->m_offs), size); + that->m_offs += size; + return 0; +} + +int iobuffer_read_u32(iobuffer_t *that, u32 *data) +{ + if ((that->m_size-that->m_offs) < sizeof(u32)) return -EFAULT; + copy_from_user(data, (void*)(that->m_base+that->m_offs), sizeof(u32)); + that->m_offs += sizeof(u32); + return 0; +} + +int iobuffer_read_void(iobuffer_t *that, void **data) +{ + if ((that->m_size-that->m_offs) < sizeof(void*)) return -EFAULT; + copy_from_user(data, (void*)(that->m_base+that->m_offs), sizeof(void*)); + that->m_offs += sizeof(void*); + return 0; +} + +int iobuffer_write_raw(iobuffer_t *that, const void *data, int size) +{ + if ((that->m_size-that->m_offs) < size) return -EFAULT; + copy_to_user((void*)(that->m_base+that->m_offs), data, size); + that->m_offs += size; + return 0; +} + +int iobuffer_write_u32(iobuffer_t *that, u32 data) +{ + if ((that->m_size-that->m_offs) < sizeof(u32)) return -EFAULT; + // *((u32*)(that->m_base+that->m_offs)) = 
data; + __put_user(data, ((u32*)(that->m_base+that->m_offs))); + that->m_offs += sizeof(u32); + return 0; +} + +int iobuffer_write_void(iobuffer_t *that, const void *data) +{ + if ((that->m_size-that->m_offs) < sizeof(void *)) return -EFAULT; + // *((void **)(that->m_base+that->m_offs)) = data; + __put_user(data, ((void**)(that->m_base+that->m_offs))); + that->m_offs += sizeof(void*); + return 0; +} + +int iobuffer_drain(iobuffer_t *that, int size) { + if (size > (that->m_size-that->m_offs)) size = that->m_size-that->m_offs; + that->m_offs += size; + return size; +} + +int iobuffer_remaining(iobuffer_t *that) +{ + return that->m_size-that->m_offs; +} + +int iobuffer_consumed(iobuffer_t *that) +{ + return that->m_consumed; +} + +void iobuffer_mark_consumed(iobuffer_t *that) +{ + that->m_consumed = that->m_offs; +} + +void iobuffer_remainder(iobuffer_t *that, void **ptr, int *size) +{ + *ptr = ((uint8_t*)that->m_base)+that->m_offs; + *size = that->m_size - that->m_offs; +} + diff -Nru linux-2.6.23/drivers/binder/iobuffer.h kernel.android/drivers/binder/iobuffer.h --- linux-2.6.23/drivers/binder/iobuffer.h 1970-01-01 10:00:00.000000000 +1000 +++ kernel.android/drivers/binder/iobuffer.h 2007-11-12 07:49:02.000000000 +1100 @@ -0,0 +1,44 @@ +/* binder driver + * Copyright (C) 2005 Palmsource, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + +#ifndef _IOBUFFER_H_ +#define _IOBUFFER_H_ + +#include + +typedef struct iobuffer { + unsigned long m_base; + int m_offs; + int m_size; + int m_consumed; +} iobuffer_t; + +extern int iobuffer_init(iobuffer_t *that, unsigned long base, int size, int consumed); +extern int iobuffer_read_raw(iobuffer_t *that, void *data, int size); +extern int iobuffer_read_u32(iobuffer_t *that, u32 *data); +extern int iobuffer_read_void(iobuffer_t *that, void **data); +extern int iobuffer_write_raw(iobuffer_t *that, const void *data, int size); +extern int iobuffer_write_u32(iobuffer_t *that, u32 data); +extern int iobuffer_write_void(iobuffer_t *that, const void *data); +extern int iobuffer_drain(iobuffer_t *that, int size); +extern int iobuffer_remaining(iobuffer_t *that); +extern int iobuffer_consumed(iobuffer_t *that); +extern void iobuffer_mark_consumed(iobuffer_t *that); +extern void iobuffer_remainder(iobuffer_t *that, void **ptr, int *size); + +#endif diff -Nru linux-2.6.23/drivers/binder/tester.c kernel.android/drivers/binder/tester.c --- linux-2.6.23/drivers/binder/tester.c 1970-01-01 10:00:00.000000000 +1000 +++ kernel.android/drivers/binder/tester.c 2007-11-12 07:49:02.000000000 +1100 @@ -0,0 +1,63 @@ +/* binder driver + * Copyright (C) 2005 Palmsource, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +typedef signed long sl_t; +typedef unsigned long ul_t; + +const sl_t cmd_write_limit = 1024; +const sl_t cmd_read_limit = 1024; + +int main(int argc, char **argv) { + int result; + binder_write_read_t bwr; + sl_t write_count = 0; + uint8_t *write_buf = malloc(cmd_write_limit); + uint8_t *read_buf = malloc(cmd_read_limit); + bwr.write_buffer = (ul_t)write_buf; + bwr.write_size = 0; + bwr.read_size = cmd_read_limit; + bwr.read_buffer = (ul_t)read_buf; + uint8_t *wb = write_buf; + + + int fd = open("/dev/binder", O_RDWR); + if (fd < 0) { + printf("Open failed: %s\n", strerror(errno)); + return -1; + } + *(ul_t*)wb = bcSET_CONTEXT_MANAGER; + bwr.write_size += sizeof(ul_t); + wb += sizeof(ul_t); + *(ul_t*)wb = bcENTER_LOOPER; + bwr.write_size += sizeof(ul_t); + result = ioctl(fd, BINDER_WRITE_READ, &bwr); + printf("ioctl(fd, BINDER_WRITE_READ, &bwr): %08x", result); + if (result < 0) printf(" %08x : %s", errno, strerror(errno)); + printf("\n"); + return 0; +} diff -Nru linux-2.6.23/drivers/char/Kconfig kernel.android/drivers/char/Kconfig --- linux-2.6.23/drivers/char/Kconfig 2007-10-10 06:31:38.000000000 +1000 +++ kernel.android/drivers/char/Kconfig 2007-11-12 07:49:02.000000000 +1100 @@ -1062,6 +1062,18 @@ depends on ISA || PCI default y +config GOLDFISH_TTY + tristate "Goldfish TTY Driver" + default n + help + TTY driver for Goldfish Virtual Platform. 
+ +config BINDER + tristate "OpenBinder IPC Driver" + default n + help + from openbinder.org + source "drivers/s390/char/Kconfig" endmenu --- linux-2.6.23/drivers/input/evdev.c 2007-10-10 06:31:38.000000000 +1000 +++ kernel.android/drivers/input/evdev.c 2007-11-12 07:49:02.000000000 +1100 @@ -20,6 +20,9 @@ #include #include #include +#ifdef CONFIG_ANDROID_POWER +#include +#endif struct evdev { int exist; @@ -40,10 +43,23 @@ struct fasync_struct *fasync; struct evdev *evdev; struct list_head node; +#ifdef CONFIG_ANDROID_POWER + android_suspend_lock_t suspend_lock; +#endif }; static struct evdev *evdev_table[EVDEV_MINORS]; +#ifdef CONFIG_ANDROID_POWER +static void do_gettimeofday_monotonic(struct timeval *tv) +{ + struct timespec ts; + ktime_get_ts(&ts); + tv->tv_sec = ts.tv_sec; + tv->tv_usec = ts.tv_nsec / 1000; +} +#endif + static void evdev_event(struct input_handle *handle, unsigned int type, unsigned int code, int value) { struct evdev *evdev = handle->private; @@ -52,7 +68,12 @@ if (evdev->grab) { client = evdev->grab; +#ifdef CONFIG_ANDROID_POWER + android_lock_suspend_auto_expire(&client->suspend_lock, 5 * HZ); + do_gettimeofday_monotonic(&client->buffer[client->head].time); +#else do_gettimeofday(&client->buffer[client->head].time); +#endif client->buffer[client->head].type = type; client->buffer[client->head].code = code; client->buffer[client->head].value = value; @@ -62,7 +83,12 @@ } else list_for_each_entry(client, &evdev->client_list, node) { +#ifdef CONFIG_ANDROID_POWER + android_lock_suspend_auto_expire(&client->suspend_lock, 5 * HZ); + do_gettimeofday_monotonic(&client->buffer[client->head].time); +#else do_gettimeofday(&client->buffer[client->head].time); +#endif client->buffer[client->head].type = type; client->buffer[client->head].code = code; client->buffer[client->head].value = value; @@ -121,6 +147,9 @@ input_close_device(&evdev->handle); put_device(&evdev->dev); +#ifdef CONFIG_ANDROID_POWER + 
android_uninit_suspend_lock(&client->suspend_lock); +#endif return 0; } @@ -148,6 +177,10 @@ goto err_put_evdev; } +#ifdef CONFIG_ANDROID_POWER + client->suspend_lock.name = "evdev"; + android_init_suspend_lock(&client->suspend_lock); +#endif client->evdev = evdev; list_add_tail(&client->node, &evdev->client_list); @@ -315,6 +348,10 @@ return -EFAULT; client->tail = (client->tail + 1) & (EVDEV_BUFFER_SIZE - 1); +#ifdef CONFIG_ANDROID_POWER + if(client->head == client->tail) + android_unlock_suspend(&client->suspend_lock); +#endif retval += evdev_event_size(); } diff -Nru linux-2.6.23/drivers/misc/Kconfig kernel.android/drivers/misc/Kconfig --- linux-2.6.23/drivers/misc/Kconfig 2007-10-10 06:31:38.000000000 +1000 +++ kernel.android/drivers/misc/Kconfig 2007-11-12 07:49:02.000000000 +1100 @@ -202,5 +202,14 @@ If you are not sure, say Y here. +config LOW_MEMORY_KILLER + tristate "Low Memory Killer" + ---help--- + Register processes to be killed when memory is low. + +config QEMU_TRACE + tristate "Virtual Device for QEMU tracing" + ---help--- + This is a virtual device for QEMU tracing. 
endif # MISC_DEVICES diff -Nru linux-2.6.23/drivers/misc/Makefile kernel.android/drivers/misc/Makefile --- linux-2.6.23/drivers/misc/Makefile 2007-10-10 06:31:38.000000000 +1000 +++ kernel.android/drivers/misc/Makefile 2007-11-12 07:49:02.000000000 +1100 @@ -15,3 +15,5 @@ obj-$(CONFIG_SONY_LAPTOP) += sony-laptop.o obj-$(CONFIG_THINKPAD_ACPI) += thinkpad_acpi.o obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o +obj-$(CONFIG_LOW_MEMORY_KILLER) += lowmemorykiller/ +obj-$(CONFIG_QEMU_TRACE) += qemutrace/ diff -Nru linux-2.6.23/drivers/misc/lowmemorykiller/Makefile kernel.android/drivers/misc/lowmemorykiller/Makefile --- linux-2.6.23/drivers/misc/lowmemorykiller/Makefile 1970-01-01 10:00:00.000000000 +1000 +++ kernel.android/drivers/misc/lowmemorykiller/Makefile 2007-11-12 07:49:02.000000000 +1100 @@ -0,0 +1 @@ +obj-$(CONFIG_LOW_MEMORY_KILLER) := lowmemorykiller.o diff -Nru linux-2.6.23/drivers/misc/lowmemorykiller/lowmemorykiller.c kernel.android/drivers/misc/lowmemorykiller/lowmemorykiller.c --- linux-2.6.23/drivers/misc/lowmemorykiller/lowmemorykiller.c 1970-01-01 10:00:00.000000000 +1000 +++ kernel.android/drivers/misc/lowmemorykiller/lowmemorykiller.c 2007-11-12 07:49:02.000000000 +1100 @@ -0,0 +1,119 @@ +/* drivers/misc/lowmemorykiller/lowmemorykiller.c +** +** Copyright (C) 2007 Google, Inc. +** +** This software is licensed under the terms of the GNU General Public +** License version 2, as published by the Free Software Foundation, and +** may be copied, distributed, and modified under those terms. +** +** This program is distributed in the hope that it will be useful, +** but WITHOUT ANY WARRANTY; without even the implied warranty of +** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +** GNU General Public License for more details. 
+** +*/ + +#include +#include +#include +#include +#include + +static int lowmem_shrink(int nr_to_scan, gfp_t gfp_mask); + +static struct shrinker lowmem_shrinker = { + .shrink = lowmem_shrink, + .seeks = DEFAULT_SEEKS * 16 +}; +static uint32_t lowmem_debug_level = 2; +static int lowmem_adj[6] = { + 0, + 1, + 6, + 12, +}; +static int lowmem_adj_size = 4; +static size_t lowmem_minfree[6] = { + 3*512, // 6MB + 2*1024, // 8MB + 4*1024, // 16MB + 16*1024, // 64MB +}; +static int lowmem_minfree_size = 4; + +#define lowmem_print(level, x...) do { if(lowmem_debug_level >= (level)) printk(x); } while(0) + +module_param_named(cost, lowmem_shrinker.seeks, int, S_IRUGO | S_IWUSR); +module_param_array_named(adj, lowmem_adj, int, &lowmem_adj_size, S_IRUGO | S_IWUSR); +module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size, S_IRUGO | S_IWUSR); +module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR); + +static int lowmem_shrink(int nr_to_scan, gfp_t gfp_mask) +{ + struct task_struct *p; + struct task_struct *selected = NULL; + int rem = 0; + int tasksize; + int i; + int min_adj = OOM_ADJUST_MAX + 1; + int selected_tasksize = 0; + int array_size = ARRAY_SIZE(lowmem_adj); + int other_free = global_page_state(NR_FREE_PAGES) + global_page_state(NR_FILE_PAGES); + if(lowmem_adj_size < array_size) + array_size = lowmem_adj_size; + if(lowmem_minfree_size < array_size) + array_size = lowmem_minfree_size; + for(i = 0; i < array_size; i++) { + if(other_free < lowmem_minfree[i]) { + min_adj = lowmem_adj[i]; + break; + } + } + if(nr_to_scan > 0) + lowmem_print(3, "lowmem_shrink %d, %x, ofree %d, ma %d\n", nr_to_scan, gfp_mask, other_free, min_adj); + read_lock(&tasklist_lock); + for_each_process(p) { + if(p->oomkilladj >= 0 && p->mm) { + tasksize = get_mm_rss(p->mm); + if(nr_to_scan > 0 && tasksize > 0 && p->oomkilladj >= min_adj) { + if(selected == NULL || + p->oomkilladj > selected->oomkilladj || + (p->oomkilladj == selected->oomkilladj && + 
tasksize > selected_tasksize)) { + selected = p; + selected_tasksize = tasksize; + lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n", + p->pid, p->comm, p->oomkilladj, tasksize); + } + } + rem += tasksize; + } + } + if(selected != NULL) { + lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n", + selected->pid, selected->comm, + selected->oomkilladj, selected_tasksize); + force_sig(SIGKILL, selected); + rem -= selected_tasksize; + } + lowmem_print(4, "lowmem_shrink %d, %x, return %d\n", nr_to_scan, gfp_mask, rem); + read_unlock(&tasklist_lock); + return rem; +} + +static int __init lowmem_init(void) +{ + register_shrinker(&lowmem_shrinker); + return 0; +} + +static void __exit lowmem_exit(void) +{ + unregister_shrinker(&lowmem_shrinker); +} + +module_init(lowmem_init); +module_exit(lowmem_exit); + +MODULE_LICENSE("GPL"); + diff -Nru linux-2.6.23/drivers/usb/gadget/Makefile kernel.android/drivers/usb/gadget/Makefile --- linux-2.6.23/drivers/usb/gadget/Makefile 2007-10-10 06:31:38.000000000 +1000 +++ kernel.android/drivers/usb/gadget/Makefile 2007-11-12 07:49:03.000000000 +1100 @@ -28,6 +28,9 @@ g_file_storage-objs := file_storage.o usbstring.o config.o \ epautoconf.o +# needed for drivers/android/android_gadget.c +obj-$(CONFIG_ANDROID_GADGET) += config.o epautoconf.o usbstring.o + ifeq ($(CONFIG_USB_ETH_RNDIS),y) g_ether-objs += rndis.o endif --- linux-2.6.23/fs/inotify_user.c 2007-10-10 06:31:38.000000000 +1000 +++ kernel.android/fs/inotify_user.c 2007-11-12 07:49:03.000000000 +1100 @@ -31,6 +31,9 @@ #include #include #include +#ifdef CONFIG_ANDROID_POWER +#include +#endif #include @@ -81,6 +84,9 @@ unsigned int queue_size; /* size of the queue (bytes) */ unsigned int event_count; /* number of pending events */ unsigned int max_events; /* maximum number of events */ +#ifdef CONFIG_ANDROID_POWER + android_suspend_lock_t suspend_lock; +#endif }; /* @@ -157,6 +163,9 @@ if (atomic_dec_and_test(&dev->count)) { atomic_dec(&dev->user->inotify_devs); 
free_uid(dev->user); +#ifdef CONFIG_ANDROID_POWER + android_uninit_suspend_lock(&dev->suspend_lock); +#endif kfree(dev); } } @@ -301,6 +310,9 @@ dev->queue_size += sizeof(struct inotify_event) + kevent->event.len; list_add_tail(&kevent->list, &dev->events); wake_up_interruptible(&dev->wq); +#ifdef CONFIG_ANDROID_POWER + android_lock_suspend_auto_expire(&dev->suspend_lock, 5 * HZ); +#endif out: mutex_unlock(&dev->ev_mutex); @@ -318,6 +330,10 @@ dev->event_count--; dev->queue_size -= sizeof(struct inotify_event) + kevent->event.len; +#ifdef CONFIG_ANDROID_POWER + if(dev->event_count == 0) + android_unlock_suspend(&dev->suspend_lock); +#endif kfree(kevent->name); kmem_cache_free(event_cachep, kevent); @@ -594,6 +610,10 @@ dev->max_events = inotify_max_queued_events; dev->user = user; atomic_set(&dev->count, 0); +#ifdef CONFIG_ANDROID_POWER + dev->suspend_lock.name = "inotify"; + android_init_suspend_lock(&dev->suspend_lock); +#endif get_inotify_dev(dev); atomic_inc(&user->inotify_devs); diff -Nru linux-2.6.23/include/asm-arm/elf.h kernel.android/include/asm-arm/elf.h --- linux-2.6.23/include/asm-arm/elf.h 2007-10-10 06:31:38.000000000 +1000 +++ kernel.android/include/asm-arm/elf.h 2007-11-12 07:49:03.000000000 +1100 @@ -60,6 +60,11 @@ #define ELF_PLATFORM (elf_platform) extern char elf_platform[]; + +struct task_struct; + +extern int dump_task_regs (struct task_struct *, elf_gregset_t *); + #endif /* @@ -114,6 +119,9 @@ } \ } while (0) +#define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs) + + #endif #endif diff -Nru linux-2.6.23/include/linux/android_alarm.h kernel.android/include/linux/android_alarm.h --- linux-2.6.23/include/linux/android_alarm.h 1970-01-01 10:00:00.000000000 +1000 +++ kernel.android/include/linux/android_alarm.h 2007-11-12 07:49:03.000000000 +1100 @@ -0,0 +1,55 @@ +/* include/linux/android_alarm.h +** +** Copyright (C) 2006-2007 Google, Inc. 
+** +** This software is licensed under the terms of the GNU General Public +** License version 2, as published by the Free Software Foundation, and +** may be copied, distributed, and modified under those terms. +** +** This program is distributed in the hope that it will be useful, +** but WITHOUT ANY WARRANTY; without even the implied warranty of +** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +** GNU General Public License for more details. +** +*/ + +#ifndef _LINUX_ANDROID_ALARM_H +#define _LINUX_ANDROID_ALARM_H + +#include +#include + +typedef enum { + // return code bit numbers or set alarm arg + ANDROID_ALARM_RTC_WAKEUP, + ANDROID_ALARM_RTC, + ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP, + ANDROID_ALARM_ELAPSED_REALTIME, + ANDROID_ALARM_SYSTEMTIME, + // + ANDROID_ALARM_TYPE_COUNT, + + // return code bit numbers +// ANDROID_ALARM_TIME_CHANGE = 16 +} android_alarm_type_t; + +typedef enum { + ANDROID_ALARM_RTC_WAKEUP_MASK = 1U << ANDROID_ALARM_RTC_WAKEUP, + ANDROID_ALARM_RTC_MASK = 1U << ANDROID_ALARM_RTC, + ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK = 1U << ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP, + ANDROID_ALARM_ELAPSED_REALTIME_MASK = 1U << ANDROID_ALARM_ELAPSED_REALTIME, + ANDROID_ALARM_SYSTEMTIME_MASK = 1U << ANDROID_ALARM_SYSTEMTIME, + ANDROID_ALARM_TIME_CHANGE_MASK = 1U << 16 +} android_alarm_return_flags_t; + +#define ANDROID_ALARM_CLEAR(type) _IO('a', 0 | ((type) << 4)) // disable alarm +#define ANDROID_ALARM_WAIT _IO('a', 1) // ack last alarm and wait for next +#define ANDROID_ALARM_SET(type) _IOW('a', 2 | ((type) << 4), struct timespec) // set alarm +#define ANDROID_ALARM_SET_AND_WAIT(type) _IOW('a', 3 | ((type) << 4), struct timespec) +#define ANDROID_ALARM_GET_TIME(type) _IOW('a', 4 | ((type) << 4), struct timespec) +#define ANDROID_ALARM_SET_RTC _IOW('a', 5, struct timespec) + +#define ANDROID_ALARM_BASE_CMD(cmd) (cmd & ~(_IOC(0, 0, 0xf0, 0))) +#define ANDROID_ALARM_IOCTL_TO_TYPE(cmd) (_IOC_NR(cmd) >> 4) + +#endif diff -Nru 
linux-2.6.23/include/linux/android_gadget.h kernel.android/include/linux/android_gadget.h --- linux-2.6.23/include/linux/android_gadget.h 1970-01-01 10:00:00.000000000 +1000 +++ kernel.android/include/linux/android_gadget.h 2007-11-12 07:49:03.000000000 +1100 @@ -0,0 +1,88 @@ +/* include/linux/android_gadget.h +** +** Copyright (C) 2006-2007 Google, Inc. +** +** This software is licensed under the terms of the GNU General Public +** License version 2, as published by the Free Software Foundation, and +** may be copied, distributed, and modified under those terms. +** +** This program is distributed in the hope that it will be useful, +** but WITHOUT ANY WARRANTY; without even the implied warranty of +** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +** GNU General Public License for more details. +** +*/ + +#ifndef _LINUX_ANDROID_GADGET_H +#define _LINUX_ANDROID_GADGET_H + + +/* used for ANDROID_GADGET_OPEN_FILE */ +struct android_gadget_file_open { + const char* path; + uint32_t path_length; // strlen(path) + uint32_t flags; + uint32_t mode; +}; + + +/* + * Used for optimized file copy ioctls ANDROID_GADGET_READ_TO_FILE and ANDROID_GADGET_WRITE_FROM_FILE. + * File's seek position must be set to the proper location before calling ioctl. 
+*/ +struct android_gadget_file_copy { + uint32_t fd; + uint32_t length; + uint64_t offset; +}; + + +/* Set number of UMS devices, for USB_BULK_GET_MAX_LUN_REQUEST SETUP request */ +#define ANDROID_GADGET_SET_UMS_DEVICE_COUNT _IO('g',1) + +/* Sets USB configuration settings */ +#define ANDROID_GADGET_SET_MANUFACTURER_NAME _IO('g',2) +#define ANDROID_GADGET_SET_PRODUCT_NAME _IO('g',3) +#define ANDROID_GADGET_SET_SERIAL _IO('g',4) +#define ANDROID_GADGET_SET_VENDOR_ID _IO('g',5) +#define ANDROID_GADGET_SET_PRODUCT_ID _IO('g',6) +#define ANDROID_GADGET_SET_COMPOSITE_PRODUCT_ID _IO('g',19) + +/* Enables ADB interface */ +#define ANDROID_GADGET_ENABLE_ADB _IO('g',7) + +/* Enables mass storage interface */ +#define ANDROID_GADGET_ENABLE_UMS _IO('g',8) + +/* Enables kernel debug interface */ +#define ANDROID_GADGET_ENABLE_KDBG _IO('g',18) + +/* Enables MTP support */ +#define ANDROID_GADGET_ENABLE_MTP _IO('g',21) + +/* + * Enable or disable USB, depending on boolean argument. + * Do this after all other configuration is complete + */ +#define ANDROID_GADGET_ENABLE_USB _IO('g',9) + +/* used to open a file within the kernel. 
ioctl returns fd or error */ +#define ANDROID_GADGET_OPEN_FILE _IO('g',10) + +/* used to close a file opened with ANDROID_GADGET_OPEN_FILE */ +#define ANDROID_GADGET_CLOSE_FILE _IO('g',11) + +/* used to copy data from USB to a local file */ +#define ANDROID_GADGET_READ_TO_FILE _IO('g',12) + +/* used to copy data from a local file to USB */ +#define ANDROID_GADGET_WRITE_FROM_FILE _IO('g',13) + +/* Used to query current state of android_gadget */ +#define ANDROID_GADGET_IS_CONNECTED _IO('g',14) +#define ANDROID_GADGET_IS_ADB_ENABLED _IO('g',15) +#define ANDROID_GADGET_IS_UMS_ENABLED _IO('g',16) +#define ANDROID_GADGET_IS_CONFIGURED _IO('g',17) +#define ANDROID_GADGET_IS_KDBG_ENABLED _IO('g',20) + +#endif /* _LINUX_ANDROID_GADGET_H */ diff -Nru linux-2.6.23/include/linux/android_power.h kernel.android/include/linux/android_power.h --- linux-2.6.23/include/linux/android_power.h 1970-01-01 10:00:00.000000000 +1000 +++ kernel.android/include/linux/android_power.h 2007-11-12 07:49:03.000000000 +1100 @@ -0,0 +1,89 @@ +/* include/linux/android_power.h +** +** Copyright (C) 2007 Google, Inc. +** +** This software is licensed under the terms of the GNU General Public +** License version 2, as published by the Free Software Foundation, and +** may be copied, distributed, and modified under those terms. +** +** This program is distributed in the hope that it will be useful, +** but WITHOUT ANY WARRANTY; without even the implied warranty of +** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +** GNU General Public License for more details. 
+** +*/ + +#ifndef _LINUX_ANDROID_POWER_H +#define _LINUX_ANDROID_POWER_H + +#include +#include + +typedef struct +{ + struct list_head link; + int flags; + const char *name; + int expires; +#ifdef CONFIG_ANDROID_POWER_STAT + struct { + int count; + int expire_count; + ktime_t total_time; + ktime_t max_time; + ktime_t last_time; + } stat; +#endif +} android_suspend_lock_t; + +#if 0 // none of these flags are implemented +#define ANDROID_SUSPEND_LOCK_FLAG_COUNTED (1U << 0) +#define ANDROID_SUSPEND_LOCK_FLAG_USER_READABLE (1U << 1) +#define ANDROID_SUSPEND_LOCK_FLAG_USER_SET (1U << 2) +#define ANDROID_SUSPEND_LOCK_FLAG_USER_CLEAR (1U << 3) +#define ANDROID_SUSPEND_LOCK_FLAG_USER_INC (1U << 4) +#define ANDROID_SUSPEND_LOCK_FLAG_USER_DEC (1U << 5) +#define ANDROID_SUSPEND_LOCK_FLAG_USER_VISIBLE_MASK (0x1fU << 1) +#endif +#define ANDROID_SUSPEND_LOCK_AUTO_EXPIRE (1U << 6) +#define ANDROID_SUSPEND_LOCK_ACTIVE (1U << 7) + + +typedef struct android_early_suspend android_early_suspend_t; +struct android_early_suspend +{ + struct list_head link; + int level; + void (*suspend)(android_early_suspend_t *h); + void (*resume)(android_early_suspend_t *h); +}; + +typedef enum { + ANDROID_CHARGING_STATE_UNKNOWN, + ANDROID_CHARGING_STATE_DISCHARGE, + ANDROID_CHARGING_STATE_MAINTAIN, // or trickle + ANDROID_CHARGING_STATE_SLOW, + ANDROID_CHARGING_STATE_NORMAL, + ANDROID_CHARGING_STATE_FAST, + ANDROID_CHARGING_STATE_OVERHEAT +} android_charging_state_t; + +//android_suspend_lock_t *android_allocate_suspend_lock(const char *debug_name); +//void android_free_suspend_lock(android_suspend_lock_t *lock); +int android_init_suspend_lock(android_suspend_lock_t *lock); +void android_uninit_suspend_lock(android_suspend_lock_t *lock); +void android_lock_suspend(android_suspend_lock_t *lock); +void android_lock_suspend_auto_expire(android_suspend_lock_t *lock, int timeout); +void android_unlock_suspend(android_suspend_lock_t *lock); +void android_power_wakeup(int notification); /* notification = 
0: normal wakeup, notification = 1: temporary wakeup */ + +int android_power_is_driver_suspended(void); + +void android_register_early_suspend(android_early_suspend_t *handler); +void android_unregister_early_suspend(android_early_suspend_t *handler); + +void android_power_set_battery_level(int level); // level 0-100 +void android_power_set_charging_state(android_charging_state_t state); + +#endif + diff -Nru linux-2.6.23/include/linux/binder_module.h kernel.android/include/linux/binder_module.h --- linux-2.6.23/include/linux/binder_module.h 1970-01-01 10:00:00.000000000 +1000 +++ kernel.android/include/linux/binder_module.h 2007-11-12 07:49:03.000000000 +1100 @@ -0,0 +1,413 @@ +/* + * Copyright (c) 2005 Palmsource, Inc. + * + * This software is licensed as described in the file LICENSE, which + * you should have received as part of this distribution. The terms + * are also available at http://www.openbinder.org/license.html. + * + * This software consists of voluntary contributions made by many + * individuals. For the exact contribution history, see the revision + * history and logs, available at http://www.openbinder.org + */ + +#ifndef _BINDER_MODULE_H_ +#define _BINDER_MODULE_H_ + +#include + +#if TARGET_HOST == TARGET_HOST_PALMOS +#include +#include +#endif + +#ifdef __cplusplus +#if _SUPPORTS_NAMESPACE +namespace openbinder { +namespace support { +#endif +#endif + +// These are pre-packed type constants for the object type codes. +enum { + kPackedLargeBinderType = B_PACK_LARGE_TYPE(B_BINDER_TYPE), + kPackedLargeBinderWeakType = B_PACK_LARGE_TYPE(B_BINDER_WEAK_TYPE), + kPackedLargeBinderHandleType = B_PACK_LARGE_TYPE(B_BINDER_HANDLE_TYPE), + kPackedLargeBinderWeakHandleType = B_PACK_LARGE_TYPE(B_BINDER_WEAK_HANDLE_TYPE), + kPackedLargeBinderNodeType = B_PACK_LARGE_TYPE(B_BINDER_NODE_TYPE), + kPackedLargeBinderWeakNodeType = B_PACK_LARGE_TYPE(B_BINDER_WEAK_NODE_TYPE), +}; + +// Internal data structure used by driver. 
+struct binder_node; + +// This is the flattened representation of a Binder object for transfer +// between processes. The 'offsets' supplied as part of a binder transaction +// contains offsets into the data where these structures occur. The Binder +// driver takes care of re-writing the structure type and data as it moves +// between processes. +// +// Note that this is very intentionally designed to be the same as a user-space +// large_flat_data structure holding 8 bytes. The IPC mechanism requires that +// this structure be at least 8 bytes large. +typedef struct flat_binder_object +{ + // 8 bytes for large_flat_header. + unsigned long type; + unsigned long length; + + // 8 bytes of data. + union { + void* binder; // local object + signed long handle; // remote object + struct binder_node* node; // driver node + }; + void* cookie; // extra data associated with local object +} flat_binder_object_t; + +/* + * On 64-bit platforms where user code may run in 32-bits the driver must + * translate the buffer (and local binder) addresses appropriately. + */ + +typedef struct binder_write_read { + signed long write_size; // bytes to write + signed long write_consumed; // bytes consumed by driver (for ERESTARTSYS) + unsigned long write_buffer; + signed long read_size; // bytes to read + signed long read_consumed; // bytes consumed by driver (for ERESTARTSYS) + unsigned long read_buffer; +} binder_write_read_t; + +// Use with BINDER_VERSION, driver fills in fields. +typedef struct binder_version { + signed long protocol_version; // driver protocol version -- increment with incompatible change +} binder_version_t; + +// This is the current protocol version. 
+#define BINDER_CURRENT_PROTOCOL_VERSION 5 + +#define BINDER_IOC_MAGIC 'b' +#define BINDER_WRITE_READ _IOWR(BINDER_IOC_MAGIC, 1, binder_write_read_t) +#define BINDER_SET_WAKEUP_TIME _IOW(BINDER_IOC_MAGIC, 2, binder_wakeup_time_t) +#define BINDER_SET_IDLE_TIMEOUT _IOW(BINDER_IOC_MAGIC, 3, bigtime_t) +#define BINDER_SET_REPLY_TIMEOUT _IOW(BINDER_IOC_MAGIC, 4, bigtime_t) +#define BINDER_SET_MAX_THREADS _IOW(BINDER_IOC_MAGIC, 5, size_t) +#define BINDER_SET_IDLE_PRIORITY _IOW(BINDER_IOC_MAGIC, 6, int) +#define BINDER_SET_CONTEXT_MGR _IOW(BINDER_IOC_MAGIC, 7, int) +#define BINDER_THREAD_EXIT _IOW(BINDER_IOC_MAGIC, 8, int) +#define BINDER_VERSION _IOWR(BINDER_IOC_MAGIC, 9, binder_version_t) +#define BINDER_IOC_MAXNR 9 + +// NOTE: Two special error codes you should check for when calling +// in to the driver are: +// +// EINTR -- The operation has been interrupted. This should be +// handled by retrying the ioctl() until a different error code +// is returned. +// +// ECONNREFUSED -- The driver is no longer accepting operations +// from your process. That is, the process is being destroyed. +// You should handle this by exiting from your process. Note +// that once this error code is returned, all further calls to +// the driver from any thread will return this same code. + +typedef int64_t bigtime_t; + +enum transaction_flags { + tfInline = 0x01, // not yet implemented + tfSynchronous = 0x02, // obsolete + tfRootObject = 0x04, // contents are the component's root object + tfStatusCode = 0x08 // contents are a 32-bit status code +}; + +typedef struct binder_transaction_data +{ + // The first two are only used for bcTRANSACTION and brTRANSACTION, + // identifying the target and contents of the transaction. 
+ union { + unsigned long handle; // target descriptor of command transaction + void *ptr; // target descriptor of return transaction + } target; + void* cookie; // target object cookie + unsigned int code; // transaction command + + // General information about the transaction. + unsigned int flags; + int priority; // requested/current thread priority + size_t data_size; // number of bytes of data + size_t offsets_size; // number of bytes of flat_binder_object offsets + + // If this transaction is inline, the data immediately + // follows here; otherwise, it ends with a pointer to + // the data buffer. + union { + struct { + const void *buffer; // transaction data + const void *offsets; // offsets to flat_binder_object structs + } ptr; + uint8_t buf[8]; + } data; +} binder_transaction_data_t; + +typedef struct binder_wakeup_time +{ + bigtime_t time; + int priority; +} binder_wakeup_time_t; + +enum BinderDriverReturnProtocol { + brERROR = -1, + /* + int: error code + */ + + brOK = 0, + brTIMEOUT, + brWAKEUP, + /* No parameters! */ + + brTRANSACTION, + brREPLY, + /* + binder_transaction_data: the received command. + */ + + brACQUIRE_RESULT, + /* + int: 0 if the last bcATTEMPT_ACQUIRE was not successful. + Else the remote object has acquired a primary reference. + */ + + brDEAD_REPLY, + /* + The target of the last transaction (either a bcTRANSACTION or + a bcATTEMPT_ACQUIRE) is no longer with us. No parameters. + */ + + brTRANSACTION_COMPLETE, + /* + No parameters... always refers to the last transaction requested + (including replies). Note that this will be sent even for asynchronous + transactions. + */ + + brINCREFS, + brACQUIRE, + brRELEASE, + brDECREFS, + /* + void *: ptr to binder + void *: cookie for binder + */ + + brATTEMPT_ACQUIRE, + /* + int: priority + void *: ptr to binder + void *: cookie for binder + */ + + brEVENT_OCCURRED, + /* + This is returned when the bcSET_NEXT_EVENT_TIME has elapsed. 
+ At this point the next event time is set to B_INFINITE_TIMEOUT, + so you must send another bcSET_NEXT_EVENT_TIME command if you + have another event pending. + */ + + brNOOP, + /* + * No parameters. Do nothing and examine the next command. It exists + * primarily so that we can replace it with a brSPAWN_LOOPER command. + */ + + brSPAWN_LOOPER, + /* + * No parameters. The driver has determined that a process has no threads + * waiting to service incoming transactions. When a process receives this + * command, it must spawn a new service thread and register it via + * bcENTER_LOOPER. + */ + + brFINISHED, + + brDEAD_BINDER, + /* + void *: cookie + */ + brCLEAR_DEATH_NOTIFICATION_DONE, + /* + void *: cookie + */ + + brFAILED_REPLY + /* + The last transaction (either a bcTRANSACTION or + a bcATTEMPT_ACQUIRE) failed (e.g. out of memory). No parameters. + */ +}; + +enum BinderDriverCommandProtocol { + bcNOOP = 0, + /* No parameters! */ + + bcTRANSACTION, + bcREPLY, + /* + binder_transaction_data: the sent command. + */ + + bcACQUIRE_RESULT, + /* + int: 0 if the last brATTEMPT_ACQUIRE was not successful. + Else you have acquired a primary reference on the object. + */ + + bcFREE_BUFFER, + /* + void *: ptr to transaction data received on a read + */ + + bcTRANSACTION_COMPLETE, + /* + No parameters... send when finishing an asynchronous transaction. + */ + + bcINCREFS, + bcACQUIRE, + bcRELEASE, + bcDECREFS, + /* + int: descriptor + */ + + bcINCREFS_DONE, + bcACQUIRE_DONE, + /* + void *: ptr to binder + void *: cookie for binder + */ + + bcATTEMPT_ACQUIRE, + /* + int: priority + int: descriptor + */ + + bcRETRIEVE_ROOT_OBJECT, + /* + int: process ID + */ + + bcSET_THREAD_ENTRY, + /* + void *: thread entry function for new threads created to handle tasks + void *: argument passed to those threads + */ + + bcREGISTER_LOOPER, + /* + No parameters. + Register a spawned looper thread with the device. 
This must be + called by the function that is supplied in bcSET_THREAD_ENTRY as + part of its initialization with the binder. + */ + + bcENTER_LOOPER, + bcEXIT_LOOPER, + /* + No parameters. + These two commands are sent as an application-level thread + enters and exits the binder loop, respectively. They are + used so the binder can have an accurate count of the number + of looping threads it has available. + */ + + bcSYNC, + /* + No parameters. + Upon receiving this command, the driver waits until all + pending asynchronous transactions have completed. + */ + +#if 0 + bcCATCH_ROOT_OBJECTS, + /* + No parameters. + Call this to have your team start catching root objects + published by other teams that are spawned outside of the binder. + When this happens, you will receive a brTRANSACTION with the + tfRootObject flag set. (Note that this is distinct from receiving + normal root objects, which are a brREPLY.) + */ +#endif + + bcSTOP_PROCESS, + /* + int: descriptor of process's root object + int: 1 to stop immediately, 0 when root object is released + */ + + bcSTOP_SELF, + /* + int: 1 to stop immediately, 0 when root object is released + */ + + bcREQUEST_DEATH_NOTIFICATION, + /* + void *: ptr to binder + void *: cookie + */ + + bcCLEAR_DEATH_NOTIFICATION, + /* + void *: ptr to binder + void *: cookie + */ + bcDEAD_BINDER_DONE + /* + void *: cookie + */ +}; + +#if 0 +/* Parameters for BINDER_READ_WRITE ioctl. */ +#if BINDER_DEBUG_LIB + +struct binder_write_read +{ + ssize_t write_size; + const void* write_buffer; + ssize_t read_size; + void* read_buffer; +}; + + +/* Below are calls to access the binder when debugging the driver from + user space by compiling it as libbinderdbg and linking libbe2 with it. 
*/ + +extern int open_binder(int teamID=0); +extern status_t close_binder(int desc); +extern status_t ioctl_binder(int desc, int cmd, void *data, int len); +extern ssize_t read_binder(int desc, void *data, size_t numBytes); +extern ssize_t write_binder(int desc, void *data, size_t numBytes); + +#else + +#include +inline int open_binder(int ) { return open("/dev/misc/binder2",O_RDWR|O_CLOEXEC); }; +inline status_t close_binder(int desc) { return close(desc); }; +inline status_t ioctl_binder(int desc, int cmd, void *data, int len) { return ioctl(desc,cmd,data,len); }; +inline ssize_t read_binder(int desc, void *data, size_t numBytes) { return read(desc,data,numBytes); }; +inline ssize_t write_binder(int desc, void *data, size_t numBytes) { return write(desc,data,numBytes); }; + +#endif +#endif + +#ifdef __cplusplus +#if _SUPPORTS_NAMESPACE +} } // namespace openbinder::support +#endif +#endif + +#endif // _BINDER_MODULE_H_ diff -Nru linux-2.6.23/include/linux/binder_type_constants.h kernel.android/include/linux/binder_type_constants.h --- linux-2.6.23/include/linux/binder_type_constants.h 1970-01-01 10:00:00.000000000 +1000 +++ kernel.android/include/linux/binder_type_constants.h 2007-11-12 07:49:03.000000000 +1100 @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2005 Palmsource, Inc. + * + * This software is licensed as described in the file LICENSE, which + * you should have received as part of this distribution. The terms + * are also available at http://www.openbinder.org/license.html. + * + * This software consists of voluntary contributions made by many + * individuals. For the exact contribution history, see the revision + * history and logs, available at http://www.openbinder.org + */ + +#ifndef _SUPPORT_TYPECONSTANTS_H +#define _SUPPORT_TYPECONSTANTS_H + +/*! @file support/TypeConstants.h + @ingroup CoreSupportUtilities + @brief Format and standard definitions of SValue type codes. 
+*/ + +#ifdef __cplusplus +#if _SUPPORTS_NAMESPACE +namespace openbinder { +namespace support { +#endif +#endif + +/*! @addtogroup CoreSupportUtilities + @{ +*/ + +/*-------------------------------------------------------------*/ +/*----- Data Types --------------------------------------------*/ + +/*! @name Type Code Definitions + Type codes are 32-bit integers. The upper 24 bits are the + the code, and the lower 8 bits are metadata. The code is + constructed as 3 characters. Codes containing only the + characters a-z and _, and codes whose last letter is not + alphabetic (a-zA-Z), are reserved for use by the system. + Type codes that end with the character '*' contain + pointers to external objects. + Type codes that end with the character '#' are in a special + namespace reserved for SDimth units. */ +//@{ + +//! Type code manipulation. +enum { + B_TYPE_CODE_MASK = 0x7f7f7f00, // Usable bits for the type code + B_TYPE_CODE_SHIFT = 8, // Where code appears. + + B_TYPE_LENGTH_MASK = 0x00000007, // Usable bits for the data length + B_TYPE_LENGTH_MAX = 0x00000004, // Largest length that can be encoded in type + B_TYPE_LENGTH_LARGE = 0x00000005, // Value when length is > 4 bytes + B_TYPE_LENGTH_MAP = 0x00000007, // For use by SValue + + B_TYPE_BYTEORDER_MASK = 0x80000080, // Bits used to check byte order + B_TYPE_BYTEORDER_NORMAL = 0x00000080, // This bit is set if the byte order is okay + B_TYPE_BYTEORDER_SWAPPED = 0x80000000 // This bit is set if the byte order is swapped +}; + +//! Pack a small (size <= B_TYPE_LENGTH_MAX) type code from its constituent parts. +#define B_PACK_SMALL_TYPE(code, length) (((code)&B_TYPE_CODE_MASK) | (length) | B_TYPE_BYTEORDER_NORMAL) +//! Pack a large (size > B_TYPE_LENGTH_MAX) type code from its constituent parts. +#define B_PACK_LARGE_TYPE(code) (((code)&B_TYPE_CODE_MASK) | B_TYPE_LENGTH_LARGE | B_TYPE_BYTEORDER_NORMAL) +//! Retrieve type information from a packed type code. 
+#define B_UNPACK_TYPE_CODE(type) ((type)&B_TYPE_CODE_MASK) +//! Retrieve size information from a packaed type code. +#define B_UNPACK_TYPE_LENGTH(type) ((type)&B_TYPE_LENGTH_MASK) + +//! Build a valid code for a type code. +/*! Ensures only correct bits are used, and shifts value into correct location. */ +#define B_TYPE_CODE(code) (((code)< +** +** This software is licensed under the terms of the GNU General Public +** License version 2, as published by the Free Software Foundation, and +** may be copied, distributed, and modified under those terms. +** +** This program is distributed in the hope that it will be useful, +** but WITHOUT ANY WARRANTY; without even the implied warranty of +** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +** GNU General Public License for more details. +** +*/ + +#ifndef _LINUX_LOGGER_H +#define _LINUX_LOGGER_H + +#include +#include + +struct logger_entry { + __u16 len; /* length of the payload */ + __u16 __pad; /* no matter what, we get 2 bytes of padding */ + __s32 pid; /* generating process's pid */ + __s32 tid; /* generating process's tid */ + __s32 sec; /* seconds since Epoch */ + __s32 nsec; /* nanoseconds */ + char msg[0]; /* the entry's payload */ +}; + +#define LOGGER_LOG_RADIO "log_radio" /* radio-related messages */ +#define LOGGER_LOG_EVENTS "log_events" /* system/hardware events */ +#define LOGGER_LOG_MAIN "log_main" /* everything else */ + +#define LOGGER_ENTRY_MAX_LEN (4*1024) +#define LOGGER_ENTRY_MAX_PAYLOAD \ + (LOGGER_ENTRY_MAX_LEN - sizeof(struct logger_entry)) + +#define __LOGGERIO 0xAE + +#define LOGGER_GET_LOG_BUF_SIZE _IO(__LOGGERIO, 1) /* size of log */ +#define LOGGER_GET_LOG_LEN _IO(__LOGGERIO, 2) /* used log len */ +#define LOGGER_GET_NEXT_ENTRY_LEN _IO(__LOGGERIO, 3) /* next entry len */ +#define LOGGER_FLUSH_LOG _IO(__LOGGERIO, 4) /* flush log */ + +#endif /* _LINUX_LOGGER_H */ diff -Nru linux-2.6.23/init/Kconfig kernel.android/init/Kconfig --- linux-2.6.23/init/Kconfig 2007-10-10 
06:31:38.000000000 +1000 +++ kernel.android/init/Kconfig 2007-11-12 07:49:03.000000000 +1100 @@ -350,6 +350,12 @@ config SYSCTL bool +config PANIC_TIMEOUT + int "Default panic timeout" + default 0 + help + Set default panic timeout. + menuconfig EMBEDDED bool "Configure standard kernel features (for small systems)" help diff -Nru linux-2.6.23/kernel/futex.c kernel.android/kernel/futex.c --- linux-2.6.23/kernel/futex.c 2007-10-10 06:31:38.000000000 +1000 +++ kernel.android/kernel/futex.c 2007-11-12 07:49:03.000000000 +1100 @@ -1292,8 +1292,8 @@ restart->fn = futex_wait_restart; restart->arg0 = (unsigned long)uaddr; restart->arg1 = (unsigned long)val; - restart->arg2 = (unsigned long)abs_time; - restart->arg3 = 0; + restart->arg2 = abs_time->tv64 & 0xFFFFFFFF; + restart->arg3 = abs_time->tv64 >> 32; if (fshared) restart->arg3 |= ARG3_SHARED; return -ERESTART_RESTARTBLOCK; @@ -1312,13 +1312,14 @@ { u32 __user *uaddr = (u32 __user *)restart->arg0; u32 val = (u32)restart->arg1; - ktime_t *abs_time = (ktime_t *)restart->arg2; + ktime_t abs_time; struct rw_semaphore *fshared = NULL; + abs_time.tv64 = ((u64)restart->arg3 << 32) | (u64) restart->arg2; restart->fn = do_no_restart_syscall; if (restart->arg3 & ARG3_SHARED) fshared = ¤t->mm->mmap_sem; - return (long)futex_wait(uaddr, fshared, val, abs_time); + return (long)futex_wait(uaddr, fshared, val, &abs_time); } diff -Nru linux-2.6.23/kernel/hrtimer.c kernel.android/kernel/hrtimer.c --- linux-2.6.23/kernel/hrtimer.c 2007-10-10 06:31:38.000000000 +1000 +++ kernel.android/kernel/hrtimer.c 2007-11-12 07:49:03.000000000 +1100 @@ -1125,8 +1125,14 @@ * If the timer was rearmed on another CPU, reprogram * the event device. 
*/ - if (timer->base->first == &timer->node) - hrtimer_reprogram(timer, timer->base); + if (timer->base->first == &timer->node) { + if(hrtimer_reprogram(timer, timer->base)) { + __remove_hrtimer(timer, timer->base, + HRTIMER_STATE_PENDING, 0); + list_add_tail(&timer->cb_entry, + &cpu_base->cb_pending); + } + } } } spin_unlock_irq(&cpu_base->lock); diff -Nru linux-2.6.23/kernel/panic.c kernel.android/kernel/panic.c --- linux-2.6.23/kernel/panic.c 2007-10-10 06:31:38.000000000 +1000 +++ kernel.android/kernel/panic.c 2007-11-12 07:49:03.000000000 +1100 @@ -26,7 +26,10 @@ static int pause_on_oops_flag; static DEFINE_SPINLOCK(pause_on_oops_lock); -int panic_timeout; +#ifndef CONFIG_PANIC_TIMEOUT +#define CONFIG_PANIC_TIMEOUT 0 +#endif +int panic_timeout = CONFIG_PANIC_TIMEOUT; ATOMIC_NOTIFIER_HEAD(panic_notifier_list); diff -Nru linux-2.6.23/kernel/power/console.c kernel.android/kernel/power/console.c --- linux-2.6.23/kernel/power/console.c 2007-10-10 06:31:38.000000000 +1000 +++ kernel.android/kernel/power/console.c 2007-11-12 07:49:03.000000000 +1100 @@ -52,6 +52,12 @@ acquire_console_sem(); set_console(orig_fgconsole); release_console_sem(); + + if (vt_waitactive(orig_fgconsole)) { + pr_debug("Resume: Can't switch VCs."); + return; + } + kmsg_redirect = orig_kmsg; return; } diff -Nru linux-2.6.23/kernel/power/process.c kernel.android/kernel/power/process.c --- linux-2.6.23/kernel/power/process.c 2007-10-10 06:31:38.000000000 +1000 +++ kernel.android/kernel/power/process.c 2007-11-12 07:49:03.000000000 +1100 @@ -13,6 +13,9 @@ #include #include #include +#ifdef CONFIG_ANDROID_POWER +#include +#endif /* * Timeout for stopping processes @@ -149,6 +152,10 @@ } while_each_thread(g, p); read_unlock(&tasklist_lock); yield(); /* Yield is okay here */ +#ifdef CONFIG_ANDROID_POWER + if (todo && !android_power_is_driver_suspended()) + break; +#endif if (time_after(jiffies, end_time)) break; } while (todo);