In the Android system it is fair to say that Binder is everywhere, and Binder transactions are happening all the time. In many cases a process does not have just a single Binder transaction in flight; there are usually several concurrent transactions, and nested Binder transactions as well. This is especially true for an important process like system_server, which carries even more Binder traffic. When a system problem occurs and you trace system_server, you will find that most of the time it is in the middle of a Binder transaction. But no matter how many Binder transactions there are, or how deeply they are nested, in the end they are all built on two kinds of Binder transmission: synchronous and asynchronous. This article tries to explain the Binder communication flow through the simplest possible transactions.
The most common Binder transmission is the synchronous one. In a synchronous transaction, the initiator of the IPC must wait until the peer has finished handling the message before it can continue. A complete synchronous transaction proceeds roughly as follows.
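As a rough sketch of the command sequence between the two user-space sides and the driver (details such as BR_NOOP are omitted):

Client                          Binder driver                         Server
  |---- BC_TRANSACTION ------------>|                                    |
  |<--- BR_TRANSACTION_COMPLETE ----|---- BR_TRANSACTION --------------->|
  |        (blocks in read)         |                                    | handle request
  |                                 |<--- BC_REPLY ----------------------|
  |<--- BR_REPLY -------------------|---- BR_TRANSACTION_COMPLETE ------>|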
Skipping the Binder device initialization, let's go straight to the transmission itself. The client sends the BC_TRANSACTION command to the Binder driver through the BINDER_WRITE_READ ioctl.
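Before looking at the kernel side, here is a minimal sketch of what that ioctl looks like from user space: a BC_TRANSACTION command word followed by a binder_transaction_data is packed into the write buffer of a binder_write_read. This is an illustration only, not AOSP code; the UAPI header path, the handle, the code, and the payload are assumptions, and the read half plus error handling are omitted.

// Hypothetical helper: hand one BC_TRANSACTION to the driver.
#include <cstring>
#include <sys/ioctl.h>
#include <linux/android/binder.h>   // header path may differ by kernel version

static int send_bc_transaction(int binder_fd, uint32_t svc_handle,
                               uint32_t code, void *payload, size_t size)
{
    struct {
        uint32_t cmd;
        struct binder_transaction_data tr;
    } __attribute__((packed)) writebuf;

    memset(&writebuf, 0, sizeof(writebuf));
    writebuf.cmd = BC_TRANSACTION;          // handled in binder_thread_write()
    writebuf.tr.target.handle = svc_handle; // 0 would mean the service manager
    writebuf.tr.code = code;                // method code understood by the server
    writebuf.tr.flags = 0;                  // synchronous: TF_ONE_WAY not set
    writebuf.tr.data_size = size;
    writebuf.tr.data.ptr.buffer = (binder_uintptr_t)payload;
    writebuf.tr.offsets_size = 0;           // no flat_binder_object in the payload

    struct binder_write_read bwr;
    memset(&bwr, 0, sizeof(bwr));
    bwr.write_size = sizeof(writebuf);      // driver will run binder_thread_write()
    bwr.write_buffer = (binder_uintptr_t)&writebuf;

    return ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
}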
drivers/staging/android/binder.c

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    ......
    switch (cmd) {
    case BINDER_WRITE_READ: {
        struct binder_write_read bwr;
        ......
        // Data to write
        if (bwr.write_size > 0) {
            ret = binder_thread_write(proc, thread, bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
            trace_binder_write_done(ret);
            if (ret < 0) {
                bwr.read_consumed = 0;
                if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                    ret = -EFAULT;
                goto err;
            }
        }
        // Data to read
        if (bwr.read_size > 0) {
            ret = binder_thread_read(proc, thread, bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
            trace_binder_read_done(ret);
            if (!list_empty(&proc->todo))
                wake_up_interruptible(&proc->wait);
            if (ret < 0) {
                if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                    ret = -EFAULT;
                goto err;
            }
        }
        ......
        break;
    }
    ......
To initiate a Binder transaction, the caller writes the BC_TRANSACTION command and then waits for the command to return.
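On the framework side this is driven by IPCThreadState::transact(). The following is a simplified sketch of that flow, not the verbatim AOSP source (error checks and the TF_ACCEPT_FDS handling are left out): serialize a BC_TRANSACTION into mOut, then block in waitForResponse() until BR_REPLY arrives.

// Simplified sketch of the client-side entry point.
status_t IPCThreadState::transact(int32_t handle, uint32_t code,
                                  const Parcel& data, Parcel* reply,
                                  uint32_t flags)
{
    // Pack the command word plus binder_transaction_data into mOut.
    status_t err = writeTransactionData(BC_TRANSACTION, flags, handle,
                                        code, data, nullptr);
    if (err != NO_ERROR) return err;

    if ((flags & TF_ONE_WAY) == 0) {
        // Synchronous: talkWithDriver() loops until the driver returns BR_REPLY.
        err = waitForResponse(reply);
    } else {
        // Asynchronous: only wait for BR_TRANSACTION_COMPLETE.
        err = waitForResponse(nullptr, nullptr);
    }
    return err;
}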
drivers/staging/android/binder.c

static int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
            binder_uintptr_t binder_buffer, size_t size,
            binder_size_t *consumed)
{
    ......
    case BC_TRANSACTION:
    case BC_REPLY: {
        struct binder_transaction_data tr;

        if (copy_from_user(&tr, ptr, sizeof(tr)))
            return -EFAULT;
        ptr += sizeof(tr);
        binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
        break;
    }
    ......
}
Both BC_TRANSACTION and BC_REPLY end up in binder_transaction(); the only difference is whether the reply parameter is set. binder_transaction() is the core function of the write path. It is long and carries a lot of logic, so let's walk through it as best we can.
drivers/staging/android/binder.c

static void binder_transaction(struct binder_proc *proc,
                   struct binder_thread *thread,
                   struct binder_transaction_data *tr, int reply)
{
    ......
    if (reply) {
        ......
    } else {
        if (tr->target.handle) {
            // Look up the binder node for this handle
            struct binder_ref *ref;
            ref = binder_get_ref(proc, tr->target.handle, true);
            ......
            target_node = ref->node;
        } else {
            // A handle of 0 means the service manager's binder node
            target_node = binder_context_mgr_node;
            ......
        }
        e->to_node = target_node->debug_id;
        // The binder_proc that owns the binder node
        target_proc = target_node->proc;
        ......
        if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
            struct binder_transaction *tmp;
            tmp = thread->transaction_stack;
            ......
            // For a synchronous transaction, look on the transaction stack for a
            // transaction coming from the peer; if there is one, let that peer
            // thread handle this transaction
            while (tmp) {
                if (tmp->from && tmp->from->proc == target_proc)
                    target_thread = tmp->from;
                tmp = tmp->from_parent;
            }
        }
    }
    // If a target thread was found use its todo list, otherwise use the process todo list
    if (target_thread) {
        e->to_thread = target_thread->pid;
        target_list = &target_thread->todo;
        target_wait = &target_thread->wait;
    } else {
        target_list = &target_proc->todo;
        target_wait = &target_proc->wait;
    }
    e->to_proc = target_proc->pid;

    // Allocate the binder transaction
    t = kzalloc(sizeof(*t), GFP_KERNEL);
    ......
    // Allocate a binder_work used to report transaction-complete
    tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
    ......
    // For a synchronous, non-reply transaction, record the current thread as "from"
    if (!reply && !(tr->flags & TF_ONE_WAY))
        t->from = thread;
    else
        t->from = NULL;
    t->sender_euid = proc->tsk->cred->euid;
    // Record the target process and thread of the transaction
    t->to_proc = target_proc;
    t->to_thread = target_thread;
    t->code = tr->code;
    t->flags = tr->flags;
    t->priority = task_nice(current);
    // Allocate the transaction buffer from the target process
    t->buffer = binder_alloc_buf(target_proc, tr->data_size,
        tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
    ......
    t->buffer->allow_user_free = 0;
    t->buffer->debug_id = t->debug_id;
    t->buffer->transaction = t;
    t->buffer->target_node = target_node;
    // Take a reference on the binder node
    if (target_node)
        binder_inc_node(target_node, 1, 0, NULL);

    offp = (binder_size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));
    // Copy the user data into the transaction buffer of the target
    if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
               tr->data.ptr.buffer, tr->data_size)) {
        ......
    }
    // Copy the flat_binder_object offsets from user space
    if (copy_from_user(offp, (const void __user *)(uintptr_t)
               tr->data.ptr.offsets, tr->offsets_size)) {
        ......
    }
    ......
    off_end = (void *)offp + tr->offsets_size;
    off_min = 0;
    // Process the flat_binder_object entries
    for (; offp < off_end; offp++) {
        struct flat_binder_object *fp;
        ......
        fp = (struct flat_binder_object *)(t->buffer->data + *offp);
        off_min = *offp + sizeof(struct flat_binder_object);
        switch (fp->type) {
        // A binder node: used when a server registers itself
        case BINDER_TYPE_BINDER:
        case BINDER_TYPE_WEAK_BINDER: {
            struct binder_ref *ref;
            // Create a binder node if one does not exist yet
            struct binder_node *node = binder_get_node(proc, fp->binder);

            if (node == NULL) {
                node = binder_new_node(proc, fp->binder, fp->cookie);
                ......
            }
            ......
            // Create a reference in the target process
            ref = binder_get_ref_for_node(target_proc, node);
            ......
            // Change the object type from binder to handle
            if (fp->type == BINDER_TYPE_BINDER)
                fp->type = BINDER_TYPE_HANDLE;
            else
                fp->type = BINDER_TYPE_WEAK_HANDLE;
            fp->binder = 0;
            // Store the reference's handle in the object
            fp->handle = ref->desc;
            fp->cookie = 0;
            // Increase the reference count
            binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE, &thread->todo);
            ......
        } break;
        // A binder reference: a client passing a binder to a server
        case BINDER_TYPE_HANDLE:
        case BINDER_TYPE_WEAK_HANDLE: {
            // Look up the binder reference in the current process
            struct binder_ref *ref = binder_get_ref(
                    proc, fp->handle,
                    fp->type == BINDER_TYPE_HANDLE);
            ......
            if (ref->node->proc == target_proc) {
                // If the transaction stays inside one process, use the binder node directly
                if (fp->type == BINDER_TYPE_HANDLE)
                    fp->type = BINDER_TYPE_BINDER;
                else
                    fp->type = BINDER_TYPE_WEAK_BINDER;
                fp->binder = ref->node->ptr;
                fp->cookie = ref->node->cookie;
                binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
                ......
            } else {
                struct binder_ref *new_ref;
                // Create a binder reference in the target process
                new_ref = binder_get_ref_for_node(target_proc, ref->node);
                ......
                fp->binder = 0;
                fp->handle = new_ref->desc;
                fp->cookie = 0;
                binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
                ......
            }
        } break;
        // A file descriptor: used to share files or memory
        case BINDER_TYPE_FD: {
            ......
        } break;
        ......
        }
    }
    if (reply) {
        ......
    } else if (!(t->flags & TF_ONE_WAY)) {
        // Push the transaction onto the current thread's stack
        t->need_reply = 1;
        t->from_parent = thread->transaction_stack;
        thread->transaction_stack = t;
    } else {
        // Asynchronous transactions use the async todo list
        if (target_node->has_async_transaction) {
            target_list = &target_node->async_todo;
            target_wait = NULL;
        } else
            target_node->has_async_transaction = 1;
    }
    // Add the transaction to the target's queue
    t->work.type = BINDER_WORK_TRANSACTION;
    list_add_tail(&t->work.entry, target_list);
    // Add the transaction-complete work to the current thread's todo queue
    tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
    list_add_tail(&tcomplete->entry, &thread->todo);
    // Wake up the target thread or process
    if (target_wait)
        wake_up_interruptible(target_wait);
    return;
    ......
}
In short, BC_TRANSACTION works as follows: the driver looks up the target node and target process from the handle, allocates a buffer in the target process and copies the user data into it, translates any flat_binder_object entries, pushes the transaction onto the caller's transaction stack (for a synchronous call), queues BINDER_WORK_TRANSACTION on the target's todo list and BINDER_WORK_TRANSACTION_COMPLETE on the caller's own todo list, and finally wakes up the target.
When the client performs BINDER_WRITE_READ, it first writes its data through binder_thread_write(), which puts BINDER_WORK_TRANSACTION_COMPLETE on the work queue, and then immediately calls binder_thread_read() to read the returned data. There, the BR_TRANSACTION_COMPLETE command is returned to the client thread.
drivers/staging/android/binder.c

static int binder_thread_read(struct binder_proc *proc, struct binder_thread *thread,
                  binder_uintptr_t binder_buffer, size_t size,
                  binder_size_t *consumed, int non_block)
{
    ......
    // On the first read, insert a BR_NOOP command for the user
    if (*consumed == 0) {
        if (put_user(BR_NOOP, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
    }

retry:
    // If the current thread has no transaction and its todo list is empty,
    // handle the process work queue instead
    wait_for_proc_work = thread->transaction_stack == NULL &&
                list_empty(&thread->todo);
    ......
    thread->looper |= BINDER_LOOPER_STATE_WAITING;
    // A thread that waits on the process work queue counts as an idle thread
    if (wait_for_proc_work)
        proc->ready_threads++;
    ......
    // Wait until the process or thread work queue is woken up
    if (wait_for_proc_work) {
        ......
        ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
    } else {
        ......
        ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
    }
    ......
    // Once woken, start handling work and decrement the idle thread count
    if (wait_for_proc_work)
        proc->ready_threads--;
    thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
    ......
    while (1) {
        ......
        // Handle the thread work queue first, then the process work queue
        if (!list_empty(&thread->todo))
            w = list_first_entry(&thread->todo, struct binder_work, entry);
        else if (!list_empty(&proc->todo) && wait_for_proc_work)
            w = list_first_entry(&proc->todo, struct binder_work, entry);
        else {
            if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */
                goto retry;
            break;
        }
        ......
        switch (w->type) {
        ......
        case BINDER_WORK_TRANSACTION_COMPLETE: {
            // Return the BR_TRANSACTION_COMPLETE command to the user
            cmd = BR_TRANSACTION_COMPLETE;
            if (put_user(cmd, (uint32_t __user *)ptr))
                return -EFAULT;
            ptr += sizeof(uint32_t);
            ......
            list_del(&w->entry);
            kfree(w);
            binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
        } break;
        ......
        if (!t)
            continue;
        ......
}
After a server thread starts, it calls talkWithDriver() and waits for data to read. Once the Binder driver has processed the BC_TRANSACTION command sent by the client, it wakes up the server thread. The server thread's read and processing of the data also happens in binder_thread_read().
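For context, the server-side loop lives in IPCThreadState::joinThreadPool(). The following is a simplified sketch of that loop, not the verbatim AOSP source: each iteration blocks in talkWithDriver() (and therefore in binder_thread_read()) and then dispatches whatever command the driver returned.

// Simplified sketch of the Binder server thread loop.
void IPCThreadState::joinThreadPool(bool isMain)
{
    // Tell the driver this thread will serve the looper.
    mOut.writeInt32(isMain ? BC_ENTER_LOOPER : BC_REGISTER_LOOPER);

    status_t result;
    do {
        result = talkWithDriver();          // blocks until the driver has work for us
        if (result >= NO_ERROR) {
            int32_t cmd = mIn.readInt32();  // e.g. BR_TRANSACTION
            result = executeCommand(cmd);   // dispatches to BBinder::transact() and sendReply()
        }
    } while (result != -ECONNREFUSED && result != -EBADF);

    mOut.writeInt32(BC_EXIT_LOOPER);
    talkWithDriver(false);                  // flush BC_EXIT_LOOPER, do not wait for a reply
}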
drivers/staging/android/binder.c

static int binder_thread_read(struct binder_proc *proc, struct binder_thread *thread,
                  binder_uintptr_t binder_buffer, size_t size,
                  binder_size_t *consumed, int non_block)
{
    ......
    while (1) {
        switch (w->type) {
        // binder_transaction() queued BINDER_WORK_TRANSACTION and then woke up the target process
        case BINDER_WORK_TRANSACTION: {
            t = container_of(w, struct binder_transaction, work);
        } break;
        ......
        // Only BINDER_WORK_TRANSACTION produces a transaction, so only then do we continue below
        if (!t)
            continue;

        BUG_ON(t->buffer == NULL);
        // If target_node is set, the work came from BC_TRANSACTION and BR_TRANSACTION must be returned.
        // Otherwise it came from BC_REPLY and BR_REPLY is returned.
        if (t->buffer->target_node) {
            struct binder_node *target_node = t->buffer->target_node;

            tr.target.ptr = target_node->ptr;
            tr.cookie = target_node->cookie;
            t->saved_priority = task_nice(current);
            if (t->priority < target_node->min_priority &&
                !(t->flags & TF_ONE_WAY))
                binder_set_nice(t->priority);
            else if (!(t->flags & TF_ONE_WAY) ||
                 t->saved_priority > target_node->min_priority)
                binder_set_nice(target_node->min_priority);
            cmd = BR_TRANSACTION;
        } else {
            tr.target.ptr = 0;
            tr.cookie = 0;
            cmd = BR_REPLY;
        }
        tr.code = t->code;
        tr.flags = t->flags;
        tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);

        // For a synchronous transaction sender_pid is the caller's pid; for an asynchronous one it is 0
        if (t->from) {
            struct task_struct *sender = t->from->proc->tsk;

            tr.sender_pid = task_tgid_nr_ns(sender, task_active_pid_ns(current));
        } else {
            tr.sender_pid = 0;
        }
        ......
        // Copy the data to user space
        if (put_user(cmd, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
        if (copy_to_user(ptr, &tr, sizeof(tr)))
            return -EFAULT;
        ptr += sizeof(tr);
        ......
        // Remove the current work item from the queue
        list_del(&t->work.entry);
        t->buffer->allow_user_free = 1;
        if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
            // For a synchronous BR_TRANSACTION, push the transaction onto this thread's stack
            t->to_parent = thread->transaction_stack;
            t->to_thread = thread;
            thread->transaction_stack = t;
        } else {
            // Otherwise the transaction is finished and the work item can be freed
            t->buffer->transaction = NULL;
            kfree(t);
            binder_stats_deleted(BINDER_STAT_TRANSACTION);
        }
        break;
    }
    ......
}
BR_REPLY follows the same flow; the difference is that sending BR_REPLY means the transaction has finished, so the work item can be freed.
After the server receives the BR_TRANSACTION command, it takes out the buffer and processes it; once it is done it sends BC_REPLY back to the Binder driver.
frameworks/native/libs/binder/IPCThreadState.cpp

status_t IPCThreadState::executeCommand(int32_t cmd)
{
    ......
    case BR_TRANSACTION:
        {
            // Read out the transaction data
            binder_transaction_data tr;
            result = mIn.read(&tr, sizeof(tr));
            ......
            Parcel reply;
            ......
            // Let the BBinder parse the data
            if (tr.target.ptr) {
                sp<BBinder> b((BBinder*)tr.cookie);
                error = b->transact(tr.code, buffer, &reply, tr.flags);
            } else {
                error = the_context_object->transact(tr.code, buffer, &reply, tr.flags);
            }

            if ((tr.flags & TF_ONE_WAY) == 0) {
                LOG_ONEWAY("Sending reply to %d!", mCallingPid);
                if (error < NO_ERROR) reply.setError(error);
                // A synchronous transaction requires a BC_REPLY
                sendReply(reply, 0);
            } else {
                LOG_ONEWAY("NOT sending reply to %d!", mCallingPid);
            }
            ......
        }
        break;
    ......
}
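In executeCommand() above, b->transact() eventually lands in the service's onTransact() override via BBinder::transact(). As a rough illustration of where the request is actually handled (BnFoo, IFoo, GET_VALUE, and getValue() are hypothetical names, not part of the source above):

// Hypothetical native service stub: tr.cookie in executeCommand() is the BBinder pointer,
// so the call arrives here.
status_t BnFoo::onTransact(uint32_t code, const Parcel& data,
                           Parcel* reply, uint32_t flags)
{
    switch (code) {
    case GET_VALUE: {
        CHECK_INTERFACE(IFoo, data, reply);   // verify the interface token
        int32_t key = data.readInt32();       // unmarshal the request
        reply->writeInt32(getValue(key));     // marshal the result into the reply Parcel
        return NO_ERROR;                      // sendReply() then issues BC_REPLY
    }
    default:
        return BBinder::onTransact(code, data, reply, flags);
    }
}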
BC_REPLY is also handled by binder_transaction(), just with the reply parameter set. Below, only the parts that differ from the earlier path are analyzed.
drivers/staging/android/binder.c

static void binder_transaction(struct binder_proc *proc,
                   struct binder_thread *thread,
                   struct binder_transaction_data *tr, int reply)
{
    ......
    if (reply) {
        // Pop the transaction from the current thread's stack
        in_reply_to = thread->transaction_stack;
        ......
        thread->transaction_stack = in_reply_to->to_parent;
        // The target thread is the thread that initiated the transaction
        target_thread = in_reply_to->from;
        ......
        target_proc = target_thread->proc;
    } else {
        ......
    }
    if (target_thread) {
        e->to_thread = target_thread->pid;
        target_list = &target_thread->todo;
        target_wait = &target_thread->wait;
    } else {
        ......
    }
    ......
    // A reply transaction has no "from" thread
    if (!reply && !(tr->flags & TF_ONE_WAY))
        t->from = thread;
    else
        t->from = NULL;
    ......
    if (reply) {
        // Pop the transaction from the target thread's stack
        binder_pop_transaction(target_thread, in_reply_to);
    } else if (!(t->flags & TF_ONE_WAY)) {
        ......
    } else {
        ......
    }
    t->work.type = BINDER_WORK_TRANSACTION;
    list_add_tail(&t->work.entry, target_list);
    tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
    list_add_tail(&tcomplete->entry, &thread->todo);
    if (target_wait)
        wake_up_interruptible(target_wait);
    return;
    ......
}
After binder_transaction() has handled BC_REPLY, it likewise adds the work to the queues and wakes up the target. BINDER_WORK_TRANSACTION_COMPLETE causes BR_TRANSACTION_COMPLETE to be returned to the current thread, i.e. the server. BINDER_WORK_TRANSACTION is handled by the target, which at this point is the client. As analyzed above, the driver then returns BR_REPLY to the client.
For every Binder transmission, whether from client to server or from server to client, once the receiving side has received and finished processing the data it releases the transaction buffer with BC_FREE_BUFFER. A synchronous transaction contains two such transmissions: the BC_TRANSACTION issued by the client and the BC_REPLY issued by the server.
For BC_TRANSACTION, the server starts processing the Binder data when it receives the BR_TRANSACTION command and issues BC_FREE_BUFFER to release the buffer once it is done. This release command is not issued directly; it goes through the Parcel's release function: freeBuffer is installed as the release function of the Parcel instance buffer, and it is invoked when the Parcel is destroyed.
frameworks/native/libs/binder/IPCThreadState.cpp

status_t IPCThreadState::executeCommand(int32_t cmd)
{
    ......
    case BR_TRANSACTION:
        {
            binder_transaction_data tr;
            result = mIn.read(&tr, sizeof(tr));
            ......
            Parcel buffer;
            // Install freeBuffer as the Parcel's release function
            buffer.ipcSetDataReference(
                reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                tr.data_size,
                reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                tr.offsets_size/sizeof(binder_size_t),
                freeBuffer, this);
            ......
            sp<BBinder> b((BBinder*)tr.cookie);
            error = b->transact(tr.code, buffer, &reply, tr.flags);
            ......
        }
        break;
    ......
}

......

void IPCThreadState::freeBuffer(Parcel* parcel, const uint8_t* data,
                                size_t /*dataSize*/,
                                const binder_size_t* /*objects*/,
                                size_t /*objectsSize*/, void* /*cookie*/)
{
    ......
    if (parcel != NULL) parcel->closeFileDescriptors();
    // Send the BC_FREE_BUFFER command
    IPCThreadState* state = self();
    state->mOut.writeInt32(BC_FREE_BUFFER);
    state->mOut.writePointer((uintptr_t)data);
}
For BC_REPLY, when the client receives BR_REPLY it either installs freeBuffer as the release function or calls freeBuffer directly.
frameworks/native/libs/binder/IPCThreadState.cpp

status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
    int32_t cmd;
    int32_t err;

    while (1) {
        ......
        case BR_REPLY:
            {
                binder_transaction_data tr;
                err = mIn.read(&tr, sizeof(tr));
                ALOG_ASSERT(err == NO_ERROR, "Not enough command data for brREPLY");
                if (err != NO_ERROR) goto finish;

                if (reply) {
                    if ((tr.flags & TF_STATUS_CODE) == 0) {
                        // Install freeBuffer as the release function
                        reply->ipcSetDataReference(
                            reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                            tr.data_size,
                            reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                            tr.offsets_size/sizeof(binder_size_t),
                            freeBuffer, this);
                    } else {
                        // On error, call freeBuffer directly
                        err = *reinterpret_cast<const status_t*>(tr.data.ptr.buffer);
                        freeBuffer(NULL,
                            reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                            tr.data_size,
                            reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                            tr.offsets_size/sizeof(binder_size_t), this);
                    }
                } else {
                    freeBuffer(NULL,
                        reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                        tr.data_size,
                        reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                        tr.offsets_size/sizeof(binder_size_t), this);
                    continue;
                }
            }
            goto finish;
        ......
    }
    ......
freeBuffer() sends the BC_FREE_BUFFER command to the Binder driver.
drivers/staging/android/binder.c

static void binder_free_buf(struct binder_proc *proc,
                struct binder_buffer *buffer)
{
    size_t size, buffer_size;

    // Get the buffer's size
    buffer_size = binder_buffer_size(proc, buffer);
    size = ALIGN(buffer->data_size, sizeof(void *)) +
        ALIGN(buffer->offsets_size, sizeof(void *));
    ......
    // Update free_async_space for asynchronous transactions
    if (buffer->async_transaction) {
        proc->free_async_space += size + sizeof(struct binder_buffer);
        ......
    }
    // Release the physical pages
    binder_update_page_range(proc, 0,
        (void *)PAGE_ALIGN((uintptr_t)buffer->data),
        (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
        NULL);
    // Erase the buffer from the allocated_buffers tree
    rb_erase(&buffer->rb_node, &proc->allocated_buffers);
    buffer->free = 1;
    // Coalesce with the next buffer if it is free
    if (!list_is_last(&buffer->entry, &proc->buffers)) {
        struct binder_buffer *next = list_entry(buffer->entry.next,
                        struct binder_buffer, entry);

        if (next->free) {
            rb_erase(&next->rb_node, &proc->free_buffers);
            binder_delete_free_buffer(proc, next);
        }
    }
    // Coalesce with the previous buffer if it is free
    if (proc->buffers.next != &buffer->entry) {
        struct binder_buffer *prev = list_entry(buffer->entry.prev,
                        struct binder_buffer, entry);

        if (prev->free) {
            binder_delete_free_buffer(proc, buffer);
            rb_erase(&prev->rb_node, &proc->free_buffers);
            buffer = prev;
        }
    }
    // Insert the coalesced buffer into free_buffers
    binder_insert_free_buffer(proc, buffer);
}

......

static int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
            binder_uintptr_t binder_buffer, size_t size,
            binder_size_t *consumed)
{
    ......
    case BC_FREE_BUFFER: {
        binder_uintptr_t data_ptr;
        struct binder_buffer *buffer;

        // Read the user-space buffer pointer
        if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(binder_uintptr_t);

        // Look up the corresponding binder_buffer in the buffer tree
        buffer = binder_buffer_lookup(proc, data_ptr);
        ......
        if (buffer->transaction) {
            buffer->transaction->buffer = NULL;
            buffer->transaction = NULL;
        }
        // For asynchronous transactions, freeing the buffer moves pending
        // async_todo work onto the thread's todo queue
        if (buffer->async_transaction && buffer->target_node) {
            BUG_ON(!buffer->target_node->has_async_transaction);
            if (list_empty(&buffer->target_node->async_todo))
                buffer->target_node->has_async_transaction = 0;
            else
                list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
        }
        trace_binder_transaction_buffer_release(buffer);
        // Drop the binder reference counts held by the buffer
        binder_transaction_buffer_release(proc, buffer, NULL);
        // Release the buffer's memory
        binder_free_buf(proc, buffer);
        break;
    }
    ......
}
In Binder communication, if the client only wants to send data and does not care about the server's result, it can use an asynchronous transaction. An asynchronous transaction sets the TF_ONE_WAY bit in the transaction flags; the simplified flow is described below.
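From the client's point of view, a oneway call simply passes IBinder::FLAG_ONEWAY to transact(); IPCThreadState then sets TF_ONE_WAY in binder_transaction_data.flags. A minimal sketch for illustration (BpFoo, IFoo, NOTIFY, and notify() are hypothetical names, not part of the source discussed here):

// Hypothetical proxy method issuing an asynchronous (oneway) call.
status_t BpFoo::notify(int32_t value)
{
    Parcel data, reply;
    data.writeInterfaceToken(IFoo::getInterfaceDescriptor());
    data.writeInt32(value);
    // FLAG_ONEWAY becomes TF_ONE_WAY; waitForResponse() only waits for
    // BR_TRANSACTION_COMPLETE, never for a BR_REPLY from the server.
    return remote()->transact(NOTIFY, data, &reply, IBinder::FLAG_ONEWAY);
}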
An asynchronous transaction goes through the same handling in the Binder driver as a synchronous one; here we focus on how the TF_ONE_WAY flag is treated.
drivers/staging/android/binder.c

static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
                          size_t data_size,
                          size_t offsets_size, int is_async)
{
    ......
    // Asynchronous transactions must fit within free_async_space
    if (is_async &&
        proc->free_async_space < size + sizeof(struct binder_buffer)) {
        binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
                 "%d: binder_alloc_buf size %zd failed, no async space left\n",
                  proc->pid, size);
        return NULL;
    }
    ......
    buffer->data_size = data_size;
    buffer->offsets_size = offsets_size;
    // Mark the buffer as asynchronous
    buffer->async_transaction = is_async;
    if (is_async) {
        // Update free_async_space
        proc->free_async_space -= size + sizeof(struct binder_buffer);
        binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
                 "%d: binder_alloc_buf size %zd async free %zd\n",
                  proc->pid, size, proc->free_async_space);
    }
    return buffer;
}

......

static void binder_transaction(struct binder_proc *proc,
                   struct binder_thread *thread,
                   struct binder_transaction_data *tr, int reply)
{
    ......
    // An asynchronous transaction has no "from" thread
    if (!reply && !(tr->flags & TF_ONE_WAY))
        t->from = thread;
    else
        t->from = NULL;
    ......
    // The buffer is allocated with the async flag set
    t->buffer = binder_alloc_buf(target_proc, tr->data_size,
        tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
    ......
    if (reply) {
        ......
    } else if (!(t->flags & TF_ONE_WAY)) {
        ......
    } else {
        // Asynchronous transactions use the async_todo queue
        if (target_node->has_async_transaction) {
            target_list = &target_node->async_todo;
            target_wait = NULL;
        } else
            target_node->has_async_transaction = 1;
    }
    t->work.type = BINDER_WORK_TRANSACTION;
    list_add_tail(&t->work.entry, target_list);
    tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
    list_add_tail(&tcomplete->entry, &thread->todo);
    if (target_wait)
        wake_up_interruptible(target_wait);
    return;
    ......
}

......

static int binder_thread_read(struct binder_proc *proc, struct binder_thread *thread,
                  binder_uintptr_t binder_buffer, size_t size,
                  binder_size_t *consumed, int non_block)
{
    ......
    if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
        ......
    } else {
        // Asynchronous transactions are one-way: no reply is expected
        t->buffer->transaction = NULL;
        kfree(t);
        binder_stats_deleted(BINDER_STAT_TRANSACTION);
    }
    break;
    ......
}