Binder Driver Analysis

2018-06-07  Little熊猫

1. The binder thread pool

When an HIDL service starts, it has to configure its binder thread pool:
configureRpcThreadpool(10, true);
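
For context, a minimal sketch of a typical HIDL service entry point making this call (IFoo/Foo and their headers are placeholders, not from this article):

#include <hidl/HidlTransportSupport.h>

using android::hardware::configureRpcThreadpool;
using android::hardware::joinRpcThreadpool;

int main() {
    // Allow up to 10 binder threads; "true" means the calling thread
    // itself joins the pool as well (callerWillJoin).
    configureRpcThreadpool(10, true /* callerWillJoin */);

    android::sp<IFoo> service = new Foo();  // placeholder service
    service->registerAsService();

    joinRpcThreadpool();  // does not return
    return 0;
}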
In the driver, this request lands in the BINDER_SET_MAX_THREADS case of binder_ioctl:

    case BINDER_SET_MAX_THREADS: {
        int max_threads;

        if (copy_from_user(&max_threads, ubuf,
                   sizeof(max_threads))) {
            ret = -EINVAL;
            goto err;
        }
        binder_inner_proc_lock(proc);
        proc->max_threads = max_threads;
        binder_inner_proc_unlock(proc);
        break;
    }
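
On the user-space side, the framework binder issues this ioctl as soon as the process opens /dev/binder; a condensed sketch of ProcessState::open_driver (version check and error handling omitted; DEFAULT_MAX_BINDER_THREADS is 15 there). For hwbinder, configureRpcThreadpool triggers the equivalent call.

static int open_driver()
{
    int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
    if (fd >= 0) {
        // Tell the driver how many extra looper threads it may
        // request from this process.
        size_t maxThreads = DEFAULT_MAX_BINDER_THREADS;
        ioctl(fd, BINDER_SET_MAX_THREADS, &maxThreads);
    }
    return fd;
}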

proc->max_threads is set to different values by different processes, sometimes 1, sometimes 10. What should this value be, and what does it actually mean?
At the end of the driver's binder_thread_read, once the read loop has finished, we find the following:


done:

    *consumed = ptr - buffer;
    binder_inner_proc_lock(proc);
    if (proc->requested_threads == 0 &&
        list_empty(&thread->proc->waiting_threads) &&
        proc->requested_threads_started < proc->max_threads &&
        (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
         BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
         /*spawn a new thread if we leave this out */) {
        proc->requested_threads++;
        binder_inner_proc_unlock(proc);
        binder_debug(BINDER_DEBUG_THREADS,
                 "%d:%d BR_SPAWN_LOOPER\n",
                 proc->pid, thread->pid);
        if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
            return -EFAULT;
        binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
    } else
        binder_inner_proc_unlock(proc);
    return 0;

The condition is:

    if (proc->requested_threads == 0 &&
        list_empty(&thread->proc->waiting_threads) &&
        proc->requested_threads_started < proc->max_threads &&
        (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
         BINDER_LOOPER_STATE_ENTERED)))

proc->requested_threads is either 0 or 1, 0 by default; it counts spawn requests that have been issued but not yet answered by BC_REGISTER_LOOPER. list_empty(&thread->proc->waiting_threads) means no thread is currently waiting for work, i.e. all of them are busy. And proc->requested_threads_started must still be below the configured maximum. Taken together: if every thread is busy and the pool has not yet reached max_threads, the threads cannot keep up, so the driver asks user space to start another one to help. It does this by returning the BR_SPAWN_LOOPER command to the user layer, which spawns the thread in the function below:

void ProcessState::spawnPooledThread(bool isMain)
{
    if (mThreadPoolStarted) {
        String8 name = makeBinderThreadName();
        ALOGV("Spawning new pooled thread, name=%s\n", name.string());
        sp<Thread> t = new PoolThread(isMain);
        t->run(name.string());
    }
}
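
PoolThread itself is a thin wrapper; paraphrasing its definition in ProcessState.cpp, its threadLoop simply enters the binder loop:

class PoolThread : public Thread
{
public:
    explicit PoolThread(bool isMain) : mIsMain(isMain) {}

protected:
    virtual bool threadLoop()
    {
        // Sends BC_ENTER_LOOPER (isMain) or BC_REGISTER_LOOPER and
        // then services transactions until the pool shuts down.
        IPCThreadState::self()->joinThreadPool(mIsMain);
        return false;  // run once; joinThreadPool only returns on exit
    }

    const bool mIsMain;
};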

This shows up in the binder stats dump (e.g. /sys/kernel/debug/binder/stats):

proc 2941
context hwbinder
  threads: 4
  requested threads: 0+3/9
  ready threads 4
  free async space 520192
  nodes: 1
  refs: 1 s 1 w 1
  buffers: 0
  pending transactions: 0
  BC_FREE_BUFFER: 682
  BC_INCREFS: 1
  BC_ACQUIRE: 1
  BC_INCREFS_DONE: 1
  BC_ACQUIRE_DONE: 1
  BC_REGISTER_LOOPER: 3
  BC_ENTER_LOOPER: 1
  BC_TRANSACTION_SG: 1
  BC_REPLY_SG: 25
  BR_TRANSACTION: 681
  BR_REPLY: 1
  BR_TRANSACTION_COMPLETE: 26
  BR_INCREFS: 1
  BR_ACQUIRE: 1
  BR_SPAWN_LOOPER: 3

requested threads: 0+3/9 reads as requested_threads + requested_threads_started / max_threads: no spawn request is currently outstanding, 3 threads have been started so far, and at most 9 may be started. (This agrees with the BR_SPAWN_LOOPER: 3 and BC_REGISTER_LOOPER: 3 counters above.)
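
The print in the driver (print_binder_proc_stats in binder.c) makes the format explicit:

seq_printf(m, "  requested threads: %d+%d/%d\n",
           proc->requested_threads,
           proc->requested_threads_started,
           proc->max_threads);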

2. The todo lists

Both binder_thread and binder_proc carry a todo list. What is the difference between the two, and how are they related?

static bool binder_proc_transaction(struct binder_transaction *t,
                    struct binder_proc *proc,
                    struct binder_thread *thread)
{
    ...
    if (!thread && !target_list)
        thread = binder_select_thread_ilocked(proc);

    if (thread) {
        target_list = &thread->todo;
        binder_transaction_priority(thread->task, t, node_prio,
                        node->inherit_rt);
    } else if (!target_list) {
        target_list = &proc->todo;
    }
    ...
}

If there is an idle thread, the work is queued on that thread's todo; otherwise it goes on the process-level todo, where whichever thread frees up first will pick it up.
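
How an "idle thread" is found: binder_select_thread_ilocked just pops the first entry, if any, off proc->waiting_threads (same era of the driver as above):

static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
    struct binder_thread *thread;

    assert_spin_locked(&proc->inner_lock);
    /* take the first thread that is waiting for work, if any */
    thread = list_first_entry_or_null(&proc->waiting_threads,
                      struct binder_thread,
                      waiting_thread_node);

    if (thread)
        list_del_init(&thread->waiting_thread_node);

    return thread;
}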

3. The binder transact flow in the driver

[Figure: binder_transaction.png]

First, the client sends BC_TRANSACTION to the driver, which handles it in binder_thread_write.

binder_thread_write queues BINDER_WORK_TRANSACTION_COMPLETE on the client's todo and BINDER_WORK_TRANSACTION on the server's todo. The client then blocks in binder_thread_read.

The server wakes up, consumes BINDER_WORK_TRANSACTION in binder_thread_read, and the driver returns BR_TRANSACTION to the server.

Once the server has processed the call, sendReply sends BC_REPLY to the driver, again a write followed by a read.

binder_thread_write queues BINDER_WORK_TRANSACTION_COMPLETE on the server's todo, queues BINDER_WORK_TRANSACTION on the client's todo and wakes the client, then the server enters binder_thread_read.

The client wakes up, consumes BINDER_WORK_TRANSACTION_COMPLETE (returning BR_TRANSACTION_COMPLETE to user space) and then BINDER_WORK_TRANSACTION (returning BR_REPLY).
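
On the client's user-space side this sequence is driven by the command loop in IPCThreadState::waitForResponse; a heavily condensed excerpt (most commands and all error handling omitted):

while (1) {
    if ((err = talkWithDriver()) < NO_ERROR) break;  // ioctl(BINDER_WRITE_READ)
    cmd = (uint32_t)mIn.readInt32();
    switch (cmd) {
    case BR_TRANSACTION_COMPLETE:
        if (!reply && !acquireResult) goto finish;   // oneway: done here
        break;                                       // sync: keep waiting
    case BR_REPLY:
        // ... unpack the reply buffer into *reply ...
        goto finish;
    default:
        err = executeCommand(cmd);                   // e.g. an incoming BR_TRANSACTION
        break;
    }
}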

4. Binder data transfer

[Figure: binder_cmd.png]

In a transaction the cmd word is followed by a binder_transaction_data, whose layout is:

struct binder_transaction_data {
    /* The first two are only used for bcTRANSACTION and brTRANSACTION,
     * identifying the target and contents of the transaction.
     */
    union {
        /* target descriptor of command transaction */
        __u32   handle;
        /* target descriptor of return transaction */
        binder_uintptr_t ptr;
    } target;
    binder_uintptr_t    cookie; /* target object cookie */
    __u32       code;       /* transaction command */

    /* General information about the transaction. */
    __u32           flags;
    pid_t       sender_pid;
    uid_t       sender_euid;
    binder_size_t   data_size;  /* number of bytes of data */
    binder_size_t   offsets_size;   /* number of bytes of offsets */

    /* If this transaction is inline, the data immediately
     * follows here; otherwise, it ends with a pointer to
     * the data buffer.
     */
    union {
        struct {
            /* transaction data */
            binder_uintptr_t    buffer;
            /* offsets from buffer to flat_binder_object structs */
            binder_uintptr_t    offsets;
        } ptr;
        __u8    buf[8];
    } data;
};
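
This struct is filled in on the user-space side; condensed from IPCThreadState::writeTransactionData (the error-status branch is omitted):

status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
    int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
    binder_transaction_data tr;

    tr.target.ptr = 0;
    tr.target.handle = handle;            // which remote object
    tr.code = code;                       // which method on it
    tr.flags = binderFlags;
    tr.cookie = 0;
    tr.sender_pid = 0;                    // filled in by the driver
    tr.sender_euid = 0;

    tr.data_size = data.ipcDataSize();    // payload stays in the Parcel
    tr.data.ptr.buffer = data.ipcData();
    tr.offsets_size = data.ipcObjectsCount() * sizeof(binder_size_t);
    tr.data.ptr.offsets = data.ipcObjects();

    mOut.writeInt32(cmd);                 // BC_TRANSACTION / BC_REPLY
    mOut.write(&tr, sizeof(tr));
    return NO_ERROR;
}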

Two copies happen during transmission: one copies the payload that tr.data.ptr.buffer points at into the target's binder_buffer, and the other copies the offsets array that tr.data.ptr.offsets points at, right behind it.
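
The corresponding code in binder_transaction (4.14-era driver, condensed):

if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
           tr->data.ptr.buffer, tr->data_size)) {
    /* ... fail the transaction ... */
}
offp = (binder_size_t *)(t->buffer->data +
            ALIGN(tr->data_size, sizeof(void *)));
if (copy_from_user(offp, (const void __user *)(uintptr_t)
           tr->data.ptr.offsets, tr->offsets_size)) {
    /* ... fail the transaction ... */
}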


[Figure: binder_transaction_data.png]

The offsets locate flat_binder_object entries inside the buffer:

struct flat_binder_object {
    struct binder_object_header hdr;
    __u32               flags;

    /* 8 bytes of data. */
    union {
        binder_uintptr_t    binder; /* local object */
        __u32           handle; /* remote object */
    };

    /* extra data associated with local object */
    binder_uintptr_t    cookie;
};
struct binder_object_header {
    __u32        type;
};

The possible type values are:

enum {
    BINDER_TYPE_BINDER  = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
    BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
    BINDER_TYPE_HANDLE  = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
    BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
    BINDER_TYPE_FD      = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
    BINDER_TYPE_FDA     = B_PACK_CHARS('f', 'd', 'a', B_TYPE_LARGE),
    BINDER_TYPE_PTR     = B_PACK_CHARS('p', 't', '*', B_TYPE_LARGE),
};

What writeStrongBinder writes into the Parcel is exactly such a flat_binder_object.
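
Condensed from flatten_binder in Parcel.cpp (null-binder and error paths omitted): a local object is written as BINDER_TYPE_BINDER, a proxy as BINDER_TYPE_HANDLE, and the driver translates between the two.

status_t flatten_binder(const sp<ProcessState>& /*proc*/,
    const sp<IBinder>& binder, Parcel* out)
{
    flat_binder_object obj;
    obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;

    if (IBinder *local = binder->localBinder()) {
        // Local object: pass real pointers; the driver keys its
        // binder_node on them.
        obj.hdr.type = BINDER_TYPE_BINDER;
        obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs());
        obj.cookie = reinterpret_cast<uintptr_t>(local);
    } else {
        // Proxy: pass the handle; the driver rewrites it so it is
        // valid in the receiver's handle table.
        BpBinder *proxy = binder->remoteBinder();
        obj.hdr.type = BINDER_TYPE_HANDLE;
        obj.binder = 0;
        obj.handle = proxy ? proxy->handle() : 0;
        obj.cookie = 0;
    }
    return finish_flatten_binder(binder, obj, out);
}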
When the receiver reads the binder buffer, no further copy is made; the driver only adjusts pointers and copies the binder_transaction_data itself back to user space:

tr.data.ptr.buffer = (binder_uintptr_t)
            ((uintptr_t)t->buffer->data +
            binder_alloc_get_user_buffer_offset(&proc->alloc));

tr.data.ptr.offsets = tr.data.ptr.buffer +
            ALIGN(t->buffer->data_size,
                sizeof(void *));
ptr += sizeof(uint32_t);
if (copy_to_user(ptr, &tr, sizeof(tr))) {
    if (t_from)
        binder_thread_dec_tmpref(t_from);

    binder_cleanup_transaction(t, "copy_to_user failed",
                   BR_FAILED_REPLY);

    return -EFAULT;
}

In other words, the payload buffer is copied exactly once over the whole transfer: from the sender's user space into the binder_buffer, which sits in a region the receiver has mmap-ed, so after the address fix-up above the receiver reads it in place. Once the buffer has been consumed, IPCThreadState::freeBuffer releases it:

void IPCThreadState::freeBuffer(Parcel* parcel, const uint8_t* data,
                                size_t /*dataSize*/,
                                const binder_size_t* /*objects*/,
                                size_t /*objectsSize*/, void* /*cookie*/)
{
    //ALOGI("Freeing parcel %p", &parcel);
    IF_LOG_COMMANDS() {
        alog << "Writing BC_FREE_BUFFER for " << data << endl;
    }
    ALOG_ASSERT(data != NULL, "Called with NULL data");
    if (parcel != NULL) parcel->closeFileDescriptors();
    IPCThreadState* state = self();
    state->mOut.writeInt32(BC_FREE_BUFFER);
    state->mOut.writePointer((uintptr_t)data);
}
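
freeBuffer is installed as the Parcel's release callback (via ipcSetDataReference) when the reply is read, so when that Parcel is destroyed, BC_FREE_BUFFER is queued in mOut and flushed to the driver on the next talkWithDriver, and the driver returns the binder_buffer to the target process's allocator.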