How Android Binder starts and obtains service_manager

2020-12-19  zzq_nene

I. Starting the service_manager service

ServiceManager is itself a service, and its handle is 0.
ServiceManager exists so that clients can obtain the Binder objects of services such as AMS and PMS; with the IBinder of AMS in hand, a client can communicate with AMS across processes. When a service such as AMS starts, it registers its IBinder with ServiceManager; a client first locates ServiceManager, asks it for the AMS IBinder, and then uses that IBinder to talk to AMS across processes.
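A minimal client-side sketch of this lookup, using the libbinder C++ API (assuming AMS registers under the name "activity"; how defaultServiceManager itself is built is covered in section II):

#include <binder/IServiceManager.h>  // defaultServiceManager(), IServiceManager
#include <binder/IBinder.h>
#include <utils/String16.h>

using namespace android;

sp<IBinder> lookupActivityManager() {
    // defaultServiceManager() returns the BpServiceManager wrapped around the
    // handle-0 BpBinder described later in this article.
    sp<IServiceManager> sm = defaultServiceManager();
    // getService() asks service_manager for the IBinder that AMS registered.
    sp<IBinder> am = sm->getService(String16("activity"));
    return am;  // the client talks to AMS through this proxy from now on
}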
service_manager itself is started by init: system/core/rootdir/init.rc contains the entry
service servicemanager /system/bin/servicemanager, and when init runs it, the main function in native/cmds/servicemanager/service_manager.c executes.

service servicemanager /system/bin/servicemanager
    class core
    user system
    group system
    critical
    onrestart restart healthd
    onrestart restart zygote
    onrestart restart media
    onrestart restart surfaceflinger
    onrestart restart drm

1. The main function in native/cmds/servicemanager/service_manager.c

This is where service_manager (SM) gets registered; the overall flow is:

  • Create a binder_node structure
  • Add the binder_node to proc->nodes
  • Create the work and todo queues that hold the messages the client and server sides need to process
  • Write the ENTERED state into thread->looper
  • Read data, update the thread->looper state, and call ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread)); so that SM blocks waiting for work
int main(int argc, char **argv)
{
    struct binder_state *bs;
    // This binder_open is the native-layer binder_open, not the driver-level one:
    // it calls the binder_open function in native/cmds/servicemanager/binder.c.
    // Inside that function, open() ends up in binder_open of the kernel binder.c,
    // and mmap() ends up in binder_mmap of the kernel binder.c.
    // SM only maps 128K of memory.
    // Two things happen here: 1. open the driver; 2. map SM's address space against the Binder driver
    bs = binder_open(128*1024);
    if (!bs) {
        ALOGE("failed to open binder driver\n");
        return -1;
    }
    // Make service_manager the context manager (the daemon that manages all system services)
    if (binder_become_context_manager(bs)) {
        ALOGE("cannot become context manager (%s)\n", strerror(errno));
        return -1;
    }

    selinux_enabled = is_selinux_enabled();
    sehandle = selinux_android_service_context_handle();
    selinux_status_open(true);

    if (selinux_enabled > 0) {
        if (sehandle == NULL) {
            ALOGE("SELinux: Failed to acquire sehandle. Aborting.\n");
            abort();
        }

        if (getcon(&service_manager_context) != 0) {
            ALOGE("SELinux: Failed to acquire service_manager context. Aborting.\n");
            abort();
        }
    }

    union selinux_callback cb;
    cb.func_audit = audit_callback;
    selinux_set_callback(SELINUX_CB_AUDIT, cb);
    cb.func_log = selinux_log_callback;
    selinux_set_callback(SELINUX_CB_LOG, cb);

    // Enter an infinite loop that keeps listening for requests to ServiceManager
    // and processes incoming data
    binder_loop(bs, svcmgr_handler);

    return 0;
}

2.native/cmds/servicemanager/binder.c#binder_open

The main function in native/cmds/servicemanager/service_manager.c calls binder_open; step into that function.

struct binder_state *binder_open(size_t mapsize)
{
    struct binder_state *bs;
    struct binder_version vers;

    bs = malloc(sizeof(*bs));
    if (!bs) {
        errno = ENOMEM;
        return NULL;
    }

    // Open the Binder driver device.
    // Through the native-to-kernel mapping, this open() call lands in binder_open of the kernel binder.c
    bs->fd = open("/dev/binder", O_RDWR);
    if (bs->fd < 0) {
        fprintf(stderr,"binder: cannot open device (%s)\n",
                strerror(errno));
        goto fail_open;
    }

    if ((ioctl(bs->fd, BINDER_VERSION, &vers) == -1) ||
        (vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)) {
        fprintf(stderr,
                "binder: kernel driver version (%d) differs from user space version (%d)\n",
                vers.protocol_version, BINDER_CURRENT_PROTOCOL_VERSION);
        goto fail_open;
    }

    bs->mapsize = mapsize;
    // Map SM's virtual memory against the Binder driver's virtual memory.
    // Through the native-to-kernel mapping, this mmap() call lands in binder_mmap of the kernel binder.c
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
    if (bs->mapped == MAP_FAILED) {
        fprintf(stderr,"binder: cannot map device (%s)\n",
                strerror(errno));
        goto fail_map;
    }

    return bs;

fail_map:
    close(bs->fd);
fail_open:
    free(bs);
    return NULL;
}
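The reason the native open/mmap/ioctl calls keep landing in the driver's binder_open/binder_mmap/binder_ioctl is that the driver registers /dev/binder as a misc character device with a file_operations table. A trimmed sketch of that registration (field set as in the legacy kernel driver; the real binder_init also sets up debugfs entries):

// Trimmed sketch of the legacy driver's registration (kernel binder.c);
// every user-space syscall on /dev/binder is dispatched through this table.
static const struct file_operations binder_fops = {
    .owner = THIS_MODULE,
    .poll = binder_poll,
    .unlocked_ioctl = binder_ioctl,   // ioctl() from user space -> binder_ioctl()
    .compat_ioctl = binder_ioctl,
    .mmap = binder_mmap,              // mmap() from user space  -> binder_mmap()
    .open = binder_open,              // open() from user space  -> binder_open()
    .flush = binder_flush,
    .release = binder_release,
};

static struct miscdevice binder_miscdev = {
    .minor = MISC_DYNAMIC_MINOR,
    .name = "binder",                 // exposed to user space as /dev/binder
    .fops = &binder_fops,
};

static int __init binder_init(void)
{
    // Simplified: register the misc device so the table above takes effect
    return misc_register(&binder_miscdev);
}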

3.native/cmds/servicemanager/binder.c#binder_become_context_manager

The main function in native/cmds/servicemanager/service_manager.c then calls binder_become_context_manager, which simply calls ioctl. This native ioctl maps to binder_ioctl in the kernel (driver) binder.c, so the call goes straight into the driver with the command BINDER_SET_CONTEXT_MGR, and binder_ioctl handles the BINDER_SET_CONTEXT_MGR branch.

int binder_become_context_manager(struct binder_state *bs)
{
    // Directly call ioctl with the BINDER_SET_CONTEXT_MGR command;
    // this ends up in binder_ioctl of the kernel binder.c
    return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}

Now look at the driver-level binder_ioctl: the ioctl call in the native binder.c maps to the binder_ioctl function in the kernel binder.c.

/**
 * Mainly handles read/write requests
 */
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    int ret;
    struct binder_proc *proc = filp->private_data;
    struct binder_thread *thread;
    unsigned int size = _IOC_SIZE(cmd);
    void __user *ubuf = (void __user *)arg;

    /*pr_info("binder_ioctl: %d:%d %x %lx\n",
            proc->pid, current->pid, cmd, arg);*/

    trace_binder_ioctl(cmd, arg);

    // Interruptible wait; under normal conditions binder_stop_on_user_error < 2 holds, so it does not block here
    ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
    if (ret)
        goto err_unlocked;

    binder_lock(__func__);
    thread = binder_get_thread(proc);
    if (thread == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    switch (cmd) {
    case BINDER_WRITE_READ:
        // Read/write command -- issued when user space calls ioctl(BINDER_WRITE_READ)
        ret = binder_ioctl_write_read(filp, cmd, arg, thread);
        if (ret)
            goto err;
        break;
    case BINDER_SET_MAX_THREADS:
        if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
            ret = -EINVAL;
            goto err;
        }
        break;
    case BINDER_SET_CONTEXT_MGR:
        ret = binder_ioctl_set_ctx_mgr(filp);
        if (ret)
            goto err;
        break;
    case BINDER_THREAD_EXIT:
        binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
                 proc->pid, thread->pid);
        binder_free_thread(proc, thread);
        thread = NULL;
        break;
    case BINDER_VERSION: {
        struct binder_version __user *ver = ubuf;

        if (size != sizeof(struct binder_version)) {
            ret = -EINVAL;
            goto err;
        }
        if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
                 &ver->protocol_version)) {
            ret = -EINVAL;
            goto err;
        }
        break;
    }
    default:
        ret = -EINVAL;
        goto err;
    }
    ret = 0;
err:
    if (thread)
        thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
    binder_unlock(__func__);
    wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
    if (ret && ret != -ERESTARTSYS)
        pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
    trace_binder_ioctl_done(ret);
    return ret;
}

As shown above, the BINDER_SET_CONTEXT_MGR command mainly calls binder_ioctl_set_ctx_mgr.
Look at kernel/drivers/staging/android/binder.c#binder_ioctl_set_ctx_mgr:
this is where the service_manager entity is created; the node produced by binder_new_node represents the service_manager service.

/**
 * Created only once
 */
static int binder_ioctl_set_ctx_mgr(struct file *filp)
{
    int ret = 0;
    struct binder_proc *proc = filp->private_data;
    struct binder_context *context = proc->context;

    kuid_t curr_euid = current_euid();

    // If the context manager node already exists, return immediately
    if (context->binder_context_mgr_node) {
        pr_err("BINDER_SET_CONTEXT_MGR already set\n");
        ret = -EBUSY;
        goto out;
    }
    ret = security_binder_set_context_mgr(proc->tsk);
    if (ret < 0)
        goto out;
    // On the first call SM's uid is still invalid
    if (uid_valid(context->binder_context_mgr_uid)) {
        if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
            pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
                   from_kuid(&init_user_ns, curr_euid),
                   from_kuid(&init_user_ns,
                     context->binder_context_mgr_uid));
            ret = -EPERM;
            goto out;
        }
    } else {
        // Record the uid
        context->binder_context_mgr_uid = curr_euid;
    }
    // Create the SM entity: the node returned by binder_new_node represents service_manager.
    // The node is added to proc->nodes and initialized
    // (its memory is allocated first).
    // context->binder_context_mgr_node is the SM entity
    context->binder_context_mgr_node = binder_new_node(proc, 0, 0);
    if (!context->binder_context_mgr_node) {
        ret = -ENOMEM;
        goto out;
    }
    context->binder_context_mgr_node->local_weak_refs++;
    context->binder_context_mgr_node->local_strong_refs++;
    context->binder_context_mgr_node->has_strong_ref = 1;
    context->binder_context_mgr_node->has_weak_ref = 1;
out:
    return ret;
}

The binder_node created here is the SM entity; it is produced by kernel/drivers/staging/android/binder.c#binder_new_node:

/**
 * 1. Creates a binder_node structure
 * 2. Links it into proc->nodes
 * 3. Initializes the work and async_todo queues, which the client and server sides
 *    keep reading messages from, similar to the MessageQueue behind a Handler
 */
static struct binder_node *binder_new_node(struct binder_proc *proc,
                       binder_uintptr_t ptr,
                       binder_uintptr_t cookie)
{
    struct rb_node **p = &proc->nodes.rb_node;
    struct rb_node *parent = NULL;
    struct binder_node *node;

    while (*p) {
        parent = *p;
        node = rb_entry(parent, struct binder_node, rb_node);

        if (ptr < node->ptr)
            p = &(*p)->rb_left;
        else if (ptr > node->ptr)
            p = &(*p)->rb_right;
        else
            return NULL;
    }
    // Allocate the node
    node = kzalloc(sizeof(*node), GFP_KERNEL);
    if (node == NULL)
        return NULL;
    binder_stats_created(BINDER_STAT_NODE);
    rb_link_node(&node->rb_node, parent, p);
    // Insert the node into the proc->nodes red-black tree
    rb_insert_color(&node->rb_node, &proc->nodes);
    // Initialize the node
    node->debug_id = ++binder_last_id;
    node->proc = proc;
    node->ptr = ptr;
    node->cookie = cookie;
    node->work.type = BINDER_WORK_NODE;
    // work and async_todo are the message queues that the client and server sides drain;
    // two queues are needed because both sides keep reading messages
    INIT_LIST_HEAD(&node->work.entry);
    INIT_LIST_HEAD(&node->async_todo);
    binder_debug(BINDER_DEBUG_INTERNAL_REFS,
             "%d:%d node %d u%016llx c%016llx created\n",
             proc->pid, current->pid, node->debug_id,
             (u64)node->ptr, (u64)node->cookie);
    return node;
}
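For reference, these are the binder_node fields the code above touches (an abbreviated sketch; the real structure in the legacy driver has a few more members):

struct binder_node {
    int debug_id;
    struct binder_work work;          // work.type / work.entry: the node's pending work item
    struct rb_node rb_node;           // links the node into proc->nodes
    struct binder_proc *proc;         // owning process (the service_manager process here)
    struct hlist_head refs;           // references other processes hold to this node
    int internal_strong_refs;
    int local_weak_refs;
    int local_strong_refs;
    binder_uintptr_t ptr;             // user-space pointer; 0 for the context manager
    binder_uintptr_t cookie;          // user-space cookie; 0 for the context manager
    unsigned has_strong_ref:1;
    unsigned pending_strong_ref:1;
    unsigned has_weak_ref:1;
    unsigned pending_weak_ref:1;
    struct list_head async_todo;      // queue of pending asynchronous (one-way) work
};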

4.native/cmds/servicemanager/binder.c#binder_loop

After main in the native service_manager.c has opened the driver and created the service_manager entity through binder_ioctl, it calls binder_loop.
Initially, binder_loop does two things: it writes the BC_ENTER_LOOPER command via binder_write, and then loops forever, reading incoming work with ioctl(BINDER_WRITE_READ) and dispatching it through binder_parse:

/**
 * Loops processing incoming data
 */
void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    // binder_write_read is defined in the kernel-side binder.h
    struct binder_write_read bwr;
    uint32_t readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    // The BC_ENTER_LOOPER command: enter the loop
    readbuf[0] = BC_ENTER_LOOPER;
    // Write readbuf to the driver.
    // This is the write half:
    // writing the BC_ENTER_LOOPER command makes the driver set the state of thread->looper
    binder_write(bs, readbuf, sizeof(uint32_t));

    // Infinite loop that keeps reading data
    for (;;) {
        // On entering the for loop, write_size is 0
        // while read_size is non-zero
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;
        // Again issue the BINDER_WRITE_READ command, entering the kernel binder_ioctl.
        // This is the read half
        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);

        if (res < 0) {
            ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
            break;
        }

        res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
        if (res == 0) {
            ALOGE("binder_loop: unexpected reply?!\n");
            break;
        }
        if (res < 0) {
            ALOGE("binder_loop: io error %d %s\n", res, strerror(errno));
            break;
        }
    }
}

The binder_write_read structure

/*
 * On 64-bit platforms where user code may run in 32-bits the driver must
 * translate the buffer (and local binder) addresses appropriately.
 */

struct binder_write_read {
    binder_size_t       write_size; /* bytes to write */
    binder_size_t       write_consumed; /* bytes consumed by driver */
    binder_uintptr_t    write_buffer;
    binder_size_t       read_size;  /* bytes to read */
    binder_size_t       read_consumed;  /* bytes consumed by driver */
    binder_uintptr_t    read_buffer;
};
(1) Writing the BC_ENTER_LOOPER command

binder_loop in native/cmds/servicemanager/binder.c calls binder_write with readbuf, which holds a single command. binder_write stores that command in the write_buffer of a binder_write_read object and then calls the driver-level binder_ioctl, taking the BINDER_WRITE_READ branch; because write_buffer now holds data, the write path of binder_ioctl runs.

/**
 * Writes data
 */
int binder_write(struct binder_state *bs, void *data, size_t len)
{
    struct binder_write_read bwr;
    int res;
    // We are writing data, so write_size is non-zero
    bwr.write_size = len;
    bwr.write_consumed = 0;
    // data is the readbuf command data; store it in the write_buffer
    // of the binder_write_read object
    bwr.write_buffer = (uintptr_t) data;
    // Only writing, so all read fields are 0
    bwr.read_size = 0;
    bwr.read_consumed = 0;
    bwr.read_buffer = 0;
    // After initializing binder_write_read, call ioctl with BINDER_WRITE_READ;
    // this reaches the kernel binder_ioctl and triggers its BINDER_WRITE_READ branch
    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
    if (res < 0) {
        fprintf(stderr,"binder_write: ioctl failed (%s)\n",
                strerror(errno));
    }
    return res;
}

binder_write in native/cmds/servicemanager/binder.c calls ioctl to write the enter-loop command; this reaches binder_ioctl in the kernel binder.c, takes the BINDER_WRITE_READ branch, and finally calls binder_ioctl_write_read:

/**
 * binder_ioctl
 * read/write handling
 */
static int binder_ioctl_write_read(struct file *filp,
                unsigned int cmd, unsigned long arg,
                struct binder_thread *thread)
{
    int ret = 0;
    struct binder_proc *proc = filp->private_data;
    unsigned int size = _IOC_SIZE(cmd);
    void __user *ubuf = (void __user *)arg;
    struct binder_write_read bwr;

    if (size != sizeof(struct binder_write_read)) {
        ret = -EINVAL;
        goto out;
    }
    // What is copied here is not the payload but the binder_write_read header
    // (i.e. the header describing readbuf[0] = BC_ENTER_LOOPER), copied from user space into kernel space
    if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
        ret = -EFAULT;
        goto out;
    }
    binder_debug(BINDER_DEBUG_READ_WRITE,
             "%d:%d write %lld at %016llx, read %lld at %016llx\n",
             proc->pid, thread->pid,
             (u64)bwr.write_size, (u64)bwr.write_buffer,
             (u64)bwr.read_size, (u64)bwr.read_buffer);
    // Check whether there is data to write.
    // bwr here is the binder_write_read object initialized in
    // native/cmds/servicemanager/binder.c#binder_write:
    // its write_buffer points at readbuf[0] = BC_ENTER_LOOPER
    // and write_size is greater than 0
    if (bwr.write_size > 0) {
        // Handle the command (here BC_ENTER_LOOPER)
        // inside binder_thread_write
        ret = binder_thread_write(proc, thread,
                      bwr.write_buffer,
                      bwr.write_size,
                      &bwr.write_consumed);
        trace_binder_write_done(ret);
        if (ret < 0) {
            bwr.read_consumed = 0;
            if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                ret = -EFAULT;
            goto out;
        }
    }
    // Check whether there is data to read
    if (bwr.read_size > 0) {
        ret = binder_thread_read(proc, thread, bwr.read_buffer,
                     bwr.read_size,
                     &bwr.read_consumed,
                     filp->f_flags & O_NONBLOCK);
        trace_binder_read_done(ret);
        if (!list_empty(&proc->todo))
            wake_up_interruptible(&proc->wait);
        if (ret < 0) {
            if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                ret = -EFAULT;
            goto out;
        }
    }
    binder_debug(BINDER_DEBUG_READ_WRITE,
             "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
             proc->pid, thread->pid,
             (u64)bwr.write_consumed, (u64)bwr.write_size,
             (u64)bwr.read_consumed, (u64)bwr.read_size);
    // Copy the result from kernel space back to user space
    if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
        ret = -EFAULT;
        goto out;
    }
out:
    return ret;
}

In the binder_loop flow the BC_ENTER_LOOPER command is written first, so the write_buffer of binder_write_read is set and write_size > 0; binder_ioctl_write_read in the driver's binder.c therefore takes the bwr.write_size > 0 branch and calls binder_thread_write.
binder_thread_write in kernel/drivers/staging/android/binder.c handles the BC_ENTER_LOOPER command with the code below,
which simply sets the state of thread->looper:

        case BC_ENTER_LOOPER:
            binder_debug(BINDER_DEBUG_THREADS,
                     "%d:%d BC_ENTER_LOOPER\n",
                     proc->pid, thread->pid);
            if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
                thread->looper |= BINDER_LOOPER_STATE_INVALID;
                binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
                    proc->pid, thread->pid);
            }
            thread->looper |= BINDER_LOOPER_STATE_ENTERED;
            break;
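thread->looper is a bitmask of looper-state flags; the ones that appear in this walkthrough are declared in the legacy driver roughly as follows:

enum {
    BINDER_LOOPER_STATE_REGISTERED  = 0x01, // set via BC_REGISTER_LOOPER (driver-requested thread)
    BINDER_LOOPER_STATE_ENTERED     = 0x02, // set via BC_ENTER_LOOPER (the SM main thread here)
    BINDER_LOOPER_STATE_EXITED      = 0x04,
    BINDER_LOOPER_STATE_INVALID     = 0x08, // protocol misuse, e.g. ENTER after REGISTER
    BINDER_LOOPER_STATE_WAITING     = 0x10, // set while blocked in binder_thread_read
    BINDER_LOOPER_STATE_NEED_RETURN = 0x20, // thread must return to user space
};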
(2) The read loop after BC_ENTER_LOOPER

Once the write has completed, the for loop in native/cmds/servicemanager/binder.c#binder_loop keeps calling ioctl: it initializes the read half of binder_write_read and issues the BINDER_WRITE_READ command again. This time read_size > 0 while write_size is 0, so binder_ioctl in kernel/drivers/staging/android/binder.c again dispatches BINDER_WRITE_READ to binder_ioctl_write_read, and because read_size > 0 the call ends up in binder_thread_read.

/**
 * binder_ioctl
 * read/write handling
 */
static int binder_ioctl_write_read(struct file *filp,
                unsigned int cmd, unsigned long arg,
                struct binder_thread *thread)
{
    int ret = 0;
    struct binder_proc *proc = filp->private_data;
    unsigned int size = _IOC_SIZE(cmd);
    void __user *ubuf = (void __user *)arg;
    struct binder_write_read bwr;

    if (size != sizeof(struct binder_write_read)) {
        ret = -EINVAL;
        goto out;
    }
    // Again the binder_write_read header (not the payload) is copied from user space into kernel space
    if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
        ret = -EFAULT;
        goto out;
    }
    binder_debug(BINDER_DEBUG_READ_WRITE,
             "%d:%d write %lld at %016llx, read %lld at %016llx\n",
             proc->pid, thread->pid,
             (u64)bwr.write_size, (u64)bwr.write_buffer,
             (u64)bwr.read_size, (u64)bwr.read_buffer);
    // Check whether there is data to write (this time there is none)
    if (bwr.write_size > 0) {
        ret = binder_thread_write(proc, thread,
                      bwr.write_buffer,
                      bwr.write_size,
                      &bwr.write_consumed);
        trace_binder_write_done(ret);
        if (ret < 0) {
            bwr.read_consumed = 0;
            if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                ret = -EFAULT;
            goto out;
        }
    }
    // Check whether there is data to read: the for loop in
    // native/cmds/servicemanager/binder.c#binder_loop initializes the read fields,
    // so read_size > 0; the kernel binder_ioctl dispatches BINDER_WRITE_READ here
    // and this branch runs
    if (bwr.read_size > 0) {
        ret = binder_thread_read(proc, thread, bwr.read_buffer,
                     bwr.read_size,
                     &bwr.read_consumed,
                     filp->f_flags & O_NONBLOCK);
        trace_binder_read_done(ret);
        if (!list_empty(&proc->todo))
            wake_up_interruptible(&proc->wait);
        if (ret < 0) {
            if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                ret = -EFAULT;
            goto out;
        }
    }
    binder_debug(BINDER_DEBUG_READ_WRITE,
             "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
             proc->pid, thread->pid,
             (u64)bwr.write_consumed, (u64)bwr.write_size,
             (u64)bwr.read_consumed, (u64)bwr.read_size);
    // Copy the result from kernel space back to user space
    if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
        ret = -EFAULT;
        goto out;
    }
out:
    return ret;
}

binder_thread_read is then entered; as part of this BC_ENTER_LOOPER flow it puts SM into a blocked state, waiting for messages to arrive.
When execution reaches ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));, the service_manager service blocks until work comes in.

/**
 * Here wait_for_proc_work == true,
 * so ret = wait_event_freezable_exclusive(...) is reached and SM blocks
 */
static int binder_thread_read(struct binder_proc *proc,
                  struct binder_thread *thread,
                  binder_uintptr_t binder_buffer, size_t size,
                  binder_size_t *consumed, int non_block)
{
    // buffer here is the read_buffer from bwr
    void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
    void __user *ptr = buffer + *consumed;
    void __user *end = buffer + size;

    int ret = 0;
    int wait_for_proc_work;

    // read_consumed is 0 here
    if (*consumed == 0) {
        // When binder_ioctl is reached from binder_loop in the native binder.c,
        // ptr points into the bwr read buffer passed to ioctl;
        // it can hold several commands, handled first in, first out
        if (put_user(BR_NOOP, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
    }

retry:
    // wait_for_proc_work is true here
    // because both conditions hold (transaction stack and todo list are empty)
    wait_for_proc_work = thread->transaction_stack == NULL &&
                list_empty(&thread->todo);

    if (thread->return_error != BR_OK && ptr < end) {
        if (thread->return_error2 != BR_OK) {
            if (put_user(thread->return_error2, (uint32_t __user *)ptr))
                return -EFAULT;
            ptr += sizeof(uint32_t);
            binder_stat_br(proc, thread, thread->return_error2);
            if (ptr == end)
                goto done;
            thread->return_error2 = BR_OK;
        }
        if (put_user(thread->return_error, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
        binder_stat_br(proc, thread, thread->return_error);
        thread->return_error = BR_OK;
        goto done;
    }


    thread->looper |= BINDER_LOOPER_STATE_WAITING;
    // Increment the count of ready threads (decremented again further down).
    // The increment tells the Binder driver that service_manager is ready
    // and waiting for work; while SM is handling work, ready_threads is decremented
    if (wait_for_proc_work)
        proc->ready_threads++;

    binder_unlock(__func__);

    trace_binder_wait_for_work(wait_for_proc_work,
                   !!thread->transaction_stack,
                   !list_empty(&thread->todo));
    if (wait_for_proc_work) {
        if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
                    BINDER_LOOPER_STATE_ENTERED))) {
            binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
                proc->pid, thread->pid, thread->looper);
            wait_event_interruptible(binder_user_error_wait,
                         binder_stop_on_user_error < 2);
        }
        binder_set_nice(proc->default_priority);
        // When SM gets here the fd is blocking, so non_block is false
        if (non_block) {
            if (!binder_has_proc_work(proc, thread))
                ret = -EAGAIN;
        } else
            ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread)); // wait here, because there is no work yet
    } else {
        if (non_block) {
            if (!binder_has_thread_work(thread))
                ret = -EAGAIN;
        } else
            ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
    }

    binder_lock(__func__);

    if (wait_for_proc_work)
        proc->ready_threads--;
    thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

    if (ret)
        return ret;

    while (1) {
        uint32_t cmd;
        struct binder_transaction_data tr;
        struct binder_work *w;
        struct binder_transaction *t = NULL;

        if (!list_empty(&thread->todo)) {
            w = list_first_entry(&thread->todo, struct binder_work,
                         entry);
        } else if (!list_empty(&proc->todo) && wait_for_proc_work) {
            w = list_first_entry(&proc->todo, struct binder_work,
                         entry);
        } else {
            /* no data added */
            if (ptr - buffer == 4 &&
                !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
                goto retry;
            break;
        }

        if (end - ptr < sizeof(tr) + 4)
            break;

        switch (w->type) {
        case BINDER_WORK_TRANSACTION: {
            t = container_of(w, struct binder_transaction, work);
        } break;
        case BINDER_WORK_TRANSACTION_COMPLETE: {
            cmd = BR_TRANSACTION_COMPLETE;
            if (put_user(cmd, (uint32_t __user *)ptr))
                return -EFAULT;
            ptr += sizeof(uint32_t);

            binder_stat_br(proc, thread, cmd);
            binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
                     "%d:%d BR_TRANSACTION_COMPLETE\n",
                     proc->pid, thread->pid);

            list_del(&w->entry);
            kfree(w);
            binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
        } break;
        case BINDER_WORK_NODE: {
            struct binder_node *node = container_of(w, struct binder_node, work);
            uint32_t cmd = BR_NOOP;
            const char *cmd_name;
            int strong = node->internal_strong_refs || node->local_strong_refs;
            int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;

            if (weak && !node->has_weak_ref) {
                cmd = BR_INCREFS;
                cmd_name = "BR_INCREFS";
                node->has_weak_ref = 1;
                node->pending_weak_ref = 1;
                node->local_weak_refs++;
            } else if (strong && !node->has_strong_ref) {
                cmd = BR_ACQUIRE;
                cmd_name = "BR_ACQUIRE";
                node->has_strong_ref = 1;
                node->pending_strong_ref = 1;
                node->local_strong_refs++;
            } else if (!strong && node->has_strong_ref) {
                cmd = BR_RELEASE;
                cmd_name = "BR_RELEASE";
                node->has_strong_ref = 0;
            } else if (!weak && node->has_weak_ref) {
                cmd = BR_DECREFS;
                cmd_name = "BR_DECREFS";
                node->has_weak_ref = 0;
            }
            if (cmd != BR_NOOP) {
                if (put_user(cmd, (uint32_t __user *)ptr))
                    return -EFAULT;
                ptr += sizeof(uint32_t);
                if (put_user(node->ptr,
                         (binder_uintptr_t __user *)ptr))
                    return -EFAULT;
                ptr += sizeof(binder_uintptr_t);
                if (put_user(node->cookie,
                         (binder_uintptr_t __user *)ptr))
                    return -EFAULT;
                ptr += sizeof(binder_uintptr_t);

                binder_stat_br(proc, thread, cmd);
                binder_debug(BINDER_DEBUG_USER_REFS,
                         "%d:%d %s %d u%016llx c%016llx\n",
                         proc->pid, thread->pid, cmd_name,
                         node->debug_id,
                         (u64)node->ptr, (u64)node->cookie);
            } else {
                list_del_init(&w->entry);
                if (!weak && !strong) {
                    binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                             "%d:%d node %d u%016llx c%016llx deleted\n",
                             proc->pid, thread->pid,
                             node->debug_id,
                             (u64)node->ptr,
                             (u64)node->cookie);
                    rb_erase(&node->rb_node, &proc->nodes);
                    kfree(node);
                    binder_stats_deleted(BINDER_STAT_NODE);
                } else {
                    binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                             "%d:%d node %d u%016llx c%016llx state unchanged\n",
                             proc->pid, thread->pid,
                             node->debug_id,
                             (u64)node->ptr,
                             (u64)node->cookie);
                }
            }
        } break;
        case BINDER_WORK_DEAD_BINDER:
        case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
        case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
            struct binder_ref_death *death;
            uint32_t cmd;

            death = container_of(w, struct binder_ref_death, work);
            if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
                cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
            else
                cmd = BR_DEAD_BINDER;
            if (put_user(cmd, (uint32_t __user *)ptr))
                return -EFAULT;
            ptr += sizeof(uint32_t);
            if (put_user(death->cookie,
                     (binder_uintptr_t __user *)ptr))
                return -EFAULT;
            ptr += sizeof(binder_uintptr_t);
            binder_stat_br(proc, thread, cmd);
            binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
                     "%d:%d %s %016llx\n",
                      proc->pid, thread->pid,
                      cmd == BR_DEAD_BINDER ?
                      "BR_DEAD_BINDER" :
                      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
                      (u64)death->cookie);

            if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
                list_del(&w->entry);
                kfree(death);
                binder_stats_deleted(BINDER_STAT_DEATH);
            } else
                list_move(&w->entry, &proc->delivered_death);
            if (cmd == BR_DEAD_BINDER)
                goto done; /* DEAD_BINDER notifications can cause transactions */
        } break;
        }

        if (!t)
            continue;

        BUG_ON(t->buffer == NULL);
        if (t->buffer->target_node) {
            struct binder_node *target_node = t->buffer->target_node;

            tr.target.ptr = target_node->ptr;
            tr.cookie =  target_node->cookie;
            t->saved_priority = task_nice(current);
            if (t->priority < target_node->min_priority &&
                !(t->flags & TF_ONE_WAY))
                binder_set_nice(t->priority);
            else if (!(t->flags & TF_ONE_WAY) ||
                 t->saved_priority > target_node->min_priority)
                binder_set_nice(target_node->min_priority);
            cmd = BR_TRANSACTION;
        } else {
            tr.target.ptr = 0;
            tr.cookie = 0;
            cmd = BR_REPLY;
        }
        tr.code = t->code;
        tr.flags = t->flags;
        tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);

        if (t->from) {
            struct task_struct *sender = t->from->proc->tsk;

            tr.sender_pid = task_tgid_nr_ns(sender,
                            task_active_pid_ns(current));
        } else {
            tr.sender_pid = 0;
        }

        tr.data_size = t->buffer->data_size;
        tr.offsets_size = t->buffer->offsets_size;
        tr.data.ptr.buffer = (binder_uintptr_t)(
                    (uintptr_t)t->buffer->data +
                    proc->user_buffer_offset);
        tr.data.ptr.offsets = tr.data.ptr.buffer +
                    ALIGN(t->buffer->data_size,
                        sizeof(void *));

        if (put_user(cmd, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
        if (copy_to_user(ptr, &tr, sizeof(tr)))
            return -EFAULT;
        ptr += sizeof(tr);

        trace_binder_transaction_received(t);
        binder_stat_br(proc, thread, cmd);
        binder_debug(BINDER_DEBUG_TRANSACTION,
                 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
                 proc->pid, thread->pid,
                 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
                 "BR_REPLY",
                 t->debug_id, t->from ? t->from->proc->pid : 0,
                 t->from ? t->from->pid : 0, cmd,
                 t->buffer->data_size, t->buffer->offsets_size,
                 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);

        list_del(&t->work.entry);
        t->buffer->allow_user_free = 1;
        if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
            t->to_parent = thread->transaction_stack;
            t->to_thread = thread;
            thread->transaction_stack = t;
        } else {
            t->buffer->transaction = NULL;
            kfree(t);
            binder_stats_deleted(BINDER_STAT_TRANSACTION);
        }
        break;
    }

done:

    *consumed = ptr - buffer;
    if (proc->requested_threads + proc->ready_threads == 0 &&
        proc->requested_threads_started < proc->max_threads &&
        (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
         BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
         /*spawn a new thread if we leave this out */) {
        proc->requested_threads++;
        binder_debug(BINDER_DEBUG_THREADS,
                 "%d:%d BR_SPAWN_LOOPER\n",
                 proc->pid, thread->pid);
        if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
            return -EFAULT;
        binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
    }
    return 0;
}
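The wait condition used above, binder_has_proc_work, is a small helper; in the legacy driver it looks like this:

static int binder_has_proc_work(struct binder_proc *proc,
                struct binder_thread *thread)
{
    // SM wakes up either when work is queued on proc->todo
    // or when the thread is asked to return to user space
    return !list_empty(&proc->todo) ||
        (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
}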

II. How to obtain the service_manager service

This part is mainly native-layer code, under frameworks/native/libs/binder/.
The defaultServiceManager function in IServiceManager.cpp is the entry point for obtaining SM:

1.frameworks/native/libs/binder/IServiceManager.cpp#defaultServiceManager()

The analysis below covers the three parts of the call chain inside defaultServiceManager:

sp<IServiceManager> defaultServiceManager()
{
    if (gDefaultServiceManager != NULL) return gDefaultServiceManager;
    
    {
        AutoMutex _l(gDefaultServiceManagerLock);
        // This while loop guards against the case where SM is not registered yet,
        // or is still in the middle of registering: in that case we sleep and retry until SM is ready
        while (gDefaultServiceManager == NULL) {
            // ProcessState::self()->getContextObject(NULL) returns a BpBinder,
            // which points at the server side.
            // BBinder is the server-side object and BpBinder can be seen as its proxy:
            // here BpBinder is the proxy for SM and BBinder is SM itself.
            // A client cannot hold the BBinder (SM) directly, but it can create a BpBinder;
            // through that proxy (the SM proxy) it operates on the BBinder,
            // i.e. it drives the SM process remotely across processes
            gDefaultServiceManager = interface_cast<IServiceManager>(
                ProcessState::self()->getContextObject(NULL));
            // If it could not be obtained, sleep and retry
            if (gDefaultServiceManager == NULL)
                sleep(1);
        }
    }
    
    return gDefaultServiceManager;
}

2.frameworks/native/libs/binder/ProcessState.cpp#self()

This step deals with ProcessState: it news a ProcessState object.
ProcessState::self() creates (at most once) the per-process ProcessState object; its getContextObject function then returns the BpBinder used to communicate with the server-side BBinder. The constructor also maps this process's Binder memory, which is the native-layer limit on Binder transaction memory: 1MB - 8KB, i.e. (1*1024*1024) - (4096*2).
What newing a ProcessState involves:

// Called from defaultServiceManager in IServiceManager.cpp
sp<ProcessState> ProcessState::self()
{
    Mutex::Autolock _l(gProcessMutex);
    if (gProcess != NULL) {
        return gProcess;
    }
    gProcess = new ProcessState;
    return gProcess;
}

Looking at what the ProcessState constructor actually does:

ProcessState::ProcessState()
    : mDriverFD(open_driver())// Open the device (i.e. the binder driver)
    , mVMStart(MAP_FAILED)
    , mThreadCountLock(PTHREAD_MUTEX_INITIALIZER)
    , mThreadCountDecrement(PTHREAD_COND_INITIALIZER)
    , mExecutingThreadsCount(0)
    , mMaxThreads(DEFAULT_MAX_BINDER_THREADS)// Maximum number of Binder threads -- 15 by default
    , mManagesContexts(false)
    , mBinderContextCheckFunc(NULL)
    , mBinderContextUserData(NULL)
    , mThreadPoolStarted(false)
    , mThreadPoolSeq(1)
{
    if (mDriverFD >= 0) {
        // XXX Ideally, there should be a specific define for whether we
        // have mmap (or whether we could possibly have the kernel module
        // availabla).
#if !defined(HAVE_WIN32_IPC)
        // mmap the binder, providing a chunk of virtual address space to receive transactions.
        // Set up the memory mapping between this process and the binder driver:
        // mmap gives the process its chunk of shared Binder buffer memory,
        // of size ((1*1024*1024) - (4096*2)) = 1MB - 8KB,
        // which is the limit for ordinary processes
        // #define BINDER_VM_SIZE ((1*1024*1024) - (4096 *2))
        mVMStart = mmap(0, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);
        if (mVMStart == MAP_FAILED) {
            // *sigh*
            ALOGE("Using /dev/binder failed: unable to mmap transaction memory.\n");
            close(mDriverFD);
            mDriverFD = -1;
        }
#else
        mDriverFD = -1;
#endif
    }

    LOG_ALWAYS_FATAL_IF(mDriverFD < 0, "Binder driver could not be opened.  Terminating.");
}
(1) Opening the binder driver and setting the max thread count (default 15)

// Open the device (i.e. open the binder driver)
static int open_driver()
{
    // Open the binder driver; this lands in the kernel-level binder_open
    int fd = open("/dev/binder", O_RDWR);
    if (fd >= 0) {
        fcntl(fd, F_SETFD, FD_CLOEXEC);
        int vers = 0;
        status_t result = ioctl(fd, BINDER_VERSION, &vers);
        if (result == -1) {
            ALOGE("Binder ioctl to obtain version failed: %s", strerror(errno));
            close(fd);
            fd = -1;
        }
        if (result != 0 || vers != BINDER_CURRENT_PROTOCOL_VERSION) {
            ALOGE("Binder driver protocol does not match user space protocol!");
            close(fd);
            fd = -1;
        }
        // Maximum thread count -- DEFAULT_MAX_BINDER_THREADS is 15
        size_t maxThreads = DEFAULT_MAX_BINDER_THREADS;
        // Write the max thread count to the driver, which implements the limit
        result = ioctl(fd, BINDER_SET_MAX_THREADS, &maxThreads);
        if (result == -1) {
            ALOGE("Binder ioctl to set max threads failed: %s", strerror(errno));
        }
    } else {
        ALOGW("Opening '/dev/binder' failed: %s\n", strerror(errno));
    }
    return fd;
}

3.frameworks/native/libs/binder/ProcessState.cpp#getContextObject

getContextObject calls getStrongProxyForHandle with handle = 0.
What getContextObject does:

BpBinder is effectively the client-side reference to service_manager: on the server side the BBinder is service_manager itself, and the client-side BpBinder is the proxy for that BBinder. When a client uses SM it really goes through BpBinder, and BpBinder in turn relies on the server-side BBinder to reach SM.

Looking at it from the Java layer:

At the Java layer, ServiceManager is accessed in an AIDL-like way on older system versions and directly through AIDL on newer ones. Compiling an AIDL file produces Java code: an interface that implements android.os.IInterface, an inner abstract class Stub, and a Proxy (the Proxy does not strictly need to live inside Stub) that implements the interface.
ServiceManagerProxy plays the role of that Proxy: it performs its cross-process calls through mRemote, an IBinder whose value is a BinderProxy, and BinderProxy wraps the native-layer BpBinder, the client's proxy for the server-side BBinder; the server-side BBinder is SM (service_manager).
ServiceManagerProxy --> BinderProxy --> BpBinder

Looking at it from the native layer:

At the native layer, ServiceManager is driven through BpServiceManager, which operates on BpBinder:
BpServiceManager --> BpBinder
Whether from the Java layer or the native layer, the call ultimately goes through BpBinder, and it is BpBinder that performs the transact. Because BpBinder is the proxy for BBinder (just like the Proxy above), BpBinder's transact reaches the BBinder's transact and then the BBinder's onTransact; that is how the call crosses from the client process into the server process.
The client never holds the real SM: the server-side SM is the BBinder, and the client reaches it through the BBinder's proxy, BpBinder.
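To make that chain concrete, a hypothetical native service is sketched below (MyService and the command id 1 are invented for illustration; the transact/onTransact signatures are the real IBinder/BBinder ones):

#include <binder/Binder.h>   // BBinder
#include <binder/Parcel.h>

using namespace android;

// Server side: the real object, living in the service process (the BBinder role).
class MyService : public BBinder {
protected:
    status_t onTransact(uint32_t code, const Parcel& data,
                        Parcel* reply, uint32_t flags) override {
        switch (code) {
        case 1: // hypothetical command id
            reply->writeInt32(42);
            return NO_ERROR;
        default:
            return BBinder::onTransact(code, data, reply, flags);
        }
    }
};

// Client side: given the IBinder proxy (a BpBinder) for that service,
// transact() ships the Parcel through the driver and ends up in onTransact() above.
status_t callService(const sp<IBinder>& proxy) {
    Parcel data, reply;
    return proxy->transact(1 /* same hypothetical command id */, data, &reply);
}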

sp<IBinder> ProcessState::getContextObject(const sp<IBinder>& /*caller*/)
{
    return getStrongProxyForHandle(0);
}
sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{
    sp<IBinder> result;

    AutoMutex _l(mLock);

    // Look up the handle_entry for this handle
    handle_entry* e = lookupHandleLocked(handle);
    // e is not NULL here
    if (e != NULL) {
        // We need to create a new BpBinder if there isn't currently one, OR we
        // are unable to acquire a weak reference on this current one.  See comment
        // in getWeakProxyForHandle() for more info about this.
        // Get the binder object stored in the handle_entry
        IBinder* b = e->binder;
        // Check whether a weak reference already exists (at this point it does not)
        if (b == NULL || !e->refs->attemptIncWeak(this)) {
            // handle is 0
            if (handle == 0) {
                // Special case for context manager...
                // The context manager is the only object for which we create
                // a BpBinder proxy without already holding a reference.
                // Perform a dummy transaction to ensure the context manager
                // is registered before we create the first local reference
                // to it (which will occur when creating the BpBinder).
                // If a local reference is created for the BpBinder when the
                // context manager is not present, the driver will fail to
                // provide a reference to the context manager, but the
                // driver API does not return status.
                //
                // Note that this is not race-free if the context manager
                // dies while this code runs.
                //
                // TODO: add a driver API to wait for context manager, or
                // stop special casing handle 0 for context manager and add
                // a driver API to get a handle to the context manager with
                // proper reference counting.

                // Test whether the context manager is ready
                // by sending a PING transaction
                Parcel data;
                status_t status = IPCThreadState::self()->transact(
                        0, IBinder::PING_TRANSACTION, data, NULL, 0);
                if (status == DEAD_OBJECT)
                   return NULL;
            }

            // Create the BpBinder; its constructor calls incWeakHandle, bumping the weak reference count
            b = new BpBinder(handle); 
            e->binder = b;
            if (b) e->refs = b->getWeakRefs();
            result = b;
        } else {
            // This little bit of nastyness is to allow us to add a primary
            // reference to the remote proxy when this team doesn't have one
            // but another team is sending the handle to us.
            result.force_set(b);
            e->refs->decWeak(this);
        }
    }
    // Return the BpBinder
    return result;
}
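The BpBinder constructor mentioned in the comment above does the weak-reference bookkeeping; paraphrased from the BpBinder.cpp of the same era (member list abbreviated):

BpBinder::BpBinder(int32_t handle)
    : mHandle(handle)
    , mAlive(1)
    , mObitsSent(0)
    , mObituaries(NULL)
{
    extendObjectLifetime(OBJECT_LIFETIME_WEAK);
    // Tell the driver (via IPCThreadState) that this process now holds a weak
    // reference on the handle; this is the incWeakHandle call referred to above.
    IPCThreadState::self()->incWeakHandle(handle);
}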

4.frameworks/native/include/binder/IInterface.h#interface_cast

What interface_cast does:

It is a template function defined in IInterface.h; the asInterface it calls is generated in IInterface.h by #define macro expansion.

template<typename INTERFACE>
// obj here is actually the BpBinder
inline sp<INTERFACE> interface_cast(const sp<IBinder>& obj)
{
    // INTERFACE is the template parameter; here IServiceManager is substituted for it
    return INTERFACE::asInterface(obj);
}

The macros declare and implement asInterface; INTERFACE is to be replaced with IServiceManager:

#define DECLARE_META_INTERFACE(INTERFACE)                               \
    static const android::String16 descriptor;                          \
    static android::sp<I##INTERFACE> asInterface(                       \
            const android::sp<android::IBinder>& obj);                  \
    virtual const android::String16& getInterfaceDescriptor() const;    \
    I##INTERFACE();                                                     \
    virtual ~I##INTERFACE();


#define IMPLEMENT_META_INTERFACE(INTERFACE, NAME)                       \
    const android::String16 I##INTERFACE::descriptor(NAME);             \
    const android::String16&                                            \
            I##INTERFACE::getInterfaceDescriptor() const {              \
        return I##INTERFACE::descriptor;                                \
    }                                                                   \
    android::sp<I##INTERFACE> I##INTERFACE::asInterface(                \
            const android::sp<android::IBinder>& obj)                   \
    {                                                                   \
        android::sp<I##INTERFACE> intr;                                 \
        if (obj != NULL) {                                              \
            intr = static_cast<I##INTERFACE*>(                          \
                obj->queryLocalInterface(                               \
                        I##INTERFACE::descriptor).get());               \
            if (intr == NULL) {                                         \
                intr = new Bp##INTERFACE(obj);                          \
            }                                                           \
        }                                                               \
        return intr;                                                    \
    }                                                                   \
    I##INTERFACE::I##INTERFACE() { }                                    \
    I##INTERFACE::~I##INTERFACE() { }

Expanding the DECLARE_META_INTERFACE part:
It mainly declares asInterface and getInterfaceDescriptor.

static const android::String16 descriptor;
static android::sp<IServiceManager> asInterface(const
    android::sp<android::IBinder>& obj);
virtual const android::String16& getInterfaceDescriptor() const;
IServiceManager();
virtual ~IServiceManager();

Expanding the IMPLEMENT_META_INTERFACE part:
This is the concrete implementation of asInterface and getInterfaceDescriptor.

const android::String16
IServiceManager::descriptor("android.os.IServiceManager");
const android::String16& IServiceManager::getInterfaceDescriptor() const
{
    return IServiceManager::descriptor;
}
// obj here is actually the BpBinder
android::sp<IServiceManager> IServiceManager::asInterface(const
    android::sp<android::IBinder>& obj)
{
    android::sp<IServiceManager> intr;
    if(obj != NULL) {
        intr = static_cast<IServiceManager *>(
        obj->queryLocalInterface(IServiceManager::descriptor).get());
        if (intr == NULL) {
            // Equivalent to new BpServiceManager(BpBinder)
            // BpServiceManager is defined in IServiceManager.cpp
            intr = new BpServiceManager(obj);
        }
    }
    return intr;
}
IServiceManager::IServiceManager() { }
IServiceManager::~IServiceManager() { }

After asInterface is called, it news a BpServiceManager, passing in the BpBinder object.

    BpServiceManager(const sp<IBinder>& impl)
        : BpInterface<IServiceManager>(impl)
    {
    }
template<typename INTERFACE>
// Forwards to BpRefBase; remote here is the BpBinder
inline BpInterface<INTERFACE>::BpInterface(const sp<IBinder>& remote)
    : BpRefBase(remote)
{
}

BpInterface in turn builds on BpRefBase, implemented in frameworks/native/libs/binder/Binder.cpp:

BpRefBase::BpRefBase(const sp<IBinder>& o)
    : mRemote(o.get()), mRefs(NULL), mState(0)
{
    extendObjectLifetime(OBJECT_LIFETIME_WEAK);

    if (mRemote) {
        mRemote->incStrong(this);           // Removed on first IncStrong().
        mRefs = mRemote->createWeak(this);  // Held for our entire lifetime.
    }
}

Here o is still the BpBinder.
Since BpRefBase holds an mRemote, BpServiceManager also ends up with an mRemote that wraps the BpBinder. Calling mRemote->transact is therefore calling BpBinder::transact, and because BpBinder is the proxy for BBinder, the call reaches BBinder::transact, which is how the cross-process call is made.
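To see mRemote in action, this is roughly how BpServiceManager asks SM for a service (a paraphrase of checkService in IServiceManager.cpp; CHECK_SERVICE_TRANSACTION is the real transaction code):

virtual sp<IBinder> checkService(const String16& name) const
{
    Parcel data, reply;
    data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
    data.writeString16(name);
    // remote() returns the mRemote set up in BpRefBase, i.e. the handle-0 BpBinder,
    // so this transact() crosses into the service_manager process.
    remote()->transact(CHECK_SERVICE_TRANSACTION, data, &reply);
    return reply.readStrongBinder();
}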
