
Framework Study Notes 9: The Service Lookup Process (Java Layer)

2020-12-14

1. Querying ServiceManager:
1.1 When startActivity() is called in an Activity, it eventually reaches:
Instrumentation.ActivityResult ar = mInstrumentation.execStartActivity(
this, mMainThread.getApplicationThread(), mToken, this,
intent, requestCode, options); // two inter-process communications in total
==============>
In Instrumentation:
ActivityManagerNative.getDefault().startActivity(...);
==============>
In ActivityManagerNative:
ActivityManagerNative.getDefault();
==============>
In ActivityManagerNative:
static public IActivityManager getDefault() {
return gDefault.get();
}
===============>
In ActivityManagerNative:

private static final Singleton<IActivityManager> gDefault = new Singleton<IActivityManager>() {
        protected IActivityManager create() {
            IBinder b = ServiceManager.getService("activity");
            if (false) {
                Log.v("ActivityManager", "default service binder = " + b);
            }
            IActivityManager am = asInterface(b);
            if (false) {
                Log.v("ActivityManager", "default service = " + am);
            }
            return am;
        }
    };
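The Singleton used for gDefault is the framework helper android.util.Singleton: create() runs only on the first get(), and every later get() returns the cached IActivityManager, so ServiceManager.getService("activity") is only queried once per process. A minimal sketch of how that helper behaves (written from memory of android.util.Singleton, for illustration only):

// Minimal sketch of android.util.Singleton: lazy, thread-safe, create-once caching.
public abstract class Singleton<T> {
    private T mInstance;

    protected abstract T create();

    public final T get() {
        synchronized (this) {
            if (mInstance == null) {
                mInstance = create();
            }
            return mInstance;
        }
    }
}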

==============>
Finally: IBinder b = ServiceManager.getService("activity") is what looks up the ActivityManagerService (AMS).
What ServiceManager.getService(String name) returns is:
return getIServiceManager().getService(name);
[Conclusion from the analysis below: this is getService(name) called on new ServiceManagerProxy(new BpBinder(0)).]
Below is the getIServiceManager() method of the ServiceManager class:

 private static IServiceManager getIServiceManager() {
        if (sServiceManager != null) {
            return sServiceManager;
        }

        // Find the service manager; BinderInternal.getContextObject() yields new BpBinder(0) (wrapped as a Java object)
        sServiceManager = ServiceManagerNative.asInterface(BinderInternal.getContextObject()); // Note 1
        return sServiceManager;    // Note 2
    }

Note 1:
BinderInternal.getContextObject() is a native method: public static final native IBinder getContextObject();
Its native implementation is in frameworks/base/core/jni/android_util_Binder.cpp, in android_os_BinderInternal_getContextObject():

//frameworks/base/core/jni/android_util_Binder.cpp
static jobject android_os_BinderInternal_getContextObject(JNIEnv* env, jobject clazz)
{
    sp<IBinder> b = ProcessState::self()->getContextObject(NULL);
    return javaObjectForIBinder(env, b);
}

ProcessState::self()->getContextObject(NULL) is simply the getContextObject() method of ProcessState:

//frameworks/native/libs/binder/ProcessState.cpp
sp<IBinder> ProcessState::getContextObject(const sp<IBinder>& /*caller*/)
{
    return getStrongProxyForHandle(0);  // note: the handle passed in here is 0
}

The getStrongProxyForHandle(0) method:

sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle){
    sp<IBinder> result;
    AutoMutex _l(mLock);
    handle_entry* e = lookupHandleLocked(handle);

    if (e != NULL) {
        IBinder* b = e->binder;
        if (b == NULL || !e->refs->attemptIncWeak(this)) {
            if (handle == 0) {
                Parcel data;
                // ping the context manager once, the same as analyzed in the earlier notes
                status_t status = IPCThreadState::self()->transact(
                        0, IBinder::PING_TRANSACTION, data, NULL, 0);
                if (status == DEAD_OBJECT)
                   return NULL;
            }

            b = new BpBinder(handle);  // here: the BpBinder for handle 0 is created
            e->binder = b;
            if (b) e->refs = b->getWeakRefs();
            result = b;
        } else {
            result.force_set(b);
            e->refs->decWeak(this);
        }
    }
    return result;
}

Note 2: sServiceManager = ServiceManagerNative.asInterface(new BpBinder(0));
// Strictly speaking, the "new BpBinder(0)" here is not the C++ BpBinder itself; that is just shorthand to aid understanding.
// In return javaObjectForIBinder(env, b) the C++ BpBinder has already been wrapped into a Java object (a BinderProxy).
So what we end up with is new ServiceManagerProxy(obj), where obj is that BinderProxy wrapping BpBinder(0).
The asInterface() implementation is:

 static public IServiceManager asInterface(IBinder obj)
    {
        if (obj == null) {
            return null;
        }
        IServiceManager in = (IServiceManager)obj.queryLocalInterface(descriptor);
        if (in != null) {
            return in;
        } 
        return new ServiceManagerProxy(obj);
    }

1.2 getService(name) in ServiceManagerProxy, the inner class of ServiceManagerNative:
mRemote is the object passed in above, i.e. the BinderProxy (the proxy class in Binder.java that implements IBinder) wrapping the native BpBinder(0).

    public IBinder getService(String name) throws RemoteException {
        Parcel data = Parcel.obtain();
        Parcel reply = Parcel.obtain();
        data.writeInterfaceToken(IServiceManager.descriptor);
        data.writeString(name);
        // data: the data written out;    reply: the data written back;      command code: GET_SERVICE_TRANSACTION
        mRemote.transact(GET_SERVICE_TRANSACTION, data, reply, 0);    // analyzed step by step below
        IBinder binder = reply.readStrongBinder();
        reply.recycle();
        data.recycle();
        return binder;
    }
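ServiceManagerProxy.getService() above is one instance of the generic Binder proxy/stub pattern: asInterface() returns the local object when the binder lives in the same process and otherwise wraps the BinderProxy in a proxy; the proxy marshals its arguments into a Parcel and calls transact(); the stub's onTransact() on the other side unmarshals them and fills in the reply Parcel. A hedged sketch of that pattern for a hypothetical IFooService (the interface name and transaction code are illustrative, not from the sources above):

import android.os.Binder;
import android.os.IBinder;
import android.os.IInterface;
import android.os.Parcel;
import android.os.RemoteException;

// Hypothetical IFooService: the same pattern ServiceManagerNative/ServiceManagerProxy follow.
interface IFooService extends IInterface {
    String DESCRIPTOR = "com.example.IFooService";              // assumed name
    int TRANSACTION_echo = IBinder.FIRST_CALL_TRANSACTION;      // assumed transaction code

    String echo(String msg) throws RemoteException;

    // Server side: lives in the service's own process.
    abstract class Stub extends Binder implements IFooService {
        public Stub() {
            attachInterface(this, DESCRIPTOR);
        }

        @Override
        public IBinder asBinder() {
            return this;
        }

        // Same shape as ServiceManagerNative.asInterface(): local object or proxy.
        public static IFooService asInterface(IBinder obj) {
            if (obj == null) return null;
            IInterface in = obj.queryLocalInterface(DESCRIPTOR);
            if (in instanceof IFooService) return (IFooService) in;  // same process
            return new Proxy(obj);                                    // remote: wrap the BinderProxy
        }

        // Counterpart of mRemote.transact(): unmarshal the request, write the reply.
        @Override
        protected boolean onTransact(int code, Parcel data, Parcel reply, int flags)
                throws RemoteException {
            if (code == TRANSACTION_echo) {
                data.enforceInterface(DESCRIPTOR);            // pairs with writeInterfaceToken()
                reply.writeString("echo: " + data.readString());
                return true;
            }
            return super.onTransact(code, data, reply, flags);
        }
    }

    // Client side: same shape as ServiceManagerProxy.getService() above.
    final class Proxy implements IFooService {
        private final IBinder mRemote;

        Proxy(IBinder remote) {
            mRemote = remote;
        }

        @Override
        public IBinder asBinder() {
            return mRemote;
        }

        @Override
        public String echo(String msg) throws RemoteException {
            Parcel data = Parcel.obtain();
            Parcel reply = Parcel.obtain();
            try {
                data.writeInterfaceToken(DESCRIPTOR);
                data.writeString(msg);
                mRemote.transact(TRANSACTION_echo, data, reply, 0);
                return reply.readString();
            } finally {
                reply.recycle();
                data.recycle();
            }
        }
    }
}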

transact() in BinderProxy,
where code is the command GET_SERVICE_TRANSACTION:

public boolean transact(int code, Parcel data, Parcel reply, int flags) throws RemoteException {
        Binder.checkParcel(this, code, data, "Unreasonably large binder buffer");
        return transactNative(code, data, reply, flags);
    }

    public native boolean transactNative(int code, Parcel data, Parcel reply, int flags) throws RemoteException;

transactNative() is implemented natively in android_util_Binder.cpp, in android_os_BinderProxy_transact():

static jboolean android_os_BinderProxy_transact(JNIEnv* env, jobject obj,
        jint code, jobject dataObj, jobject replyObj, jint flags) // throws RemoteException
{
    Parcel* data = parcelForJavaObject(env, dataObj);
    Parcel* reply = parcelForJavaObject(env, replyObj);
    // when the native BpBinder(0) was wrapped into the Java BinderProxy above,
    // the native pointer was stored in the BinderProxy's long field mObject
    IBinder* target = (IBinder*) env->GetLongField(obj, gBinderProxyOffsets.mObject);
    // so target->transact() here is BpBinder::transact() in BpBinder.cpp
    status_t err = target->transact(code, *data, reply, flags);
    // ... error handling and return value omitted
}

The BpBinder::transact() method:

status_t BpBinder::transact(uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    // Once a binder has died, it will never come back to life.
    if (mAlive) {
        // from here on it is IPCThreadState::self()->transact(), the same flow as in note 5;
        // the only difference is that the command code is GET_SERVICE_TRANSACTION
        status_t status = IPCThreadState::self()->transact(mHandle, code, data, reply, flags);
        if (status == DEAD_OBJECT) mAlive = 0;
        return status;
    }
    return DEAD_OBJECT;
}

2. How the server returns the result
First, how does the server read the data? After the client calls IPCThreadState::self()->transact(), the Binder driver sends the client a BINDER_WORK_TRANSACTION_COMPLETE, after which the client waits for the reply; at the same time the driver posts a BINDER_WORK_TRANSACTION work item to the ServiceManager, waking the SM up so that it reads the data in binder_thread_read().

    // read the data and set cmd = BR_TRANSACTION
    static int binder_thread_read(struct binder_proc *proc,
                                  struct binder_thread *thread,
                                  binder_uintptr_t binder_buffer,
                                  size_t size,
                                  binder_size_t *consumed,
                                  int non_block)
    {
        ...
            while (1) {
            
            uint32_t cmd;
            struct binder_transaction_data tr;
            struct binder_work *w;
            struct binder_transaction *t = NULL;
            ...
            // only a BINDER_WORK_TRANSACTION item (i.e. t != NULL) lets execution continue
            if (!t)
                continue;

            // does transaction t carry a binder node of the target process?
            if (t->buffer->target_node) {
                struct binder_node *target_node = t->buffer->target_node;
                tr.target.ptr = target_node->ptr;
                tr.cookie =  target_node->cookie;
                t->saved_priority = task_nice(current);
                ...
                cmd = BR_TRANSACTION;  // set the command to BR_TRANSACTION
            } else {
                tr.target.ptr = NULL;
                tr.cookie = NULL;
                cmd = BR_REPLY; // set the command to BR_REPLY
            }
            // fill in the members of the binder_transaction_data defined above
            tr.code = t->code;
            tr.flags = t->flags;
            tr.sender_euid = t->sender_euid;
            ...
     
        ...
    }

//a. In service_manager.c, the callback svcmgr_handler dispatches on the command code.
//GET_SERVICE_TRANSACTION on the Java side carries the same value as SVC_MGR_GET_SERVICE here:
 switch(txn->code) {
    case SVC_MGR_GET_SERVICE:
    case SVC_MGR_CHECK_SERVICE:
        s = bio_get_string16(msg, &len);
        if (s == NULL) {
            return -1;
        }
        handle = do_find_service(bs, s, len, txn->sender_euid, txn->sender_pid);
        if (!handle)
            break;
        bio_put_ref(reply, handle);  // see b
        return 0;
}
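Conceptually, do_find_service() just looks up a name-to-handle table that earlier addService() calls (SVC_MGR_ADD_SERVICE) filled in, and bio_put_ref() then writes that handle back as a flat_binder_object. A purely illustrative Java sketch of that idea (a toy registry, not the real service_manager.c, which keeps a linked list of svcinfo entries):

import android.os.IBinder;
import java.util.HashMap;
import java.util.Map;

// Toy sketch only: a name -> binder table mirroring what service_manager.c does
// with its svcinfo list. In the real daemon the stored value is a driver handle, not an IBinder.
final class ToyServiceRegistry {
    private final Map<String, IBinder> mServices = new HashMap<>();

    // Rough analogue of SVC_MGR_ADD_SERVICE / do_add_service().
    synchronized void addService(String name, IBinder binder) {
        mServices.put(name, binder);
    }

    // Rough analogue of SVC_MGR_GET_SERVICE / do_find_service(): the result is what
    // eventually comes back to the client from reply.readStrongBinder().
    synchronized IBinder getService(String name) {
        return mServices.get(name);
    }
}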

//b. bio_put_ref() in the servicemanager's binder.c writes the reply data:
void bio_put_ref(struct binder_io *bio, uint32_t handle)
{
    struct flat_binder_object *obj;  // used again in Note 3 (binder_transaction)

    if (handle)
        obj = bio_alloc_obj(bio);
    else
        obj = bio_alloc(bio, sizeof(*obj));

    if (!obj)
        return;

    obj->flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    obj->type = BINDER_TYPE_HANDLE;  // note this type; it comes up again in Note 3
    obj->handle = handle;
    obj->cookie = 0;
}

//c. The reply data is written back from binder_parse(), which is called in the binder_loop() loop:
binder_parse(...){
//...
 case BR_TRANSACTION: {    // as in note 8, the SM receives the BR_TRANSACTION sent by the binder driver
            struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
            if ((end - ptr) < sizeof(*txn)) {
                ALOGE("parse: txn too small!\n");
                return -1;
            }
            binder_dump_txn(txn);
            if (func) {
                unsigned rdata[256/4];
                struct binder_io msg;
                struct binder_io reply;
                int res;  // 0 on success; analyzed in d

                bio_init(&reply, rdata, sizeof(rdata), 4);
                bio_init_from_txn(&msg, txn);
                res = func(bs, txn, &msg, &reply);
                binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);  // see d
            }
            ptr += sizeof(*txn);
            break;
        }
//...

//d. binder_send_reply() packs a BC_FREE_BUFFER and a BC_REPLY and writes them back to the driver:
void binder_send_reply(struct binder_state *bs,
                       struct binder_io *reply,
                       binder_uintptr_t buffer_to_free,
                       int status){
    struct {
        uint32_t cmd_free;
        binder_uintptr_t buffer;
        uint32_t cmd_reply;
        struct binder_transaction_data txn;
    } __attribute__((packed)) data;
    // command to free the transaction buffer
    data.cmd_free = BC_FREE_BUFFER;
    data.buffer = buffer_to_free;
    // the reply command
    data.cmd_reply = BC_REPLY;
    data.txn.target.ptr = 0;
    data.txn.cookie = 0;
    data.txn.code = 0;
    if (status) {
      //...
    } else { // this branch is taken (status == 0)
        // write the reply data into data.txn
        data.txn.flags = 0;
        data.txn.data_size = reply->data - reply->data0;
        data.txn.offsets_size = ((char*) reply->offs) - ((char*) reply->offs0);
        data.txn.data.ptr.buffer = (uintptr_t)reply->data0;
        data.txn.data.ptr.offsets = (uintptr_t)reply->offs0;
    }
    // write both commands (BC_FREE_BUFFER + BC_REPLY) to the driver in one go
    binder_write(bs, &data, sizeof(data));
}

//e. enter the binder driver
int binder_write(struct binder_state *bs, void *data, size_t len)
{
    struct binder_write_read bwr;
    int res;
    bwr.write_size = len;
    bwr.write_consumed = 0;
    bwr.write_buffer = (uintptr_t) data;
    bwr.read_size = 0;
    bwr.read_consumed = 0;
    bwr.read_buffer = 0;
    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);  //binder_ioctl()
    return res;
}

2.1 ioctl(bs->fd, BINDER_WRITE_READ, &bwr)
===> BINDER_WRITE_READ: binder_ioctl_write_read();
===> bwr.write_size > 0: binder_thread_write(); // Note 2

How does the client enter the waiting state? ===> see Note 5 in note 8.
How is the client's request, received in the driver layer, replied to?

Note 2: binder_thread_write():

static int binder_thread_write(struct binder_proc *proc,
            struct binder_thread *thread,
            binder_uintptr_t binder_buffer, size_t size,
            binder_size_t *consumed){

    switch (cmd) {
    // first command: free the buffer
    case BC_FREE_BUFFER: {
        // ...
        break;
    }
    case BC_TRANSACTION:
    // second command: the reply
    case BC_REPLY: {
        struct binder_transaction_data tr;

        if (copy_from_user(&tr, ptr, sizeof(tr)))
            return -EFAULT;
        ptr += sizeof(tr);
        binder_transaction(proc, thread, &tr, cmd == BC_REPLY);  // Note 3
        break;
    }
    }
    return 0;
}

Note 3: binder_transaction(proc, thread, &tr, cmd == BC_REPLY):

static void binder_transaction(struct binder_proc *proc, struct binder_thread *thread,
                   struct binder_transaction_data *tr, int reply){
    struct binder_transaction *in_reply_to = NULL;

    if (reply) {
        // get the binder_transaction of the original request (the sender)
        in_reply_to = thread->transaction_stack;
        if (in_reply_to == NULL) {
            // ... cannot be NULL on this path
        }
        binder_set_nice(in_reply_to->saved_priority);
        if (in_reply_to->to_thread != thread) {
            // must be the thread the original request was sent to
            binder_user_error("got reply transaction with bad transaction stack ...");
            // ... error handling omitted
        }
        thread->transaction_stack = in_reply_to->to_parent;
        // get the originating (client) thread
        target_thread = in_reply_to->from;
        if (target_thread == NULL) {  // not NULL here
            // ... code omitted
        }
        if (target_thread->transaction_stack != in_reply_to) {
            // ... code omitted
        }
        // get the originating (client) proc
        target_proc = target_thread->proc;
    } else {
        // ... code omitted
    }
    // ... code omitted
    if (target_thread) {  // take the target's pid, todo queue and wait queue
        e->to_thread = target_thread->pid;
        target_list = &target_thread->todo;
        target_wait = &target_thread->wait;
    } else {
        target_list = &target_proc->todo;
        target_wait = &target_proc->wait;
    }
    e->to_proc = target_proc->pid;

    // allocate the binder_transaction struct
    t = kzalloc(sizeof(*t), GFP_KERNEL);
    binder_stats_created(BINDER_STAT_TRANSACTION);
    // allocate the binder_work struct for the "transaction complete" notification
    tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
    binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

    // allocate a buffer from the target (client) process's binder memory
    t->buffer = binder_alloc_buf(target_proc, tr->data_size, tr->offsets_size,
            !reply && (t->flags & TF_ONE_WAY));
    // copy the data buffer into the allocated buffer
    if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)tr->data.ptr.buffer, tr->data_size)) {
        // ...
    }
    // copy the offsets array, which sits right after the data buffer
    if (copy_from_user(offp, (const void __user *)(uintptr_t)tr->data.ptr.offsets, tr->offsets_size)) {
        // ...
    }
    off_end = (void *)offp + tr->offsets_size;
    // queue work on the target's list and on its own list, then wake the target up
    t->work.type = BINDER_WORK_TRANSACTION;             // for the client (target)
    list_add_tail(&t->work.entry, target_list);
    tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; // for the SM itself
    list_add_tail(&tcomplete->entry, &thread->todo);
    if (target_wait)
        wake_up_interruptible(target_wait);
    return;
}

Summary:
How the server replies to the client: when the client made the request, the driver recorded where it came from, i.e. the client's binder_proc (and binder_thread); what this getService() call writes back to the client is a flat_binder_object carrying the handle of the requested service.
How the client enters the waiting state: when it sends the request, the driver hands it a BINDER_WORK_TRANSACTION_COMPLETE; the client then clears its write data, re-enters the driver's read path, finds its todo queue empty, and sleeps until the server finishes and wakes it up.
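
To tie the Java-layer path together, here is a hedged sketch of driving the same lookup from app code via reflection. ServiceManager is a hidden framework API, so this is illustrative only and may be rejected by hidden-API restrictions on recent Android versions:

import android.os.IBinder;
import android.util.Log;
import java.lang.reflect.Method;

// Illustrative only: reflectively calls the hidden android.os.ServiceManager.getService(),
// i.e. the exact path analyzed above (ServiceManagerProxy -> BpBinder(0) -> binder driver).
public final class ServiceLookupDemo {
    public static void lookupActivityService() {
        try {
            Class<?> sm = Class.forName("android.os.ServiceManager");
            Method getService = sm.getMethod("getService", String.class);
            IBinder binder = (IBinder) getService.invoke(null, "activity");
            if (binder != null && binder.pingBinder()) {
                // The returned BinderProxy is alive; asInterface() would wrap it into an IActivityManager.
                Log.d("ServiceLookupDemo", "got activity service binder: " + binder);
            }
        } catch (ReflectiveOperationException e) {
            Log.w("ServiceLookupDemo", "lookup failed", e);
        }
    }
}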
