Binder (Part 2)
The previous article analyzed in detail how MediaPlayer's underlying player gets created. We know Binder follows a client/server (C/S) structure: MediaPlayer.cpp plays the client role, while MediaPlayerService is the server. MediaPlayerService runs inside the MediaServer process. Let's start with how it is launched; MediaServer's entry point is defined in main_mediaserver.cpp.
// started by the init process
int main(int argc __unused, char** argv)
{
signal(SIGPIPE, SIG_IGN);
char value[PROPERTY_VALUE_MAX];
bool doLog = (property_get("ro.test_harness", value, "0") > 0) && (atoi(value) == 1);
pid_t childPid;
// parent process: when doLog is enabled, it becomes media.log
if (doLog && (childPid = fork()) != 0) {
strcpy(argv[0], "media.log");
// create the singleton ProcessState instance, which also opens the Binder driver
sp<ProcessState> proc(ProcessState::self());
MediaLogService::instantiate();
ProcessState::self()->startThreadPool();
for (;;) {
siginfo_t info;
// wait for the child process to exit
int ret = waitid(P_PID, childPid, &info, WEXITED | WSTOPPED | WCONTINUED);
if (ret == EINTR) {
continue;
}
if (ret < 0) {
break;
}
......
}
} else {
// child process (or the main process when doLog is off)
if (doLog) {
prctl(PR_SET_PDEATHSIG, SIGKILL);
setpgid(0, 0);
}
// create the singleton ProcessState instance, which also opens the Binder driver
sp<ProcessState> proc(ProcessState::self());
// obtain the ServiceManager
sp<IServiceManager> sm = defaultServiceManager();
ALOGI("ServiceManager: %p", sm.get());
// instantiate the various media services
AudioFlinger::instantiate();
// instantiate MediaPlayerService
MediaPlayerService::instantiate();
CameraService::instantiate();
AudioPolicyService::instantiate();
SoundTriggerHwService::instantiate();
registerExtensions();
// spawn a new thread for Binder reads/writes
ProcessState::self()->startThreadPool();
// have the main thread join the Binder read/write loop
IPCThreadState::self()->joinThreadPool();
}
}
MediaServer is started by the init process according to init.rc. Note that the fork above only happens when the ro.test_harness property is set (doLog): the parent then becomes media.log while the child registers the various media services, including MediaPlayerService; otherwise everything runs directly in the main process.
Each process has exactly one ProcessState, so self() is clearly a singleton accessor, and constructing ProcessState also opens the Binder driver.
sp<ProcessState> ProcessState::self()
{
Mutex::Autolock _l(gProcessMutex);
if (gProcess != NULL) {
return gProcess;
}
// create it and cache it in a global variable
gProcess = new ProcessState;
return gProcess;
}
#define BINDER_VM_SIZE ((1*1024*1024) - (4096 *2))// default mapping size: 1MB - 8KB
ProcessState::ProcessState()
: mDriverFD(open_driver())// open the Binder driver and save the fd in mDriverFD
, mVMStart(MAP_FAILED)// mapping address initialized to MAP_FAILED
, mManagesContexts(false)
, mBinderContextCheckFunc(NULL)
, mBinderContextUserData(NULL)
, mThreadPoolStarted(false)
, mThreadPoolSeq(1)
{
// the Binder driver fd is valid
if (mDriverFD >= 0) {
#if !defined(HAVE_WIN32_IPC)
// map a block of memory provided by the Binder driver, getting back the mapping address
mVMStart = mmap(0, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);
// on failure, close the driver
if (mVMStart == MAP_FAILED) {
ALOGE("Using /dev/binder failed: unable to mmap transaction memory.\n");
close(mDriverFD);
mDriverFD = -1;
}
#else
mDriverFD = -1;
#endif
}
LOG_ALWAYS_FATAL_IF(mDriverFD < 0, "Binder driver could not be opened. Terminating.");
}
The ProcessState constructor calls open_driver() to open the /dev/binder device, then maps memory with mmap. On success, mmap returns the start address of the mapping; reading from that memory reads data placed there by the Binder driver, with no extra copy and no switch between user and kernel mode on the read path. For more background on this point, see the example in the earlier article 《Binder简介》.
static int open_driver()
{
// open the binder device node; returns a file descriptor
int fd = open("/dev/binder", O_RDWR);
if (fd >= 0) {
fcntl(fd, F_SETFD, FD_CLOEXEC);
int vers = 0;
status_t result = ioctl(fd, BINDER_VERSION, &vers);
if (result == -1) {
ALOGE("Binder ioctl to obtain version failed: %s", strerror(errno));
close(fd);
fd = -1;
}
if (result != 0 || vers != BINDER_CURRENT_PROTOCOL_VERSION) {
ALOGE("Binder driver protocol does not match user space protocol!");
close(fd);
fd = -1;
}
// default maximum number of binder threads: 15
size_t maxThreads = 15;
result = ioctl(fd, BINDER_SET_MAX_THREADS, &maxThreads);
if (result == -1) {
ALOGE("Binder ioctl to set max threads failed: %s", strerror(errno));
}
} else {
ALOGW("Opening '/dev/binder' failed: %s\n", strerror(errno));
}
return fd;
}
The ioctl tells the Binder driver that this fd allows at most 15 binder threads, and the fd is returned. At this point the Binder driver has been opened, and a block of memory has been mapped for exchanging data between the process and the driver. Because ProcessState is unique per process, the Binder driver is opened, and the memory mapped, only once per process.
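To make the mapping concrete, here is a minimal sketch, not AOSP code, of what ProcessState effectively sets up (error handling elided; map_binder is an illustrative name). The driver copies an incoming transaction payload once, directly into the physical pages backing this mapping, so the receiving process reads it through a pointer instead of a read() syscall.
#include <fcntl.h>
#include <sys/mman.h>
#define BINDER_VM_SIZE ((1*1024*1024) - (4096*2))
void* map_binder(int* outFd) {
// one open per process, mirroring open_driver()
int fd = open("/dev/binder", O_RDWR);
// read-only, private mapping backed by the binder driver
void* base = mmap(0, BINDER_VM_SIZE, PROT_READ,
MAP_PRIVATE | MAP_NORESERVE, fd, 0);
*outFd = fd;
// BR_TRANSACTION/BR_REPLY buffers will point into this region
return base;
}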
Next let's look at the defaultServiceManager function, which returns the IServiceManager object. As we know, services such as MediaPlayerService are registered with it. Its implementation lives in IServiceManager.cpp.
sp<IServiceManager> defaultServiceManager()
{
// gDefaultServiceManager is a global variable caching the ServiceManager proxy
if (gDefaultServiceManager != NULL) return gDefaultServiceManager;
{
AutoMutex _l(gDefaultServiceManagerLock);
while (gDefaultServiceManager == NULL) {
// obtain the ServiceManager via ProcessState's getContextObject
gDefaultServiceManager = interface_cast<IServiceManager>(ProcessState::self()->getContextObject(NULL));
if (gDefaultServiceManager == NULL)
sleep(1);
}
}
return gDefaultServiceManager;
}
IServiceManager is likewise a singleton, obtained through ProcessState's getContextObject.
class ProcessState : public virtual RefBase
{
......
private:
......
// the entry structure
struct handle_entry {
IBinder* binder;
RefBase::weakref_type* refs;
};
handle_entry* lookupHandleLocked(int32_t handle);
sp<IBinder> ProcessState::getContextObject(const sp<IBinder>& /*caller*/)
{
// 0 is passed in
return getStrongProxyForHandle(0);
}
sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{
sp<IBinder> result;
AutoMutex _l(mLock);
// look up a handle_entry by index
handle_entry* e = lookupHandleLocked(handle);
if (e != NULL) {
IBinder* b = e->binder;
// for a freshly created entry, b is null
if (b == NULL || !e->refs->attemptIncWeak(this)) {
if (handle == 0) {
Parcel data;
// ping the target to check it is alive
status_t status = IPCThreadState::self()->transact(
0, IBinder::PING_TRANSACTION, data, NULL, 0);
if (status == DEAD_OBJECT)
return NULL;
}
// create a BpBinder with handle 0
b = new BpBinder(handle);
// store it in the handle_entry
e->binder = b;
if (b) e->refs = b->getWeakRefs();
result = b;
} else {
result.force_set(b);
e->refs->decWeak(this);
}
}
// returns new BpBinder(0)
return result;
}
Passing in handle index 0 returns a handle_entry, which holds the IBinder's address. Index 0 means we are asking for the client-side proxy IBinder of ServiceManager. If that IBinder is null, a new BpBinder object is created. Its counterpart is the BBinder class; both derive from IBinder.

- BpBinder is the client side, the proxy class used to interact with the server (the "p" stands for Proxy). Here MediaServer is itself a client relative to ServiceManager, and BpBinder issues requests to the server via transact().
- BBinder is the server side; it is subclassed through BnInterface, and onTransact() responds to client requests.
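To make the Bp/Bn pattern concrete before diving into BpBinder, here is a minimal hypothetical interface sketch. IHello, BpHello and BnHello are illustrative names, not part of AOSP, and a companion IMPLEMENT_META_INTERFACE(Hello, "...") line in a .cpp file is assumed.
#include <binder/IInterface.h>
#include <binder/Parcel.h>
#include <utils/String16.h>
using namespace android;
// hypothetical interface, for illustration only
class IHello : public IInterface {
public:
DECLARE_META_INTERFACE(Hello);
virtual status_t sayHello(const String16& msg) = 0;
enum { SAY_HELLO = IBinder::FIRST_CALL_TRANSACTION };
};
// client-side proxy: packs arguments and sends them through the driver
class BpHello : public BpInterface<IHello> {
public:
BpHello(const sp<IBinder>& impl) : BpInterface<IHello>(impl) {}
virtual status_t sayHello(const String16& msg) {
Parcel data, reply;
data.writeInterfaceToken(IHello::getInterfaceDescriptor());
data.writeString16(msg);
// remote() is the BpBinder; the same path addService takes below
return remote()->transact(SAY_HELLO, data, &reply);
}
};
// server-side stub: unpacks the request and dispatches to the real object
class BnHello : public BnInterface<IHello> {
protected:
virtual status_t onTransact(uint32_t code, const Parcel& data,
Parcel* reply, uint32_t flags) {
switch (code) {
case SAY_HELLO:
data.enforceInterface(IHello::getInterfaceDescriptor());
reply->writeInt32(sayHello(data.readString16()));
return NO_ERROR;
default:
return BBinder::onTransact(code, data, reply, flags);
}
}
};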
Let's first look at how BpBinder is constructed.
BpBinder::BpBinder(int32_t handle)
: mHandle(handle)// 0 in this case
, mAlive(1)
, mObitsSent(0)
, mObituaries(NULL)
{
ALOGV("Creating BpBinder %p handle %d\n", this, mHandle);
extendObjectLifetime(OBJECT_LIFETIME_WEAK);
IPCThreadState::self()->incWeakHandle(handle);
}
At this point we have the BpBinder object. In other words,
gDefaultServiceManager = interface_cast<IServiceManager>(ProcessState::self()->getContextObject(NULL));
is equivalent to
gDefaultServiceManager = interface_cast<IServiceManager>(new BpBinder(0));
interface_cast converts the BpBinder into an IServiceManager object; interface_cast is implemented in IInterface.h.
template<typename INTERFACE>
inline sp<INTERFACE> interface_cast(const sp<IBinder>& obj)
{
return INTERFACE::asInterface(obj);
}
This is a template function, so INTERFACE::asInterface(obj) here is equivalent to IServiceManager::asInterface(obj). Let's first look at IServiceManager.h.
class IServiceManager : public IInterface
{
public:
// a crucial macro
DECLARE_META_INTERFACE(ServiceManager);
// get a service
virtual sp<IBinder> getService( const String16& name) const = 0;
// check for a service
virtual sp<IBinder> checkService( const String16& name) const = 0;
// register a service
virtual status_t addService( const String16& name,
const sp<IBinder>& service,
bool allowIsolated = false) = 0;
virtual Vector<String16> listServices() = 0;
enum {
GET_SERVICE_TRANSACTION = IBinder::FIRST_CALL_TRANSACTION,
CHECK_SERVICE_TRANSACTION,
ADD_SERVICE_TRANSACTION,
LIST_SERVICES_TRANSACTION,
};
};
The DECLARE_META_INTERFACE macro only provides the declarations; the implementation lives in IServiceManager.cpp.
IMPLEMENT_META_INTERFACE(ServiceManager, "android.os.IServiceManager");
So where do the DECLARE_META_INTERFACE and IMPLEMENT_META_INTERFACE macros come from? They are defined in IInterface.h.
#define DECLARE_META_INTERFACE(INTERFACE) \
static const android::String16 descriptor; \
static android::sp<I##INTERFACE> asInterface( \
const android::sp<android::IBinder>& obj); \
virtual const android::String16& getInterfaceDescriptor() const; \
I##INTERFACE(); \
virtual ~I##INTERFACE();
#define IMPLEMENT_META_INTERFACE(INTERFACE, NAME) \
const android::String16 I##INTERFACE::descriptor(NAME); \
const android::String16& \
I##INTERFACE::getInterfaceDescriptor() const { \
return I##INTERFACE::descriptor; \
} \
android::sp<I##INTERFACE> I##INTERFACE::asInterface( \
const android::sp<android::IBinder>& obj) \
{ \
android::sp<I##INTERFACE> intr; \
if (obj != NULL) { \
intr = static_cast<I##INTERFACE*>( \
obj->queryLocalInterface( \
I##INTERFACE::descriptor).get()); \
if (intr == NULL) { \
intr = new Bp##INTERFACE(obj); /* returns a BpServiceManager */ \
} \
} \
return intr; \
} \
I##INTERFACE::I##INTERFACE() { } \
I##INTERFACE::~I##INTERFACE() { }
By the nature of these macros, just substitute ServiceManager for INTERFACE and "android.os.IServiceManager" for NAME; it follows that the interface_cast conversion yields a BpServiceManager object.
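For clarity, this is roughly what IMPLEMENT_META_INTERFACE(ServiceManager, "android.os.IServiceManager") expands to, hand-expanded from the macro above (comments added):
const android::String16 IServiceManager::descriptor("android.os.IServiceManager");
const android::String16& IServiceManager::getInterfaceDescriptor() const {
return IServiceManager::descriptor;
}
android::sp<IServiceManager> IServiceManager::asInterface(
const android::sp<android::IBinder>& obj)
{
android::sp<IServiceManager> intr;
if (obj != NULL) {
// a BpBinder has no local interface, so this query returns NULL...
intr = static_cast<IServiceManager*>(
obj->queryLocalInterface(IServiceManager::descriptor).get());
if (intr == NULL) {
// ...and a BpServiceManager wrapping BpBinder(0) is created
intr = new BpServiceManager(obj);
}
}
return intr;
}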
Here the BpXXX naming shows up again; the UML diagram illustrates the full inheritance relationships among the IServiceManager-related classes.

When the BpServiceManager object is created, the obj passed in is BpBinder(0); it ends up stored in the mRemote member of the base class BpRefBase.
class BpServiceManager : public BpInterface<IServiceManager>
{
public:
// passes BpBinder(0) to BpInterface
BpServiceManager(const sp<IBinder>& impl)
: BpInterface<IServiceManager>(impl)
{
}
// passes BpBinder(0) on to BpRefBase
template<typename INTERFACE>
inline BpInterface<INTERFACE>::BpInterface(const sp<IBinder>& remote)
: BpRefBase(remote)
{
}
// assigns BpBinder(0) to mRemote
BpRefBase::BpRefBase(const sp<IBinder>& o)
: mRemote(o.get()), mRefs(NULL), mState(0)
{
extendObjectLifetime(OBJECT_LIFETIME_WEAK);
if (mRemote) {
mRemote->incStrong(this);
mRefs = mRemote->createWeak(this);
}
}
At this point, MediaServer's main function has opened the Binder driver, and the defaultServiceManager function has produced a BpServiceManager object whose mRemote member points to a BpBinder(0) object.
Let's return to MediaServer's main function and analyze how MediaPlayerService gets registered, and how the functions involved do their work.
// register MediaPlayerService
void MediaPlayerService::instantiate() {
// get the ServiceManager object and add the service to it
defaultServiceManager()->addService(String16("media.player"), new MediaPlayerService());
}
From the analysis above, this addService is the one in BpServiceManager.
virtual status_t addService(const String16& name, const sp<IBinder>& service,
bool allowIsolated)
{
Parcel data, reply;// message containers holding the RPC data
data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());//"android.os.IServiceManager"
data.writeString16(name);//"media.player"
data.writeStrongBinder(service);// MediaPlayerService is flattened into the parcel
data.writeInt32(allowIsolated ? 1 : 0);// 0 by default
// send the ADD_SERVICE_TRANSACTION request
status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);// adds the service to the list
// read back the status of the add from the reply
return err == NO_ERROR ? reply.readExceptionCode() : err;
}
remote() returns mRemote, i.e. the BpBinder(0) object.
class BpRefBase : public virtual RefBase
{
protected:
......
// returns mRemote
inline IBinder* remote() { return mRemote; }
inline IBinder* remote() const { return mRemote; }
That is, BpServiceManager only serializes the arguments into Parcel-typed RPC data; it is BpBinder that communicates with the Binder driver, and even BpBinder needs the help of yet another class. Let's continue with the transact function.
status_t BpBinder::transact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
if (mAlive) {
// the actual transfer goes through the IPCThreadState object
status_t status = IPCThreadState::self()->transact(
mHandle, code, data, reply, flags);
if (status == DEAD_OBJECT) mAlive = 0;
return status;
}
return DEAD_OBJECT;
}
IPCThreadState has appeared many times already. ProcessState is per-process; IPCThreadState is per-thread. As analyzed above, ProcessState opens the Binder driver and sets up the thread pool. When IPCThreadState is constructed, it grabs a reference to ProcessState and stores it in mProcess.
IPCThreadState* IPCThreadState::self()
{
// gHaveTLS is false the first time; TLS is short for Thread Local Storage
if (gHaveTLS) {
restart:
const pthread_key_t k = gTLS;
// fetch this thread's IPCThreadState
IPCThreadState* st = (IPCThreadState*)pthread_getspecific(k);
if (st) return st;
// create a new IPCThreadState
return new IPCThreadState;
}
if (gShutdown) return NULL;
// set up the thread-local storage key
pthread_mutex_lock(&gTLSMutex);
if (!gHaveTLS) {
// creates the key and returns it through gTLS
if (pthread_key_create(&gTLS, threadDestructor) != 0) {
pthread_mutex_unlock(&gTLSMutex);
return NULL;
}
// set to true
gHaveTLS = true;
}
pthread_mutex_unlock(&gTLSMutex);
// go back to restart
goto restart;
}
IPCThreadState::self() first sets up a thread-local storage key, then constructs an IPCThreadState, which stores itself into that thread-local slot.
IPCThreadState::IPCThreadState()
: mProcess(ProcessState::self()),// grab the ProcessState
mMyThreadId(androidGetTid()),
mStrictModePolicy(0),
mLastTransactionBinderFlags(0)
{
// save this instance into thread-local storage
pthread_setspecific(gTLS, this);
clearCaller();
// Parcel buffer for receiving commands
mIn.setDataCapacity(256);
// Parcel buffer for sending commands
mOut.setDataCapacity(256);
}
The IPCThreadState constructor saves the ProcessState reference, caches itself in thread-local storage, and initializes the sizes of mIn and mOut: the former receives data coming from the Binder driver, the latter sends data to it.
status_t IPCThreadState::transact(int32_t handle,
uint32_t code, const Parcel& data,
Parcel* reply, uint32_t flags)
{
status_t err = data.errorCheck();
......
if (err == NO_ERROR) {
LOG_ONEWAY(">>>> SEND from pid %d uid %d %s", getpid(), getuid(),
(flags & TF_ONE_WAY) == 0 ? "READ REPLY" : "ONE WAY");
// queue the data: handle is 0, code is ADD_SERVICE_TRANSACTION, data carries MediaPlayerService
err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
}
......
if (reply) {
// wait for the response
err = waitForResponse(reply);
} else {
Parcel fakeReply;
err = waitForResponse(&fakeReply);
}
......
return err;
}
The first argument to writeTransactionData, BC_TRANSACTION, is a protocol command defined by Binder. Commands prefixed BC_ flow from the IPC layer to the Binder driver; BR_ commands flow the opposite way.
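As a rough orientation, not an exhaustive list, these are the protocol commands this flow touches and the direction each travels:
// user space -> driver (BC_ = Binder Command):
//   BC_TRANSACTION   carries a binder_transaction_data request to the target
//   BC_REPLY         carries a server's reply back through the driver
//   BC_ENTER_LOOPER  tells the driver this thread joins the binder loop
// driver -> user space (BR_ = Binder Return):
//   BR_TRANSACTION          a request has arrived for this process
//   BR_REPLY                the reply to an earlier BC_TRANSACTION
//   BR_TRANSACTION_COMPLETE the driver accepted our BC_TRANSACTION
//   BR_SPAWN_LOOPER         the driver asks the process for one more thread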
status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
// the data structure used to communicate with the binder driver
binder_transaction_data tr;
tr.target.ptr = 0;
tr.target.handle = handle; // target.handle is set to 0
tr.code = code; //ADD_SERVICE_TRANSACTION
tr.flags = binderFlags;
tr.cookie = 0;
tr.sender_pid = 0;
tr.sender_euid = 0;
const status_t err = data.errorCheck();
if (err == NO_ERROR) {
tr.data_size = data.ipcDataSize();
tr.data.ptr.buffer = data.ipcData();
tr.offsets_size = data.ipcObjectsCount()*sizeof(binder_size_t);
tr.data.ptr.offsets = data.ipcObjects();
} else if (statusBuffer) {
tr.flags |= TF_STATUS_CODE;
*statusBuffer = err;
tr.data_size = sizeof(status_t);
tr.data.ptr.buffer = reinterpret_cast<uintptr_t>(statusBuffer);
tr.offsets_size = 0;
tr.data.ptr.offsets = 0;
} else {
return (mLastError = err);
}
// write the BC_TRANSACTION command and the packed data into the mOut buffer
mOut.writeInt32(cmd);
mOut.write(&tr, sizeof(tr));
return NO_ERROR;
}
This function only writes the data into the thread's IPCThreadState mOut buffer; nothing has been handed to the Binder driver yet. The sketch below shows what mOut holds at this moment; after that, we move on to the waitForResponse function.
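A simplified schematic (field names follow binder_transaction_data above; layout simplified):
// mOut after writeTransactionData(), before any ioctl:
//
//   +----------------+-------------------------------------------+
//   | int32 cmd      | struct binder_transaction_data tr         |
//   | BC_TRANSACTION | target.handle = 0 (ServiceManager)        |
//   |                | code = ADD_SERVICE_TRANSACTION            |
//   |                | data.ptr.buffer / data_size -> the Parcel |
//   +----------------+-------------------------------------------+
//
// talkWithDriver() later hands this whole buffer to the driver in a single
// BINDER_WRITE_READ ioctl and, in the same call, reads the response into mIn.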
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
int32_t cmd;
int32_t err;
while (1) {
// talkWithDriver exchanges data with the driver
if ((err=talkWithDriver()) < NO_ERROR) break;
err = mIn.errorCheck();
if (err < NO_ERROR) break;
if (mIn.dataAvail() == 0) continue;
// read the response command and decide how to handle it
cmd = mIn.readInt32();
switch (cmd) {
// the driver has accepted our transaction
case BR_TRANSACTION_COMPLETE:
if (!reply && !acquireResult) goto finish;
break;
......
// read the reply data from mIn
case BR_REPLY:
{
binder_transaction_data tr;
err = mIn.read(&tr, sizeof(tr));
ALOG_ASSERT(err == NO_ERROR, "Not enough command data for brREPLY");
if (err != NO_ERROR) goto finish;
if (reply) {
if ((tr.flags & TF_STATUS_CODE) == 0) {
reply->ipcSetDataReference(
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t),
freeBuffer, this);
} else {
err = *reinterpret_cast<const status_t*>(tr.data.ptr.buffer);
freeBuffer(NULL,
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t), this);
}
} else {
freeBuffer(NULL,
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t), this);
continue;
}
}
goto finish;
default:
// handle other response commands
err = executeCommand(cmd);
if (err != NO_ERROR) goto finish;
break;
}
}
finish:
if (err != NO_ERROR) {
if (acquireResult) *acquireResult = err;
if (reply) reply->setError(err);
mLastError = err;
}
return err;
}
Now we reach talkWithDriver, whose name alone says how central it is.
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
// bail out if the driver fd is invalid
if (mProcess->mDriverFD <= 0) {
return -EBADF;
}
// struct used both to write data to the driver and to read data from it
binder_write_read bwr;
// do we need to read data?
const bool needRead = mIn.dataPosition() >= mIn.dataSize();
// size of the outgoing buffer
const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;
bwr.write_size = outAvail;
// points at the mOut buffer
bwr.write_buffer = (uintptr_t)mOut.data();
// size of the incoming buffer
if (doReceive && needRead) {
bwr.read_size = mIn.dataCapacity();
// points at the mIn buffer
bwr.read_buffer = (uintptr_t)mIn.data();
} else {
bwr.read_size = 0;
bwr.read_buffer = 0;
}
// nothing to write and nothing to read: return
if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;
bwr.write_consumed = 0;
bwr.read_consumed = 0;
status_t err;
// issue the ioctl, retrying while interrupted (EINTR)
do {
#if defined(HAVE_ANDROID_OS)
// write data to Binder and, in the same call, read the response into the mIn buffer
if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
err = NO_ERROR;
else
err = -errno;
#else
err = INVALID_OPERATION;
#endif
if (mProcess->mDriverFD <= 0) {
err = -EBADF;
}
IF_LOG_COMMANDS() {
alog << "Finished read/write, write size = " << mOut.dataSize() << endl;
}
} while (err == -EINTR);
......
return err;
}
So talkWithDriver is the function that actually writes data to the Binder driver and reads the response into the mIn buffer. How is the data handled once read? See executeCommand.
status_t IPCThreadState::executeCommand(int32_t cmd)
{
BBinder* obj;
RefBase::weakref_type* refs;
status_t result = NO_ERROR;
switch (cmd) {
case BR_ERROR:
result = mIn.readInt32();
break;
......
case BR_TRANSACTION:
{
binder_transaction_data tr;
// read the transaction data
result = mIn.read(&tr, sizeof(tr));
if (result != NO_ERROR) break;
// build a Parcel to receive the data
Parcel buffer;
buffer.ipcSetDataReference(
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t), freeBuffer, this);
const pid_t origPid = mCallingPid;
const uid_t origUid = mCallingUid;
const int32_t origStrictModePolicy = mStrictModePolicy;
const int32_t origTransactionBinderFlags = mLastTransactionBinderFlags;
mCallingPid = tr.sender_pid;
mCallingUid = tr.sender_euid;
mLastTransactionBinderFlags = tr.flags;
int curPrio = getpriority(PRIO_PROCESS, mMyThreadId);
if (gDisableBackgroundScheduling) {
// cap at ANDROID_PRIORITY_NORMAL by default
if (curPrio > ANDROID_PRIORITY_NORMAL) {
setpriority(PRIO_PROCESS, mMyThreadId, ANDROID_PRIORITY_NORMAL);
}
} else {
// background priority
if (curPrio >= ANDROID_PRIORITY_BACKGROUND) {
set_sched_policy(mMyThreadId, SP_BACKGROUND);
}
}
Parcel reply;
status_t error;
if (tr.target.ptr) {
// cast cookie to a BBinder; here it is the target service's BBinder, e.g. MediaPlayerService's
sp<BBinder> b((BBinder*)tr.cookie);
error = b->transact(tr.code, buffer, &reply, tr.flags);
} else {
error = the_context_object->transact(tr.code, buffer, &reply, tr.flags);
}
if ((tr.flags & TF_ONE_WAY) == 0) {
LOG_ONEWAY("Sending reply to %d!", mCallingPid);
if (error < NO_ERROR) reply.setError(error);
sendReply(reply, 0);
} else {
LOG_ONEWAY("NOT sending reply to %d!", mCallingPid);
}
mCallingPid = origPid;
mCallingUid = origUid;
mStrictModePolicy = origStrictModePolicy;
mLastTransactionBinderFlags = origTransactionBinderFlags;
}
break;
// the Binder driver reports that a service has died
case BR_DEAD_BINDER:
{
BpBinder *proxy = (BpBinder*)mIn.readPointer();
proxy->sendObituary();
mOut.writeInt32(BC_DEAD_BINDER_DONE);
mOut.writePointer((uintptr_t)proxy);
} break;
......
// the driver asks us to spawn a new thread for Binder traffic
case BR_SPAWN_LOOPER:
mProcess->spawnPooledThread(false);
break;
default:
printf("*** BAD COMMAND %d received from Binder driver\n", cmd);
result = UNKNOWN_ERROR;
break;
}
}
if (result != NO_ERROR) {
mLastError = result;
}
return result;
}
At this point MediaPlayerService has been added to ServiceManager. Back in MediaServer's main function, the final step starts threads that loop in talkWithDriver, waiting for client requests and handling the data Binder passes back.
void ProcessState::startThreadPool()
{
AutoMutex _l(mLock);
if (!mThreadPoolStarted) {
mThreadPoolStarted = true;
// spawn a thread
spawnPooledThread(true);
}
}
void ProcessState::spawnPooledThread(bool isMain)
{
if (mThreadPoolStarted) {
String8 name = makeBinderThreadName();
// create the thread
sp<Thread> t = new PoolThread(isMain);
// start it
t->run(name.string());
}
}
PoolThread is really just a Thread; it is defined inside ProcessState.
class PoolThread : public Thread
{
public:
PoolThread(bool isMain)
: mIsMain(isMain)
{
}
protected:
virtual bool threadLoop()
{
// enter the read loop
IPCThreadState::self()->joinThreadPool(mIsMain);
return false;
}
const bool mIsMain;
};
After run() is called, the thread enters a while loop that invokes the threadLoop callback; returning false exits that loop. Here threadLoop calls IPCThreadState's joinThreadPool, which blocks in a loop of its own, so threadLoop effectively runs once. A sketch of this contract follows; after it come getAndExecuteCommand and processPendingDerefs, the helpers joinThreadPool drives, and then joinThreadPool itself.
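A minimal sketch, not the real libutils code, of the run()/threadLoop() contract PoolThread relies on:
// illustrative only: the essence of Thread::run()/threadLoop()
struct ThreadSketch {
virtual ~ThreadSketch() {}
// return true to be called again, false to stop
virtual bool threadLoop() = 0;
void run() {
// the real Thread::run starts a pthread that executes this loop
// (and also checks exitPending()); PoolThread's threadLoop blocks
// inside joinThreadPool and finally returns false, ending the loop
while (threadLoop()) {}
}
};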
status_t IPCThreadState::getAndExecuteCommand()
{
status_t result;
int32_t cmd;
// talk to the driver: flush pending commands and read the next request
result = talkWithDriver();
if (result >= NO_ERROR) {
size_t IN = mIn.dataAvail();
if (IN < sizeof(int32_t)) return result;
cmd = mIn.readInt32();
// handle the command
result = executeCommand(cmd);
set_sched_policy(mMyThreadId, SP_FOREGROUND);
}
return result;
}
void IPCThreadState::processPendingDerefs()
{
if (mIn.dataPosition() >= mIn.dataSize()) {
size_t numPending = mPendingWeakDerefs.size();
if (numPending > 0) {
for (size_t i = 0; i < numPending; i++) {
RefBase::weakref_type* refs = mPendingWeakDerefs[i];
refs->decWeak(mProcess.get());
}
mPendingWeakDerefs.clear();
}
// release pending strong references on BBinder objects
numPending = mPendingStrongDerefs.size();
if (numPending > 0) {
for (size_t i = 0; i < numPending; i++) {
BBinder* obj = mPendingStrongDerefs[i];
obj->decStrong(mProcess.get());
}
mPendingStrongDerefs.clear();
}
}
}
void IPCThreadState::joinThreadPool(bool isMain)
{
mOut.writeInt32(isMain ? BC_ENTER_LOOPER : BC_REGISTER_LOOPER);
set_sched_policy(mMyThreadId, SP_FOREGROUND);
// start the read/write loop
status_t result;
do {
// first flush any pending reference-count decrements
processPendingDerefs();
// then read and execute the next command
result = getAndExecuteCommand();
if (result < NO_ERROR && result != TIMED_OUT && result != -ECONNREFUSED && result != -EBADF) {
abort();
}
if(result == TIMED_OUT && !isMain) {
break;
}
} while (result != -ECONNREFUSED && result != -EBADF);
// the loop has exited
mOut.writeInt32(BC_EXIT_LOOPER);
// tell the driver we are leaving; do not wait for more data
talkWithDriver(false);
}
MediaServer runs five services at once, yet it starts only two threads to read and write the Binder driver: one spawned via startThreadPool, and the main thread itself, which calls joinThreadPool directly (the driver can still request extra threads via BR_SPAWN_LOOPER, as executeCommand showed).
So who handles the message that BpServiceManager sends through the Binder driver, asking to register MediaPlayerService with ServiceManager? The framework never instantiates a BnServiceManager class; the server side is implemented by service_manager.c.
The init process is the first user-space process started at boot. Once running, it starts ServiceManager and MediaServer; here is the relevant fragment of the init.rc script.
# start ServiceManager
service servicemanager /system/bin/servicemanager
user system
critical
# when servicemanager restarts, restart zygote as well
onrestart restart zygote
# ...and restart media, which hosts MediaServer
onrestart restart media
In other words, init ends up running ServiceManager's main function.
int main(int argc, char **argv)
{
// holds the driver state
struct binder_state *bs;
// open the driver
bs = binder_open(128*1024);
if (!bs) {
ALOGE("failed to open binder driver\n");
return -1;
}
// register this process as the context manager, i.e. ServiceManager
if (binder_become_context_manager(bs)) {
ALOGE("cannot become context manager (%s)\n", strerror(errno));
return -1;
}
// is SELinux enabled?
selinux_enabled = is_selinux_enabled();
sehandle = selinux_android_service_context_handle();
if (selinux_enabled > 0) {
if (sehandle == NULL) {
ALOGE("SELinux: Failed to acquire sehandle. Aborting.\n");
abort();
}
if (getcon(&service_manager_context) != 0) {
ALOGE("SELinux: Failed to acquire service_manager context. Aborting.\n");
abort();
}
}
union selinux_callback cb;
cb.func_audit = audit_callback;
selinux_set_callback(SELINUX_CB_AUDIT, cb);
cb.func_log = selinux_log_callback;
selinux_set_callback(SELINUX_CB_LOG, cb);
// svcmgr_handle here is a variable;
// BINDER_SERVICE_MANAGER is 0
svcmgr_handle = BINDER_SERVICE_MANAGER;
// svcmgr_handler (note the trailing r) is a function pointer,
// used to process requests sent by clients in a loop
binder_loop(bs, svcmgr_handler);
return 0;
}
The function that opens the Binder driver is implemented in binder.c.
struct binder_state *binder_open(size_t mapsize)
{
// the state struct
struct binder_state *bs;
struct binder_version vers;
// allocate it
bs = malloc(sizeof(*bs));
if (!bs) {
errno = ENOMEM;
return NULL;
}
// open the driver and save the fd into bs
bs->fd = open("/dev/binder", O_RDWR);
if (bs->fd < 0) {
fprintf(stderr,"binder: cannot open device (%s)\n",
strerror(errno));
goto fail_open;
}
// check the protocol version
if ((ioctl(bs->fd, BINDER_VERSION, &vers) == -1) ||
(vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)) {
fprintf(stderr, "binder: driver version differs from user space\n");
goto fail_open;
}
// mapsize was passed in above as 128*1024
bs->mapsize = mapsize;
// map the memory
bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
if (bs->mapped == MAP_FAILED) {
fprintf(stderr,"binder: cannot map device (%s)\n",
strerror(errno));
goto fail_map;
}
// return bs
return bs;
// failure paths
fail_map:
close(bs->fd);
fail_open:
free(bs);
return NULL;
}
Opening the driver follows the same flow as in ProcessState, so we won't repeat it. Next, how binder_become_context_manager turns this process into the one and only manager.
int binder_become_context_manager(struct binder_state *bs)
{
// sets the handle to 0
return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}
The BINDER_SET_CONTEXT_MGR command tells the Binder driver that the current process is ServiceManager, identified by passing 0. Since every other Binder entity gets a handle greater than 0 in the driver, ServiceManager is guaranteed to be unique, and any other user process can reach it through the driver simply by using handle 0.
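The driver enforces this uniqueness: once a context manager has been registered, a second BINDER_SET_CONTEXT_MGR fails. A hypothetical probe, illustrative only and assuming the usual kernel UAPI header location, might look like this:
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h> // location varies with kernel headers
int try_become_manager(void) {
int fd = open("/dev/binder", O_RDWR);
if (fd < 0) return -1;
// with servicemanager already running, the driver rejects this
// (typically with EBUSY), which is what keeps handle 0 unique
if (ioctl(fd, BINDER_SET_CONTEXT_MGR, 0) < 0) {
printf("context manager already set: %s\n", strerror(errno));
return -1;
}
return 0; // only possible for the first registrant, early at boot
}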
void binder_loop(struct binder_state *bs, binder_handler func)
{
int res;
struct binder_write_read bwr;
uint32_t readbuf[32];
bwr.write_size = 0;
bwr.write_consumed = 0;
bwr.write_buffer = 0;
// queue the BC_ENTER_LOOPER command
readbuf[0] = BC_ENTER_LOOPER;
binder_write(bs, readbuf, sizeof(uint32_t));
// loop, reading data
for (;;) {
bwr.read_size = sizeof(readbuf);
bwr.read_consumed = 0;
bwr.read_buffer = (uintptr_t) readbuf;
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
if (res < 0) {
ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
break;
}
// hand incoming requests to func for processing
res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
if (res == 0) {
ALOGE("binder_loop: unexpected reply?!\n");
break;
}
if (res < 0) {
ALOGE("binder_loop: io error %d %s\n", res, strerror(errno));
break;
}
}
}
It reads incoming requests in a loop and handles them through func, the function pointer passed in above: svcmgr_handler.
int svcmgr_handler(struct binder_state *bs,
struct binder_transaction_data *txn,
struct binder_io *msg,
struct binder_io *reply)
{
struct svcinfo *si;
uint16_t *s;
size_t len;
uint32_t handle;
uint32_t strict_policy;
int allow_isolated;
// is target.handle BINDER_SERVICE_MANAGER, i.e. 0?
if (txn->target.handle != svcmgr_handle)
return -1;
if (txn->code == PING_TRANSACTION)
return 0;
// read the strict-mode policy, then the interface descriptor, from msg
strict_policy = bio_get_uint32(msg);
s = bio_get_string16(msg, &len);
if (s == NULL) {
return -1;
}
if ((len != (sizeof(svcmgr_id) / 2)) ||
memcmp(svcmgr_id, s, sizeof(svcmgr_id))) {
fprintf(stderr,"invalid id %s\n", str8(s, len));
return -1;
}
if (sehandle && selinux_status_updated() > 0) {
struct selabel_handle *tmp_sehandle = selinux_android_service_context_handle();
if (tmp_sehandle) {
selabel_close(sehandle);
sehandle = tmp_sehandle;
}
}
switch(txn->code) {
// fetch a service; the name is read from msg
case SVC_MGR_GET_SERVICE:
case SVC_MGR_CHECK_SERVICE:
// s is the name of the requested service
s = bio_get_string16(msg, &len);
if (s == NULL) {
return -1;
}
handle = do_find_service(bs, s, len, txn->sender_euid, txn->sender_pid);
if (!handle)
break;
bio_put_ref(reply, handle);
return 0;
// corresponds to the addService request
case SVC_MGR_ADD_SERVICE:
s = bio_get_string16(msg, &len);
if (s == NULL) {
return -1;
}
// get the handle of the flattened binder
handle = bio_get_ref(msg);
allow_isolated = bio_get_uint32(msg) ? 1 : 0;
// register the service
if (do_add_service(bs, s, len, handle, txn->sender_euid,
allow_isolated, txn->sender_pid))
return -1;
break;
// list the names of all registered services
case SVC_MGR_LIST_SERVICES: {
uint32_t n = bio_get_uint32(msg);
if (!svc_can_list(txn->sender_pid)) {
ALOGE("list_service() uid=%d - PERMISSION DENIED\n",
txn->sender_euid);
return -1;
}
si = svclist;
while ((n-- > 0) && si)
si = si->next;
if (si) {
bio_put_string16(reply, si->name);
return 0;
}
return -1;
}
default:
ALOGE("unknown code %d\n", txn->code);
return -1;
}
bio_put_uint32(reply, 0);
return 0;
}
In other words, the call defaultServiceManager()->addService(String16("media.player"), new MediaPlayerService()) eventually lands in the SVC_MGR_ADD_SERVICE case. Next, do_add_service shows the actual registration flow.
int do_add_service(struct binder_state *bs,
const uint16_t *s, size_t len,
uint32_t handle, uid_t uid, int allow_isolated,
pid_t spid)
{
// singly linked list
struct svcinfo *si;
if (!handle || (len == 0) || (len > 127))
return -1;
// check whether the caller is allowed to register this service
if (!svc_can_register(s, len, spid)) {
return -1;
}
// the service is already registered
si = find_svc(s, len);
if (si) {
if (si->handle) {
svcinfo_death(bs, si);
}
si->handle = handle;
} else {
// not yet registered: allocate memory for a svcinfo
si = malloc(sizeof(*si) + (len + 1) * sizeof(uint16_t));
if (!si) {
return -1;
}
si->handle = handle;
// copy in the service name
si->len = len;
memcpy(si->name, s, (len + 1) * sizeof(uint16_t));
si->name[len] = '\0';
// callback invoked when the service dies
si->death.func = (void*) svcinfo_death;
si->death.ptr = si;
si->allow_isolated = allow_isolated;
// head insertion
si->next = svclist;
svclist = si;
}
binder_acquire(bs, handle);
// clean up when notified that this service has died
binder_link_to_death(bs, handle, &si->death);
return 0;
}
ServiceManager maintains a linked list: each service is wrapped in a svcinfo node and added to the existing list by head insertion.
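For reference, a trimmed-down sketch of the node and of the lookup that find_svc performs. The fields follow the code above, but this is a simplification, not the verbatim service_manager.c definition (the real struct allocates the name flexibly and also carries the death record and allow_isolated flag).
#include <stdint.h>
#include <string.h>
struct svcinfo_sketch {
struct svcinfo_sketch *next; // head-inserted singly linked list
uint32_t handle;             // driver-assigned handle of the service
size_t len;                  // name length, in uint16_t units
uint16_t name[64];           // UTF-16 name, e.g. "media.player"
};
// simplified lookup: walk the list comparing UTF-16 names
struct svcinfo_sketch *find_svc_sketch(struct svcinfo_sketch *list,
const uint16_t *s, size_t len) {
struct svcinfo_sketch *si;
for (si = list; si; si = si->next) {
if (si->len == len && !memcmp(si->name, s, len * sizeof(uint16_t)))
return si;
}
return 0;
}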
This completes the walkthrough of how MediaServer registers MediaPlayerService with ServiceManager through the Binder driver.
Now that the MediaPlayerService we need is registered with ServiceManager, how does MediaPlayer obtain MediaPlayerService from ServiceManager and start playback? That is left for the next article.