In Android IPC (Part 3) — Source Analysis of the servicemanager Startup we covered how the servicemanager process starts and saw that, once started, it suspends itself and waits to be woken by other processes. In this part we trace how the system service AMS is registered with servicemanager, and use that flow to understand how Binder crosses process boundaries. The overall path starts at the Java-level ServiceManager.addService call and ends with a cross-process Binder call into servicemanager's do_add_service, which records the service inside servicemanager.
Source code: GitHub
Creating the AMS service
Android IPC (Part 2) — System Processes Involved in Binder Communication covered how system services are started. AMS is started as follows:
private void startBootstrapServices() {
Installer installer = mSystemServiceManager.startService(Installer.class);
// Activity manager runs the show.
mActivityManagerService = mSystemServiceManager.startService(
ActivityManagerService.Lifecycle.class).getService();
mActivityManagerService.setSystemServiceManager(mSystemServiceManager);
mActivityManagerService.setInstaller(installer);
mActivityManagerService.setSystemProcess();
}
The argument passed to mSystemServiceManager.startService() is the ActivityManagerService.Lifecycle class:
public static final class Lifecycle extends SystemService {
private final ActivityManagerService mService;
public Lifecycle(Context context) {
super(context);
mService = new ActivityManagerService(context);
}
@Override
public void onStart() {
mService.start();
}
public ActivityManagerService getService() {
return mService;
}
}
As you can see, the Lifecycle constructor creates the AMS instance, and onStart() calls mService.start() to start the service.
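Many system services follow this same Lifecycle pattern. Below is a minimal sketch of a hypothetical service written the same way (MyFeatureService and the "my_feature" name are made up, and the code assumes it is built inside frameworks/base, since SystemService is framework-internal); publishBinderService() ends up calling ServiceManager.addService(), which is exactly the path traced in the rest of this article.
import android.content.Context;
import android.os.Binder;
import com.android.server.SystemService;

// Hypothetical service following the AMS Lifecycle pattern (names are made up).
public final class MyFeatureService extends Binder {

    public static final class Lifecycle extends SystemService {
        private final MyFeatureService mService;

        public Lifecycle(Context context) {
            super(context);
            mService = new MyFeatureService(); // created in the constructor, like AMS
        }

        @Override
        public void onStart() {
            // Makes the Binder reachable from other processes; internally this calls
            // ServiceManager.addService(), the same call AMS makes in setSystemProcess().
            publishBinderService("my_feature", mService);
        }
    }
}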
mSystemServiceManager.startService
public SystemService startService(String className) {//load the class by its name (here ActivityManagerService.Lifecycle)
final Class<SystemService> serviceClass;
try {
serviceClass = (Class<SystemService>)Class.forName(className);
} catch (ClassNotFoundException ex) {
// elided: the real method throws a RuntimeException here
}
return startService(serviceClass);
}
startService(Class<T> serviceClass)
public <T extends SystemService> T startService(Class<T> serviceClass) {
final String name = serviceClass.getName();
Slog.i(TAG, "Starting " + name);
// Create the service.
if (!SystemService.class.isAssignableFrom(serviceClass)) {
throw new RuntimeException("Failed to create " + name
+ ": service must extend " + SystemService.class.getName());
}
final T service;
try {
Constructor<T> constructor = serviceClass.getConstructor(Context.class);
service = constructor.newInstance(mContext);//invoke the ActivityManagerService.Lifecycle constructor
} catch (InstantiationException ex) {
// elided: several catch blocks around the reflective call
}
// Start it.
try {
service.onStart();//calls Lifecycle.onStart(), which in turn calls AMS.start()
} catch (RuntimeException ex) {
// elided
}
return service;//returns the ActivityManagerService.Lifecycle instance
}
So mSystemServiceManager.startService(ActivityManagerService.Lifecycle.class) returns an ActivityManagerService.Lifecycle object; calling its getService() yields the AMS instance, which is assigned to mActivityManagerService. The code then calls
mActivityManagerService.setSystemProcess()
public void setSystemProcess() {
try {
ServiceManager.addService(Context.ACTIVITY_SERVICE, this, true);
.....
}
which calls ServiceManager.addService:
public static void addService(String name, IBinder service, boolean allowIsolated) {
try {
getIServiceManager().addService(name, service, allowIsolated);
} catch (RemoteException e) {
Log.e(TAG, "error in addService", e);
}
}
Note that the second parameter of addService is an IBinder, which means ActivityManagerService itself is an IBinder: it extends ActivityManagerNative, and ActivityManagerNative extends Binder.
public final class ActivityManagerService extends ActivityManagerNative implements Watchdog.Monitor, BatteryStatsImpl.BatteryCallback{}
public abstract class ActivityManagerNative extends Binder implements IActivityManager{}
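To make that relationship concrete, here is a minimal sketch of a service written directly against the Binder class (the class name, descriptor and transaction code are invented); AMS works the same way, except that its transaction codes and marshalling come from the IActivityManager protocol.
import android.os.Binder;
import android.os.IBinder;
import android.os.Parcel;
import android.os.RemoteException;

// Hand-rolled Binder service (hypothetical names, not part of AOSP).
class EchoService extends Binder {
    static final String DESCRIPTOR = "com.example.IEcho";
    static final int TRANSACTION_ECHO = IBinder.FIRST_CALL_TRANSACTION;

    @Override
    protected boolean onTransact(int code, Parcel data, Parcel reply, int flags)
            throws RemoteException {
        if (code == TRANSACTION_ECHO) {
            data.enforceInterface(DESCRIPTOR); // validate the interface token
            String msg = data.readString();    // unmarshal the argument
            reply.writeNoException();
            reply.writeString("echo: " + msg); // marshal the result
            return true;
        }
        return super.onTransact(code, data, reply, flags);
    }
}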
getIServiceManager()
private static IServiceManager getIServiceManager() {
if (sServiceManager != null) {//note: lazily initialized singleton
return sServiceManager;
}
// Find the service manager -- this code runs only once
sServiceManager = ServiceManagerNative.asInterface(BinderInternal.getContextObject());
return sServiceManager;
}
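For completeness, the lookup direction mirrors this: once AMS has been registered, a framework-internal caller in another process fetches the IBinder by name and wraps it in a proxy. A sketch using the pre-API-26 classes quoted in this article (ServiceManager and ActivityManagerNative are hidden framework APIs):
import android.app.ActivityManagerNative;
import android.app.IActivityManager;
import android.content.Context;
import android.os.IBinder;
import android.os.ServiceManager;

// Framework-internal sketch: how another process obtains AMS once it is registered.
final class AmsLookupDemo {
    static IActivityManager lookup() {
        IBinder b = ServiceManager.getService(Context.ACTIVITY_SERVICE); // "activity"
        // Outside system_server, b is a BinderProxy, so asInterface() returns an
        // ActivityManagerProxy that forwards every call through transact().
        return ActivityManagerNative.asInterface(b);
    }
}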
Let's first look at
BinderInternal.getContextObject()
public static final native IBinder getContextObject();
This is a native method; its native counterpart is
android_os_BinderInternal_getContextObject
It is implemented in frameworks/base/core/jni/android_util_Binder.cpp:
static jobject android_os_BinderInternal_getContextObject(JNIEnv* env, jobject clazz)
{
sp<IBinder> b = ProcessState::self()->getContextObject(NULL);
return javaObjectForIBinder(env, b);
}
ProcessState::self()
sp<ProcessState> ProcessState::self()
{
Mutex::Autolock _l(gProcessMutex);
if (gProcess != NULL) {
return gProcess;
}
gProcess = new ProcessState;//create the ProcessState object; see its constructor below
return gProcess;
}
ProcessState::ProcessState()
: mDriverFD(open_driver())//key point: open the binder driver
, mVMStart(MAP_FAILED)
, mThreadCountLock(PTHREAD_MUTEX_INITIALIZER)
, mThreadCountDecrement(PTHREAD_COND_INITIALIZER)
, mExecutingThreadsCount(0)
, mMaxThreads(DEFAULT_MAX_BINDER_THREADS)
, mManagesContexts(false)
, mBinderContextCheckFunc(NULL)
, mBinderContextUserData(NULL)
, mThreadPoolStarted(false)
, mThreadPoolSeq(1)
{
// mmap the binder, providing a chunk of virtual address space to receive transactions.
mVMStart = mmap(0, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);//map the driver into this process's address space
}
open_driver
static int open_driver()
{
int fd = open("/dev/binder", O_RDWR);//打开binder驱动
if (fd >= 0) {
fcntl(fd, F_SETFD, FD_CLOEXEC);
int vers = 0;
status_t result = ioctl(fd, BINDER_VERSION, &vers);
if (result == -1) {
ALOGE("Binder ioctl to obtain version failed: %s", strerror(errno));
close(fd);
fd = -1;
}
if (result != 0 || vers != BINDER_CURRENT_PROTOCOL_VERSION) {
ALOGE("Binder driver protocol does not match user space protocol!");
close(fd);
fd = -1;
}
size_t maxThreads = DEFAULT_MAX_BINDER_THREADS;//set the maximum number of binder threads (15)
result = ioctl(fd, BINDER_SET_MAX_THREADS, &maxThreads);
if (result == -1) {
ALOGE("Binder ioctl to set max threads failed: %s", strerror(errno));
}
} else {
ALOGW("Opening '/dev/binder' failed: %s\n", strerror(errno));
}
return fd;
}
So ProcessState::self() creates the ProcessState object and opens the Binder driver for the process; this is where the driver is opened for the SystemServer process. You may have noticed that an ordinary app never opens the Binder driver explicitly when it does IPC: its driver is opened through ProcessState::self() as well, at app startup, in frameworks/base/cmds/app_process/app_main.cpp:
virtual void onStarted()
{
sp<ProcessState> proc = ProcessState::self();
ALOGV("App process: starting thread pool.\n");
proc->startThreadPool();
AndroidRuntime* ar = AndroidRuntime::getRuntime();
ar->callMain(mClassName, mClass, mArgs);
IPCThreadState::self()->stopProcess();
}
virtual void onZygoteInit()
{
sp<ProcessState> proc = ProcessState::self();
ALOGV("App process: starting thread pool.\n");
proc->startThreadPool();
}
Now let's continue with the getContextObject method in ProcessState::self()->getContextObject(NULL):
sp<IBinder> ProcessState::getContextObject(const sp<IBinder>& /*caller*/)
{
return getStrongProxyForHandle(0);
}
sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{
sp<IBinder> result;
AutoMutex _l(mLock);
handle_entry* e = lookupHandleLocked(handle);//e is non-NULL (see lookupHandleLocked below)
if (e != NULL) {//found the entry for handle 0
IBinder* b = e->binder;//per lookupHandleLocked, e->binder and e->refs are NULL for a fresh entry
if (b == NULL || !e->refs->attemptIncWeak(this)) {//true on first use
if (handle == 0) {//handle 0 means servicemanager's binder
Parcel data;
status_t status = IPCThreadState::self()->transact(
0, IBinder::PING_TRANSACTION, data, NULL, 0);//ping to check whether servicemanager is ready
if (status == DEAD_OBJECT)
return NULL;
}
b = new BpBinder(handle); //create BpBinder(0), the native proxy
e->binder = b;
if (b) e->refs = b->getWeakRefs();
result = b;
} else {
// This little bit of nastyness is to allow us to add a primary
// reference to the remote proxy when this team doesn't have one
// but another team is sending the handle to us.
result.force_set(b);
e->refs->decWeak(this);
}
}
return result;
}
ProcessState::handle_entry* ProcessState::lookupHandleLocked(int32_t handle)
{
const size_t N=mHandleToObject.size();//look the handle up in the vector
if (N <= (size_t)handle) {//not present yet: create and insert an empty entry
handle_entry e;
e.binder = NULL;
e.refs = NULL;
status_t err = mHandleToObject.insertAt(e, N, handle+1-N);
if (err < NO_ERROR) return NULL;
}
return &mHandleToObject.editItemAt(handle);//找到handle为0的handle_entry,返回不为空
}
So ProcessState::self()->getContextObject(NULL) returns a BpBinder(0), a native-layer object. Next let's see how javaObjectForIBinder turns this native object into a Java-layer object and returns it.
javaObjectForIBinder
//convert a native IBinder into a Java-layer IBinder
jobject javaObjectForIBinder(JNIEnv* env, const sp<IBinder>& val)
{
if (val == NULL) return NULL;
//if this branch is taken, the returned jobject is actually a Java Binder; &gBinderOffsets identifies the Java-layer Binder class
if (val->checkSubclass(&gBinderOffsets)) {//is val backed by a Java Binder (i.e. a JavaBBinder)?
// One of our own!
jobject object = static_cast<JavaBBinder*>(val.get())->object();//if so, return the Java Binder it wraps
LOGDEATH("objectForBinder %p: it's our own %p!\n", val.get(), object);
return object;
}
// For the rest of the function we will hold this lock, to serialize
// looking/creation of Java proxies for native Binder proxies.
AutoMutex _l(mProxyLock);
// Someone else's... do we know about it?
///on the binder client side, a Java-layer BinderProxy object must be returned
jobject object = (jobject)val->findObject(&gBinderProxyOffsets);
if (object != NULL) {
// if a BinderProxy is already cached (held via a Java-layer WeakReference), return it
jobject res = jniGetReferent(env, object);
if (res != NULL) {
ALOGV("objectForBinder %p: found existing %p!\n", val.get(), res);
return res;
}
LOGDEATH("Proxy object %p of IBinder %p no longer in working set!!!", object, val.get());
android_atomic_dec(&gNumProxyRefs);
val->detachObject(&gBinderProxyOffsets);
env->DeleteGlobalRef(object);
}
//create a new BinderProxy object
object = env->NewObject(gBinderProxyOffsets.mClass, gBinderProxyOffsets.mConstructor);
if (object != NULL) {
LOGDEATH("objectForBinder %p: created new proxy %p !\n", val.get(), object);
// The proxy holds a reference to the native object.
env->SetLongField(object, gBinderProxyOffsets.mObject, (jlong)val.get());
val->incStrong((void*)javaObjectForIBinder);
//the BinderProxy's mObject field now holds the pointer to the native IBinder,
//whose actual type is BpBinder
// The native object needs to hold a weak reference back to the
// proxy, so we can retrieve the same proxy if it is still active.
jobject refObject = env->NewGlobalRef(
env->GetObjectField(object, gBinderProxyOffsets.mSelf));
//attach it under gBinderProxyOffsets so that the findObject call above can locate it later
val->attachObject(&gBinderProxyOffsets, refObject,
jnienv_to_javavm(env), proxy_cleanup);
// Also remember the death recipients registered on this proxy
sp<DeathRecipientList> drl = new DeathRecipientList;
drl->incStrong((void*)javaObjectForIBinder);
env->SetLongField(object, gBinderProxyOffsets.mOrgue, reinterpret_cast<jlong>(drl.get()));
// Note that a new object reference has been created.
android_atomic_inc(&gNumProxyRefs);
incRefsCreated(env);
}
return object;
}
First, what is the gBinderOffsets argument passed to checkSubclass(&gBinderOffsets)?
static struct bindernative_offsets_t
{
// Class state.
jclass mClass;
jmethodID mExecTransact;
// Object state.
jfieldID mObject;
} gBinderOffsets; //a struct caching information about the Java-layer Binder class
&gBinderOffsets其实就是读取mClass地址上的数据,我们看下mClass的值是什么
const char* const kBinderPathName = "android/os/Binder";
//fill in the gBinderOffsets fields and register the Binder class's native methods
static int int_register_android_os_Binder(JNIEnv* env)
{
jclass clazz = FindClassOrDie(env, kBinderPathName);
gBinderOffsets.mClass = MakeGlobalRefOrDie(env, clazz);
gBinderOffsets.mExecTransact = GetMethodIDOrDie(env, clazz, "execTransact", "(IJJI)Z");
gBinderOffsets.mObject = GetFieldIDOrDie(env, clazz, "mObject", "J");
return RegisterMethodsOrDie(
env, kBinderPathName,
gBinderMethods, NELEM(gBinderMethods));
}
So gBinderOffsets.mClass refers to android/os/Binder. Here val is a BpBinder, so the checkSubclass condition is false and we move on to jobject object = (jobject)val->findObject(&gBinderProxyOffsets): the BpBinder(0) we just created only has its handle set to 0 and has had nothing attached to it, so object is NULL as well, and execution reaches
object = env->NewObject(gBinderProxyOffsets.mClass, gBinderProxyOffsets.mConstructor);
This line calls up from native code into Java and creates an instance of a Java class. Let's see which Java class gBinderProxyOffsets describes:
const char* const kBinderProxyPathName = "android/os/BinderProxy";
static int int_register_android_os_BinderProxy(JNIEnv* env)
{
jclass clazz = FindClassOrDie(env, "java/lang/Error");
gErrorOffsets.mClass = MakeGlobalRefOrDie(env, clazz);
clazz = FindClassOrDie(env, kBinderProxyPathName);
gBinderProxyOffsets.mClass = MakeGlobalRefOrDie(env, clazz);
gBinderProxyOffsets.mConstructor = GetMethodIDOrDie(env, clazz, "<init>", "()V");
gBinderProxyOffsets.mSendDeathNotice = GetStaticMethodIDOrDie(env, clazz, "sendDeathNotice",
"(Landroid/os/IBinder$DeathRecipient;)V");
gBinderProxyOffsets.mObject = GetFieldIDOrDie(env, clazz, "mObject", "J");
gBinderProxyOffsets.mSelf = GetFieldIDOrDie(env, clazz, "mSelf",
"Ljava/lang/ref/WeakReference;");
gBinderProxyOffsets.mOrgue = GetFieldIDOrDie(env, clazz, "mOrgue", "J");
clazz = FindClassOrDie(env, "java/lang/Class");
gClassOffsets.mGetName = GetMethodIDOrDie(env, clazz, "getName", "()Ljava/lang/String;");
return RegisterMethodsOrDie(
env, kBinderProxyPathName,
gBinderProxyMethods, NELEM(gBinderProxyMethods));
}
From the code above we can see that gBinderProxyOffsets caches the Java-layer BinderProxy class, so javaObjectForIBinder ultimately creates a Java BinderProxy object in native code and returns it to the Java layer. In other words, BinderInternal.getContextObject() returns a BinderProxy whose mObject field holds the native BpBinder(0). Back in Java the code reaches ServiceManagerNative.asInterface(BinderProxy); let's see what asInterface does.
static public IServiceManager asInterface(IBinder obj)
{
if (obj == null) {
return null;
}
IServiceManager in =
(IServiceManager)obj.queryLocalInterface(descriptor);
if (in != null) {
return in;
}
return new ServiceManagerProxy(obj);
}
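The deciding step in asInterface is queryLocalInterface(): a local Binder that has registered an owner via attachInterface() hands that owner back directly, while a BinderProxy always returns null. A small self-contained sketch (the IEcho interface and descriptor are invented):
import android.os.Binder;
import android.os.IBinder;
import android.os.IInterface;

final class QueryLocalDemo {
    interface IEcho extends IInterface { }

    static void demo() {
        final Binder local = new Binder();
        final IEcho echo = new IEcho() {
            @Override public IBinder asBinder() { return local; }
        };
        local.attachInterface(echo, "com.example.IEcho");

        // Same process: the local Binder returns its owner directly.
        IInterface inProcess = local.queryLocalInterface("com.example.IEcho"); // == echo

        // A BinderProxy received from another process overrides queryLocalInterface()
        // to return null, which is why asInterface() above falls through to
        // new ServiceManagerProxy(obj).
    }
}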
As you can see, asInterface ultimately returns a ServiceManagerProxy object:
class ServiceManagerProxy implements IServiceManager {
public ServiceManagerProxy(IBinder remote) {
mRemote = remote;
}
public IBinder asBinder() {
return mRemote;
}
public void addService(String name, IBinder service, boolean allowIsolated)
throws RemoteException {
Parcel data = Parcel.obtain();
Parcel reply = Parcel.obtain();
data.writeInterfaceToken(IServiceManager.descriptor);
data.writeString(name);
data.writeStrongBinder(service);
data.writeInt(allowIsolated ? 1 : 0);
mRemote.transact(ADD_SERVICE_TRANSACTION, data, reply, 0);
reply.recycle();
data.recycle();
}
}
ServiceManagerProxy's mRemote is exactly that BinderProxy object. So ServiceManager.addService ends up in ServiceManagerProxy.addService, which issues the call through the BinderProxy:
mRemote.transact(ADD_SERVICE_TRANSACTION, data, reply, 0);
From this point on the Java-level SystemServer process starts talking to the servicemanager process to register AMS. Keep in mind what SystemServer sends (a caller-side sketch of the same pattern follows the list):
- ADD_SERVICE_TRANSACTION
- a Parcel (data) whose contents include the AMS Binder object
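Before following transact() into native code, here is the caller-side shape of the same pattern, sketched against the hypothetical EchoService from earlier: marshal the arguments into a Parcel, call transact() on the IBinder, unmarshal the reply. When the IBinder is a BinderProxy the call goes through the driver as described below; on a local Binder, transact() simply dispatches to onTransact() in the same process.
import android.os.IBinder;
import android.os.Parcel;
import android.os.RemoteException;

// Mirrors ServiceManagerProxy.addService() (hypothetical interface and transaction code).
final class EchoClient {
    static String callEcho(IBinder remote, String msg) throws RemoteException {
        Parcel data = Parcel.obtain();
        Parcel reply = Parcel.obtain();
        try {
            data.writeInterfaceToken("com.example.IEcho"); // checked by enforceInterface() on the server side
            data.writeString(msg);
            remote.transact(IBinder.FIRST_CALL_TRANSACTION, data, reply, 0);
            reply.readException();
            return reply.readString();
        } finally {
            reply.recycle();
            data.recycle();
        }
    }
}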
Now let's look at BinderProxy, which is defined in Binder.java:
//how it holds a reference to the native BpBinder
final class BinderProxy implements IBinder {
public IInterface queryLocalInterface(String descriptor) {
return null;
}
public boolean transact(int code, Parcel data, Parcel reply, int flags) throws RemoteException {
Binder.checkParcel(this, code, data, "Unreasonably large binder buffer");
return transactNative(code, data, reply, flags);
}
public native boolean transactNative(int code, Parcel data, Parcel reply,
int flags) throws RemoteException;
BinderProxy() {
mSelf = new WeakReference(this);
}
final private WeakReference mSelf;
private long mObject;//holds the pointer to the native BpBinder
private long mOrgue;
}
BinderProxy.transact calls the native method transactNative, which is implemented in android_util_Binder.cpp. From the JNI registration table we can see that transactNative maps to
android_os_BinderProxy_transact
//gBinderProxyMethods maps Java methods to their C/C++ implementations.
static const JNINativeMethod gBinderProxyMethods[] = {
/* name, signature, funcPtr */
{"pingBinder", "()Z", (void*)android_os_BinderProxy_pingBinder},
{"isBinderAlive", "()Z", (void*)android_os_BinderProxy_isBinderAlive},
{"getInterfaceDescriptor", "()Ljava/lang/String;", (void*)android_os_BinderProxy_getInterfaceDescriptor},
{"transactNative", "(ILandroid/os/Parcel;Landroid/os/Parcel;I)Z", (void*)android_os_BinderProxy_transact},
{"linkToDeath", "(Landroid/os/IBinder$DeathRecipient;I)V", (void*)android_os_BinderProxy_linkToDeath},
{"unlinkToDeath", "(Landroid/os/IBinder$DeathRecipient;I)Z", (void*)android_os_BinderProxy_unlinkToDeath},
{"destroy", "()V", (void*)android_os_BinderProxy_destroy},
};
//implements the transact method of the Java BinderProxy class
static jboolean android_os_BinderProxy_transact(JNIEnv* env, jobject obj,
jint code, jobject dataObj, jobject replyObj, jint flags) // throws RemoteException
{
if (dataObj == NULL) {
jniThrowNullPointerException(env, NULL);
return JNI_FALSE;
}
Parcel* data = parcelForJavaObject(env, dataObj);
if (data == NULL) {
return JNI_FALSE;
}
Parcel* reply = parcelForJavaObject(env, replyObj);
if (reply == NULL && replyObj != NULL) {
return JNI_FALSE;
}
//mObject holds the pointer to the native BpBinder
IBinder* target = (IBinder*) env->GetLongField(obj, gBinderProxyOffsets.mObject);
//printf("Transact from Java code to %p sending: ", target); data->print();
//now call BpBinder::transact
status_t err = target->transact(code, *data, reply, flags);
}
Continue into BpBinder::transact:
status_t BpBinder::transact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
// Once a binder has died, it will never come back to life.
if (mAlive) {
status_t status = IPCThreadState::self()->transact(
mHandle, code, data, reply, flags);
if (status == DEAD_OBJECT) mAlive = 0;
return status;
}
return DEAD_OBJECT;
}
Continue with IPCThreadState::self()->transact(mHandle, code, data, reply, flags). We already know this object is BpBinder(0), so mHandle is 0 here; code and data are still what came down from the Java layer.
IPCThreadState* IPCThreadState::self()
{
if (gHaveTLS) {//false on first entry; TLS stands for Thread Local Storage
restart:
const pthread_key_t k = gTLS;
//fetch the IPCThreadState object stored in thread-local storage;
//wherever there is a pthread_getspecific there must be a matching
//pthread_setspecific, which happens in the IPCThreadState constructor shown below
IPCThreadState* st = (IPCThreadState*)pthread_getspecific(k);
if (st) return st;
return new IPCThreadState;
}
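The gTLS/pthread_getspecific logic is just a per-thread singleton. In Java terms it corresponds to a ThreadLocal-backed self(), roughly like the analogy below (this is only an illustration, not framework code):
// Per-thread singleton, the Java analogue of IPCThreadState::self().
final class PerThreadState {
    private static final ThreadLocal<PerThreadState> sSelf = new ThreadLocal<PerThreadState>() {
        @Override protected PerThreadState initialValue() {
            return new PerThreadState(); // created on first access from each thread
        }
    };

    static PerThreadState self() {
        return sSelf.get(); // every thread gets its own instance
    }

    private PerThreadState() { }
}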
IPCThreadState::self() returns an IPCThreadState object; now look at its transact method:
status_t IPCThreadState::transact(int32_t handle,uint32_t code, const Parcel& data,Parcel* reply, uint32_t flags)
{
status_t err = data.errorCheck();
if (err == NO_ERROR) {
//package the data to be transmitted
err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
}
if ((flags & TF_ONE_WAY) == 0) {
if (reply) {
err = waitForResponse(reply);
} else {
........
writeTransactionData wraps the data handed down from the Java layer for the upcoming Binder call; much like the TCP/IP stack, where each layer from the application layer down to the physical layer adds its own header, each layer here adds its own wrapper around the payload.
status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
binder_transaction_data tr;//this struct was introduced in the servicemanager startup article
tr.target.ptr = 0; /* Don't pass uninitialized stack data to a remote process */
tr.target.handle = handle;//the target handle is 0 here, i.e. servicemanager
tr.code = code;//code == ADD_SERVICE_TRANSACTION
tr.flags = binderFlags;
tr.cookie = 0;
tr.sender_pid = 0;
tr.sender_euid = 0;
const status_t err = data.errorCheck();
if (err == NO_ERROR) {
tr.data_size = data.ipcDataSize();//size of the actual payload (important)
tr.data.ptr.buffer = data.ipcData();//pointer to the actual payload (important)
tr.offsets_size = data.ipcObjectsCount()*sizeof(binder_size_t);//size of the offset array locating binder objects inside the payload
tr.data.ptr.offsets = data.ipcObjects();//the offset array itself
} else if (statusBuffer) {
tr.flags |= TF_STATUS_CODE;
*statusBuffer = err;
tr.data_size = sizeof(status_t);
tr.data.ptr.buffer = reinterpret_cast<uintptr_t>(statusBuffer);
tr.offsets_size = 0;
tr.data.ptr.offsets = 0;
} else {
return (mLastError = err);
}
mOut.writeInt32(cmd);//the first thing in mOut is the command, here BC_TRANSACTION
mOut.write(&tr, sizeof(tr));//followed by the binder_transaction_data
return NO_ERROR;
}
Note what this packaging really does: it only wraps the information describing the Java-side data. The payload itself is still in user space and has not been copied into the kernel; from here on the native layer passes binder_transaction_data around to describe it, until the payload is finally copied into kernel space and handed to the servicemanager process.
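At the Java level you can get a feel for what ipcData()/ipcDataSize() describe by flattening a Parcel yourself. A small sketch (Parcel.marshall() refuses to run once the Parcel contains binder objects or file descriptors, so this example only writes plain values; the descriptor and service name mimic what addService writes):
import android.os.Parcel;

final class ParcelFlattenDemo {
    // The payload that later becomes tr.data.ptr.buffer / tr.data_size is the Parcel's flat buffer.
    static byte[] flatten() {
        Parcel data = Parcel.obtain();
        data.writeInterfaceToken("android.os.IServiceManager");
        data.writeString("activity");
        int size = data.dataSize();    // roughly what ipcDataSize() reports on the native side
        byte[] flat = data.marshall(); // a copy of the raw bytes, still entirely in user space
        data.recycle();
        return flat;
    }
}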
Next, waitForResponse runs:
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
uint32_t cmd;
int32_t err;
while (1) {
if ((err=talkWithDriver()) < NO_ERROR) break;//start talking to the driver to send the data
......
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
binder_write_read bwr;
const bool needRead = mIn.dataPosition() >= mIn.dataSize();
const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;//needRead is true on this first call, so outAvail = mOut.dataSize()
bwr.write_size = outAvail;//hence write_size > 0
bwr.write_buffer = (uintptr_t)mOut.data();//write_buffer points at mOut's data: the cmd plus the tr built above, i.e. bwr wraps mOut
// This is what we'll read.
if (doReceive && needRead) {
bwr.read_size = mIn.dataCapacity();
bwr.read_buffer = (uintptr_t)mIn.data();
} else {
bwr.read_size = 0;//read_size = 0
bwr.read_buffer = 0;
}
bwr.write_consumed = 0;
bwr.read_consumed = 0;
status_t err;
#if defined(HAVE_ANDROID_OS)
//the key operation: hand the request to the driver
if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
err = NO_ERROR;
else
err = -errno;
The ioctl is obviously where we talk to the binder driver; mProcess holds the file descriptor of the binder driver opened for the current process:
IPCThreadState::IPCThreadState()
: mProcess(ProcessState::self()),//fetch the per-process state (and the driver fd) via ProcessState::self()
mMyThreadId(gettid()),
{
pthread_setspecific(gTLS, this);
}
Focus on ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr): BINDER_WRITE_READ is the command sent to the driver, telling it to read/write data, and &bwr is the address of the data descriptor; as noted above, bwr is only a wrapper that describes where the real data lives. Now we enter the driver code in binder.c; binder_ioctl was already analyzed earlier:
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int ret;
struct binder_proc *proc = filp->private_data;//the binder_proc of the calling process
struct binder_thread *thread;//the calling thread
unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;//user-space pointer to the data (the bwr struct)
thread = binder_get_thread(proc);//look up (or create) the binder_thread for this thread
switch (cmd) {
case BINDER_WRITE_READ://binder data read/write; arg is the user-space data pointer, filp the driver handle, thread the calling thread
ret = binder_ioctl_write_read(filp, cmd, arg, thread);
Continue into binder_ioctl_write_read:
static int binder_ioctl_write_read(struct file *filp,unsigned int cmd, unsigned long arg,struct binder_thread *thread)
{
int ret = 0;
struct binder_proc *proc = filp->private_data;//
unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;
struct binder_write_read bwr;
//ubuf points at a binder_write_read struct in user space; copy it from user space into kernel space
//as stressed earlier, binder_write_read is only a header describing the user-space payload; this copy brings in that header, while the actual data is still in user space
if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
ret = -EFAULT;
goto out;
}
if (bwr.write_size > 0) {//from talkWithDriver we know write_size > 0
ret = binder_thread_write(proc, thread,bwr.write_buffer,bwr.write_size,&bwr.write_consumed);
trace_binder_write_done(ret);
if (ret < 0) {
bwr.read_consumed = 0;
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto out;
}
}
.......
static int binder_thread_write(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed)
{
uint32_t cmd;
struct binder_context *context = proc->context;
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;//the user-space address of mOut's data
void __user *ptr = buffer + *consumed;//start of the unconsumed data
void __user *end = buffer + size;
while (ptr < end && thread->return_error == BR_OK) {
//read from the user-space memory that write_buffer points to
if (get_user(cmd, (uint32_t __user *)ptr))//i.e. read the command, BC_TRANSACTION
return -EFAULT;
ptr += sizeof(uint32_t);//advance past the cmd to the binder_transaction_data
switch (cmd) {
case BC_TRANSACTION:
case BC_REPLY: {
struct binder_transaction_data tr;
if (copy_from_user(&tr, ptr, sizeof(tr)))//这里又来了一次拷贝 将数据拷贝到内核空间 将用户空间的mOut中的数据拷贝到内核空间,通过这次拷贝会把用户空间的那个tr,也就是IPCThreadState.mOut,给拷贝到内核中来
//所以此时还是没有和服务端被映射的内存扯上关系,我们继续往下分析binder_transaction()函数,看看它的处理流程
return -EFAULT;
ptr += sizeof(tr);
binder_transaction(proc, thread, &tr,
cmd == BC_REPLY, 0);
break;
}
binder_transaction
binder_transaction is the core of cross-process communication. Here the driver uses the incoming data to find the target process, allocates a buffer inside the target process, and copies the real payload (data) from the sender's user space into the target process's kernel space; this is where the single copy actually happens. Because binder has mmapped that kernel memory into the target's user space, copying into it is equivalent to delivering the data to the target's user space. The driver then wakes the target process to handle the received data, completing the cross-process call. First, the relevant structure:
struct binder_transaction {//the transaction t that travels across processes
int debug_id;
struct binder_work work;
struct binder_thread *from;
struct binder_transaction *from_parent;
struct binder_proc *to_proc;
struct binder_thread *to_thread;
struct binder_transaction *to_parent;
unsigned need_reply:1;
/* unsigned is_dead:1; */ /* not used at the moment */
struct binder_buffer *buffer;
unsigned int code;
unsigned int flags;
long priority;
long saved_priority;
kuid_t sender_euid;
};
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply,
binder_size_t extra_buffers_size)
{
int ret;
//the binder transaction
struct binder_transaction *t;
struct binder_work *tcomplete;
binder_size_t *offp, *off_end, *off_start;
binder_size_t off_min;
u8 *sg_bufp, *sg_buf_end;
struct binder_proc *target_proc;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
struct list_head *target_list;
wait_queue_head_t *target_wait;
struct binder_transaction *in_reply_to = NULL;
struct binder_transaction_log_entry *e;
uint32_t return_error;
struct binder_buffer_object *last_fixup_obj = NULL;
binder_size_t last_fixup_min_off = 0;
struct binder_context *context = proc->context;
if (reply) {
//not taken here: reply is false for BC_TRANSACTION
} else {
if (tr->target.handle) {//the target handle is non-zero
struct binder_ref *ref;//a binder_ref is the client-side reference; it points at a binder_node, the entity owned by the service process
//the driver keeps one handle per client reference, stored in a binder_ref; the proxy's handle is what gets written into the flat_binder_object
ref = binder_get_ref(proc, tr->target.handle, true);//look up the binder_ref by the target handle
target_node = ref->node;
} else {
target_node = context->binder_context_mgr_node;//handle 0: the target node is servicemanager's node, so we have found servicemanager's process info
}
e->to_node = target_node->debug_id;
target_proc = target_node->proc;//the target process
}
// allocate the binder_transaction node
t = kzalloc(sizeof(*t), GFP_KERNEL);
//allocate a binder_work node
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
t->sender_euid = task_euid(proc->tsk);// the sender's euid
t->to_proc = target_proc;//target process of the transaction
t->to_thread = target_thread;//target thread of the transaction
t->code = tr->code;//transaction code, here ADD_SERVICE_TRANSACTION
t->flags = tr->flags;
t->priority = task_nice(current);//priority inheritance for the handling thread
//allocate (tr->data_size + tr->offsets_size) bytes from the target process's binder buffer
t->buffer = binder_alloc_buf(target_proc, tr->data_size,
tr->offsets_size, extra_buffers_size,
!reply && (t->flags & TF_ONE_WAY));
t->buffer->allow_user_free = 0;
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;//the transaction this binder_buffer belongs to
t->buffer->target_node = target_node;//the target binder node of this transaction
off_start = (binder_size_t *)(t->buffer->data +
ALIGN(tr->data_size, sizeof(void *)));
offp = off_start;//offsets of the flat_binder_objects inside the kernel buffer, used later to fix up the binder objects
//this is the copy of the Parcel's payload: the sender's user-space data is copied directly into the receiver's mapped kernel memory,
//which is the key point of the "one copy" that Binder is known for
if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
tr->data.ptr.buffer, tr->data_size)) {
}
//a second copy_from_user follows, copying the offsets of the objects embedded in the data; like the earlier bwr and tr copies it is tiny compared with the payload, so "one copy" refers to the payload copy above
if (copy_from_user(offp, (const void __user *)(uintptr_t)
tr->data.ptr.offsets, tr->offsets_size)) {
}
......
t->work.type = BINDER_WORK_TRANSACTION;//the work item that will wake servicemanager
list_add_tail(&t->work.entry, target_list);//append work.entry to target_list, the target's todo list
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
list_add_tail(&tcomplete->entry, &thread->todo);//queue the "transaction complete" work on the sender's own todo list
if (target_wait) // the data is in place; wake up the waiting target
wake_up_interruptible(target_wait);
At this point binder has copied the source process's (SystemServer's) user-space data into the target process's (servicemanager's) kernel space, queued a todo item in the target, and woken it via wake_up_interruptible.
Now switch to the servicemanager process:
binder_thread_read{
.....
wait_event_freezable_exclusive//woken up here, execution continues
if (wait_for_proc_work)
proc->ready_threads--;
thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
if (ret)
return ret;
while (1) {
uint32_t cmd;
struct binder_transaction_data tr;
struct binder_work *w;
struct binder_transaction *t = NULL;
if (!list_empty(&thread->todo)) {//thread->todo不为空了
w = list_first_entry(&thread->todo, struct binder_work,
entry);
} else if (!list_empty(&proc->todo) && wait_for_proc_work) {
w = list_first_entry(&proc->todo, struct binder_work,
entry);
} else {
/* no data added */
if (ptr - buffer == 4 &&
!(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
goto retry;
break;
}
if (end - ptr < sizeof(tr) + 4)
break;
switch (w->type) {
case BINDER_WORK_TRANSACTION: {
//container_of(ptr, type, member) recovers the address of the enclosing structure from the address of one of its members
//here the member is w, taken from the todo list; w is embedded in a binder_transaction,
t = container_of(w, struct binder_transaction, work);//so t points at the transaction data sitting in the receiver's kernel space
}
if (!t)
continue;
if (t->buffer->target_node) {
struct binder_node *target_node = t->buffer->target_node;//the target binder node
//tr is a binder_transaction_data
tr.target.ptr = target_node->ptr;
tr.cookie = target_node->cookie;
t->saved_priority = task_nice(current);
if (t->priority < target_node->min_priority &&
!(t->flags & TF_ONE_WAY))
binder_set_nice(t->priority);
else if (!(t->flags & TF_ONE_WAY) ||
t->saved_priority > target_node->min_priority)
binder_set_nice(target_node->min_priority);
cmd = BR_TRANSACTION;
} else {
tr.target.ptr = 0;
tr.cookie = 0;
cmd = BR_REPLY;
}
tr.code = t->code;
tr.flags = t->flags;
tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
if (t->from) {
struct task_struct *sender = t->from->proc->tsk;
tr.sender_pid = task_tgid_nr_ns(sender,
task_active_pid_ns(current));
} else {
tr.sender_pid = 0;
}
//size of the transaction's payload
tr.data_size = t->buffer->data_size;
//translate the kernel buffer address into an address the receiver can use in user space
tr.offsets_size = t->buffer->offsets_size;
tr.data.ptr.buffer = (binder_uintptr_t)(
(uintptr_t)t->buffer->data +
proc->user_buffer_offset);
//user-space address of the offsets array for this transaction
tr.data.ptr.offsets = tr.data.ptr.buffer +
ALIGN(t->buffer->data_size,
sizeof(void *));
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
//note: only the fixed-size tr is copied into binder_write_read.read_buffer here, not the payload that tr.data.ptr.buffer points to
//(just above, the kernel buffer address was converted into the receiver's user-space address and stored in tr.data.ptr.buffer),
//so the receiving process can read the remote data directly through tr.data.ptr.buffer
//without another copy_to_user from kernel space to user space;
//in the same way, tr.data.ptr.offsets gives direct user-space access to the binder-object offset array.
if (copy_to_user(ptr, &tr, sizeof(tr)))
return -EFAULT;
}
binder_thread_read finishes and returns, and we are back in
void binder_loop(struct binder_state *bs, binder_handler func)
{
int res;
struct binder_write_read bwr;
uint32_t readbuf[32];
bwr.write_size = 0;
bwr.write_consumed = 0;
bwr.write_buffer = 0;
readbuf[0] = BC_ENTER_LOOPER;
binder_write(bs, readbuf, sizeof(uint32_t));//write the BC_ENTER_LOOPER command
for (;;) {
bwr.read_size = sizeof(readbuf);
bwr.read_consumed = 0;
bwr.read_buffer = (uintptr_t) readbuf;
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
//after being woken up, the ioctl returns and execution continues here
res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
}
}
servicemanager binder_parse
int binder_parse(struct binder_state *bs, struct binder_io *bio,
uintptr_t ptr, size_t size, binder_handler func)
{
int r = 1;
uintptr_t end = ptr + (uintptr_t) size;
while (ptr < end) {
uint32_t cmd = *(uint32_t *) ptr;
ptr += sizeof(uint32_t);
switch(cmd) {
case BR_TRANSACTION: {//hand the data to func for processing
struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
binder_dump_txn(txn);
if (func) {//func is svcmgr_handler
unsigned rdata[256/4];
struct binder_io msg;
struct binder_io reply;
int res;
bio_init(&reply, rdata, sizeof(rdata), 4);
bio_init_from_txn(&msg, txn);
res = func(bs, txn, &msg, &reply);//call svcmgr_handler
binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
}
ptr += sizeof(*txn);
break;
}
int svcmgr_handler(struct binder_state *bs,
struct binder_transaction_data *txn,
struct binder_io *msg,
struct binder_io *reply)
{
//txn->code == ADD_SERVICE_TRANSACTION == 3
switch(txn->code) {
case SVC_MGR_ADD_SERVICE://the enum value SVC_MGR_ADD_SERVICE is also 3
s = bio_get_string16(msg, &len);
if (s == NULL) {
return -1;
}
handle = bio_get_ref(msg);
allow_isolated = bio_get_uint32(msg) ? 1 : 0;
if (do_add_service(bs, s, len, handle, txn->sender_euid,
allow_isolated, txn->sender_pid))//
return -1;
break;
This completes the walk-through of the code path from ServiceManager.addService in the SystemServer process to do_add_service in the servicemanager process.