This article analyzes how Android implements low-level inter-process communication by walking through the user-space Binder support code (binder.c); it is long, so please bear with it.
First, the headers:
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>
#include "binder.h"
#define MAX_BIO_SIZE (1 << 30)
#define TRACE 0
#define LOG_TAG "Binder"
#include <cutils/log.h>
void bio_init_from_txn(struct binder_io *io, struct binder_transaction_data *txn);
#if TRACE
//debug helper: dumps a buffer as hex, 16 bytes per line
void hexdump(void *_data, size_t len)
{
unsigned char *data = _data;
size_t count;
for (count = 0; count < len; count++) {
if ((count & 15) == 0)
fprintf(stderr,"%04zu:", count);
fprintf(stderr," %02x %c", *data,
(*data < 32) || (*data > 126) ? '.' : *data);
data++;
if ((count & 15) == 15)
fprintf(stderr,"\n");
}
if ((count & 15) != 0)
fprintf(stderr,"\n");
}
void binder_dump_txn(struct binder_transaction_data *txn)
{
struct flat_binder_object *obj;
binder_size_t *offs = (binder_size_t *)(uintptr_t)txn->data.ptr.offsets;
size_t count = txn->offsets_size / sizeof(binder_size_t);
fprintf(stderr," target %016"PRIx64" cookie %016"PRIx64" code %08x flags %08x\n",
(uint64_t)txn->target.ptr, (uint64_t)txn->cookie, txn->code, txn->flags);
fprintf(stderr," pid %8d uid %8d data %"PRIu64" offs %"PRIu64"\n",
txn->sender_pid, txn->sender_euid, (uint64_t)txn->data_size, (uint64_t)txn->offsets_size);
hexdump((void *)(uintptr_t)txn->data.ptr.buffer, txn->data_size);
while (count--) {
obj = (struct flat_binder_object *) (((char*)(uintptr_t)txn->data.ptr.buffer) + *offs++);
fprintf(stderr," - type %08x flags %08x ptr %016"PRIx64" cookie %016"PRIx64"\n",
obj->type, obj->flags, (uint64_t)obj->binder, (uint64_t)obj->cookie);
}
}
#define NAME(n) case n: return #n
//maps the BR_* return commands to their names, for tracing
const char *cmd_name(uint32_t cmd)
{
switch(cmd) {
NAME(BR_NOOP);
NAME(BR_TRANSACTION_COMPLETE);
NAME(BR_INCREFS);
NAME(BR_ACQUIRE);
NAME(BR_RELEASE);
NAME(BR_DECREFS);
NAME(BR_TRANSACTION);
NAME(BR_REPLY);
NAME(BR_FAILED_REPLY);
NAME(BR_DEAD_REPLY);
NAME(BR_DEAD_BINDER);
default: return "???";
}
}
#else
#define hexdump(a,b) do{} while (0)
#define binder_dump_txn(txn) do{} while (0)
#endif
#define BIO_F_SHARED 0x01 /* buffer is owned by the driver and must be freed with BC_FREE_BUFFER */
#define BIO_F_OVERFLOW 0x02 /* ran out of space */
#define BIO_F_IOERROR 0x04 /* an ioctl failed */
#define BIO_F_MALLOCED 0x08 /* needs to be free()'d */
//note this struct: it is the handle returned by binder_open() and passed to every function below
struct binder_state
{
int fd;
void *mapped;
size_t mapsize;
};
This part needs little comment; it is all preliminary definitions. Moving on.
Opening the Binder driver:
//open the Binder driver; the parameter is the size of the memory region to map
//returns a binder_state structure describing the opened driver
struct binder_state *binder_open(size_t mapsize)
{
//the binder_state we will return; this is the interesting part
struct binder_state *bs;
struct binder_version vers;
//allocate the state structure
bs = malloc(sizeof(*bs));
if (!bs) {
errno = ENOMEM;
return NULL;
}
//open the binder driver read-write; returns a file descriptor
bs->fd = open("/dev/binder", O_RDWR);
if (bs->fd < 0) {
fprintf(stderr,"binder: cannot open device (%s)\n",
strerror(errno));
goto fail_open;
}
if ((ioctl(bs->fd, BINDER_VERSION, &vers) == -1) ||
(vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)) {
fprintf(stderr, "binder: driver version differs from user space\n");
goto fail_open;
}
//record the requested mapping size in the state structure
bs->mapsize = mapsize;
//map the driver's memory into this process with mmap
//mmap(addr, length, prot, flags, fd, offset): addr is the preferred start address in the process (NULL lets the kernel choose); length is the size of the mapping; prot is the access protection (read-only here); flags determine whether updates are private to this process or shared; fd is the descriptor of the file being mapped; offset is the starting offset within that file (0 here)
bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
//error handling: on failure mmap returns MAP_FAILED, not NULL
if (bs->mapped == MAP_FAILED) {
fprintf(stderr,"binder: cannot map device (%s)\n",
strerror(errno));
goto fail_map;
}
return bs;
fail_map:
close(bs->fd);
fail_open:
free(bs);
return NULL;
}
So the logic of opening the Binder driver is roughly: the caller passes in the size of the region it wants mapped; binder_open opens /dev/binder, verifies the driver's protocol version, maps mapsize bytes of driver memory into the process, and records the file descriptor and the mapping in the returned binder_state.
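As a quick usage sketch (the 128 KiB mapping size is just a typical choice, not something the code above requires):
struct binder_state *bs = binder_open(128 * 1024);
if (!bs) {
    fprintf(stderr, "failed to open binder driver\n");
    return -1;
}
/* ... talk to the driver through bs ... */
binder_close(bs);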
Next, closing the Binder driver:
void binder_close(struct binder_state *bs)
{
//munmap() removes the mapping that starts at bs->mapped and spans bs->mapsize bytes
munmap(bs->mapped, bs->mapsize);
//close the driver file descriptor
close(bs->fd);
//free the state structure
free(bs);
}
The code below leans heavily on one function, so a quick refresher first.
ioctl is the function through which a device driver manages a device's I/O channel, i.e. controls device-specific properties such as a serial port's baud rate or a motor's speed. Its prototype is:
int ioctl(int fd, int cmd, ...);
fd is the file descriptor that open() returned when the user program opened the device; cmd is the control command being issued; the trailing ellipsis stands for optional extra arguments (usually at most one, whose presence and meaning depend on cmd).
ioctl is one of the entries in a driver's file-operations table: if a driver implements it, user programs can call ioctl() to control that device's I/O channel.
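As a concrete example, binder_open above already used it to ask the driver for its protocol version:
struct binder_version vers;
if (ioctl(bs->fd, BINDER_VERSION, &vers) == -1)
    fprintf(stderr, "BINDER_VERSION ioctl failed (%s)\n", strerror(errno));
else
    printf("binder protocol version: %d\n", vers.protocol_version);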
Next:
//register the calling process with the driver as the Binder context manager
int binder_become_context_manager(struct binder_state *bs)
{
return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}
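Putting the pieces so far together, a ServiceManager-style process would start up roughly like this (a sketch, not the literal servicemanager main(); my_handler is a hypothetical placeholder for a binder_handler, covered under binder_parse below):
int main(int argc, char **argv)
{
    struct binder_state *bs = binder_open(128 * 1024);
    if (!bs) {
        fprintf(stderr, "failed to open binder driver\n");
        return -1;
    }
    if (binder_become_context_manager(bs)) {
        fprintf(stderr, "cannot become context manager (%s)\n", strerror(errno));
        return -1;
    }
    binder_loop(bs, my_handler);  /* my_handler: hypothetical handler, see binder_parse */
    return 0;
}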
//write a stream of commands/data into the Binder driver; the read half of the operation is disabled (read_size = 0)
//this is the heart of the IPC path: for process A to send a message to process B, A first writes the message into the memory it has mapped from the driver; the driver copies it into the memory that B has mapped, and B reads it from there; the reverse direction works the same way; that driver-mediated copy is how data crosses between processes
int binder_write(struct binder_state *bs, void *data, size_t len)
{
struct binder_write_read bwr;
//the ioctl result that will be returned; worth tracking
int res;
//write_size is len and read_size is 0, so this is a pure write
bwr.write_size = len;
bwr.write_consumed = 0;
bwr.write_buffer = (uintptr_t) data;
bwr.read_size = 0;
bwr.read_consumed = 0;
bwr.read_buffer = 0;
//everything is a file: issue the command through the driver's file descriptor
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
if (res < 0) {
fprintf(stderr,"binder_write: ioctl failed (%s)\n",
strerror(errno));
}
//return the ioctl result (0 on success, negative on failure)
return res;
}
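For instance, binder_loop below uses exactly this helper to hand the driver a single command word:
uint32_t cmd = BC_ENTER_LOOPER;   /* announce that this thread enters the dispatch loop */
binder_write(bs, &cmd, sizeof(cmd));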
//package a reply (preceded by a request to free the just-handled transaction buffer) and write it to the driver
void binder_send_reply(struct binder_state *bs,
struct binder_io *reply,
binder_uintptr_t buffer_to_free,
int status)
{
struct {
uint32_t cmd_free;
binder_uintptr_t buffer;
uint32_t cmd_reply;
struct binder_transaction_data txn;
} __attribute__((packed)) data;
data.cmd_free = BC_FREE_BUFFER;
data.buffer = buffer_to_free;
data.cmd_reply = BC_REPLY;
data.txn.target.ptr = 0;
data.txn.cookie = 0;
data.txn.code = 0;
if (status) {
data.txn.flags = TF_STATUS_CODE;
data.txn.data_size = sizeof(int);
data.txn.offsets_size = 0;
data.txn.data.ptr.buffer = (uintptr_t)&status;
data.txn.data.ptr.offsets = 0;
} else {
data.txn.flags = 0;
data.txn.data_size = reply->data - reply->data0;
data.txn.offsets_size = ((char*) reply->offs) - ((char*) reply->offs0);
data.txn.data.ptr.buffer = (uintptr_t)reply->data0;
data.txn.data.ptr.offsets = (uintptr_t)reply->offs0;
}
binder_write(bs, &data, sizeof(data));
}
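Note the __attribute__((packed)): a single binder_write hands the driver two commands back to back, which it consumes in order:
/* layout of the written buffer (packed, no padding):
 *   BC_FREE_BUFFER | buffer_to_free | BC_REPLY | binder_transaction_data
 * i.e. "free the buffer of the transaction just handled, then deliver the reply"
 */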
//parse the command stream returned by the driver; this deserves a close look
//note the last parameter: binder_handler is a function pointer, and every incoming transaction is dispatched through func
int binder_parse(struct binder_state *bs, struct binder_io *bio,
uintptr_t ptr, size_t size, binder_handler func)
{
//the value binder_parse will return; worth tracking
int r = 1;
uintptr_t end = ptr + (uintptr_t) size;
//walk the command buffer until we reach its end
while (ptr < end) {
uint32_t cmd = *(uint32_t *) ptr;
ptr += sizeof(uint32_t);
#if TRACE
fprintf(stderr,"%s:\n", cmd_name(cmd));
#endif
//dispatch on the command
switch(cmd) {
case BR_NOOP:
break;
case BR_TRANSACTION_COMPLETE:
break;
case BR_INCREFS:
case BR_ACQUIRE:
case BR_RELEASE:
case BR_DECREFS:
#if TRACE
fprintf(stderr," %p, %p\n", (void *)ptr, (void *)(ptr + sizeof(void *)));
#endif
ptr += sizeof(struct binder_ptr_cookie);
break;
case BR_TRANSACTION: {
struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
if ((end - ptr) < sizeof(*txn)) {
ALOGE("parse: txn too small!\n");
return -1;
}
binder_dump_txn(txn);
if (func) {
unsigned rdata[256/4];
struct binder_io msg;
struct binder_io reply;
int res;
bio_init(&reply, rdata, sizeof(rdata), 4);
bio_init_from_txn(&msg, txn);
//let the handler process the transaction
res = func(bs, txn, &msg, &reply);
//send the handler's result back as the reply
binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
}
//advance past this transaction
ptr += sizeof(*txn);
break;
}
case BR_REPLY: {
struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
if ((end - ptr) < sizeof(*txn)) {
ALOGE("parse: reply too small!\n");
return -1;
}
binder_dump_txn(txn);
if (bio) {
bio_init_from_txn(bio, txn);
bio = 0;
} else {
/* todo FREE BUFFER */
}
ptr += sizeof(*txn);
r = 0;
break;
}
case BR_DEAD_BINDER: {
struct binder_death *death = (struct binder_death *)(uintptr_t) *(binder_uintptr_t *)ptr;
ptr += sizeof(binder_uintptr_t);
death->func(bs, death->ptr);
break;
}
case BR_FAILED_REPLY:
r = -1;
break;
case BR_DEAD_REPLY:
r = -1;
break;
default:
ALOGE("parse: OOPS %d\n", cmd);
return -1;
}
}
return r;
}
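What does func look like? binder_handler is a function-pointer type declared in the accompanying binder.h, taking exactly the four arguments passed above. Here is a minimal, hypothetical handler (echo_handler is an illustrative name, not in the source) that just echoes one word back:
int echo_handler(struct binder_state *bs,
                 struct binder_transaction_data *txn,
                 struct binder_io *msg,
                 struct binder_io *reply)
{
    uint32_t n = bio_get_uint32(msg);   /* pull one word out of the request */
    bio_put_uint32(reply, n);           /* write it into the reply */
    return 0;                           /* status handed to binder_send_reply */
}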
//these three send reference-counting and death-notification commands to the Binder driver; note how cmd[0] carries the command code and cmd[1] the target handle
void binder_acquire(struct binder_state *bs, uint32_t target)
{
uint32_t cmd[2];
cmd[0] = BC_ACQUIRE;
cmd[1] = target;
binder_write(bs, cmd, sizeof(cmd));
}
void binder_release(struct binder_state *bs, uint32_t target)
{
uint32_t cmd[2];
cmd[0] = BC_RELEASE;
cmd[1] = target;
binder_write(bs, cmd, sizeof(cmd));
}
void binder_link_to_death(struct binder_state *bs, uint32_t target, struct binder_death *death)
{
struct {
uint32_t cmd;
struct binder_handle_cookie payload;
} __attribute__((packed)) data;
data.cmd = BC_REQUEST_DEATH_NOTIFICATION;
data.payload.handle = target;
data.payload.cookie = (uintptr_t) death;
binder_write(bs, &data, sizeof(data));
}
Now a key routine, binder_call, which performs a complete synchronous transaction (send a request, then block until the reply arrives):
int binder_call(struct binder_state *bs,
struct binder_io *msg, struct binder_io *reply,
uint32_t target, uint32_t code)
{
//local state for the call
int res;
struct binder_write_read bwr;
struct {
uint32_t cmd;
struct binder_transaction_data txn;
} __attribute__((packed)) writebuf;
unsigned readbuf[32];
if (msg->flags & BIO_F_OVERFLOW) {
fprintf(stderr,"binder: txn buffer overflow\n");
goto fail;
}
//package the request as a BC_TRANSACTION command
writebuf.cmd = BC_TRANSACTION;
writebuf.txn.target.handle = target;
writebuf.txn.code = code;
writebuf.txn.flags = 0;
writebuf.txn.data_size = msg->data - msg->data0;
writebuf.txn.offsets_size = ((char*) msg->offs) - ((char*) msg->offs0);
writebuf.txn.data.ptr.buffer = (uintptr_t)msg->data0;
writebuf.txn.data.ptr.offsets = (uintptr_t)msg->offs0;
bwr.write_size = sizeof(writebuf);
bwr.write_consumed = 0;
bwr.write_buffer = (uintptr_t) &writebuf;
//dump the outgoing payload when TRACE is enabled
hexdump(msg->data0, msg->data - msg->data0);
//loop, reading from the driver until the reply arrives
for (;;) {
bwr.read_size = sizeof(readbuf);
bwr.read_consumed = 0;
bwr.read_buffer = (uintptr_t) readbuf;
//a combined write/read: the driver consumes writebuf and fills readbuf; which halves are active depends on the sizes set in bwr
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
if (res < 0) {
fprintf(stderr,"binder: ioctl failed (%s)\n", strerror(errno));
goto fail;
}
//parse what the driver handed back; binder_parse returns 0 once the reply has been captured
res = binder_parse(bs, reply, (uintptr_t) readbuf, bwr.read_consumed, 0);
if (res == 0) return 0;
if (res < 0) goto fail;
}
fail:
memset(reply, 0, sizeof(*reply));
reply->flags |= BIO_F_IOERROR;
return -1;
}
//the server-side dispatch loop; note that it calls binder_parse to handle each incoming command; assume the process running it is A
void binder_loop(struct binder_state *bs, binder_handler func)
{
//local state for the read loop
int res;
struct binder_write_read bwr;
uint32_t readbuf[32];
bwr.write_size = 0;
bwr.write_consumed = 0;
bwr.write_buffer = 0;
readbuf[0] = BC_ENTER_LOOPER;
//A hands the BC_ENTER_LOOPER command to the driver (readbuf doubles as a scratch write buffer here)
binder_write(bs, readbuf, sizeof(uint32_t));
//loop forever, reading and handling commands
for (;;) {
bwr.read_size = sizeof(readbuf);
bwr.read_consumed = 0;
bwr.read_buffer = (uintptr_t) readbuf;
//block in the driver until there is work for A; the driver copies the pending commands into readbuf
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
if (res < 0) {
ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
break;
}
//let binder_parse dispatch the commands in readbuf, invoking func for incoming transactions
res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
if (res == 0) {
ALOGE("binder_loop: unexpected reply?!\n");
break;
}
if (res < 0) {
ALOGE("binder_loop: io error %d %s\n", res, strerror(errno));
break;
}
}
}
The remaining functions make up the binder_io marshalling layer; they are fairly simple, so a quicker pass through them:
void bio_init_from_txn(struct binder_io *bio, struct binder_transaction_data *txn)
{
bio->data = bio->data0 = (char *)(intptr_t)txn->data.ptr.buffer;
bio->offs = bio->offs0 = (binder_size_t *)(intptr_t)txn->data.ptr.offsets;
bio->data_avail = txn->data_size;
bio->offs_avail = txn->offsets_size / sizeof(size_t);
bio->flags = BIO_F_SHARED;
}
void bio_init(struct binder_io *bio, void *data,
size_t maxdata, size_t maxoffs)
{
size_t n = maxoffs * sizeof(size_t);
if (n > maxdata) {
bio->flags = BIO_F_OVERFLOW;
bio->data_avail = 0;
bio->offs_avail = 0;
return;
}
bio->data = bio->data0 = (char *) data + n;
bio->offs = bio->offs0 = data;
bio->data_avail = maxdata - n;
bio->offs_avail = maxoffs;
bio->flags = 0;
}
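bio_init carves one caller-supplied buffer into two regions, offsets first and data after; roughly:
/* after bio_init(&bio, buf, maxdata, maxoffs), with n = maxoffs * sizeof(size_t):
 *
 *   buf              buf + n                              buf + maxdata
 *    |-- offsets ----|-------------- data ----------------|
 *    ^ offs0 / offs   ^ data0 / data
 *
 * offs and data advance as values are appended; offs0 and data0
 * remember where each region started, which is how binder_call and
 * binder_send_reply later compute data_size and offsets_size.
 */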
static void *bio_alloc(struct binder_io *bio, size_t size)
{
size = (size + 3) & (~3); //round the size up to a 4-byte boundary, e.g. 5 -> 8
if (size > bio->data_avail) {
bio->flags |= BIO_F_OVERFLOW;
return NULL;
} else {
void *ptr = bio->data;
bio->data += size;
bio->data_avail -= size;
return ptr;
}
}
void binder_done(struct binder_state *bs,
struct binder_io *msg,
struct binder_io *reply)
{
struct {
uint32_t cmd;
uintptr_t buffer;
} __attribute__((packed)) data;
if (reply->flags & BIO_F_SHARED) {
data.cmd = BC_FREE_BUFFER;
data.buffer = (uintptr_t) reply->data0;
binder_write(bs, &data, sizeof(data));
reply->flags = 0;
}
}
static struct flat_binder_object *bio_alloc_obj(struct binder_io *bio)
{
struct flat_binder_object *obj;
obj = bio_alloc(bio, sizeof(*obj));
if (obj && bio->offs_avail) {
bio->offs_avail--;
*bio->offs++ = ((char*) obj) - ((char*) bio->data0);
return obj;
}
bio->flags |= BIO_F_OVERFLOW;
return NULL;
}
void bio_put_uint32(struct binder_io *bio, uint32_t n)
{
uint32_t *ptr = bio_alloc(bio, sizeof(n));
if (ptr)
*ptr = n;
}
void bio_put_obj(struct binder_io *bio, void *ptr)
{
struct flat_binder_object *obj;
obj = bio_alloc_obj(bio);
if (!obj)
return;
obj->flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS; /* 0x7f fills the scheduler-priority bits; also accept fds */
obj->type = BINDER_TYPE_BINDER;
obj->binder = (uintptr_t)ptr;
obj->cookie = 0;
}
void bio_put_ref(struct binder_io *bio, uint32_t handle)
{
struct flat_binder_object *obj;
if (handle)
obj = bio_alloc_obj(bio);
else
obj = bio_alloc(bio, sizeof(*obj));
if (!obj)
return;
obj->flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS; /* same flags as bio_put_obj above */
obj->type = BINDER_TYPE_HANDLE;
obj->handle = handle;
obj->cookie = 0;
}
void bio_put_string16(struct binder_io *bio, const uint16_t *str)
{
size_t len;
uint16_t *ptr;
if (!str) {
bio_put_uint32(bio, 0xffffffff);
return;
}
len = 0;
while (str[len]) len++;
if (len >= (MAX_BIO_SIZE / sizeof(uint16_t))) {
bio_put_uint32(bio, 0xffffffff);
return;
}
/* Note: The payload will carry 32bit size instead of size_t */
bio_put_uint32(bio, (uint32_t) len);
len = (len + 1) * sizeof(uint16_t);
ptr = bio_alloc(bio, len);
if (ptr)
memcpy(ptr, str, len);
}
void bio_put_string16_x(struct binder_io *bio, const char *_str)
{
unsigned char *str = (unsigned char*) _str;
size_t len;
uint16_t *ptr;
if (!str) {
bio_put_uint32(bio, 0xffffffff);
return;
}
len = strlen(_str);
if (len >= (MAX_BIO_SIZE / sizeof(uint16_t))) {
bio_put_uint32(bio, 0xffffffff);
return;
}
/* Note: The payload will carry 32bit size instead of size_t */
bio_put_uint32(bio, len);
ptr = bio_alloc(bio, (len + 1) * sizeof(uint16_t));
if (!ptr)
return;
while (*str)
*ptr++ = *str++;
*ptr++ = 0;
}
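Both put variants produce the same wire format, which the get side below expects; note that bio_put_string16_x widens each input byte to 16 bits one at a time, so it is only correct for ASCII strings:
/* wire format written by bio_put_string16 / bio_put_string16_x:
 *   uint32_t len          -- character count, excluding the terminator
 *   uint16_t chars[len+1] -- the characters plus a 16-bit NUL
 * (a null input string is encoded as the single word 0xffffffff)
 */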
static void *bio_get(struct binder_io *bio, size_t size)
{
size = (size + 3) & (~3); //same 4-byte rounding as bio_alloc
if (bio->data_avail < size){
bio->data_avail = 0;
bio->flags |= BIO_F_OVERFLOW;
return NULL;
} else {
void *ptr = bio->data;
bio->data += size;
bio->data_avail -= size;
return ptr;
}
}
uint32_t bio_get_uint32(struct binder_io *bio)
{
uint32_t *ptr = bio_get(bio, sizeof(*ptr));
return ptr ? *ptr : 0;
}
uint16_t *bio_get_string16(struct binder_io *bio, size_t *sz)
{
size_t len;
/* Note: The payload will carry 32bit size instead of size_t */
len = (size_t) bio_get_uint32(bio);
if (sz)
*sz = len;
return bio_get(bio, (len + 1) * sizeof(uint16_t));
}
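A quick round trip of the put/get pair. In real use the reading binder_io comes from bio_init_from_txn on a delivered transaction; re-pointing the reader at the writer's buffer below is purely a test-only illustration:
unsigned iodata[128];
struct binder_io out, in;
size_t len;

bio_init(&out, iodata, sizeof(iodata), 4);
bio_put_string16_x(&out, "hello");

in = out;                          /* reuse the same buffer for reading */
in.data = out.data0;               /* rewind the data cursor */
in.data_avail = out.data - out.data0;
uint16_t *s = bio_get_string16(&in, &len);   /* len == 5, s -> UTF-16 "hello" */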
static struct flat_binder_object *_bio_get_obj(struct binder_io *bio)
{
size_t n;
size_t off = bio->data - bio->data0;
/* TODO: be smarter about this? */
for (n = 0; n < bio->offs_avail; n++) {
if (bio->offs[n] == off)
return bio_get(bio, sizeof(struct flat_binder_object));
}
bio->data_avail = 0;
bio->flags |= BIO_F_OVERFLOW;
return NULL;
}
uint32_t bio_get_ref(struct binder_io *bio)
{
struct flat_binder_object *obj;
obj = _bio_get_obj(bio);
if (!obj)
return 0;
if (obj->type == BINDER_TYPE_HANDLE)
return obj->handle;
return 0;
}
From the analysis above, the operations Binder provides boil down to two things:
First: mapping memory into a process, and releasing it again.
Second: reading from and writing to the file (that is, the memory block) each process has mapped.
Now think it through: if process A wants to communicate with process B, say A passes some data to B, B processes it and sends the result back, what is needed?
First, A has to be able to address B. Concretely that means a Binder handle for B (the target argument seen in binder_call; note this is a handle the driver resolves, not the fd stored in binder_state), plus the raw data A wants to send; just those two ingredients.
But could A simply store B's address itself?
Consider what that implies: every process would have to keep addressing information about every other process it might talk to, and merely storing all of it would cost a lot of memory. So the designers introduced a mechanism in which one dedicated process stores the handles and related information for all registered processes; any other process first queries it for the handle it needs and only then starts communicating. In Android this registry process is called ServiceManager, and its handle is fixed at 0. The A/B communication pattern is therefore:
A obtains B's handle by querying ServiceManager.
A and B communicate using that handle.
In both steps the bridge between them is the Binder driver.
Binder itself acts as nothing more than a relay here; with that in mind, it is worth re-reading the binder_loop code above.
Next up: the chapter analyzing the ServiceManager source.