- The Framework is a middle layer: it sits on top of the underlying implementation, encapsulates the complex internal logic, and exposes an interface for external use.
- The Binder Framework layer is split into a C++ part and a Java part; to reuse the same functionality, the two are bridged by JNI.
- The C++ part of the Binder Framework lives here: headers under /frameworks/native/include/binder/, implementation under /frameworks/native/libs/binder/.
- The binder library is ultimately built into a shared library, libbinder.so, which other processes link against.
1 ServiceManager Startup Overview
- ServiceManager (hereafter SM) is the daemon of the Binder mechanism, and it is itself a Binder service.
- It talks to the Binder driver directly through binder.c, and contains a loop (binder_loop) that reads and processes transactions.
SM's job is simple; it does just two things (the transaction codes behind them are shown below):
- 1. register services
- 2. look up services
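These two jobs correspond to a small set of transaction codes shared between SM and its clients. For reference, the codes as defined in frameworks/native/cmds/servicemanager/binder.h (AOSP at the time of writing):
// frameworks/native/cmds/servicemanager/binder.h
enum {
    /* Must match definitions in IBinder.h and IServiceManager.h */
    PING_TRANSACTION = B_PACK_CHARS('_','P','N','G'),
    SVC_MGR_GET_SERVICE = 1,
    SVC_MGR_CHECK_SERVICE,
    SVC_MGR_ADD_SERVICE,
    SVC_MGR_LIST_SERVICES,
};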
Source locations:
frameworks/native/cmds/servicemanager/
- service_manager.c
- binder.c
system/core/rootdir
- init.rc
kernel/drivers/ (the path differs slightly between Linux branches)
- android/binder.c
The kernel's binder.c is no longer part of the Android source tree; it lives in the Linux kernel sources.
Note that there are two files named binder.c here: one is frameworks/native/cmds/servicemanager/binder.c, the other is kernel/drivers/android/binder.c. They are absolutely not the same thing; do not confuse them.
2 Startup Process
Any process that uses the Binder mechanism must open and then mmap the /dev/binder device before it can use Binder. This logic is common to every Binder-using process, and SM is no exception.
The startup flow is as follows:
ServiceManager is created by the init process while parsing init.rc. Its executable is /system/bin/servicemanager (which is also its process name), and the corresponding source file is service_manager.c.
The init.rc entry:
// init.rc, line 602
service servicemanager /system/bin/servicemanager
    class core
    user system
    group system
    critical
    onrestart restart healthd
    onrestart restart zygote
    onrestart restart media
    onrestart restart surfaceflinger
    onrestart restart drm
2.1 service_manager.c
The entry point for starting Service Manager is the main() function in service_manager.c:
// service_manager.c, line 347
int main(int argc, char **argv)
{
    struct binder_state *bs;

    // open the binder driver and request a 128KB memory mapping
    bs = binder_open(128*1024);
    ...
    // code omitted
    ...
    // become the context manager
    if (binder_become_context_manager(bs)) {
        return -1;
    }

    selinux_enabled = is_selinux_enabled(); // is SELinux enabled?
    sehandle = selinux_android_service_context_handle();
    selinux_status_open(true);

    if (selinux_enabled > 0) {
        if (sehandle == NULL) {
            abort(); // failed to get sehandle
        }
        if (getcon(&service_manager_context) != 0) {
            abort(); // failed to get the service_manager context
        }
    }

    union selinux_callback cb;
    cb.func_audit = audit_callback;
    selinux_set_callback(SELINUX_CB_AUDIT, cb);
    cb.func_log = selinux_log_callback;
    selinux_set_callback(SELINUX_CB_LOG, cb);

    // enter an endless loop, acting as the server that handles client requests
    binder_loop(bs, svcmgr_handler);

    return 0;
}
PS: svcmgr_handler is a function pointer; each iteration of binder_loop ends up dispatching to the svcmgr_handler() function.
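For reference, binder_handler is the callback type declared in servicemanager's binder.h; binder_loop() invokes whatever function is plugged in here:
// frameworks/native/cmds/servicemanager/binder.h
typedef int (*binder_handler)(struct binder_state *bs,
                              struct binder_transaction_data *txn,
                              struct binder_io *msg,
                              struct binder_io *reply);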
This code consists of three main steps:
- bs = binder_open(128*1024): open the binder driver and request a 128KB memory mapping
- binder_become_context_manager(bs): become the context manager
- binder_loop(bs, svcmgr_handler): enter the loop and handle requests from clients
2.2 binder_open(128*1024)
This code is in frameworks/native/cmds/servicemanager/binder.c:
// frameworks/native/cmds/servicemanager/binder.c, line 96
struct binder_state *binder_open(size_t mapsize)
{
    struct binder_state *bs;
    struct binder_version vers;

    bs = malloc(sizeof(*bs));
    if (!bs) {
        errno = ENOMEM;
        return NULL;
    }

    // enter the kernel via a system call and open the binder device
    bs->fd = open("/dev/binder", O_RDWR);
    if (bs->fd < 0) {
        // failed to open the binder device
        goto fail_open;
    }

    // query the binder version via the BINDER_VERSION ioctl
    if ((ioctl(bs->fd, BINDER_VERSION, &vers) == -1) ||
        (vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)) {
        // kernel-space and user-space binder versions do not match
        goto fail_open;
    }

    bs->mapsize = mapsize;
    // mmap the memory mapping; the size must be a multiple of the page size
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
    if (bs->mapped == MAP_FAILED) {
        // memory mapping of the binder device failed
        goto fail_map;
    }

    return bs;

fail_map:
    close(bs->fd);
fail_open:
    free(bs);
    return NULL;
}
- Opening binder: open() on the binder device goes through a system call into the Binder driver, which runs its own binder_open(). That driver function creates a binder_proc object, stores it in filp->private_data, and adds it to the global binder_procs list (a simplified sketch follows below).
- ioctl is then used to check that the user-space binder version matches the Binder driver's version.
- mmap() performs the memory mapping; it likewise traps into the driver's binder_mmap(), which creates a binder_buffer object and adds it to the current binder_proc's proc->buffers list.
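As background, here is a simplified sketch of the driver-side binder_open(), based on an older kernel/drivers/android/binder.c; exact fields and locking vary across kernel versions:
// sketch of the driver-side binder_open(), simplified
static int binder_open(struct inode *nodp, struct file *filp)
{
    struct binder_proc *proc;

    // one binder_proc per process that opens /dev/binder
    proc = kzalloc(sizeof(*proc), GFP_KERNEL);
    if (proc == NULL)
        return -ENOMEM;
    get_task_struct(current);
    proc->tsk = current;
    INIT_LIST_HEAD(&proc->todo);
    init_waitqueue_head(&proc->wait);
    proc->default_priority = task_nice(current);

    binder_lock(__func__);
    binder_stats_created(BINDER_STAT_PROC);
    // add to the global binder_procs list
    hlist_add_head(&proc->proc_node, &binder_procs);
    proc->pid = current->group_leader->pid;
    INIT_LIST_HEAD(&proc->delivered_death);
    // stash the proc so later ioctl/mmap calls can find it
    filp->private_data = proc;
    binder_unlock(__func__);

    return 0;
}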
The binder_state struct deserves a closer look:
// frameworks/native/cmds/servicemanager/binder.c, line 89
struct binder_state
{
    int fd;          // file descriptor for /dev/binder
    void *mapped;    // start address of the mmap'ed region
    size_t mapsize;  // size of the mapped memory, 128KB here
};
At this point binder_open() is complete.
2.3 binder_become_context_manager()
The code is very simple:
// frameworks/native/cmds/servicemanager/binder.c, line 146
int binder_become_context_manager(struct binder_state *bs)
{
    // issue the BINDER_SET_CONTEXT_MGR command via ioctl
    return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}
This makes the caller the context manager; there is exactly one such manager in the entire system. The ioctl() goes through a system call into the Binder driver's binder_ioctl() function.
2.3.1 binder_ioctl
The Binder driver lives in the Linux kernel:
// kernel/drivers/android/binder.c, line 3134
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    ...
    // code omitted
    ...
    switch (cmd) {
    ...
    // code omitted
    ...
    // line 3279
    case BINDER_SET_CONTEXT_MGR:
        ret = binder_ioctl_set_ctx_mgr(filp);
        if (ret)
            goto err;
        break;
    }
    ...
    // code omitted
    ...
}
For the BINDER_SET_CONTEXT_MGR command this ends up calling binder_ioctl_set_ctx_mgr(); the operation runs while holding binder_main_lock.
2.3.2 binder_ioctl_set_ctx_mgr()
This function is also part of the Linux kernel:
// kernel/drivers/android/binder.c, line 3198
static int binder_ioctl_set_ctx_mgr(struct file *filp)
{
    int ret = 0;
    struct binder_proc *proc = filp->private_data;
    struct binder_context *context = proc->context;
    kuid_t curr_euid = current_euid();

    // ensure the binder_context_mgr_node object is only created once
    if (context->binder_context_mgr_node) {
        pr_err("BINDER_SET_CONTEXT_MGR already set\n");
        ret = -EBUSY;
        goto out;
    }
    ret = security_binder_set_context_mgr(proc->tsk);
    if (ret < 0)
        goto out;
    if (uid_valid(context->binder_context_mgr_uid)) {
        if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
            pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
                   from_kuid(&init_user_ns, curr_euid),
                   from_kuid(&init_user_ns,
                             context->binder_context_mgr_uid));
            ret = -EPERM;
            goto out;
        }
    } else {
        // record the current thread's euid as the Service Manager uid
        context->binder_context_mgr_uid = curr_euid;
    }
    // create the binder entity for ServiceManager
    context->binder_context_mgr_node = binder_new_node(proc, 0, 0);
    if (!context->binder_context_mgr_node) {
        ret = -ENOMEM;
        goto out;
    }
    context->binder_context_mgr_node->local_weak_refs++;
    context->binder_context_mgr_node->local_strong_refs++;
    context->binder_context_mgr_node->has_strong_ref = 1;
    context->binder_context_mgr_node->has_weak_ref = 1;
out:
    return ret;
}
The binder_context_mgr_node and binder_context_mgr_uid fields touched above belong to state defined inside the Binder driver, the binder_context struct:
2.3.3 The binder_context struct
// kernel/drivers/android/binder.c, line 228
struct binder_context {
    // the binder_node corresponding to service manager
    struct binder_node *binder_context_mgr_node;
    // uid of the thread running service manager
    kuid_t binder_context_mgr_uid;
    const char *name;
};
This creates the global binder_node object binder_context_mgr_node and increments its strong and weak reference counts by one each.
2.3.4 binder_new_node()
// kernel/drivers/android/binder.c
static struct binder_node *binder_new_node(struct binder_proc *proc,
                                           binder_uintptr_t ptr,
                                           binder_uintptr_t cookie)
{
    struct rb_node **p = &proc->nodes.rb_node;
    struct rb_node *parent = NULL;
    struct binder_node *node;

    // the tree is empty on the first call
    while (*p) {
        parent = *p;
        node = rb_entry(parent, struct binder_node, rb_node);

        if (ptr < node->ptr)
            p = &(*p)->rb_left;
        else if (ptr > node->ptr)
            p = &(*p)->rb_right;
        else
            return NULL;
    }

    // allocate memory for the new binder_node
    node = kzalloc(sizeof(*node), GFP_KERNEL);
    if (node == NULL)
        return NULL;
    binder_stats_created(BINDER_STAT_NODE);
    // link the new node into proc's red-black tree of nodes
    rb_link_node(&node->rb_node, parent, p);
    rb_insert_color(&node->rb_node, &proc->nodes);
    node->debug_id = ++binder_last_id;
    node->proc = proc;
    node->ptr = ptr;
    node->cookie = cookie;
    // set the binder_work type
    node->work.type = BINDER_WORK_NODE;
    INIT_LIST_HEAD(&node->work.entry);
    INIT_LIST_HEAD(&node->async_todo);
    binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                 "%d:%d node %d u%016llx c%016llx created\n",
                 proc->pid, current->pid, node->debug_id,
                 (u64)node->ptr, (u64)node->cookie);
    return node;
}
This creates a binder_node object inside the Binder driver, records the current binder_proc in node->proc, and initializes the node's work.entry and async_todo list heads.
2.4 binder_loop()
// frameworks/native/cmds/servicemanager/binder.c, line 372
void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    uint32_t readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER;
    // send the BC_ENTER_LOOPER command to the binder driver,
    // putting ServiceManager into looping mode
    binder_write(bs, readbuf, sizeof(uint32_t));

    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;

        // loop forever, continuously reading from and writing to the driver
        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
        if (res < 0) {
            ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
            break;
        }

        // parse the binder data that was read back
        res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
        if (res == 0) {
            ALOGE("binder_loop: unexpected reply?!\n");
            break;
        }
        if (res < 0) {
            ALOGE("binder_loop: io error %d %s\n", res, strerror(errno));
            break;
        }
    }
}
This enters the read/write loop. The func argument passed from main() points at svcmgr_handler. binder_write() uses ioctl() to send the BC_ENTER_LOOPER command to the binder driver; at that point only bwr.write_buffer carries data, so the driver goes into binder_thread_write(). The for loop then calls ioctl() with only read_buffer set, so the driver goes into binder_thread_read().
The loop involves three key pieces, examined below:
- the binder_thread_write() function
- the binder_write() function
- the binder_parse() function
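All three revolve around the binder_write_read struct that user space and the driver exchange through the BINDER_WRITE_READ ioctl. For reference, its definition from the kernel's uapi binder header:
// include/uapi/linux/android/binder.h
struct binder_write_read {
    binder_size_t    write_size;     /* bytes to write */
    binder_size_t    write_consumed; /* bytes consumed by driver */
    binder_uintptr_t write_buffer;
    binder_size_t    read_size;      /* bytes to read */
    binder_size_t    read_consumed;  /* bytes consumed by driver */
    binder_uintptr_t read_buffer;
};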
2.4.1 binder_thread_write
// kernel/drivers/android/binder.c, line 2248
static int binder_thread_write(struct binder_proc *proc,
            struct binder_thread *thread,
            binder_uintptr_t binder_buffer, size_t size,
            binder_size_t *consumed)
{
    uint32_t cmd;
    struct binder_context *context = proc->context;
    void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
    void __user *ptr = buffer + *consumed;
    void __user *end = buffer + size;

    while (ptr < end && thread->return_error == BR_OK) {
        // fetch the next command from user space
        get_user(cmd, (uint32_t __user *)ptr);
        switch (cmd) {
        // **** code omitted ****
        case BC_ENTER_LOOPER:
            // set this thread's looper state
            thread->looper |= BINDER_LOOPER_STATE_ENTERED;
            break;
        // **** code omitted ****
        }
        // **** code omitted ****
    }
    return 0;
}
This pulls the command out of bwr.write_buffer, here BC_ENTER_LOOPER. So the net effect of the user-space binder_write() call is to set the current thread's looper state to BINDER_LOOPER_STATE_ENTERED.
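For reference, the looper state bits as defined in older versions of the driver (kernel/drivers/android/binder.c; values may differ across kernel versions):
// kernel/drivers/android/binder.c
enum {
    BINDER_LOOPER_STATE_REGISTERED  = 0x01,
    BINDER_LOOPER_STATE_ENTERED     = 0x02,
    BINDER_LOOPER_STATE_EXITED      = 0x04,
    BINDER_LOOPER_STATE_INVALID     = 0x08,
    BINDER_LOOPER_STATE_WAITING     = 0x10,
    BINDER_LOOPER_STATE_NEED_RETURN = 0x20
};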
2.4.2 binder_write()
// frameworks/native/cmds/servicemanager/binder.c, line 151
int binder_write(struct binder_state *bs, void *data, size_t len)
{
    struct binder_write_read bwr;
    int res;

    bwr.write_size = len;
    bwr.write_consumed = 0;
    // here data holds the BC_ENTER_LOOPER command
    bwr.write_buffer = (uintptr_t) data;
    bwr.read_size = 0;
    bwr.read_consumed = 0;
    bwr.read_buffer = 0;
    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
    if (res < 0) {
        fprintf(stderr, "binder_write: ioctl failed (%s)\n",
                strerror(errno));
    }
    return res;
}
This initializes bwr from the arguments: write_size is 4 bytes (sizeof(uint32_t)) and write_buffer points at the buffer holding the BC_ENTER_LOOPER protocol code. The bwr data is then sent to the Binder driver via ioctl, which lands in binder_ioctl().
2.4.3 binder_ioctl
Let's look at binder_ioctl() again, this time on the BINDER_WRITE_READ path:
// kernel/drivers/android/binder.c, line 3239
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    // **** code omitted ****

    // get the binder_thread for the calling thread
    thread = binder_get_thread(proc);

    switch (cmd) {
    case BINDER_WRITE_READ:
        // perform the binder read/write
        ret = binder_ioctl_write_read(filp, cmd, arg, thread);
        if (ret)
            goto err;
        break;
    // **** code omitted ****
    }
}
This time the command is BINDER_WRITE_READ, so binder_ioctl() dispatches to binder_ioctl_write_read().
binder_ioctl_write_read()
// kernel/drivers/android/binder.c, line 3134
static int binder_ioctl_write_read(struct file *filp,
                unsigned int cmd, unsigned long arg,
                struct binder_thread *thread)
{
    int ret = 0;
    struct binder_proc *proc = filp->private_data;
    unsigned int size = _IOC_SIZE(cmd);
    void __user *ubuf = (void __user *)arg;
    struct binder_write_read bwr;

    if (size != sizeof(struct binder_write_read)) {
        ret = -EINVAL;
        goto out;
    }
    // copy the user-space data ubuf into bwr
    if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
        ret = -EFAULT;
        goto out;
    }
    binder_debug(BINDER_DEBUG_READ_WRITE,
                 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
                 proc->pid, thread->pid,
                 (u64)bwr.write_size, (u64)bwr.write_buffer,
                 (u64)bwr.read_size, (u64)bwr.read_buffer);

    // the write buffer has data
    if (bwr.write_size > 0) {
        ret = binder_thread_write(proc, thread,
                                  bwr.write_buffer,
                                  bwr.write_size,
                                  &bwr.write_consumed);
        trace_binder_write_done(ret);
        if (ret < 0) {
            bwr.read_consumed = 0;
            if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                ret = -EFAULT;
            goto out;
        }
    }
    // the read buffer has data
    if (bwr.read_size > 0) {
        ret = binder_thread_read(proc, thread, bwr.read_buffer,
                                 bwr.read_size,
                                 &bwr.read_consumed,
                                 filp->f_flags & O_NONBLOCK);
        trace_binder_read_done(ret);
        if (!list_empty(&proc->todo))
            wake_up_interruptible(&proc->wait);
        if (ret < 0) {
            if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                ret = -EFAULT;
            goto out;
        }
    }
    binder_debug(BINDER_DEBUG_READ_WRITE,
                 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
                 proc->pid, thread->pid,
                 (u64)bwr.write_consumed, (u64)bwr.write_size,
                 (u64)bwr.read_consumed, (u64)bwr.read_size);
    // copy the kernel-side bwr back out to user space (ubuf)
    if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
        ret = -EFAULT;
        goto out;
    }
out:
    return ret;
}
This function does the copying between user space and the kernel: it copies the user-space binder_write_read struct into the kernel, dispatches to binder_thread_write() and/or binder_thread_read() depending on which buffer carries data, and copies the updated bwr back to user space.
2.4.4 binder_parse
binder_parse() lives in frameworks/native/cmds/servicemanager/binder.c:
// frameworks/native/cmds/servicemanager/binder.c, line 204
int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uintptr_t ptr, size_t size, binder_handler func)
{
    int r = 1;
    uintptr_t end = ptr + (uintptr_t) size;

    while (ptr < end) {
        uint32_t cmd = *(uint32_t *) ptr;
        ptr += sizeof(uint32_t);
#if TRACE
        fprintf(stderr, "%s:\n", cmd_name(cmd));
#endif
        switch (cmd) {
        case BR_NOOP:
            // no-op: nothing to do
            break;
        case BR_TRANSACTION_COMPLETE:
            break;
        case BR_INCREFS:
        case BR_ACQUIRE:
        case BR_RELEASE:
        case BR_DECREFS:
#if TRACE
            fprintf(stderr, "  %p, %p\n", (void *)ptr, (void *)(ptr + sizeof(void *)));
#endif
            ptr += sizeof(struct binder_ptr_cookie);
            break;
        case BR_TRANSACTION: {
            struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
            if ((end - ptr) < sizeof(*txn)) {
                ALOGE("parse: txn too small!\n");
                return -1;
            }
            binder_dump_txn(txn);
            if (func) {
                unsigned rdata[256/4];
                struct binder_io msg;
                struct binder_io reply;
                int res;

                bio_init(&reply, rdata, sizeof(rdata), 4);
                bio_init_from_txn(&msg, txn);
                res = func(bs, txn, &msg, &reply);
                binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
            }
            ptr += sizeof(*txn);
            break;
        }
        case BR_REPLY: {
            struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
            if ((end - ptr) < sizeof(*txn)) {
                ALOGE("parse: reply too small!\n");
                return -1;
            }
            binder_dump_txn(txn);
            if (bio) {
                bio_init_from_txn(bio, txn);
                bio = 0;
            } else {
                /* todo FREE BUFFER */
            }
            ptr += sizeof(*txn);
            r = 0;
            break;
        }
        case BR_DEAD_BINDER: {
            struct binder_death *death = (struct binder_death *)
                (uintptr_t) *(binder_uintptr_t *)ptr;
            ptr += sizeof(binder_uintptr_t);
            // a binder death notification: invoke the registered callback
            death->func(bs, death->ptr);
            break;
        }
        case BR_FAILED_REPLY:
            r = -1;
            break;
        case BR_DEAD_REPLY:
            r = -1;
            break;
        default:
            ALOGE("parse: OOPS %d\n", cmd);
            return -1;
        }
    }
    return r;
}
This parses the binder messages read back from the driver. Here ptr points into readbuf at the commands the driver wrote back, and func points at svcmgr_handler, so when a request arrives it is dispatched to svcmgr_handler().
Within the BR_TRANSACTION case we focus on two functions:
- the bio_init() function
- the bio_init_from_txn() function
bio_init()
bio_init() lays a binder_io out over a raw buffer, reserving room at the front for maxoffs offsets (as defined in AOSP's servicemanager binder.c; line number omitted):
// frameworks/native/cmds/servicemanager/binder.c
void bio_init(struct binder_io *bio, void *data,
              size_t maxdata, size_t maxoffs)
{
    size_t n = maxoffs * sizeof(size_t);

    if (n > maxdata) {
        bio->flags = BIO_F_OVERFLOW;
        bio->data_avail = 0;
        bio->offs_avail = 0;
        return;
    }

    bio->data = bio->data0 = (char *) data + n;
    bio->offs = bio->offs0 = data;
    bio->data_avail = maxdata - n;
    bio->offs_avail = maxoffs;
    bio->flags = 0;
}
The binder_io struct is defined in /frameworks/native/cmds/servicemanager/binder.h:
// frameworks/native/cmds/servicemanager/binder.h, line 12
struct binder_io
{
    char *data;            /* pointer to read/write from */
    binder_size_t *offs;   /* array of offsets */
    size_t data_avail;     /* bytes available in data buffer */
    size_t offs_avail;     /* entries available in offsets array */

    char *data0;           /* start of the data buffer */
    binder_size_t *offs0;  /* start of the offsets buffer */
    uint32_t flags;
    uint32_t unused;
};
bio_init_from_txn()
// frameworks/native/cmds/servicemanager/binder.c, line 409
void bio_init_from_txn(struct binder_io *bio, struct binder_transaction_data *txn)
{
bio->data = bio->data0 = (char *)(intptr_t)txn->data.ptr.buffer;
bio->offs = bio->offs0 = (binder_size_t *)(intptr_t)txn->data.ptr.offsets;
bio->data_avail = txn->data_size;
bio->offs_avail = txn->offsets_size / sizeof(size_t);
bio->flags = BIO_F_SHARED;
}
This one is simple: it points the bio object's data fields at the transaction data sitting in readbuf.
2.4.5 svcmgr_handler
// service_manager.c, line 244
int svcmgr_handler(struct binder_state *bs,
                   struct binder_transaction_data *txn,
                   struct binder_io *msg,
                   struct binder_io *reply)
{
    struct svcinfo *si;
    uint16_t *s;
    size_t len;
    uint32_t handle;
    uint32_t strict_policy;
    int allow_isolated;

    if (txn->target.ptr != BINDER_SERVICE_MANAGER)
        return -1;

    if (txn->code == PING_TRANSACTION)
        return 0;

    strict_policy = bio_get_uint32(msg);
    s = bio_get_string16(msg, &len);
    if (s == NULL) {
        return -1;
    }

    if ((len != (sizeof(svcmgr_id) / 2)) ||
        memcmp(svcmgr_id, s, sizeof(svcmgr_id))) {
        fprintf(stderr, "invalid id %s\n", str8(s, len));
        return -1;
    }

    if (sehandle && selinux_status_updated() > 0) {
        struct selabel_handle *tmp_sehandle = selinux_android_service_context_handle();
        if (tmp_sehandle) {
            selabel_close(sehandle);
            sehandle = tmp_sehandle;
        }
    }

    switch (txn->code) {
    case SVC_MGR_GET_SERVICE:
    case SVC_MGR_CHECK_SERVICE:
        // get the service name
        s = bio_get_string16(msg, &len);
        if (s == NULL) {
            return -1;
        }
        // look the service up by name
        handle = do_find_service(bs, s, len, txn->sender_euid, txn->sender_pid);
        if (!handle)
            break;
        bio_put_ref(reply, handle);
        return 0;

    case SVC_MGR_ADD_SERVICE:
        // get the service name
        s = bio_get_string16(msg, &len);
        if (s == NULL) {
            return -1;
        }
        handle = bio_get_ref(msg);
        allow_isolated = bio_get_uint32(msg) ? 1 : 0;
        // register the service
        if (do_add_service(bs, s, len, handle, txn->sender_euid,
            allow_isolated, txn->sender_pid))
            return -1;
        break;

    case SVC_MGR_LIST_SERVICES: {
        uint32_t n = bio_get_uint32(msg);

        if (!svc_can_list(txn->sender_pid)) {
            ALOGE("list_service() uid=%d - PERMISSION DENIED\n",
                  txn->sender_euid);
            return -1;
        }
        si = svclist;
        while ((n-- > 0) && si)
            si = si->next;
        if (si) {
            bio_put_string16(reply, si->name);
            return 0;
        }
        return -1;
    }
    default:
        ALOGE("unknown code %d\n", txn->code);
        return -1;
    }

    bio_put_uint32(reply, 0);
    return 0;
}
The code looks long, but it boils down to three things: servicemanager offers service lookup, service registration, and enumeration of all services.
// service_manager.c, line 128
struct svcinfo
{
    struct svcinfo *next;
    uint32_t handle;
    struct binder_death death;
    int allow_isolated;
    size_t len;
    uint16_t name[0];
};
Every registered service is represented by an svcinfo struct. The handle value is determined, during service registration, by the side that hosts the service. The name lookup over svclist is sketched below.
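For completeness, the lookup behind do_find_service() is a plain walk over the svclist; this is find_svc() from service_manager.c, essentially as in AOSP:
// service_manager.c
struct svcinfo *find_svc(const uint16_t *s16, size_t len)
{
    struct svcinfo *si;

    // walk the singly linked svclist, comparing UTF-16 names
    for (si = svclist; si; si = si->next) {
        if ((len == si->len) &&
            !memcmp(s16, si->name, len * sizeof(uint16_t))) {
            return si;
        }
    }
    return NULL;
}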
3 Summary
- ServiceManager centrally manages all services in the system. It uses permission checks to decide whether a process may register a service, and services are looked up by their string names.
- Because ServiceManager registers a death notification with every service that registers with it, when a service's hosting process dies, only ServiceManager needs to be notified.
- Each client learns about a service process by querying ServiceManager, which avoids the heavy load of every client probing services directly.
ServiceManager startup flow:
- binder_open(): open the binder driver and mmap() a 128KB memory mapping;
- binder_become_context_manager(): tell the binder driver to make this process the context manager (the Binder daemon);
- check SELinux permissions to decide whether a process may register or look up a given service;
- enter the loop and wait for requests from clients;
- during registration a service is identified by its name; if a service with the same name is already registered, the stale binder node is released (binder_node_release), and this is also the path that fires the death-notification callback (see the sketch below).
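To illustrate that callback path, this is the death handler ServiceManager installs for each registered service (svcinfo_death() in service_manager.c, essentially as in AOSP); it releases the stale handle so the name can be registered again:
// service_manager.c
void svcinfo_death(struct binder_state *bs, void *ptr)
{
    struct svcinfo *si = (struct svcinfo *) ptr;

    ALOGI("service '%s' died\n", str8(si->name, si->len));
    if (si->handle) {
        // drop the reference on the dead service's handle
        binder_release(bs, si->handle);
        si->handle = 0;
    }
}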