一、GCD的应用与应用原理
1、单例 - dispatch_once_f()
单例代码块:
// Singleton via dispatch_once: the block runs at most once per onceToken.
static dispatch_once_t onceToken;
dispatch_once(&onceToken, ^{
<#code to be executed once#>
});
源码搜索:
#if DISPATCH_ONCE_INLINE_FASTPATH
DISPATCH_INLINE DISPATCH_ALWAYS_INLINE DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
DISPATCH_SWIFT3_UNAVAILABLE("Use lazily initialized globals instead")
// Inline fast path for dispatch_once: when the predicate is already ~0l the
// block has run, so only a compiler barrier is needed; otherwise fall through
// to the real dispatch_once.
void
_dispatch_once(dispatch_once_t *predicate,
DISPATCH_NOESCAPE dispatch_block_t block)
{
if (DISPATCH_EXPECT(*predicate, ~0l) != ~0l) {
dispatch_once(predicate, block);
} else {
dispatch_compiler_barrier();// prevents the compiler reordering across the check
}
DISPATCH_COMPILER_CAN_ASSUME(*predicate == ~0l);
}
#undef dispatch_once
#define dispatch_once _dispatch_once
#endif // DISPATCH_ONCE_INLINE_FASTPATH
// 搜索 dispatch_once(dis :
#ifdef __BLOCKS__
// Block-based entry point: forwards to dispatch_once_f with the block's
// invoke function pointer, so both APIs share one implementation.
void
dispatch_once(dispatch_once_t *val, dispatch_block_t block)
{
dispatch_once_f(val, block, _dispatch_Block_invoke(block));
}
#endif
1.1、找到单例源码 - dispatch_once_f()
:
// Core implementation shared by dispatch_once and dispatch_once_f.
DISPATCH_NOINLINE
void
dispatch_once_f(dispatch_once_t *val, void *ctxt, dispatch_function_t func)
{
// val is the caller's static dispatch_once_t token, reinterpreted as a gate
dispatch_once_gate_t l = (dispatch_once_gate_t)val;
#if !DISPATCH_ONCE_INLINE_FASTPATH || DISPATCH_ONCE_USE_QUIESCENT_COUNTER
uintptr_t v = os_atomic_load(&l->dgo_once, acquire);
if (likely(v == DLOCK_ONCE_DONE)) {
// token already marked DLOCK_ONCE_DONE: every later call returns at once
return;
}
#if DISPATCH_ONCE_USE_QUIESCENT_COUNTER
if (likely(DISPATCH_ONCE_IS_GEN(v))) {
return _dispatch_once_mark_done_if_quiesced(l, v);
}
#endif
#endif
if (_dispatch_once_gate_tryenter(l)) {
// first caller wins the gate and runs the function exactly once
return _dispatch_once_callout(l, ctxt, func);
}
// gate is held by another thread: wait until the once is marked done
return _dispatch_once_wait(l);
}
单例如何只执行一次呢? --> 标识符?
源码分析
_dispatch_once_gate_tryenter()
锁:
DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_once_gate_tryenter(dispatch_once_gate_t l)
{
/**
 * Atomic compare-and-exchange: if dgo_once is still DLOCK_ONCE_UNLOCKED,
 * store the current thread's lock value and return true (we own the gate);
 * otherwise another thread got there first and this returns false.
 */
return os_atomic_cmpxchg(&l->dgo_once, DLOCK_ONCE_UNLOCKED,
(uintptr_t)_dispatch_lock_value_for_self(), relaxed);
}
// Reached when _dispatch_once_gate_tryenter() returned true (we own the gate).
static void
_dispatch_once_callout(dispatch_once_gate_t l, void *ctxt,
dispatch_function_t func)
{
_dispatch_client_callout(ctxt, func);// return f(ctxt); runs the user's function
// mark the once as done and wake any threads waiting on the gate
_dispatch_once_gate_broadcast(l);
}
_dispatch_once_gate_broadcast()
:
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_once_gate_broadcast(dispatch_once_gate_t l)
{
// lock value identifying the current thread (the one that ran the block)
dispatch_lock value_self = _dispatch_lock_value_for_self();
uintptr_t v;
#if DISPATCH_ONCE_USE_QUIESCENT_COUNTER
v = _dispatch_once_mark_quiescing(l);
#else
// return os_atomic_xchg(&dgo->dgo_once, DLOCK_ONCE_DONE, release);
v = _dispatch_once_mark_done(l);// mark DLOCK_ONCE_DONE so later calls return early
#endif
// if the old value is just our own lock value, no one contended: nothing to wake
if (likely((dispatch_lock)v == value_self)) return;
_dispatch_gate_broadcast_slow(&l->dgo_gate, (dispatch_lock)v);
}
单例总结:
- 单例的流程:
- 第一次进来,锁是开着的,进入
_dispatch_once_callout()
处理block
任务f(ctxt)
; -
_dispatch_once_gate_broadcast()
,给自己加锁(保证了线程安全),然后对once
标识进行标记处理; - 再次进来,标识符已是
done
,直接return。
- 第一次进来,锁是开着的,进入
2、栅栏函数
2.1、应用测试代码:
// Barrier demo: tasks submitted before the barrier complete first, then the
// barrier block runs alone, then tasks submitted after it. The ordering only
// holds on this custom concurrent queue.
- (void)my_barrier {
dispatch_queue_t queue01 = dispatch_queue_create("create_my_queue01", DISPATCH_QUEUE_CONCURRENT);
// async task submitted before the barrier
dispatch_async(queue01, ^{
sleep(1);
NSLog(@"%@ -- 耗时任务",[NSThread currentThread]);
});
/**
 * Sync vs async barrier:
 * dispatch_barrier_sync  blocks the current thread until the barrier runs;
 * dispatch_barrier_async does not block the current thread.
 */
dispatch_barrier_async(queue01, ^{
NSLog(@"%@ -- 栅栏",[NSThread currentThread]);
});
// dispatch_barrier_async(queue01, ^{
//
// NSLog(@"%@ -- 栅栏",[NSThread currentThread]);
// });
dispatch_async(queue01, ^{
NSLog(@"%@ -- 我是前面加了栅栏函数",[NSThread currentThread]);
});
NSLog(@"-- 我是主线程 --");
}
/** 同步栅栏函数输出结果
<NSThread: 0x600002c2c680>{number = 5, name = (null)} -- 耗时任务
<NSThread: 0x600002c4e140>{number = 1, name = main} -- 栅栏
-- 我是主线程 --
<NSThread: 0x600002c2c680>{number = 5, name = (null)} -- 我是前面加了栅栏函数
*/
/** 异步栅栏函数输出结果
-- 我是主线程 --
<NSThread: 0x600003abad80>{number = 7, name = (null)} -- 耗时任务
<NSThread: 0x600003abad80>{number = 7, name = (null)} -- 栅栏
<NSThread: 0x600003abad80>{number = 7, name = (null)} -- 我是前面加了栅栏函数
*/
- 同步栅栏函数:阻塞当前线程(在哪堵哪),直至栅栏任务完成 --> 同步锁的效果
- 异步栅栏函数:不阻塞当前线程,只阻拦栅栏所在队列中后续的任务
- 栅栏函数,异步拦队列,同步拦线程;
- 栅栏函数只适合添加到自定义的并发队列;给串行队列加栅栏虽不报错,但既浪费性能又没有任何意义。
2.2、栅栏函数底层原理
同步函数 - dispatch_sync
// dispatch_sync entry point: tags the work item as a block and forwards to
// _dispatch_sync_f with the block's invoke function pointer.
DISPATCH_NOINLINE
void
dispatch_sync(dispatch_queue_t dq, dispatch_block_t work)
{
uintptr_t dc_flags = DC_FLAG_BLOCK;
// blocks carrying private data take a dedicated path
if (unlikely(_dispatch_block_has_private_data(work))) {
return _dispatch_sync_block_with_privdata(dq, work, dc_flags);
}
_dispatch_sync_f(dq, work, _dispatch_Block_invoke(work), dc_flags);
}
_dispatch_sync_f()
--> _dispatch_sync_f_inline()
:
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_sync_f_inline(dispatch_queue_t dq, void *ctxt,
dispatch_function_t func, uintptr_t dc_flags)
{
if (likely(dq->dq_width == 1)) {// width == 1 --> serial queue
// serial queues and barriers share the same code path
return _dispatch_barrier_sync_f(dq, ctxt, func, dc_flags);
}
if (unlikely(dx_metatype(dq) != _DISPATCH_LANE_TYPE)) {
DISPATCH_CLIENT_CRASH(0, "Queue type doesn't support dispatch_sync");
}
dispatch_lane_t dl = upcast(dq)._dl;
// Global concurrent queues and queues bound to non-dispatch threads
// always fall into the slow case, see DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE
if (unlikely(!_dispatch_queue_try_reserve_sync_width(dl))) {
return _dispatch_sync_f_slow(dl, ctxt, func, 0, dl, dc_flags);
}
if (unlikely(dq->do_targetq->do_targetq)) {
// queue targets another non-root queue: recurse through the hierarchy
return _dispatch_sync_recurse(dl, ctxt, func, dc_flags);
}
_dispatch_introspection_sync_begin(dl);
_dispatch_sync_invoke_and_complete(dl, ctxt, func DISPATCH_TRACE_ARG(
_dispatch_trace_item_sync_push_pop(dq, ctxt, func, dc_flags)));
}
2.2.1、_dispatch_barrier_sync_f()
1)栅栏 - 串行队列
通过上面源码得知,串行队列和栅栏函数走的是同一套代码逻辑_dispatch_barrier_sync_f()
.
继续探索源码:_dispatch_barrier_sync_f()
-->_dispatch_barrier_sync_f_inline()
:
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_barrier_sync_f_inline(dispatch_queue_t dq, void *ctxt,
dispatch_function_t func, uintptr_t dc_flags)
{
dispatch_tid tid = _dispatch_tid_self();// current thread id (from its port); each thread has its own tid
if (unlikely(dx_metatype(dq) != _DISPATCH_LANE_TYPE)) {
DISPATCH_CLIENT_CRASH(0, "Queue type doesn't support dispatch_sync");
}
dispatch_lane_t dl = upcast(dq)._dl;
// The more correct thing to do would be to merge the qos of the thread
// that just acquired the barrier lock into the queue state.
//
// However this is too expensive for the fast path, so skip doing it.
// The chosen tradeoff is that if an enqueue on a lower priority thread
// contends with this fast path, this thread may receive a useless override.
//
// Global concurrent queues and queues bound to non-dispatch threads
// always fall into the slow case, see DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE
if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(dl, tid))) {
// could not take the barrier lock immediately: go through the slow path
return _dispatch_sync_f_slow(dl, ctxt, func, DC_FLAG_BARRIER, dl,
DC_FLAG_BARRIER | dc_flags);
}
if (unlikely(dl->do_targetq->do_targetq)) {
return _dispatch_sync_recurse(dl, ctxt, func,
DC_FLAG_BARRIER | dc_flags);// recurse through target queues
}
_dispatch_introspection_sync_begin(dl);// introspection/tracing preparation
_dispatch_lane_barrier_sync_invoke_and_complete(dl, ctxt, func
DISPATCH_TRACE_ARG(_dispatch_trace_item_sync_push_pop(
dq, ctxt, func, dc_flags | DC_FLAG_BARRIER)));
}
_dispatch_lane_barrier_sync_invoke_and_complete()
:
/*
 * For queues we can cheat and inline the unlock code, which is invalid
 * for objects with a more complex state machine (sources or mach channels)
 */
DISPATCH_NOINLINE
static void
_dispatch_lane_barrier_sync_invoke_and_complete(dispatch_lane_t dq,
void *ctxt, dispatch_function_t func DISPATCH_TRACE_ARG(void *dc))
{
// push -> callout (run the work) -> pop, all on the current thread
_dispatch_sync_function_invoke_inline(dq, ctxt, func);
// work item finished
_dispatch_trace_item_complete(dc);
if (unlikely(dq->dq_items_tail || dq->dq_width > 1)) {
return _dispatch_lane_barrier_complete(dq, 0, 0);
}
// Presence of any of these bits requires more work that only
// _dispatch_*_barrier_complete() handles properly
//
// Note: testing for RECEIVED_OVERRIDE or RECEIVED_SYNC_WAIT without
// checking the role is sloppy, but is a super fast check, and neither of
// these bits should be set if the lock was never contended/discovered.
const uint64_t fail_unlock_mask = DISPATCH_QUEUE_SUSPEND_BITS_MASK |
DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_DIRTY |
DISPATCH_QUEUE_RECEIVED_OVERRIDE | DISPATCH_QUEUE_SYNC_TRANSFER |
DISPATCH_QUEUE_RECEIVED_SYNC_WAIT;
uint64_t old_state, new_state;
// similar to _dispatch_queue_drain_try_unlock
// release the drain lock so queued work from other threads can proceed
os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
new_state = old_state - DISPATCH_QUEUE_SERIAL_DRAIN_OWNED;
new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK;
new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK;
if (unlikely(old_state & fail_unlock_mask)) {
os_atomic_rmw_loop_give_up({
// the barrier is complete: take the full completion path
return _dispatch_lane_barrier_complete(dq, 0, 0);
});
}
});
if (_dq_state_is_base_wlh(old_state)) {
_dispatch_event_loop_assert_not_owned((dispatch_wlh_t)dq);
}
}
如上,栅栏和串行队列,因串行队列并不开辟新线程,其后续任务都要等当前的完成后通知释放其他线程开始继续执行。
2)并发队列 加栅栏函数实现原理
并发队列是如何实现栅栏阻塞的呢?
_dispatch_queue_try_acquire_barrier_sync()
-->_dispatch_queue_try_acquire_barrier_sync_and_suspend()
:
/* Used by _dispatch_barrier_{try,}sync
 *
 * Note, this fails if any of e:1 or dl!=0, but that allows this code to be a
 * simple cmpxchg which is significantly faster on Intel, and makes a
 * significant difference on the uncontended codepath.
 *
 * See discussion for DISPATCH_QUEUE_DIRTY in queue_internal.h
 *
 * Initial state must be `completely idle`
 * Final state forces { ib:1, qf:1, w:0 }
 */
DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
static inline bool
_dispatch_queue_try_acquire_barrier_sync_and_suspend(dispatch_lane_t dq,
uint32_t tid, uint64_t suspend_count)
{
uint64_t init = DISPATCH_QUEUE_STATE_INIT_VALUE(dq->dq_width);
uint64_t value = DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER |
_dispatch_lock_value_from_tid(tid) |
(suspend_count * DISPATCH_QUEUE_SUSPEND_INTERVAL);
uint64_t old_state, new_state;
return os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, {
uint64_t role = old_state & DISPATCH_QUEUE_ROLE_MASK;
if (old_state != (init | role)) {
// queue is not completely idle: give up acquiring the barrier lock;
// this pairs with the wake-up done when the barrier completes
os_atomic_rmw_loop_give_up(break);
}
new_state = value | role;
});
}
系统级os_atomic_rmw_loop2o()
原子地读改写队列状态:只要队列不是完全空闲,就放弃(give_up)获取栅栏锁,后续任务排队等待,直至栅栏任务完成后再被唤醒。
2.2.2、死锁
举例主线程死锁
通过源码分析死锁原因:
当在主线程添加同步任务时,主队列挂起,向当前队列添加任务:
_dispatch_sync_f_slow()
:
#pragma mark -
#pragma mark dispatch_sync / dispatch_barrier_sync
DISPATCH_NOINLINE
static void
_dispatch_sync_f_slow(dispatch_queue_class_t top_dqu, void *ctxt,
dispatch_function_t func, uintptr_t top_dc_flags,
dispatch_queue_class_t dqu, uintptr_t dc_flags)
{
dispatch_queue_t top_dq = top_dqu._dq;
dispatch_queue_t dq = dqu._dq;
if (unlikely(!dq->do_targetq)) {
return _dispatch_sync_function_invoke(dq, ctxt, func);
}
pthread_priority_t pp = _dispatch_get_priority();
struct dispatch_sync_context_s dsc = {
.dc_flags = DC_FLAG_SYNC_WAITER | dc_flags,
.dc_func = _dispatch_async_and_wait_invoke,
.dc_ctxt = &dsc,
.dc_other = top_dq,
.dc_priority = pp | _PTHREAD_PRIORITY_ENFORCE_FLAG,
.dc_voucher = _voucher_get(),
.dsc_func = func,
.dsc_ctxt = ctxt,
.dsc_waiter = _dispatch_tid_self(),
};
// push the work item onto the queue
_dispatch_trace_item_push(top_dq, &dsc);
// then wait for the queue to drain down to it
__DISPATCH_WAIT_FOR_QUEUE__(&dsc, dq);
/**
DISPATCH_NOINLINE
static void
__DISPATCH_WAIT_FOR_QUEUE__(dispatch_sync_context_t dsc, dispatch_queue_t dq)
{
uint64_t dq_state = _dispatch_wait_prepare(dq);// prepare dq for waiting
if (unlikely(_dq_state_drain_locked_by(dq_state, dsc->dsc_waiter))) {
// deadlock detected: the runtime crashes with a diagnostic
DISPATCH_CLIENT_CRASH((uintptr_t)dq_state,
"dispatch_sync called on queue "
"already owned by current thread");
}
... more code ...
}
// _dq_state_drain_locked_by() --> _dispatch_lock_is_locked_by()
DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_lock_is_locked_by(dispatch_lock lock_value, dispatch_tid tid)
{
// equivalent to _dispatch_lock_owner(lock_value) == tid
// true when the thread currently draining the queue IS the thread that is
// about to wait on it (same tid) --> deadlock
return ((lock_value ^ tid) & DLOCK_OWNER_MASK) == 0;
}
*/
if (dsc.dsc_func == NULL) {
// dsc_func being cleared means that the block ran on another thread ie.
// case (2) as listed in _dispatch_async_and_wait_f_slow.
dispatch_queue_t stop_dq = dsc.dc_other;
return _dispatch_sync_complete_recurse(top_dq, stop_dq, top_dc_flags);
}
_dispatch_introspection_sync_begin(top_dq);
_dispatch_trace_item_pop(top_dq, &dsc);
_dispatch_sync_invoke_and_complete_recurse(top_dq, ctxt, func,top_dc_flags
DISPATCH_TRACE_ARG(&dsc));
}
*
已经在等待的线程和将要开始等待的线程 tid
相同 == 同一线程自己等自己 == 抛出异常 == 死锁
3、信号量
示例代码:
// Semaphore demo: the initial value caps how many tasks may run concurrently.
- (void)my_dispatch_semaphore {
dispatch_queue_t queue01 = dispatch_get_global_queue(0, 0);
// initial value 3: all three tasks below may run at the same time
// NOTE(review): the sample output below (task03 waiting for a slot) matches
// an initial value of 2, not the 3 passed here - confirm which run it is from
dispatch_semaphore_t sem = dispatch_semaphore_create(3);
/** sample output (captured log):
开始执行任务02
开始执行任务01
任务02完成
任务01完成
开始执行任务03
任务03完成
*/
dispatch_async(queue01, ^{
dispatch_semaphore_wait(sem, DISPATCH_TIME_FOREVER);// value--, block if < 0
NSLog(@"开始任务01");
sleep(1);
NSLog(@"任务01完成");
dispatch_semaphore_signal(sem);// value++, releases one waiter
});
dispatch_async(queue01, ^{
dispatch_semaphore_wait(sem, DISPATCH_TIME_FOREVER);
NSLog(@"开始任务02");
sleep(1);
NSLog(@"任务02完成");
dispatch_semaphore_signal(sem);
});
dispatch_async(queue01, ^{
dispatch_semaphore_wait(sem, DISPATCH_TIME_FOREVER);
NSLog(@"开始任务03");
sleep(1);
NSLog(@"任务03完成");
dispatch_semaphore_signal(sem);
});
}
3.1、dispatch_semaphore_signal()
--> 信号++
// dispatch_semaphore_signal: atomically increments the semaphore value.
long
dispatch_semaphore_signal(dispatch_semaphore_t dsema)
{
long value = os_atomic_inc2o(dsema, dsema_value, release);
if (likely(value > 0)) {
// no thread was waiting: nothing to wake
return 0;
}
if (unlikely(value == LONG_MIN)) {
DISPATCH_CLIENT_CRASH(value,
"Unbalanced call to dispatch_semaphore_signal()");
}
// value <= 0 means at least one waiter exists: wake one on the slow path
return _dispatch_semaphore_signal_slow(dsema);
}
拆宏:(dsema, dsema_value, release)
os_atomic_inc2o(p, f, m)
-->os_atomic_add2o(p, f, 1, m)
-->os_atomic_add(&(p)->f, (v), m)
: os_atomic_add(&(dsema->dsema_value), (1), m)
-->_os_atomic_c11_op((p), (v), m, add, +)
: _os_atomic_c11_op(dsema->dsema_value, 1, m, add, +)
--> atomic_fetch_add_explicit(dsema->dsema_value, 1)
({ _os_atomic_basetypeof(p) _v = (v), _r = \
atomic_fetch_##o##_explicit(_os_atomic_c11_atomic(p), _v, \
memory_order_##m); (__typeof__(_r))(_r op _v); })
})
atomic_fetch_add_explicit(dsema->dsema_value, 1)
即:对dsema->dsema_value
做原子自增(+1)操作。
3.2、dispatch_semaphore_wait()
--> 信号 --
// dispatch_semaphore_wait: atomically decrements the semaphore value.
long
dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout)
{
long value = os_atomic_dec2o(dsema, dsema_value, acquire);
if (likely(value >= 0)) {// e.g. 1 - 1 = 0, so 0 still means "no need to wait"
return 0;
}
// value < 0: block on the slow path until signaled or the timeout expires
return _dispatch_semaphore_wait_slow(dsema, timeout);
}
os_atomic_dec2o()
与os_atomic_inc2o()
同理,最终展开为
atomic_fetch_sub_explicit()
,对dsema->dsema_value
做原子自减(-1)操作。
4、调度组
4.1、示例代码:
// Dispatch group demo: enter/leave must balance; the notify block fires once
// the group's count returns to zero. Note: the nested async task inside
// task01 is NOT covered by the enter/leave pair around task01.
- (void)my_dispath_group {
dispatch_queue_t my_queue = dispatch_queue_create("my_queue001", DISPATCH_QUEUE_CONCURRENT);
dispatch_group_t group = dispatch_group_create();
dispatch_group_enter(group);// count--
dispatch_async(dispatch_get_global_queue(0, 0), ^{
NSLog(@"开始任务01");
sleep(1);
dispatch_async(my_queue, ^{
// nested task: not tracked by the group
NSLog(@"任务01里面的异步耗时任务开始");
sleep(3);
NSLog(@"任务01里面的异步耗时任务完成");
});
NSLog(@"任务01完成");
dispatch_group_leave(group);// count++
});
dispatch_group_enter(group);
dispatch_async(dispatch_get_global_queue(0, 0), ^{
NSLog(@"开始任务02");
sleep(1);
NSLog(@"任务02完成");
dispatch_group_leave(group);
});
dispatch_group_notify(group, dispatch_get_main_queue(), ^{
NSLog(@"group 里的任务完成了");
});
NSLog(@"-- 我是主线程的任务 --");
}
/** 运行结果:
开始任务01
-- 我是主线程的任务 --
开始任务02
任务02完成
任务01完成
任务01里面的异步耗时任务开始
group 里的任务完成了
任务01里面的异步耗时任务完成
*/
4.1.1、假定需求场景 Demo
需求
:需要上述代码中的任务 01 02 03 无顺序要求,但在都执行完毕后通知主线程处理其他事务。
上述代码的运行结果已知,任务03并未完成,但是dispatch_group_notify()
已执行了,显然并不满足需求。
解决方案
:修改代码如下,将任务 01 中的异步任务 03 也加进调度组中,再次运行程序:
// Option 1: wrap the nested async task with an explicit enter/leave pair
dispatch_group_enter(group);
dispatch_async(my_queue, ^{
NSLog(@"任务01里面的异步耗时任务03开始");
sleep(3);
NSLog(@"任务01里面的异步耗时任务03完成");
dispatch_group_leave(group);
});
// Option 2: dispatch_group_async performs the enter/leave internally
dispatch_group_async(group, my_queue, ^{
NSLog(@"任务01里面的异步耗时任务03开始");
sleep(3);
NSLog(@"任务01里面的异步耗时任务03完成");
});
/** 上述2方案,运行结果相同:
-- 我是主线程的任务 --
开始任务01
开始任务02
任务01完成
任务02完成
任务01里面的异步耗时任务03开始
任务01里面的异步耗时任务03完成
group 里的任务完成了
*/
看似已满足需求,是完全没问题了吗???
4.1.2、下面以上述代码为例,我们对调度组进行多种场景测试:
1)代码修改如下,执行:
// 1、enter leave 不成对,enter多于leave
dispatch_group_notify(group, dispatch_get_main_queue(), ^{
NSLog(@"group 里的任务完成了");// 一直等 notify 不会执行
});
// 2、leave 不成对,enter少于leave
运行崩溃
2)代码再次修改如下,执行:
// 将`dispatch_group_notify`移动到所有任务`enter`进组前面
// 1、enter leave 成对
dispatch_group_notify(group, dispatch_get_main_queue(), ^{
NSLog(@"group 里的任务完成了");
});
// 2、leave 全部注释掉,只剩 enter
dispatch_group_enter(group);
dispatch_async(dispatch_get_global_queue(0, 0), ^{
..... 不做重复......
});
/** 1、2 运行结果相同,如下:
-- 我是主线程的任务 --
开始任务02
开始任务01
group 里的任务完成了
任务02完成
任务01完成
任务01里面的异步耗时任务03开始
任务01里面的异步耗时任务03完成
*/
问题01
: 为何group_notify
通知和 group_enter group_leave
好像无关了呢?
§
调度组应用总结:
- 在任务
enter
进组和leave
成对出现后就会通知完成dispatch_group_notify
. -
dispatch_group_notify
的通知,只要group_enter
和group_leave
成对出现了就可能被通知,不一定是group
中任务都已完成; -
group_enter
和group_leave
不成对时:-
group_enter
多时,程序dispatch_group_notify
的通知不会被执行 -
group_leave
多时,运行程序崩溃,崩溃位置不一定,leave
和enter
只要成组,不一定是按代码编写顺序的。
-
4.2、调度组 group 原理
问题02
:dispatch_group_async()
和group_enter / group_leave
为何效果相同?
下面带着2个问题,对调度组原理进行分析。
4.2.1、group
创建
dispatch_group_create()
: alloc
// dispatch_group_create(): allocates a group with an initial count of 0.
dispatch_group_t
dispatch_group_create(void)
{
return _dispatch_group_create_with_count(0);
}
// _dispatch_group_create_with_count()
DISPATCH_ALWAYS_INLINE
static inline dispatch_group_t
_dispatch_group_create_with_count(uint32_t n)
{
dispatch_group_t dg = _dispatch_object_alloc(DISPATCH_VTABLE(group),
sizeof(struct dispatch_group_s));
dg->do_next = DISPATCH_OBJECT_LISTLESS;
dg->do_targetq = _dispatch_get_default_queue(false);
if (n) {// the public create path passes n = 0, so this branch is skipped
os_atomic_store2o(dg, dg_bits,
(uint32_t)-n * DISPATCH_GROUP_VALUE_INTERVAL, relaxed);
os_atomic_store2o(dg, do_ref_cnt, 1, relaxed); // <rdar://22318411>
}
return dg;
}
4.2.2、dispatch_group_enter()
void
dispatch_group_enter(dispatch_group_t dg)
{
// The value is decremented on a 32bits wide atomic so that the carry
// for the 0 -> -1 transition is not propagated to the upper 32bits.
// os_atomic_sub_orig2o: decrements dg->dg_bits --> semaphore-like counting
uint32_t old_bits = os_atomic_sub_orig2o(dg, dg_bits,
DISPATCH_GROUP_VALUE_INTERVAL, acquire);
uint32_t old_value = old_bits & DISPATCH_GROUP_VALUE_MASK;
// first enter (0 -> -1): retain the group while it is "in use"
if (unlikely(old_value == 0)) {
_dispatch_retain(dg); // <rdar://problem/22318411>
}
// crashes once the nesting limit DISPATCH_GROUP_VALUE_MAX is reached
if (unlikely(old_value == DISPATCH_GROUP_VALUE_MAX)) {
DISPATCH_CLIENT_CRASH(old_bits,
"Too many nested calls to dispatch_group_enter()");
}
}
dispatch_group_enter()
后是如何堵塞住dispatch_group_notify()
的呢?--> dispatch_group_notify()
源码分析解答。
4.2.3、dispatch_group_notify()
#ifdef __BLOCKS__
// Block-based notify: wraps the block in a continuation, then forwards to the
// internal _dispatch_group_notify.
void
dispatch_group_notify(dispatch_group_t dg, dispatch_queue_t dq,
dispatch_block_t db)
{
dispatch_continuation_t dsn = _dispatch_continuation_alloc();
_dispatch_continuation_init(dsn, dq, db, 0, DC_FLAG_CONSUME);
_dispatch_group_notify(dg, dq, dsn);
}
#endif
_dispatch_group_notify()
:
// _dispatch_group_notify():
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_group_notify(dispatch_group_t dg, dispatch_queue_t dq,
dispatch_continuation_t dsn)
{
uint64_t old_state, new_state;
dispatch_continuation_t prev;
dsn->dc_data = dq;
_dispatch_retain(dq);
/**
#define os_mpsc_push_update_tail(Q, tail, _o_next) ({ \
os_mpsc_node_type(Q) _tl = (tail); \
os_atomic_store2o(_tl, _o_next, NULL, relaxed); \
os_atomic_xchg(_os_mpsc_tail Q, _tl, release); \
})
*/
// append this notification to the group's notify list
prev = os_mpsc_push_update_tail(os_mpsc(dg, dg_notify), dsn, do_next);
if (os_mpsc_push_was_empty(prev)) _dispatch_retain(dg);
os_mpsc_push_update_prev(os_mpsc(dg, dg_notify), prev, dsn, do_next);
if (os_mpsc_push_was_empty(prev)) {
os_atomic_rmw_loop2o(dg, dg_state, old_state, new_state, release, {
new_state = old_state | DISPATCH_GROUP_HAS_NOTIFS;
if ((uint32_t)old_state == 0) {
// count is back to 0 (enters and leaves balanced): wake right away
os_atomic_rmw_loop_give_up({
return _dispatch_group_wake(dg, new_state, false);
});
}
});
}
}
// _dispatch_group_wake(): delivers queued notify blocks and wakes waiters.
DISPATCH_NOINLINE
static void
_dispatch_group_wake(dispatch_group_t dg, uint64_t dg_state, bool needs_release)
{
// needs_release == false --> refs starts at 0
uint16_t refs = needs_release ? 1 : 0; // <rdar://problem/22318411>
// only runs when the HAS_NOTIFS bit is set in dg_state
if (dg_state & DISPATCH_GROUP_HAS_NOTIFS) {
dispatch_continuation_t dc, next_dc, tail;
// Snapshot before anything is notified/woken <rdar://problem/8554546>
dc = os_mpsc_capture_snapshot(os_mpsc(dg, dg_notify), &tail);
do {
dispatch_queue_t dsn_queue = (dispatch_queue_t)dc->dc_data;
next_dc = os_mpsc_pop_snapshot_head(dc, tail, do_next);
//
/** same shape as the async-function push path:
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_continuation_async(dispatch_queue_class_t dqu,
dispatch_continuation_t dc, dispatch_qos_t qos, uintptr_t dc_flags)
{
#if DISPATCH_INTROSPECTION
if (!(dc_flags & DC_FLAG_NO_INTROSPECTION)) {
_dispatch_trace_item_push(dqu, dc);
}
#else
(void)dc_flags;
#endif
return dx_push(dqu._dq, dc, qos);
}
*/
_dispatch_continuation_async(dsn_queue, dc,
_dispatch_qos_from_pp(dc->dc_priority), dc->dc_flags);
_dispatch_release(dsn_queue);
} while ((dc = next_dc));
refs++;
}
if (dg_state & DISPATCH_GROUP_HAS_WAITERS) {
// wake threads blocked in dispatch_group_wait()
_dispatch_wake_by_address(&dg->dg_gen);
}
if (refs) _dispatch_release_n(dg, refs);
}
由上源码与场景可知,dispatch_group_enter()
时,old_bits: 0-->-1
;
而dispatch_group_notify()
由源码可知,old_state == 0
时才进行_dispatch_group_wake()
,so,若未dispatch_group_leave()
,计数停留在-1
,是不会执行 wake(唤醒)的。
4.2.4、dispatch_group_leave()
void
dispatch_group_leave(dispatch_group_t dg)
{
// The value is incremented on a 64bits wide atomic so that the carry for
// the -1 -> 0 transition increments the generation atomically.
// os_atomic_add_orig2o: ++ operation, balancing the -- done in enter
uint64_t new_state, old_state = os_atomic_add_orig2o(dg, dg_state,
DISPATCH_GROUP_VALUE_INTERVAL, release);
// a balanced leave: -1 + 1 = 0
uint32_t old_value = (uint32_t)(old_state & DISPATCH_GROUP_VALUE_MASK);
// DISPATCH_GROUP_VALUE_1 (0x00000000fffffffcULL): encoding of count == -1,
// i.e. this leave matches the last outstanding enter
if (unlikely(old_value == DISPATCH_GROUP_VALUE_1)) {
old_state += DISPATCH_GROUP_VALUE_INTERVAL;
do {
new_state = old_state;
if ((old_state & DISPATCH_GROUP_VALUE_MASK) == 0) {
new_state &= ~DISPATCH_GROUP_HAS_WAITERS;
new_state &= ~DISPATCH_GROUP_HAS_NOTIFS;
} else {
// If the group was entered again since the atomic_add above,
// we can't clear the waiters bit anymore as we don't know for
// which generation the waiters are for
new_state &= ~DISPATCH_GROUP_HAS_NOTIFS;
}
if (old_state == new_state) break;
} while (unlikely(!os_atomic_cmpxchgv2o(dg, dg_state,
old_state, new_state, &old_state, relaxed)));
// last leave: wake the group (notify blocks and waiters)
return _dispatch_group_wake(dg, old_state, true);
}
// leave without a matching enter (0 + 1): crash
if (unlikely(old_value == 0)) {
DISPATCH_CLIENT_CRASH((uintptr_t)old_value,
"Unbalanced call to dispatch_group_leave()");
}
}
dispatch_group_leave()
也可唤醒_dispatch_group_wake()
.
由上,关于问题01已解:dispatch_group_notify()
的 wake(唤醒)是通过 old_state == 0
来控制的,在将dispatch_group_notify()
添加在最前时,还没有任何 enter --> 计数不是-1
,调用时条件直接满足,通知立即执行。
4.2.5、dispatch_group_async()
关于问题02,dispatch_group_async()
内部是否封装了 dispatch_group_enter() / dispatch_group_leave()
呢?
#ifdef __BLOCKS__
// dispatch_group_async: async submit whose continuation carries
// DC_FLAG_GROUP_ASYNC, routing it through the group-aware invoke path.
void
dispatch_group_async(dispatch_group_t dg, dispatch_queue_t dq,
dispatch_block_t db)
{
dispatch_continuation_t dc = _dispatch_continuation_alloc();
uintptr_t dc_flags = DC_FLAG_CONSUME | DC_FLAG_GROUP_ASYNC;
dispatch_qos_t qos;
qos = _dispatch_continuation_init(dc, dq, db, 0, dc_flags);
_dispatch_continuation_group_async(dg, dq, dc, qos);
}
#endif
// _dispatch_continuation_group_async():
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_continuation_group_async(dispatch_group_t dg, dispatch_queue_t dq,
dispatch_continuation_t dc, dispatch_qos_t qos)
{
dispatch_group_enter(dg);// enters the group on the caller's behalf
dc->dc_data = dg;
// then submits like a plain async function
_dispatch_continuation_async(dq, dc, qos, dc->dc_flags);
}
由上源码,dispatch_group_async()
内部调了dispatch_group_enter(dg)
,但是dispatch_group_leave()
呢?--> 猜测应该在block
任务执行完毕后隐式地在内部调用了。
全局搜索_dispatch_client_callout
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_continuation_with_group_invoke(dispatch_continuation_t dc)
{
struct dispatch_object_s *dou = dc->dc_data;
unsigned long type = dx_type(dou);
if (type == DISPATCH_GROUP_TYPE) {// is dc_data really a dispatch group?
_dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
_dispatch_trace_item_complete(dc);
// the matching leave for the enter done inside dispatch_group_async
dispatch_group_leave((dispatch_group_t)dou);
} else {
DISPATCH_INTERNAL_CRASH(dx_type(dou), "Unexpected object type");
}
}
验证了:调度组group
进行调用client_callout
--> trace_item_complete
完成后执行了dispatch_group_leave()
.
以上。