dispatch_once
void dispatch_once(dispatch_once_t *predicate, dispatch_block_t block);
The first parameter, predicate, is the predicate used to check whether the block supplied as the second parameter has already been invoked.
The second parameter is the block that will be invoked only once for the lifetime of the application. The block passed to dispatch_once is executed exactly once, and the call is thread-safe.
Creating a singleton with dispatch_once
+ (instancetype)sharedInstance {
    static dispatch_once_t onceToken;
    static id instance = nil;
    dispatch_once(&onceToken, ^{
        instance = [[self alloc] init];
    });
    return instance;
}
Using dispatch_once simplifies the code and guarantees thread safety outright; the developer never needs to worry about locking or synchronization, because GCD handles all of it underneath. Since the exact same token must be used on every call, the token has to be declared static. Defining the variable at static scope guarantees that each execution of sharedInstance reuses the same variable rather than creating a new one. In addition, dispatch_once is more efficient than heavier synchronization constructs such as @synchronized.
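Because dispatch_once is a plain C API, the same pattern also works outside Objective-C. Below is a minimal, illustrative sketch (the token name init_token and the initializer init_shared_state are made up for this example) using the function-pointer variant dispatch_once_f, which is declared in once.h further down and takes a context pointer plus a function instead of a block:

#include <dispatch/dispatch.h>
#include <stdio.h>

// The token must live in static or global storage: it is zero-initialized
// there, and every call site must pass the exact same predicate.
static dispatch_once_t init_token;

static void init_shared_state(void *context) {
    // Runs exactly once, no matter how many threads race into dispatch_once_f.
    printf("initializing shared state\n");
}

int main(void) {
    for (int i = 0; i < 3; i++) {
        dispatch_once_f(&init_token, NULL, init_shared_state);
    }
    return 0;
}

Only the first call actually invokes init_shared_state; subsequent calls are caught by the inline fast path defined in once.h below.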
A brief analysis of dispatch_once
once.h
#ifndef __DISPATCH_ONCE__
#define __DISPATCH_ONCE__
#ifndef __DISPATCH_INDIRECT__
#error "Please #include <dispatch/dispatch.h> instead of this file directly."
#include <dispatch/base.h> // for HeaderDoc
#endif
__BEGIN_DECLS
/*!
* @typedef dispatch_once_t
*
* @abstract
* A predicate for use with dispatch_once(). It must be initialized to zero.
* Note: static and global variables default to zero.
*/
typedef long dispatch_once_t;
/*!
* @function dispatch_once
*
* @abstract
* Execute a block once and only once.
*
* @param predicate
* A pointer to a dispatch_once_t that is used to test whether the block has
* completed or not.
*
* @param block
* The block to execute once.
*
* @discussion
* Always call dispatch_once() before using or testing any variables that are
* initialized by the block.
*/
#ifdef __BLOCKS__
__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
void
dispatch_once(dispatch_once_t *predicate, dispatch_block_t block);
// Note this inline wrapper function
DISPATCH_INLINE DISPATCH_ALWAYS_INLINE DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
void
_dispatch_once(dispatch_once_t *predicate, dispatch_block_t block)
{
if (DISPATCH_EXPECT(*predicate, ~0l) != ~0l) {
dispatch_once(predicate, block);
}
}
#undef dispatch_once
#define dispatch_once _dispatch_once
#endif
__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW
void
dispatch_once_f(dispatch_once_t *predicate, void *context,
dispatch_function_t function);
DISPATCH_INLINE DISPATCH_ALWAYS_INLINE DISPATCH_NONNULL1 DISPATCH_NONNULL3
DISPATCH_NOTHROW
void
_dispatch_once_f(dispatch_once_t *predicate, void *context,
dispatch_function_t function)
{
if (DISPATCH_EXPECT(*predicate, ~0l) != ~0l) {
dispatch_once_f(predicate, context, function);
}
}
#undef dispatch_once_f
#define dispatch_once_f _dispatch_once_f
__END_DECLS
#endif
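Note the inline wrapper above: the header #undefs dispatch_once and #defines it to _dispatch_once, so every call first checks the predicate inline and only falls through to the real dispatch_once when the predicate is not yet ~0l. DISPATCH_EXPECT is a branch-prediction hint (on GCC/Clang it is essentially __builtin_expect), telling the compiler that the "already done" value ~0l is the common case. A minimal sketch of that fast path, with a hypothetical wrapper name, assuming a compiler that supports __builtin_expect and blocks:

#include <dispatch/dispatch.h>

// Sketch only: once *predicate holds ~0l, the expensive call is skipped
// entirely, so only the very first caller ever pays for the slow path.
static inline void
my_once_fast_path(dispatch_once_t *predicate, dispatch_block_t block)
{
    if (__builtin_expect(*predicate, ~0l) != ~0l) {
        dispatch_once(predicate, block);    // slow path, taken at most once
    }
}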
once.c
#include "internal.h"
#undef dispatch_once
#undef dispatch_once_f
struct _dispatch_once_waiter_s
{
volatile struct _dispatch_once_waiter_s *volatile dow_next;
_dispatch_thread_semaphore_t dow_sema;
};
#define DISPATCH_ONCE_DONE ((struct _dispatch_once_waiter_s *)~0l)
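// Note: DISPATCH_ONCE_DONE is the predicate value ~0l, i.e. the same "done"
// sentinel that the inline wrappers in once.h compare against.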
#ifdef __BLOCKS__
// 1. The entry point called by application code
void
dispatch_once(dispatch_once_t *val, dispatch_block_t block)
{
struct Block_basic *bb = (void *)block;
// 2. The actual logic lives in dispatch_once_f
dispatch_once_f(val, block, (void *)bb->Block_invoke);
}
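// The block-based entry point above simply peels the invoke function pointer
// out of the block's ABI layout (struct Block_basic) and forwards everything
// to dispatch_once_f, passing the block itself as the context argument.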
#endif
DISPATCH_NOINLINE
void
dispatch_once_f(dispatch_once_t *val, void *ctxt, dispatch_function_t func)
{
struct _dispatch_once_waiter_s * volatile *vval =
(struct _dispatch_once_waiter_s**)val;
// 3. Acts as a simple sentinel: this caller's own node
struct _dispatch_once_waiter_s dow = { NULL, 0 };
// 4. List pointers for the dispatch_once_t change requests that arrive while the dispatch_once block is executing
struct _dispatch_once_waiter_s *tail, *tmp;
// 5. Local variable used to pick up each queued waiter's semaphore while walking the list
_dispatch_thread_semaphore_t sema;
// 6. Compare-and-swap (taken only by the very first change request)
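// dispatch_atomic_cmpxchg(vval, NULL, &dow) atomically replaces a NULL
// predicate with &dow; exactly one thread wins this race and runs the block,
// while every later caller falls into the else branch further down.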
if (dispatch_atomic_cmpxchg(vval, NULL, &dow))
{
dispatch_atomic_acquire_barrier();
// 7. Invoke the block that was passed to dispatch_once
_dispatch_client_callout(ctxt, func);
// The next barrier must be long and strong.
//
// The scenario: SMP systems with weakly ordered memory models
// and aggressive out-of-order instruction execution.
//
// The problem:
//
// The dispatch_once*() wrapper macro causes the callee's
// instruction stream to look like this (pseudo-RISC):
//
// load r5, pred-addr
// cmpi r5, -1
// beq 1f
// call dispatch_once*()
// 1f:
// load r6, data-addr
//
// May be re-ordered like so:
//
// load r6, data-addr
// load r5, pred-addr
// cmpi r5, -1
// beq 1f
// call dispatch_once*()
// 1f:
//
// Normally, a barrier on the read side is used to workaround
// the weakly ordered memory model. But barriers are expensive
// and we only need to synchronize once! After func(ctxt)
// completes, the predicate will be marked as "done" and the
// branch predictor will correctly skip the call to
// dispatch_once*().
//
// A far faster alternative solution: Defeat the speculative
// read-ahead of peer CPUs.
//
// Modern architectures will throw away speculative results
// once a branch mis-prediction occurs. Therefore, if we can
// ensure that the predicate is not marked as being complete
// until long after the last store by func(ctxt), then we have
// defeated the read-ahead of peer CPUs.
//
// In other words, the last "store" by func(ctxt) must complete
// and then N cycles must elapse before ~0l is stored to *val.
// The value of N is whatever is sufficient to defeat the
// read-ahead mechanism of peer CPUs.
//
// On some CPUs, the most fully synchronizing instruction might
// need to be issued.
// On the write side, after executing the block, dispatch_once calls the
// dispatch_atomic_maximally_synchronizing_barrier() macro; on Intel processors
// this compiles down to a cpuid instruction.
dispatch_atomic_maximally_synchronizing_barrier();
//dispatch_atomic_release_barrier(); // assumed contained in above
// 8. Atomically mark the change request as DISPATCH_ONCE_DONE
tmp = dispatch_atomic_xchg(vval, DISPATCH_ONCE_DONE);
tail = &dow;
// 9. There are still pending change requests, so keep walking the list
while (tail != tmp)
{
// 10. If tmp's next pointer has not been filled in yet, spin for a moment; the hardware pause hint lets the CPU cut needless work, improving performance and saving power.
while (!tmp->dow_next)
{
_dispatch_hardware_pause();
}
// 11. Take this waiter's semaphore and signal it: this change request is finished, the next one is up
sema = tmp->dow_sema;
tmp = (struct _dispatch_once_waiter_s*)tmp->dow_next;
_dispatch_thread_semaphore_signal(sema);
}
} else
{
// 12. Not the first request, so this branch is taken
dow.dow_sema = _dispatch_get_thread_semaphore();
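// _dispatch_get_thread_semaphore()/_dispatch_put_thread_semaphore() appear to
// hand out and recycle a semaphore cached per thread, so a contended
// dispatch_once does not create a fresh semaphore for every waiter.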
// 13. Loop over the state: if it is already DISPATCH_ONCE_DONE, break out straight away.
// This check also avoids the deadlock that would occur if the semaphore
// were signaled before the wait below gets a chance to run.
for (;;)
{
tmp = *vval;
if (tmp == DISPATCH_ONCE_DONE)
{
break;
}
dispatch_atomic_store_barrier();
// 14. The block currently being executed by dispatch_once has not finished,
// so append this later request to the waiter list and block on its semaphore
if (dispatch_atomic_cmpxchg(vval, tmp, &dow))
{
dow.dow_next = tmp;
_dispatch_thread_semaphore_wait(dow.dow_sema);
}
}
_dispatch_put_thread_semaphore(dow.dow_sema);
}
}
dispatch_once is not simply a matter of "run once"; the internals are fairly involved. Before the block is executed for the first time, onceToken changes from NULL to a pointer to the first caller's node (&dow). dispatch_once can accept multiple concurrent requests and maintains a linked list of them internally. If other callers arrive before the block has finished, they are put on a waiter list (the code in the else branch), and each caller on that list waits on a semaphore (dow.dow_sema). After the block completes, besides setting onceToken to DISPATCH_ONCE_DONE, the first caller walks the waiter list and signals each waiter's semaphore to announce that the call has finished.
The rough flow of dispatch_once
While thread A is executing the block, every other thread must wait.
As soon as thread A finishes the block, it should mark the task as done and then walk the semaphore chain to wake all waiting threads.
While thread A is walking the semaphore chain and signaling, any new thread entering the function should return immediately without waiting.
While thread A is walking the semaphore chain and signaling, if some other waiting thread B is still updating (or trying to update) the chain, thread B must still be able to finish its task correctly: (a) return directly, or (b) wait on its semaphore and be woken shortly afterwards.
While thread B is constructing its semaphore node, it must allow for thread A changing state at any moment (waiting, done, or walking the chain).
While thread B is constructing its semaphore node, it must also allow for another thread C that may be updating (or trying to update) the chain at the same time; both B and C must be able to finish correctly: (a) add their node to the chain and wait on their semaphore, or (b) notice that thread A has already marked the work done, destroy the semaphore, and return.
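These rules are exactly what the lock-free code above enforces with the compare-and-swap on the predicate, the intrusive waiter list and the per-thread semaphores. As a rough, hedged model of the same three-state machine (illustration only: the names my_once_t, my_once_f and MY_ONCE_INIT are invented here, and a pthread mutex plus condition variable stand in for the real lock-free waiter list), the logic looks roughly like this:

#include <pthread.h>

// Illustrative model only -- NOT how libdispatch implements it. The real code
// is lock-free: a CAS on the predicate, an intrusive list of waiters and
// per-thread semaphores replace the mutex/condvar used here.
typedef struct {
    long            state;   // 0 = never run, 1 = block running, ~0l = done
    pthread_mutex_t lock;
    pthread_cond_t  done;
} my_once_t;

// e.g. static my_once_t g_once = MY_ONCE_INIT;
#define MY_ONCE_INIT { 0, PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER }

static void my_once_f(my_once_t *once, void *ctxt, void (*func)(void *))
{
    // Fast path: after the first completion, this load is the only work done.
    if (__atomic_load_n(&once->state, __ATOMIC_ACQUIRE) == ~0l) {
        return;
    }
    pthread_mutex_lock(&once->lock);
    if (__atomic_load_n(&once->state, __ATOMIC_RELAXED) == 0) {
        __atomic_store_n(&once->state, 1, __ATOMIC_RELAXED);   // we won the race
        pthread_mutex_unlock(&once->lock);
        func(ctxt);                                  // executed exactly once
        pthread_mutex_lock(&once->lock);
        __atomic_store_n(&once->state, ~0l, __ATOMIC_RELEASE); // mark "done"
        pthread_cond_broadcast(&once->done);         // wake every waiting thread
    } else {
        // Someone else is running func; wait until it is marked done.
        while (__atomic_load_n(&once->state, __ATOMIC_RELAXED) != ~0l) {
            pthread_cond_wait(&once->done, &once->lock);
        }
    }
    pthread_mutex_unlock(&once->lock);
}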
Key points
You frequently need to write thread-safe code that should run only once. The dispatch_once function provided by GCD makes this easy to implement.
The token should be declared at static or global scope, so that the same token is passed in each time the run-once block is handed to dispatch_once.