In the preparation phase of the allocation process we worked our way down to Heap's AllocObjectWithAllocator() method.
In this article we examine the actual object memory allocation process in detail.
ART Object Allocation Explained: The Memory Allocation Phase
The AllocObjectWithAllocator Method
Let's start with Heap's AllocObjectWithAllocator() method (location: /art/runtime/gc/heap-inl.h):
template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self,
ObjPtr<mirror::Class> klass,
size_t byte_count,
AllocatorType allocator,
const PreFenceVisitor& pre_fence_visitor) {
  ...
  // Check for large objects. The large-object allocation path calls back into this
  // method, so we must check here to avoid infinite recursion.
ObjPtr<mirror::Object> obj;
if (kCheckLargeObject && UNLIKELY(ShouldAllocLargeObject(klass, byte_count))) {
obj = AllocLargeObject<kInstrumented, PreFenceVisitor>(self, &klass, byte_count,
pre_fence_visitor);
if (obj != nullptr) {
return obj.Ptr();
} else {
// There should be an OOM exception, since we are retrying, clear it.
self->ClearException();
}
// If the large object allocation failed, try to use the normal spaces (main space,
// non moving space). This can happen if there is significant virtual address space
// fragmentation.
}
// bytes allocated for the (individual) object.
  size_t bytes_allocated;
size_t usable_size;
size_t new_num_bytes_allocated = 0;
  if (IsTLABAllocator(allocator)) {  // allocating via a TLAB allocator
    byte_count = RoundUp(byte_count, space::BumpPointerSpace::kAlignment);  // align the object size
}
// If we have a thread local allocation we don't need to update bytes allocated.
  if (IsTLABAllocator(allocator) && byte_count <= self->TlabSize()) {  // the request fits in the thread's TLAB
obj = self->AllocTlab(byte_count);
DCHECK(obj != nullptr) << "AllocTlab can't fail";
obj->SetClass(klass);
if (kUseBakerReadBarrier) {
obj->AssertReadBarrierState();
}
bytes_allocated = byte_count;
usable_size = bytes_allocated;
pre_fence_visitor(obj, usable_size);
QuasiAtomic::ThreadFenceForConstructor();
} else if (
!kInstrumented && allocator == kAllocatorTypeRosAlloc &&
(obj = rosalloc_space_->AllocThreadLocal(self, byte_count, &bytes_allocated)) != nullptr &&
      LIKELY(obj != nullptr)) {  // try a thread-local allocation in the RosAllocSpace
DCHECK(!is_running_on_memory_tool_);
obj->SetClass(klass);
if (kUseBakerReadBarrier) {
obj->AssertReadBarrierState();
}
usable_size = bytes_allocated;
pre_fence_visitor(obj, usable_size);
QuasiAtomic::ThreadFenceForConstructor();
} else {
// bytes allocated that takes bulk thread-local buffer allocations into account.
size_t bytes_tl_bulk_allocated = 0;
obj = TryToAllocate<kInstrumented, false>(self, allocator, byte_count, &bytes_allocated,
&usable_size, &bytes_tl_bulk_allocated);
if (UNLIKELY(obj == nullptr)) {
// AllocateInternalWithGc can cause thread suspension, if someone instruments the entrypoints
// or changes the allocator in a suspend point here, we need to retry the allocation.
obj = AllocateInternalWithGc(self,
allocator,
kInstrumented,
byte_count,
&bytes_allocated,
&usable_size,
                                    &bytes_tl_bulk_allocated, &klass);  // allocate after running a GC
if (obj == nullptr) {
// The only way that we can get a null return if there is no pending exception is if the
// allocator or instrumentation changed.
        if (!self->IsExceptionPending()) {  // the allocator type changed; retry the allocation
// AllocObject will pick up the new allocator type, and instrumented as true is the safe
// default.
return AllocObject</*kInstrumented*/true>(self,
klass,
byte_count,
pre_fence_visitor);
}
return nullptr;
}
}
DCHECK_GT(bytes_allocated, 0u);
DCHECK_GT(usable_size, 0u);
    obj->SetClass(klass);  // set the class of the newly created object
if (kUseBakerReadBarrier) {
obj->AssertReadBarrierState();
}
if (collector::SemiSpace::kUseRememberedSet && UNLIKELY(allocator == kAllocatorTypeNonMoving)) {
// (Note this if statement will be constant folded away for the
// fast-path quick entry points.) Because SetClass() has no write
// barrier, if a non-moving space allocation, we need a write
// barrier as the class pointer may point to the bump pointer
// space (where the class pointer is an "old-to-young" reference,
// though rare) under the GSS collector with the remembered set
// enabled. We don't need this for kAllocatorTypeRosAlloc/DlMalloc
// cases because we don't directly allocate into the main alloc
// space (besides promotions) under the SS/GSS collector.
WriteBarrierField(obj, mirror::Object::ClassOffset(), klass);
}
pre_fence_visitor(obj, usable_size);
QuasiAtomic::ThreadFenceForConstructor();
new_num_bytes_allocated = num_bytes_allocated_.FetchAndAddRelaxed(bytes_tl_bulk_allocated) +
bytes_tl_bulk_allocated;
if (bytes_tl_bulk_allocated > 0) {
// Only trace when we get an increase in the number of bytes allocated. This happens when
// obtaining a new TLAB and isn't often enough to hurt performance according to golem.
TraceHeapSize(new_num_bytes_allocated + bytes_tl_bulk_allocated);
}
}
if (kIsDebugBuild && Runtime::Current()->IsStarted()) {
CHECK_LE(obj->SizeOf(), usable_size);
}
// TODO: Deprecate.
if (kInstrumented) {
if (Runtime::Current()->HasStatsEnabled()) {
RuntimeStats* thread_stats = self->GetStats();
++thread_stats->allocated_objects;
thread_stats->allocated_bytes += bytes_allocated;
RuntimeStats* global_stats = Runtime::Current()->GetStats();
++global_stats->allocated_objects;
global_stats->allocated_bytes += bytes_allocated;
}
} else {
DCHECK(!Runtime::Current()->HasStatsEnabled());
}
if (kInstrumented) {
if (IsAllocTrackingEnabled()) {
// allocation_records_ is not null since it never becomes null after allocation tracking is
// enabled.
DCHECK(allocation_records_ != nullptr);
allocation_records_->RecordAllocation(self, &obj, bytes_allocated);
}
AllocationListener* l = alloc_listener_.LoadSequentiallyConsistent();
if (l != nullptr) {
// Same as above. We assume that a listener that was once stored will never be deleted.
// Otherwise we'd have to perform this under a lock.
l->ObjectAllocated(self, &obj, bytes_allocated);
}
} else {
DCHECK(!IsAllocTrackingEnabled());
}
if (AllocatorHasAllocationStack(allocator)) {
PushOnAllocationStack(self, &obj);
}
if (kInstrumented) {
if (gc_stress_mode_) {
CheckGcStressMode(self, &obj);
}
} else {
DCHECK(!gc_stress_mode_);
}
// IsGcConcurrent() isn't known at compile time so we can optimize by not checking it for
// the BumpPointer or TLAB allocators. This is nice since it allows the entire if statement to be
// optimized out. And for the other allocators, AllocatorMayHaveConcurrentGC is a constant since
// the allocator_type should be constant propagated.
if (AllocatorMayHaveConcurrentGC(allocator) && IsGcConcurrent()) {
CheckConcurrentGC(self, new_num_bytes_allocated, &obj);
}
VerifyObject(obj);
self->VerifyStack();
return obj.Ptr();
}
Key parameters:
- allocator specifies the allocator type, i.e. which space the object will be allocated in. AllocatorType is an enum defined as follows:
// Different types of allocators.
enum AllocatorType {
kAllocatorTypeBumpPointer, // Use BumpPointer allocator, has entrypoints.
kAllocatorTypeTLAB, // Use TLAB allocator, has entrypoints.
kAllocatorTypeRosAlloc, // Use RosAlloc allocator, has entrypoints.
kAllocatorTypeDlMalloc, // Use dlmalloc allocator, has entrypoints.
kAllocatorTypeNonMoving, // Special allocator for non moving objects, doesn't have entrypoints.
kAllocatorTypeLOS, // Large object space, also doesn't have entrypoints.
kAllocatorTypeRegion,
kAllocatorTypeRegionTLAB,
};
- pre_fence_visitor is a callback used to initialize the object on the current execution path right after allocation. For example, once an array object has been allocated, this callback immediately sets the array's length, which guarantees the completeness and consistency of the array object without having to take a lock in a multithreaded environment.
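To make the pattern concrete, here is a minimal, self-contained sketch of such a visitor. The types are illustrative stand-ins (the real code works with mirror::Array and passes a SetLengthVisitor-style functor); only the calling convention, operator()(obj, usable_size), matches the code above.

#include <cstddef>
#include <cstdint>

// Illustrative stand-in for a heap array object; real code uses mirror::Array.
struct FakeArray {
  uint32_t klass_ref;  // the class pointer lives in the object header
  int32_t length;      // array length, needed before the object is published
};

// A functor in the spirit of ART's SetLengthVisitor: AllocObjectWithAllocator
// calls it after the raw memory is obtained but before the constructor fence,
// so no other thread can ever observe an uninitialized length.
class SetLengthSketch {
 public:
  explicit SetLengthSketch(int32_t length) : length_(length) {}

  void operator()(FakeArray* obj, size_t usable_size) const {
    (void)usable_size;      // real visitors may also use the usable size
    obj->length = length_;  // initialize before the object becomes visible
  }

 private:
  const int32_t length_;
};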
The main work of AllocObjectWithAllocator:
1. Check whether this is a large object. Large objects are allocated in a separate space (the Large Object Space). For a large object, AllocLargeObject is called first; that method sets the allocator parameter to kAllocatorTypeLOS and then calls back into AllocObjectWithAllocator.
**A request counts as a large object only when all of the following conditions hold** (a code sketch of this predicate appears right after this step list):
1) The requested size is at least large_object_threshold_ (by default 3 * kPageSize, i.e. three pages).
2) The class being allocated is a primitive-type array (a byte, int, or boolean array, and so on) or a String.
3) kCheckLargeObject is true.
2. If the allocator type is kAllocatorTypeTLAB or kAllocatorTypeRegionTLAB and the requested size is no larger than the remaining space in the thread's TLAB, the object is allocated in the current ART runtime thread's TLAB (thread-local allocation buffer). Thread's AllocTlab method performs the allocation, and obj->SetClass(klass) then sets the class of the resulting object.
3. If allocator is kAllocatorTypeRosAlloc, a thread-local allocation in the RosAllocSpace is attempted.
4. Otherwise, TryToAllocate is called to perform the allocation.
5. If step 4 fails, AllocateInternalWithGc is called to allocate after running a GC.
6. If the allocation still fails after GC, the allocation has finally failed. There is one exception: if no exception is pending and the allocator type changed during the attempt, the template parameter kInstrumented is set to true and AllocObject is called to retry the allocation.
7. Once the object has been allocated successfully, SetClass(klass) is called on the new object to set its class.
8. If the kUseRememberedSet variable is true and the allocation took place in the non-moving space, a write barrier must be issued for the class pointer.
9. Some bookkeeping for instrumentation, allocation tracking, and debugging follows.
10. Finally, the newly created object is returned.
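The three conditions from step 1 map onto a simple predicate. Below is a hedged sketch of the size-and-type check; the FakeClass type and its fields are stand-ins for the queries the real code makes through mirror::Class, and the threshold corresponds to the large_object_threshold_ member.

#include <cstddef>

// Stand-in for the checks made on mirror::Class; names are illustrative.
struct FakeClass {
  bool is_primitive_array;  // byte[], int[], boolean[], ...
  bool is_string;
};

// Sketch of the ShouldAllocLargeObject() decision: the request must be large
// enough AND the class must be a primitive array or String. kCheckLargeObject
// is tested separately at the call site (see the code above).
bool ShouldAllocLargeObjectSketch(const FakeClass* klass,
                                  size_t byte_count,
                                  size_t large_object_threshold) {
  return byte_count >= large_object_threshold &&
         (klass->is_primitive_array || klass->is_string);
}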
Next we look at the two important methods used in this process: TryToAllocate and AllocateInternalWithGc.
The TryToAllocate Method
TryToAllocate (location: /art/runtime/gc/heap-inl.h):
template <const bool kInstrumented, const bool kGrow>
inline mirror::Object* Heap::TryToAllocate(Thread* self,
AllocatorType allocator_type,
size_t alloc_size,
size_t* bytes_allocated,
size_t* usable_size,
size_t* bytes_tl_bulk_allocated) {
if (allocator_type != kAllocatorTypeTLAB &&
allocator_type != kAllocatorTypeRegionTLAB &&
allocator_type != kAllocatorTypeRosAlloc &&
UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type, alloc_size, kGrow))) {
return nullptr;
}
mirror::Object* ret;
switch (allocator_type) {
case kAllocatorTypeBumpPointer: {
DCHECK(bump_pointer_space_ != nullptr);
alloc_size = RoundUp(alloc_size, space::BumpPointerSpace::kAlignment);
ret = bump_pointer_space_->AllocNonvirtual(alloc_size);
if (LIKELY(ret != nullptr)) {
*bytes_allocated = alloc_size;
*usable_size = alloc_size;
*bytes_tl_bulk_allocated = alloc_size;
}
break;
}
case kAllocatorTypeRosAlloc: {
if (kInstrumented && UNLIKELY(is_running_on_memory_tool_)) {
// If running on valgrind or asan, we should be using the instrumented path.
size_t max_bytes_tl_bulk_allocated = rosalloc_space_->MaxBytesBulkAllocatedFor(alloc_size);
if (UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type,
max_bytes_tl_bulk_allocated,
kGrow))) {
return nullptr;
}
ret = rosalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
bytes_tl_bulk_allocated);
} else {
DCHECK(!is_running_on_memory_tool_);
size_t max_bytes_tl_bulk_allocated =
rosalloc_space_->MaxBytesBulkAllocatedForNonvirtual(alloc_size);
if (UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type,
max_bytes_tl_bulk_allocated,
kGrow))) {
return nullptr;
}
if (!kInstrumented) {
DCHECK(!rosalloc_space_->CanAllocThreadLocal(self, alloc_size));
}
ret = rosalloc_space_->AllocNonvirtual(self,
alloc_size,
bytes_allocated,
usable_size,
bytes_tl_bulk_allocated);
}
break;
}
case kAllocatorTypeDlMalloc: {
if (kInstrumented && UNLIKELY(is_running_on_memory_tool_)) {
// If running on valgrind, we should be using the instrumented path.
ret = dlmalloc_space_->Alloc(self,
alloc_size,
bytes_allocated,
usable_size,
bytes_tl_bulk_allocated);
} else {
DCHECK(!is_running_on_memory_tool_);
ret = dlmalloc_space_->AllocNonvirtual(self,
alloc_size,
bytes_allocated,
usable_size,
bytes_tl_bulk_allocated);
}
break;
}
case kAllocatorTypeNonMoving: {
ret = non_moving_space_->Alloc(self,
alloc_size,
bytes_allocated,
usable_size,
bytes_tl_bulk_allocated);
break;
}
case kAllocatorTypeLOS: {
ret = large_object_space_->Alloc(self,
alloc_size,
bytes_allocated,
usable_size,
bytes_tl_bulk_allocated);
// Note that the bump pointer spaces aren't necessarily next to
// the other continuous spaces like the non-moving alloc space or
// the zygote space.
DCHECK(ret == nullptr || large_object_space_->Contains(ret));
break;
}
case kAllocatorTypeRegion: {
DCHECK(region_space_ != nullptr);
alloc_size = RoundUp(alloc_size, space::RegionSpace::kAlignment);
ret = region_space_->AllocNonvirtual<false>(alloc_size,
bytes_allocated,
usable_size,
bytes_tl_bulk_allocated);
break;
}
case kAllocatorTypeTLAB:
FALLTHROUGH_INTENDED;
case kAllocatorTypeRegionTLAB: {
DCHECK_ALIGNED(alloc_size, kObjectAlignment);
static_assert(space::RegionSpace::kAlignment == space::BumpPointerSpace::kAlignment,
"mismatched alignments");
static_assert(kObjectAlignment == space::BumpPointerSpace::kAlignment,
"mismatched alignments");
if (UNLIKELY(self->TlabSize() < alloc_size)) {
// kAllocatorTypeTLAB may be the allocator for region space TLAB if the GC is not marking,
// that is why the allocator is not passed down.
return AllocWithNewTLAB(self,
alloc_size,
kGrow,
bytes_allocated,
usable_size,
bytes_tl_bulk_allocated);
}
// The allocation can't fail.
ret = self->AllocTlab(alloc_size);
DCHECK(ret != nullptr);
*bytes_allocated = alloc_size;
*bytes_tl_bulk_allocated = 0; // Allocated in an existing buffer.
*usable_size = alloc_size;
break;
}
default: {
LOG(FATAL) << "Invalid allocator type";
ret = nullptr;
}
}
return ret;
}
First, if the request is not a TLAB allocation (kAllocatorTypeTLAB or kAllocatorTypeRegionTLAB), is not a kAllocatorTypeRosAlloc allocation, and the requested size would push the heap past its current limits, the allocation fails and nullptr is returned.
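Here is a simplified model of that footprint check. The field names mirror Heap's num_bytes_allocated_, max_allowed_footprint_, and growth_limit_, but this is only a sketch: the real IsOutOfMemoryOnAllocation reads the counter atomically and also declines to grow the soft limit when a concurrent GC is expected to free memory instead.

#include <cstddef>

// Sketch of Heap::IsOutOfMemoryOnAllocation(); illustrative only.
struct FootprintSketch {
  size_t num_bytes_allocated;    // bytes currently allocated
  size_t max_allowed_footprint;  // soft limit, may be raised when grow == true
  size_t growth_limit;           // hard limit, never exceeded

  bool IsOutOfMemoryOnAllocation(size_t alloc_size, bool grow) {
    const size_t new_footprint = num_bytes_allocated + alloc_size;
    if (new_footprint <= max_allowed_footprint) {
      return false;  // fits under the soft limit
    }
    if (new_footprint > growth_limit) {
      return true;   // over the hard limit: always out of memory
    }
    if (!grow) {
      return true;   // kGrow == false: refuse to expand the soft limit
    }
    max_allowed_footprint = new_footprint;  // kGrow == true: grow the heap
    return false;
  }
};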
The method then dispatches on the allocator type:
- kAllocatorTypeBumpPointer: allocates in the Bump Pointer Space by calling the AllocNonvirtual member function of the BumpPointerSpace object that Heap's bump_pointer_space_ member points to. The size is first rounded up to the space's alignment.
- kAllocatorTypeRosAlloc: allocates in the RosAlloc Space. Depending on kInstrumented and is_running_on_memory_tool_, it calls either Alloc or AllocNonvirtual on the RosAllocSpace object that Heap's rosalloc_space_ member points to.
- kAllocatorTypeDlMalloc: allocates in the DlMalloc Space by calling Alloc or AllocNonvirtual on the DlMallocSpace object that Heap's dlmalloc_space_ member points to (same criteria as the kAllocatorTypeRosAlloc case).
- kAllocatorTypeNonMoving: allocates in the Non-Moving Space by calling Alloc on the space that Heap's non_moving_space_ member points to, which is either a RosAllocSpace or a DlMallocSpace.
- kAllocatorTypeLOS: allocates in the Large Object Space by calling Alloc on the LargeObjectSpace object that Heap's large_object_space_ member points to.
- kAllocatorTypeRegion: allocates in the Region Space by calling AllocNonvirtual on the RegionSpace object that Heap's region_space_ member points to.
- kAllocatorTypeTLAB or kAllocatorTypeRegionTLAB: allocates from the current ART runtime thread's TLAB. If the remaining TLAB space is smaller than the requested size, Heap's AllocWithNewTLAB member function is called to obtain a fresh buffer and allocate from it; otherwise the allocation is served directly by the Thread object's AllocTlab member function. A bump-pointer sketch of this fast path follows this list.
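Here is a self-contained sketch of the TLAB fast path: sizes are rounded up to the space alignment (matching the effect of ART's RoundUp from base/bit_utils.h) and then served by bumping a thread-owned pointer. The type and field names are illustrative; the real state lives in Thread::tlsPtr_.

#include <cstddef>
#include <cstdint>

// Power-of-two round-up, equivalent in effect to ART's RoundUp().
constexpr size_t RoundUpSketch(size_t x, size_t alignment) {
  return (x + alignment - 1) & ~(alignment - 1);
}
static_assert(RoundUpSketch(13, 8) == 16, "13 bytes round up to 16");
static_assert(RoundUpSketch(16, 8) == 16, "aligned sizes are unchanged");

// Minimal model of a thread-local allocation buffer.
struct TlabSketch {
  uint8_t* pos;  // next free byte
  uint8_t* end;  // one past the last usable byte

  size_t Remaining() const { return static_cast<size_t>(end - pos); }

  // Bump-pointer allocation: no lock or atomic is needed because the buffer
  // is owned by exactly one thread. Returns nullptr when the caller must
  // fall back to AllocWithNewTLAB()/TryToAllocate().
  void* Alloc(size_t bytes, size_t alignment) {
    const size_t aligned = RoundUpSketch(bytes, alignment);
    if (Remaining() < aligned) {
      return nullptr;
    }
    uint8_t* ret = pos;
    pos += aligned;
    return ret;
  }
};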
The AllocateInternalWithGc Method
AllocateInternalWithGc (location: /art/runtime/gc/heap.cc):
mirror::Object* Heap::AllocateInternalWithGc(Thread* self,
AllocatorType allocator,
bool instrumented,
size_t alloc_size,
size_t* bytes_allocated,
size_t* usable_size,
size_t* bytes_tl_bulk_allocated,
ObjPtr<mirror::Class>* klass) {
bool was_default_allocator = allocator == GetCurrentAllocator();
// Make sure there is no pending exception since we may need to throw an OOME.
self->AssertNoPendingException();
DCHECK(klass != nullptr);
StackHandleScope<1> hs(self);
HandleWrapperObjPtr<mirror::Class> h(hs.NewHandleWrapper(klass));
// The allocation failed. If the GC is running, block until it completes, and then retry the
// allocation.
  collector::GcType last_gc = WaitForGcToComplete(kGcCauseForAlloc, self);  // if a GC is in progress, wait for it to finish
// If we were the default allocator but the allocator changed while we were suspended,
// abort the allocation.
  if ((was_default_allocator && allocator != GetCurrentAllocator()) ||  // if the allocator type changed, fail the allocation
      (!instrumented && EntrypointsInstrumented())) {
return nullptr;
}
  if (last_gc != collector::kGcTypeNone) {  // a GC just completed, so retry the allocation directly
// A GC was in progress and we blocked, retry allocation now that memory has been freed.
mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
usable_size, bytes_tl_bulk_allocated);
if (ptr != nullptr) {
return ptr;
}
}
  collector::GcType tried_type = next_gc_type_;  // the GC type that will be run next
const bool gc_ran =
      CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;  // run a GC that does not clear SoftReferences
if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
(!instrumented && EntrypointsInstrumented())) {
return nullptr;
}
if (gc_ran) {
mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
                                                  usable_size, bytes_tl_bulk_allocated);  // call TryToAllocate again to retry the allocation
if (ptr != nullptr) {
return ptr;
}
}
// Loop through our different Gc types and try to Gc until we get enough free memory.
for (collector::GcType gc_type : gc_plan_) {
if (gc_type == tried_type) {
continue;
}
// Attempt to run the collector, if we succeed, re-try the allocation.
const bool plan_gc_ran =
CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
(!instrumented && EntrypointsInstrumented())) {
return nullptr;
}
if (plan_gc_ran) {
// Did we free sufficient memory for the allocation to succeed?
mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
usable_size, bytes_tl_bulk_allocated);
if (ptr != nullptr) {
return ptr;
}
}
}
// Allocations have failed after GCs; this is an exceptional state.
// Try harder, growing the heap if necessary.
mirror::Object* ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
usable_size, bytes_tl_bulk_allocated);
if (ptr != nullptr) {
return ptr;
}
// Most allocations should have succeeded by now, so the heap is really full, really fragmented,
// or the requested size is really big. Do another GC, collecting SoftReferences this time. The
// VM spec requires that all SoftReferences have been collected and cleared before throwing
// OOME.
VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
<< " allocation";
// TODO: Run finalization, but this may cause more allocations to occur.
// We don't need a WaitForGcToComplete here either.
DCHECK(!gc_plan_.empty());
CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true);
if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
(!instrumented && EntrypointsInstrumented())) {
return nullptr;
}
ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size,
bytes_tl_bulk_allocated);
if (ptr == nullptr) {
const uint64_t current_time = NanoTime();
switch (allocator) {
case kAllocatorTypeRosAlloc:
// Fall-through.
case kAllocatorTypeDlMalloc: {
if (use_homogeneous_space_compaction_for_oom_ &&
current_time - last_time_homogeneous_space_compaction_by_oom_ >
min_interval_homogeneous_space_compaction_by_oom_) {
last_time_homogeneous_space_compaction_by_oom_ = current_time;
HomogeneousSpaceCompactResult result = PerformHomogeneousSpaceCompact();
// Thread suspension could have occurred.
if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
(!instrumented && EntrypointsInstrumented())) {
return nullptr;
}
switch (result) {
case HomogeneousSpaceCompactResult::kSuccess:
// If the allocation succeeded, we delayed an oom.
ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
usable_size, bytes_tl_bulk_allocated);
if (ptr != nullptr) {
count_delayed_oom_++;
}
break;
case HomogeneousSpaceCompactResult::kErrorReject:
// Reject due to disabled moving GC.
break;
case HomogeneousSpaceCompactResult::kErrorVMShuttingDown:
// Throw OOM by default.
break;
default: {
UNIMPLEMENTED(FATAL) << "homogeneous space compaction result: "
<< static_cast<size_t>(result);
UNREACHABLE();
}
}
          // Always print that we ran homogeneous space compaction since this can cause jank.
VLOG(heap) << "Ran heap homogeneous space compaction, "
<< " requested defragmentation "
<< count_requested_homogeneous_space_compaction_.LoadSequentiallyConsistent()
<< " performed defragmentation "
<< count_performed_homogeneous_space_compaction_.LoadSequentiallyConsistent()
<< " ignored homogeneous space compaction "
<< count_ignored_homogeneous_space_compaction_.LoadSequentiallyConsistent()
<< " delayed count = "
<< count_delayed_oom_.LoadSequentiallyConsistent();
}
break;
}
case kAllocatorTypeNonMoving: {
if (kUseReadBarrier) {
// DisableMovingGc() isn't compatible with CC.
break;
}
// Try to transition the heap if the allocation failure was due to the space being full.
if (!IsOutOfMemoryOnAllocation(allocator, alloc_size, /*grow*/ false)) {
// If we aren't out of memory then the OOM was probably from the non moving space being
// full. Attempt to disable compaction and turn the main space into a non moving space.
DisableMovingGc();
// Thread suspension could have occurred.
if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
(!instrumented && EntrypointsInstrumented())) {
return nullptr;
}
// If we are still a moving GC then something must have caused the transition to fail.
if (IsMovingGc(collector_type_)) {
MutexLock mu(self, *gc_complete_lock_);
// If we couldn't disable moving GC, just throw OOME and return null.
LOG(WARNING) << "Couldn't disable moving GC with disable GC count "
<< disable_moving_gc_count_;
} else {
LOG(WARNING) << "Disabled moving GC due to the non moving space being full";
ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
usable_size, bytes_tl_bulk_allocated);
}
}
break;
}
default: {
      // Do nothing for other allocators.
}
}
}
// If the allocation hasn't succeeded by this point, throw an OOM error.
if (ptr == nullptr) {
ThrowOutOfMemoryError(self, alloc_size, allocator);
}
return ptr;
}
1. First check the current GC state; if a GC is in progress, wait until it finishes.
2. Check whether the current allocator type has changed; if it has, fail the allocation.
3. If last_gc != collector::kGcTypeNone, a GC has just completed, so TryToAllocate can be called right away to retry the allocation.
4. Call CollectGarbageInternal to collect garbage without clearing SoftReferences.
5. If that GC ran, call TryToAllocate again to retry the allocation.
6. Walk the GC types in gc_plan_ from weakest to strongest, retrying the allocation after each collection until enough memory is free. TryToAllocate may be called several times during this process.
Note: none of the allocations above grow the heap.
7. Grow the heap and retry the allocation, by calling TryToAllocate with the template parameter kGrow set to true.
8. If the allocation still has not succeeded, run one more GC; this time SoftReferences are cleared.
9. Grow the heap and retry the allocation once more (again with kGrow set to true).
If that fails too, the handling depends on the allocator type (a condensed sketch of the whole escalation ladder follows this list):
- For kAllocatorTypeRosAlloc and kAllocatorTypeDlMalloc: if homogeneous space compaction for OOM is enabled and more than the minimum allowed interval has passed since the last such compaction, PerformHomogeneousSpaceCompact is called. If the compaction succeeds, TryToAllocate is called one last time.
- For kAllocatorTypeNonMoving: first check IsOutOfMemoryOnAllocation; if the heap limits were not actually exceeded, the failure probably came from the non-moving space being full, so the runtime tries to disable moving GC and turn the main space into a non-moving space. On success, TryToAllocate is called one last time.
If all of the above fails, an OutOfMemoryError is thrown.
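The overall escalation order is easier to see with the details stripped away. The following is a condensed, hedged sketch: TryToAllocateStub and RunGcStub are placeholders for Heap::TryToAllocate and Heap::CollectGarbageInternal, the initial wait-for-GC and the allocator-change/instrumentation checks are omitted, the first attempt with next_gc_type_ is folded into the loop, and the allocator-specific last resorts are left out.

#include <vector>

// Placeholder stubs so the control flow below is self-contained.
static void* TryToAllocateStub(bool /*grow*/) { return nullptr; }
static bool RunGcStub(int /*gc_type*/, bool /*clear_soft_refs*/) { return true; }

// Condensed sketch of the AllocateInternalWithGc escalation ladder.
// gc_plan must not be empty (the real code DCHECKs this).
void* AllocateInternalWithGcSketch(const std::vector<int>& gc_plan) {
  // 1) Run GC types from weakest to strongest, without growing the heap.
  for (int gc_type : gc_plan) {
    if (RunGcStub(gc_type, /*clear_soft_refs=*/false)) {
      if (void* p = TryToAllocateStub(/*grow=*/false)) return p;
    }
  }
  // 2) Retry while allowing the heap to grow (kGrow == true).
  if (void* p = TryToAllocateStub(/*grow=*/true)) return p;
  // 3) The final GC also clears SoftReferences, as the VM spec requires
  //    before an OutOfMemoryError may be thrown; then retry once more.
  RunGcStub(gc_plan.back(), /*clear_soft_refs=*/true);
  if (void* p = TryToAllocateStub(/*grow=*/true)) return p;
  return nullptr;  // the caller throws OutOfMemoryError
}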
Summary
The object allocation process
- AllocObjectWithAllocator performs the actual allocation of an object's memory.
- It first checks for a large object and, if the request qualifies, calls AllocLargeObject.
- If the TLAB conditions are met, the object is allocated in the current ART runtime thread's TLAB.
- If allocator is kAllocatorTypeRosAlloc, an allocation in the RosAllocSpace is attempted; otherwise TryToAllocate is called.
- If that fails, AllocateInternalWithGc allocates after running a GC.
- If the allocation still fails after GC, it has finally failed, with one exception: if no exception is pending and the allocator type changed, the template parameter kInstrumented is set to true and AllocObject is called to retry the allocation.
- After a successful allocation, SetClass(klass) is called on the new object to set its class.
- Some instrumentation- and debugging-related bookkeeping follows.
- Finally, the newly created object is returned.
Allocation after GC
- First check the current GC state; if a GC is in progress, wait until it finishes.
- If last_gc != collector::kGcTypeNone, a GC has just completed, so TryToAllocate can be called right away to retry the allocation.
- Call CollectGarbageInternal to collect garbage without clearing SoftReferences; if the GC ran, call TryToAllocate again.
- Walk the GC types from weakest to strongest, retrying after each collection until enough memory is free; TryToAllocate may be called several times.
- Grow the heap and retry the allocation.
- If that still fails, run one more GC; this time SoftReferences are cleared.
- Grow the heap and retry the allocation once more.
- If that fails, handle the failure per allocator type, e.g. homogeneous space compaction or disabling the moving GC (turning the main space into a non-moving space), then call TryToAllocate one last time.
- If every step fails, an OutOfMemoryError is thrown.