OpenJDK (HotSpot) Garbage Collection Source Code Walkthrough
Code location: the gc_interface directory (the common collector-facing interface) and the gc_implementation directory (the concrete collector implementations).
These are the garbage collectors that ship with JDK 1.7.
Let's start with the code of the common interface class, CollectedHeap, in collectedHeap.cpp.
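Before diving into the listing, it helps to keep one piece of it in mind: the TLAB slow path. CollectedHeap::allocate_from_tlab_slow() is called when an allocation does not fit into the current thread-local allocation buffer. It either keeps the TLAB (because the free space left in it is still above the refill-waste limit, so throwing it away would waste too much) and returns NULL so the object goes into the shared space, or it retires the TLAB and requests a fresh, zeroed one. Below is a minimal standalone sketch of just that keep-or-refill decision; the Tlab struct, its fields and the numbers in main() are made up for illustration and are not HotSpot types.

#include <cstddef>
#include <cstdio>

// Simplified stand-in for a thread-local allocation buffer (illustrative only).
struct Tlab {
    size_t capacity_words;
    size_t used_words;
    size_t refill_waste_limit_words;   // how much free space we may throw away

    size_t free_words() const { return capacity_words - used_words; }
};

// Mirrors the shape of CollectedHeap::allocate_from_tlab_slow():
// true  -> retire the TLAB and refill it,
// false -> keep the TLAB and allocate this object in the shared space.
bool should_refill_tlab(const Tlab& tlab) {
    // Too much free space left in the TLAB: keep it, allocate the
    // object outside the TLAB instead of wasting the remainder.
    if (tlab.free_words() > tlab.refill_waste_limit_words) {
        return false;
    }
    // Otherwise discard the nearly-full TLAB and grab a new one.
    return true;
}

int main() {
    Tlab nearly_full{1024, 1000, 64};   // 24 words free, limit 64 -> refill
    Tlab half_empty {1024, 400, 64};    // 624 words free, limit 64 -> keep

    std::printf("nearly full: refill=%d\n", should_refill_tlab(nearly_full));
    std::printf("half empty : refill=%d\n", should_refill_tlab(half_empty));
    return 0;
}

With that picture in mind, here is the full collectedHeap.cpp: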
/** Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.** This code is free software; you can redistribute it and/or modify it* under the terms of the GNU General Public License version 2 only, as* published by the Free Software Foundation.** This code is distributed in the hope that it will be useful, but WITHOUT* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License* version 2 for more details (a copy is included in the LICENSE file that* accompanied this code).** You should have received a copy of the GNU General Public License version* 2 along with this work; if not, write to the Free Software Foundation,* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.** Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA* or visit www.oracle.com if you need additional information or have any* questions.**/#include "precompiled.hpp" #include "classfile/systemDictionary.hpp" #include "gc_implementation/shared/vmGCOperations.hpp" #include "gc_interface/collectedHeap.hpp" #include "gc_interface/collectedHeap.inline.hpp" #include "oops/oop.inline.hpp" #include "runtime/init.hpp" #include "services/heapDumper.hpp" #ifdef TARGET_OS_FAMILY_linux # include "thread_linux.inline.hpp" #endif #ifdef TARGET_OS_FAMILY_solaris # include "thread_solaris.inline.hpp" #endif #ifdef TARGET_OS_FAMILY_windows # include "thread_windows.inline.hpp" #endif#ifdef ASSERT int CollectedHeap::_fire_out_of_memory_count = 0; #endifsize_t CollectedHeap::_filler_array_max_size = 0;// Memory state functions.CollectedHeap::CollectedHeap() : _n_par_threads(0){const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));const size_t elements_per_word = HeapWordSize / sizeof(jint);_filler_array_max_size = align_object_size(filler_array_hdr_size() +max_len * elements_per_word);_barrier_set = NULL;_is_gc_active = false;_total_collections = _total_full_collections = 0;_gc_cause = _gc_lastcause = GCCause::_no_gc;NOT_PRODUCT(_promotion_failure_alot_count = 0;)NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)if (UsePerfData) {EXCEPTION_MARK;// create the gc cause jvmstat counters_perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",80, GCCause::to_string(_gc_cause), CHECK);_perf_gc_lastcause =PerfDataManager::create_string_variable(SUN_GC, "lastCause",80, GCCause::to_string(_gc_lastcause), CHECK);}_defer_initial_card_mark = false; // strengthened by subclass in pre_initialize() below. }void CollectedHeap::pre_initialize() {// Used for ReduceInitialCardMarks (when COMPILER2 is used);// otherwise remains unused. 
#ifdef COMPILER2_defer_initial_card_mark = ReduceInitialCardMarks && can_elide_tlab_store_barriers()&& (DeferInitialCardMark || card_mark_must_follow_store()); #elseassert(_defer_initial_card_mark == false, "Who would set it?"); #endif }#ifndef PRODUCT void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {if (CheckMemoryInitialization && ZapUnusedHeapArea) {for (size_t slot = 0; slot < size; slot += 1) {assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal),"Found badHeapWordValue in post-allocation check");}} }void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {if (CheckMemoryInitialization && ZapUnusedHeapArea) {for (size_t slot = 0; slot < size; slot += 1) {assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),"Found non badHeapWordValue in pre-allocation check");}} } #endif // PRODUCT#ifdef ASSERT void CollectedHeap::check_for_valid_allocation_state() {Thread *thread = Thread::current();// How to choose between a pending exception and a potential// OutOfMemoryError? Don't allow pending exceptions.// This is a VM policy failure, so how do we exhaustively test it?assert(!thread->has_pending_exception(),"shouldn't be allocating with pending exception");if (StrictSafepointChecks) {assert(thread->allow_allocation(),"Allocation done by thread for which allocation is blocked ""by No_Allocation_Verifier!");// Allocation of an oop can always invoke a safepoint,// hence, the true argumentthread->check_for_valid_safepoint_state(true);} } #endifHeapWord* CollectedHeap::allocate_from_tlab_slow(Thread* thread, size_t size) {// Retain tlab and allocate object in shared space if// the amount free in the tlab is too large to discard.if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {thread->tlab().record_slow_allocation(size);return NULL;}// Discard tlab and allocate a new one.// To minimize fragmentation, the last TLAB may be smaller than the rest.size_t new_tlab_size = thread->tlab().compute_size(size);thread->tlab().clear_before_allocation();if (new_tlab_size == 0) {return NULL;}// Allocate a new TLAB...HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size);if (obj == NULL) {return NULL;}if (ZeroTLAB) {// ..and clear it.Copy::zero_to_words(obj, new_tlab_size);} else {// ...and clear just the allocated object.Copy::zero_to_words(obj, size);}thread->tlab().fill(obj, obj + size, new_tlab_size);return obj; }void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {MemRegion deferred = thread->deferred_card_mark();if (!deferred.is_empty()) {assert(_defer_initial_card_mark, "Otherwise should be empty");{// Verify that the storage points to a parsable object in heapDEBUG_ONLY(oop old_obj = oop(deferred.start());)assert(is_in(old_obj), "Not in allocated heap");assert(!can_elide_initializing_store_barrier(old_obj),"Else should have been filtered in new_store_pre_barrier()");assert(!is_in_permanent(old_obj), "Sanity: not expected");assert(old_obj->is_oop(true), "Not an oop");assert(old_obj->is_parsable(), "Will not be concurrently parsable");assert(deferred.word_size() == (size_t)(old_obj->size()),"Mismatch: multiple objects?");}BarrierSet* bs = barrier_set();assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");bs->write_region(deferred);// "Clear" the deferred_card_mark fieldthread->set_deferred_card_mark(MemRegion());}assert(thread->deferred_card_mark().is_empty(), "invariant"); }// Helper for ReduceInitialCardMarks. 
For performance, // compiled code may elide card-marks for initializing stores // to a newly allocated object along the fast-path. We // compensate for such elided card-marks as follows: // (a) Generational, non-concurrent collectors, such as // GenCollectedHeap(ParNew,DefNew,Tenured) and // ParallelScavengeHeap(ParallelGC, ParallelOldGC) // need the card-mark if and only if the region is // in the old gen, and do not care if the card-mark // succeeds or precedes the initializing stores themselves, // so long as the card-mark is completed before the next // scavenge. For all these cases, we can do a card mark // at the point at which we do a slow path allocation // in the old gen, i.e. in this call. // (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires // in addition that the card-mark for an old gen allocated // object strictly follow any associated initializing stores. // In these cases, the memRegion remembered below is // used to card-mark the entire region either just before the next // slow-path allocation by this thread or just before the next scavenge or // CMS-associated safepoint, whichever of these events happens first. // (The implicit assumption is that the object has been fully // initialized by this point, a fact that we assert when doing the // card-mark.) // (c) G1CollectedHeap(G1) uses two kinds of write barriers. When a // G1 concurrent marking is in progress an SATB (pre-write-)barrier is // is used to remember the pre-value of any store. Initializing // stores will not need this barrier, so we need not worry about // compensating for the missing pre-barrier here. Turning now // to the post-barrier, we note that G1 needs a RS update barrier // which simply enqueues a (sequence of) dirty cards which may // optionally be refined by the concurrent update threads. Note // that this barrier need only be applied to a non-young write, // but, like in CMS, because of the presence of concurrent refinement // (much like CMS' precleaning), must strictly follow the oop-store. // Thus, using the same protocol for maintaining the intended // invariants turns out, serendepitously, to be the same for both // G1 and CMS. // // For any future collector, this code should be reexamined with // that specific collector in mind, and the documentation above suitably // extended and updated. 
oop CollectedHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {// If a previous card-mark was deferred, flush it now.flush_deferred_store_barrier(thread);if (can_elide_initializing_store_barrier(new_obj)) {// The deferred_card_mark region should be empty// following the flush above.assert(thread->deferred_card_mark().is_empty(), "Error");} else {MemRegion mr((HeapWord*)new_obj, new_obj->size());assert(!mr.is_empty(), "Error");if (_defer_initial_card_mark) {// Defer the card markthread->set_deferred_card_mark(mr);} else {// Do the card markBarrierSet* bs = barrier_set();assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");bs->write_region(mr);}}return new_obj; }size_t CollectedHeap::filler_array_hdr_size() {return size_t(align_object_offset(arrayOopDesc::header_size(T_INT))); // align to Long }size_t CollectedHeap::filler_array_min_size() {return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment }size_t CollectedHeap::filler_array_max_size() {return _filler_array_max_size; }#ifdef ASSERT void CollectedHeap::fill_args_check(HeapWord* start, size_t words) {assert(words >= min_fill_size(), "too small to fill");assert(words % MinObjAlignment == 0, "unaligned size");assert(Universe::heap()->is_in_reserved(start), "not in heap");assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap"); }void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap) {if (ZapFillerObjects && zap) {Copy::fill_to_words(start + filler_array_hdr_size(),words - filler_array_hdr_size(), 0XDEAFBABE);} } #endif // ASSERTvoid CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap) {assert(words >= filler_array_min_size(), "too small for an array");assert(words <= filler_array_max_size(), "too big for a single object");const size_t payload_size = words - filler_array_hdr_size();const size_t len = payload_size * HeapWordSize / sizeof(jint);// Set the length first for concurrent GC.((arrayOop)start)->set_length((int)len);post_allocation_setup_common(Universe::intArrayKlassObj(), start, words);DEBUG_ONLY(zap_filler_array(start, words, zap);) }void CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap) {assert(words <= filler_array_max_size(), "too big for a single object");if (words >= filler_array_min_size()) {fill_with_array(start, words, zap);} else if (words > 0) {assert(words == min_fill_size(), "unaligned size");post_allocation_setup_common(SystemDictionary::Object_klass(), start,words);} }void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap) {DEBUG_ONLY(fill_args_check(start, words);)HandleMark hm; // Free handles before leaving.fill_with_object_impl(start, words, zap); }void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap) {DEBUG_ONLY(fill_args_check(start, words);)HandleMark hm; // Free handles before leaving.#ifdef _LP64// A single array can fill ~8G, so multiple objects are needed only in 64-bit.// First fill with arrays, ensuring that any remaining space is big enough to// fill. The remainder is filled with a single object.const size_t min = min_fill_size();const size_t max = filler_array_max_size();while (words > max) {const size_t cur = words - max >= min ? 
max : max - min;fill_with_array(start, cur, zap);start += cur;words -= cur;} #endiffill_with_object_impl(start, words, zap); }HeapWord* CollectedHeap::allocate_new_tlab(size_t size) {guarantee(false, "thread-local allocation buffers not supported");return NULL; }void CollectedHeap::ensure_parsability(bool retire_tlabs) {// The second disjunct in the assertion below makes a concession// for the start-up verification done while the VM is being// created. Callers be careful that you know that mutators// aren't going to interfere -- for instance, this is permissible// if we are still single-threaded and have either not yet// started allocating (nothing much to verify) or we have// started allocating but are now a full-fledged JavaThread// (and have thus made our TLAB's) available for filling.assert(SafepointSynchronize::is_at_safepoint() ||!is_init_completed(),"Should only be called at a safepoint or at start-up"" otherwise concurrent mutator activity may make heap "" unparsable again");const bool use_tlab = UseTLAB;const bool deferred = _defer_initial_card_mark;// The main thread starts allocating via a TLAB even before it// has added itself to the threads list at vm boot-up.assert(!use_tlab || Threads::first() != NULL,"Attempt to fill tlabs before main thread has been added"" to threads list is doomed to failure!");for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {if (use_tlab) thread->tlab().make_parsable(retire_tlabs); #ifdef COMPILER2// The deferred store barriers must all have been flushed to the// card-table (or other remembered set structure) before GC starts// processing the card-table (or other remembered set).if (deferred) flush_deferred_store_barrier(thread); #elseassert(!deferred, "Should be false");assert(thread->deferred_card_mark().is_empty(), "Should be empty"); #endif} }void CollectedHeap::accumulate_statistics_all_tlabs() {if (UseTLAB) {assert(SafepointSynchronize::is_at_safepoint() ||!is_init_completed(),"should only accumulate statistics on tlabs at safepoint");ThreadLocalAllocBuffer::accumulate_statistics_before_gc();} }void CollectedHeap::resize_all_tlabs() {if (UseTLAB) {assert(SafepointSynchronize::is_at_safepoint() ||!is_init_completed(),"should only resize tlabs at safepoint");ThreadLocalAllocBuffer::resize_all_tlabs();} }void CollectedHeap::pre_full_gc_dump() {if (HeapDumpBeforeFullGC) {TraceTime tt("Heap Dump: ", PrintGCDetails, false, gclog_or_tty);// We are doing a "major" collection and a heap dump before// major collection has been requested.HeapDumper::dump_heap();}if (PrintClassHistogramBeforeFullGC) {TraceTime tt("Class Histogram: ", PrintGCDetails, true, gclog_or_tty);VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */, false /* ! prologue */);inspector.doit();} }void CollectedHeap::post_full_gc_dump() {if (HeapDumpAfterFullGC) {TraceTime tt("Heap Dump", PrintGCDetails, false, gclog_or_tty);HeapDumper::dump_heap();}if (PrintClassHistogramAfterFullGC) {TraceTime tt("Class Histogram", PrintGCDetails, true, gclog_or_tty);VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */, false /* ! prologue */);inspector.doit();} }gcCause.cpp
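gcCause.cpp is much simpler: GCCause::to_string() maps the internal GCCause enum onto the human-readable strings that are exported through the SUN_GC "cause" and "lastCause" jvmstat counters created in the CollectedHeap constructor above and that, depending on logging flags, show up in GC output (for example "System.gc()" or "Allocation Failure"). A tiny standalone sketch of the same enum-to-string pattern; the GcCause enum below is a made-up subset for illustration, not the real enum from gcCause.hpp.

#include <cstdio>

// Illustrative subset only; the real GCCause enum has many more values.
enum class GcCause { NoGc, JavaLangSystemGc, AllocationFailure, GcLocker };

const char* to_string(GcCause cause) {
    switch (cause) {
        case GcCause::NoGc:              return "No GC";
        case GcCause::JavaLangSystemGc:  return "System.gc()";
        case GcCause::AllocationFailure: return "Allocation Failure";
        case GcCause::GcLocker:          return "GCLocker Initiated GC";
    }
    return "unknown GCCause";
}

int main() {
    std::printf("%s\n", to_string(GcCause::AllocationFailure));
    return 0;
}

The actual mapping in gcCause.cpp: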
Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.** This code is free software; you can redistribute it and/or modify it* under the terms of the GNU General Public License version 2 only, as* published by the Free Software Foundation.** This code is distributed in the hope that it will be useful, but WITHOUT* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License* version 2 for more details (a copy is included in the LICENSE file that* accompanied this code).** You should have received a copy of the GNU General Public License version* 2 along with this work; if not, write to the Free Software Foundation,* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.** Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA* or visit www.oracle.com if you need additional information or have any* questions.**/#include "precompiled.hpp" #include "gc_interface/gcCause.hpp"const char* GCCause::to_string(GCCause::Cause cause) {switch (cause) {case _java_lang_system_gc:return "System.gc()";case _full_gc_alot:return "FullGCAlot";case _scavenge_alot:return "ScavengeAlot";case _allocation_profiler:return "Allocation Profiler";case _jvmti_force_gc:return "JvmtiEnv ForceGarbageCollection";case _no_gc:return "No GC";case _allocation_failure:return "Allocation Failure";case _gc_locker:return "GCLocker Initiated GC";case _heap_inspection:return "Heap Inspection Initiated GC";case _heap_dump:return "Heap Dump Initiated GC";case _tenured_generation_full:return "Tenured Generation Full";case _permanent_generation_full:return "Permanent Generation Full";case _cms_generation_full:return "CMS Generation Full";case _cms_initial_mark:return "CMS Initial Mark";case _cms_final_remark:return "CMS Final Remark";case _old_generation_expanded_on_last_scavenge:return "Old Generation Expanded On Last Scavenge";case _old_generation_too_full_to_scavenge:return "Old Generation Too Full To Scavenge";case _g1_inc_collection_pause:return "G1 Evacuation Pause";case _last_ditch_collection:return "Last ditch collection";case _last_gc_cause:return "ILLEGAL VALUE - last gc cause - ILLEGAL VALUE";default:return "unknown GCCause";}ShouldNotReachHere(); }parNew 垃圾回收器:
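ParNew is the multi-threaded variant of the DefNew young-generation copying collector: a team of ParallelGCThreads GC workers evacuates live objects out of eden and the from-survivor space in parallel, copying each object either into the to-survivor space or, once its age reaches the tenuring threshold (or to-space runs out of room), into the old generation. It is selected with -XX:+UseParNewGC and is the young collector normally paired with CMS. The sketch below is a deliberately simplified, single-threaded illustration of that copy-or-promote decision; ObjHeader, choose_target and the values in main() are invented for the example and are not HotSpot names.

#include <cstdio>

// Illustrative object header: age counts how many young collections it survived.
struct ObjHeader {
    int  age;
    bool in_young_gen;
};

enum class CopyTarget { ToSurvivorSpace, OldGeneration };

// Simplified version of the decision a young-generation copying collector
// makes for every live object it evacuates.
CopyTarget choose_target(const ObjHeader& obj,
                         int tenuring_threshold,
                         bool to_space_has_room) {
    // Old enough, or no room left in to-space -> promote to the old generation.
    if (obj.age >= tenuring_threshold || !to_space_has_room) {
        return CopyTarget::OldGeneration;
    }
    // Otherwise copy into to-space (and the collector would bump the age).
    return CopyTarget::ToSurvivorSpace;
}

int main() {
    ObjHeader young{2, true};
    ObjHeader old_enough{15, true};

    std::printf("age 2 : %s\n",
        choose_target(young, 15, true) == CopyTarget::OldGeneration
            ? "promote" : "copy to to-space");
    std::printf("age 15: %s\n",
        choose_target(old_enough, 15, true) == CopyTarget::OldGeneration
            ? "promote" : "copy to to-space");
    return 0;
}

The rest of this section walks through two files: asParNewGeneration.cpp, the adaptive-size variant, and parNewGeneration.cpp, the collector proper.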
Next, asParNewGeneration.cpp.
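ASParNewGeneration ("adaptive size par new generation", as its name() method reports) is the flavor of ParNew used when UseAdaptiveSizePolicy is enabled together with CMS: after a collection the CMS size policy computes desired eden and survivor sizes, and resize()/resize_generation()/resize_spaces() grow or shrink the committed young generation to match, clamped between min_gen_size() and max_gen_size() and aligned to the OS page size. A rough standalone sketch of that clamp-then-grow-or-shrink decision follows; the helper names and the sizes in main() are my own, not the HotSpot implementation.

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Round up/down to an alignment that is a power of two (e.g. the OS page size).
size_t align_up(size_t v, size_t a)   { return (v + a - 1) & ~(a - 1); }
size_t align_down(size_t v, size_t a) { return v & ~(a - 1); }

// Simplified mirror of ASParNewGeneration::resize_generation():
// compute the new committed size for the young generation from the
// desired eden and survivor sizes, clamped to [min_gen, max_gen].
size_t desired_young_gen_size(size_t eden, size_t survivor,
                              size_t min_gen, size_t max_gen,
                              size_t page_size) {
    size_t eden_plus_survivors = align_up(eden + 2 * survivor, page_size);
    return std::max(std::min(eden_plus_survivors, max_gen), min_gen);
}

int main() {
    const size_t page    = 4096;
    const size_t min_gen = 8u  * 1024 * 1024;   // 8 MB
    const size_t max_gen = 64u * 1024 * 1024;   // 64 MB
    const size_t current = 32u * 1024 * 1024;   // currently committed size

    size_t desired = desired_young_gen_size(40u * 1024 * 1024,  // eden goal
                                            4u  * 1024 * 1024,  // survivor goal
                                            min_gen, max_gen, page);

    if (desired > current) {
        std::printf("grow by %zu KB\n", (desired - current) / 1024);
    } else if (desired < current) {
        std::printf("shrink by %zu KB\n",
                    align_down(current - desired, page) / 1024);
    } else {
        std::printf("size unchanged\n");
    }
    return 0;
}

The full asParNewGeneration.cpp listing: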
/** Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.** This code is free software; you can redistribute it and/or modify it* under the terms of the GNU General Public License version 2 only, as* published by the Free Software Foundation.** This code is distributed in the hope that it will be useful, but WITHOUT* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License* version 2 for more details (a copy is included in the LICENSE file that* accompanied this code).** You should have received a copy of the GNU General Public License version* 2 along with this work; if not, write to the Free Software Foundation,* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.** Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA* or visit www.oracle.com if you need additional information or have any* questions.**/#include "precompiled.hpp" #include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp" #include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp" #include "gc_implementation/parNew/asParNewGeneration.hpp" #include "gc_implementation/parNew/parNewGeneration.hpp" #include "gc_implementation/shared/markSweep.inline.hpp" #include "gc_implementation/shared/spaceDecorator.hpp" #include "memory/defNewGeneration.inline.hpp" #include "memory/referencePolicy.hpp" #include "oops/markOop.inline.hpp" #include "oops/oop.pcgc.inline.hpp"ASParNewGeneration::ASParNewGeneration(ReservedSpace rs,size_t initial_byte_size,size_t min_byte_size,int level) :ParNewGeneration(rs, initial_byte_size, level),_min_gen_size(min_byte_size) {}const char* ASParNewGeneration::name() const {return "adaptive size par new generation"; }void ASParNewGeneration::adjust_desired_tenuring_threshold() {assert(UseAdaptiveSizePolicy,"Should only be used with UseAdaptiveSizePolicy"); }void ASParNewGeneration::resize(size_t eden_size, size_t survivor_size) {// Resize the generation if needed. If the generation resize// reports false, do not attempt to resize the spaces.if (resize_generation(eden_size, survivor_size)) {// Then we lay out the spaces inside the generationresize_spaces(eden_size, survivor_size);space_invariants();if (PrintAdaptiveSizePolicy && Verbose) {gclog_or_tty->print_cr("Young generation size: ""desired eden: " SIZE_FORMAT " survivor: " SIZE_FORMAT" used: " SIZE_FORMAT " capacity: " SIZE_FORMAT" gen limits: " SIZE_FORMAT " / " SIZE_FORMAT,eden_size, survivor_size, used(), capacity(),max_gen_size(), min_gen_size());}} }size_t ASParNewGeneration::available_to_min_gen() {assert(virtual_space()->committed_size() >= min_gen_size(), "Invariant");return virtual_space()->committed_size() - min_gen_size(); }// This method assumes that from-space has live data and that // any shrinkage of the young gen is limited by location of // from-space. 
size_t ASParNewGeneration::available_to_live() const { #undef SHRINKS_AT_END_OF_EDEN #ifdef SHRINKS_AT_END_OF_EDENsize_t delta_in_survivor = 0;ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();const size_t space_alignment = heap->intra_heap_alignment();const size_t gen_alignment = heap->object_heap_alignment();MutableSpace* space_shrinking = NULL;if (from_space()->end() > to_space()->end()) {space_shrinking = from_space();} else {space_shrinking = to_space();}// Include any space that is committed but not included in// the survivor spaces.assert(((HeapWord*)virtual_space()->high()) >= space_shrinking->end(),"Survivor space beyond high end");size_t unused_committed = pointer_delta(virtual_space()->high(),space_shrinking->end(), sizeof(char));if (space_shrinking->is_empty()) {// Don't let the space shrink to 0assert(space_shrinking->capacity_in_bytes() >= space_alignment,"Space is too small");delta_in_survivor = space_shrinking->capacity_in_bytes() - space_alignment;} else {delta_in_survivor = pointer_delta(space_shrinking->end(),space_shrinking->top(),sizeof(char));}size_t delta_in_bytes = unused_committed + delta_in_survivor;delta_in_bytes = align_size_down(delta_in_bytes, gen_alignment);return delta_in_bytes; #else// The only space available for shrinking is in to-space if it// is above from-space.if (to()->bottom() > from()->bottom()) {const size_t alignment = os::vm_page_size();if (to()->capacity() < alignment) {return 0;} else {return to()->capacity() - alignment;}} else {return 0;} #endif }// Return the number of bytes available for resizing down the young // generation. This is the minimum of // input "bytes" // bytes to the minimum young gen size // bytes to the size currently being used + some small extra size_t ASParNewGeneration::limit_gen_shrink (size_t bytes) {// Allow shrinkage into the current eden but keep eden large enough// to maintain the minimum young gen sizebytes = MIN3(bytes, available_to_min_gen(), available_to_live());return align_size_down(bytes, os::vm_page_size()); }// Note that the the alignment used is the OS page size as // opposed to an alignment associated with the virtual space // (as is done in the ASPSYoungGen/ASPSOldGen) bool ASParNewGeneration::resize_generation(size_t eden_size,size_t survivor_size) {const size_t alignment = os::vm_page_size();size_t orig_size = virtual_space()->committed_size();bool size_changed = false;// There used to be this guarantee there.// guarantee ((eden_size + 2*survivor_size) <= _max_gen_size, "incorrect input arguments");// Code below forces this requirement. 
In addition the desired eden// size and disired survivor sizes are desired goals and may// exceed the total generation size.assert(min_gen_size() <= orig_size && orig_size <= max_gen_size(),"just checking");// Adjust new generation sizeconst size_t eden_plus_survivors =align_size_up(eden_size + 2 * survivor_size, alignment);size_t desired_size = MAX2(MIN2(eden_plus_survivors, max_gen_size()),min_gen_size());assert(desired_size <= max_gen_size(), "just checking");if (desired_size > orig_size) {// Grow the generationsize_t change = desired_size - orig_size;assert(change % alignment == 0, "just checking");if (expand(change)) {return false; // Error if we fail to resize!}size_changed = true;} else if (desired_size < orig_size) {size_t desired_change = orig_size - desired_size;assert(desired_change % alignment == 0, "just checking");desired_change = limit_gen_shrink(desired_change);if (desired_change > 0) {virtual_space()->shrink_by(desired_change);reset_survivors_after_shrink();size_changed = true;}} else {if (Verbose && PrintGC) {if (orig_size == max_gen_size()) {gclog_or_tty->print_cr("ASParNew generation size at maximum: "SIZE_FORMAT "K", orig_size/K);} else if (orig_size == min_gen_size()) {gclog_or_tty->print_cr("ASParNew generation size at minium: "SIZE_FORMAT "K", orig_size/K);}}}if (size_changed) {MemRegion cmr((HeapWord*)virtual_space()->low(),(HeapWord*)virtual_space()->high());GenCollectedHeap::heap()->barrier_set()->resize_covered_region(cmr);if (Verbose && PrintGC) {size_t current_size = virtual_space()->committed_size();gclog_or_tty->print_cr("ASParNew generation size changed: "SIZE_FORMAT "K->" SIZE_FORMAT "K",orig_size/K, current_size/K);}}guarantee(eden_plus_survivors <= virtual_space()->committed_size() ||virtual_space()->committed_size() == max_gen_size(), "Sanity");return true; }void ASParNewGeneration::reset_survivors_after_shrink() {GenCollectedHeap* gch = GenCollectedHeap::heap();HeapWord* new_end = (HeapWord*)virtual_space()->high();if (from()->end() > to()->end()) {assert(new_end >= from()->end(), "Shrinking past from-space");} else {assert(new_end >= to()->bottom(), "Shrink was too large");// Was there a shrink of the survivor space?if (new_end < to()->end()) {MemRegion mr(to()->bottom(), new_end);to()->initialize(mr,SpaceDecorator::DontClear,SpaceDecorator::DontMangle);}} } void ASParNewGeneration::resize_spaces(size_t requested_eden_size,size_t requested_survivor_size) {assert(UseAdaptiveSizePolicy, "sanity check");assert(requested_eden_size > 0 && requested_survivor_size > 0,"just checking");CollectedHeap* heap = Universe::heap();assert(heap->kind() == CollectedHeap::GenCollectedHeap, "Sanity");// We require eden and to space to be emptyif ((!eden()->is_empty()) || (!to()->is_empty())) {return;}size_t cur_eden_size = eden()->capacity();if (PrintAdaptiveSizePolicy && Verbose) {gclog_or_tty->print_cr("ASParNew::resize_spaces(requested_eden_size: "SIZE_FORMAT", requested_survivor_size: " SIZE_FORMAT ")",requested_eden_size, requested_survivor_size);gclog_or_tty->print_cr(" eden: [" PTR_FORMAT ".." PTR_FORMAT ") "SIZE_FORMAT,eden()->bottom(),eden()->end(),pointer_delta(eden()->end(),eden()->bottom(),sizeof(char)));gclog_or_tty->print_cr(" from: [" PTR_FORMAT ".." PTR_FORMAT ") "SIZE_FORMAT,from()->bottom(),from()->end(),pointer_delta(from()->end(),from()->bottom(),sizeof(char)));gclog_or_tty->print_cr(" to: [" PTR_FORMAT ".." 
PTR_FORMAT ") "SIZE_FORMAT,to()->bottom(),to()->end(),pointer_delta( to()->end(),to()->bottom(),sizeof(char)));}// There's nothing to do if the new sizes are the same as the currentif (requested_survivor_size == to()->capacity() &&requested_survivor_size == from()->capacity() &&requested_eden_size == eden()->capacity()) {if (PrintAdaptiveSizePolicy && Verbose) {gclog_or_tty->print_cr(" capacities are the right sizes, returning");}return;}char* eden_start = (char*)eden()->bottom();char* eden_end = (char*)eden()->end();char* from_start = (char*)from()->bottom();char* from_end = (char*)from()->end();char* to_start = (char*)to()->bottom();char* to_end = (char*)to()->end();const size_t alignment = os::vm_page_size();const bool maintain_minimum =(requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();// Check whether from space is below to spaceif (from_start < to_start) {// Eden, from, toif (PrintAdaptiveSizePolicy && Verbose) {gclog_or_tty->print_cr(" Eden, from, to:");}// Set eden// "requested_eden_size" is a goal for the size of eden// and may not be attainable. "eden_size" below is// calculated based on the location of from-space and// the goal for the size of eden. from-space is// fixed in place because it contains live data.// The calculation is done this way to avoid 32bit// overflow (i.e., eden_start + requested_eden_size// may too large for representation in 32bits).size_t eden_size;if (maintain_minimum) {// Only make eden larger than the requested size if// the minimum size of the generation has to be maintained.// This could be done in general but policy at a higher// level is determining a requested size for eden and that// should be honored unless there is a fundamental reason.eden_size = pointer_delta(from_start,eden_start,sizeof(char));} else {eden_size = MIN2(requested_eden_size,pointer_delta(from_start, eden_start, sizeof(char)));}eden_size = align_size_down(eden_size, alignment);eden_end = eden_start + eden_size;assert(eden_end >= eden_start, "addition overflowed");// To may resize into from space as long as it is clear of live data.// From space must remain page aligned, though, so we need to do some// extra calculations.// First calculate an optimal to-spaceto_end = (char*)virtual_space()->high();to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size,sizeof(char));// Does the optimal to-space overlap from-space?if (to_start < (char*)from()->end()) {// Calculate the minimum offset possible for from_endsize_t from_size = pointer_delta(from()->top(), from_start, sizeof(char));// Should we be in this method if from_space is empty? Why not the set_space method? FIX ME!if (from_size == 0) {from_size = alignment;} else {from_size = align_size_up(from_size, alignment);}from_end = from_start + from_size;assert(from_end > from_start, "addition overflow or from_size problem");guarantee(from_end <= (char*)from()->end(), "from_end moved to the right");// Now update to_start with the new from_endto_start = MAX2(from_end, to_start);} else {// If shrinking, move to-space down to abut the end of from-space// so that shrinking will move to-space down. 
If not shrinking// to-space is moving up to allow for growth on the next expansion.if (requested_eden_size <= cur_eden_size) {to_start = from_end;if (to_start + requested_survivor_size > to_start) {to_end = to_start + requested_survivor_size;}}// else leave to_end pointing to the high end of the virtual space.}guarantee(to_start != to_end, "to space is zero sized");if (PrintAdaptiveSizePolicy && Verbose) {gclog_or_tty->print_cr(" [eden_start .. eden_end): ""[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,eden_start,eden_end,pointer_delta(eden_end, eden_start, sizeof(char)));gclog_or_tty->print_cr(" [from_start .. from_end): ""[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,from_start,from_end,pointer_delta(from_end, from_start, sizeof(char)));gclog_or_tty->print_cr(" [ to_start .. to_end): ""[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,to_start,to_end,pointer_delta( to_end, to_start, sizeof(char)));}} else {// Eden, to, fromif (PrintAdaptiveSizePolicy && Verbose) {gclog_or_tty->print_cr(" Eden, to, from:");}// Calculate the to-space boundaries based on// the start of from-space.to_end = from_start;to_start = (char*)pointer_delta(from_start,(char*)requested_survivor_size,sizeof(char));// Calculate the ideal eden boundaries.// eden_end is already at the bottom of the generationassert(eden_start == virtual_space()->low(),"Eden is not starting at the low end of the virtual space");if (eden_start + requested_eden_size >= eden_start) {eden_end = eden_start + requested_eden_size;} else {eden_end = to_start;}// Does eden intrude into to-space? to-space// gets priority but eden is not allowed to shrink// to 0.if (eden_end > to_start) {eden_end = to_start;}// Don't let eden shrink down to 0 or less.eden_end = MAX2(eden_end, eden_start + alignment);assert(eden_start + alignment >= eden_start, "Overflow");size_t eden_size;if (maintain_minimum) {// Use all the space available.eden_end = MAX2(eden_end, to_start);eden_size = pointer_delta(eden_end, eden_start, sizeof(char));eden_size = MIN2(eden_size, cur_eden_size);} else {eden_size = pointer_delta(eden_end, eden_start, sizeof(char));}eden_size = align_size_down(eden_size, alignment);assert(maintain_minimum || eden_size <= requested_eden_size,"Eden size is too large");assert(eden_size >= alignment, "Eden size is too small");eden_end = eden_start + eden_size;// Move to-space down to eden.if (requested_eden_size < cur_eden_size) {to_start = eden_end;if (to_start + requested_survivor_size > to_start) {to_end = MIN2(from_start, to_start + requested_survivor_size);} else {to_end = from_start;}}// eden_end may have moved so again make sure// the to-space and eden don't overlap.to_start = MAX2(eden_end, to_start);// from-spacesize_t from_used = from()->used();if (requested_survivor_size > from_used) {if (from_start + requested_survivor_size >= from_start) {from_end = from_start + requested_survivor_size;}if (from_end > virtual_space()->high()) {from_end = virtual_space()->high();}}assert(to_start >= eden_end, "to-space should be above eden");if (PrintAdaptiveSizePolicy && Verbose) {gclog_or_tty->print_cr(" [eden_start .. eden_end): ""[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,eden_start,eden_end,pointer_delta(eden_end, eden_start, sizeof(char)));gclog_or_tty->print_cr(" [ to_start .. to_end): ""[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,to_start,to_end,pointer_delta( to_end, to_start, sizeof(char)));gclog_or_tty->print_cr(" [from_start .. from_end): ""[" PTR_FORMAT " .. 
" PTR_FORMAT ") " SIZE_FORMAT,from_start,from_end,pointer_delta(from_end, from_start, sizeof(char)));}}guarantee((HeapWord*)from_start <= from()->bottom(),"from start moved to the right");guarantee((HeapWord*)from_end >= from()->top(),"from end moved into live data");assert(is_object_aligned((intptr_t)eden_start), "checking alignment");assert(is_object_aligned((intptr_t)from_start), "checking alignment");assert(is_object_aligned((intptr_t)to_start), "checking alignment");MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)eden_end);MemRegion toMR ((HeapWord*)to_start, (HeapWord*)to_end);MemRegion fromMR((HeapWord*)from_start, (HeapWord*)from_end);// Let's make sure the call to initialize doesn't reset "top"!HeapWord* old_from_top = from()->top();// For PrintAdaptiveSizePolicy block belowsize_t old_from = from()->capacity();size_t old_to = to()->capacity();// If not clearing the spaces, do some checking to verify that// the spaces are already mangled.// Must check mangling before the spaces are reshaped. Otherwise,// the bottom or end of one space may have moved into another// a failure of the check may not correctly indicate which space// is not properly mangled.if (ZapUnusedHeapArea) {HeapWord* limit = (HeapWord*) virtual_space()->high();eden()->check_mangled_unused_area(limit);from()->check_mangled_unused_area(limit);to()->check_mangled_unused_area(limit);}// The call to initialize NULL's the next compaction spaceeden()->initialize(edenMR,SpaceDecorator::Clear,SpaceDecorator::DontMangle);eden()->set_next_compaction_space(from());to()->initialize(toMR ,SpaceDecorator::Clear,SpaceDecorator::DontMangle);from()->initialize(fromMR,SpaceDecorator::DontClear,SpaceDecorator::DontMangle);assert(from()->top() == old_from_top, "from top changed!");if (PrintAdaptiveSizePolicy) {GenCollectedHeap* gch = GenCollectedHeap::heap();assert(gch->kind() == CollectedHeap::GenCollectedHeap, "Sanity");gclog_or_tty->print("AdaptiveSizePolicy::survivor space sizes: ""collection: %d ""(" SIZE_FORMAT ", " SIZE_FORMAT ") -> ""(" SIZE_FORMAT ", " SIZE_FORMAT ") ",gch->total_collections(),old_from, old_to,from()->capacity(),to()->capacity());gclog_or_tty->cr();} }void ASParNewGeneration::compute_new_size() {GenCollectedHeap* gch = GenCollectedHeap::heap();assert(gch->kind() == CollectedHeap::GenCollectedHeap,"not a CMS generational heap");CMSAdaptiveSizePolicy* size_policy =(CMSAdaptiveSizePolicy*)gch->gen_policy()->size_policy();assert(size_policy->is_gc_cms_adaptive_size_policy(),"Wrong type of size policy");size_t survived = from()->used();if (!survivor_overflow()) {// Keep running averages on how much survivedsize_policy->avg_survived()->sample(survived);} else {size_t promoted =(size_t) next_gen()->gc_stats()->avg_promoted()->last_sample();assert(promoted < gch->capacity(), "Conversion problem?");size_t survived_guess = survived + promoted;size_policy->avg_survived()->sample(survived_guess);}size_t survivor_limit = max_survivor_size();_tenuring_threshold =size_policy->compute_survivor_space_size_and_threshold(_survivor_overflow,_tenuring_threshold,survivor_limit);size_policy->avg_young_live()->sample(used());size_policy->avg_eden_live()->sample(eden()->used());size_policy->compute_young_generation_free_space(eden()->capacity(),max_gen_size());resize(size_policy->calculated_eden_size_in_bytes(),size_policy->calculated_survivor_size_in_bytes());if (UsePerfData) {CMSGCAdaptivePolicyCounters* counters =(CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters();assert(counters->kind() 
==GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,"Wrong kind of counters");counters->update_tenuring_threshold(_tenuring_threshold);counters->update_survivor_overflowed(_survivor_overflow);counters->update_young_capacity(capacity());} }#ifndef PRODUCT // Changes from PSYoungGen version // value of "alignment" void ASParNewGeneration::space_invariants() {const size_t alignment = os::vm_page_size();// Currently, our eden size cannot shrink to zeroguarantee(eden()->capacity() >= alignment, "eden too small");guarantee(from()->capacity() >= alignment, "from too small");guarantee(to()->capacity() >= alignment, "to too small");// Relationship of spaces to each otherchar* eden_start = (char*)eden()->bottom();char* eden_end = (char*)eden()->end();char* from_start = (char*)from()->bottom();char* from_end = (char*)from()->end();char* to_start = (char*)to()->bottom();char* to_end = (char*)to()->end();guarantee(eden_start >= virtual_space()->low(), "eden bottom");guarantee(eden_start < eden_end, "eden space consistency");guarantee(from_start < from_end, "from space consistency");guarantee(to_start < to_end, "to space consistency");// Check whether from space is below to spaceif (from_start < to_start) {// Eden, from, toguarantee(eden_end <= from_start, "eden/from boundary");guarantee(from_end <= to_start, "from/to boundary");guarantee(to_end <= virtual_space()->high(), "to end");} else {// Eden, to, fromguarantee(eden_end <= to_start, "eden/to boundary");guarantee(to_end <= from_start, "to/from boundary");guarantee(from_end <= virtual_space()->high(), "from end");}// More checks that the virtual space is consistent with the spacesassert(virtual_space()->committed_size() >=(eden()->capacity() +to()->capacity() +from()->capacity()), "Committed size is inconsistent");assert(virtual_space()->committed_size() <= virtual_space()->reserved_size(),"Space invariant");char* eden_top = (char*)eden()->top();char* from_top = (char*)from()->top();char* to_top = (char*)to()->top();assert(eden_top <= virtual_space()->high(), "eden top");assert(from_top <= virtual_space()->high(), "from top");assert(to_top <= virtual_space()->high(), "to top"); } #endifparNewGeneration.cpp
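parNewGeneration.cpp contains the heart of the collector. Every GC worker gets a ParScanThreadState holding its own to-space PLAB (_to_space_alloc_buffer), its own age table, its own object-to-scan work queue, and optionally a private overflow stack. ParEvacuateFollowersClosure::do_void() then runs the classic work-stealing loop: drain the local queue (trim_queues), try to steal from another worker's queue, fall back to the shared overflow list, and only when all of that fails offer termination to the ParallelTaskTerminator. The sketch below is a very rough, single-threaded illustration of that drain/steal/terminate shape, using std::deque stand-ins instead of HotSpot's lock-free task queues; all names in it are invented.

#include <cstdio>
#include <deque>
#include <vector>

// Stand-in for an object reference; the real code passes oop values.
using Task = int;

// One per GC worker: a local queue of objects still to be scanned.
struct WorkerState {
    std::deque<Task> local_queue;
};

// Try to steal one task from any other worker's queue.
bool steal(std::vector<WorkerState>& workers, size_t self, Task& out) {
    for (size_t i = 0; i < workers.size(); ++i) {
        if (i == self || workers[i].local_queue.empty()) continue;
        out = workers[i].local_queue.back();   // steal from the far end
        workers[i].local_queue.pop_back();
        return true;
    }
    return false;
}

// Very rough shape of ParEvacuateFollowersClosure::do_void():
// drain local work, then steal, then terminate when everything is empty.
void evacuate_followers(std::vector<WorkerState>& workers, size_t self) {
    WorkerState& me = workers[self];
    while (true) {
        // 1. Drain the local queue ("trim_queues" in HotSpot).
        while (!me.local_queue.empty()) {
            Task obj = me.local_queue.front();
            me.local_queue.pop_front();
            std::printf("worker %zu scans object %d\n", self, obj);
            // Scanning an object may discover more work; omitted here.
        }
        // 2. No local work: try to steal from another worker.
        Task stolen;
        if (steal(workers, self, stolen)) {
            me.local_queue.push_back(stolen);
            continue;
        }
        // 3. Nothing to steal either: offer termination.
        break;
    }
}

int main() {
    std::vector<WorkerState> workers(2);
    workers[0].local_queue = {1, 2, 3};
    workers[1].local_queue = {4};

    evacuate_followers(workers, 0);   // drains its own work, then steals object 4
    evacuate_followers(workers, 1);   // finds nothing left and terminates
    return 0;
}

The parNewGeneration.cpp listing: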
/** Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.** This code is free software; you can redistribute it and/or modify it* under the terms of the GNU General Public License version 2 only, as* published by the Free Software Foundation.** This code is distributed in the hope that it will be useful, but WITHOUT* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License* version 2 for more details (a copy is included in the LICENSE file that* accompanied this code).** You should have received a copy of the GNU General Public License version* 2 along with this work; if not, write to the Free Software Foundation,* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.** Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA* or visit www.oracle.com if you need additional information or have any* questions.**/#include "precompiled.hpp" #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp" #include "gc_implementation/parNew/parGCAllocBuffer.hpp" #include "gc_implementation/parNew/parNewGeneration.hpp" #include "gc_implementation/parNew/parOopClosures.inline.hpp" #include "gc_implementation/shared/adaptiveSizePolicy.hpp" #include "gc_implementation/shared/ageTable.hpp" #include "gc_implementation/shared/spaceDecorator.hpp" #include "memory/defNewGeneration.inline.hpp" #include "memory/genCollectedHeap.hpp" #include "memory/genOopClosures.inline.hpp" #include "memory/generation.hpp" #include "memory/generation.inline.hpp" #include "memory/referencePolicy.hpp" #include "memory/resourceArea.hpp" #include "memory/sharedHeap.hpp" #include "memory/space.hpp" #include "oops/objArrayOop.hpp" #include "oops/oop.inline.hpp" #include "oops/oop.pcgc.inline.hpp" #include "runtime/handles.hpp" #include "runtime/handles.inline.hpp" #include "runtime/java.hpp" #include "runtime/thread.hpp" #include "utilities/copy.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/workgroup.hpp"#ifdef _MSC_VER #pragma warning( push ) #pragma warning( disable:4355 ) // 'this' : used in base member initializer list #endif ParScanThreadState::ParScanThreadState(Space* to_space_,ParNewGeneration* gen_,Generation* old_gen_,int thread_num_,ObjToScanQueueSet* work_queue_set_,Stack<oop>* overflow_stacks_,size_t desired_plab_sz_,ParallelTaskTerminator& term_) :_to_space(to_space_), _old_gen(old_gen_), _young_gen(gen_), _thread_num(thread_num_),_work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),_overflow_stack(overflow_stacks_ ? 
overflow_stacks_ + thread_num_ : NULL),_ageTable(false), // false ==> not the global age table, no perf data._to_space_alloc_buffer(desired_plab_sz_),_to_space_closure(gen_, this), _old_gen_closure(gen_, this),_to_space_root_closure(gen_, this), _old_gen_root_closure(gen_, this),_older_gen_closure(gen_, this),_evacuate_followers(this, &_to_space_closure, &_old_gen_closure,&_to_space_root_closure, gen_, &_old_gen_root_closure,work_queue_set_, &term_),_is_alive_closure(gen_), _scan_weak_ref_closure(gen_, this),_keep_alive_closure(&_scan_weak_ref_closure),_promotion_failure_size(0),_strong_roots_time(0.0), _term_time(0.0) {#if TASKQUEUE_STATS_term_attempts = 0;_overflow_refills = 0;_overflow_refill_objs = 0;#endif // TASKQUEUE_STATS_survivor_chunk_array =(ChunkArray*) old_gen()->get_data_recorder(thread_num());_hash_seed = 17; // Might want to take time-based random value._start = os::elapsedTime();_old_gen_closure.set_generation(old_gen_);_old_gen_root_closure.set_generation(old_gen_); } #ifdef _MSC_VER #pragma warning( pop ) #endifvoid ParScanThreadState::record_survivor_plab(HeapWord* plab_start,size_t plab_word_size) {ChunkArray* sca = survivor_chunk_array();if (sca != NULL) {// A non-null SCA implies that we want the PLAB data recorded.sca->record_sample(plab_start, plab_word_size);} }bool ParScanThreadState::should_be_partially_scanned(oop new_obj, oop old_obj) const {return new_obj->is_objArray() &&arrayOop(new_obj)->length() > ParGCArrayScanChunk &&new_obj != old_obj; }void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {assert(old->is_objArray(), "must be obj array");assert(old->is_forwarded(), "must be forwarded");assert(Universe::heap()->is_in_reserved(old), "must be in heap.");assert(!old_gen()->is_in(old), "must be in young generation.");objArrayOop obj = objArrayOop(old->forwardee());// Process ParGCArrayScanChunk elements now// and push the remainder back onto queueint start = arrayOop(old)->length();int end = obj->length();int remainder = end - start;assert(start <= end, "just checking");if (remainder > 2 * ParGCArrayScanChunk) {// Test above combines last partial chunk with a full chunkend = start + ParGCArrayScanChunk;arrayOop(old)->set_length(end);// Push remainder.bool ok = work_queue()->push(old);assert(ok, "just popped, push must be okay");} else {// Restore length so that it can be used if there// is a promotion failure and forwarding pointers// must be removed.arrayOop(old)->set_length(end);}// process our set of indices (include header in first chunk)// should make sure end is even (aligned to HeapWord in case of compressed oops)if ((HeapWord *)obj < young_old_boundary()) {// object is in to_spaceobj->oop_iterate_range(&_to_space_closure, start, end);} else {// object is in old generationobj->oop_iterate_range(&_old_gen_closure, start, end);} }void ParScanThreadState::trim_queues(int max_size) {ObjToScanQueue* queue = work_queue();do {while (queue->size() > (juint)max_size) {oop obj_to_scan;if (queue->pop_local(obj_to_scan)) {if ((HeapWord *)obj_to_scan < young_old_boundary()) {if (obj_to_scan->is_objArray() &&obj_to_scan->is_forwarded() &&obj_to_scan->forwardee() != obj_to_scan) {scan_partial_array_and_push_remainder(obj_to_scan);} else {// object is in to_spaceobj_to_scan->oop_iterate(&_to_space_closure);}} else {// object is in old generationobj_to_scan->oop_iterate(&_old_gen_closure);}}}// For the case of compressed oops, we have a private, non-shared// overflow stack, so we eagerly drain it so as to more evenly// distribute load early. 
Note: this may be good to do in// general rather than delay for the final stealing phase.// If applicable, we'll transfer a set of objects over to our// work queue, allowing them to be stolen and draining our// private overflow stack.} while (ParGCTrimOverflow && young_gen()->take_from_overflow_list(this)); }bool ParScanThreadState::take_from_overflow_stack() {assert(ParGCUseLocalOverflow, "Else should not call");assert(young_gen()->overflow_list() == NULL, "Error");ObjToScanQueue* queue = work_queue();Stack<oop>* const of_stack = overflow_stack();const size_t num_overflow_elems = of_stack->size();const size_t space_available = queue->max_elems() - queue->size();const size_t num_take_elems = MIN3(space_available / 4,ParGCDesiredObjsFromOverflowList,num_overflow_elems);// Transfer the most recent num_take_elems from the overflow// stack to our work queue.for (size_t i = 0; i != num_take_elems; i++) {oop cur = of_stack->pop();oop obj_to_push = cur->forwardee();assert(Universe::heap()->is_in_reserved(cur), "Should be in heap");assert(!old_gen()->is_in_reserved(cur), "Should be in young gen");assert(Universe::heap()->is_in_reserved(obj_to_push), "Should be in heap");if (should_be_partially_scanned(obj_to_push, cur)) {assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");obj_to_push = cur;}bool ok = queue->push(obj_to_push);assert(ok, "Should have succeeded");}assert(young_gen()->overflow_list() == NULL, "Error");return num_take_elems > 0; // was something transferred? }void ParScanThreadState::push_on_overflow_stack(oop p) {assert(ParGCUseLocalOverflow, "Else should not call");overflow_stack()->push(p);assert(young_gen()->overflow_list() == NULL, "Error"); }HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {// Otherwise, if the object is small enough, try to reallocate the// buffer.HeapWord* obj = NULL;if (!_to_space_full) {ParGCAllocBuffer* const plab = to_space_alloc_buffer();Space* const sp = to_space();if (word_sz * 100 <ParallelGCBufferWastePct * plab->word_sz()) {// Is small enough; abandon this buffer and start a new one.plab->retire(false, false);size_t buf_size = plab->word_sz();HeapWord* buf_space = sp->par_allocate(buf_size);if (buf_space == NULL) {const size_t min_bytes =ParGCAllocBuffer::min_size() << LogHeapWordSize;size_t free_bytes = sp->free();while(buf_space == NULL && free_bytes >= min_bytes) {buf_size = free_bytes >> LogHeapWordSize;assert(buf_size == (size_t)align_object_size(buf_size),"Invariant");buf_space = sp->par_allocate(buf_size);free_bytes = sp->free();}}if (buf_space != NULL) {plab->set_word_size(buf_size);plab->set_buf(buf_space);record_survivor_plab(buf_space, buf_size);obj = plab->allocate(word_sz);// Note that we cannot compare buf_size < word_sz below// because of AlignmentReserve (see ParGCAllocBuffer::allocate()).assert(obj != NULL || plab->words_remaining() < word_sz,"Else should have been able to allocate");// It's conceivable that we may be able to use the// buffer we just grabbed for subsequent small requests// even if not for this one.} else {// We're used up._to_space_full = true;}} else {// Too large; allocate the object individually.obj = sp->par_allocate(word_sz);}}return obj; }void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj,size_t word_sz) {// Is the alloc in the current alloc buffer?if (to_space_alloc_buffer()->contains(obj)) {assert(to_space_alloc_buffer()->contains(obj + word_sz - 1),"Should contain whole object.");to_space_alloc_buffer()->undo_allocation(obj, word_sz);} else 
{CollectedHeap::fill_with_object(obj, word_sz);} }void ParScanThreadState::print_and_clear_promotion_failure_size() {if (_promotion_failure_size != 0) {if (PrintPromotionFailure) {gclog_or_tty->print(" (%d: promotion failure size = " SIZE_FORMAT ") ",_thread_num, _promotion_failure_size);}_promotion_failure_size = 0;} }class ParScanThreadStateSet: private ResourceArray { public:// Initializes states for the specified number of threads;ParScanThreadStateSet(int num_threads,Space& to_space,ParNewGeneration& gen,Generation& old_gen,ObjToScanQueueSet& queue_set,Stack<oop>* overflow_stacks_,size_t desired_plab_sz,ParallelTaskTerminator& term);~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); }inline ParScanThreadState& thread_state(int i);void reset(bool promotion_failed);void flush();#if TASKQUEUE_STATSstatic voidprint_termination_stats_hdr(outputStream* const st = gclog_or_tty);void print_termination_stats(outputStream* const st = gclog_or_tty);static voidprint_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);void print_taskqueue_stats(outputStream* const st = gclog_or_tty);void reset_stats();#endif // TASKQUEUE_STATSprivate:ParallelTaskTerminator& _term;ParNewGeneration& _gen;Generation& _next_gen; };ParScanThreadStateSet::ParScanThreadStateSet(int num_threads, Space& to_space, ParNewGeneration& gen,Generation& old_gen, ObjToScanQueueSet& queue_set,Stack<oop>* overflow_stacks,size_t desired_plab_sz, ParallelTaskTerminator& term): ResourceArray(sizeof(ParScanThreadState), num_threads),_gen(gen), _next_gen(old_gen), _term(term) {assert(num_threads > 0, "sanity check!");assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),"overflow_stack allocation mismatch");// Initialize states.for (int i = 0; i < num_threads; ++i) {new ((ParScanThreadState*)_data + i)ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set,overflow_stacks, desired_plab_sz, term);} }inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i) {assert(i >= 0 && i < length(), "sanity check!");return ((ParScanThreadState*)_data)[i]; }void ParScanThreadStateSet::reset(bool promotion_failed) {_term.reset_for_reuse();if (promotion_failed) {for (int i = 0; i < length(); ++i) {thread_state(i).print_and_clear_promotion_failure_size();}} }#if TASKQUEUE_STATS void ParScanThreadState::reset_stats() {taskqueue_stats().reset();_term_attempts = 0;_overflow_refills = 0;_overflow_refill_objs = 0; }void ParScanThreadStateSet::reset_stats() {for (int i = 0; i < length(); ++i) {thread_state(i).reset_stats();} }void ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st) {st->print_raw_cr("GC Termination Stats");st->print_raw_cr(" elapsed --strong roots-- ""-------termination-------");st->print_raw_cr("thr ms ms % "" ms % attempts");st->print_raw_cr("--- --------- --------- ------ ""--------- ------ --------"); }void ParScanThreadStateSet::print_termination_stats(outputStream* const st) {print_termination_stats_hdr(st);for (int i = 0; i < length(); ++i) {const ParScanThreadState & pss = thread_state(i);const double elapsed_ms = pss.elapsed_time() * 1000.0;const double s_roots_ms = pss.strong_roots_time() * 1000.0;const double term_ms = pss.term_time() * 1000.0;st->print_cr("%3d %9.2f %9.2f %6.2f ""%9.2f %6.2f " SIZE_FORMAT_W(8),i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,term_ms, term_ms * 100 / elapsed_ms, pss.term_attempts());} }// Print stats related to work queue activity. 
void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st) {st->print_raw_cr("GC Task Stats");st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr(); }void ParScanThreadStateSet::print_taskqueue_stats(outputStream* const st) {print_taskqueue_stats_hdr(st);TaskQueueStats totals;for (int i = 0; i < length(); ++i) {const ParScanThreadState & pss = thread_state(i);const TaskQueueStats & stats = pss.taskqueue_stats();st->print("%3d ", i); stats.print(st); st->cr();totals += stats;if (pss.overflow_refills() > 0) {st->print_cr(" " SIZE_FORMAT_W(10) " overflow refills "SIZE_FORMAT_W(10) " overflow objects",pss.overflow_refills(), pss.overflow_refill_objs());}}st->print("tot "); totals.print(st); st->cr();DEBUG_ONLY(totals.verify()); } #endif // TASKQUEUE_STATSvoid ParScanThreadStateSet::flush() {// Work in this loop should be kept as lightweight as// possible since this might otherwise become a bottleneck// to scaling. Should we add heavy-weight work into this// loop, consider parallelizing the loop into the worker threads.for (int i = 0; i < length(); ++i) {ParScanThreadState& par_scan_state = thread_state(i);// Flush stats related to To-space PLAB activity and// retire the last buffer.par_scan_state.to_space_alloc_buffer()->flush_stats_and_retire(_gen.plab_stats(),false /* !retain */);// Every thread has its own age table. We need to merge// them all into one.ageTable *local_table = par_scan_state.age_table();_gen.age_table()->merge(local_table);// Inform old gen that we're done._next_gen.par_promote_alloc_done(i);_next_gen.par_oop_since_save_marks_iterate_done(i);}if (UseConcMarkSweepGC && ParallelGCThreads > 0) {// We need to call this even when ResizeOldPLAB is disabled// so as to avoid breaking some asserts. 
While we may be able// to avoid this by reorganizing the code a bit, I am loathe// to do that unless we find cases where ergo leads to bad// performance.CFLS_LAB::compute_desired_plab_size();} }ParScanClosure::ParScanClosure(ParNewGeneration* g,ParScanThreadState* par_scan_state) :OopsInGenClosure(g), _par_scan_state(par_scan_state), _g(g) {assert(_g->level() == 0, "Optimized for youngest generation");_boundary = _g->reserved().end(); }void ParScanWithBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, true, false); } void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }void ParScanWithoutBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, false, false); } void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, true, true); } void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); }void ParRootScanWithoutBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, false, true); } void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); }ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g,ParScanThreadState* par_scan_state): ScanWeakRefClosure(g), _par_scan_state(par_scan_state) {}void ParScanWeakRefClosure::do_oop(oop* p) { ParScanWeakRefClosure::do_oop_work(p); } void ParScanWeakRefClosure::do_oop(narrowOop* p) { ParScanWeakRefClosure::do_oop_work(p); }#ifdef WIN32 #pragma warning(disable: 4786) /* identifier was truncated to '255' characters in the browser information */ #endifParEvacuateFollowersClosure::ParEvacuateFollowersClosure(ParScanThreadState* par_scan_state_,ParScanWithoutBarrierClosure* to_space_closure_,ParScanWithBarrierClosure* old_gen_closure_,ParRootScanWithoutBarrierClosure* to_space_root_closure_,ParNewGeneration* par_gen_,ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure_,ObjToScanQueueSet* task_queues_,ParallelTaskTerminator* terminator_) :_par_scan_state(par_scan_state_),_to_space_closure(to_space_closure_),_old_gen_closure(old_gen_closure_),_to_space_root_closure(to_space_root_closure_),_old_gen_root_closure(old_gen_root_closure_),_par_gen(par_gen_),_task_queues(task_queues_),_terminator(terminator_) {}void ParEvacuateFollowersClosure::do_void() {ObjToScanQueue* work_q = par_scan_state()->work_queue();while (true) {// Scan to-space and old-gen objs until we run out of both.oop obj_to_scan;par_scan_state()->trim_queues(0);// We have no local work, attempt to steal from other threads.// attempt to steal work from promoted.if (task_queues()->steal(par_scan_state()->thread_num(),par_scan_state()->hash_seed(),obj_to_scan)) {bool res = work_q->push(obj_to_scan);assert(res, "Empty queue should have room for a push.");// if successful, goto Start.continue;// try global overflow list.} else if (par_gen()->take_from_overflow_list(par_scan_state())) {continue;}// Otherwise, offer termination.par_scan_state()->start_term_time();if (terminator()->offer_termination()) break;par_scan_state()->end_term_time();}assert(par_gen()->_overflow_list == NULL && par_gen()->_num_par_pushes == 0,"Broken overflow list?");// Finish the last termination pause.par_scan_state()->end_term_time(); }ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* next_gen,HeapWord* young_old_boundary, ParScanThreadStateSet* state_set) 
:AbstractGangTask("ParNewGeneration collection"),_gen(gen), _next_gen(next_gen),_young_old_boundary(young_old_boundary),_state_set(state_set){}void ParNewGenTask::work(int i) {GenCollectedHeap* gch = GenCollectedHeap::heap();// Since this is being done in a separate thread, need new resource// and handle marks.ResourceMark rm;HandleMark hm;// We would need multiple old-gen queues otherwise.assert(gch->n_gens() == 2, "Par young collection currently only works with one older gen.");Generation* old_gen = gch->next_gen(_gen);ParScanThreadState& par_scan_state = _state_set->thread_state(i);par_scan_state.set_young_old_boundary(_young_old_boundary);par_scan_state.start_strong_roots();gch->gen_process_strong_roots(_gen->level(),true, // Process younger gens, if any,// as strong roots.false, // no scope; this is parallel codefalse, // not collecting perm generation.SharedHeap::SO_AllClasses,&par_scan_state.to_space_root_closure(),true, // walk *all* scavengable nmethods&par_scan_state.older_gen_closure());par_scan_state.end_strong_roots();// "evacuate followers".par_scan_state.evacuate_followers_closure().do_void(); }#ifdef _MSC_VER #pragma warning( push ) #pragma warning( disable:4355 ) // 'this' : used in base member initializer list #endif ParNewGeneration:: ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level): DefNewGeneration(rs, initial_byte_size, level, "PCopy"),_overflow_list(NULL),_is_alive_closure(this),_plab_stats(YoungPLABSize, PLABWeight) {NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;)NOT_PRODUCT(_num_par_pushes = 0;)_task_queues = new ObjToScanQueueSet(ParallelGCThreads);guarantee(_task_queues != NULL, "task_queues allocation failure.");for (uint i1 = 0; i1 < ParallelGCThreads; i1++) {ObjToScanQueue *q = new ObjToScanQueue();guarantee(q != NULL, "work_queue Allocation failure.");_task_queues->register_queue(i1, q);}for (uint i2 = 0; i2 < ParallelGCThreads; i2++)_task_queues->queue(i2)->initialize();_overflow_stacks = NULL;if (ParGCUseLocalOverflow) {_overflow_stacks = NEW_C_HEAP_ARRAY(Stack<oop>, ParallelGCThreads);for (size_t i = 0; i < ParallelGCThreads; ++i) {new (_overflow_stacks + i) Stack<oop>();}}if (UsePerfData) {EXCEPTION_MARK;ResourceMark rm;const char* cname =PerfDataManager::counter_name(_gen_counters->name_space(), "threads");PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_None,ParallelGCThreads, CHECK);} } #ifdef _MSC_VER #pragma warning( pop ) #endif// ParNewGeneration:: ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl) :DefNewGeneration::KeepAliveClosure(cl), _par_cl(cl) {}template <class T> void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) { #ifdef ASSERT{assert(!oopDesc::is_null(*p), "expected non-null ref");oop obj = oopDesc::load_decode_heap_oop_not_null(p);// We never expect to see a null reference being processed// as a weak reference.assert(obj->is_oop(), "expected an oop while scanning weak refs");} #endif // ASSERT_par_cl->do_oop_nv(p);if (Universe::heap()->is_in_reserved(p)) {oop obj = oopDesc::load_decode_heap_oop_not_null(p);_rs->write_ref_field_gc_par(p, obj);} }void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(oop* p) { ParKeepAliveClosure::do_oop_work(p); } void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(narrowOop* p) { ParKeepAliveClosure::do_oop_work(p); }// ParNewGeneration:: KeepAliveClosure::KeepAliveClosure(ScanWeakRefClosure* cl) :DefNewGeneration::KeepAliveClosure(cl) {}template <class T> void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) { 
#ifdef ASSERT{assert(!oopDesc::is_null(*p), "expected non-null ref");oop obj = oopDesc::load_decode_heap_oop_not_null(p);// We never expect to see a null reference being processed// as a weak reference.assert(obj->is_oop(), "expected an oop while scanning weak refs");} #endif // ASSERT_cl->do_oop_nv(p);if (Universe::heap()->is_in_reserved(p)) {oop obj = oopDesc::load_decode_heap_oop_not_null(p);_rs->write_ref_field_gc_par(p, obj);} }void /*ParNewGeneration::*/KeepAliveClosure::do_oop(oop* p) { KeepAliveClosure::do_oop_work(p); } void /*ParNewGeneration::*/KeepAliveClosure::do_oop(narrowOop* p) { KeepAliveClosure::do_oop_work(p); }template <class T> void ScanClosureWithParBarrier::do_oop_work(T* p) {T heap_oop = oopDesc::load_heap_oop(p);if (!oopDesc::is_null(heap_oop)) {oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);if ((HeapWord*)obj < _boundary) {assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");oop new_obj = obj->is_forwarded()? obj->forwardee(): _g->DefNewGeneration::copy_to_survivor_space(obj);oopDesc::encode_store_heap_oop_not_null(p, new_obj);}if (_gc_barrier) {// If p points to a younger generation, mark the card.if ((HeapWord*)obj < _gen_boundary) {_rs->write_ref_field_gc_par(p, obj);}}} }void ScanClosureWithParBarrier::do_oop(oop* p) { ScanClosureWithParBarrier::do_oop_work(p); } void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier::do_oop_work(p); }class ParNewRefProcTaskProxy: public AbstractGangTask {typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; public:ParNewRefProcTaskProxy(ProcessTask& task, ParNewGeneration& gen,Generation& next_gen,HeapWord* young_old_boundary,ParScanThreadStateSet& state_set);private:virtual void work(int i);private:ParNewGeneration& _gen;ProcessTask& _task;Generation& _next_gen;HeapWord* _young_old_boundary;ParScanThreadStateSet& _state_set; };ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(ProcessTask& task, ParNewGeneration& gen,Generation& next_gen,HeapWord* young_old_boundary,ParScanThreadStateSet& state_set): AbstractGangTask("ParNewGeneration parallel reference processing"),_gen(gen),_task(task),_next_gen(next_gen),_young_old_boundary(young_old_boundary),_state_set(state_set) { }void ParNewRefProcTaskProxy::work(int i) {ResourceMark rm;HandleMark hm;ParScanThreadState& par_scan_state = _state_set.thread_state(i);par_scan_state.set_young_old_boundary(_young_old_boundary);_task.work(i, par_scan_state.is_alive_closure(),par_scan_state.keep_alive_closure(),par_scan_state.evacuate_followers_closure()); }class ParNewRefEnqueueTaskProxy: public AbstractGangTask {typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;EnqueueTask& _task;public:ParNewRefEnqueueTaskProxy(EnqueueTask& task): AbstractGangTask("ParNewGeneration parallel reference enqueue"),_task(task){ }virtual void work(int i){_task.work(i);} };void ParNewRefProcTaskExecutor::execute(ProcessTask& task) {GenCollectedHeap* gch = GenCollectedHeap::heap();assert(gch->kind() == CollectedHeap::GenCollectedHeap,"not a generational heap");WorkGang* workers = gch->workers();assert(workers != NULL, "Need parallel worker threads.");ParNewRefProcTaskProxy rp_task(task, _generation, *_generation.next_gen(),_generation.reserved().end(), _state_set);workers->run_task(&rp_task);_state_set.reset(_generation.promotion_failed()); }void ParNewRefProcTaskExecutor::execute(EnqueueTask& task) {GenCollectedHeap* gch = GenCollectedHeap::heap();WorkGang* workers = gch->workers();assert(workers != NULL, "Need parallel worker 
threads.");ParNewRefEnqueueTaskProxy enq_task(task);workers->run_task(&enq_task); }void ParNewRefProcTaskExecutor::set_single_threaded_mode() {_state_set.flush();GenCollectedHeap* gch = GenCollectedHeap::heap();gch->set_par_threads(0); // 0 ==> non-parallel.gch->save_marks(); }ScanClosureWithParBarrier:: ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :ScanClosure(g, gc_barrier) {}EvacuateFollowersClosureGeneral:: EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,OopsInGenClosure* cur,OopsInGenClosure* older) :_gch(gch), _level(level),_scan_cur_or_nonheap(cur), _scan_older(older) {}void EvacuateFollowersClosureGeneral::do_void() {do {// Beware: this call will lead to closure applications via virtual// calls._gch->oop_since_save_marks_iterate(_level,_scan_cur_or_nonheap,_scan_older);} while (!_gch->no_allocs_since_save_marks(_level)); }bool ParNewGeneration::_avoid_promotion_undo = false;void ParNewGeneration::adjust_desired_tenuring_threshold() {// Set the desired survivor size to half the real survivor space_tenuring_threshold =age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize); }// A Generation that does parallel young-gen collection.void ParNewGeneration::collect(bool full,bool clear_all_soft_refs,size_t size,bool is_tlab) {assert(full || size > 0, "otherwise we don't want to collect");GenCollectedHeap* gch = GenCollectedHeap::heap();assert(gch->kind() == CollectedHeap::GenCollectedHeap,"not a CMS generational heap");AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();WorkGang* workers = gch->workers();_next_gen = gch->next_gen(this);assert(_next_gen != NULL,"This must be the youngest gen, and not the only gen");assert(gch->n_gens() == 2,"Par collection currently only works with single older gen.");// Do we have to avoid promotion_undo?if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) {set_avoid_promotion_undo(true);}// If the next generation is too full to accomodate worst-case promotion// from this generation, pass on collection; let the next generation// do it.if (!collection_attempt_is_safe()) {gch->set_incremental_collection_failed(); // slight lie, in that we did not even attempt onereturn;}assert(to()->is_empty(), "Else not collection_attempt_is_safe");init_assuming_no_promotion_failure();if (UseAdaptiveSizePolicy) {set_survivor_overflow(false);size_policy->minor_collection_begin();}TraceTime t1("GC", PrintGC && !PrintGCDetails, true, gclog_or_tty);// Capture heap used before collection (for printing).size_t gch_prev_used = gch->used();SpecializationStats::clear();age_table()->clear();to()->clear(SpaceDecorator::Mangle);gch->save_marks();assert(workers != NULL, "Need parallel worker threads.");ParallelTaskTerminator _term(workers->total_workers(), task_queues());ParScanThreadStateSet thread_state_set(workers->total_workers(),*to(), *this, *_next_gen, *task_queues(),_overflow_stacks, desired_plab_sz(), _term);ParNewGenTask tsk(this, _next_gen, reserved().end(), &thread_state_set);int n_workers = workers->total_workers();gch->set_par_threads(n_workers);gch->rem_set()->prepare_for_younger_refs_iterate(true);// It turns out that even when we're using 1 thread, doing the work in a// separate thread causes wide variance in run times. 
We can't help this// in the multi-threaded case, but we special-case n=1 here to get// repeatable measurements of the 1-thread overhead of the parallel code.if (n_workers > 1) {GenCollectedHeap::StrongRootsScope srs(gch);workers->run_task(&tsk);} else {GenCollectedHeap::StrongRootsScope srs(gch);tsk.work(0);}thread_state_set.reset(promotion_failed());// Process (weak) reference objects found during scavenge.ReferenceProcessor* rp = ref_processor();IsAliveClosure is_alive(this);ScanWeakRefClosure scan_weak_ref(this);KeepAliveClosure keep_alive(&scan_weak_ref);ScanClosure scan_without_gc_barrier(this, false);ScanClosureWithParBarrier scan_with_gc_barrier(this, true);set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);EvacuateFollowersClosureGeneral evacuate_followers(gch, _level,&scan_without_gc_barrier, &scan_with_gc_barrier);rp->setup_policy(clear_all_soft_refs);if (rp->processing_is_mt()) {ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);rp->process_discovered_references(&is_alive, &keep_alive,&evacuate_followers, &task_executor);} else {thread_state_set.flush();gch->set_par_threads(0); // 0 ==> non-parallel.gch->save_marks();rp->process_discovered_references(&is_alive, &keep_alive,&evacuate_followers, NULL);}if (!promotion_failed()) {// Swap the survivor spaces.eden()->clear(SpaceDecorator::Mangle);from()->clear(SpaceDecorator::Mangle);if (ZapUnusedHeapArea) {// This is now done here because of the piece-meal mangling which// can check for valid mangling at intermediate points in the// collection(s). When a minor collection fails to collect// sufficient space resizing of the young generation can occur// an redistribute the spaces in the young generation. Mangle// here so that unzapped regions don't get distributed to// other spaces.to()->mangle_unused_area();}swap_spaces();// A successful scavenge should restart the GC time limit count which is// for full GC's.size_policy->reset_gc_overhead_limit_count();assert(to()->is_empty(), "to space should be empty now");} else {assert(_promo_failure_scan_stack.is_empty(), "post condition");_promo_failure_scan_stack.clear(true); // Clear cached segments.remove_forwarding_pointers();if (PrintGCDetails) {gclog_or_tty->print(" (promotion failed)");}// All the spaces are in play for mark-sweep.swap_spaces(); // Make life simpler for CMS || rescan; see 6483690.from()->set_next_compaction_space(to());gch->set_incremental_collection_failed();// Inform the next generation that a promotion failure occurred._next_gen->promotion_failure_occurred();// Reset the PromotionFailureALot counters.NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)}// set new iteration safe limit for the survivor spacesfrom()->set_concurrent_iteration_safe_limit(from()->top());to()->set_concurrent_iteration_safe_limit(to()->top());adjust_desired_tenuring_threshold();if (ResizePLAB) {plab_stats()->adjust_desired_plab_sz();}if (PrintGC && !PrintGCDetails) {gch->print_heap_change(gch_prev_used);}if (PrintGCDetails && ParallelGCVerbose) {TASKQUEUE_STATS_ONLY(thread_state_set.print_termination_stats());TASKQUEUE_STATS_ONLY(thread_state_set.print_taskqueue_stats());}if (UseAdaptiveSizePolicy) {size_policy->minor_collection_end(gch->gc_cause());size_policy->avg_survived()->sample(from()->used());}update_time_of_last_gc(os::javaTimeMillis());SpecializationStats::print();rp->set_enqueuing_is_done(true);if (rp->processing_is_mt()) {ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);rp->enqueue_discovered_references(&task_executor);} else 
{rp->enqueue_discovered_references(NULL);}rp->verify_no_references_recorded(); }static int sum; void ParNewGeneration::waste_some_time() {for (int i = 0; i < 100; i++) {sum += i;} }static const oop ClaimedForwardPtr = oop(0x4);// Because of concurrency, there are times where an object for which // "is_forwarded()" is true contains an "interim" forwarding pointer // value. Such a value will soon be overwritten with a real value. // This method requires "obj" to have a forwarding pointer, and waits, if // necessary for a real one to be inserted, and returns it.oop ParNewGeneration::real_forwardee(oop obj) {oop forward_ptr = obj->forwardee();if (forward_ptr != ClaimedForwardPtr) {return forward_ptr;} else {return real_forwardee_slow(obj);} }oop ParNewGeneration::real_forwardee_slow(oop obj) {// Spin-read if it is claimed but not yet written by another thread.oop forward_ptr = obj->forwardee();while (forward_ptr == ClaimedForwardPtr) {waste_some_time();assert(obj->is_forwarded(), "precondition");forward_ptr = obj->forwardee();}return forward_ptr; }#ifdef ASSERT bool ParNewGeneration::is_legal_forward_ptr(oop p) {return(_avoid_promotion_undo && p == ClaimedForwardPtr)|| Universe::heap()->is_in_reserved(p); } #endifvoid ParNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {if (m->must_be_preserved_for_promotion_failure(obj)) {// We should really have separate per-worker stacks, rather// than use locking of a common pair of stacks.MutexLocker ml(ParGCRareEvent_lock);preserve_mark(obj, m);} }// Multiple GC threads may try to promote an object. If the object // is successfully promoted, a forwarding pointer will be installed in // the object in the young generation. This method claims the right // to install the forwarding pointer before it copies the object, // thus avoiding the need to undo the copy as in // copy_to_survivor_space_avoiding_with_undo.oop ParNewGeneration::copy_to_survivor_space_avoiding_promotion_undo(ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {// In the sequential version, this assert also says that the object is// not forwarded. That might not be the case here. It is the case that// the caller observed it to be not forwarded at some time in the past.assert(is_in_reserved(old), "shouldn't be scavenging this oop");// The sequential code read "old->age()" below. That doesn't work here,// since the age is in the mark word, and that might be overwritten with// a forwarding pointer by a parallel thread. 
So we must save the mark// word in a local and then analyze it.oopDesc dummyOld;dummyOld.set_mark(m);assert(!dummyOld.is_forwarded(),"should not be called with forwarding pointer mark word.");oop new_obj = NULL;oop forward_ptr;// Try allocating obj in to-space (unless too old)if (dummyOld.age() < tenuring_threshold()) {new_obj = (oop)par_scan_state->alloc_in_to_space(sz);if (new_obj == NULL) {set_survivor_overflow(true);}}if (new_obj == NULL) {// Either to-space is full or we decided to promote// try allocating obj tenured// Attempt to install a null forwarding pointer (atomically),// to claim the right to install the real forwarding pointer.forward_ptr = old->forward_to_atomic(ClaimedForwardPtr);if (forward_ptr != NULL) {// someone else beat us to it.return real_forwardee(old);}new_obj = _next_gen->par_promote(par_scan_state->thread_num(),old, m, sz);if (new_obj == NULL) {// promotion failed, forward to self_promotion_failed = true;new_obj = old;preserve_mark_if_necessary(old, m);// Log the size of the maiden promotion failurepar_scan_state->log_promotion_failure(sz);}old->forward_to(new_obj);forward_ptr = NULL;} else {// Is in to-space; do copying ourselves.Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);forward_ptr = old->forward_to_atomic(new_obj);// Restore the mark word copied above.new_obj->set_mark(m);// Increment age if obj still in new generationnew_obj->incr_age();par_scan_state->age_table()->add(new_obj, sz);}assert(new_obj != NULL, "just checking");if (forward_ptr == NULL) {oop obj_to_push = new_obj;if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {// Length field used as index of next element to be scanned.// Real length can be obtained from real_forwardee()arrayOop(old)->set_length(0);obj_to_push = old;assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,"push forwarded object");}// Push it on one of the queues of to-be-scanned objects.bool simulate_overflow = false;NOT_PRODUCT(if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {// simulate a stack overflowsimulate_overflow = true;})if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {// Add stats for overflow pushes.if (Verbose && PrintGCDetails) {gclog_or_tty->print("queue overflow!\n");}push_on_overflow_list(old, par_scan_state);TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0));}return new_obj;}// Oops. Someone beat us to it. Undo the allocation. Where did we// allocate it?if (is_in_reserved(new_obj)) {// Must be in to_space.assert(to()->is_in_reserved(new_obj), "Checking");if (forward_ptr == ClaimedForwardPtr) {// Wait to get the real forwarding pointer value.forward_ptr = real_forwardee(old);}par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);}return forward_ptr; }// Multiple GC threads may try to promote the same object. If two // or more GC threads copy the object, only one wins the race to install // the forwarding pointer. The other threads have to undo their copy.oop ParNewGeneration::copy_to_survivor_space_with_undo(ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {// In the sequential version, this assert also says that the object is// not forwarded. That might not be the case here. It is the case that// the caller observed it to be not forwarded at some time in the past.assert(is_in_reserved(old), "shouldn't be scavenging this oop");// The sequential code read "old->age()" below. 
That doesn't work here,// since the age is in the mark word, and that might be overwritten with// a forwarding pointer by a parallel thread. So we must save the mark// word here, install it in a local oopDesc, and then analyze it.oopDesc dummyOld;dummyOld.set_mark(m);assert(!dummyOld.is_forwarded(),"should not be called with forwarding pointer mark word.");bool failed_to_promote = false;oop new_obj = NULL;oop forward_ptr;// Try allocating obj in to-space (unless too old)if (dummyOld.age() < tenuring_threshold()) {new_obj = (oop)par_scan_state->alloc_in_to_space(sz);if (new_obj == NULL) {set_survivor_overflow(true);}}if (new_obj == NULL) {// Either to-space is full or we decided to promote// try allocating obj tenurednew_obj = _next_gen->par_promote(par_scan_state->thread_num(),old, m, sz);if (new_obj == NULL) {// promotion failed, forward to selfforward_ptr = old->forward_to_atomic(old);new_obj = old;if (forward_ptr != NULL) {return forward_ptr; // someone else succeeded}_promotion_failed = true;failed_to_promote = true;preserve_mark_if_necessary(old, m);// Log the size of the maiden promotion failurepar_scan_state->log_promotion_failure(sz);}} else {// Is in to-space; do copying ourselves.Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);// Restore the mark word copied above.new_obj->set_mark(m);// Increment age if new_obj still in new generationnew_obj->incr_age();par_scan_state->age_table()->add(new_obj, sz);}assert(new_obj != NULL, "just checking");// Now attempt to install the forwarding pointer (atomically).// We have to copy the mark word before overwriting with forwarding// ptr, so we can restore it below in the copy.if (!failed_to_promote) {forward_ptr = old->forward_to_atomic(new_obj);}if (forward_ptr == NULL) {oop obj_to_push = new_obj;if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {// Length field used as index of next element to be scanned.// Real length can be obtained from real_forwardee()arrayOop(old)->set_length(0);obj_to_push = old;assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,"push forwarded object");}// Push it on one of the queues of to-be-scanned objects.bool simulate_overflow = false;NOT_PRODUCT(if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {// simulate a stack overflowsimulate_overflow = true;})if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {// Add stats for overflow pushes.push_on_overflow_list(old, par_scan_state);TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0));}return new_obj;}// Oops. Someone beat us to it. Undo the allocation. Where did we// allocate it?if (is_in_reserved(new_obj)) {// Must be in to_space.assert(to()->is_in_reserved(new_obj), "Checking");par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);} else {assert(!_avoid_promotion_undo, "Should not be here if avoiding.");_next_gen->par_promote_alloc_undo(par_scan_state->thread_num(),(HeapWord*)new_obj, sz);}return forward_ptr; }#ifndef PRODUCT // It's OK to call this multi-threaded; the worst thing // that can happen is that we'll get a bunch of closely // spaced simulated oveflows, but that's OK, in fact // probably good as it would exercise the overflow code // under contention. bool ParNewGeneration::should_simulate_overflow() {if (_overflow_counter-- <= 0) { // just being defensive_overflow_counter = ParGCWorkQueueOverflowInterval;return true;} else {return false;} } #endif// In case we are using compressed oops, we need to be careful. 
// If the object being pushed is an object array, then its length // field keeps track of the "grey boundary" at which the next // incremental scan will be done (see ParGCArrayScanChunk). // When using compressed oops, this length field is kept in the // lower 32 bits of the erstwhile klass word and cannot be used // for the overflow chaining pointer (OCP below). As such the OCP // would itself need to be compressed into the top 32-bits in this // case. Unfortunately, see below, in the event that we have a // promotion failure, the node to be pushed on the list can be // outside of the Java heap, so the heap-based pointer compression // would not work (we would have potential aliasing between C-heap // and Java-heap pointers). For this reason, when using compressed // oops, we simply use a worker-thread-local, non-shared overflow // list in the form of a growable array, with a slightly different // overflow stack draining strategy. If/when we start using fat // stacks here, we can go back to using (fat) pointer chains // (although some performance comparisons would be useful since // single global lists have their own performance disadvantages // as we were made painfully aware not long ago, see 6786503). #define BUSY (oop(0x1aff1aff)) void ParNewGeneration::push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state) {assert(is_in_reserved(from_space_obj), "Should be from this generation");if (ParGCUseLocalOverflow) {// In the case of compressed oops, we use a private, not-shared// overflow stack.par_scan_state->push_on_overflow_stack(from_space_obj);} else {assert(!UseCompressedOops, "Error");// if the object has been forwarded to itself, then we cannot// use the klass pointer for the linked list. Instead we have// to allocate an oopDesc in the C-Heap and use that for the linked list.// XXX This is horribly inefficient when a promotion failure occurs// and should be fixed. XXX FIX ME !!! #ifndef PRODUCTAtomic::inc_ptr(&_num_par_pushes);assert(_num_par_pushes > 0, "Tautology"); #endifif (from_space_obj->forwardee() == from_space_obj) {oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1);listhead->forward_to(from_space_obj);from_space_obj = listhead;}oop observed_overflow_list = _overflow_list;oop cur_overflow_list;do {cur_overflow_list = observed_overflow_list;if (cur_overflow_list != BUSY) {from_space_obj->set_klass_to_list_ptr(cur_overflow_list);} else {from_space_obj->set_klass_to_list_ptr(NULL);}observed_overflow_list =(oop)Atomic::cmpxchg_ptr(from_space_obj, &_overflow_list, cur_overflow_list);} while (cur_overflow_list != observed_overflow_list);} }bool ParNewGeneration::take_from_overflow_list(ParScanThreadState* par_scan_state) {bool res;if (ParGCUseLocalOverflow) {res = par_scan_state->take_from_overflow_stack();} else {assert(!UseCompressedOops, "Error");res = take_from_overflow_list_work(par_scan_state);}return res; }// *NOTE*: The overflow list manipulation code here and // in CMSCollector:: are very similar in shape, // except that in the CMS case we thread the objects // directly into the list via their mark word, and do // not need to deal with special cases below related // to chunking of object arrays and promotion failure // handling. // CR 6797058 has been filed to attempt consolidation of // the common code. // Because of the common code, if you make any changes in // the code below, please check the CMS version to see if // similar changes might be needed. // See CMSCollector::par_take_from_overflow_list() for // more extensive documentation comments. 
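// (Annotation for this walkthrough) The routine below works in four steps:
//   1. Atomically xchg BUSY into _overflow_list to claim the whole chain.
//   2. Walk at most objsFromOverflow links (threaded through the klass word)
//      to cut off a prefix for this worker.
//   3. CAS the remaining suffix back onto _overflow_list, splicing it in if
//      some other thread has re-populated the list in the meantime.
//   4. Push the prefix objects onto the local work queue, freeing any C-heap
//      list nodes that were created for self-forwarded (promotion-failed) objects.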
bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan_state) {ObjToScanQueue* work_q = par_scan_state->work_queue();// How many to take?size_t objsFromOverflow = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,(size_t)ParGCDesiredObjsFromOverflowList);assert(!UseCompressedOops, "Error");assert(par_scan_state->overflow_stack() == NULL, "Error");if (_overflow_list == NULL) return false;// Otherwise, there was something there; try claiming the list.oop prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);// Trim off a prefix of at most objsFromOverflow itemsThread* tid = Thread::current();size_t spin_count = (size_t)ParallelGCThreads;size_t sleep_time_millis = MAX2((size_t)1, objsFromOverflow/100);for (size_t spin = 0; prefix == BUSY && spin < spin_count; spin++) {// someone grabbed it before we did ...// ... we spin for a short while...os::sleep(tid, sleep_time_millis, false);if (_overflow_list == NULL) {// nothing left to takereturn false;} else if (_overflow_list != BUSY) {// try and grab the prefixprefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);}}if (prefix == NULL || prefix == BUSY) {// Nothing to take or waited long enoughif (prefix == NULL) {// Write back the NULL in case we overwrote it with BUSY above// and it is still the same value.(void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);}return false;}assert(prefix != NULL && prefix != BUSY, "Error");size_t i = 1;oop cur = prefix;while (i < objsFromOverflow && cur->klass_or_null() != NULL) {i++; cur = oop(cur->klass());}// Reattach remaining (suffix) to overflow listif (cur->klass_or_null() == NULL) {// Write back the NULL in lieu of the BUSY we wrote// above and it is still the same value.if (_overflow_list == BUSY) {(void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);}} else {assert(cur->klass_or_null() != BUSY, "Error");oop suffix = oop(cur->klass()); // suffix will be put back on global listcur->set_klass_to_list_ptr(NULL); // break off suffix// It's possible that the list is still in the empty(busy) state// we left it in a short while ago; in that case we may be// able to place back the suffix.oop observed_overflow_list = _overflow_list;oop cur_overflow_list = observed_overflow_list;bool attached = false;while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {observed_overflow_list =(oop) Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);if (cur_overflow_list == observed_overflow_list) {attached = true;break;} else cur_overflow_list = observed_overflow_list;}if (!attached) {// Too bad, someone else got in in between; we'll need to do a splice.// Find the last item of suffix listoop last = suffix;while (last->klass_or_null() != NULL) {last = oop(last->klass());}// Atomically prepend suffix to current overflow listobserved_overflow_list = _overflow_list;do {cur_overflow_list = observed_overflow_list;if (cur_overflow_list != BUSY) {// Do the splice ...last->set_klass_to_list_ptr(cur_overflow_list);} else { // cur_overflow_list == BUSYlast->set_klass_to_list_ptr(NULL);}observed_overflow_list =(oop)Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);} while (cur_overflow_list != observed_overflow_list);}}// Push objects on prefix list onto this thread's work queueassert(prefix != NULL && prefix != BUSY, "program logic");cur = prefix;ssize_t n = 0;while (cur != NULL) {oop obj_to_push = cur->forwardee();oop next = oop(cur->klass_or_null());cur->set_klass(obj_to_push->klass());// This may be an array object that is self-forwarded. 
In that case, the list pointer// space, cur, is not in the Java heap, but rather in the C-heap and should be freed.if (!is_in_reserved(cur)) {// This can become a scaling bottleneck when there is work queue overflow coincident// with promotion failure.oopDesc* f = cur;FREE_C_HEAP_ARRAY(oopDesc, f);} else if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) {assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");obj_to_push = cur;}bool ok = work_q->push(obj_to_push);assert(ok, "Should have succeeded");cur = next;n++;}TASKQUEUE_STATS_ONLY(par_scan_state->note_overflow_refill(n)); #ifndef PRODUCTassert(_num_par_pushes >= n, "Too many pops?");Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes); #endifreturn true; } #undef BUSYvoid ParNewGeneration::ref_processor_init() {if (_ref_processor == NULL) {// Allocate and initialize a reference processor_ref_processor =new ReferenceProcessor(_reserved, // spanParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing(int) ParallelGCThreads, // mt processing degreerefs_discovery_is_mt(), // mt discovery(int) ParallelGCThreads, // mt discovery degreerefs_discovery_is_atomic(), // atomic_discoveryNULL, // is_alive_non_headerfalse); // write barrier for next field updates} }const char* ParNewGeneration::name() const {return "par new generation"; }bool ParNewGeneration::in_use() {return UseParNewGC && ParallelGCThreads > 0; }parCardTableModRefBS.cpp?
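parCardTableModRefBS.cpp implements the parallel scanning of dirty cards in the older generation on behalf of a young collection: the card range covering a space is cut into fixed-size chunks of ParGCCardsPerStrideChunk cards, the chunks are dealt out round-robin across n_threads * ParGCStridesPerThread strides, workers claim strides through a SequentialSubTasksDone, and the lowest_non_clean (LNC) array coordinates objects that straddle chunk boundaries. Before the real code, here is a minimal standalone model of just the round-robin chunk-to-stride assignment; the constants are illustrative, the region is assumed to start on a chunk boundary belonging to stride 0, and the actual process_stride() additionally handles an arbitrary starting stride and all of the LNC bookkeeping.

// Toy model of the strided chunk partitioning used by process_stride().
// Not HotSpot code: constants and the "card range" are made up for illustration.
#include <cstdio>

int main() {
    const int cards_per_chunk    = 8;   // stands in for ParGCCardsPerStrideChunk
    const int n_threads          = 2;
    const int strides_per_thread = 2;   // stands in for ParGCStridesPerThread
    const int n_strides = n_threads * strides_per_thread;
    const int n_cards   = 64;           // a tiny pretend card range

    // Chunk i belongs to stride (i % n_strides), so a worker that claims
    // stride s walks chunks s, s + n_strides, s + 2 * n_strides, ...
    for (int s = 0; s < n_strides; ++s) {
        printf("stride %d scans cards:", s);
        for (int chunk_start = s * cards_per_chunk;
             chunk_start < n_cards;
             chunk_start += n_strides * cards_per_chunk) {
            printf(" [%d,%d)", chunk_start, chunk_start + cards_per_chunk);
        }
        printf("\n");
    }
    return 0;
}

The full source of the file follows.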
Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved.* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.** This code is free software; you can redistribute it and/or modify it* under the terms of the GNU General Public License version 2 only, as* published by the Free Software Foundation.** This code is distributed in the hope that it will be useful, but WITHOUT* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License* version 2 for more details (a copy is included in the LICENSE file that* accompanied this code).** You should have received a copy of the GNU General Public License version* 2 along with this work; if not, write to the Free Software Foundation,* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.** Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA* or visit www.oracle.com if you need additional information or have any* questions.**/#include "precompiled.hpp" #include "memory/allocation.inline.hpp" #include "memory/cardTableModRefBS.hpp" #include "memory/cardTableRS.hpp" #include "memory/sharedHeap.hpp" #include "memory/space.inline.hpp" #include "memory/universe.hpp" #include "oops/oop.inline.hpp" #include "runtime/java.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/virtualspace.hpp"void CardTableModRefBS::non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,OopsInGenClosure* cl,CardTableRS* ct,int n_threads) {assert(n_threads > 0, "Error: expected n_threads > 0");assert((n_threads == 1 && ParallelGCThreads == 0) ||n_threads <= (int)ParallelGCThreads,"# worker threads != # requested!");// Make sure the LNC array is valid for the space.jbyte** lowest_non_clean;uintptr_t lowest_non_clean_base_chunk_index;size_t lowest_non_clean_chunk_size;get_LNC_array_for_space(sp, lowest_non_clean,lowest_non_clean_base_chunk_index,lowest_non_clean_chunk_size);int n_strides = n_threads * ParGCStridesPerThread;SequentialSubTasksDone* pst = sp->par_seq_tasks();pst->set_n_threads(n_threads);pst->set_n_tasks(n_strides);int stride = 0;while (!pst->is_task_claimed(/* reference */ stride)) {process_stride(sp, mr, stride, n_strides, cl, ct,lowest_non_clean,lowest_non_clean_base_chunk_index,lowest_non_clean_chunk_size);}if (pst->all_tasks_completed()) {// Clear lowest_non_clean array for next time.intptr_t first_chunk_index = addr_to_chunk_index(mr.start());uintptr_t last_chunk_index = addr_to_chunk_index(mr.last());for (uintptr_t ch = first_chunk_index; ch <= last_chunk_index; ch++) {intptr_t ind = ch - lowest_non_clean_base_chunk_index;assert(0 <= ind && ind < (intptr_t)lowest_non_clean_chunk_size,"Bounds error");lowest_non_clean[ind] = NULL;}} }void CardTableModRefBS:: process_stride(Space* sp,MemRegion used,jint stride, int n_strides,OopsInGenClosure* cl,CardTableRS* ct,jbyte** lowest_non_clean,uintptr_t lowest_non_clean_base_chunk_index,size_t lowest_non_clean_chunk_size) {// We go from higher to lower addresses here; it wouldn't help that much// because of the strided parallelism pattern used here.// Find the first card address of the first chunk in the stride that is// at least "bottom" of the used region.jbyte* start_card = byte_for(used.start());jbyte* end_card = byte_after(used.last());uintptr_t start_chunk = addr_to_chunk_index(used.start());uintptr_t start_chunk_stride_num = start_chunk % n_strides;jbyte* chunk_card_start;if ((uintptr_t)stride >= start_chunk_stride_num) {chunk_card_start = (jbyte*)(start_card +(stride - 
start_chunk_stride_num) *ParGCCardsPerStrideChunk);} else {// Go ahead to the next chunk group boundary, then to the requested stride.chunk_card_start = (jbyte*)(start_card +(n_strides - start_chunk_stride_num + stride) *ParGCCardsPerStrideChunk);}while (chunk_card_start < end_card) {// Even though we go from lower to higher addresses below, the// strided parallelism can interleave the actual processing of the// dirty pages in various ways. For a specific chunk within this// stride, we take care to avoid double scanning or missing a card// by suitably initializing the "min_done" field in process_chunk_boundaries()// below, together with the dirty region extension accomplished in// DirtyCardToOopClosure::do_MemRegion().jbyte* chunk_card_end = chunk_card_start + ParGCCardsPerStrideChunk;// Invariant: chunk_mr should be fully contained within the "used" region.MemRegion chunk_mr = MemRegion(addr_for(chunk_card_start),chunk_card_end >= end_card ?used.end() : addr_for(chunk_card_end));assert(chunk_mr.word_size() > 0, "[chunk_card_start > used_end)");assert(used.contains(chunk_mr), "chunk_mr should be subset of used");DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(),cl->gen_boundary());ClearNoncleanCardWrapper clear_cl(dcto_cl, ct);// Process the chunk.process_chunk_boundaries(sp,dcto_cl,chunk_mr,used,lowest_non_clean,lowest_non_clean_base_chunk_index,lowest_non_clean_chunk_size);// We want the LNC array updates above in process_chunk_boundaries// to be visible before any of the card table value changes as a// result of the dirty card iteration below.OrderAccess::storestore();// We do not call the non_clean_card_iterate_serial() version because// we want to clear the cards: clear_cl here does the work of finding// contiguous dirty ranges of cards to process and clear.clear_cl.do_MemRegion(chunk_mr);// Find the next chunk of the stride.chunk_card_start += ParGCCardsPerStrideChunk * n_strides;} }// If you want a talkative process_chunk_boundaries, // then #define NOISY(x) x #ifdef NOISY #error "Encountered a global preprocessor flag, NOISY, which might clash with local definition to follow" #else #define NOISY(x) #endifvoid CardTableModRefBS:: process_chunk_boundaries(Space* sp,DirtyCardToOopClosure* dcto_cl,MemRegion chunk_mr,MemRegion used,jbyte** lowest_non_clean,uintptr_t lowest_non_clean_base_chunk_index,size_t lowest_non_clean_chunk_size) {// We must worry about non-array objects that cross chunk boundaries,// because such objects are both precisely and imprecisely marked:// .. if the head of such an object is dirty, the entire object// needs to be scanned, under the interpretation that this// was an imprecise mark// .. if the head of such an object is not dirty, we can assume// precise marking and it's efficient to scan just the dirty// cards.// In either case, each scanned reference must be scanned precisely// once so as to avoid cloning of a young referent. 
For efficiency,// our closures depend on this property and do not protect against// double scans.uintptr_t cur_chunk_index = addr_to_chunk_index(chunk_mr.start());cur_chunk_index = cur_chunk_index - lowest_non_clean_base_chunk_index;NOISY(tty->print_cr("===========================================================================");)NOISY(tty->print_cr(" process_chunk_boundary: Called with [" PTR_FORMAT "," PTR_FORMAT ")",chunk_mr.start(), chunk_mr.end());)// First, set "our" lowest_non_clean entry, which would be// used by the thread scanning an adjoining left chunk with// a non-array object straddling the mutual boundary.// Find the object that spans our boundary, if one exists.// first_block is the block possibly straddling our left boundary.HeapWord* first_block = sp->block_start(chunk_mr.start());assert((chunk_mr.start() != used.start()) || (first_block == chunk_mr.start()),"First chunk should always have a co-initial block");// Does the block straddle the chunk's left boundary, and is it// a non-array object?if (first_block < chunk_mr.start() // first block straddles left bdry&& sp->block_is_obj(first_block) // first block is an object&& !(oop(first_block)->is_objArray() // first block is not an array (arrays are precisely dirtied)|| oop(first_block)->is_typeArray())) {// Find our least non-clean card, so that a left neighbour// does not scan an object straddling the mutual boundary// too far to the right, and attempt to scan a portion of// that object twice.jbyte* first_dirty_card = NULL;jbyte* last_card_of_first_obj =byte_for(first_block + sp->block_size(first_block) - 1);jbyte* first_card_of_cur_chunk = byte_for(chunk_mr.start());jbyte* last_card_of_cur_chunk = byte_for(chunk_mr.last());jbyte* last_card_to_check =(jbyte*) MIN2((intptr_t) last_card_of_cur_chunk,(intptr_t) last_card_of_first_obj);// Note that this does not need to go beyond our last card// if our first object completely straddles this chunk.for (jbyte* cur = first_card_of_cur_chunk;cur <= last_card_to_check; cur++) {jbyte val = *cur;if (card_will_be_scanned(val)) {first_dirty_card = cur; break;} else {assert(!card_may_have_been_dirty(val), "Error");}}if (first_dirty_card != NULL) {NOISY(tty->print_cr(" LNC: Found a dirty card at " PTR_FORMAT " in current chunk",first_dirty_card);)assert(0 <= cur_chunk_index && cur_chunk_index < lowest_non_clean_chunk_size,"Bounds error.");assert(lowest_non_clean[cur_chunk_index] == NULL,"Write exactly once : value should be stable hereafter for this round");lowest_non_clean[cur_chunk_index] = first_dirty_card;} NOISY(else {tty->print_cr(" LNC: Found no dirty card in current chunk; leaving LNC entry NULL");// In the future, we could have this thread look for a non-NULL value to copy from its// right neighbour (up to the end of the first object).if (last_card_of_cur_chunk < last_card_of_first_obj) {tty->print_cr(" LNC: BEWARE!!! 
first obj straddles past right end of chunk:\n"" might be efficient to get value from right neighbour?");}})} else {// In this case we can help our neighbour by just asking them// to stop at our first card (even though it may not be dirty).NOISY(tty->print_cr(" LNC: first block is not a non-array object; setting LNC to first card of current chunk");)assert(lowest_non_clean[cur_chunk_index] == NULL, "Write once : value should be stable hereafter");jbyte* first_card_of_cur_chunk = byte_for(chunk_mr.start());lowest_non_clean[cur_chunk_index] = first_card_of_cur_chunk;}NOISY(tty->print_cr(" process_chunk_boundary: lowest_non_clean[" INTPTR_FORMAT "] = " PTR_FORMAT" which corresponds to the heap address " PTR_FORMAT,cur_chunk_index, lowest_non_clean[cur_chunk_index],(lowest_non_clean[cur_chunk_index] != NULL)? addr_for(lowest_non_clean[cur_chunk_index]): NULL);)NOISY(tty->print_cr("---------------------------------------------------------------------------");)// Next, set our own max_to_do, which will strictly/exclusively bound// the highest address that we will scan past the right end of our chunk.HeapWord* max_to_do = NULL;if (chunk_mr.end() < used.end()) {// This is not the last chunk in the used region.// What is our last block? We check the first block of// the next (right) chunk rather than strictly check our last block// because it's potentially more efficient to do so.HeapWord* const last_block = sp->block_start(chunk_mr.end());assert(last_block <= chunk_mr.end(), "In case this property changes.");if ((last_block == chunk_mr.end()) // our last block does not straddle boundary|| !sp->block_is_obj(last_block) // last_block isn't an object|| oop(last_block)->is_objArray() // last_block is an array (precisely marked)|| oop(last_block)->is_typeArray()) {max_to_do = chunk_mr.end();NOISY(tty->print_cr(" process_chunk_boundary: Last block on this card is not a non-array object;\n"" max_to_do left at " PTR_FORMAT, max_to_do);)} else {assert(last_block < chunk_mr.end(), "Tautology");// It is a non-array object that straddles the right boundary of this chunk.// last_obj_card is the card corresponding to the start of the last object// in the chunk. Note that the last object may not start in// the chunk.jbyte* const last_obj_card = byte_for(last_block);const jbyte val = *last_obj_card;if (!card_will_be_scanned(val)) {assert(!card_may_have_been_dirty(val), "Error");// The card containing the head is not dirty. Any marks on// subsequent cards still in this chunk must have been made// precisely; we can cap processing at the end of our chunk.max_to_do = chunk_mr.end();NOISY(tty->print_cr(" process_chunk_boundary: Head of last object on this card is not dirty;\n"" max_to_do left at " PTR_FORMAT,max_to_do);)} else {// The last object must be considered dirty, and extends onto the// following chunk. 
Look for a dirty card in that chunk that will// bound our processing.jbyte* limit_card = NULL;const size_t last_block_size = sp->block_size(last_block);jbyte* const last_card_of_last_obj =byte_for(last_block + last_block_size - 1);jbyte* const first_card_of_next_chunk = byte_for(chunk_mr.end());// This search potentially goes a long distance looking// for the next card that will be scanned, terminating// at the end of the last_block, if no earlier dirty card// is found.assert(byte_for(chunk_mr.end()) - byte_for(chunk_mr.start()) == ParGCCardsPerStrideChunk,"last card of next chunk may be wrong");for (jbyte* cur = first_card_of_next_chunk;cur <= last_card_of_last_obj; cur++) {const jbyte val = *cur;if (card_will_be_scanned(val)) {NOISY(tty->print_cr(" Found a non-clean card " PTR_FORMAT " with value 0x%x",cur, (int)val);)limit_card = cur; break;} else {assert(!card_may_have_been_dirty(val), "Error: card can't be skipped");}}if (limit_card != NULL) {max_to_do = addr_for(limit_card);assert(limit_card != NULL && max_to_do != NULL, "Error");NOISY(tty->print_cr(" process_chunk_boundary: Found a dirty card at " PTR_FORMAT" max_to_do set at " PTR_FORMAT " which is before end of last block in chunk: "PTR_FORMAT " + " PTR_FORMAT " = " PTR_FORMAT,limit_card, max_to_do, last_block, last_block_size, (last_block+last_block_size));)} else {// The following is a pessimistic value, because it's possible// that a dirty card on a subsequent chunk has been cleared by// the time we get to look at it; we'll correct for that further below,// using the LNC array which records the least non-clean card// before cards were cleared in a particular chunk.limit_card = last_card_of_last_obj;max_to_do = last_block + last_block_size;assert(limit_card != NULL && max_to_do != NULL, "Error");NOISY(tty->print_cr(" process_chunk_boundary: Found no dirty card before end of last block in chunk\n"" Setting limit_card to " PTR_FORMAT" and max_to_do " PTR_FORMAT " + " PTR_FORMAT " = " PTR_FORMAT,limit_card, last_block, last_block_size, max_to_do);)}assert(0 < cur_chunk_index+1 && cur_chunk_index+1 < lowest_non_clean_chunk_size,"Bounds error.");// It is possible that a dirty card for the last object may have been// cleared before we had a chance to examine it. 
In that case, the value// will have been logged in the LNC for that chunk.// We need to examine as many chunks to the right as this object// covers.const uintptr_t last_chunk_index_to_check = addr_to_chunk_index(last_block + last_block_size - 1)- lowest_non_clean_base_chunk_index;DEBUG_ONLY(const uintptr_t last_chunk_index = addr_to_chunk_index(used.last())- lowest_non_clean_base_chunk_index;)assert(last_chunk_index_to_check <= last_chunk_index,err_msg("Out of bounds: last_chunk_index_to_check " INTPTR_FORMAT" exceeds last_chunk_index " INTPTR_FORMAT,last_chunk_index_to_check, last_chunk_index));for (uintptr_t lnc_index = cur_chunk_index + 1;lnc_index <= last_chunk_index_to_check;lnc_index++) {jbyte* lnc_card = lowest_non_clean[lnc_index];if (lnc_card != NULL) {// we can stop at the first non-NULL entry we findif (lnc_card <= limit_card) {NOISY(tty->print_cr(" process_chunk_boundary: LNC card " PTR_FORMAT " is lower than limit_card " PTR_FORMAT," max_to_do will be lowered to " PTR_FORMAT " from " PTR_FORMAT,lnc_card, limit_card, addr_for(lnc_card), max_to_do);)limit_card = lnc_card;max_to_do = addr_for(limit_card);assert(limit_card != NULL && max_to_do != NULL, "Error");}// In any case, we break nowbreak;} // else continue to look for a non-NULL entry if any}assert(limit_card != NULL && max_to_do != NULL, "Error");}assert(max_to_do != NULL, "OOPS 1 !");}assert(max_to_do != NULL, "OOPS 2!");} else {max_to_do = used.end();NOISY(tty->print_cr(" process_chunk_boundary: Last chunk of this space;\n"" max_to_do left at " PTR_FORMAT,max_to_do);)}assert(max_to_do != NULL, "OOPS 3!");// Now we can set the closure we're using so it doesn't to beyond// max_to_do.dcto_cl->set_min_done(max_to_do); #ifndef PRODUCTdcto_cl->set_last_bottom(max_to_do); #endifNOISY(tty->print_cr("===========================================================================\n");) }#undef NOISYvoid CardTableModRefBS:: get_LNC_array_for_space(Space* sp,jbyte**& lowest_non_clean,uintptr_t& lowest_non_clean_base_chunk_index,size_t& lowest_non_clean_chunk_size) {int i = find_covering_region_containing(sp->bottom());MemRegion covered = _covered[i];size_t n_chunks = chunks_to_cover(covered);// Only the first thread to obtain the lock will resize the// LNC array for the covered region. Any later expansion can't affect// the used_at_save_marks region.// (I observed a bug in which the first thread to execute this would// resize, and then it would cause "expand_and_allocate" that would// increase the number of chunks in the covered region. Then a second// thread would come and execute this, see that the size didn't match,// and free and allocate again. So the first thread would be using a// freed "_lowest_non_clean" array.)// Do a dirty read here. 
If we pass the conditional then take the rare// event lock and do the read again in case some other thread had already// succeeded and done the resize.int cur_collection = Universe::heap()->total_collections();if (_last_LNC_resizing_collection[i] != cur_collection) {MutexLocker x(ParGCRareEvent_lock);if (_last_LNC_resizing_collection[i] != cur_collection) {if (_lowest_non_clean[i] == NULL ||n_chunks != _lowest_non_clean_chunk_size[i]) {// Should we delete the old?if (_lowest_non_clean[i] != NULL) {assert(n_chunks != _lowest_non_clean_chunk_size[i],"logical consequence");FREE_C_HEAP_ARRAY(CardPtr, _lowest_non_clean[i]);_lowest_non_clean[i] = NULL;}// Now allocate a new one if necessary.if (_lowest_non_clean[i] == NULL) {_lowest_non_clean[i] = NEW_C_HEAP_ARRAY(CardPtr, n_chunks);_lowest_non_clean_chunk_size[i] = n_chunks;_lowest_non_clean_base_chunk_index[i] = addr_to_chunk_index(covered.start());for (int j = 0; j < (int)n_chunks; j++)_lowest_non_clean[i][j] = NULL;}}_last_LNC_resizing_collection[i] = cur_collection;}}// In any case, now do the initialization.lowest_non_clean = _lowest_non_clean[i];lowest_non_clean_base_chunk_index = _lowest_non_clean_base_chunk_index[i];lowest_non_clean_chunk_size = _lowest_non_clean_chunk_size[i]; }parGCAllocBuffer.cpp
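parGCAllocBuffer.cpp implements the per-worker promotion-local allocation buffers (PLABs): each GC worker bump-allocates survivors out of a thread-local chunk of space, a retired buffer has its unused tail filled with a dummy int array so that the heap stays parsable, and PLABStats feeds the waste statistics back into the desired buffer size for the next scavenge. As a minimal sketch of the bump-allocate/retire idea only (a plain array stands in for heap words; this is not the HotSpot class and omits the filler objects and BOT handling):

// Toy bump-pointer PLAB: allocate() carves word-sized requests out of a
// pre-reserved buffer; retire() books the unused tail as waste, which is the
// quantity ParGCAllocBuffer::flush_stats() later reports to PLABStats.
#include <cstdint>
#include <cstdio>
#include <vector>

class ToyPlab {
    std::vector<size_t> _buf;   // pretend heap words
    size_t _top = 0;            // bump pointer, in words
    size_t _wasted = 0;
public:
    explicit ToyPlab(size_t word_sz) : _buf(word_sz, 0) {}

    // Returns a word offset into the buffer, or SIZE_MAX when the request
    // does not fit (the caller would then retire and refill, or fall back
    // to a shared allocation path).
    size_t allocate(size_t word_sz) {
        if (_top + word_sz > _buf.size()) return SIZE_MAX;
        size_t obj = _top;
        _top += word_sz;
        return obj;
    }

    // Retire the buffer: whatever was never handed out counts as waste.
    void retire() {
        _wasted += _buf.size() - _top;
        _top = _buf.size();
    }

    size_t wasted() const { return _wasted; }
};

int main() {
    ToyPlab plab(256);
    plab.allocate(10);
    plab.allocate(3);
    plab.retire();
    printf("wasted words: %zu\n", plab.wasted());   // 243
    return 0;
}

The real implementation follows.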
Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.** This code is free software; you can redistribute it and/or modify it* under the terms of the GNU General Public License version 2 only, as* published by the Free Software Foundation.** This code is distributed in the hope that it will be useful, but WITHOUT* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License* version 2 for more details (a copy is included in the LICENSE file that* accompanied this code).** You should have received a copy of the GNU General Public License version* 2 along with this work; if not, write to the Free Software Foundation,* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.** Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA* or visit www.oracle.com if you need additional information or have any* questions.**/#include "precompiled.hpp" #include "gc_implementation/parNew/parGCAllocBuffer.hpp" #include "memory/sharedHeap.hpp" #include "oops/arrayOop.hpp" #include "oops/oop.inline.hpp"ParGCAllocBuffer::ParGCAllocBuffer(size_t desired_plab_sz_) :_word_sz(desired_plab_sz_), _bottom(NULL), _top(NULL),_end(NULL), _hard_end(NULL),_retained(false), _retained_filler(),_allocated(0), _wasted(0) {assert (min_size() > AlignmentReserve, "Inconsistency!");// arrayOopDesc::header_size depends on command line initialization.FillerHeaderSize = align_object_size(arrayOopDesc::header_size(T_INT));AlignmentReserve = oopDesc::header_size() > MinObjAlignment ? FillerHeaderSize : 0; }size_t ParGCAllocBuffer::FillerHeaderSize;// If the minimum object size is greater than MinObjAlignment, we can // end up with a shard at the end of the buffer that's smaller than // the smallest object. We can't allow that because the buffer must // look like it's full of objects when we retire it, so we make // sure we have enough space for a filler int array object. size_t ParGCAllocBuffer::AlignmentReserve;void ParGCAllocBuffer::retire(bool end_of_gc, bool retain) {assert(!retain || end_of_gc, "Can only retain at GC end.");if (_retained) {// If the buffer had been retained shorten the previous filler object.assert(_retained_filler.end() <= _top, "INVARIANT");CollectedHeap::fill_with_object(_retained_filler);// Wasted space book-keeping, otherwise (normally) done in invalidate()_wasted += _retained_filler.word_size();_retained = false;}assert(!end_of_gc || !_retained, "At this point, end_of_gc ==> !_retained.");if (_top < _hard_end) {CollectedHeap::fill_with_object(_top, _hard_end);if (!retain) {invalidate();} else {// Is there wasted space we'd like to retain for the next GC?if (pointer_delta(_end, _top) > FillerHeaderSize) {_retained = true;_retained_filler = MemRegion(_top, FillerHeaderSize);_top = _top + FillerHeaderSize;} else {invalidate();}}} }void ParGCAllocBuffer::flush_stats(PLABStats* stats) {assert(ResizePLAB, "Wasted work");stats->add_allocated(_allocated);stats->add_wasted(_wasted);stats->add_unused(pointer_delta(_end, _top)); }// Compute desired plab size and latch result for later // use. This should be called once at the end of parallel // scavenge; it clears the sensor accumulators. 
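// (Annotation for this walkthrough) The resizing math below boils down to:
//   wasted_frac    = _unused / _allocated
//   target_refills = wasted_frac * TargetSurvivorRatio / TargetPLABWastePct
//   plab_sz        = (_allocated - _wasted - _unused) / (target_refills * ParallelGCThreads)
// The raw value is then smoothed through the weighted average in _filter,
// clipped to [min_size(), max_size()], aligned to an object boundary, and
// latched into _desired_plab_sz for the next scavenge.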
void PLABStats::adjust_desired_plab_sz() {assert(ResizePLAB, "Not set");if (_allocated == 0) {assert(_unused == 0, "Inconsistency in PLAB stats");_allocated = 1;}double wasted_frac = (double)_unused/(double)_allocated;size_t target_refills = (size_t)((wasted_frac*TargetSurvivorRatio)/TargetPLABWastePct);if (target_refills == 0) {target_refills = 1;}_used = _allocated - _wasted - _unused;size_t plab_sz = _used/(target_refills*ParallelGCThreads);if (PrintPLAB) gclog_or_tty->print(" (plab_sz = %d ", plab_sz);// Take historical weighted average_filter.sample(plab_sz);// Clip from above and below, and align to object boundaryplab_sz = MAX2(min_size(), (size_t)_filter.average());plab_sz = MIN2(max_size(), plab_sz);plab_sz = align_object_size(plab_sz);// Latch the resultif (PrintPLAB) gclog_or_tty->print(" desired_plab_sz = %d) ", plab_sz);if (ResizePLAB) {_desired_plab_sz = plab_sz;}// Now clear the accumulators for next round:// note this needs to be fixed in the case where we// are retaining across scavenges. FIX ME !!! XXX_allocated = 0;_wasted = 0;_unused = 0; }#ifndef PRODUCT void ParGCAllocBuffer::print() {gclog_or_tty->print("parGCAllocBuffer: _bottom: %p _top: %p _end: %p _hard_end: %p""_retained: %c _retained_filler: [%p,%p)\n",_bottom, _top, _end, _hard_end,"FT"[_retained], _retained_filler.start(), _retained_filler.end()); } #endif // !PRODUCTconst size_t ParGCAllocBufferWithBOT::ChunkSizeInWords = MIN2(CardTableModRefBS::par_chunk_heapword_alignment(),((size_t)Generation::GenGrain)/HeapWordSize); const size_t ParGCAllocBufferWithBOT::ChunkSizeInBytes = MIN2(CardTableModRefBS::par_chunk_heapword_alignment() * HeapWordSize,(size_t)Generation::GenGrain);ParGCAllocBufferWithBOT::ParGCAllocBufferWithBOT(size_t word_sz,BlockOffsetSharedArray* bsa) :ParGCAllocBuffer(word_sz),_bsa(bsa),_bt(bsa, MemRegion(_bottom, _hard_end)),_true_end(_hard_end) {}// The buffer comes with its own BOT, with a shared (obviously) underlying // BlockOffsetSharedArray. We manipulate this BOT in the normal way // as we would for any contiguous space. However, on accasion we // need to do some buffer surgery at the extremities before we // start using the body of the buffer for allocations. Such surgery // (as explained elsewhere) is to prevent allocation on a card that // is in the process of being walked concurrently by another GC thread. // When such surgery happens at a point that is far removed (to the // right of the current allocation point, top), we use the "contig" // parameter below to directly manipulate the shared array without // modifying the _next_threshold state in the BOT. 
void ParGCAllocBufferWithBOT::fill_region_with_block(MemRegion mr,bool contig) {CollectedHeap::fill_with_object(mr);if (contig) {_bt.alloc_block(mr.start(), mr.end());} else {_bt.BlockOffsetArray::alloc_block(mr.start(), mr.end());} }HeapWord* ParGCAllocBufferWithBOT::allocate_slow(size_t word_sz) {HeapWord* res = NULL;if (_true_end > _hard_end) {assert((HeapWord*)align_size_down(intptr_t(_hard_end),ChunkSizeInBytes) == _hard_end,"or else _true_end should be equal to _hard_end");assert(_retained, "or else _true_end should be equal to _hard_end");assert(_retained_filler.end() <= _top, "INVARIANT");CollectedHeap::fill_with_object(_retained_filler);if (_top < _hard_end) {fill_region_with_block(MemRegion(_top, _hard_end), true);}HeapWord* next_hard_end = MIN2(_true_end, _hard_end + ChunkSizeInWords);_retained_filler = MemRegion(_hard_end, FillerHeaderSize);_bt.alloc_block(_retained_filler.start(), _retained_filler.word_size());_top = _retained_filler.end();_hard_end = next_hard_end;_end = _hard_end - AlignmentReserve;res = ParGCAllocBuffer::allocate(word_sz);if (res != NULL) {_bt.alloc_block(res, word_sz);}}return res; }void ParGCAllocBufferWithBOT::undo_allocation(HeapWord* obj, size_t word_sz) {ParGCAllocBuffer::undo_allocation(obj, word_sz);// This may back us up beyond the previous threshold, so reset._bt.set_region(MemRegion(_top, _hard_end));_bt.initialize_threshold(); }void ParGCAllocBufferWithBOT::retire(bool end_of_gc, bool retain) {assert(!retain || end_of_gc, "Can only retain at GC end.");if (_retained) {// We're about to make the retained_filler into a block._bt.BlockOffsetArray::alloc_block(_retained_filler.start(),_retained_filler.end());}// Reset _hard_end to _true_end (and update _end)if (retain && _hard_end != NULL) {assert(_hard_end <= _true_end, "Invariant.");_hard_end = _true_end;_end = MAX2(_top, _hard_end - AlignmentReserve);assert(_end <= _hard_end, "Invariant.");}_true_end = _hard_end;HeapWord* pre_top = _top;ParGCAllocBuffer::retire(end_of_gc, retain);// Now any old _retained_filler is cut back to size, the free part is// filled with a filler object, and top is past the header of that// object.if (retain && _top < _end) {assert(end_of_gc && retain, "Or else retain should be false.");// If the lab does not start on a card boundary, we don't want to// allocate onto that card, since that might lead to concurrent// allocation and card scanning, which we don't support. So we fill// the first card with a garbage object.size_t first_card_index = _bsa->index_for(pre_top);HeapWord* first_card_start = _bsa->address_for_index(first_card_index);if (first_card_start < pre_top) {HeapWord* second_card_start =_bsa->inc_by_region_size(first_card_start);// Ensure enough room to fill with the smallest blocksecond_card_start = MAX2(second_card_start, pre_top + AlignmentReserve);// If the end is already in the first card, don't go beyond it!// Or if the remainder is too small for a filler object, gobble it up.if (_hard_end < second_card_start ||pointer_delta(_hard_end, second_card_start) < AlignmentReserve) {second_card_start = _hard_end;}if (pre_top < second_card_start) {MemRegion first_card_suffix(pre_top, second_card_start);fill_region_with_block(first_card_suffix, true);}pre_top = second_card_start;_top = pre_top;_end = MAX2(_top, _hard_end - AlignmentReserve);}// If the lab does not end on a card boundary, we don't want to// allocate onto that card, since that might lead to concurrent// allocation and card scanning, which we don't support. 
So we fill// the last card with a garbage object.size_t last_card_index = _bsa->index_for(_hard_end);HeapWord* last_card_start = _bsa->address_for_index(last_card_index);if (last_card_start < _hard_end) {// Ensure enough room to fill with the smallest blocklast_card_start = MIN2(last_card_start, _hard_end - AlignmentReserve);// If the top is already in the last card, don't go back beyond it!// Or if the remainder is too small for a filler object, gobble it up.if (_top > last_card_start ||pointer_delta(last_card_start, _top) < AlignmentReserve) {last_card_start = _top;}if (last_card_start < _hard_end) {MemRegion last_card_prefix(last_card_start, _hard_end);fill_region_with_block(last_card_prefix, false);}_hard_end = last_card_start;_end = MAX2(_top, _hard_end - AlignmentReserve);_true_end = _hard_end;assert(_end <= _hard_end, "Invariant.");}// At this point:// 1) we had a filler object from the original top to hard_end.// 2) We've filled in any partial cards at the front and back.if (pre_top < _hard_end) {// Now we can reset the _bt to do allocation in the given area.MemRegion new_filler(pre_top, _hard_end);fill_region_with_block(new_filler, false);_top = pre_top + ParGCAllocBuffer::FillerHeaderSize;// If there's no space left, don't retain.if (_top >= _end) {_retained = false;invalidate();return;}_retained_filler = MemRegion(pre_top, _top);_bt.set_region(MemRegion(_top, _hard_end));_bt.initialize_threshold();assert(_bt.threshold() > _top, "initialize_threshold failed!");// There may be other reasons for queries into the middle of the// filler object. When such queries are done in parallel with// allocation, bad things can happen, if the query involves object// iteration. So we ensure that such queries do not involve object// iteration, by putting another filler object on the boundaries of// such queries. One such is the object spanning a parallel card// chunk boundary.// "chunk_boundary" is the address of the first chunk boundary less// than "hard_end".HeapWord* chunk_boundary =(HeapWord*)align_size_down(intptr_t(_hard_end-1), ChunkSizeInBytes);assert(chunk_boundary < _hard_end, "Or else above did not work.");assert(pointer_delta(_true_end, chunk_boundary) >= AlignmentReserve,"Consequence of last card handling above.");if (_top <= chunk_boundary) {assert(_true_end == _hard_end, "Invariant.");while (_top <= chunk_boundary) {assert(pointer_delta(_hard_end, chunk_boundary) >= AlignmentReserve,"Consequence of last card handling above.");_bt.BlockOffsetArray::alloc_block(chunk_boundary, _hard_end);CollectedHeap::fill_with_object(chunk_boundary, _hard_end);_hard_end = chunk_boundary;chunk_boundary -= ChunkSizeInWords;}_end = _hard_end - AlignmentReserve;assert(_top <= _end, "Invariant.");// Now reset the initial filler chunk so it doesn't overlap with// the one(s) inserted above.MemRegion new_filler(pre_top, _hard_end);fill_region_with_block(new_filler, false);}} else {_retained = false;invalidate();}} else {assert(!end_of_gc ||(!_retained && _true_end == _hard_end), "Checking.");}assert(_end <= _hard_end, "Invariant.");assert(_top < _end || _top == _hard_end, "Invariant"); }總結(jié)
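Looking back at parNewGeneration.cpp, the heart of the parallel copying phase is the claim protocol in copy_to_survivor_space_avoiding_promotion_undo(): before promoting into the old generation a worker first CASes the sentinel ClaimedForwardPtr into the object's forwarding slot, only the winner performs the promotion and installs the real forwarding pointer, and any thread that observes the sentinel spins in real_forwardee_slow() until the real value appears. Below is a minimal standalone model of that protocol using std::atomic in place of the mark-word CAS; the sentinel value and types are illustrative only, not the HotSpot representation.

// Toy model of the ClaimedForwardPtr protocol: several threads race to forward
// one object; exactly one installs the real forwardee, the rest wait for it.
#include <atomic>
#include <cstdint>
#include <cstdio>
#include <thread>
#include <vector>

static constexpr uintptr_t kUnforwarded = 0;
static constexpr uintptr_t kClaimed     = 0x4;   // stands in for ClaimedForwardPtr

struct ToyObj { std::atomic<uintptr_t> forwardee{kUnforwarded}; };

uintptr_t forward(ToyObj& obj, uintptr_t my_copy) {
    uintptr_t expected = kUnforwarded;
    if (obj.forwardee.compare_exchange_strong(expected, kClaimed)) {
        // We won the claim: "promote" the object, then publish the real pointer.
        obj.forwardee.store(my_copy);
        return my_copy;
    }
    // Lost the race: spin until the winner publishes the real forwardee,
    // exactly as real_forwardee_slow() does.
    uintptr_t fwd;
    while ((fwd = obj.forwardee.load()) == kClaimed) { /* spin */ }
    return fwd;
}

int main() {
    ToyObj obj;
    std::vector<std::thread> workers;
    for (int t = 1; t <= 4; ++t) {
        workers.emplace_back([&obj, t] {
            uintptr_t fwd = forward(obj, 0x1000u * (unsigned)t);
            printf("thread %d sees forwardee %#lx\n", t, (unsigned long)fwd);
        });
    }
    for (auto& w : workers) { w.join(); }
    return 0;
}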
The above is the full content of "openjdk(HOTSPOT)垃圾回收源码解读" as collected and organized here; I hope it helps you solve the problems you run into.