
LeoFang / openjdk-1.8.0

forked from src-openEuler / openjdk-1.8.0 
This repository has not declared an open source license file (LICENSE). Before using it, check the project description and the licensing of its upstream code.
8205921-Optimizing-best-of-2-work-stealing-queue-selection.patch 27.33 KB
From 0939432f28371aa91ec3fdbb7b38838aa282ecfa Mon Sep 17 00:00:00 2001
From:
Date: Tue, 21 Jul 2020 04:04:12 +0800
Subject: [PATCH] 73
---
.../concurrentMarkSweepGeneration.cpp | 29 ++---
.../concurrentMarkSweepGeneration.hpp | 3 -
.../gc_implementation/g1/concurrentMark.cpp | 4 +-
.../gc_implementation/g1/concurrentMark.hpp | 6 +-
.../g1/g1ParScanThreadState.cpp | 2 +-
.../g1/g1ParScanThreadState.hpp | 2 -
.../g1/g1ParScanThreadState.inline.hpp | 2 +-
.../parNew/parNewGeneration.cpp | 2 -
.../parNew/parNewGeneration.hpp | 2 -
.../parallelScavenge/pcTasks.cpp | 8 +-
.../parallelScavenge/psCompactionManager.hpp | 12 +-
.../parallelScavenge/psPromotionManager.hpp | 4 +-
.../parallelScavenge/psTasks.cpp | 3 +-
.../shenandoah/shenandoahConcurrentMark.cpp | 3 +-
hotspot/src/share/vm/memory/padded.hpp | 6 +
hotspot/src/share/vm/utilities/taskqueue.cpp | 18 ---
hotspot/src/share/vm/utilities/taskqueue.hpp | 106 ++++++++++++++----
17 files changed, 120 insertions(+), 92 deletions(-)
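
At a glance, the change (JDK-8205921, per the patch file name) is one mechanical edit at every GC call site plus one substantive edit in taskqueue.hpp: the per-thread random seed that callers used to thread through steal() as an int* goes away, the seed moves into the thief's own GenericTaskQueue, and steal_best_of_2() additionally remembers which queue it last stole from successfully. A minimal sketch of the signature change only; these declarations are illustrative stand-ins, not the HotSpot types:

// Sketch: "before" and "after" shapes of the steal entry point.
struct Task;  // placeholder for oop / StarTask / ObjArrayTask / size_t

struct OldQueueSet {
  // Caller owned the seed (e.g. CMSCollector::_hash_seed[i]) and passed it in.
  bool steal(unsigned queue_num, int* seed, Task*& t);
};

struct NewQueueSet {
  // The seed and the "last stolen from" id now live inside the thief's queue.
  bool steal(unsigned queue_num, Task*& t);
};
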
diff --git a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
index 02a29c2b..53b75a4c 100644
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
@@ -680,11 +680,6 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
warning("task_queues allocation failure.");
return;
}
- _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues, mtGC);
- if (_hash_seed == NULL) {
- warning("_hash_seed array allocation failure");
- return;
- }
typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
for (i = 0; i < num_queues; i++) {
@@ -697,7 +692,6 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
}
for (i = 0; i < num_queues; i++) {
_task_queues->queue(i)->initialize();
- _hash_seed[i] = 17; // copied from ParNew
}
}
}
@@ -4391,7 +4385,6 @@ void CMSConcMarkingTask::do_work_steal(int i) {
oop obj_to_scan;
CMSBitMap* bm = &(_collector->_markBitMap);
CMSMarkStack* ovflw = &(_collector->_markStack);
- int* seed = _collector->hash_seed(i);
Par_ConcMarkingClosure cl(_collector, this, work_q, bm, ovflw);
while (true) {
cl.trim_queue(0);
@@ -4401,7 +4394,7 @@ void CMSConcMarkingTask::do_work_steal(int i) {
// overflow stack may already have been stolen from us.
// assert(work_q->size() > 0, "Work from overflow stack");
continue;
- } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
+ } else if (task_queues()->steal(i, /* reference */ obj_to_scan)) {
assert(obj_to_scan->is_oop(), "Should be an oop");
assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
obj_to_scan->oop_iterate(&cl);
@@ -5373,7 +5366,7 @@ class CMSParRemarkTask: public CMSParMarkTask {
Par_MarkRefsIntoAndScanClosure* cl);
// ... work stealing for the above
- void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
+ void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl);
};
class RemarkKlassClosure : public KlassClosure {
@@ -5539,7 +5532,7 @@ void CMSParRemarkTask::work(uint worker_id) {
// ---------- ... and drain overflow list.
_timer.reset();
_timer.start();
- do_work_steal(worker_id, &par_mrias_cl, _collector->hash_seed(worker_id));
+ do_work_steal(worker_id, &par_mrias_cl);
_timer.stop();
if (PrintCMSStatistics != 0) {
gclog_or_tty->print_cr(
@@ -5696,8 +5689,7 @@ CMSParRemarkTask::do_dirty_card_rescan_tasks(
// . see if we can share work_queues with ParNew? XXX
void
-CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl,
- int* seed) {
+CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl) {
OopTaskQueue* work_q = work_queue(i);
NOT_PRODUCT(int num_steals = 0;)
oop obj_to_scan;
@@ -5728,7 +5720,7 @@ CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl,
// Verify that we have no work before we resort to stealing
assert(work_q->size() == 0, "Have work, shouldn't steal");
// Try to steal from other queues that have work
- if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
+ if (task_queues()->steal(i,/* reference */ obj_to_scan)) {
NOT_PRODUCT(num_steals++;)
assert(obj_to_scan->is_oop(), "Oops, not an oop!");
assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
@@ -6141,8 +6133,7 @@ public:
void do_work_steal(int i,
CMSParDrainMarkingStackClosure* drain,
- CMSParKeepAliveClosure* keep_alive,
- int* seed);
+ CMSParKeepAliveClosure* keep_alive);
virtual void work(uint worker_id);
};
@@ -6160,8 +6151,7 @@ void CMSRefProcTaskProxy::work(uint worker_id) {
CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
_task.work(worker_id, is_alive_closure, par_keep_alive, par_drain_stack);
if (_task.marks_oops_alive()) {
- do_work_steal(worker_id, &par_drain_stack, &par_keep_alive,
- _collector->hash_seed(worker_id));
+ do_work_steal(worker_id, &par_drain_stack, &par_keep_alive);
}
assert(work_queue(worker_id)->size() == 0, "work_queue should be empty");
assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
@@ -6196,8 +6186,7 @@ CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
// . see if we can share work_queues with ParNew? XXX
void CMSRefProcTaskProxy::do_work_steal(int i,
CMSParDrainMarkingStackClosure* drain,
- CMSParKeepAliveClosure* keep_alive,
- int* seed) {
+ CMSParKeepAliveClosure* keep_alive) {
OopTaskQueue* work_q = work_queue(i);
NOT_PRODUCT(int num_steals = 0;)
oop obj_to_scan;
@@ -6226,7 +6215,7 @@ void CMSRefProcTaskProxy::do_work_steal(int i,
// Verify that we have no work before we resort to stealing
assert(work_q->size() == 0, "Have work, shouldn't steal");
// Try to steal from other queues that have work
- if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
+ if (task_queues()->steal(i, /* reference */ obj_to_scan)) {
NOT_PRODUCT(num_steals++;)
assert(obj_to_scan->is_oop(), "Oops, not an oop!");
assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
diff --git a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp
index 8b65d342..ca3fee21 100644
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp
@@ -563,8 +563,6 @@ class CMSCollector: public CHeapObj<mtGC> {
Stack<oop, mtGC> _preserved_oop_stack;
Stack<markOop, mtGC> _preserved_mark_stack;
- int* _hash_seed;
-
// In support of multi-threaded concurrent phases
YieldingFlexibleWorkGang* _conc_workers;
@@ -741,7 +739,6 @@ class CMSCollector: public CHeapObj<mtGC> {
bool stop_world_and_do(CMS_op_type op);
OopTaskQueueSet* task_queues() { return _task_queues; }
- int* hash_seed(int i) { return &_hash_seed[i]; }
YieldingFlexibleWorkGang* conc_workers() { return _conc_workers; }
// Support for parallelizing Eden rescan in CMS remark phase
diff --git a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp
index 28dd5aad..271b33a5 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp
@@ -4421,7 +4421,7 @@ void CMTask::do_marking_step(double time_target_ms,
oop obj;
statsOnly( ++_steal_attempts );
- if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
+ if (_cm->try_stealing(_worker_id, obj)) {
if (_cm->verbose_medium()) {
gclog_or_tty->print_cr("[%u] stolen " PTR_FORMAT " successfully",
_worker_id, p2i((void*) obj));
@@ -4612,7 +4614,7 @@ CMTask::CMTask(uint worker_id,
_worker_id(worker_id), _cm(cm),
_objArray_processor(this),
_claimed(false),
- _nextMarkBitMap(NULL), _hash_seed(17),
+ _nextMarkBitMap(NULL),
_task_queue(task_queue),
_task_queues(task_queues),
_cm_oop_closure(NULL),
diff --git a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp
index 02a0cb18..1d785c19 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp
@@ -679,8 +679,8 @@ public:
}
// Attempts to steal an object from the task queues of other tasks
- bool try_stealing(uint worker_id, int* hash_seed, oop& obj) {
- return _task_queues->steal(worker_id, hash_seed, obj);
+ bool try_stealing(uint worker_id, oop& obj) {
+ return _task_queues->steal(worker_id, obj);
}
ConcurrentMark(G1CollectedHeap* g1h,
@@ -1004,8 +1004,6 @@ private:
// it was decreased).
size_t _real_refs_reached_limit;
- // used by the work stealing stuff
- int _hash_seed;
// if this is true, then the task has aborted for some reason
bool _has_aborted;
// set when the task aborts because it has met its time quota
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp
index e765620b..394f20e8 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp
@@ -36,7 +36,7 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num,
_dcq(&g1h->dirty_card_queue_set()),
_ct_bs(g1h->g1_barrier_set()),
_g1_rem(g1h->g1_rem_set()),
- _hash_seed(17), _queue_num(queue_num),
+ _queue_num(queue_num),
_term_attempts(0),
_tenuring_threshold(g1h->g1_policy()->tenuring_threshold()),
_age_table(false), _scanner(g1h, rp),
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp
index d9403aa4..990b71d3 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp
@@ -59,7 +59,6 @@ class G1ParScanThreadState : public StackObj {
OopsInHeapRegionClosure* _evac_failure_cl;
- int _hash_seed;
uint _queue_num;
size_t _term_attempts;
@@ -129,7 +128,6 @@ class G1ParScanThreadState : public StackObj {
OopsInHeapRegionClosure* evac_failure_closure() { return _evac_failure_cl; }
- int* hash_seed() { return &_hash_seed; }
uint queue_num() { return _queue_num; }
size_t term_attempts() const { return _term_attempts; }
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp
index 1b03f8ca..7dedb151 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp
@@ -131,7 +131,7 @@ inline void G1ParScanThreadState::dispatch_reference(StarTask ref) {
void G1ParScanThreadState::steal_and_trim_queue(RefToScanQueueSet *task_queues) {
StarTask stolen_task;
- while (task_queues->steal(queue_num(), hash_seed(), stolen_task)) {
+ while (task_queues->steal(queue_num(), stolen_task)) {
assert(verify_task(stolen_task), "sanity");
dispatch_reference(stolen_task);
diff --git a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
index 84cd4ed7..c07e9b81 100644
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
@@ -92,7 +92,6 @@ ParScanThreadState::ParScanThreadState(Space* to_space_,
_survivor_chunk_array =
(ChunkArray*) old_gen()->get_data_recorder(thread_num());
- _hash_seed = 17; // Might want to take time-based random value.
_start = os::elapsedTime();
_old_gen_closure.set_generation(old_gen_);
_old_gen_root_closure.set_generation(old_gen_);
@@ -560,7 +559,6 @@ void ParEvacuateFollowersClosure::do_void() {
// attempt to steal work from promoted.
if (task_queues()->steal(par_scan_state()->thread_num(),
- par_scan_state()->hash_seed(),
obj_to_scan)) {
bool res = work_q->push(obj_to_scan);
assert(res, "Empty queue should have room for a push.");
diff --git a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp
index fa4265a2..ea527fdb 100644
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp
@@ -95,7 +95,6 @@ class ParScanThreadState {
HeapWord *_young_old_boundary;
- int _hash_seed;
int _thread_num;
ageTable _ageTable;
@@ -161,7 +160,6 @@ class ParScanThreadState {
// Is new_obj a candidate for scan_partial_array_and_push_remainder method.
inline bool should_be_partially_scanned(oop new_obj, oop old_obj) const;
- int* hash_seed() { return &_hash_seed; }
int thread_num() { return _thread_num; }
// Allocate a to-space block of size "sz", or else return NULL.
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp
index 35ea2992..37610f3d 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp
@@ -217,14 +217,13 @@ void StealMarkingTask::do_it(GCTaskManager* manager, uint which) {
oop obj = NULL;
ObjArrayTask task;
- int random_seed = 17;
do {
- while (ParCompactionManager::steal_objarray(which, &random_seed, task)) {
+ while (ParCompactionManager::steal_objarray(which, task)) {
ObjArrayKlass* k = (ObjArrayKlass*)task.obj()->klass();
k->oop_follow_contents(cm, task.obj(), task.index());
cm->follow_marking_stacks();
}
- while (ParCompactionManager::steal(which, &random_seed, obj)) {
+ while (ParCompactionManager::steal(which, obj)) {
obj->follow_contents(cm);
cm->follow_marking_stacks();
}
@@ -280,13 +279,12 @@ void StealRegionCompactionTask::do_it(GCTaskManager* manager, uint which) {
cm->drain_region_stacks();
size_t region_index = 0;
- int random_seed = 17;
// If we're the termination task, try 10 rounds of stealing before
// setting the termination flag
while(true) {
- if (ParCompactionManager::steal(which, &random_seed, region_index)) {
+ if (ParCompactionManager::steal(which, region_index)) {
PSParallelCompact::fill_and_update_region(cm, region_index);
cm->drain_region_stacks();
} else {
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp
index 7d7a9f49..a16a1676 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp
@@ -177,16 +177,16 @@ private:
// Access function for compaction managers
static ParCompactionManager* gc_thread_compaction_manager(int index);
- static bool steal(int queue_num, int* seed, oop& t) {
- return stack_array()->steal(queue_num, seed, t);
+ static bool steal(int queue_num, oop& t) {
+ return stack_array()->steal(queue_num, t);
}
- static bool steal_objarray(int queue_num, int* seed, ObjArrayTask& t) {
- return _objarray_queues->steal(queue_num, seed, t);
+ static bool steal_objarray(int queue_num, ObjArrayTask& t) {
+ return _objarray_queues->steal(queue_num, t);
}
- static bool steal(int queue_num, int* seed, size_t& region) {
- return region_array()->steal(queue_num, seed, region);
+ static bool steal(int queue_num, size_t& region) {
+ return region_array()->steal(queue_num, region);
}
// Process tasks remaining on any marking stack
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp
index 6eef954b..542d8694 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp
@@ -164,8 +164,8 @@ class PSPromotionManager VALUE_OBJ_CLASS_SPEC {
static PSPromotionManager* gc_thread_promotion_manager(int index);
static PSPromotionManager* vm_thread_promotion_manager();
- static bool steal_depth(int queue_num, int* seed, StarTask& t) {
- return stack_array_depth()->steal(queue_num, seed, t);
+ static bool steal_depth(int queue_num, StarTask& t) {
+ return stack_array_depth()->steal(queue_num, t);
}
PSPromotionManager();
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp
index f829e934..4fe869fd 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp
@@ -151,10 +151,9 @@ void StealTask::do_it(GCTaskManager* manager, uint which) {
guarantee(pm->stacks_empty(),
"stacks should be empty at this point");
- int random_seed = 17;
while(true) {
StarTask p;
- if (PSPromotionManager::steal_depth(which, &random_seed, p)) {
+ if (PSPromotionManager::steal_depth(which, p)) {
TASKQUEUE_STATS_ONLY(pm->record_steal(p));
pm->process_popped_location_depth(p);
pm->drain_stacks_depth(true);
diff --git a/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahConcurrentMark.cpp b/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahConcurrentMark.cpp
index 85bbea6c..afcb0dd4 100644
--- a/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahConcurrentMark.cpp
+++ b/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahConcurrentMark.cpp
@@ -939,7 +939,6 @@ void ShenandoahConcurrentMark::mark_loop_prework(uint w, ShenandoahTaskTerminato
template <class T, bool CANCELLABLE>
void ShenandoahConcurrentMark::mark_loop_work(T* cl, ShenandoahLiveData* live_data, uint worker_id, ShenandoahTaskTerminator *terminator) {
- int seed = 17;
uintx stride = ShenandoahMarkLoopStride;
ShenandoahHeap* heap = ShenandoahHeap::heap();
@@ -999,7 +998,7 @@ void ShenandoahConcurrentMark::mark_loop_work(T* cl, ShenandoahLiveData* live_da
uint work = 0;
for (uint i = 0; i < stride; i++) {
if (q->pop(t) ||
- queues->steal(worker_id, &seed, t)) {
+ queues->steal(worker_id, t)) {
do_task<T>(q, cl, live_data, &t);
work++;
} else {
diff --git a/hotspot/src/share/vm/memory/padded.hpp b/hotspot/src/share/vm/memory/padded.hpp
index 9ddd14f8..34eccd66 100644
--- a/hotspot/src/share/vm/memory/padded.hpp
+++ b/hotspot/src/share/vm/memory/padded.hpp
@@ -80,6 +80,12 @@ class PaddedEnd : public PaddedEndImpl<T, PADDED_END_SIZE(T, alignment)> {
// super class that is specialized for the pad_size == 0 case.
};
+// Similar to PaddedEnd, this macro defines a _pad_buf#id field
+// that is (alignment - size) bytes in size. This macro is used
+// to add padding in between non-class fields in a class or struct.
+#define DEFINE_PAD_MINUS_SIZE(id, alignment, size) \
+ char _pad_buf##id[(alignment) - (size)]
+
// Helper class to create an array of PaddedEnd<T> objects. All elements will
// start at a multiple of alignment and the size will be aligned to alignment.
template <class T, MEMFLAGS flags, size_t alignment = DEFAULT_CACHE_LINE_SIZE>
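
The new DEFINE_PAD_MINUS_SIZE macro simply declares a char array of (alignment - size) bytes, so a field (or group of fields) totalling size bytes plus its pad fills a whole cache line. Below is a standalone sketch of the layout the taskqueue.hpp hunk further down builds with it; the struct and the fixed 64-byte line size are illustrative (HotSpot uses DEFAULT_CACHE_LINE_SIZE):

// Compile-time illustration of padding fields apart with a macro of this shape.
#include <cstddef>

#define CACHE_LINE 64
#define PAD_MINUS_SIZE(id, alignment, size) char _pad_buf##id[(alignment) - (size)]

struct PaddedExample {
  PAD_MINUS_SIZE(0, CACHE_LINE, 0);              // a full line of padding up front
  void* _elems;                                  // shared pointer, like GenericTaskQueue::_elems
  PAD_MINUS_SIZE(1, CACHE_LINE, sizeof(void*));  // finish _elems' cache line
  unsigned int _last_stolen_queue_id;            // owner-local fields start on a fresh line
  int _seed;
  PAD_MINUS_SIZE(2, CACHE_LINE, sizeof(unsigned int) + sizeof(int));
};

static_assert(offsetof(PaddedExample, _last_stolen_queue_id) % CACHE_LINE == 0,
              "owner-local fields begin on their own cache line");
static_assert(sizeof(PaddedExample) % CACHE_LINE == 0,
              "the struct occupies a whole number of cache lines");
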
diff --git a/hotspot/src/share/vm/utilities/taskqueue.cpp b/hotspot/src/share/vm/utilities/taskqueue.cpp
index 0f4dcc90..37f4066a 100644
--- a/hotspot/src/share/vm/utilities/taskqueue.cpp
+++ b/hotspot/src/share/vm/utilities/taskqueue.cpp
@@ -112,24 +112,6 @@ void TaskQueueStats::verify() const
#endif // ASSERT
#endif // TASKQUEUE_STATS
-int TaskQueueSetSuper::randomParkAndMiller(int *seed0) {
- const int a = 16807;
- const int m = 2147483647;
- const int q = 127773; /* m div a */
- const int r = 2836; /* m mod a */
- assert(sizeof(int) == 4, "I think this relies on that");
- int seed = *seed0;
- int hi = seed / q;
- int lo = seed % q;
- int test = a * lo - r * hi;
- if (test > 0)
- seed = test;
- else
- seed = test + m;
- *seed0 = seed;
- return seed;
-}
-
ParallelTaskTerminator::
ParallelTaskTerminator(int n_threads, TaskQueueSetSuper* queue_set) :
_n_threads(n_threads),
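
Note that the Park-Miller generator is not deleted here, only relocated: the same arithmetic reappears as an inline free function in the taskqueue.hpp hunk below, and each queue now feeds it its own _seed through next_random_queue_id(). A standalone sketch of that generator; the driver loop and the queue count of 8 are illustrative:

// Park-Miller "minimal standard" PRNG: next = (16807 * seed) mod (2^31 - 1),
// computed with Schrage's decomposition (q = m / a, r = m % a) to avoid
// 32-bit overflow, matching the constants in the removed and re-added code.
#include <cassert>
#include <cstdio>

static int park_miller_next(int* seed0) {
  const int a = 16807;
  const int m = 2147483647;
  const int q = 127773;   // m / a
  const int r = 2836;     // m % a
  int seed = *seed0;
  int test = a * (seed % q) - r * (seed / q);
  *seed0 = (test > 0) ? test : test + m;
  return *seed0;
}

int main() {
  int seed = 17;                                  // the queues also start from 17
  const unsigned n_queues = 8;                    // illustrative queue count
  for (int i = 0; i < 4; i++) {
    int v = park_miller_next(&seed);
    assert(v > 0 && v < 2147483647);              // outputs stay in [1, m - 1]
    printf("candidate victim = %u\n", (unsigned)v % n_queues);
  }
  return 0;
}
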
diff --git a/hotspot/src/share/vm/utilities/taskqueue.hpp b/hotspot/src/share/vm/utilities/taskqueue.hpp
index dec76c51..5b03ccfa 100644
--- a/hotspot/src/share/vm/utilities/taskqueue.hpp
+++ b/hotspot/src/share/vm/utilities/taskqueue.hpp
@@ -27,6 +27,7 @@
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
+#include "memory/padded.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "utilities/globalDefinitions.hpp"
@@ -307,12 +308,30 @@ public:
void oops_do(OopClosure* f);
private:
+ DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, 0);
// Element array.
volatile E* _elems;
+
+ DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(E*));
+ // Queue owner local variables. Not to be accessed by other threads.
+
+ static const uint InvalidQueueId = uint(-1);
+ uint _last_stolen_queue_id; // The id of the queue we last stole from
+
+ int _seed; // Current random seed used for selecting a random queue during stealing.
+
+ DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(uint) + sizeof(int));
+public:
+ int next_random_queue_id();
+
+ void set_last_stolen_queue_id(uint id) { _last_stolen_queue_id = id; }
+ uint last_stolen_queue_id() const { return _last_stolen_queue_id; }
+ bool is_last_stolen_queue_id_valid() const { return _last_stolen_queue_id != InvalidQueueId; }
+ void invalidate_last_stolen_queue_id() { _last_stolen_queue_id = InvalidQueueId; }
};
template<class E, MEMFLAGS F, unsigned int N>
-GenericTaskQueue<E, F, N>::GenericTaskQueue() {
+GenericTaskQueue<E, F, N>::GenericTaskQueue() : _last_stolen_queue_id(InvalidQueueId), _seed(17 /* random number */) {
assert(sizeof(Age) == sizeof(size_t), "Depends on this.");
}
@@ -426,6 +445,30 @@ bool GenericTaskQueue<E, F, N>::pop_global(volatile E& t) {
return resAge == oldAge;
}
+inline int randomParkAndMiller(int *seed0) {
+ const int a = 16807;
+ const int m = 2147483647;
+ const int q = 127773; /* m div a */
+ const int r = 2836; /* m mod a */
+ STATIC_ASSERT(sizeof(int) == 4);
+ int seed = *seed0;
+ int hi = seed / q;
+ int lo = seed % q;
+ int test = a * lo - r * hi;
+ if (test > 0) {
+ seed = test;
+ } else {
+ seed = test + m;
+ }
+ *seed0 = seed;
+ return seed;
+}
+
+template<class E, MEMFLAGS F, unsigned int N>
+int GenericTaskQueue<E, F, N>::next_random_queue_id() {
+ return randomParkAndMiller(&_seed);
+}
+
template<class E, MEMFLAGS F, unsigned int N>
GenericTaskQueue<E, F, N>::~GenericTaskQueue() {
FREE_C_HEAP_ARRAY(E, _elems, F);
@@ -495,8 +538,6 @@ bool OverflowTaskQueue<E, F, N>::try_push_to_taskqueue(E t) {
return taskqueue_t::push(t);
}
class TaskQueueSetSuper {
-protected:
- static int randomParkAndMiller(int* seed0);
public:
// Returns "true" if some TaskQueue in the set contains a task.
virtual bool peek() = 0;
@@ -517,27 +558,23 @@ private:
public:
typedef typename T::element_type E;
- GenericTaskQueueSet(int n) : _n(n) {
+ GenericTaskQueueSet(uint n) : _n(n) {
typedef T* GenericTaskQueuePtr;
_queues = NEW_C_HEAP_ARRAY(GenericTaskQueuePtr, n, F);
- for (int i = 0; i < n; i++) {
+ for (uint i = 0; i < n; i++) {
_queues[i] = NULL;
}
}
- bool steal_best_of_2(uint queue_num, int* seed, E& t);
+ bool steal_best_of_2(uint queue_num, E& t);
void register_queue(uint i, T* q);
T* queue(uint n);
- // The thread with queue number "queue_num" (and whose random number seed is
- // at "seed") is trying to steal a task from some other queue. (It may try
- // several queues, according to some configuration parameter.) If some steal
- // succeeds, returns "true" and sets "t" to the stolen task, otherwise returns
- // false.
- bool steal(uint queue_num, int* seed, E& t);
-
+ // Try to steal a task from some other queue than queue_num. It may perform several attempts at doing so.
+ // Returns if stealing succeeds, and sets "t" to the stolen task.
+ bool steal(uint queue_num, E& t);
bool peek();
uint tasks() const;
size_t tasks();
@@ -557,9 +594,9 @@ GenericTaskQueueSet<T, F>::queue(uint i) {
}
template<class T, MEMFLAGS F> bool
-GenericTaskQueueSet<T, F>::steal(uint queue_num, int* seed, E& t) {
+GenericTaskQueueSet<T, F>::steal(uint queue_num, E& t) {
for (uint i = 0; i < 2 * _n; i++) {
- if (steal_best_of_2(queue_num, seed, t)) {
+ if (steal_best_of_2(queue_num, t)) {
TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal(true));
return true;
}
@@ -569,17 +606,46 @@ GenericTaskQueueSet<T, F>::steal(uint queue_num, int* seed, E& t) {
}
template<class T, MEMFLAGS F> bool
-GenericTaskQueueSet<T, F>::steal_best_of_2(uint queue_num, int* seed, E& t) {
+GenericTaskQueueSet<T, F>::steal_best_of_2(uint queue_num, E& t) {
if (_n > 2) {
+ T* const local_queue = _queues[queue_num];
uint k1 = queue_num;
- while (k1 == queue_num) k1 = TaskQueueSetSuper::randomParkAndMiller(seed) % _n;
+
+ if (local_queue->is_last_stolen_queue_id_valid()) {
+ k1 = local_queue->last_stolen_queue_id();
+ assert(k1 != queue_num, "Should not be the same");
+ } else {
+ while (k1 == queue_num) {
+ k1 = local_queue->next_random_queue_id() % _n;
+ }
+ }
+
uint k2 = queue_num;
- while (k2 == queue_num || k2 == k1) k2 = TaskQueueSetSuper::randomParkAndMiller(seed) % _n;
+ while (k2 == queue_num || k2 == k1) {
+ k2 = local_queue->next_random_queue_id() % _n;
+ }
// Sample both and try the larger.
uint sz1 = _queues[k1]->size();
uint sz2 = _queues[k2]->size();
- if (sz2 > sz1) return _queues[k2]->pop_global(t);
- else return _queues[k1]->pop_global(t);
+
+ uint sel_k = 0;
+ bool suc = false;
+
+ if (sz2 > sz1) {
+ sel_k = k2;
+ suc = _queues[k2]->pop_global(t);
+ } else if (sz1 > 0) {
+ sel_k = k1;
+ suc = _queues[k1]->pop_global(t);
+ }
+
+ if (suc) {
+ local_queue->set_last_stolen_queue_id(sel_k);
+ } else {
+ local_queue->invalidate_last_stolen_queue_id();
+ }
+
+ return suc;
} else if (_n == 2) {
// Just try the other one.
uint k = (queue_num + 1) % 2;
--
2.19.0
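
The behavioural core of the patch is the rewritten steal_best_of_2() above: a thief that stole successfully keeps going back to the same victim, otherwise it samples two distinct random queues and tries the fuller one, and it drops the cached victim id as soon as a steal fails. Below is a toy model of that policy, not the HotSpot classes; it assumes at least three queues (the _n > 2 branch), all names are illustrative, and the smaller-set cases that the real code handles separately are omitted:

// Toy model of best-of-2 stealing with a cached "last stolen from" victim.
#include <cstdint>
#include <cstdio>
#include <vector>

struct ToyQueue {                               // stand-in for GenericTaskQueue
  std::vector<int> tasks;
  uint32_t size() const { return (uint32_t)tasks.size(); }
  bool pop_global(int& t) {                     // "steal one task from this queue"
    if (tasks.empty()) return false;
    t = tasks.back();
    tasks.pop_back();
    return true;
  }
};

static const uint32_t InvalidQueueId = (uint32_t)-1;

struct Thief {
  uint32_t id;
  uint32_t last_stolen;                         // mirrors _last_stolen_queue_id
  int seed;                                     // mirrors the per-queue _seed
  uint32_t next_random(uint32_t n) {            // next_random_queue_id() % _n
    seed = (int)((16807LL * seed) % 2147483647LL);
    return (uint32_t)seed % n;
  }
};

static bool steal_best_of_2(Thief& me, std::vector<ToyQueue>& queues, int& t) {
  const uint32_t n = (uint32_t)queues.size();
  uint32_t k1 = me.id;
  if (me.last_stolen != InvalidQueueId) {
    k1 = me.last_stolen;                        // reuse the previous victim
  } else {
    while (k1 == me.id) k1 = me.next_random(n);
  }
  uint32_t k2 = me.id;
  while (k2 == me.id || k2 == k1) k2 = me.next_random(n);

  // Sample both, try the larger, and never touch a k1 we can see is empty.
  uint32_t sz1 = queues[k1].size();
  uint32_t sz2 = queues[k2].size();
  uint32_t sel = 0;
  bool ok = false;
  if (sz2 > sz1) {
    sel = k2; ok = queues[k2].pop_global(t);
  } else if (sz1 > 0) {
    sel = k1; ok = queues[k1].pop_global(t);
  }
  me.last_stolen = ok ? sel : InvalidQueueId;   // cache on success, forget on failure
  return ok;
}

int main() {
  std::vector<ToyQueue> queues(4);
  for (int v = 0; v < 3; v++) queues[2].tasks.push_back(10 + v);  // queue 2 has work
  queues[3].tasks.push_back(99);                                  // so does queue 3

  Thief thief;
  thief.id = 0;
  thief.last_stolen = InvalidQueueId;
  thief.seed = 17;

  int task;
  // Like GenericTaskQueueSet::steal(), give best-of-2 up to 2 * n attempts per
  // round. A round may still come up empty while work remains; in HotSpot the
  // surrounding GC loop retries and a terminator decides when to stop.
  for (int round = 0; round < 6; round++) {
    bool ok = false;
    for (uint32_t i = 0; i < 2 * (uint32_t)queues.size() && !ok; i++) {
      ok = steal_best_of_2(thief, queues, task);
    }
    if (ok) printf("round %d: stole %d from queue %u\n", round, task, thief.last_stolen);
    else    printf("round %d: nothing stolen\n", round);
  }
  return 0;
}
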
https://gitee.com/leofang94/openjdk-1.8.0.git
git@gitee.com:leofang94/openjdk-1.8.0.git