@@ -8143,6 +8143,8 @@ begin_bench:
 		int ts, max_staged = opt_queue;
 		struct pool *pool, *cp;
 		bool lagging = false;
+		struct timespec then;
+		struct timeval now;
 		struct work *work;
 
 		if (opt_work_update)
@@ -8155,6 +8157,10 @@ begin_bench:
 		if (!pool_localgen(cp) && !staged_rollable)
 			max_staged += mining_threads;
 
+		cgtime(&now);
+		then.tv_sec = now.tv_sec + 2;
+		then.tv_nsec = now.tv_usec * 1000;
+
 		mutex_lock(stgd_lock);
 		ts = __total_staged();
 
@@ -8163,7 +8169,7 @@ begin_bench:
 
 		/* Wait until hash_pop tells us we need to create more work */
 		if (ts > max_staged) {
-			pthread_cond_wait(&gws_cond, stgd_lock);
+			pthread_cond_timedwait(&gws_cond, stgd_lock, &then);
 			ts = __total_staged();
 		}
 		mutex_unlock(stgd_lock);
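For context, the hunks above replace the unbounded pthread_cond_wait() on gws_cond with a pthread_cond_timedwait() whose absolute deadline is built from the current time plus two seconds, so the work-generation loop wakes up and re-counts the staged work even if no signal from hash_pop arrives. The following is a minimal, self-contained sketch of that timed-wait pattern under stated assumptions; the names queue_lock, queue_cond, queue_len and want_len are hypothetical stand-ins for cgminer's stgd_lock, gws_cond and __total_staged(), and plain gettimeofday() stands in for cgtime().

/* Minimal sketch of the timed-wait pattern, not cgminer code.
 * Identifiers here are illustrative only. */
#include <pthread.h>
#include <sys/time.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t queue_cond = PTHREAD_COND_INITIALIZER;
static int queue_len, want_len;

/* Wait for the consumer to signal demand, but never longer than ~2
 * seconds, so the producer re-checks the queue even on a missed wakeup. */
static void wait_for_demand(void)
{
	struct timeval now;
	struct timespec then;

	gettimeofday(&now, NULL);
	/* pthread_cond_timedwait() takes an absolute CLOCK_REALTIME
	 * deadline, so convert "now + 2s" into a struct timespec,
	 * mirroring the tv_sec/tv_nsec arithmetic in the patch. */
	then.tv_sec = now.tv_sec + 2;
	then.tv_nsec = now.tv_usec * 1000;

	pthread_mutex_lock(&queue_lock);
	if (queue_len > want_len)
		pthread_cond_timedwait(&queue_cond, &queue_lock, &then);
	pthread_mutex_unlock(&queue_lock);
}

The design point carried over from the patch is that the deadline is absolute rather than relative, which is what pthread_cond_timedwait() expects, and that the staged count is re-read after the wait returns regardless of whether it timed out or was signalled.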