|
|
@@ -3957,14 +3957,23 @@ static bool queue_request(struct thr_info *thr, bool needed)
 
 static struct work *hash_pop(const struct timespec *abstime)
 {
-	struct work *work = NULL;
-	int rc = 0;
+	struct work *work = NULL, *tmp;
+	int rc = 0, hc;
 
 	mutex_lock(stgd_lock);
 	while (!getq->frozen && !HASH_COUNT(staged_work) && !rc)
 		rc = pthread_cond_timedwait(&getq->cond, stgd_lock, abstime);
 
-	if (HASH_COUNT(staged_work)) {
-		work = staged_work;
+	hc = HASH_COUNT(staged_work);
+
+	if (likely(hc)) {
+		/* Find clone work if possible, to allow masters to be reused */
+		if (hc > staged_rollable) {
+			HASH_ITER(hh, staged_work, work, tmp) {
+				if (!work_rollable(work))
+					break;
+			}
+		} else
+			work = staged_work;
 		HASH_DEL(staged_work, work);
 		work->pool->staged--;
@@ -4110,7 +4119,6 @@ retry:
 		pool_resus(pool);
 	}
 
-	work_heap = clone_work(work_heap);
 	memcpy(work, work_heap, sizeof(struct work));
 	free_work(work_heap);
 
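For context, the new selection logic in hash_pop() prefers handing out cloned work so that rollable "master" work items stay in the staged hash table and can be rolled again. The sketch below is a minimal, self-contained illustration of that uthash pattern, not cgminer code: when the table holds more entries than there are rollable ones, it walks the table with HASH_ITER and takes the first non-rollable entry; otherwise it takes the head. The item struct, pop_preferring_clones() and n_rollable are hypothetical names introduced here for illustration; only the uthash macros (HASH_COUNT, HASH_ITER, HASH_DEL, HASH_ADD_INT) are real.

#include <stdio.h>
#include "uthash.h"

/* Illustrative stand-in for struct work; "rollable" plays the role of
 * work_rollable(work) in the patch above. */
struct item {
	int id;
	int rollable;
	UT_hash_handle hh;
};

/* Pop one entry, preferring a non-rollable (clone) item so that rollable
 * masters remain in the table and can be rolled again later. */
static struct item *pop_preferring_clones(struct item **head, int n_rollable)
{
	struct item *it = NULL, *tmp;
	int hc = HASH_COUNT(*head);

	if (!hc)
		return NULL;
	/* More entries than rollable ones means a clone must exist:
	 * walk the table and stop at the first non-rollable item. */
	if (hc > n_rollable) {
		HASH_ITER(hh, *head, it, tmp) {
			if (!it->rollable)
				break;
		}
	} else
		it = *head;
	HASH_DEL(*head, it);
	return it;
}

int main(void)
{
	struct item *head = NULL, items[3] = {
		{ .id = 0, .rollable = 1 },
		{ .id = 1, .rollable = 0 },
		{ .id = 2, .rollable = 1 },
	};
	int n_rollable = 2;

	for (int i = 0; i < 3; i++)
		HASH_ADD_INT(head, id, &items[i]);

	/* Picks id 1, the clone, rather than the rollable head entry. */
	struct item *picked = pop_preferring_clones(&head, n_rollable);
	printf("picked id %d (rollable=%d)\n", picked->id, picked->rollable);
	return 0;
}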