@@ -2980,7 +2980,7 @@ static void roll_work(struct work *work)
 	work->id = total_work++;
 }
 
-static bool divide_work(struct work *work)
+static bool reuse_work(struct work *work)
 {
 	if (can_roll(work) && should_roll(work)) {
 		roll_work(work);
@@ -3012,8 +3012,7 @@ retry:
 		newreq = true;
 	}
 
-	if (can_roll(work) && should_roll(work)) {
-		roll_work(work);
+	if (reuse_work(work)) {
 		ret = true;
 		goto out;
 	}
@@ -3056,10 +3055,8 @@ retry:
 	memcpy(work, work_heap, sizeof(*work));
 
-	/* Copy the res nonce back so we know to start at a higher baseline
-	 * should we divide the same work up again. Make the work we're
-	 * handing out be clone */
-	if (divide_work(work_heap)) {
+	/* Hand out a clone if we can roll this work item */
+	if (reuse_work(work_heap)) {
 		if (opt_debug)
 			applog(LOG_DEBUG, "Pushing divided work to get queue head");
 
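For reference, the renamed helper after this patch reads roughly as below. Only the first two lines of its body are visible in the hunks, so the return statements are an assumption inferred from the call sites, where a true result from reuse_work(work) leads straight to ret = true; goto out;.

static bool reuse_work(struct work *work)
{
	if (can_roll(work) && should_roll(work)) {
		roll_work(work);
		return true;	/* assumed: true signals the work was rolled and can be handed out again */
	}
	return false;		/* assumed: caller falls through to fetching fresh work */
}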