mirror of
https://github.com/GOSTSec/sgminer
synced 2025-01-09 14:28:12 +00:00
Remove unnecessary check for opt_debug on every invocation of applog at LOG_DEBUG and place the check in applog().
This commit is contained in:
parent 3dda18eb6b
commit 405a2120f8
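The change is easiest to see in the util.c hunk at the end of the diff: vapplog() now drops LOG_DEBUG messages itself when --debug is off, so callers no longer need to wrap every applog(LOG_DEBUG, ...) in "if (opt_debug)". A minimal sketch of that pattern follows; the names applog/vapplog, the opt_debug flag and the LOG_DEBUG check come from the diff below, while the stderr-only body is an assumption for illustration (the real vapplog also handles syslog and curses output).

#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <syslog.h>

static bool opt_debug;	/* set when the user passes --debug */

void vapplog(int prio, const char *fmt, va_list ap)
{
	/* Centralised filter: debug output is discarded here once,
	 * instead of being guarded by "if (opt_debug)" at every call site. */
	if (!opt_debug && prio == LOG_DEBUG)
		return;

	vfprintf(stderr, fmt, ap);	/* simplified sink for this sketch */
	fputc('\n', stderr);
}

void applog(int prio, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vapplog(prio, fmt, ap);
	va_end(ap);
}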
57 adl.c
@@ -222,23 +222,22 @@ void init_adl(int nDevs)
 if (lpAdapterID == last_adapter)
 continue;

-if (opt_debug)
-applog(LOG_DEBUG, "GPU %d "
-"iAdapterIndex %d "
-"strUDID %s "
-"iBusNumber %d "
-"iDeviceNumber %d "
-"iFunctionNumber %d "
-"iVendorID %d "
-"strAdapterName %s ",
-devices,
-iAdapterIndex,
-lpInfo[i].strUDID,
-lpInfo[i].iBusNumber,
-lpInfo[i].iDeviceNumber,
-lpInfo[i].iFunctionNumber,
-lpInfo[i].iVendorID,
-lpInfo[i].strAdapterName);
+applog(LOG_DEBUG, "GPU %d "
+"iAdapterIndex %d "
+"strUDID %s "
+"iBusNumber %d "
+"iDeviceNumber %d "
+"iFunctionNumber %d "
+"iVendorID %d "
+"strAdapterName %s ",
+devices,
+iAdapterIndex,
+lpInfo[i].strUDID,
+lpInfo[i].iBusNumber,
+lpInfo[i].iDeviceNumber,
+lpInfo[i].iFunctionNumber,
+lpInfo[i].iVendorID,
+lpInfo[i].strAdapterName);

 adapters[devices].iAdapterIndex = iAdapterIndex;
 adapters[devices].iBusNumber = lpInfo[i].iBusNumber;
@@ -908,8 +907,7 @@ int set_fanspeed(int gpu, int iFanSpeed)

 ga = &gpus[gpu].adl;
 if (!(ga->lpFanSpeedInfo.iFlags & (ADL_DL_FANCTRL_SUPPORTS_RPM_WRITE | ADL_DL_FANCTRL_SUPPORTS_PERCENT_WRITE ))) {
-if (opt_debug)
-applog(LOG_DEBUG, "GPU %d doesn't support rpm or percent write", gpu);
+applog(LOG_DEBUG, "GPU %d doesn't support rpm or percent write", gpu);
 return ret;
 }

@@ -919,8 +917,7 @@ int set_fanspeed(int gpu, int iFanSpeed)

 lock_adl();
 if (ADL_Overdrive5_FanSpeed_Get(ga->iAdapterIndex, 0, &ga->lpFanSpeedValue) != ADL_OK) {
-if (opt_debug)
-applog(LOG_DEBUG, "GPU %d call to fanspeed get failed", gpu);
+applog(LOG_DEBUG, "GPU %d call to fanspeed get failed", gpu);
 }
 if (!(ga->lpFanSpeedInfo.iFlags & ADL_DL_FANCTRL_SUPPORTS_PERCENT_WRITE)) {
 /* Must convert speed to an RPM */
@@ -977,8 +974,7 @@ static void fan_autotune(int gpu, int temp, int fanpercent, bool __maybe_unused
 applog(LOG_WARNING, "Overheat detected on GPU %d, increasing fan to 100%", gpu);
 newpercent = iMax;
 } else if (temp > ga->targettemp && fanpercent < top && temp >= ga->lasttemp) {
-if (opt_debug)
-applog(LOG_DEBUG, "Temperature over target, increasing fanspeed");
+applog(LOG_DEBUG, "Temperature over target, increasing fanspeed");
 if (temp > ga->targettemp + opt_hysteresis)
 newpercent = ga->targetfan + 10;
 else
@@ -986,19 +982,16 @@ static void fan_autotune(int gpu, int temp, int fanpercent, bool __maybe_unused
 if (newpercent > top)
 newpercent = top;
 } else if (fanpercent > bot && temp < ga->targettemp - opt_hysteresis && temp <= ga->lasttemp) {
-if (opt_debug)
-applog(LOG_DEBUG, "Temperature %d degrees below target, decreasing fanspeed", opt_hysteresis);
+applog(LOG_DEBUG, "Temperature %d degrees below target, decreasing fanspeed", opt_hysteresis);
 newpercent = ga->targetfan - 1;
 } else {
 /* We're in the optimal range, make minor adjustments if the
 * temp is still drifting */
 if (fanpercent > bot && temp < ga->lasttemp && ga->lasttemp < ga->targettemp) {
-if (opt_debug)
-applog(LOG_DEBUG, "Temperature dropping while in target range, decreasing fanspeed");
+applog(LOG_DEBUG, "Temperature dropping while in target range, decreasing fanspeed");
 newpercent = ga->targetfan - 1;
 } else if (fanpercent < top && temp > ga->lasttemp && temp > ga->targettemp - opt_hysteresis) {
-if (opt_debug)
-applog(LOG_DEBUG, "Temperature rising while in target range, increasing fanspeed");
+applog(LOG_DEBUG, "Temperature rising while in target range, increasing fanspeed");
 newpercent = ga->targetfan + 1;
 }
 }
@@ -1063,13 +1056,11 @@ void gpu_autotune(int gpu, bool *enable)
 applog(LOG_WARNING, "Overheat detected, decreasing GPU %d clock speed", gpu);
 newengine = ga->minspeed;
 } else if (temp > ga->targettemp + opt_hysteresis && engine > ga->minspeed && fan_optimal) {
-if (opt_debug)
-applog(LOG_DEBUG, "Temperature %d degrees over target, decreasing clock speed", opt_hysteresis);
+applog(LOG_DEBUG, "Temperature %d degrees over target, decreasing clock speed", opt_hysteresis);
 newengine = engine - ga->lpOdParameters.sEngineClock.iStep;
 /* Only try to tune engine speed up if this GPU is not disabled */
 } else if (temp < ga->targettemp && engine < ga->maxspeed && *enable) {
-if (opt_debug)
-applog(LOG_DEBUG, "Temperature below target, increasing clock speed");
+applog(LOG_DEBUG, "Temperature below target, increasing clock speed");
 newengine = engine + ga->lpOdParameters.sEngineClock.iStep;
 }

3 api.c
@@ -1192,8 +1192,7 @@ static void send_result(SOCKETTYPE c, bool isjson)

 len = strlen(io_buffer);

-if (opt_debug)
-applog(LOG_DEBUG, "DBG: send reply: (%d) '%.10s%s'", len+1, io_buffer, len > 10 ? "..." : "");
+applog(LOG_DEBUG, "DBG: send reply: (%d) '%.10s%s'", len+1, io_buffer, len > 10 ? "..." : "");

 // ignore failure - it's closed immediately anyway
 n = send(c, io_buffer, len+1, 0);
96 cgminer.c
@@ -1409,8 +1409,7 @@ static bool submit_upstream_work(const struct work *work)
 "{\"method\": \"getwork\", \"params\": [ \"%s\" ], \"id\":1}",
 hexstr);

-if (opt_debug)
-applog(LOG_DEBUG, "DBG: sending %s submit RPC call: %s", pool->rpc_url, sd);
+applog(LOG_DEBUG, "DBG: sending %s submit RPC call: %s", pool->rpc_url, sd);

 /* Force a fresh connection in case there are dead persistent
 * connections to this pool */
@@ -1451,8 +1450,7 @@ static bool submit_upstream_work(const struct work *work)
 pool->accepted++;
 cgpu->last_share_pool = pool->pool_no;
 cgpu->last_share_pool_time = time(NULL);
-if (opt_debug)
-applog(LOG_DEBUG, "PROOF OF WORK RESULT: true (yay!!!)");
+applog(LOG_DEBUG, "PROOF OF WORK RESULT: true (yay!!!)");
 if (!QUIET) {
 if (total_pools > 1)
 applog(LOG_NOTICE, "Accepted %s %s %d thread %d pool %d",
@@ -1470,8 +1468,7 @@ static bool submit_upstream_work(const struct work *work)
 cgpu->rejected++;
 total_rejected++;
 pool->rejected++;
-if (opt_debug)
-applog(LOG_DEBUG, "PROOF OF WORK RESULT: false (booooo)");
+applog(LOG_DEBUG, "PROOF OF WORK RESULT: false (booooo)");
 if (!QUIET) {
 char where[17];
 char reason[32];
@@ -1563,8 +1560,7 @@ static bool get_upstream_work(struct work *work, bool lagging)
 }

 pool = select_pool(lagging);
-if (opt_debug)
-applog(LOG_DEBUG, "DBG: sending %s get RPC call: %s", pool->rpc_url, rpc_req);
+applog(LOG_DEBUG, "DBG: sending %s get RPC call: %s", pool->rpc_url, rpc_req);

 retry:
 /* A single failure response here might be reported as a dead pool and
@@ -1670,40 +1666,34 @@ void kill_work(void)
 disable_curses();
 applog(LOG_INFO, "Received kill message");

-if (opt_debug)
-applog(LOG_DEBUG, "Killing off watchpool thread");
+applog(LOG_DEBUG, "Killing off watchpool thread");
 /* Kill the watchpool thread */
 thr = &thr_info[watchpool_thr_id];
 thr_info_cancel(thr);

-if (opt_debug)
-applog(LOG_DEBUG, "Killing off watchdog thread");
+applog(LOG_DEBUG, "Killing off watchdog thread");
 /* Kill the watchdog thread */
 thr = &thr_info[watchdog_thr_id];
 thr_info_cancel(thr);

-if (opt_debug)
-applog(LOG_DEBUG, "Killing off mining threads");
+applog(LOG_DEBUG, "Killing off mining threads");
 /* Stop the mining threads*/
 for (i = 0; i < mining_threads; i++) {
 thr = &thr_info[i];
 thr_info_cancel(thr);
 }

-if (opt_debug)
-applog(LOG_DEBUG, "Killing off stage thread");
+applog(LOG_DEBUG, "Killing off stage thread");
 /* Stop the others */
 thr = &thr_info[stage_thr_id];
 thr_info_cancel(thr);

-if (opt_debug)
-applog(LOG_DEBUG, "Killing off longpoll thread");
+applog(LOG_DEBUG, "Killing off longpoll thread");
 thr = &thr_info[longpoll_thr_id];
 if (have_longpoll)
 thr_info_cancel(thr);

-if (opt_debug)
-applog(LOG_DEBUG, "Killing off API thread");
+applog(LOG_DEBUG, "Killing off API thread");
 thr = &thr_info[api_thr_id];
 thr_info_cancel(thr);

@@ -1751,8 +1741,7 @@ static void *get_work_thread(void *userdata)
 }
 fail_pause = opt_fail_pause;

-if (opt_debug)
-applog(LOG_DEBUG, "Pushing work to requesting thread");
+applog(LOG_DEBUG, "Pushing work to requesting thread");

 /* send work to requesting thread */
 if (unlikely(!tq_push(thr_info[stage_thr_id].q, ret_work))) {
@@ -1945,9 +1934,8 @@ static void discard_work(struct work *work)
 if (work->pool)
 work->pool->discarded_work++;
 total_discarded++;
-if (opt_debug)
-applog(LOG_DEBUG, "Discarded work");
-} else if (opt_debug)
+applog(LOG_DEBUG, "Discarded work");
+} else
 applog(LOG_DEBUG, "Discarded cloned or rolled work");
 free_work(work);
 }
@@ -1996,8 +1984,7 @@ static int discard_stale(void)
 }
 mutex_unlock(stgd_lock);

-if (opt_debug)
-applog(LOG_DEBUG, "Discarded %d stales that didn't match current hash", stale);
+applog(LOG_DEBUG, "Discarded %d stales that didn't match current hash", stale);

 /* Dec queued outside the loop to not have recursive locks */
 for (i = 0; i < stale; i++)
@@ -2146,8 +2133,7 @@ static void *stage_thread(void *userdata)
 while (ok) {
 struct work *work = NULL;

-if (opt_debug)
-applog(LOG_DEBUG, "Popping work to stage thread");
+applog(LOG_DEBUG, "Popping work to stage thread");

 work = tq_pop(mythr->q, NULL);
 if (unlikely(!work)) {
@@ -2159,8 +2145,7 @@ static void *stage_thread(void *userdata)

 test_work_current(work, false);

-if (opt_debug)
-applog(LOG_DEBUG, "Pushing work to getwork queue");
+applog(LOG_DEBUG, "Pushing work to getwork queue");

 if (unlikely(!hash_push(work))) {
 applog(LOG_WARNING, "Failed to hash_push in stage_thread");
@@ -2174,8 +2159,7 @@ static void *stage_thread(void *userdata)

 static bool stage_work(struct work *work)
 {
-if (opt_debug)
-applog(LOG_DEBUG, "Pushing work to stage thread");
+applog(LOG_DEBUG, "Pushing work to stage thread");

 if (unlikely(!tq_push(thr_info[stage_thr_id].q, work))) {
 applog(LOG_ERR, "Could not tq_push work in stage_work");
@@ -2738,8 +2722,7 @@ static void *workio_thread(void *userdata)
 while (ok) {
 struct workio_cmd *wc;

-if (opt_debug)
-applog(LOG_DEBUG, "Popping work to work thread");
+applog(LOG_DEBUG, "Popping work to work thread");

 /* wait for workio_cmd sent to us, on our queue */
 wc = tq_pop(mythr->q, NULL);
@@ -2822,9 +2805,8 @@ static void hashmeter(int thr_id, struct timeval *diff,
 double thread_rolling = 0.0;
 int i;

-if (opt_debug)
-applog(LOG_DEBUG, "[thread %d: %lu hashes, %.0f khash/sec]",
-thr_id, hashes_done, hashes_done / secs);
+applog(LOG_DEBUG, "[thread %d: %lu hashes, %.0f khash/sec]",
+thr_id, hashes_done, hashes_done / secs);

 /* Rolling average for each thread and each device */
 decay_time(&thr->rolling, local_mhashes / secs);
@@ -2927,8 +2909,7 @@ static bool pool_active(struct pool *pool, bool pinging)
 pool->pool_no, pool->rpc_url);
 work->pool = pool;
 work->rolltime = rolltime;
-if (opt_debug)
-applog(LOG_DEBUG, "Pushing pooltest work to base pool");
+applog(LOG_DEBUG, "Pushing pooltest work to base pool");

 tq_push(thr_info[stage_thr_id].q, work);
 total_getworks++;
@@ -3014,8 +2995,7 @@ static bool queue_request(struct thr_info *thr, bool needed)
 if (rq && needed && !requests_staged() && !opt_fail_only)
 wc->lagging = true;

-if (opt_debug)
-applog(LOG_DEBUG, "Queueing getwork request to work thread");
+applog(LOG_DEBUG, "Queueing getwork request to work thread");

 /* send work request to workio thread */
 if (unlikely(!tq_push(thr_info[work_thr_id].q, wc))) {
@@ -3074,8 +3054,7 @@ static void roll_work(struct work *work)
 local_work++;
 work->rolls++;
 work->blk.nonce = 0;
-if (opt_debug)
-applog(LOG_DEBUG, "Successfully rolled work");
+applog(LOG_DEBUG, "Successfully rolled work");

 /* This is now a different work item so it needs a different ID for the
 * hashtable */
@@ -3130,8 +3109,7 @@ retry:
 gettimeofday(&now, NULL);
 abstime.tv_sec = now.tv_sec + 60;

-if (opt_debug)
-applog(LOG_DEBUG, "Popping work from get queue to get work");
+applog(LOG_DEBUG, "Popping work from get queue to get work");

 /* wait for 1st response, or get cached response */
 work_heap = hash_pop(&abstime);
@@ -3159,8 +3137,7 @@ retry:

 /* Hand out a clone if we can roll this work item */
 if (reuse_work(work_heap)) {
-if (opt_debug)
-applog(LOG_DEBUG, "Pushing divided work to get queue head");
+applog(LOG_DEBUG, "Pushing divided work to get queue head");

 stage_work(work_heap);
 work->clone = true;
@@ -3206,8 +3183,7 @@ bool submit_work_sync(struct thr_info *thr, const struct work *work_in)
 wc->thr = thr;
 memcpy(wc->u.work, work_in, sizeof(*work_in));

-if (opt_debug)
-applog(LOG_DEBUG, "Pushing submit work to work thread");
+applog(LOG_DEBUG, "Pushing submit work to work thread");

 /* send solution to workio thread */
 if (unlikely(!tq_push(thr_info[work_thr_id].q, wc))) {
@@ -3295,8 +3271,7 @@ void *miner_thread(void *userdata)
 if (api->thread_init && !api->thread_init(mythr))
 goto out;

-if (opt_debug)
-applog(LOG_DEBUG, "Popping ping in miner thread");
+applog(LOG_DEBUG, "Popping ping in miner thread");
 tq_pop(mythr->q, NULL); /* Wait for a ping to start */

 sdiff.tv_sec = sdiff.tv_usec = 0;
@@ -3395,8 +3370,7 @@ void *miner_thread(void *userdata)
 if (unlikely(mythr->pause || !cgpu->enabled)) {
 applog(LOG_WARNING, "Thread %d being disabled", thr_id);
 mythr->rolling = mythr->cgpu->rolling = 0;
-if (opt_debug)
-applog(LOG_DEBUG, "Popping wakeup ping in miner thread");
+applog(LOG_DEBUG, "Popping wakeup ping in miner thread");
 thread_reportout(mythr);
 tq_pop(mythr->q, NULL); /* Ignore ping that's popped */
 thread_reportin(mythr);
@@ -3449,8 +3423,7 @@ static void convert_to_work(json_t *val, bool rolltime, struct pool *pool)
 memcpy(work_clone, work, sizeof(struct work));
 while (reuse_work(work)) {
 work_clone->clone = true;
-if (opt_debug)
-applog(LOG_DEBUG, "Pushing rolled converted work to stage thread");
+applog(LOG_DEBUG, "Pushing rolled converted work to stage thread");
 if (unlikely(!stage_work(work_clone)))
 break;
 work_clone = make_work();
@@ -3458,12 +3431,11 @@ static void convert_to_work(json_t *val, bool rolltime, struct pool *pool)
 }
 free_work(work_clone);

-if (opt_debug)
-applog(LOG_DEBUG, "Pushing converted work to stage thread");
+applog(LOG_DEBUG, "Pushing converted work to stage thread");

 if (unlikely(!stage_work(work)))
 free_work(work);
-else if (opt_debug)
+else
 applog(LOG_DEBUG, "Converted longpoll data to work");
 }

@@ -3602,8 +3574,7 @@ static void start_longpoll(void)
 tq_thaw(thr->q);
 if (unlikely(thr_info_create(thr, NULL, longpoll_thread, thr)))
 quit(1, "longpoll thread create failed");
-if (opt_debug)
-applog(LOG_DEBUG, "Pushing ping to longpoll thread");
+applog(LOG_DEBUG, "Pushing ping to longpoll thread");
 tq_push(thr_info[longpoll_thr_id].q, &ping);
 }

@@ -4471,8 +4442,7 @@ int main (int argc, char *argv[])
 /* Enable threads for devices set not to mine but disable
 * their queue in case we wish to enable them later */
 if (cgpu->enabled) {
-if (opt_debug)
-applog(LOG_DEBUG, "Pushing ping to thread %d", thr->id);
+applog(LOG_DEBUG, "Pushing ping to thread %d", thr->id);

 tq_push(thr->q, &ping);
 }
@@ -758,8 +758,7 @@ CPUSearch:

 /* if nonce found, submit work */
 if (unlikely(rc)) {
-if (opt_debug)
-applog(LOG_DEBUG, "CPU %d found something?", dev_from_id(thr_id));
+applog(LOG_DEBUG, "CPU %d found something?", dev_from_id(thr_id));
 if (unlikely(!submit_work_sync(thr, work))) {
 applog(LOG_ERR, "Failed to submit_work_sync in miner_thread %d", thr_id);
 }
@@ -553,8 +553,7 @@ retry:
 gpus[selected].enabled = false;
 goto retry;
 }
-if (opt_debug)
-applog(LOG_DEBUG, "Pushing ping to thread %d", thr->id);
+applog(LOG_DEBUG, "Pushing ping to thread %d", thr->id);

 tq_push(thr->q, &ping);
 }
@@ -1215,13 +1214,11 @@ static uint64_t opencl_scanhash(struct thr_info *thr, struct work *work,
 return 0;
 }
 if (unlikely(thrdata->last_work)) {
-if (opt_debug)
-applog(LOG_DEBUG, "GPU %d found something in last work?", gpu->device_id);
+applog(LOG_DEBUG, "GPU %d found something in last work?", gpu->device_id);
 postcalc_hash_async(thr, thrdata->last_work, thrdata->res);
 thrdata->last_work = NULL;
 } else {
-if (opt_debug)
-applog(LOG_DEBUG, "GPU %d found something?", gpu->device_id);
+applog(LOG_DEBUG, "GPU %d found something?", gpu->device_id);
 postcalc_hash_async(thr, work, thrdata->res);
 }
 memset(thrdata->res, 0, BUFFERSIZE);
@@ -208,8 +208,7 @@ static void send_nonce(struct pc_data *pcd, cl_uint nonce)
 if (unlikely(submit_nonce(thr, work, nonce) == false))
 applog(LOG_ERR, "Failed to submit work, exiting");
 } else {
-if (opt_debug)
-applog(LOG_DEBUG, "No best_g found! Error in OpenCL code?");
+applog(LOG_DEBUG, "No best_g found! Error in OpenCL code?");
 hw_errors++;
 thr->cgpu->hw_errors++;
 }
@@ -233,8 +232,7 @@ static void *postcalc_hash(void *userdata)
 free(pcd);

 if (unlikely(!nonces)) {
-if (opt_debug)
-applog(LOG_DEBUG, "No nonces found! Error in OpenCL code?");
+applog(LOG_DEBUG, "No nonces found! Error in OpenCL code?");
 hw_errors++;
 thr->cgpu->hw_errors++;
 }
57 ocl.c
@@ -129,8 +129,7 @@ static int advance(char **area, unsigned *remaining, const char *marker)
 char *find = memmem(*area, *remaining, marker, strlen(marker));

 if (!find) {
-if (opt_debug)
-applog(LOG_DEBUG, "Marker \"%s\" not found", marker);
+applog(LOG_DEBUG, "Marker \"%s\" not found", marker);
 return 0;
 }
 *remaining -= find - *area;
@@ -176,12 +175,10 @@ void patch_opcodes(char *w, unsigned remaining)
 opcode++;
 remaining -= 8;
 }
-if (opt_debug) {
-applog(LOG_DEBUG, "Potential OP3 instructions identified: "
-"%i BFE_INT, %i BFE_UINT, %i BYTE_ALIGN",
-count_bfe_int, count_bfe_uint, count_byte_align);
-applog(LOG_DEBUG, "Patched a total of %i BFI_INT instructions", patched);
-}
+applog(LOG_DEBUG, "Potential OP3 instructions identified: "
+"%i BFE_INT, %i BFE_UINT, %i BYTE_ALIGN",
+count_bfe_int, count_bfe_uint, count_byte_align);
+applog(LOG_DEBUG, "Patched a total of %i BFI_INT instructions", patched);
 }

 _clState *initCl(unsigned int gpu, char *name, size_t nameSize)
@@ -321,16 +318,14 @@ _clState *initCl(unsigned int gpu, char *name, size_t nameSize)
 applog(LOG_ERR, "Error: Failed to clGetDeviceInfo when trying to get CL_DEVICE_PREFERRED_VECTOR_WIDTH_INT");
 return NULL;
 }
-if (opt_debug)
-applog(LOG_DEBUG, "Preferred vector width reported %d", clState->preferred_vwidth);
+applog(LOG_DEBUG, "Preferred vector width reported %d", clState->preferred_vwidth);

 status = clGetDeviceInfo(devices[gpu], CL_DEVICE_MAX_WORK_GROUP_SIZE, sizeof(size_t), (void *)&clState->max_work_size, NULL);
 if (status != CL_SUCCESS) {
 applog(LOG_ERR, "Error: Failed to clGetDeviceInfo when trying to get CL_DEVICE_MAX_WORK_GROUP_SIZE");
 return NULL;
 }
-if (opt_debug)
-applog(LOG_DEBUG, "Max work group size reported %d", clState->max_work_size);
+applog(LOG_DEBUG, "Max work group size reported %d", clState->max_work_size);

 /* For some reason 2 vectors is still better even if the card says
 * otherwise, and many cards lie about their max so use 256 as max
@@ -426,14 +421,12 @@ _clState *initCl(unsigned int gpu, char *name, size_t nameSize)

 binaryfile = fopen(binaryfilename, "rb");
 if (!binaryfile) {
-if (opt_debug)
-applog(LOG_DEBUG, "No binary found, generating from source");
+applog(LOG_DEBUG, "No binary found, generating from source");
 } else {
 struct stat binary_stat;

 if (unlikely(stat(binaryfilename, &binary_stat))) {
-if (opt_debug)
-applog(LOG_DEBUG, "Unable to stat binary, generating from source");
+applog(LOG_DEBUG, "Unable to stat binary, generating from source");
 fclose(binaryfile);
 goto build;
 }
@@ -470,8 +463,7 @@ _clState *initCl(unsigned int gpu, char *name, size_t nameSize)
 }

 fclose(binaryfile);
-if (opt_debug)
-applog(LOG_DEBUG, "Loaded binary image %s", binaryfilename);
+applog(LOG_DEBUG, "Loaded binary image %s", binaryfilename);

 goto built;
 }
@@ -498,15 +490,13 @@ build:

 sprintf(CompilerOptions, "-D WORKSIZE=%d -D VECTORS%d",
 (int)clState->work_size, clState->preferred_vwidth);
-if (opt_debug)
-applog(LOG_DEBUG, "Setting worksize to %d", clState->work_size);
-if (clState->preferred_vwidth > 1 && opt_debug)
+applog(LOG_DEBUG, "Setting worksize to %d", clState->work_size);
+if (clState->preferred_vwidth > 1)
 applog(LOG_DEBUG, "Patched source to suit %d vectors", clState->preferred_vwidth);

 if (clState->hasBitAlign) {
 strcat(CompilerOptions, " -D BITALIGN");
-if (opt_debug)
-applog(LOG_DEBUG, "cl_amd_media_ops found, setting BITALIGN");
+applog(LOG_DEBUG, "cl_amd_media_ops found, setting BITALIGN");
 if (strstr(name, "Cedar") ||
 strstr(name, "Redwood") ||
 strstr(name, "Juniper") ||
@@ -522,18 +512,16 @@ build:
 strstr(name, "WinterPark" ) ||
 strstr(name, "BeaverCreek" ))
 patchbfi = true;
-} else if (opt_debug)
+} else
 applog(LOG_DEBUG, "cl_amd_media_ops not found, will not set BITALIGN");

 if (patchbfi) {
 strcat(CompilerOptions, " -D BFI_INT");
-if (opt_debug)
-applog(LOG_DEBUG, "BFI_INT patch requiring device found, patched source with BFI_INT");
-} else if (opt_debug)
+applog(LOG_DEBUG, "BFI_INT patch requiring device found, patched source with BFI_INT");
+} else
 applog(LOG_DEBUG, "BFI_INT patch requiring device not found, will not BFI_INT patch");

-if (opt_debug)
-applog(LOG_DEBUG, "CompilerOptions: %s", CompilerOptions);
+applog(LOG_DEBUG, "CompilerOptions: %s", CompilerOptions);
 status = clBuildProgram(clState->program, 1, &devices[gpu], CompilerOptions , NULL, NULL);
 free(CompilerOptions);

@@ -557,8 +545,7 @@ build:
 }

 /* copy over all of the generated binaries. */
-if (opt_debug)
-applog(LOG_DEBUG, "binary size %d : %d", gpu, binary_sizes[gpu]);
+applog(LOG_DEBUG, "binary size %d : %d", gpu, binary_sizes[gpu]);
 if (!binary_sizes[gpu]) {
 applog(LOG_ERR, "OpenCL compiler generated a zero sized binary, may need to reboot!");
 return NULL;
@@ -600,9 +587,8 @@ build:
 }
 w--; remaining++;
 w += start; remaining -= start;
-if (opt_debug)
-applog(LOG_DEBUG, "At %p (%u rem. bytes), to begin patching",
-w, remaining);
+applog(LOG_DEBUG, "At %p (%u rem. bytes), to begin patching",
+w, remaining);
 patch_opcodes(w, length);

 status = clReleaseProgram(clState->program);
@@ -633,8 +619,7 @@ build:
 binaryfile = fopen(binaryfilename, "wb");
 if (!binaryfile) {
 /* Not a fatal problem, just means we build it again next time */
-if (opt_debug)
-applog(LOG_DEBUG, "Unable to create file %s", binaryfilename);
+applog(LOG_DEBUG, "Unable to create file %s", binaryfilename);
 } else {
 if (unlikely(fwrite(binaries[gpu], 1, binary_sizes[gpu], binaryfile) != binary_sizes[gpu])) {
 applog(LOG_ERR, "Unable to fwrite to binaryfile");
11 util.c
@@ -68,6 +68,8 @@ struct tq_ent {
 void vapplog(int prio, const char *fmt, va_list ap)
 {
 extern bool use_curses;
+if (!opt_debug && prio == LOG_DEBUG)
+return;

 #ifdef HAVE_SYSLOG_H
 if (use_syslog) {
@@ -219,11 +221,9 @@ static size_t resp_hdr_cb(void *ptr, size_t size, size_t nmemb, void *user_data)

 if (!strcasecmp("X-Roll-Ntime", key)) {
 if (!strncasecmp("N", val, 1)) {
-if (opt_debug)
-applog(LOG_DEBUG, "X-Roll-Ntime: N found");
+applog(LOG_DEBUG, "X-Roll-Ntime: N found");
 } else {
-if (opt_debug)
-applog(LOG_DEBUG, "X-Roll-Ntime found");
+applog(LOG_DEBUG, "X-Roll-Ntime found");
 hi->has_rolltime = true;
 }
 }
@@ -417,8 +417,7 @@ json_t *json_rpc_call(CURL *curl, const char *url,
 }

 if (!all_data.buf) {
-if (opt_debug)
-applog(LOG_DEBUG, "Empty data received in json_rpc_call.");
+applog(LOG_DEBUG, "Empty data received in json_rpc_call.");
 goto err_out;
 }
