Browse Source

Remove unnecessary check for opt_debug on every invocation of applog at LOG_DEBUG and place the check in applog().

nfactor-troky
Con Kolivas 13 years ago
parent
commit
405a2120f8
  1. 9
      adl.c
  2. 1
      api.c
  3. 34
      cgminer.c
  4. 1
      device-cpu.c
  5. 3
      device-gpu.c
  6. 2
      findnonce.c
  7. 21
      ocl.c
  8. 5
      util.c

9
adl.c

@@ -222,7 +222,6 @@ void init_adl(int nDevs)
if (lpAdapterID == last_adapter)
continue;
if (opt_debug)
applog(LOG_DEBUG, "GPU %d "
"iAdapterIndex %d "
"strUDID %s "
@@ -908,7 +907,6 @@ int set_fanspeed(int gpu, int iFanSpeed)
ga = &gpus[gpu].adl;
if (!(ga->lpFanSpeedInfo.iFlags & (ADL_DL_FANCTRL_SUPPORTS_RPM_WRITE | ADL_DL_FANCTRL_SUPPORTS_PERCENT_WRITE ))) {
if (opt_debug)
applog(LOG_DEBUG, "GPU %d doesn't support rpm or percent write", gpu);
return ret;
}
@@ -919,7 +917,6 @@ int set_fanspeed(int gpu, int iFanSpeed)
lock_adl();
if (ADL_Overdrive5_FanSpeed_Get(ga->iAdapterIndex, 0, &ga->lpFanSpeedValue) != ADL_OK) {
if (opt_debug)
applog(LOG_DEBUG, "GPU %d call to fanspeed get failed", gpu);
}
if (!(ga->lpFanSpeedInfo.iFlags & ADL_DL_FANCTRL_SUPPORTS_PERCENT_WRITE)) {
@@ -977,7 +974,6 @@ static void fan_autotune(int gpu, int temp, int fanpercent, bool __maybe_unused
applog(LOG_WARNING, "Overheat detected on GPU %d, increasing fan to 100%", gpu);
newpercent = iMax;
} else if (temp > ga->targettemp && fanpercent < top && temp >= ga->lasttemp) {
if (opt_debug)
applog(LOG_DEBUG, "Temperature over target, increasing fanspeed");
if (temp > ga->targettemp + opt_hysteresis)
newpercent = ga->targetfan + 10;
@@ -986,18 +982,15 @@ static void fan_autotune(int gpu, int temp, int fanpercent, bool __maybe_unused
if (newpercent > top)
newpercent = top;
} else if (fanpercent > bot && temp < ga->targettemp - opt_hysteresis && temp <= ga->lasttemp) {
if (opt_debug)
applog(LOG_DEBUG, "Temperature %d degrees below target, decreasing fanspeed", opt_hysteresis);
newpercent = ga->targetfan - 1;
} else {
/* We're in the optimal range, make minor adjustments if the
* temp is still drifting */
if (fanpercent > bot && temp < ga->lasttemp && ga->lasttemp < ga->targettemp) {
if (opt_debug)
applog(LOG_DEBUG, "Temperature dropping while in target range, decreasing fanspeed");
newpercent = ga->targetfan - 1;
} else if (fanpercent < top && temp > ga->lasttemp && temp > ga->targettemp - opt_hysteresis) {
if (opt_debug)
applog(LOG_DEBUG, "Temperature rising while in target range, increasing fanspeed");
newpercent = ga->targetfan + 1;
}
@@ -1063,12 +1056,10 @@ void gpu_autotune(int gpu, bool *enable)
applog(LOG_WARNING, "Overheat detected, decreasing GPU %d clock speed", gpu);
newengine = ga->minspeed;
} else if (temp > ga->targettemp + opt_hysteresis && engine > ga->minspeed && fan_optimal) {
if (opt_debug)
applog(LOG_DEBUG, "Temperature %d degrees over target, decreasing clock speed", opt_hysteresis);
newengine = engine - ga->lpOdParameters.sEngineClock.iStep;
/* Only try to tune engine speed up if this GPU is not disabled */
} else if (temp < ga->targettemp && engine < ga->maxspeed && *enable) {
if (opt_debug)
applog(LOG_DEBUG, "Temperature below target, increasing clock speed");
newengine = engine + ga->lpOdParameters.sEngineClock.iStep;
}

1
api.c

@@ -1192,7 +1192,6 @@ static void send_result(SOCKETTYPE c, bool isjson)
len = strlen(io_buffer);
if (opt_debug)
applog(LOG_DEBUG, "DBG: send reply: (%d) '%.10s%s'", len+1, io_buffer, len > 10 ? "..." : "");
// ignore failure - it's closed immediately anyway

34
cgminer.c

@@ -1409,7 +1409,6 @@ static bool submit_upstream_work(const struct work *work)
"{\"method\": \"getwork\", \"params\": [ \"%s\" ], \"id\":1}",
hexstr);
if (opt_debug)
applog(LOG_DEBUG, "DBG: sending %s submit RPC call: %s", pool->rpc_url, sd);
/* Force a fresh connection in case there are dead persistent
@@ -1451,7 +1450,6 @@ static bool submit_upstream_work(const struct work *work)
pool->accepted++;
cgpu->last_share_pool = pool->pool_no;
cgpu->last_share_pool_time = time(NULL);
if (opt_debug)
applog(LOG_DEBUG, "PROOF OF WORK RESULT: true (yay!!!)");
if (!QUIET) {
if (total_pools > 1)
@@ -1470,7 +1468,6 @@ static bool submit_upstream_work(const struct work *work)
cgpu->rejected++;
total_rejected++;
pool->rejected++;
if (opt_debug)
applog(LOG_DEBUG, "PROOF OF WORK RESULT: false (booooo)");
if (!QUIET) {
char where[17];
@@ -1563,7 +1560,6 @@ static bool get_upstream_work(struct work *work, bool lagging)
}
pool = select_pool(lagging);
if (opt_debug)
applog(LOG_DEBUG, "DBG: sending %s get RPC call: %s", pool->rpc_url, rpc_req);
retry:
@@ -1670,19 +1666,16 @@ void kill_work(void)
disable_curses();
applog(LOG_INFO, "Received kill message");
if (opt_debug)
applog(LOG_DEBUG, "Killing off watchpool thread");
/* Kill the watchpool thread */
thr = &thr_info[watchpool_thr_id];
thr_info_cancel(thr);
if (opt_debug)
applog(LOG_DEBUG, "Killing off watchdog thread");
/* Kill the watchdog thread */
thr = &thr_info[watchdog_thr_id];
thr_info_cancel(thr);
if (opt_debug)
applog(LOG_DEBUG, "Killing off mining threads");
/* Stop the mining threads*/
for (i = 0; i < mining_threads; i++) {
@@ -1690,19 +1683,16 @@ void kill_work(void)
thr_info_cancel(thr);
}
if (opt_debug)
applog(LOG_DEBUG, "Killing off stage thread");
/* Stop the others */
thr = &thr_info[stage_thr_id];
thr_info_cancel(thr);
if (opt_debug)
applog(LOG_DEBUG, "Killing off longpoll thread");
thr = &thr_info[longpoll_thr_id];
if (have_longpoll)
thr_info_cancel(thr);
if (opt_debug)
applog(LOG_DEBUG, "Killing off API thread");
thr = &thr_info[api_thr_id];
thr_info_cancel(thr);
@@ -1751,7 +1741,6 @@ static void *get_work_thread(void *userdata)
}
fail_pause = opt_fail_pause;
if (opt_debug)
applog(LOG_DEBUG, "Pushing work to requesting thread");
/* send work to requesting thread */
@@ -1945,9 +1934,8 @@ static void discard_work(struct work *work)
if (work->pool)
work->pool->discarded_work++;
total_discarded++;
if (opt_debug)
applog(LOG_DEBUG, "Discarded work");
} else if (opt_debug)
} else
applog(LOG_DEBUG, "Discarded cloned or rolled work");
free_work(work);
}
@@ -1996,7 +1984,6 @@ static int discard_stale(void)
}
mutex_unlock(stgd_lock);
if (opt_debug)
applog(LOG_DEBUG, "Discarded %d stales that didn't match current hash", stale);
/* Dec queued outside the loop to not have recursive locks */
@@ -2146,7 +2133,6 @@ static void *stage_thread(void *userdata)
while (ok) {
struct work *work = NULL;
if (opt_debug)
applog(LOG_DEBUG, "Popping work to stage thread");
work = tq_pop(mythr->q, NULL);
@@ -2159,7 +2145,6 @@ static void *stage_thread(void *userdata)
test_work_current(work, false);
if (opt_debug)
applog(LOG_DEBUG, "Pushing work to getwork queue");
if (unlikely(!hash_push(work))) {
@@ -2174,7 +2159,6 @@ static void *stage_thread(void *userdata)
static bool stage_work(struct work *work)
{
if (opt_debug)
applog(LOG_DEBUG, "Pushing work to stage thread");
if (unlikely(!tq_push(thr_info[stage_thr_id].q, work))) {
@@ -2738,7 +2722,6 @@ static void *workio_thread(void *userdata)
while (ok) {
struct workio_cmd *wc;
if (opt_debug)
applog(LOG_DEBUG, "Popping work to work thread");
/* wait for workio_cmd sent to us, on our queue */
@@ -2822,7 +2805,6 @@ static void hashmeter(int thr_id, struct timeval *diff,
double thread_rolling = 0.0;
int i;
if (opt_debug)
applog(LOG_DEBUG, "[thread %d: %lu hashes, %.0f khash/sec]",
thr_id, hashes_done, hashes_done / secs);
@@ -2927,7 +2909,6 @@ static bool pool_active(struct pool *pool, bool pinging)
pool->pool_no, pool->rpc_url);
work->pool = pool;
work->rolltime = rolltime;
if (opt_debug)
applog(LOG_DEBUG, "Pushing pooltest work to base pool");
tq_push(thr_info[stage_thr_id].q, work);
@@ -3014,7 +2995,6 @@ static bool queue_request(struct thr_info *thr, bool needed)
if (rq && needed && !requests_staged() && !opt_fail_only)
wc->lagging = true;
if (opt_debug)
applog(LOG_DEBUG, "Queueing getwork request to work thread");
/* send work request to workio thread */
@@ -3074,7 +3054,6 @@ static void roll_work(struct work *work)
local_work++;
work->rolls++;
work->blk.nonce = 0;
if (opt_debug)
applog(LOG_DEBUG, "Successfully rolled work");
/* This is now a different work item so it needs a different ID for the
@@ -3130,7 +3109,6 @@ retry:
gettimeofday(&now, NULL);
abstime.tv_sec = now.tv_sec + 60;
if (opt_debug)
applog(LOG_DEBUG, "Popping work from get queue to get work");
/* wait for 1st response, or get cached response */
@@ -3159,7 +3137,6 @@ retry:
/* Hand out a clone if we can roll this work item */
if (reuse_work(work_heap)) {
if (opt_debug)
applog(LOG_DEBUG, "Pushing divided work to get queue head");
stage_work(work_heap);
@@ -3206,7 +3183,6 @@ bool submit_work_sync(struct thr_info *thr, const struct work *work_in)
wc->thr = thr;
memcpy(wc->u.work, work_in, sizeof(*work_in));
if (opt_debug)
applog(LOG_DEBUG, "Pushing submit work to work thread");
/* send solution to workio thread */
@@ -3295,7 +3271,6 @@ void *miner_thread(void *userdata)
if (api->thread_init && !api->thread_init(mythr))
goto out;
if (opt_debug)
applog(LOG_DEBUG, "Popping ping in miner thread");
tq_pop(mythr->q, NULL); /* Wait for a ping to start */
@@ -3395,7 +3370,6 @@ void *miner_thread(void *userdata)
if (unlikely(mythr->pause || !cgpu->enabled)) {
applog(LOG_WARNING, "Thread %d being disabled", thr_id);
mythr->rolling = mythr->cgpu->rolling = 0;
if (opt_debug)
applog(LOG_DEBUG, "Popping wakeup ping in miner thread");
thread_reportout(mythr);
tq_pop(mythr->q, NULL); /* Ignore ping that's popped */
@@ -3449,7 +3423,6 @@ static void convert_to_work(json_t *val, bool rolltime, struct pool *pool)
memcpy(work_clone, work, sizeof(struct work));
while (reuse_work(work)) {
work_clone->clone = true;
if (opt_debug)
applog(LOG_DEBUG, "Pushing rolled converted work to stage thread");
if (unlikely(!stage_work(work_clone)))
break;
@@ -3458,12 +3431,11 @@ static void convert_to_work(json_t *val, bool rolltime, struct pool *pool)
}
free_work(work_clone);
if (opt_debug)
applog(LOG_DEBUG, "Pushing converted work to stage thread");
if (unlikely(!stage_work(work)))
free_work(work);
else if (opt_debug)
else
applog(LOG_DEBUG, "Converted longpoll data to work");
}
@@ -3602,7 +3574,6 @@ static void start_longpoll(void)
tq_thaw(thr->q);
if (unlikely(thr_info_create(thr, NULL, longpoll_thread, thr)))
quit(1, "longpoll thread create failed");
if (opt_debug)
applog(LOG_DEBUG, "Pushing ping to longpoll thread");
tq_push(thr_info[longpoll_thr_id].q, &ping);
}
@@ -4471,7 +4442,6 @@ int main (int argc, char *argv[])
/* Enable threads for devices set not to mine but disable
* their queue in case we wish to enable them later */
if (cgpu->enabled) {
if (opt_debug)
applog(LOG_DEBUG, "Pushing ping to thread %d", thr->id);
tq_push(thr->q, &ping);

1
device-cpu.c

@@ -758,7 +758,6 @@ CPUSearch:
/* if nonce found, submit work */
if (unlikely(rc)) {
if (opt_debug)
applog(LOG_DEBUG, "CPU %d found something?", dev_from_id(thr_id));
if (unlikely(!submit_work_sync(thr, work))) {
applog(LOG_ERR, "Failed to submit_work_sync in miner_thread %d", thr_id);

3
device-gpu.c

@@ -553,7 +553,6 @@ retry:
gpus[selected].enabled = false;
goto retry;
}
if (opt_debug)
applog(LOG_DEBUG, "Pushing ping to thread %d", thr->id);
tq_push(thr->q, &ping);
@@ -1215,12 +1214,10 @@ static uint64_t opencl_scanhash(struct thr_info *thr, struct work *work,
return 0;
}
if (unlikely(thrdata->last_work)) {
if (opt_debug)
applog(LOG_DEBUG, "GPU %d found something in last work?", gpu->device_id);
postcalc_hash_async(thr, thrdata->last_work, thrdata->res);
thrdata->last_work = NULL;
} else {
if (opt_debug)
applog(LOG_DEBUG, "GPU %d found something?", gpu->device_id);
postcalc_hash_async(thr, work, thrdata->res);
}

2
findnonce.c

@@ -208,7 +208,6 @@ static void send_nonce(struct pc_data *pcd, cl_uint nonce)
if (unlikely(submit_nonce(thr, work, nonce) == false))
applog(LOG_ERR, "Failed to submit work, exiting");
} else {
if (opt_debug)
applog(LOG_DEBUG, "No best_g found! Error in OpenCL code?");
hw_errors++;
thr->cgpu->hw_errors++;
@@ -233,7 +232,6 @@ static void *postcalc_hash(void *userdata)
free(pcd);
if (unlikely(!nonces)) {
if (opt_debug)
applog(LOG_DEBUG, "No nonces found! Error in OpenCL code?");
hw_errors++;
thr->cgpu->hw_errors++;

21
ocl.c

@@ -129,7 +129,6 @@ static int advance(char **area, unsigned *remaining, const char *marker)
char *find = memmem(*area, *remaining, marker, strlen(marker));
if (!find) {
if (opt_debug)
applog(LOG_DEBUG, "Marker \"%s\" not found", marker);
return 0;
}
@@ -176,13 +175,11 @@ void patch_opcodes(char *w, unsigned remaining)
opcode++;
remaining -= 8;
}
if (opt_debug) {
applog(LOG_DEBUG, "Potential OP3 instructions identified: "
"%i BFE_INT, %i BFE_UINT, %i BYTE_ALIGN",
count_bfe_int, count_bfe_uint, count_byte_align);
applog(LOG_DEBUG, "Patched a total of %i BFI_INT instructions", patched);
}
}
_clState *initCl(unsigned int gpu, char *name, size_t nameSize)
{
@@ -321,7 +318,6 @@ _clState *initCl(unsigned int gpu, char *name, size_t nameSize)
applog(LOG_ERR, "Error: Failed to clGetDeviceInfo when trying to get CL_DEVICE_PREFERRED_VECTOR_WIDTH_INT");
return NULL;
}
if (opt_debug)
applog(LOG_DEBUG, "Preferred vector width reported %d", clState->preferred_vwidth);
status = clGetDeviceInfo(devices[gpu], CL_DEVICE_MAX_WORK_GROUP_SIZE, sizeof(size_t), (void *)&clState->max_work_size, NULL);
@@ -329,7 +325,6 @@ _clState *initCl(unsigned int gpu, char *name, size_t nameSize)
applog(LOG_ERR, "Error: Failed to clGetDeviceInfo when trying to get CL_DEVICE_MAX_WORK_GROUP_SIZE");
return NULL;
}
if (opt_debug)
applog(LOG_DEBUG, "Max work group size reported %d", clState->max_work_size);
/* For some reason 2 vectors is still better even if the card says
@@ -426,13 +421,11 @@ _clState *initCl(unsigned int gpu, char *name, size_t nameSize)
binaryfile = fopen(binaryfilename, "rb");
if (!binaryfile) {
if (opt_debug)
applog(LOG_DEBUG, "No binary found, generating from source");
} else {
struct stat binary_stat;
if (unlikely(stat(binaryfilename, &binary_stat))) {
if (opt_debug)
applog(LOG_DEBUG, "Unable to stat binary, generating from source");
fclose(binaryfile);
goto build;
@@ -470,7 +463,6 @@ _clState *initCl(unsigned int gpu, char *name, size_t nameSize)
}
fclose(binaryfile);
if (opt_debug)
applog(LOG_DEBUG, "Loaded binary image %s", binaryfilename);
goto built;
@@ -498,14 +490,12 @@ build:
sprintf(CompilerOptions, "-D WORKSIZE=%d -D VECTORS%d",
(int)clState->work_size, clState->preferred_vwidth);
if (opt_debug)
applog(LOG_DEBUG, "Setting worksize to %d", clState->work_size);
if (clState->preferred_vwidth > 1 && opt_debug)
if (clState->preferred_vwidth > 1)
applog(LOG_DEBUG, "Patched source to suit %d vectors", clState->preferred_vwidth);
if (clState->hasBitAlign) {
strcat(CompilerOptions, " -D BITALIGN");
if (opt_debug)
applog(LOG_DEBUG, "cl_amd_media_ops found, setting BITALIGN");
if (strstr(name, "Cedar") ||
strstr(name, "Redwood") ||
@@ -522,17 +512,15 @@ build:
strstr(name, "WinterPark" ) ||
strstr(name, "BeaverCreek" ))
patchbfi = true;
} else if (opt_debug)
} else
applog(LOG_DEBUG, "cl_amd_media_ops not found, will not set BITALIGN");
if (patchbfi) {
strcat(CompilerOptions, " -D BFI_INT");
if (opt_debug)
applog(LOG_DEBUG, "BFI_INT patch requiring device found, patched source with BFI_INT");
} else if (opt_debug)
} else
applog(LOG_DEBUG, "BFI_INT patch requiring device not found, will not BFI_INT patch");
if (opt_debug)
applog(LOG_DEBUG, "CompilerOptions: %s", CompilerOptions);
status = clBuildProgram(clState->program, 1, &devices[gpu], CompilerOptions , NULL, NULL);
free(CompilerOptions);
@@ -557,7 +545,6 @@ build:
}
/* copy over all of the generated binaries. */
if (opt_debug)
applog(LOG_DEBUG, "binary size %d : %d", gpu, binary_sizes[gpu]);
if (!binary_sizes[gpu]) {
applog(LOG_ERR, "OpenCL compiler generated a zero sized binary, may need to reboot!");
@@ -600,7 +587,6 @@ build:
}
w--; remaining++;
w += start; remaining -= start;
if (opt_debug)
applog(LOG_DEBUG, "At %p (%u rem. bytes), to begin patching",
w, remaining);
patch_opcodes(w, length);
@@ -633,7 +619,6 @@ build:
binaryfile = fopen(binaryfilename, "wb");
if (!binaryfile) {
/* Not a fatal problem, just means we build it again next time */
if (opt_debug)
applog(LOG_DEBUG, "Unable to create file %s", binaryfilename);
} else {
if (unlikely(fwrite(binaries[gpu], 1, binary_sizes[gpu], binaryfile) != binary_sizes[gpu])) {

5
util.c

@@ -68,6 +68,8 @@ struct tq_ent {
void vapplog(int prio, const char *fmt, va_list ap)
{
extern bool use_curses;
if (!opt_debug && prio == LOG_DEBUG)
return;
#ifdef HAVE_SYSLOG_H
if (use_syslog) {
@@ -219,10 +221,8 @@ static size_t resp_hdr_cb(void *ptr, size_t size, size_t nmemb, void *user_data)
if (!strcasecmp("X-Roll-Ntime", key)) {
if (!strncasecmp("N", val, 1)) {
if (opt_debug)
applog(LOG_DEBUG, "X-Roll-Ntime: N found");
} else {
if (opt_debug)
applog(LOG_DEBUG, "X-Roll-Ntime found");
hi->has_rolltime = true;
}
@@ -417,7 +417,6 @@ json_t *json_rpc_call(CURL *curl, const char *url,
}
if (!all_data.buf) {
if (opt_debug)
applog(LOG_DEBUG, "Empty data received in json_rpc_call.");
goto err_out;
}

Loading…
Cancel
Save