diff --git a/cgminer.c b/cgminer.c
index 1d881070..d7443056 100644
--- a/cgminer.c
+++ b/cgminer.c
@@ -854,7 +854,7 @@ static char *parse_config(json_t *config, bool fileconf)
 	return NULL;
 }
 
-static char *load_config(const char *arg, void *unused)
+static char *load_config(const char *arg, void __maybe_unused *unused)
 {
 	json_error_t err;
 	json_t *config;
@@ -2599,7 +2599,7 @@ retry:
 	opt_loginput = false;
 }
 
-static void *input_thread(void *userdata)
+static void *input_thread(void __maybe_unused *userdata)
 {
 	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
 
@@ -2979,7 +2979,8 @@ static void roll_work(struct work *work)
 /* Recycle the work at a higher starting res_nonce if we know the thread we're
  * giving it to will not finish scanning it. We keep the master copy to be
  * recycled more rapidly and discard the clone to avoid repeating work */
-static bool divide_work(struct timeval *now, struct work *work, uint32_t hash_div)
+static bool divide_work(struct timeval __maybe_unused *now, struct work *work,
+			uint32_t __maybe_unused hash_div)
 {
 	if (can_roll(work) && should_roll(work)) {
 		roll_work(work);
@@ -3183,7 +3184,7 @@ bool submit_nonce(struct thr_info *thr, struct work *work, uint32_t nonce)
 	return submit_work_sync(thr, work);
 }
 
-static inline bool abandon_work(int thr_id, struct work *work, struct timeval *wdiff, uint64_t hashes)
+static inline bool abandon_work(struct work *work, struct timeval *wdiff, uint64_t hashes)
 {
 	if (wdiff->tv_sec > opt_scantime ||
 	    work->blk.nonce >= MAXTHREADS - hashes ||
@@ -3334,7 +3335,7 @@ void *miner_thread(void *userdata)
 
 			if (can_roll(work) && should_roll(work))
 				roll_work(work);
-		} while (!abandon_work(thr_id, work, &wdiff, hashes));
+		} while (!abandon_work(work, &wdiff, hashes));
 	}
 
 out:
@@ -3527,7 +3528,7 @@ void reinit_device(struct cgpu_info *cgpu)
 /* Makes sure the hashmeter keeps going even if mining threads stall, updates
  * the screen at regular intervals, and restarts threads if they appear to have
  * died. */
-static void *watchdog_thread(void *userdata)
+static void *watchdog_thread(void __maybe_unused *userdata)
 {
 	const unsigned int interval = 3;
 	static struct timeval rotate_tv;
diff --git a/device-gpu.c b/device-gpu.c
index 8dfe92d5..a74c51aa 100644
--- a/device-gpu.c
+++ b/device-gpu.c
@@ -1081,13 +1081,14 @@ static void opencl_free_work(struct thr_info *thr, struct work *work)
 	}
 }
 
-static bool opencl_prepare_work(struct thr_info *thr, struct work *work)
+static bool opencl_prepare_work(struct thr_info __maybe_unused *thr, struct work *work)
 {
 	precalc_hash(&work->blk, (uint32_t *)(work->midstate), (uint32_t *)(work->data + 64));
 	return true;
 }
 
-static uint64_t opencl_scanhash(struct thr_info *thr, struct work *work, uint64_t max_nonce)
+static uint64_t opencl_scanhash(struct thr_info *thr, struct work *work,
+				uint64_t __maybe_unused max_nonce)
 {
 	const int thr_id = thr->id;
 	struct opencl_thread_data *thrdata = thr->cgpu_data;
diff --git a/util.c b/util.c
index b68ba206..e05e4507 100644
--- a/util.c
+++ b/util.c
@@ -239,7 +239,8 @@ out:
 }
 
 #ifdef CURL_HAS_SOCKOPT
-int json_rpc_call_sockopt_cb(void *userdata, curl_socket_t fd, curlsocktype purpose)
+int json_rpc_call_sockopt_cb(void __maybe_unused *userdata, curl_socket_t fd,
+			     curlsocktype __maybe_unused purpose)
 {
 	int keepalive = 1;
 	int tcp_keepcnt = 5;