
Split thr_info array into control_thr and mining_thr pointers so more mining threads can be added later

nfactor-troky
Kano, 12 years ago
commit 212f32e5e0
5 changed files:
  1. api.c (8 changed lines)
  2. cgminer.c (94 changed lines)
  3. driver-cpu.c (2 changed lines)
  4. driver-opencl.c (16 changed lines)
  5. miner.h (3 changed lines)

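The change is mechanical, but the new layout is easier to see in one place before reading the hunks. The sketch below is a simplified reconstruction of what main() in cgminer.c now allocates; the enum slot names and the one-field thr_info stand-in are illustrative only (the actual code uses plain ints 0 through 6 and the full struct from miner.h), and the real error handling with quit() is omitted:

    /* Sketch of the thread-array split: a fixed array for the 7 control
     * (housekeeping) threads plus a table of individually allocated
     * mining-thread structs. Not the literal cgminer code. */
    #include <stdlib.h>

    struct thr_info { int id; };      /* stand-in; the real struct is in miner.h */

    struct thr_info *control_thr;     /* 7 housekeeping threads, fixed indices */
    struct thr_info **mining_thr;     /* one pointer per mining thread */

    /* Illustrative slot names; the commit assigns these as plain integers:
     * gwsched = 0, stage = 1, watchpool = 2, watchdog = 3, gpur = 4,
     * api = 5, input = 6. */
    enum { GWSCHED, STAGE, WATCHPOOL, WATCHDOG, GPUR, API, INPUT,
           TOTAL_CONTROL_THREADS };

    static void alloc_thread_arrays(int mining_threads)
    {
    	int i;

    	control_thr = calloc(TOTAL_CONTROL_THREADS, sizeof(*control_thr));
    	mining_thr = calloc(mining_threads, sizeof(*mining_thr));
    	for (i = 0; i < mining_threads; i++)
    		mining_thr[i] = calloc(1, sizeof(*mining_thr[i]));
    }

Because each mining thr_info is now allocated on its own and reached through a pointer, the table can presumably grow later without relocating structs that running threads already reference, which matches the stated motivation of adding more mining threads later; the control threads meanwhile get small fixed indices instead of offsets past mining_threads.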
api.c (8 changed lines)

@@ -1800,9 +1800,9 @@ static void pgaenable(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char
 #endif
 	for (i = 0; i < mining_threads; i++) {
-		pga = thr_info[i].cgpu->cgminer_id;
+		thr = mining_thr[i];
+		pga = thr->cgpu->cgminer_id;
 		if (pga == dev) {
-			thr = &thr_info[i];
 			cgpu->deven = DEV_ENABLED;
 			applog(LOG_DEBUG, "API: pushing ping (%d) to thread %d", ping, thr->id);
 			tq_push(thr->q, &ping);
@@ -2105,9 +2105,9 @@ static void gpuenable(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char
 	}
 	for (i = 0; i < gpu_threads; i++) {
-		gpu = thr_info[i].cgpu->device_id;
+		thr = mining_thr[i];
+		gpu = thr->cgpu->device_id;
 		if (gpu == id) {
-			thr = &thr_info[i];
 			if (thr->cgpu->status != LIFE_WELL) {
 				message(io_data, MSG_GPUMRE, id, NULL, isjson);
 				return;

cgminer.c (94 changed lines)

@@ -146,7 +146,8 @@ bool opt_bfl_noncerange;
 #endif
 #define QUIET	(opt_quiet || opt_realquiet)
 
-struct thr_info *thr_info;
+struct thr_info *control_thr;
+struct thr_info **mining_thr;
 static int gwsched_thr_id;
 static int stage_thr_id;
 static int watchpool_thr_id;
@@ -156,7 +157,7 @@ static int input_thr_id;
 #endif
 int gpur_thr_id;
 static int api_thr_id;
-static int total_threads;
+static int total_control_threads;
 
 #ifdef HAVE_LIBUSB
 pthread_mutex_t cgusb_lock;
@@ -377,7 +378,7 @@ static void sharelog(const char*disposition, const struct work*work)
 		return;
 
 	thr_id = work->thr_id;
-	cgpu = thr_info[thr_id].cgpu;
+	cgpu = mining_thr[thr_id]->cgpu;
 	pool = work->pool;
 	t = (unsigned long int)(work->tv_work_found.tv_sec);
 	target = bin2hex(work->target, sizeof(work->target));
@@ -1722,7 +1723,7 @@ out:
 
 int dev_from_id(int thr_id)
 {
-	return thr_info[thr_id].cgpu->device_id;
+	return mining_thr[thr_id]->cgpu->device_id;
 }
 
 /* Make the change in the recent value adjust dynamically when the difference
@@ -1894,7 +1895,7 @@ static void get_statline(char *buf, struct cgpu_info *cgpu)
 
 static void text_print_status(int thr_id)
 {
-	struct cgpu_info *cgpu = thr_info[thr_id].cgpu;
+	struct cgpu_info *cgpu = mining_thr[thr_id]->cgpu;
 	char logline[256];
 
 	if (cgpu) {
@@ -1960,7 +1961,7 @@ static int dev_width;
 static void curses_print_devstatus(int thr_id)
 {
 	static int awidth = 1, rwidth = 1, hwwidth = 1, uwidth = 1;
-	struct cgpu_info *cgpu = thr_info[thr_id].cgpu;
+	struct cgpu_info *cgpu = mining_thr[thr_id]->cgpu;
 	char logline[256];
 	char displayed_hashes[16], displayed_rolling[16];
 	uint64_t dh64, dr64;
@@ -2212,7 +2213,7 @@ share_result(json_t *val, json_t *res, json_t *err, const struct work *work,
 	     char *hashshow, bool resubmit, char *worktime)
 {
 	struct pool *pool = work->pool;
-	struct cgpu_info *cgpu = thr_info[work->thr_id].cgpu;
+	struct cgpu_info *cgpu = mining_thr[work->thr_id]->cgpu;
 
 	if (json_is_true(res) || (work->gbt && json_is_null(res))) {
 		mutex_lock(&stats_lock);
@@ -2362,7 +2363,7 @@ static bool submit_upstream_work(struct work *work, CURL *curl, bool resubmit)
 	char *s;
 	bool rc = false;
 	int thr_id = work->thr_id;
-	struct cgpu_info *cgpu = thr_info[thr_id].cgpu;
+	struct cgpu_info *cgpu = mining_thr[thr_id]->cgpu;
 	struct pool *pool = work->pool;
 	int rolltime;
 	struct timeval tv_submit, tv_submit_reply;
@@ -2744,18 +2745,18 @@ static void __kill_work(void)
 
 	applog(LOG_DEBUG, "Killing off watchpool thread");
 	/* Kill the watchpool thread */
-	thr = &thr_info[watchpool_thr_id];
+	thr = &control_thr[watchpool_thr_id];
 	thr_info_cancel(thr);
 
 	applog(LOG_DEBUG, "Killing off watchdog thread");
 	/* Kill the watchdog thread */
-	thr = &thr_info[watchdog_thr_id];
+	thr = &control_thr[watchdog_thr_id];
 	thr_info_cancel(thr);
 
 	applog(LOG_DEBUG, "Stopping mining threads");
 	/* Stop the mining threads*/
 	for (i = 0; i < mining_threads; i++) {
-		thr = &thr_info[i];
+		thr = mining_thr[i];
 		thr_info_freeze(thr);
 		thr->pause = true;
 	}
@@ -2765,17 +2766,17 @@ static void __kill_work(void)
 	applog(LOG_DEBUG, "Killing off mining threads");
 	/* Kill the mining threads*/
 	for (i = 0; i < mining_threads; i++) {
-		thr = &thr_info[i];
+		thr = mining_thr[i];
 		thr_info_cancel(thr);
 	}
 
 	applog(LOG_DEBUG, "Killing off stage thread");
 	/* Stop the others */
-	thr = &thr_info[stage_thr_id];
+	thr = &control_thr[stage_thr_id];
 	thr_info_cancel(thr);
 
 	applog(LOG_DEBUG, "Killing off API thread");
-	thr = &thr_info[api_thr_id];
+	thr = &control_thr[api_thr_id];
 	thr_info_cancel(thr);
 }
@@ -3369,7 +3370,7 @@ static void restart_threads(void)
 	discard_stale();
 
 	for (i = 0; i < mining_threads; i++)
-		thr_info[i].work_restart = true;
+		mining_thr[i]->work_restart = true;
 
 	mutex_lock(&restart_lock);
 	pthread_cond_broadcast(&restart_cond);
@@ -4409,16 +4410,16 @@ static void hashmeter(int thr_id, struct timeval *diff,
 	local_mhashes = (double)hashes_done / 1000000.0;
 
 	/* Update the last time this thread reported in */
 	if (thr_id >= 0) {
-		gettimeofday(&thr_info[thr_id].last, NULL);
-		thr_info[thr_id].cgpu->device_last_well = time(NULL);
+		gettimeofday(&(mining_thr[thr_id]->last), NULL);
+		mining_thr[thr_id]->cgpu->device_last_well = time(NULL);
 	}
 
 	secs = (double)diff->tv_sec + ((double)diff->tv_usec / 1000000.0);
 
 	/* So we can call hashmeter from a non worker thread */
 	if (thr_id >= 0) {
-		struct thr_info *thr = &thr_info[thr_id];
-		struct cgpu_info *cgpu = thr_info[thr_id].cgpu;
+		struct thr_info *thr = mining_thr[thr_id];
+		struct cgpu_info *cgpu = thr->cgpu;
 		double thread_rolling = 0.0;
 		int i;
@@ -4908,7 +4909,7 @@ retry_stratum:
 		calc_diff(work, 0);
 		applog(LOG_DEBUG, "Pushing pooltest work to base pool");
 
-		tq_push(thr_info[stage_thr_id].q, work);
+		tq_push(control_thr[stage_thr_id].q, work);
 		total_getworks++;
 		pool->getwork_requested++;
 		ret = true;
@@ -5858,7 +5859,7 @@ static void *watchdog_thread(void __maybe_unused *userdata)
 			sched_paused = true;
 			for (i = 0; i < mining_threads; i++) {
 				struct thr_info *thr;
-				thr = &thr_info[i];
+				thr = mining_thr[i];
 
 				thr->pause = true;
 			}
@@ -5872,7 +5873,7 @@ static void *watchdog_thread(void __maybe_unused *userdata)
 			for (i = 0; i < mining_threads; i++) {
 				struct thr_info *thr;
-				thr = &thr_info[i];
+				thr = mining_thr[i];
 
 				/* Don't touch disabled devices */
 				if (thr->cgpu->deven == DEV_DISABLED)
@@ -6709,14 +6710,23 @@ int main(int argc, char *argv[])
 		fork_monitor();
 #endif // defined(unix)
 
-	total_threads = mining_threads + 7;
-	thr_info = calloc(total_threads, sizeof(*thr));
-	if (!thr_info)
-		quit(1, "Failed to calloc thr_info");
+	mining_thr = calloc(mining_threads, sizeof(thr));
+	if (!mining_thr)
+		quit(1, "Failed to calloc mining_thr");
+	for (i = 0; i < mining_threads; i++) {
+		mining_thr[i] = calloc(1, sizeof(*thr));
+		if (!mining_thr[i])
+			quit(1, "Failed to calloc mining_thr[%d]", i);
+	}
 
-	gwsched_thr_id = mining_threads;
-	stage_thr_id = mining_threads + 1;
-	thr = &thr_info[stage_thr_id];
+	total_control_threads = 7;
+	control_thr = calloc(total_control_threads, sizeof(*thr));
+	if (!control_thr)
+		quit(1, "Failed to calloc control_thr");
+
+	gwsched_thr_id = 0;
+	stage_thr_id = 1;
+	thr = &control_thr[stage_thr_id];
 	thr->q = tq_new();
 	if (!thr->q)
 		quit(1, "Failed to tq_new");
@@ -6808,7 +6818,7 @@ begin_bench:
 		cgpu->status = LIFE_INIT;
 
 		for (j = 0; j < cgpu->threads; ++j, ++k) {
-			thr = &thr_info[k];
+			thr = mining_thr[k];
 			thr->id = k;
 			thr->cgpu = cgpu;
 			thr->device_thread = j;
@@ -6853,15 +6863,15 @@ begin_bench:
 	gettimeofday(&total_tv_start, NULL);
 	gettimeofday(&total_tv_end, NULL);
 
-	watchpool_thr_id = mining_threads + 2;
-	thr = &thr_info[watchpool_thr_id];
+	watchpool_thr_id = 2;
+	thr = &control_thr[watchpool_thr_id];
 	/* start watchpool thread */
 	if (thr_info_create(thr, NULL, watchpool_thread, NULL))
 		quit(1, "watchpool thread create failed");
 	pthread_detach(thr->pth);
 
-	watchdog_thr_id = mining_threads + 3;
-	thr = &thr_info[watchdog_thr_id];
+	watchdog_thr_id = 3;
+	thr = &control_thr[watchdog_thr_id];
 	/* start watchdog thread */
 	if (thr_info_create(thr, NULL, watchdog_thread, NULL))
 		quit(1, "watchdog thread create failed");
@@ -6869,8 +6879,8 @@ begin_bench:
 
 #ifdef HAVE_OPENCL
 	/* Create reinit gpu thread */
-	gpur_thr_id = mining_threads + 4;
-	thr = &thr_info[gpur_thr_id];
+	gpur_thr_id = 4;
+	thr = &control_thr[gpur_thr_id];
 	thr->q = tq_new();
 	if (!thr->q)
 		quit(1, "tq_new failed for gpur_thr_id");
@@ -6879,8 +6889,8 @@ begin_bench:
 #endif
 
 	/* Create API socket thread */
-	api_thr_id = mining_threads + 5;
-	thr = &thr_info[api_thr_id];
+	api_thr_id = 5;
+	thr = &control_thr[api_thr_id];
 	if (thr_info_create(thr, NULL, api_thread, thr))
 		quit(1, "API thread create failed");
@@ -6888,13 +6898,17 @@ begin_bench:
 	/* Create curses input thread for keyboard input. Create this last so
 	 * that we know all threads are created since this can call kill_work
 	 * to try and shut down all previous threads. */
-	input_thr_id = mining_threads + 6;
-	thr = &thr_info[input_thr_id];
+	input_thr_id = 6;
+	thr = &control_thr[input_thr_id];
 	if (thr_info_create(thr, NULL, input_thread, thr))
 		quit(1, "input thread create failed");
 	pthread_detach(thr->pth);
 #endif
 
+	/* Just to be sure */
+	if (total_control_threads != 7)
+		quit(1, "incorrect total_control_threads (%d) should be 7", total_control_threads);
+
 	/* Once everything is set up, main() becomes the getwork scheduler */
 	while (42) {
 		int ts, max_staged = opt_queue;

driver-cpu.c (2 changed lines)

@@ -768,7 +768,7 @@ static void cpu_detect()
 
 static void reinit_cpu_device(struct cgpu_info *cpu)
 {
-	tq_push(thr_info[cpur_thr_id].q, cpu);
+	tq_push(control_thr[cpur_thr_id].q, cpu);
 }
 
 static bool cpu_thread_prepare(struct thr_info *thr)

driver-opencl.c (16 changed lines)

@@ -616,7 +616,7 @@ void pause_dynamic_threads(int gpu)
 	int i;
 
 	for (i = 1; i < cgpu->threads; i++) {
-		struct thr_info *thr = &thr_info[i];
+		struct thr_info *thr = mining_thr[i];
 
 		if (!thr->pause && cgpu->dynamic) {
 			applog(LOG_WARNING, "Disabling extra threads due to dynamic mode.");
@@ -705,7 +705,7 @@ retry:
 		else
 			wlog("%d\n", gpus[gpu].intensity);
 		for (i = 0; i < mining_threads; i++) {
-			thr = &thr_info[i];
+			thr = mining_thr[i];
 			if (thr->cgpu != cgpu)
 				continue;
 			get_datestamp(checkin, &thr->last);
@@ -760,7 +760,7 @@ retry:
 		}
 		gpus[selected].deven = DEV_ENABLED;
 		for (i = 0; i < mining_threads; ++i) {
-			thr = &thr_info[i];
+			thr = mining_thr[i];
 			cgpu = thr->cgpu;
 			if (cgpu->drv->drv != DRIVER_OPENCL)
 				continue;
@@ -1147,14 +1147,14 @@ select_cgpu:
 	gpu = cgpu->device_id;
 
 	for (thr_id = 0; thr_id < mining_threads; ++thr_id) {
-		thr = &thr_info[thr_id];
+		thr = mining_thr[thr_id];
 		cgpu = thr->cgpu;
 		if (cgpu->drv->drv != DRIVER_OPENCL)
 			continue;
 		if (dev_from_id(thr_id) != gpu)
 			continue;
 
-		thr = &thr_info[thr_id];
+		thr = mining_thr[thr_id];
 		if (!thr) {
 			applog(LOG_WARNING, "No reference to thread %d exists", thr_id);
 			continue;
@@ -1172,7 +1172,7 @@ select_cgpu:
 	for (thr_id = 0; thr_id < mining_threads; ++thr_id) {
 		int virtual_gpu;
 
-		thr = &thr_info[thr_id];
+		thr = mining_thr[thr_id];
 		cgpu = thr->cgpu;
 		if (cgpu->drv->drv != DRIVER_OPENCL)
 			continue;
@@ -1209,7 +1209,7 @@ select_cgpu:
 	get_datestamp(cgpu->init, &now);
 
 	for (thr_id = 0; thr_id < mining_threads; ++thr_id) {
-		thr = &thr_info[thr_id];
+		thr = mining_thr[thr_id];
 		cgpu = thr->cgpu;
 		if (cgpu->drv->drv != DRIVER_OPENCL)
 			continue;
@@ -1263,7 +1263,7 @@ static void opencl_detect()
 
 static void reinit_opencl_device(struct cgpu_info *gpu)
 {
-	tq_push(thr_info[gpur_thr_id].q, gpu);
+	tq_push(control_thr[gpur_thr_id].q, gpu);
 }
 
 #ifdef HAVE_ADL

miner.h (3 changed lines)

@@ -784,7 +784,8 @@ extern int num_processors;
 extern int hw_errors;
 extern bool use_syslog;
 extern bool opt_quiet;
-extern struct thr_info *thr_info;
+extern struct thr_info *control_thr;
+extern struct thr_info **mining_thr;
 extern struct cgpu_info gpus[MAX_GPUDEVICES];
 extern int gpu_threads;
 #ifdef USE_SCRYPT

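For completeness, this is roughly how other modules pick up the renamed externs; the helper below is hypothetical and simply mirrors the dev_from_id() change in cgminer.c above (mining threads go through the pointer table, while control-thread queues such as gpur_thr_id's are reached directly in control_thr):

    #include "miner.h"	/* brings in control_thr and mining_thr, declared above */

    /* Hypothetical helper in the style of dev_from_id(): per-thread device
     * info now comes from mining_thr[thr_id]-> rather than thr_info[thr_id]. */
    static struct cgpu_info *cgpu_from_thr_id(int thr_id)
    {
    	return mining_thr[thr_id]->cgpu;
    }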