nvapi: link some more apis, pascal boost table
This information is shown with ccminer -D -n.
parent eae4ede111
commit bdc441ebd0
api.cpp (5 changed lines)
@@ -263,6 +263,9 @@ static void gpuhwinfos(int gpu_id)
cgpu->gpu_pstate = (int16_t) gpu_pstate(cgpu);
cgpu->gpu_power = gpu_power(cgpu);
gpu_info(cgpu);
#ifdef WIN32
if (opt_debug) nvapi_pstateinfo(cgpu->gpu_id);
#endif
#endif

cuda_gpu_clocks(cgpu);

@@ -273,7 +276,7 @@ static void gpuhwinfos(int gpu_id)

card = device_name[gpu_id];

snprintf(buf, sizeof(buf), "GPU=%d;BUS=%hd;CARD=%s;SM=%u;MEM=%lu;"
snprintf(buf, sizeof(buf), "GPU=%d;BUS=%hd;CARD=%s;SM=%hu;MEM=%lu;"
"TEMP=%.1f;FAN=%hu;RPM=%hu;FREQ=%d;MEMFREQ=%d;PST=%s;POWER=%u;"
"VID=%hx;PID=%hx;NVML=%d;NVAPI=%d;SN=%s;BIOS=%s|",
gpu_id, cgpu->gpu_bus, card, cgpu->gpu_arch, cgpu->gpu_mem,
@@ -19,6 +19,7 @@ typedef struct {
NvU32 unknown4; // 0
} entries[4];
} NVAPI_GPU_POWER_INFO;
#define NVAPI_GPU_POWER_INFO_VER MAKE_NVAPI_VERSION(NVAPI_GPU_POWER_INFO, 1)

typedef struct {
NvU32 version;

@@ -30,14 +31,132 @@ typedef struct {
NvU32 unknown4;
} entries[4];
} NVAPI_GPU_POWER_STATUS;

#define NVAPI_GPU_POWER_STATUS_VER MAKE_NVAPI_VERSION(NVAPI_GPU_POWER_STATUS, 1)
#define NVAPI_GPU_POWER_INFO_VER MAKE_NVAPI_VERSION(NVAPI_GPU_POWER_INFO, 1)

typedef struct {
NvU32 version;
NvU32 flags;
struct {
NvU32 controller;
NvU32 unknown;
NvS32 min_temp;
NvS32 def_temp;
NvS32 max_temp;
NvU32 defaultFlags;
} entries[4];
} NVAPI_GPU_THERMAL_INFO;
#define NVAPI_GPU_THERMAL_INFO_VER MAKE_NVAPI_VERSION(NVAPI_GPU_THERMAL_INFO, 2)

typedef struct {
NvU32 version;
NvU32 flags;
struct {
NvU32 controller;
NvU32 value;
NvU32 flags;
} entries[4];
} NVAPI_GPU_THERMAL_LIMIT;
#define NVAPI_GPU_THERMAL_LIMIT_VER MAKE_NVAPI_VERSION(NVAPI_GPU_THERMAL_LIMIT, 2)

typedef struct {
NvU32 version;
NvU32 flags;
struct {
NvU32 voltage_domain;
NvU32 current_voltage;
} entries[16];
} NVIDIA_GPU_VOLTAGE_DOMAINS_STATUS;
#define NVIDIA_GPU_VOLTAGE_DOMAINS_STATUS_VER MAKE_NVAPI_VERSION(NVIDIA_GPU_VOLTAGE_DOMAINS_STATUS, 1)

typedef struct {
NvU32 version;
NvU32 numClocks; // unsure
NvU32 nul[8];
struct {
NvU32 a;
NvU32 clockType;
NvU32 c;
NvU32 d;
NvU32 e;
NvU32 f;
NvU32 g;
NvU32 h;
NvU32 i;
NvU32 j;
NvS32 rangeMax;
NvS32 rangeMin;
NvS32 tempMax; // ? unsure
NvU32 n;
NvU32 o;
NvU32 p;
NvU32 q;
NvU32 r;
} entries[32]; // NVAPI_MAX_GPU_CLOCKS ?
} NVAPI_CLOCKS_RANGE; // 2344 bytes
#define NVAPI_CLOCKS_RANGE_VER MAKE_NVAPI_VERSION(NVAPI_CLOCKS_RANGE, 1)

// seems to return a clock table mask
typedef struct {
NvU32 version;
NvU32 mask[4]; // 80 bits mask
NvU32 buf0[8];
struct {
NvU32 a;
NvU32 b;
NvU32 c;
NvU32 d;
NvU32 memDelta; // 1 for mem
NvU32 gpuDelta; // 1 for gpu
} clocks[80 + 23];
NvU32 buf1[916];
} NVAPI_CLOCK_MASKS; // 6188 bytes
#define NVAPI_CLOCK_MASKS_VER MAKE_NVAPI_VERSION(NVAPI_CLOCK_MASKS, 1)

// contains the gpu/mem clocks deltas
typedef struct {
NvU32 version;
NvU32 mask[4]; // 80 bits mask
NvU32 buf0[12];
struct {
NvU32 a;
NvU32 b;
NvU32 c;
NvU32 d;
NvU32 e;
NvS32 freqDelta; // 84000 = +84MHz
NvU32 g;
NvU32 h;
NvU32 i;
} gpuDeltas[80];
NvU32 memFilled[23]; // maybe only 4 max
NvS32 memDeltas[23];
NvU32 buf1[1529];
} NVAPI_CLOCK_TABLE; // 9248 bytes
#define NVAPI_CLOCK_TABLE_VER MAKE_NVAPI_VERSION(NVAPI_CLOCK_TABLE, 1)

NvAPI_Status NvAPI_DLL_GetInterfaceVersionString(NvAPI_ShortString string);
NvAPI_Status NvAPI_DLL_ClientPowerPoliciesGetInfo(NvPhysicalGpuHandle hPhysicalGpu, NVAPI_GPU_POWER_INFO* pInfo);
NvAPI_Status NvAPI_DLL_ClientPowerPoliciesGetStatus(NvPhysicalGpuHandle hPhysicalGpu, NVAPI_GPU_POWER_STATUS* pPolicies);
NvAPI_Status NvAPI_DLL_ClientPowerPoliciesSetStatus(NvPhysicalGpuHandle hPhysicalGpu, NVAPI_GPU_POWER_STATUS* pPolicies);

NvAPI_Status NvAPI_DLL_ClientPowerPoliciesGetInfo(NvPhysicalGpuHandle hPhysicalGpu, NVAPI_GPU_POWER_INFO*);
NvAPI_Status NvAPI_DLL_ClientPowerPoliciesGetStatus(NvPhysicalGpuHandle hPhysicalGpu, NVAPI_GPU_POWER_STATUS*);
NvAPI_Status NvAPI_DLL_ClientPowerPoliciesSetStatus(NvPhysicalGpuHandle hPhysicalGpu, NVAPI_GPU_POWER_STATUS*);

NvAPI_Status NvAPI_DLL_ClientThermalPoliciesGetInfo(NvPhysicalGpuHandle hPhysicalGpu, NVAPI_GPU_THERMAL_INFO*);
NvAPI_Status NvAPI_DLL_ClientThermalPoliciesGetLimit(NvPhysicalGpuHandle hPhysicalGpu, NVAPI_GPU_THERMAL_LIMIT*);
NvAPI_Status NvAPI_DLL_ClientThermalPoliciesSetLimit(NvPhysicalGpuHandle hPhysicalGpu, NVAPI_GPU_THERMAL_LIMIT*);

NvAPI_Status NvAPI_DLL_GetVoltageDomainsStatus(NvPhysicalGpuHandle hPhysicalGpu, NVIDIA_GPU_VOLTAGE_DOMAINS_STATUS*);

// to dig...
NvAPI_Status NvAPI_DLL_GetClockBoostRanges(NvPhysicalGpuHandle hPhysicalGpu, NVAPI_CLOCKS_RANGE*);
NvAPI_Status NvAPI_DLL_GetClockBoostMask(NvPhysicalGpuHandle hPhysicalGpu, NVAPI_CLOCK_MASKS*); // 0x507B4B59
NvAPI_Status NvAPI_DLL_GetClockBoostTable(NvPhysicalGpuHandle hPhysicalGpu, NVAPI_CLOCK_TABLE*); // 0x23F1B133


NvAPI_Status NvAPI_DLL_GetPerfClocks(NvPhysicalGpuHandle hPhysicalGpu, void* pFreqs);

NvAPI_Status NvAPI_DLL_GetSerialNumber(NvPhysicalGpuHandle handle, NvAPI_ShortString serial);

NvAPI_Status NvAPI_DLL_SetPstates20(NvPhysicalGpuHandle handle, NV_GPU_PERF_PSTATES20_INFO *pPerfPstatesInfo);

NvAPI_Status NvAPI_DLL_Unload();
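All of these structures follow the usual NVAPI calling convention: the caller zero-initializes the struct, sets its version field to the matching *_VER macro, and only then passes it to the entry point. A minimal usage sketch, not part of the commit (the helper name and the GPU handle argument are illustrative, error handling is trimmed, and the declarations above are assumed to be in scope):

// read back the current thermal limit through the newly linked private call
static void dump_thermal_limit(NvPhysicalGpuHandle hGpu)
{
	NVAPI_GPU_THERMAL_LIMIT tlim = { 0 };
	tlim.version = NVAPI_GPU_THERMAL_LIMIT_VER; // versioned struct: set before calling
	if (NvAPI_DLL_ClientThermalPoliciesGetLimit(hGpu, &tlim) == NVAPI_OK) {
		// the limit is stored in 1/256 degree units, hence the >> 8
		applog(LOG_RAW, "thermal limit: %u C", tlim.entries[0].value >> 8);
	}
}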
cuda.cpp (3 changed lines)
@@ -108,6 +108,9 @@ void cuda_print_devices()
fprintf(stderr, "GPU #%d: SM %d.%d %s\n", dev_id, props.major, props.minor, device_name[dev_id]);
#ifdef USE_WRAPNVML
if (opt_debug) nvml_print_device_info(dev_id);
#ifdef WIN32
if (opt_debug) nvapi_pstateinfo(dev_id);
#endif
#endif
}
}
miner.h (4 changed lines)
@@ -67,7 +67,8 @@ typedef char * va_list;

#ifdef HAVE_SYSLOG_H
#include <syslog.h>
#define LOG_BLUE 0x10 /* unique value */
#define LOG_BLUE 0x10
#define LOG_RAW 0x99
#else
enum {
LOG_ERR,

@@ -77,6 +78,7 @@ enum {
LOG_DEBUG,
/* custom notices */
LOG_BLUE = 0x10,
LOG_RAW = 0x99
};
#endif
nvapi.cpp (109 changed lines)
@@ -25,7 +25,11 @@ public:
NvAPILibraryHandle()
{
bool success = false;
#ifdef _WIN64
library = LoadLibrary("nvapi64.dll");
#else
library = LoadLibrary("nvapi.dll");
#endif
if (library != NULL) {
nvidia_query = reinterpret_cast<QueryPtr>(GetProcAddress(library, "nvapi_QueryInterface"));
if (nvidia_query != NULL) {

@@ -105,7 +109,7 @@ NvAPI_Status NvAPI_DLL_ClientPowerPoliciesGetStatus(NvPhysicalGpuHandle handle,
return (*pointer)(handle, pPolicies);
}

#define NVAPI_ID_POWERPOL_SET 0x0AD95F5ED
#define NVAPI_ID_POWERPOL_SET 0xAD95F5ED
NvAPI_Status NvAPI_DLL_ClientPowerPoliciesSetStatus(NvPhysicalGpuHandle handle, NVAPI_GPU_POWER_STATUS* pPolicies) {
static NvAPI_Status (*pointer)(NvPhysicalGpuHandle, NVAPI_GPU_POWER_STATUS*) = NULL;
if(!nvapi_dll_loaded) return NVAPI_API_NOT_INITIALIZED;

@@ -115,6 +119,109 @@ NvAPI_Status NvAPI_DLL_ClientPowerPoliciesSetStatus(NvPhysicalGpuHandle handle,
return (*pointer)(handle, pPolicies);
}

#define NVAPI_ID_THERMAL_INFO 0x0D258BB5
NvAPI_Status NvAPI_DLL_ClientThermalPoliciesGetInfo(NvPhysicalGpuHandle handle, NVAPI_GPU_THERMAL_INFO* pInfo) {
static NvAPI_Status (*pointer)(NvPhysicalGpuHandle, NVAPI_GPU_THERMAL_INFO*) = NULL;
if(!nvapi_dll_loaded) return NVAPI_API_NOT_INITIALIZED;
if(!pointer) {
pointer = (NvAPI_Status (*)(NvPhysicalGpuHandle, NVAPI_GPU_THERMAL_INFO*))nvidia_handle->query(NVAPI_ID_THERMAL_INFO);
}
return (*pointer)(handle, pInfo);
}

#define NVAPI_ID_TLIMIT_GET 0xE9C425A1
NvAPI_Status NvAPI_DLL_ClientThermalPoliciesGetLimit(NvPhysicalGpuHandle handle, NVAPI_GPU_THERMAL_LIMIT* pLimit) {
static NvAPI_Status (*pointer)(NvPhysicalGpuHandle, NVAPI_GPU_THERMAL_LIMIT*) = NULL;
if(!nvapi_dll_loaded) return NVAPI_API_NOT_INITIALIZED;
if(!pointer) {
pointer = (NvAPI_Status (*)(NvPhysicalGpuHandle, NVAPI_GPU_THERMAL_LIMIT*))nvidia_handle->query(NVAPI_ID_TLIMIT_GET);
}
return (*pointer)(handle, pLimit);
}

#define NVAPI_ID_TLIMIT_SET 0x34C0B13D
NvAPI_Status NvAPI_DLL_ClientThermalPoliciesSetLimit(NvPhysicalGpuHandle handle, NVAPI_GPU_THERMAL_LIMIT* pLimit) {
static NvAPI_Status (*pointer)(NvPhysicalGpuHandle, NVAPI_GPU_THERMAL_LIMIT*) = NULL;
if(!nvapi_dll_loaded) return NVAPI_API_NOT_INITIALIZED;
if(!pointer) {
pointer = (NvAPI_Status (*)(NvPhysicalGpuHandle, NVAPI_GPU_THERMAL_LIMIT*))nvidia_handle->query(NVAPI_ID_TLIMIT_SET);
}
return (*pointer)(handle, pLimit);
}

#define NVAPI_ID_SERIALNUM_GET 0x14B83A5F
NvAPI_Status NvAPI_DLL_GetSerialNumber(NvPhysicalGpuHandle handle, NvAPI_ShortString serial) {
static NvAPI_Status (*pointer)(NvPhysicalGpuHandle, NvAPI_ShortString) = NULL;
if(!nvapi_dll_loaded) return NVAPI_API_NOT_INITIALIZED;
if(!pointer) {
pointer = (NvAPI_Status (*)(NvPhysicalGpuHandle, NvAPI_ShortString))nvidia_handle->query(NVAPI_ID_SERIALNUM_GET);
}
return (*pointer)(handle, serial);
}

#define NVAPI_ID_VDOMAINS_GET 0xC16C7E2C
NvAPI_Status NvAPI_DLL_GetVoltageDomainsStatus(NvPhysicalGpuHandle handle, NVIDIA_GPU_VOLTAGE_DOMAINS_STATUS* status) {
static NvAPI_Status (*pointer)(NvPhysicalGpuHandle, NVIDIA_GPU_VOLTAGE_DOMAINS_STATUS*) = NULL;
if(!nvapi_dll_loaded) return NVAPI_API_NOT_INITIALIZED;
if(!pointer) {
pointer = (NvAPI_Status (*)(NvPhysicalGpuHandle, NVIDIA_GPU_VOLTAGE_DOMAINS_STATUS*))nvidia_handle->query(NVAPI_ID_VDOMAINS_GET);
}
return (*pointer)(handle, status);
}

#define NVAPI_ID_CLK_RANGE_GET 0x64B43A6A
NvAPI_Status NvAPI_DLL_GetClockBoostRanges(NvPhysicalGpuHandle handle, NVAPI_CLOCKS_RANGE* range) {
static NvAPI_Status (*pointer)(NvPhysicalGpuHandle, NVAPI_CLOCKS_RANGE*) = NULL;
if(!nvapi_dll_loaded) return NVAPI_API_NOT_INITIALIZED;
if(!pointer) {
pointer = (NvAPI_Status (*)(NvPhysicalGpuHandle, NVAPI_CLOCKS_RANGE*))nvidia_handle->query(NVAPI_ID_CLK_RANGE_GET);
}
return (*pointer)(handle, range);
}

#define NVAPI_ID_CLK_BOOST_MASK 0x507B4B59
NvAPI_Status NvAPI_DLL_GetClockBoostMask(NvPhysicalGpuHandle handle, NVAPI_CLOCK_MASKS* range) {
static NvAPI_Status (*pointer)(NvPhysicalGpuHandle, NVAPI_CLOCK_MASKS*) = NULL;
if(!nvapi_dll_loaded) return NVAPI_API_NOT_INITIALIZED;
if(!pointer) {
pointer = (NvAPI_Status (*)(NvPhysicalGpuHandle, NVAPI_CLOCK_MASKS*))nvidia_handle->query(NVAPI_ID_CLK_BOOST_MASK);
}
return (*pointer)(handle, range);
}

#define NVAPI_ID_CLK_BOOST_TABLE 0x23F1B133
NvAPI_Status NvAPI_DLL_GetClockBoostTable(NvPhysicalGpuHandle handle, NVAPI_CLOCK_TABLE* range) {
static NvAPI_Status (*pointer)(NvPhysicalGpuHandle, NVAPI_CLOCK_TABLE*) = NULL;
if(!nvapi_dll_loaded) return NVAPI_API_NOT_INITIALIZED;
if(!pointer) {
pointer = (NvAPI_Status (*)(NvPhysicalGpuHandle, NVAPI_CLOCK_TABLE*))nvidia_handle->query(NVAPI_ID_CLK_BOOST_TABLE);
}
return (*pointer)(handle, range);
}

#define NVAPI_ID_CLK_BOOST_CURVE 0x0700004A //??


#define NVAPI_ID_PERFCLOCKS_GET 0x1EA54A3B
NvAPI_Status NvAPI_DLL_GetPerfClocks(NvPhysicalGpuHandle handle, void* pFreqs){
static NvAPI_Status (*pointer)(NvPhysicalGpuHandle, void*) = NULL;
if(!nvapi_dll_loaded) return NVAPI_API_NOT_INITIALIZED;
if(!pointer) {
pointer = (NvAPI_Status (*)(NvPhysicalGpuHandle, void*))nvidia_handle->query(NVAPI_ID_PERFCLOCKS_GET);
}
return (*pointer)(handle, pFreqs);
}

#define NVAPI_ID_PSTATE20_SET 0x0F4DAE6B // NOT SUPPORTED
NvAPI_Status NvAPI_DLL_SetPstates20(NvPhysicalGpuHandle handle, NV_GPU_PERF_PSTATES20_INFO *pPerfPstatesInfo) {
static NvAPI_Status (*pointer)(NvPhysicalGpuHandle, NV_GPU_PERF_PSTATES20_INFO*) = NULL;
if(!nvapi_dll_loaded) return NVAPI_API_NOT_INITIALIZED;
if(!pointer) {
pointer = (NvAPI_Status (*)(NvPhysicalGpuHandle, NV_GPU_PERF_PSTATES20_INFO*))nvidia_handle->query(NVAPI_ID_PSTATE20_SET);
}
return (*pointer)(handle, pPerfPstatesInfo);
}

#define NVAPI_ID_UNLOAD 0xD22BDD7E
NvAPI_Status NvAPI_DLL_Unload() {
static NvAPI_Status (*pointer)() = NULL;
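Every wrapper in nvapi.cpp follows the same shape: the hidden entry point is resolved once through nvapi_QueryInterface (wrapped by nvidia_handle->query) using its 32-bit interface ID, and the resulting address is cached in a static function pointer so later calls go straight to the driver. A template for adding another wrapper might look like this; NVAPI_ID_EXAMPLE, EXAMPLE_STRUCT and NvAPI_DLL_Example are placeholders, not real interfaces:

#define NVAPI_ID_EXAMPLE 0x12345678 // hypothetical interface ID
NvAPI_Status NvAPI_DLL_Example(NvPhysicalGpuHandle handle, EXAMPLE_STRUCT* pData) {
	static NvAPI_Status (*pointer)(NvPhysicalGpuHandle, EXAMPLE_STRUCT*) = NULL;
	if(!nvapi_dll_loaded) return NVAPI_API_NOT_INITIALIZED;
	if(!pointer) {
		// resolved once per process, then reused on every call
		pointer = (NvAPI_Status (*)(NvPhysicalGpuHandle, EXAMPLE_STRUCT*))nvidia_handle->query(NVAPI_ID_EXAMPLE);
	}
	return (*pointer)(handle, pData);
}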
nvml.cpp (161 changed lines)
@@ -1,4 +1,4 @@
/*
/*
* A trivial little dlopen()-based wrapper library for the
* NVIDIA NVML library, to allow runtime discovery of NVML on an
* arbitrary system. This is all very hackish and simple-minded, but

@@ -830,7 +830,7 @@ int nvapi_fanspeed(unsigned int devNum, unsigned int *speed)
return 0;
}

int nvapi_getpstate(unsigned int devNum, unsigned int *power)
int nvapi_getpstate(unsigned int devNum, unsigned int *pstate)
{
NvAPI_Status ret;

@@ -848,7 +848,7 @@ int nvapi_getpstate(unsigned int devNum, unsigned int *power)
}
else {
// get pstate for the moment... often 0 = P0
(*power) = (unsigned int)CurrentPstate;
(*pstate) = (unsigned int)CurrentPstate;
}

return 0;

@@ -909,25 +909,27 @@ int nvapi_getinfo(unsigned int devNum, uint16_t &vid, uint16_t &pid)

int nvapi_getserial(unsigned int devNum, char *serial, unsigned int maxlen)
{
// NvAPI_Status ret;
NvAPI_Status ret;
if (devNum >= nvapi_dev_cnt)
return -ENODEV;

sprintf(serial, "");
memset(serial, 0, maxlen);

if (maxlen < 64) // Short String
return -1;
if (maxlen < 11)
return -EINVAL;

#if 0
ret = NvAPI_GPU_Get..(phys[devNum], serial);
NvAPI_ShortString ser = { 0 };
ret = NvAPI_DLL_GetSerialNumber(phys[devNum], ser);
if (ret != NVAPI_OK) {
NvAPI_ShortString string;
NvAPI_GetErrorMessage(ret, string);
if (opt_debug)
applog(LOG_DEBUG, "NVAPI ...: %s", string);
applog(LOG_DEBUG, "NVAPI GetSerialNumber: %s", string);
return -1;
}
#endif

uint8_t *bytes = (uint8_t*) ser;
for (int n=0; n<5; n++) sprintf(&serial[n*2], "%02X", bytes[n]);
return 0;
}

@@ -951,6 +953,133 @@ int nvapi_getbios(unsigned int devNum, char *desc, unsigned int maxlen)
return 0;
}

#define FREQ_GETVAL(clk) (clk.typeId == 0 ? clk.data.single.freq_kHz : clk.data.range.maxFreq_kHz)

int nvapi_pstateinfo(unsigned int devNum)
{
uint32_t n;
NvAPI_Status ret;

unsigned int current = 0xFF;
// useless on init but...
nvapi_getpstate(devNum, &current);

NV_GPU_PERF_PSTATES20_INFO info = { 0 };
info.version = NV_GPU_PERF_PSTATES20_INFO_VER;
if ((ret = NvAPI_GPU_GetPstates20(phys[devNum], &info)) != NVAPI_OK) {
NvAPI_ShortString string;
NvAPI_GetErrorMessage(ret, string);
if (opt_debug)
applog(LOG_RAW, "NVAPI GetPstates20: %s", string);
return -1;
}
applog(LOG_RAW, "%u P-states with %u clocks %s",
info.numPstates, info.numClocks, info.numBaseVoltages ? "and voltage":"");
for (n=0; n < info.numPstates; n++) {
NV_GPU_PSTATE20_CLOCK_ENTRY_V1* clocks = info.pstates[n].clocks;
applog(LOG_RAW, "%sP%d: MEM %4u MHz%s GPU %3u-%4u MHz%s %4u mV%s \x7F %d/%d",
info.pstates[n].pstateId == current ? ">":" ", info.pstates[n].pstateId,
FREQ_GETVAL(clocks[1])/1000, clocks[1].bIsEditable ? "*":" ",
clocks[0].data.range.minFreq_kHz/1000, FREQ_GETVAL(clocks[0])/1000, clocks[0].bIsEditable ? "*":" ",
info.pstates[n].baseVoltages[0].volt_uV/1000, info.pstates[n].baseVoltages[0].bIsEditable ? "*": " ",
info.pstates[n].baseVoltages[0].voltDelta_uV.valueRange.min/1000, // range if editable
info.pstates[n].baseVoltages[0].voltDelta_uV.valueRange.max/1000);
}
// boost over volting (GTX 9xx) ?
for (n=0; n < info.ov.numVoltages; n++) {
applog(LOG_RAW, " OV: %u mV%s + %d/%d",
info.ov.voltages[n].volt_uV/1000, info.ov.voltages[n].bIsEditable ? "*":" ",
info.ov.voltages[n].voltDelta_uV.valueRange.min/1000, info.ov.voltages[n].voltDelta_uV.valueRange.max/1000);
}

NV_GPU_CLOCK_FREQUENCIES freqs = { 0 };
freqs.version = NV_GPU_CLOCK_FREQUENCIES_VER;
freqs.ClockType = NV_GPU_CLOCK_FREQUENCIES_CURRENT_FREQ;
ret = NvAPI_GPU_GetAllClockFrequencies(phys[devNum], &freqs);
applog(LOG_RAW, " MEM %4.0f MHz GPU %8.2f MHz >Current",
(double) freqs.domain[NVAPI_GPU_PUBLIC_CLOCK_MEMORY].frequency / 1000,
(double) freqs.domain[NVAPI_GPU_PUBLIC_CLOCK_GRAPHICS].frequency / 1000);

freqs.ClockType = NV_GPU_CLOCK_FREQUENCIES_BASE_CLOCK;
ret = NvAPI_GPU_GetAllClockFrequencies(phys[devNum], &freqs);
applog(LOG_RAW, " MEM %4.0f MHz GPU %8.2f MHz Base Clocks",
(double) freqs.domain[NVAPI_GPU_PUBLIC_CLOCK_MEMORY].frequency / 1000,
(double) freqs.domain[NVAPI_GPU_PUBLIC_CLOCK_GRAPHICS].frequency / 1000);

freqs.ClockType = NV_GPU_CLOCK_FREQUENCIES_BOOST_CLOCK;
ret = NvAPI_GPU_GetAllClockFrequencies(phys[devNum], &freqs);
applog(LOG_RAW, " MEM %4.0f MHz GPU %8.2f MHz Boost Clocks",
(double) freqs.domain[NVAPI_GPU_PUBLIC_CLOCK_MEMORY].frequency / 1000,
(double) freqs.domain[NVAPI_GPU_PUBLIC_CLOCK_GRAPHICS].frequency / 1000);

#if 1
NV_GPU_THERMAL_SETTINGS tset = { 0 };
NVAPI_GPU_THERMAL_INFO tnfo = { 0 };
NVAPI_GPU_THERMAL_LIMIT tlim = { 0 };
tset.version = NV_GPU_THERMAL_SETTINGS_VER;
NvAPI_GPU_GetThermalSettings(phys[devNum], 0, &tset);
tnfo.version = NVAPI_GPU_THERMAL_INFO_VER;
NvAPI_DLL_ClientThermalPoliciesGetInfo(phys[devNum], &tnfo);
tlim.version = NVAPI_GPU_THERMAL_LIMIT_VER;
if ((ret = NvAPI_DLL_ClientThermalPoliciesGetLimit(phys[devNum], &tlim)) == NVAPI_OK) {
applog(LOG_RAW, " Thermal limit is set to %u, current Tc %d, range [%u-%u]",
tlim.entries[0].value >> 8, tset.sensor[0].currentTemp,
tnfo.entries[0].min_temp >> 8, tnfo.entries[0].max_temp >> 8);
// ok
//tlim.entries[0].value = 80 << 8;
//tlim.flags = 1;
//ret = NvAPI_DLL_ClientThermalPoliciesSetLimit(phys[devNum], &tlim);
}
#endif
uint8_t plim = nvapi_getplimit(devNum);
applog(LOG_RAW, " Power limit coef. is set to %u%%", (uint32_t) plim);

#if 1
// seems empty..
NVIDIA_GPU_VOLTAGE_DOMAINS_STATUS volts = { 0 };
volts.version = NVIDIA_GPU_VOLTAGE_DOMAINS_STATUS_VER;
ret = NvAPI_DLL_GetVoltageDomainsStatus(phys[devNum], &volts);
#endif

#if 1
// Read pascal Clocks Table, Empty on 9xx
NVAPI_CLOCKS_RANGE ranges = { 0 };
ranges.version = NVAPI_CLOCKS_RANGE_VER;
ret = NvAPI_DLL_GetClockBoostRanges(phys[devNum], &ranges);
NVAPI_CLOCK_MASKS boost = { 0 };
boost.version = NVAPI_CLOCK_MASKS_VER;
ret = NvAPI_DLL_GetClockBoostMask(phys[devNum], &boost);
int gpuClocks = 0, memClocks = 0;
for (n=0; n < 80+23; n++) {
if (boost.clocks[n].memDelta) memClocks++;
if (boost.clocks[n].gpuDelta) gpuClocks++;
}

if (gpuClocks || memClocks) {
applog(LOG_RAW, "Boost table contains %d gpu clocks and %d mem clocks.", gpuClocks, memClocks);
NVAPI_CLOCK_TABLE table = { 0 };
table.version = NVAPI_CLOCK_TABLE_VER;
memcpy(table.mask, boost.mask, 12);
ret = NvAPI_DLL_GetClockBoostTable(phys[devNum], &table);
for (n=0; n < 12; n++) {
if (table.buf0[n] != 0) applog(LOG_RAW, "boost table 0[%u] not empty (%u)", n, table.buf0[n]);
}
for (n=0; n < 80; n++) {
if (table.gpuDeltas[n].freqDelta)
applog(LOG_RAW, "boost gpu clock delta %u set to %d MHz", n, table.gpuDeltas[n].freqDelta/1000);
}
for (n=0; n < 23; n++) {
if (table.memFilled[n])
applog(LOG_RAW, "boost mem clock delta %u set to %d MHz", n, table.memDeltas[n]/1000);
}
for (n=0; n < 1529; n++) {
if (table.buf1[n] != 0) applog(LOG_RAW, "boost table 1[%u] not empty (%u)", n, table.buf1[n]);
}
}
#endif
return 0;
}

uint8_t nvapi_getplimit(unsigned int devNum)
{
NvAPI_Status ret = NVAPI_OK;

@@ -960,7 +1089,7 @@ uint8_t nvapi_getplimit(unsigned int devNum)
NvAPI_ShortString string;
NvAPI_GetErrorMessage(ret, string);
if (opt_debug)
applog(LOG_DEBUG, "NVAPI GetPowerPoliciesStatus: %s", string);
applog(LOG_DEBUG, "NVAPI PowerPoliciesGetStatus: %s", string);
return 0;
}
return (uint8_t) (pol.entries[0].power / 1000); // in percent

@@ -991,7 +1120,7 @@ int nvapi_setplimit(unsigned int devNum, uint16_t percent)
NvAPI_ShortString string;
NvAPI_GetErrorMessage(ret, string);
if (opt_debug)
applog(LOG_DEBUG, "NVAPI SetPowerPoliciesStatus: %s", string);
applog(LOG_DEBUG, "NVAPI PowerPoliciesSetStatus: %s", string);
return -1;
}
return ret;

@@ -1072,6 +1201,10 @@ int nvapi_init()
uint32_t res = nvapi_getplimit(nvapi_dev_map[dev_id]);
gpulog(LOG_INFO, n, "NVAPI power limit is set to %u%%", res);
}
if (device_pstate[dev_id]) {
if (opt_debug) nvapi_pstateinfo(nvapi_dev_map[dev_id]);
// todo...
}
}
}

@@ -1174,6 +1307,8 @@ unsigned int gpu_power(struct cgpu_info *gpu)
if (support == -1) {
unsigned int pct = 0;
nvapi_getusage(nvapi_dev_map[gpu->gpu_id], &pct);
pct *= nvapi_getplimit(nvapi_dev_map[gpu->gpu_id]);
pct /= 100;
mw = pct; // to fix
}
#endif
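The #if 1 block above is the "pascal boost table" part of the commit: NvAPI_DLL_GetClockBoostMask reports which of the 80 GPU and 23 memory clock slots exist, the mask is copied into the NVAPI_CLOCK_TABLE request, and NvAPI_DLL_GetClockBoostTable then returns one offset per slot in kHz (a freqDelta of 84000 means +84 MHz). Condensed into a standalone sketch, not part of the commit, assuming a valid physical GPU handle and the reverse-engineered structures declared earlier:

static void dump_boost_deltas(NvPhysicalGpuHandle hGpu)
{
	NVAPI_CLOCK_MASKS boost = { 0 };
	boost.version = NVAPI_CLOCK_MASKS_VER;
	if (NvAPI_DLL_GetClockBoostMask(hGpu, &boost) != NVAPI_OK)
		return;

	NVAPI_CLOCK_TABLE table = { 0 };
	table.version = NVAPI_CLOCK_TABLE_VER;
	memcpy(table.mask, boost.mask, 12); // hand the clock mask back, as nvapi_pstateinfo() does
	if (NvAPI_DLL_GetClockBoostTable(hGpu, &table) != NVAPI_OK)
		return;

	for (uint32_t n = 0; n < 80; n++) {
		if (table.gpuDeltas[n].freqDelta) // kHz offset applied to this gpu clock slot
			applog(LOG_RAW, "gpu clock %u: %+d MHz", n, table.gpuDeltas[n].freqDelta / 1000);
	}
	for (uint32_t n = 0; n < 23; n++) {
		if (table.memFilled[n]) // slot is populated, delta stored separately
			applog(LOG_RAW, "mem clock %u: %+d MHz", n, table.memDeltas[n] / 1000);
	}
}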
nvml.h (3 changed lines)
@@ -217,6 +217,9 @@ int gpu_info(struct cgpu_info *gpu);

int gpu_vendor(uint8_t pci_bus_id, char *vendorname);

// to debug clocks..
int nvapi_pstateinfo(unsigned int devNum);
uint8_t nvapi_getplimit(unsigned int devNum);

/* nvapi functions */
#ifdef WIN32
util.cpp (4 changed lines)
@@ -139,6 +139,10 @@ void applog(int prio, const char *fmt, ...)
fmt,
use_colors ? CL_N : ""
);
if (prio == LOG_RAW) {
// no time prefix, for ccminer -n
sprintf(f, "%s%s\n", fmt, CL_N);
}
pthread_mutex_lock(&applog_lock);
vfprintf(stdout, f, ap); /* atomic write to stdout */
fflush(stdout);
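LOG_RAW exists so the listing produced by nvapi_pstateinfo (ccminer -D -n) comes out without the usual time prefix: when prio == LOG_RAW the format string is passed through untouched. An illustrative call, not from the commit, with placeholder variables:

applog(LOG_RAW, "  MEM %4.0f MHz GPU %8.2f MHz >Current", mem_mhz, gpu_mhz); // printed as-is, no time prefix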