Browse Source

nvml: get devices vendor names with libpci

Made for Linux; requires libpci-dev (optional dependency).

If libpci is not installed, the cards' vendor names are not resolved.

Note: only a few vendor names were added, common GeForce vendors.

Signed-off-by: Tanguy Pruvot <tanguy.pruvot@gmail.com>
master
Tanguy Pruvot 9 years ago
parent
commit
6b41234ff1
  1. 4
      Makefile.am
  2. 6
      configure.ac
  3. 23
      cuda.cpp
  4. 95
      nvml.cpp
  5. 2
      nvml.h

4
Makefile.am

@ -74,8 +74,8 @@ ccminer_SOURCES += compat/winansi.c @@ -74,8 +74,8 @@ ccminer_SOURCES += compat/winansi.c
endif
ccminer_LDFLAGS = $(PTHREAD_FLAGS) @CUDA_LDFLAGS@
ccminer_LDADD = @LIBCURL@ @JANSSON_LIBS@ @PTHREAD_LIBS@ @WS2_LIBS@ @CUDA_LIBS@ @OPENMP_CFLAGS@ @LIBS@ $(nvml_libs)
ccminer_CPPFLAGS = @LIBCURL_CPPFLAGS@ @OPENMP_CFLAGS@ $(CPPFLAGS) $(PTHREAD_FLAGS) -fno-strict-aliasing $(JANSSON_INCLUDES) $(DEF_INCLUDES) $(nvml_defs) -DSCRYPT_KECCAK512 -DSCRYPT_CHACHA -DSCRYPT_CHOOSE_COMPILETIME
ccminer_LDADD = @LIBCURL@ @JANSSON_LIBS@ @PTHREAD_LIBS@ @WS2_LIBS@ @CUDA_LIBS@ @PCILIB@ @OPENMP_CFLAGS@ @LIBS@ $(nvml_libs)
ccminer_CPPFLAGS = @LIBCURL_CPPFLAGS@ @PCIFLAGS@ @OPENMP_CFLAGS@ $(CPPFLAGS) $(PTHREAD_FLAGS) -fno-strict-aliasing $(JANSSON_INCLUDES) $(DEF_INCLUDES) $(nvml_defs)
nvcc_ARCH = -gencode=arch=compute_50,code=\"sm_50,compute_50\"

6
configure.ac

@ -75,6 +75,12 @@ AC_CHECK_LIB([pthread], [pthread_create], PTHREAD_LIBS="-lpthread", @@ -75,6 +75,12 @@ AC_CHECK_LIB([pthread], [pthread_create], PTHREAD_LIBS="-lpthread",
AC_CHECK_LIB([ssl],[SSL_library_init], [], [AC_MSG_ERROR([OpenSSL library required])])
AC_CHECK_LIB([crypto],[EVP_DigestFinal_ex], [], [AC_MSG_ERROR([OpenSSL library required])])
# libpci-dev (to get vendor name)
PCILIB=""; PCIFLAGS=""
AC_CHECK_LIB([pci],[pci_alloc], PCILIB="-lpci"; PCIFLAGS="-DHAVE_PCIDEV", [])
AC_SUBST(PCILIB)
AC_SUBST(PCIFLAGS)
AM_CONDITIONAL([WANT_JANSSON], [test x$request_jansson = xtrue])
AM_CONDITIONAL([HAVE_WINDOWS], [test x$have_win32 = xtrue])
AM_CONDITIONAL([ARCH_x86], [test x$have_x86 = xtrue])

23
cuda.cpp

@ -19,6 +19,7 @@ @@ -19,6 +19,7 @@
#endif
#include "miner.h"
#include "nvml.h"
#include "cuda_runtime.h"
@ -64,23 +65,39 @@ void cuda_devicenames() @@ -64,23 +65,39 @@ void cuda_devicenames()
GPU_N = min(MAX_GPUS, GPU_N);
for (int i=0; i < GPU_N; i++)
{
char vendorname[32] = { 0 };
cudaDeviceProp props;
cudaGetDeviceProperties(&props, device_map[i]);
device_name[i] = strdup(props.name);
device_sm[i] = (props.major * 100 + props.minor * 10);
if (device_name[i]) {
free(device_name[i]);
device_name[i] = NULL;
}
if (gpu_vendor(props.pciBusID, vendorname) > 0 && strlen(vendorname)) {
device_name[i] = (char*) calloc(1, strlen(vendorname) + strlen(props.name) + 2);
if (!strncmp(props.name, "GeForce ", 8))
sprintf(device_name[i], "%s %s", vendorname, &props.name[8]);
else
sprintf(device_name[i], "%s %s", vendorname, props.name);
} else
device_name[i] = strdup(props.name);
}
}
// List the detected CUDA devices on stderr (bus id, SM version and name).
// Uses device_name[] filled by cuda_devicenames(), so the vendor-prefixed
// name is shown when libpci identified the board vendor.
// Fix: the scraped diff left both the old fprintf(props.name) line and the
// new braced fprintf(device_name[n]) in place, which would print every GPU
// twice — only the new form is kept.
void cuda_print_devices()
{
	int ngpus = cuda_num_devices();
	cuda_devicenames();
	for (int n = 0; n < ngpus; n++) {
		int m = device_map[n];
		cudaDeviceProp props;
		cudaGetDeviceProperties(&props, m);
		if (!opt_n_threads || n < opt_n_threads) {
			fprintf(stderr, "GPU #%d: SM %d.%d %s\n", m,
				props.major, props.minor, device_name[n]);
		}
	}
}

95
nvml.cpp

@ -512,7 +512,7 @@ int nvml_get_bios(nvml_handle *nvmlh, int cudaindex, char *desc, int maxlen) @@ -512,7 +512,7 @@ int nvml_get_bios(nvml_handle *nvmlh, int cudaindex, char *desc, int maxlen)
return 0;
}
int nvml_get_info(nvml_handle *nvmlh, int cudaindex, uint16_t *vid, uint16_t *pid)
int nvml_get_info(nvml_handle *nvmlh, int cudaindex, uint16_t &vid, uint16_t &pid)
{
uint32_t subids = 0;
int gpuindex = nvmlh->cuda_nvml_device_id[cudaindex];
@ -520,8 +520,8 @@ int nvml_get_info(nvml_handle *nvmlh, int cudaindex, uint16_t *vid, uint16_t *pi @@ -520,8 +520,8 @@ int nvml_get_info(nvml_handle *nvmlh, int cudaindex, uint16_t *vid, uint16_t *pi
return -1;
subids = nvmlh->nvml_pci_subsys_id[gpuindex];
(*pid) = subids >> 16;
(*vid) = subids & 0xFFFF;
pid = subids >> 16;
vid = subids & 0xFFFF;
return 0;
}
@ -651,7 +651,7 @@ int nvapi_getusage(unsigned int devNum, unsigned int *pct) @@ -651,7 +651,7 @@ int nvapi_getusage(unsigned int devNum, unsigned int *pct)
return 0;
}
int nvapi_getinfo(unsigned int devNum, uint16_t *vid, uint16_t *pid)
int nvapi_getinfo(unsigned int devNum, uint16_t &vid, uint16_t &pid)
{
NvAPI_Status ret;
NvU32 pDeviceId, pSubSystemId, pRevisionId, pExtDeviceId;
@ -668,8 +668,8 @@ int nvapi_getinfo(unsigned int devNum, uint16_t *vid, uint16_t *pid) @@ -668,8 +668,8 @@ int nvapi_getinfo(unsigned int devNum, uint16_t *vid, uint16_t *pid)
return -1;
}
(*pid) = pDeviceId >> 16;
(*vid) = pDeviceId & 0xFFFF;
pid = pDeviceId >> 16;
vid = pDeviceId & 0xFFFF;
return 0;
}
@ -888,9 +888,82 @@ unsigned int gpu_power(struct cgpu_info *gpu) @@ -888,9 +888,82 @@ unsigned int gpu_power(struct cgpu_info *gpu)
return mw;
}
#ifdef HAVE_PCIDEV
extern "C" {
#include <errno.h>
#include <pci/pci.h>
}
/**
 * Resolve the board (subsystem) vendor of the VGA device on the given PCI
 * bus, using libpci.
 *
 * @param pci_bus_id  PCI bus number of the GPU (as reported by CUDA/NVML)
 * @param vendorname  output buffer for a short vendor name — callers pass a
 *                    32-byte buffer; the longest table entry is "Gainward"
 * @param pid         receives the PCI subsystem device id (board model) when
 *                    the vendor is recognized; left untouched otherwise
 * @return the raw subsystem vendor id (0 when no VGA device matched the bus),
 *         or a negative errno on bad arguments / libpci allocation failure
 */
static int linux_gpu_vendor(uint8_t pci_bus_id, char* vendorname, uint16_t &pid)
{
	uint16_t subvendor = 0;
	struct pci_access *pci;
	struct pci_dev *dev;
	uint16_t subdevice;

	// Small table of common GeForce board vendors; vid 0 terminates the list.
	struct VENDORS {
		const uint16_t vid;
		const char *name;
	} vendors[] = {
		{ 0x1043, "ASUS" },
		{ 0x10B0, "Gainward" },
		{ 0x10DE, "NVIDIA" },
		{ 0x1458, "Gigabyte" },
		{ 0x1462, "MSI" },
		{ 0, "" }
	};

	if (!vendorname)
		return -EINVAL;

	pci = pci_alloc();
	if (!pci)
		return -ENODEV;

	pci_init(pci);
	pci_scan_bus(pci);

	for (dev = pci->devices; dev; dev = dev->next)
	{
		if (dev->device_class == PCI_CLASS_DISPLAY_VGA && dev->bus == pci_bus_id)
		{
			bool identified = false;
			subvendor = pci_read_word(dev, PCI_SUBSYSTEM_VENDOR_ID);
			subdevice = pci_read_word(dev, PCI_SUBSYSTEM_ID); // board model
			// Fix: stop at the {0,""} sentinel instead of iterating the whole
			// array, so a zero subvendor (e.g. a failed pci_read_word) is
			// never "identified" as an empty vendor name.
			for (int v = 0; vendors[v].vid != 0; v++) {
				if (subvendor == vendors[v].vid) {
					strcpy(vendorname, vendors[v].name);
					identified = true;
					pid = subdevice;
					break;
				}
			}
			if (!identified && !opt_quiet)
				applog(LOG_DEBUG, "%04x:%04x (Unknown vendor)\n",
					subvendor, subdevice);
		}
	}

	pci_cleanup(pci);
	return (int) subvendor;
}
#endif
// Public wrapper: resolve the board vendor name for the GPU on the given PCI
// bus. Returns the subsystem vendor id (> 0) on success, 0 when libpci
// support was not compiled in or no device matched, negative errno on error.
int gpu_vendor(uint8_t pci_bus_id, char *vendorname)
{
#ifndef HAVE_PCIDEV
	(void) pci_bus_id;
	(void) vendorname;
	return 0;
#else
	uint16_t unused_pid = 0; // model id is discarded in this entry point
	return linux_gpu_vendor(pci_bus_id, vendorname, unused_pid);
#endif
}
int gpu_info(struct cgpu_info *gpu)
{
char vendorname[32] = { 0 };
int id = gpu->gpu_id;
uint8_t bus_id = 0;
gpu->nvml_id = -1;
gpu->nvapi_id = -1;
@ -900,13 +973,19 @@ int gpu_info(struct cgpu_info *gpu) @@ -900,13 +973,19 @@ int gpu_info(struct cgpu_info *gpu)
if (hnvml) {
gpu->nvml_id = (int8_t) hnvml->cuda_nvml_device_id[id];
nvml_get_info(hnvml, id, &gpu->gpu_vid, &gpu->gpu_pid);
#ifdef HAVE_PCIDEV
gpu->gpu_vid = linux_gpu_vendor(hnvml->nvml_pci_bus_id[id], vendorname, gpu->gpu_pid);
if (!gpu->gpu_vid || !gpu->gpu_pid)
nvml_get_info(hnvml, id, gpu->gpu_vid, gpu->gpu_pid);
#else
nvml_get_info(hnvml, id, gpu->gpu_vid, gpu->gpu_pid);
#endif
nvml_get_serial(hnvml, id, gpu->gpu_sn, sizeof(gpu->gpu_sn));
nvml_get_bios(hnvml, id, gpu->gpu_desc, sizeof(gpu->gpu_desc));
}
#ifdef WIN32
gpu->nvapi_id = (int8_t) nvapi_dev_map[id];
nvapi_getinfo(nvapi_dev_map[id], &gpu->gpu_vid, &gpu->gpu_pid);
nvapi_getinfo(nvapi_dev_map[id], gpu->gpu_vid, gpu->gpu_pid);
nvapi_getserial(nvapi_dev_map[id], gpu->gpu_sn, sizeof(gpu->gpu_sn));
nvapi_getbios(nvapi_dev_map[id], gpu->gpu_desc, sizeof(gpu->gpu_desc));
#endif

2
nvml.h

@ -176,6 +176,8 @@ int gpu_busid(struct cgpu_info *gpu); @@ -176,6 +176,8 @@ int gpu_busid(struct cgpu_info *gpu);
/* pid/vid, sn and bios rev */
int gpu_info(struct cgpu_info *gpu);
int gpu_vendor(uint8_t pci_bus_id, char *vendorname);
/* nvapi functions */
#ifdef WIN32

Loading…
Cancel
Save