Browse Source

ocl correct applog typing

nfactor-troky
Kano 12 years ago
parent
commit
54d5d26095
  1. 19
      ocl.c

19
ocl.c

@@ -363,14 +363,14 @@ _clState *initCl(unsigned int gpu, char *name, size_t nameSize)
applog(LOG_ERR, "Error %d: Failed to clGetDeviceInfo when trying to get CL_DEVICE_MAX_WORK_GROUP_SIZE", status);
return NULL;
}
applog(LOG_DEBUG, "Max work group size reported %d", clState->max_work_size);
applog(LOG_DEBUG, "Max work group size reported %zu", clState->max_work_size);
status = clGetDeviceInfo(devices[gpu], CL_DEVICE_MAX_MEM_ALLOC_SIZE , sizeof(cl_ulong), (void *)&cgpu->max_alloc, NULL);
if (status != CL_SUCCESS) {
applog(LOG_ERR, "Error %d: Failed to clGetDeviceInfo when trying to get CL_DEVICE_MAX_MEM_ALLOC_SIZE", status);
return NULL;
}
applog(LOG_DEBUG, "Max mem alloc size is %u", cgpu->max_alloc);
applog(LOG_DEBUG, "Max mem alloc size is %lu", (long unsigned int)(cgpu->max_alloc));
/* Create binary filename based on parameters passed to opencl
* compiler to ensure we only load a binary that matches what would
@@ -495,7 +495,7 @@ _clState *initCl(unsigned int gpu, char *name, size_t nameSize)
if (cgpu->thread_concurrency > cgpu->shaders * 5)
cgpu->thread_concurrency = cgpu->shaders * 5;
}
applog(LOG_DEBUG, "GPU %d: selecting thread concurrency of %u",gpu, cgpu->thread_concurrency);
applog(LOG_DEBUG, "GPU %d: selecting thread concurrency of %zu", gpu, cgpu->thread_concurrency);
} else
cgpu->thread_concurrency = cgpu->opt_tc;
}
@@ -610,7 +610,7 @@ build:
sprintf(CompilerOptions, "-D WORKSIZE=%d -D VECTORS%d -D WORKVEC=%d",
(int)clState->wsize, clState->vwidth, (int)clState->wsize * clState->vwidth);
}
applog(LOG_DEBUG, "Setting worksize to %d", clState->wsize);
applog(LOG_DEBUG, "Setting worksize to %zu", clState->wsize);
if (clState->vwidth > 1)
applog(LOG_DEBUG, "Patched source to suit %d vectors", clState->vwidth);
@@ -689,7 +689,7 @@ build:
break;
/* copy over all of the generated binaries. */
applog(LOG_DEBUG, "Binary size for gpu %d found in binary slot %d: %d", gpu, slot, binary_sizes[slot]);
applog(LOG_DEBUG, "Binary size for gpu %d found in binary slot %d: %zu", gpu, slot, binary_sizes[slot]);
if (!binary_sizes[slot]) {
applog(LOG_ERR, "OpenCL compiler generated a zero sized binary, FAIL!");
return NULL;
@@ -771,7 +771,7 @@ built:
free(binaries);
free(binary_sizes);
applog(LOG_INFO, "Initialising kernel %s with%s bitalign, %d vectors and worksize %d",
applog(LOG_INFO, "Initialising kernel %s with%s bitalign, %d vectors and worksize %zu",
filename, clState->hasBitAlign ? "" : "out", clState->vwidth, clState->wsize);
if (!prog_built) {
@@ -804,10 +804,11 @@ built:
/* Use the max alloc value which has been rounded to a power of
* 2 greater >= required amount earlier */
if (bufsize > cgpu->max_alloc) {
applog(LOG_WARNING, "Maximum buffer memory device %d supports says %u", gpu, cgpu->max_alloc);
applog(LOG_WARNING, "Your scrypt settings come to %u", bufsize);
applog(LOG_WARNING, "Maximum buffer memory device %d supports says %lu",
gpu, (long unsigned int)(cgpu->max_alloc));
applog(LOG_WARNING, "Your scrypt settings come to %zu", bufsize);
}
applog(LOG_DEBUG, "Creating scrypt buffer sized %u", bufsize);
applog(LOG_DEBUG, "Creating scrypt buffer sized %zu", bufsize);
clState->padbufsize = bufsize;
/* This buffer is weird and might work to some degree even if

Loading…
Cancel
Save