From a51514d9d13cc7de281e120965c99948c5e4cbb4 Mon Sep 17 00:00:00 2001 From: Con Kolivas Date: Tue, 27 Dec 2011 10:30:04 +1100 Subject: [PATCH] White space cleanup. --- api.c | 2 +- findnonce.c | 14 ++++----- main.c | 22 ++++++------- sha256_altivec_4way.c | 6 ++-- uthash.h | 72 +++++++++++++++++++++---------------------- util.c | 8 ++--- 6 files changed, 62 insertions(+), 62 deletions(-) diff --git a/api.c b/api.c index ad5ebcec..fe29b947 100644 --- a/api.c +++ b/api.c @@ -993,7 +993,7 @@ void api(void) send_result(c, isjson); did = true; } - else { + else { json_val = json_object_get(json_config, JSON_COMMAND); if (json_val == NULL) { strcpy(io_buffer, message(MSG_MISSCMD, 0, isjson)); diff --git a/findnonce.c b/findnonce.c index 649d6225..a5da20a4 100644 --- a/findnonce.c +++ b/findnonce.c @@ -4,7 +4,7 @@ * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) + * Software Foundation; either version 2 of the License, or (at your option) * any later version. See COPYING for more details. */ @@ -49,7 +49,7 @@ const uint32_t SHA256_K[64] = { void precalc_hash(dev_blk_ctx *blk, uint32_t *state, uint32_t *data) { cl_uint A, B, C, D, E, F, G, H; - + A = state[0]; B = state[1]; C = state[2]; @@ -59,7 +59,7 @@ void precalc_hash(dev_blk_ctx *blk, uint32_t *state, uint32_t *data) { G = state[6]; H = state[7]; - R(A, B, C, D, E, F, G, H, data[0], SHA256_K[0]); + R(A, B, C, D, E, F, G, H, data[0], SHA256_K[0]); R(H, A, B, C, D, E, F, G, data[1], SHA256_K[1]); R(G, H, A, B, C, D, E, F, data[2], SHA256_K[2]); @@ -90,7 +90,7 @@ void precalc_hash(dev_blk_ctx *blk, uint32_t *state, uint32_t *data) { blk->merkle = data[0]; blk->ntime = data[1]; blk->nbits = data[2]; - + blk->W16 = blk->fW0 = data[0] + (rotr(data[1], 7) ^ rotr(data[1], 18) ^ (data[1] >> 3)); blk->W17 = blk->fW1 = data[1] + (rotr(data[2], 7) ^ rotr(data[2], 18) ^ (data[2] >> 3)) + 0x01100000; blk->PreVal4 = blk->fcty_e = E + (rotr(B, 6) ^ rotr(B, 11) ^ rotr(B, 25)) + (D ^ (B & (C ^ D))) + 0xe9b5dba5; @@ -101,8 +101,8 @@ void precalc_hash(dev_blk_ctx *blk, uint32_t *state, uint32_t *data) { blk->PreW32 = blk->W16 + ((rotr(blk->W17, 7) ^ rotr(blk->W17, 18) ^ (blk->W17 >> 3))); blk->PreW18 = data[2] + (rotr(blk->W16, 17) ^ rotr(blk->W16, 19) ^ (blk->W16 >> 10)); blk->PreW19 = 0x11002000 + (rotr(blk->W17, 17) ^ rotr(blk->W17, 19) ^ (blk->W17 >> 10)); - - + + blk->W2 = data[2]; blk->W2A = blk->W2 + (rotr(blk->W16, 19) ^ rotr(blk->W16, 17) ^ (blk->W16 >> 10)); @@ -113,7 +113,7 @@ void precalc_hash(dev_blk_ctx *blk, uint32_t *state, uint32_t *data) { blk->fW15 = 0x00000280 + (rotr(blk->fW0, 7) ^ rotr(blk->fW0, 18) ^ (blk->fW0 >> 3)); blk->fW01r = blk->fW0 + (rotr(blk->fW1, 7) ^ rotr(blk->fW1, 18) ^ (blk->fW1 >> 3)); - + blk->PreVal4addT1 = blk->PreVal4 + blk->T1; blk->T1substate0 = state[0] - blk->T1; } diff --git a/main.c b/main.c index c8ec5c07..404c4c83 100644 --- a/main.c +++ b/main.c @@ -1239,28 +1239,28 @@ static char *set_gpu_memdiff(char *arg) { int i, val = 0, device = 0; char *nextptr; - + nextptr = strtok(arg, ","); if (nextptr == NULL) return "Invalid parameters for set gpu memdiff"; val = atoi(nextptr); if (val < -9999 || val > 9999) return "Invalid value passed to set_gpu_memdiff"; - + gpus[device++].gpu_memdiff = val; - + while ((nextptr = strtok(NULL, ",")) != NULL) { val = atoi(nextptr); if (val < -9999 || val > 9999) return "Invalid value passed to 
set_gpu_memdiff"; - + gpus[device++].gpu_memdiff = val; } if (device == 1) { for (i = device; i < MAX_GPUDEVICES; i++) gpus[i].gpu_memdiff = gpus[0].gpu_memdiff; } - + return NULL; } @@ -3089,7 +3089,7 @@ static void remove_pool(struct pool *pool) static void write_config(FILE *fcfg) { int i; - + /* Write pool values */ fputs("{\n\"pools\" : [", fcfg); for(i = 0; i < total_pools; i++) { @@ -3133,7 +3133,7 @@ static void write_config(FILE *fcfg) fputs("\",\n", fcfg); } fprintf(fcfg, "\n\"algo\" : \"%s\"", algo_names[opt_algo]); - + /* Simple bool and int options */ struct opt_table *opt; for (opt = opt_config_table; opt->type != OPT_END; opt++) { @@ -3141,12 +3141,12 @@ static void write_config(FILE *fcfg) for (p = strtok(name, "|"); p; p = strtok(NULL, "|")) { if (p[1] != '-') continue; - if (opt->type & OPT_NOARG && + if (opt->type & OPT_NOARG && ((void *)opt->cb == (void *)opt_set_bool || (void *)opt->cb == (void *)opt_set_invbool) && (*(bool *)opt->u.arg == ((void *)opt->cb == (void *)opt_set_bool))) fprintf(fcfg, ",\n\"%s\" : true", p+2); - - if (opt->type & OPT_HASARG && + + if (opt->type & OPT_HASARG && ((void *)opt->cb_arg == (void *)set_int_0_to_9999 || (void *)opt->cb_arg == (void *)set_int_1_to_65535 || (void *)opt->cb_arg == (void *)set_int_0_to_10 || @@ -3167,7 +3167,7 @@ static void write_config(FILE *fcfg) #if defined(unix) if (opt_stderr_cmd && *opt_stderr_cmd) fprintf(fcfg, ",\n\"monitor\" : \"%s\"", opt_stderr_cmd); -#endif // defined(unix) +#endif // defined(unix) if (opt_kernel && *opt_kernel) fprintf(fcfg, ",\n\"kernel\" : \"%s\"", opt_kernel); if (opt_kernel_path && *opt_kernel_path) { diff --git a/sha256_altivec_4way.c b/sha256_altivec_4way.c index 41dc5a09..b70e4d03 100644 --- a/sha256_altivec_4way.c +++ b/sha256_altivec_4way.c @@ -82,7 +82,7 @@ unsigned int ScanHash_altivec_4way(int thr_id, const unsigned char *pmidstate, uint32_t nonce) { unsigned int *nNonce_p = (unsigned int*)(pdata + 12); - + work_restart[thr_id].restart = 0; for (;;) @@ -102,7 +102,7 @@ unsigned int ScanHash_altivec_4way(int thr_id, const unsigned char *pmidstate, for (i = 0; i < 32/4; i++) ((unsigned int*)phash)[i] = thash[i][j]; - + if (fulltest(phash, ptarget)) { *nHashesDone = nonce; *nNonce_p = nonce + j; @@ -138,7 +138,7 @@ static void DoubleBlockSHA256(const void* pin, void* pad, const void *pre, unsig /* nonce offset for vector */ vector unsigned int offset = (vector unsigned int)(0, 1, 2, 3); - + preNonce = vec_add((vector unsigned int)(In[3],In[3],In[3],In[3]), offset); for(k = 0; k /* memcmp,strlen */ #include /* ptrdiff_t */ @@ -49,7 +49,7 @@ do { char **_da_dst = (char**)(&(dst)); \ *_da_dst = (char*)(src); \ } while(0) -#else +#else #define DECLTYPE_ASSIGN(dst,src) \ do { \ (dst) = DECLTYPE(dst)(src); \ @@ -121,9 +121,9 @@ do { HASH_BLOOM_BITTEST((tbl)->bloom_bv, (hashv & (uint32_t)((1ULL << (tbl)->bloom_nbits) - 1))) #else -#define HASH_BLOOM_MAKE(tbl) -#define HASH_BLOOM_FREE(tbl) -#define HASH_BLOOM_ADD(tbl,hashv) +#define HASH_BLOOM_MAKE(tbl) +#define HASH_BLOOM_FREE(tbl) +#define HASH_BLOOM_ADD(tbl,hashv) #define HASH_BLOOM_TEST(tbl,hashv) (1) #endif @@ -148,7 +148,7 @@ do { #define HASH_ADD(hh,head,fieldname,keylen_in,add) \ HASH_ADD_KEYPTR(hh,head,&add->fieldname,keylen_in,add) - + #define HASH_ADD_KEYPTR(hh,head,keyptr,keylen_in,add) \ do { \ unsigned _ha_bkt; \ @@ -300,10 +300,10 @@ do { } \ } while (0) #else -#define HASH_FSCK(hh,head) +#define HASH_FSCK(hh,head) #endif -/* When compiled with -DHASH_EMIT_KEYS, length-prefixed keys are emitted to +/* When compiled with 
-DHASH_EMIT_KEYS, length-prefixed keys are emitted to * the descriptor to which this macro is defined for tuning the hash function. * The app can #include to get the prototype for write(2). */ #ifdef HASH_EMIT_KEYS @@ -313,12 +313,12 @@ do { write(HASH_EMIT_KEYS, &_klen, sizeof(_klen)); \ write(HASH_EMIT_KEYS, keyptr, fieldlen); \ } while (0) -#else -#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen) +#else +#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen) #endif /* default to Jenkin's hash unless overridden e.g. DHASH_FUNCTION=HASH_SAX */ -#ifdef HASH_FUNCTION +#ifdef HASH_FUNCTION #define HASH_FCN HASH_FUNCTION #else #define HASH_FCN HASH_JEN @@ -335,7 +335,7 @@ do { } while (0) -/* SAX/FNV/OAT/JEN hash functions are macro variants of those listed at +/* SAX/FNV/OAT/JEN hash functions are macro variants of those listed at * http://eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx */ #define HASH_SAX(key,keylen,num_bkts,hashv,bkt) \ do { \ @@ -356,7 +356,7 @@ do { hashv = (hashv * 16777619) ^ _hf_key[_fn_i]; \ bkt = hashv & (num_bkts-1); \ } while(0); - + #define HASH_OAT(key,keylen,num_bkts,hashv,bkt) \ do { \ unsigned _ho_i; \ @@ -485,14 +485,14 @@ do { #ifdef HASH_USING_NO_STRICT_ALIASING /* The MurmurHash exploits some CPU's (x86,x86_64) tolerance for unaligned reads. * For other types of CPU's (e.g. Sparc) an unaligned read causes a bus error. - * MurmurHash uses the faster approach only on CPU's where we know it's safe. + * MurmurHash uses the faster approach only on CPU's where we know it's safe. * * Note the preprocessor built-in defines can be emitted using: * * gcc -m64 -dM -E - < /dev/null (on gcc) * cc -## a.c (where a.c is a simple test file) (Sun Studio) */ -#if (defined(__i386__) || defined(__x86_64__)) +#if (defined(__i386__) || defined(__x86_64__)) #define MUR_GETBLOCK(p,i) p[i] #else /* non intel */ #define MUR_PLUS0_ALIGNED(p) (((unsigned long)p & 0x3) == 0) @@ -562,7 +562,7 @@ do { \ #endif /* HASH_USING_NO_STRICT_ALIASING */ /* key comparison function; return 0 if keys equal */ -#define HASH_KEYCMP(a,b,len) memcmp(a,b,len) +#define HASH_KEYCMP(a,b,len) memcmp(a,b,len) /* iterate over items in a known bucket to find desired item */ #define HASH_FIND_IN_BKT(tbl,hh,head,keyptr,keylen_in,out) \ @@ -603,36 +603,36 @@ do { } \ if (hh_del->hh_next) { \ hh_del->hh_next->hh_prev = hh_del->hh_prev; \ - } + } /* Bucket expansion has the effect of doubling the number of buckets * and redistributing the items into the new buckets. Ideally the * items will distribute more or less evenly into the new buckets * (the extent to which this is true is a measure of the quality of - * the hash function as it applies to the key domain). - * + * the hash function as it applies to the key domain). + * * With the items distributed into more buckets, the chain length * (item count) in each bucket is reduced. Thus by expanding buckets - * the hash keeps a bound on the chain length. This bounded chain + * the hash keeps a bound on the chain length. This bounded chain * length is the essence of how a hash provides constant time lookup. - * + * * The calculation of tbl->ideal_chain_maxlen below deserves some * explanation. First, keep in mind that we're calculating the ideal * maximum chain length based on the *new* (doubled) bucket count. * In fractions this is just n/b (n=number of items,b=new num buckets). - * Since the ideal chain length is an integer, we want to calculate + * Since the ideal chain length is an integer, we want to calculate * ceil(n/b). 
We don't depend on floating point arithmetic in this * hash, so to calculate ceil(n/b) with integers we could write - * + * * ceil(n/b) = (n/b) + ((n%b)?1:0) - * + * * and in fact a previous version of this hash did just that. * But now we have improved things a bit by recognizing that b is * always a power of two. We keep its base 2 log handy (call it lb), * so now we can write this with a bit shift and logical AND: - * + * * ceil(n/b) = (n>>lb) + ( (n & (b-1)) ? 1:0) - * + * */ #define HASH_EXPAND_BUCKETS(tbl) \ do { \ @@ -684,7 +684,7 @@ do { /* This is an adaptation of Simon Tatham's O(n log(n)) mergesort */ -/* Note that HASH_SORT assumes the hash handle name to be hh. +/* Note that HASH_SORT assumes the hash handle name to be hh. * HASH_SRT was added to allow the hash handle name to be passed in. */ #define HASH_SORT(head,cmpfcn) HASH_SRT(hh,head,cmpfcn) #define HASH_SRT(hh,head,cmpfcn) \ @@ -766,10 +766,10 @@ do { } \ } while (0) -/* This function selects items from one hash into another hash. - * The end result is that the selected items have dual presence - * in both hashes. There is no copy of the items made; rather - * they are added into the new hash through a secondary hash +/* This function selects items from one hash into another hash. + * The end result is that the selected items have dual presence + * in both hashes. There is no copy of the items made; rather + * they are added into the new hash through a secondary hash * hash handle that must be present in the structure. */ #define HASH_SELECT(hh_dst, dst, hh_src, src, cond) \ do { \ @@ -822,7 +822,7 @@ do { #ifdef NO_DECLTYPE #define HASH_ITER(hh,head,el,tmp) \ for((el)=(head), (*(char**)(&(tmp)))=(char*)((head)?(head)->hh.next:NULL); \ - el; (el)=(tmp),(*(char**)(&(tmp)))=(char*)((tmp)?(tmp)->hh.next:NULL)) + el; (el)=(tmp),(*(char**)(&(tmp)))=(char*)((tmp)?(tmp)->hh.next:NULL)) #else #define HASH_ITER(hh,head,el,tmp) \ for((el)=(head),(tmp)=DECLTYPE(el)((head)?(head)->hh.next:NULL); \ @@ -830,7 +830,7 @@ for((el)=(head),(tmp)=DECLTYPE(el)((head)?(head)->hh.next:NULL); #endif /* obtain a count of items in the hash */ -#define HASH_COUNT(head) HASH_CNT(hh,head) +#define HASH_COUNT(head) HASH_CNT(hh,head) #define HASH_CNT(hh,head) ((head)?((head)->hh.tbl->num_items):0) typedef struct UT_hash_bucket { @@ -839,7 +839,7 @@ typedef struct UT_hash_bucket { /* expand_mult is normally set to 0. In this situation, the max chain length * threshold is enforced at its default value, HASH_BKT_CAPACITY_THRESH. (If - * the bucket's chain exceeds this length, bucket expansion is triggered). + * the bucket's chain exceeds this length, bucket expansion is triggered). * However, setting expand_mult to a non-zero value delays bucket expansion * (that would be triggered by additions to this particular bucket) * until its chain length reaches a *multiple* of HASH_BKT_CAPACITY_THRESH. @@ -847,7 +847,7 @@ typedef struct UT_hash_bucket { * multiplier is to reduce bucket expansions, since they are expensive, in * situations where we know that a particular bucket tends to be overused. * It is better to let its chain length grow to a longer yet-still-bounded - * value, than to do an O(n) bucket expansion too often. + * value, than to do an O(n) bucket expansion too often. 
*/ unsigned expand_mult; @@ -873,7 +873,7 @@ typedef struct UT_hash_table { * hash distribution; reaching them in a chain traversal takes >ideal steps */ unsigned nonideal_items; - /* ineffective expands occur when a bucket doubling was performed, but + /* ineffective expands occur when a bucket doubling was performed, but * afterward, more than half the items in the hash had nonideal chain * positions. If this happens on two consecutive expansions we inhibit any * further expansion, as it's not helping; this happens when the hash diff --git a/util.c b/util.c index 0b0bf809..f3287bc3 100644 --- a/util.c +++ b/util.c @@ -244,7 +244,7 @@ int json_rpc_call_sockopt_cb(void *userdata, curl_socket_t fd, curlsocktype purp int tcp_keepcnt = 5; int tcp_keepidle = 120; int tcp_keepintvl = 120; - + #ifndef WIN32 if (unlikely(setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &keepalive, sizeof(keepalive)))) @@ -269,7 +269,7 @@ int json_rpc_call_sockopt_cb(void *userdata, curl_socket_t fd, curlsocktype purp # endif /* __APPLE_CC__ */ #else /* WIN32 */ - + struct tcp_keepalive vals; vals.onoff = 1; vals.keepalivetime = tcp_keepidle * 1000; @@ -279,7 +279,7 @@ int json_rpc_call_sockopt_cb(void *userdata, curl_socket_t fd, curlsocktype purp if (unlikely(WSAIoctl(fd, SIO_KEEPALIVE_VALS, &vals, sizeof(vals), NULL, 0, &outputBytes, NULL, NULL))) return 1; - + #endif /* WIN32 */ return 0; @@ -374,7 +374,7 @@ json_t *json_rpc_call(CURL *curl, const char *url, if (probing) { pool->probed = true; /* If X-Long-Polling was found, activate long polling */ - if (hi.lp_path) + if (hi.lp_path) pool->hdr_path = hi.lp_path; else pool->hdr_path = NULL;
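
The uthash.h comment block touched above explains the integer ceiling trick used when sizing buckets: for b a power of two with lb = log2(b), ceil(n/b) = (n >> lb) + ((n & (b-1)) ? 1 : 0). A minimal standalone sketch of that identity follows; it is not part of the patch, and the function name and test values are illustrative only.

    #include <assert.h>
    #include <stdio.h>

    /* ceil(n/b) for b == 1 << lb, as described in the uthash.h comment:
     * ceil(n/b) = (n >> lb) + ((n & (b - 1)) ? 1 : 0)
     * i.e. the truncating shift plus one if any remainder bits are set. */
    static unsigned ceil_div_pow2(unsigned n, unsigned lb)
    {
            unsigned b = 1U << lb;
            return (n >> lb) + ((n & (b - 1)) ? 1 : 0);
    }

    int main(void)
    {
            /* e.g. 10 items over 4 buckets -> ideal chain length ceil(10/4) = 3 */
            assert(ceil_div_pow2(10, 2) == 3);
            assert(ceil_div_pow2(8, 2) == 2);   /* exact multiple, no +1 */
            assert(ceil_div_pow2(0, 4) == 0);
            printf("ceil(10/4) = %u\n", ceil_div_pow2(10, 2));
            return 0;
    }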