
Add comments, rename our struct http_data h to cookie, all clientsocket variables to sock, and the size_t results of socket_recv to byte_count. Make the signal handler install the default handler so a second SIGINT quits the application.
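
For context, a minimal standalone sketch of the new SIGINT behaviour (not opentracker code; g_running stands in for g_opentracker_running, and the cleanup work is left to the main loop rather than done inside the handler): the first ^C re-installs the default handler, so a second ^C falls through to the default action and kills the process even if the orderly shutdown stalls.

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t g_running = 1;

static void signal_handler( int s ) {
  if( s == SIGINT ) {
    /* Any further interrupt signal quits the application the hard way */
    signal( SIGINT, SIG_DFL );
    /* Ask the main loop / worker threads to wind down */
    g_running = 0;
  }
}

int main( void ) {
  signal( SIGINT, signal_handler );
  while( g_running ) {
    puts( "working... ^C once for clean shutdown, twice to abort" );
    sleep( 1 );
  }
  puts( "clean shutdown" );
  return 0;
}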

dynamic-accesslists
erdgeist committed 16 years ago
commit 66c906d5d3
  1. opentracker.c (140)
  2. ot_clean.c (1)
  3. ot_fullscrape.c (4)
  4. ot_fullscrape.h (2)
  5. ot_http.c (140)
  6. ot_livesync.h (2)
  7. ot_mutex.c (18)
  8. ot_mutex.h (4)
  9. ot_stats.c (4)
  10. ot_stats.h (2)
  11. ot_udp.c (26)
  12. ot_vector.c (2)

opentracker.c (140)

@@ -48,12 +48,17 @@ static void panic( const char *routine ) {
static void signal_handler( int s ) {
if( s == SIGINT ) {
signal( SIGINT, SIG_IGN);
/* Any new interrupt signal quits the application */
signal( SIGINT, SIG_DFL);
/* Tell all other threads to not acquire any new lock on a bucket
but cancel their operations and return */
g_opentracker_running = 0;
trackerlogic_deinit();
exit( 0 );
} else if( s == SIGALRM ) {
/* Maintain our copy of the clock. time() on BSDs is very expensive. */
g_now_seconds = time(NULL);
alarm(5);
}
@@ -90,88 +95,87 @@ static void help( char *name ) {
}
#undef HELPLINE
static void handle_dead( const int64 socket ) {
struct http_data* h=io_getcookie( socket );
if( h ) {
if( h->flag & STRUCT_HTTP_FLAG_IOB_USED )
iob_reset( &h->data.batch );
if( h->flag & STRUCT_HTTP_FLAG_ARRAY_USED )
array_reset( &h->data.request );
if( h->flag & STRUCT_HTTP_FLAG_WAITINGFORTASK )
mutex_workqueue_canceltask( socket );
free( h );
static void handle_dead( const int64 sock ) {
struct http_data* cookie=io_getcookie( sock );
if( cookie ) {
if( cookie->flag & STRUCT_HTTP_FLAG_IOB_USED )
iob_reset( &cookie->data.batch );
if( cookie->flag & STRUCT_HTTP_FLAG_ARRAY_USED )
array_reset( &cookie->data.request );
if( cookie->flag & STRUCT_HTTP_FLAG_WAITINGFORTASK )
mutex_workqueue_canceltask( sock );
free( cookie );
}
io_close( socket );
io_close( sock );
}
static ssize_t handle_read( const int64 clientsocket, struct ot_workstruct *ws ) {
struct http_data* h = io_getcookie( clientsocket );
ssize_t l;
static ssize_t handle_read( const int64 sock, struct ot_workstruct *ws ) {
struct http_data* cookie = io_getcookie( sock );
ssize_t byte_count;
if( ( l = io_tryread( clientsocket, ws->inbuf, G_INBUF_SIZE ) ) <= 0 ) {
handle_dead( clientsocket );
if( ( byte_count = io_tryread( sock, ws->inbuf, G_INBUF_SIZE ) ) <= 0 ) {
handle_dead( sock );
return 0;
}
/* If we get the whole request in one packet, handle it without copying */
if( !array_start( &h->data.request ) ) {
if( memchr( ws->inbuf, '\n', l ) ) {
if( !array_start( &cookie->data.request ) ) {
if( memchr( ws->inbuf, '\n', byte_count ) ) {
ws->request = ws->inbuf;
ws->request_size = l;
return http_handle_request( clientsocket, ws );
ws->request_size = byte_count;
return http_handle_request( sock, ws );
}
/* ... else take a copy */
h->flag |= STRUCT_HTTP_FLAG_ARRAY_USED;
array_catb( &h->data.request, ws->inbuf, l );
cookie->flag |= STRUCT_HTTP_FLAG_ARRAY_USED;
array_catb( &cookie->data.request, ws->inbuf, byte_count );
return 0;
}
h->flag |= STRUCT_HTTP_FLAG_ARRAY_USED;
array_catb( &h->data.request, ws->inbuf, l );
array_catb( &cookie->data.request, ws->inbuf, byte_count );
if( array_failed( &h->data.request ) )
return http_issue_error( clientsocket, ws, CODE_HTTPERROR_500 );
if( array_failed( &cookie->data.request ) )
return http_issue_error( sock, ws, CODE_HTTPERROR_500 );
if( array_bytes( &h->data.request ) > 8192 )
return http_issue_error( clientsocket, ws, CODE_HTTPERROR_500 );
if( array_bytes( &cookie->data.request ) > 8192 )
return http_issue_error( sock, ws, CODE_HTTPERROR_500 );
if( !memchr( array_start( &h->data.request ), '\n', array_bytes( &h->data.request ) ) )
if( !memchr( array_start( &cookie->data.request ), '\n', array_bytes( &cookie->data.request ) ) )
return 0;
ws->request = array_start( &h->data.request );
ws->request_size = array_bytes( &h->data.request );
return http_handle_request( clientsocket, ws );
ws->request = array_start( &cookie->data.request );
ws->request_size = array_bytes( &cookie->data.request );
return http_handle_request( sock, ws );
}
static void handle_write( const int64 clientsocket ) {
struct http_data* h=io_getcookie( clientsocket );
if( !h || ( iob_send( clientsocket, &h->data.batch ) <= 0 ) )
handle_dead( clientsocket );
static void handle_write( const int64 sock ) {
struct http_data* cookie=io_getcookie( sock );
if( !cookie || ( iob_send( sock, &cookie->data.batch ) <= 0 ) )
handle_dead( sock );
}
static void handle_accept( const int64 serversocket ) {
struct http_data *h;
struct http_data *cookie;
int64 sock;
ot_ip6 ip;
uint16 port;
tai6464 t;
int64 i;
while( ( i = socket_accept6( serversocket, ip, &port, NULL ) ) != -1 ) {
while( ( sock = socket_accept6( serversocket, ip, &port, NULL ) ) != -1 ) {
/* Put fd into a non-blocking mode */
io_nonblock( i );
io_nonblock( sock );
if( !io_fd( i ) ||
!( h = (struct http_data*)malloc( sizeof(struct http_data) ) ) ) {
io_close( i );
if( !io_fd( sock ) ||
!( cookie = (struct http_data*)malloc( sizeof(struct http_data) ) ) ) {
io_close( sock );
continue;
}
io_setcookie( i, h );
io_wantread( i );
io_setcookie( sock, cookie );
io_wantread( sock );
memset(h, 0, sizeof( struct http_data ) );
memcpy(h->ip,ip,sizeof(ot_ip6));
memset(cookie, 0, sizeof( struct http_data ) );
memcpy(cookie->ip,ip,sizeof(ot_ip6));
stats_issue_event( EVENT_ACCEPT, FLAG_TCP, (uintptr_t)ip);
@@ -179,7 +183,7 @@ static void handle_accept( const int64 serversocket ) {
time this often in FreeBSD and libowfat does not allow to set unix time */
taia_uint( &t, 0 ); /* Clear t */
tai_unix( &(t.sec), (g_now_seconds + OT_CLIENT_TIMEOUT) );
io_timeout( i, t );
io_timeout( sock, t );
}
if( errno == EAGAIN )
@@ -202,29 +206,29 @@ static void server_mainloop( ) {
panic( "Initializing worker failed" );
for( ; ; ) {
int64 i;
int64 sock;
io_wait();
while( ( i = io_canread( ) ) != -1 ) {
const void *cookie = io_getcookie( i );
while( ( sock = io_canread( ) ) != -1 ) {
const void *cookie = io_getcookie( sock );
if( (intptr_t)cookie == FLAG_TCP )
handle_accept( i );
handle_accept( sock );
else if( (intptr_t)cookie == FLAG_UDP )
handle_udp6( i, &ws );
handle_udp6( sock, &ws );
else
handle_read( i, &ws );
handle_read( sock, &ws );
}
while( ( i = mutex_workqueue_popresult( &iovec_entries, &iovector ) ) != -1 )
http_sendiovecdata( i, &ws, iovec_entries, iovector );
while( ( sock = mutex_workqueue_popresult( &iovec_entries, &iovector ) ) != -1 )
http_sendiovecdata( sock, &ws, iovec_entries, iovector );
while( ( i = io_canwrite( ) ) != -1 )
handle_write( i );
while( ( sock = io_canwrite( ) ) != -1 )
handle_write( sock );
if( g_now_seconds > next_timeout_check ) {
while( ( i = io_timeouted() ) != -1 )
handle_dead( i );
while( ( sock = io_timeouted() ) != -1 )
handle_dead( sock );
next_timeout_check = g_now_seconds + OT_CLIENT_TIMEOUT_CHECKINTERVAL;
}
@@ -236,7 +240,7 @@ static void server_mainloop( ) {
}
static int64_t ot_try_bind( ot_ip6 ip, uint16_t port, PROTO_FLAG proto ) {
int64 s = proto == FLAG_TCP ? socket_tcp6( ) : socket_udp6( );
int64 sock = proto == FLAG_TCP ? socket_tcp6( ) : socket_udp6( );
#ifndef WANT_V6
if( !ip6_isv4mapped(ip) ) {
@@ -257,24 +261,24 @@ static int64_t ot_try_bind( ot_ip6 ip, uint16_t port, PROTO_FLAG proto ) {
fputs( _debug, stderr );
#endif
if( socket_bind6_reuse( s, ip, port, 0 ) == -1 )
if( socket_bind6_reuse( sock, ip, port, 0 ) == -1 )
panic( "socket_bind6_reuse" );
if( ( proto == FLAG_TCP ) && ( socket_listen( s, SOMAXCONN) == -1 ) )
if( ( proto == FLAG_TCP ) && ( socket_listen( sock, SOMAXCONN) == -1 ) )
panic( "socket_listen" );
if( !io_fd( s ) )
if( !io_fd( sock ) )
panic( "io_fd" );
io_setcookie( s, (void*)proto );
io_setcookie( sock, (void*)proto );
io_wantread( s );
io_wantread( sock );
#ifdef _DEBUG
fputs( " success.\n", stderr);
#endif
return s;
return sock;
}
char * set_config_option( char **option, char *value ) {
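
Aside: handle_read above keeps a small fast path worth noting. If the first read already contains a newline, the request is parsed straight out of the shared input buffer; only requests that arrive in pieces get copied into the per-connection array, capped at 8192 bytes. A rough, self-contained sketch of that buffering strategy (plain malloc/realloc instead of libowfat's array; names hypothetical) could look like this:

#include <stdlib.h>
#include <string.h>

#define MAX_REQUEST 8192

struct conn {
  char  *buf;   /* only allocated once a request arrives in pieces */
  size_t used;
};

/* Returns 1 when a full request line is available via *request/*request_size,
   0 to wait for more data, -1 on error (caller closes the connection). */
static int buffer_request( struct conn *c, char *in, size_t in_len,
                           char **request, size_t *request_size ) {
  char *grown;

  /* Fast path: whole request arrived in one read, handle it without copying */
  if( !c->used && memchr( in, '\n', in_len ) ) {
    *request = in; *request_size = in_len;
    return 1;
  }

  /* Slow path: append to the per-connection buffer */
  if( c->used + in_len > MAX_REQUEST ) return -1;
  if( !( grown = realloc( c->buf, c->used + in_len ) ) ) return -1;
  c->buf = grown;
  memcpy( c->buf + c->used, in, in_len );
  c->used += in_len;

  if( !memchr( c->buf, '\n', c->used ) ) return 0;  /* still incomplete */
  *request = c->buf; *request_size = c->used;
  return 1;
}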

ot_clean.c (1)

@@ -6,6 +6,7 @@
/* System */
#include <pthread.h>
#include <unistd.h>
#include <string.h>
/* Libowfat */
#include "io.h"

ot_fullscrape.c (4)

@@ -80,8 +80,8 @@ void fullscrape_deinit( ) {
pthread_cancel( thread_id );
}
void fullscrape_deliver( int64 socket, ot_tasktype tasktype ) {
mutex_workqueue_pushtask( socket, tasktype );
void fullscrape_deliver( int64 sock, ot_tasktype tasktype ) {
mutex_workqueue_pushtask( sock, tasktype );
}
static int fullscrape_increase( int *iovec_entries, struct iovec **iovector,

ot_fullscrape.h (2)

@@ -10,7 +10,7 @@
void fullscrape_init( );
void fullscrape_deinit( );
void fullscrape_deliver( int64 socket, ot_tasktype tasktype );
void fullscrape_deliver( int64 sock, ot_tasktype tasktype );
#else

ot_http.c (140)

@@ -35,50 +35,50 @@ enum {
SUCCESS_HTTP_HEADER_LENGTH_CONTENT_ENCODING = 32,
SUCCESS_HTTP_SIZE_OFF = 17 };
static void http_senddata( const int64 client_socket, struct ot_workstruct *ws ) {
struct http_data *h = io_getcookie( client_socket );
static void http_senddata( const int64 sock, struct ot_workstruct *ws ) {
struct http_data *cookie = io_getcookie( sock );
ssize_t written_size;
/* whoever sends data is not interested in its input-array */
if( h && ( h->flag & STRUCT_HTTP_FLAG_ARRAY_USED ) ) {
h->flag &= ~STRUCT_HTTP_FLAG_ARRAY_USED;
array_reset( &h->data.request );
if( cookie && ( cookie->flag & STRUCT_HTTP_FLAG_ARRAY_USED ) ) {
cookie->flag &= ~STRUCT_HTTP_FLAG_ARRAY_USED;
array_reset( &cookie->data.request );
}
written_size = write( client_socket, ws->reply, ws->reply_size );
written_size = write( sock, ws->reply, ws->reply_size );
if( ( written_size < 0 ) || ( written_size == ws->reply_size ) ) {
free( h ); io_close( client_socket );
free( cookie ); io_close( sock );
} else {
char * outbuf;
tai6464 t;
if( !h ) return;
if( !cookie ) return;
if( !( outbuf = malloc( ws->reply_size - written_size ) ) ) {
free(h); io_close( client_socket );
free(cookie); io_close( sock );
return;
}
iob_reset( &h->data.batch );
iob_reset( &cookie->data.batch );
memcpy( outbuf, ws->reply + written_size, ws->reply_size - written_size );
iob_addbuf_free( &h->data.batch, outbuf, ws->reply_size - written_size );
h->flag |= STRUCT_HTTP_FLAG_IOB_USED;
iob_addbuf_free( &cookie->data.batch, outbuf, ws->reply_size - written_size );
cookie->flag |= STRUCT_HTTP_FLAG_IOB_USED;
/* writeable short data sockets just have a tcp timeout */
taia_uint( &t, 0 ); io_timeout( client_socket, t );
io_dontwantread( client_socket );
io_wantwrite( client_socket );
taia_uint( &t, 0 ); io_timeout( sock, t );
io_dontwantread( sock );
io_wantwrite( sock );
}
}
#define HTTPERROR_302 return http_issue_error( client_socket, ws, CODE_HTTPERROR_302 )
#define HTTPERROR_400 return http_issue_error( client_socket, ws, CODE_HTTPERROR_400 )
#define HTTPERROR_400_PARAM return http_issue_error( client_socket, ws, CODE_HTTPERROR_400_PARAM )
#define HTTPERROR_400_COMPACT return http_issue_error( client_socket, ws, CODE_HTTPERROR_400_COMPACT )
#define HTTPERROR_400_DOUBLEHASH return http_issue_error( client_socket, ws, CODE_HTTPERROR_400_PARAM )
#define HTTPERROR_403_IP return http_issue_error( client_socket, ws, CODE_HTTPERROR_403_IP )
#define HTTPERROR_404 return http_issue_error( client_socket, ws, CODE_HTTPERROR_404 )
#define HTTPERROR_500 return http_issue_error( client_socket, ws, CODE_HTTPERROR_500 )
ssize_t http_issue_error( const int64 client_socket, struct ot_workstruct *ws, int code ) {
#define HTTPERROR_302 return http_issue_error( sock, ws, CODE_HTTPERROR_302 )
#define HTTPERROR_400 return http_issue_error( sock, ws, CODE_HTTPERROR_400 )
#define HTTPERROR_400_PARAM return http_issue_error( sock, ws, CODE_HTTPERROR_400_PARAM )
#define HTTPERROR_400_COMPACT return http_issue_error( sock, ws, CODE_HTTPERROR_400_COMPACT )
#define HTTPERROR_400_DOUBLEHASH return http_issue_error( sock, ws, CODE_HTTPERROR_400_PARAM )
#define HTTPERROR_403_IP return http_issue_error( sock, ws, CODE_HTTPERROR_403_IP )
#define HTTPERROR_404 return http_issue_error( sock, ws, CODE_HTTPERROR_404 )
#define HTTPERROR_500 return http_issue_error( sock, ws, CODE_HTTPERROR_500 )
ssize_t http_issue_error( const int64 sock, struct ot_workstruct *ws, int code ) {
char *error_code[] = { "302 Found", "400 Invalid Request", "400 Invalid Request", "400 Invalid Request",
"403 Access Denied", "404 Not Found", "500 Internal Server Error" };
char *title = error_code[code];
@@ -93,32 +93,32 @@ ssize_t http_issue_error( const int64 client_socket, struct ot_workstruct *ws, i
fprintf( stderr, "DEBUG: invalid request was: %s\n", ws->debugbuf );
#endif
stats_issue_event( EVENT_FAILED, FLAG_TCP, code );
http_senddata( client_socket, ws );
http_senddata( sock, ws );
return ws->reply_size = -2;
}
ssize_t http_sendiovecdata( const int64 client_socket, struct ot_workstruct *ws, int iovec_entries, struct iovec *iovector ) {
struct http_data *h = io_getcookie( client_socket );
ssize_t http_sendiovecdata( const int64 sock, struct ot_workstruct *ws, int iovec_entries, struct iovec *iovector ) {
struct http_data *cookie = io_getcookie( sock );
char *header;
int i;
size_t header_size, size = iovec_length( &iovec_entries, &iovector );
tai6464 t;
/* No cookie? Bad socket. Leave. */
if( !h ) {
if( !cookie ) {
iovec_free( &iovec_entries, &iovector );
HTTPERROR_500;
}
/* If this socket collected request in a buffer,
free it now */
if( h->flag & STRUCT_HTTP_FLAG_ARRAY_USED ) {
h->flag &= ~STRUCT_HTTP_FLAG_ARRAY_USED;
array_reset( &h->data.request );
if( cookie->flag & STRUCT_HTTP_FLAG_ARRAY_USED ) {
cookie->flag &= ~STRUCT_HTTP_FLAG_ARRAY_USED;
array_reset( &cookie->data.request );
}
/* If we came here, wait for the answer is over */
h->flag &= ~STRUCT_HTTP_FLAG_WAITINGFORTASK;
cookie->flag &= ~STRUCT_HTTP_FLAG_WAITINGFORTASK;
/* Our answers never are 0 vectors. Return an error. */
if( !iovec_entries ) {
@@ -132,32 +132,32 @@ ssize_t http_sendiovecdata( const int64 client_socket, struct ot_workstruct *ws,
HTTPERROR_500;
}
if( h->flag & STRUCT_HTTP_FLAG_GZIP )
if( cookie->flag & STRUCT_HTTP_FLAG_GZIP )
header_size = sprintf( header, "HTTP/1.0 200 OK\r\nContent-Type: text/plain\r\nContent-Encoding: gzip\r\nContent-Length: %zd\r\n\r\n", size );
else if( h->flag & STRUCT_HTTP_FLAG_BZIP2 )
else if( cookie->flag & STRUCT_HTTP_FLAG_BZIP2 )
header_size = sprintf( header, "HTTP/1.0 200 OK\r\nContent-Type: text/plain\r\nContent-Encoding: bzip2\r\nContent-Length: %zd\r\n\r\n", size );
else
header_size = sprintf( header, "HTTP/1.0 200 OK\r\nContent-Type: text/plain\r\nContent-Length: %zd\r\n\r\n", size );
iob_reset( &h->data.batch );
iob_addbuf_free( &h->data.batch, header, header_size );
iob_reset( &cookie->data.batch );
iob_addbuf_free( &cookie->data.batch, header, header_size );
/* Will move to ot_iovec.c */
for( i=0; i<iovec_entries; ++i )
iob_addbuf_munmap( &h->data.batch, iovector[i].iov_base, iovector[i].iov_len );
iob_addbuf_munmap( &cookie->data.batch, iovector[i].iov_base, iovector[i].iov_len );
free( iovector );
h->flag |= STRUCT_HTTP_FLAG_IOB_USED;
cookie->flag |= STRUCT_HTTP_FLAG_IOB_USED;
/* writeable sockets timeout after 10 minutes */
taia_now( &t ); taia_addsec( &t, &t, OT_CLIENT_TIMEOUT_SEND );
io_timeout( client_socket, t );
io_dontwantread( client_socket );
io_wantwrite( client_socket );
io_timeout( sock, t );
io_dontwantread( sock );
io_wantwrite( sock );
return 0;
}
static ssize_t http_handle_stats( const int64 client_socket, struct ot_workstruct *ws, char *read_ptr ) {
static ssize_t http_handle_stats( const int64 sock, struct ot_workstruct *ws, char *read_ptr ) {
static const ot_keywords keywords_main[] =
{ { "mode", 1 }, {"format", 2 }, { NULL, -3 } };
static const ot_keywords keywords_mode[] =
@@ -173,9 +173,9 @@ static const ot_keywords keywords_format[] =
int mode = TASK_STATS_PEERS, scanon = 1, format = 0;
#ifdef WANT_RESTRICT_STATS
struct http_data *h = io_getcookie( client_socket );
struct http_data *cookie = io_getcookie( sock );
if( !h || !accesslist_isblessed( h->ip, OT_PERMISSION_MAY_STAT ) )
if( !cookie || !accesslist_isblessed( cookie->ip, OT_PERMISSION_MAY_STAT ) )
HTTPERROR_403_IP;
#endif
@@ -195,22 +195,22 @@ static const ot_keywords keywords_format[] =
#ifdef WANT_FULLSCRAPE
if( mode == TASK_STATS_TPB ) {
struct http_data* h = io_getcookie( client_socket );
struct http_data* cookie = io_getcookie( sock );
tai6464 t;
#ifdef WANT_COMPRESSION_GZIP
ws->request[ws->request_size] = 0;
if( strstr( read_ptr - 1, "gzip" ) ) {
h->flag |= STRUCT_HTTP_FLAG_GZIP;
cookie->flag |= STRUCT_HTTP_FLAG_GZIP;
format |= TASK_FLAG_GZIP;
}
#endif
/* Pass this task to the worker thread */
h->flag |= STRUCT_HTTP_FLAG_WAITINGFORTASK;
cookie->flag |= STRUCT_HTTP_FLAG_WAITINGFORTASK;
/* Clients waiting for us should not easily timeout */
taia_uint( &t, 0 ); io_timeout( client_socket, t );
fullscrape_deliver( client_socket, format );
io_dontwantread( client_socket );
taia_uint( &t, 0 ); io_timeout( sock, t );
fullscrape_deliver( sock, format );
io_dontwantread( sock );
return ws->reply_size = -2;
}
#endif
@@ -219,8 +219,8 @@ static const ot_keywords keywords_format[] =
if( ( mode & TASK_CLASS_MASK ) == TASK_STATS ) {
tai6464 t;
/* Complex stats also include expensive memory debugging tools */
taia_uint( &t, 0 ); io_timeout( client_socket, t );
stats_deliver( client_socket, mode );
taia_uint( &t, 0 ); io_timeout( sock, t );
stats_deliver( sock, mode );
return ws->reply_size = -2;
}
@@ -231,36 +231,36 @@ static const ot_keywords keywords_format[] =
}
#ifdef WANT_FULLSCRAPE
static ssize_t http_handle_fullscrape( const int64 client_socket, struct ot_workstruct *ws ) {
struct http_data* h = io_getcookie( client_socket );
static ssize_t http_handle_fullscrape( const int64 sock, struct ot_workstruct *ws ) {
struct http_data* cookie = io_getcookie( sock );
int format = 0;
tai6464 t;
#ifdef WANT_COMPRESSION_GZIP
ws->request[ws->request_size-1] = 0;
if( strstr( ws->request, "gzip" ) ) {
h->flag |= STRUCT_HTTP_FLAG_GZIP;
cookie->flag |= STRUCT_HTTP_FLAG_GZIP;
format = TASK_FLAG_GZIP;
stats_issue_event( EVENT_FULLSCRAPE_REQUEST_GZIP, 0, (uintptr_t)h->ip );
stats_issue_event( EVENT_FULLSCRAPE_REQUEST_GZIP, 0, (uintptr_t)cookie->ip );
} else
#endif
stats_issue_event( EVENT_FULLSCRAPE_REQUEST, 0, (uintptr_t)h->ip );
stats_issue_event( EVENT_FULLSCRAPE_REQUEST, 0, (uintptr_t)cookie->ip );
#ifdef _DEBUG_HTTPERROR
write( 2, ws->debugbuf, G_DEBUGBUF_SIZE );
#endif
/* Pass this task to the worker thread */
h->flag |= STRUCT_HTTP_FLAG_WAITINGFORTASK;
cookie->flag |= STRUCT_HTTP_FLAG_WAITINGFORTASK;
/* Clients waiting for us should not easily timeout */
taia_uint( &t, 0 ); io_timeout( client_socket, t );
fullscrape_deliver( client_socket, TASK_FULLSCRAPE | format );
io_dontwantread( client_socket );
taia_uint( &t, 0 ); io_timeout( sock, t );
fullscrape_deliver( sock, TASK_FULLSCRAPE | format );
io_dontwantread( sock );
return ws->reply_size = -2;
}
#endif
static ssize_t http_handle_scrape( const int64 client_socket, struct ot_workstruct *ws, char *read_ptr ) {
static ssize_t http_handle_scrape( const int64 sock, struct ot_workstruct *ws, char *read_ptr ) {
static const ot_keywords keywords_scrape[] = { { "info_hash", 1 }, { NULL, -3 } };
ot_hash * multiscrape_buf = (ot_hash*)ws->request;
@@ -305,7 +305,7 @@ static ot_keywords keywords_announce[] = { { "port", 1 }, { "left", 2 }, { "even
#endif
{ NULL, -3 } };
static ot_keywords keywords_announce_event[] = { { "completed", 1 }, { "stopped", 2 }, { NULL, -3 } };
static ssize_t http_handle_announce( const int64 client_socket, struct ot_workstruct *ws, char *read_ptr ) {
static ssize_t http_handle_announce( const int64 sock, struct ot_workstruct *ws, char *read_ptr ) {
int numwant, tmp, scanon;
ot_peer peer;
ot_hash *hash = NULL;
@@ -320,7 +320,7 @@ static ssize_t http_handle_announce( const int64 client_socket, struct ot_workst
++read_ptr;
}
OT_SETIP( &peer, ((struct http_data*)io_getcookie( client_socket ) )->ip );
OT_SETIP( &peer, ((struct http_data*)io_getcookie( sock ) )->ip );
OT_SETPORT( &peer, &port );
OT_PEERFLAG( &peer ) = 0;
numwant = 50;
@@ -400,7 +400,7 @@ static ssize_t http_handle_announce( const int64 client_socket, struct ot_workst
return ws->reply_size;
}
ssize_t http_handle_request( const int64 client_socket, struct ot_workstruct *ws ) {
ssize_t http_handle_request( const int64 sock, struct ot_workstruct *ws ) {
ssize_t reply_off, len;
char *read_ptr = ws->request, *write_ptr;
@@ -433,17 +433,17 @@ ssize_t http_handle_request( const int64 client_socket, struct ot_workstruct *ws
/* This is the hardcore match for announce*/
if( ( *write_ptr == 'a' ) || ( *write_ptr == '?' ) )
http_handle_announce( client_socket, ws, read_ptr );
http_handle_announce( sock, ws, read_ptr );
#ifdef WANT_FULLSCRAPE
else if( !memcmp( write_ptr, "scrape HTTP/", 12 ) )
http_handle_fullscrape( client_socket, ws );
http_handle_fullscrape( sock, ws );
#endif
/* This is the hardcore match for scrape */
else if( !memcmp( write_ptr, "sc", 2 ) )
http_handle_scrape( client_socket, ws, read_ptr );
http_handle_scrape( sock, ws, read_ptr );
/* All the rest is matched the standard way */
else if( !memcmp( write_ptr, "stats", 5) )
http_handle_stats( client_socket, ws, read_ptr );
http_handle_stats( sock, ws, read_ptr );
else
HTTPERROR_404;
@@ -469,7 +469,7 @@ ssize_t http_handle_request( const int64 client_socket, struct ot_workstruct *ws
/* 3. Finally we join both blocks neatly */
ws->outbuf[ SUCCESS_HTTP_HEADER_LENGTH - 1 ] = '\n';
http_senddata( client_socket, ws );
http_senddata( sock, ws );
return ws->reply_size;
}

ot_livesync.h (2)

@@ -94,7 +94,7 @@ void livesync_tell( ot_hash const info_hash, const ot_peer * const peer );
void livesync_ticker( );
/* Handle an incoming live sync packet */
void handle_livesync( const int64 serversocket );
void handle_livesync( const int64 sock );
#else

ot_mutex.c (18)

@@ -113,7 +113,7 @@ size_t mutex_get_torrent_count( ) {
struct ot_task {
ot_taskid taskid;
ot_tasktype tasktype;
int64 socket;
int64 sock;
int iovec_entries;
struct iovec *iovec;
struct ot_task *next;
@@ -124,7 +124,7 @@ static struct ot_task *tasklist = NULL;
static pthread_mutex_t tasklist_mutex;
static pthread_cond_t tasklist_being_filled;
int mutex_workqueue_pushtask( int64 socket, ot_tasktype tasktype ) {
int mutex_workqueue_pushtask( int64 sock, ot_tasktype tasktype ) {
struct ot_task ** tmptask, * task;
/* Want exclusive access to tasklist */
@@ -148,7 +148,7 @@ int mutex_workqueue_pushtask( int64 socket, ot_tasktype tasktype ) {
task->taskid = 0;
task->tasktype = tasktype;
task->socket = socket;
task->sock = sock;
task->iovec_entries = 0;
task->iovec = NULL;
task->next = 0;
@@ -162,7 +162,7 @@ int mutex_workqueue_pushtask( int64 socket, ot_tasktype tasktype ) {
return 0;
}
void mutex_workqueue_canceltask( int64 socket ) {
void mutex_workqueue_canceltask( int64 sock ) {
struct ot_task ** task;
/* Want exclusive access to tasklist */
@@ -171,10 +171,10 @@ void mutex_workqueue_canceltask( int64 socket ) {
MTX_DBG( "canceltask locked.\n" );
task = &tasklist;
while( *task && ( (*task)->socket != socket ) )
while( *task && ( (*task)->sock != sock ) )
*task = (*task)->next;
if( *task && ( (*task)->socket == socket ) ) {
if( *task && ( (*task)->sock == sock ) ) {
struct iovec *iovec = (*task)->iovec;
struct ot_task *ptask = *task;
int i;
@@ -281,7 +281,7 @@ int mutex_workqueue_pushresult( ot_taskid taskid, int iovec_entries, struct iove
int64 mutex_workqueue_popresult( int *iovec_entries, struct iovec ** iovec ) {
struct ot_task ** task;
int64 socket = -1;
int64 sock = -1;
/* Want exclusive access to tasklist */
MTX_DBG( "popresult locks.\n" );
@@ -297,7 +297,7 @@ int64 mutex_workqueue_popresult( int *iovec_entries, struct iovec ** iovec ) {
*iovec_entries = (*task)->iovec_entries;
*iovec = (*task)->iovec;
socket = (*task)->socket;
sock = (*task)->sock;
*task = (*task)->next;
free( ptask );
@@ -307,7 +307,7 @@ int64 mutex_workqueue_popresult( int *iovec_entries, struct iovec ** iovec ) {
MTX_DBG( "popresult unlocks.\n" );
pthread_mutex_unlock( &tasklist_mutex );
MTX_DBG( "popresult unlocked.\n" );
return socket;
return sock;
}
void mutex_init( ) {

ot_mutex.h (4)

@@ -59,8 +59,8 @@ typedef enum {
typedef unsigned long ot_taskid;
int mutex_workqueue_pushtask( int64 socket, ot_tasktype tasktype );
void mutex_workqueue_canceltask( int64 socket );
int mutex_workqueue_pushtask( int64 sock, ot_tasktype tasktype );
void mutex_workqueue_canceltask( int64 sock );
void mutex_workqueue_pushsuccess( ot_taskid taskid );
ot_taskid mutex_workqueue_poptask( ot_tasktype *tasktype );
int mutex_workqueue_pushresult( ot_taskid taskid, int iovec_entries, struct iovec *iovector );

ot_stats.c (4)

@@ -611,8 +611,8 @@ static void * stats_worker( void * args ) {
return NULL;
}
void stats_deliver( int64 socket, int tasktype ) {
mutex_workqueue_pushtask( socket, tasktype );
void stats_deliver( int64 sock, int tasktype ) {
mutex_workqueue_pushtask( sock, tasktype );
}
static pthread_t thread_id;

ot_stats.h (2)

@@ -33,7 +33,7 @@ enum {
};
void stats_issue_event( ot_status_event event, PROTO_FLAG proto, uintptr_t event_data );
void stats_deliver( int64 socket, int tasktype );
void stats_deliver( int64 sock, int tasktype );
size_t return_stats_for_tracker( char *reply, int mode, int format );
size_t stats_return_tracker_version( char *reply );
void stats_init( );

ot_udp.c (26)

@@ -44,15 +44,15 @@ void handle_udp6( int64 serversocket, struct ot_workstruct *ws ) {
uint32_t *outpacket = (uint32_t*)ws->outbuf;
uint32_t numwant, left, event, scopeid;
uint16_t port, remoteport;
size_t r, r_out;
size_t byte_count, scrape_count;
r = socket_recv6( serversocket, ws->inbuf, G_INBUF_SIZE, remoteip, &remoteport, &scopeid );
byte_count = socket_recv6( serversocket, ws->inbuf, G_INBUF_SIZE, remoteip, &remoteport, &scopeid );
stats_issue_event( EVENT_ACCEPT, FLAG_UDP, (uintptr_t)remoteip );
stats_issue_event( EVENT_READ, FLAG_UDP, r );
stats_issue_event( EVENT_READ, FLAG_UDP, byte_count );
/* Minimum udp tracker packet size, also catches error */
if( r < 16 )
if( byte_count < 16 )
return;
switch( ntohl( inpacket[2] ) ) {
@@ -70,7 +70,7 @@ void handle_udp6( int64 serversocket, struct ot_workstruct *ws ) {
break;
case 1: /* This is an announce action */
/* Minimum udp announce packet size */
if( r < 98 )
if( byte_count < 98 )
return;
if( !udp_test_connectionid( inpacket, remoteip ))
@@ -103,12 +103,12 @@ void handle_udp6( int64 serversocket, struct ot_workstruct *ws ) {
outpacket[1] = inpacket[12/4];
if( OT_PEERFLAG( &peer ) & PEER_FLAG_STOPPED ) /* Peer is gone. */
r = remove_peer_from_torrent( *hash, &peer, ws->outbuf, FLAG_UDP );
byte_count = remove_peer_from_torrent( *hash, &peer, ws->outbuf, FLAG_UDP );
else
r = 8 + add_peer_to_torrent_and_return_peers( *hash, &peer, FLAG_UDP, numwant, ((char*)outpacket) + 8 );
byte_count = 8 + add_peer_to_torrent_and_return_peers( *hash, &peer, FLAG_UDP, numwant, ((char*)outpacket) + 8 );
socket_send6( serversocket, ws->outbuf, r, remoteip, remoteport, 0 );
stats_issue_event( EVENT_ANNOUNCE, FLAG_UDP, r );
socket_send6( serversocket, ws->outbuf, byte_count, remoteip, remoteport, 0 );
stats_issue_event( EVENT_ANNOUNCE, FLAG_UDP, byte_count );
break;
case 2: /* This is a scrape action */
@@ -118,11 +118,11 @@ void handle_udp6( int64 serversocket, struct ot_workstruct *ws ) {
outpacket[0] = htonl( 2 ); /* scrape action */
outpacket[1] = inpacket[12/4];
for( r_out = 0; ( r_out * 20 < r - 16) && ( r_out <= 74 ); r_out++ )
return_udp_scrape_for_torrent( *(ot_hash*)( ((char*)inpacket) + 16 + 20 * r_out ), ((char*)outpacket) + 8 + 12 * r_out );
for( scrape_count = 0; ( scrape_count * 20 < byte_count - 16) && ( scrape_count <= 74 ); scrape_count++ )
return_udp_scrape_for_torrent( *(ot_hash*)( ((char*)inpacket) + 16 + 20 * scrape_count ), ((char*)outpacket) + 8 + 12 * scrape_count );
socket_send6( serversocket, ws->outbuf, 8 + 12 * r_out, remoteip, remoteport, 0 );
stats_issue_event( EVENT_SCRAPE, FLAG_UDP, r );
socket_send6( serversocket, ws->outbuf, 8 + 12 * scrape_count, remoteip, remoteport, 0 );
stats_issue_event( EVENT_SCRAPE, FLAG_UDP, scrape_count );
break;
}
}

ot_vector.c (2)

@@ -23,8 +23,6 @@ static int vector_compare_peer(const void *peer1, const void *peer2 ) {
/* This function gives us a binary search that returns a pointer, even if
no exact match is found. In that case it sets exactmatch 0 and gives
calling functions the chance to insert data
NOTE: Minimal compare_size is 4, member_size must be a multiple of 4
*/
void *binary_search( const void * const key, const void * base, const size_t member_count, const size_t member_size,
size_t compare_size, int *exactmatch ) {
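
The comment above states the contract this commit leaves untouched: binary_search always returns a usable position, and *exactmatch tells the caller whether the key was really found or whether the returned slot is merely the right insertion point. A minimal sketch of that contract over plain ints (not the opentracker implementation, which compares raw byte prefixes of arbitrary-size members):

#include <stddef.h>

/* Returns a pointer to the matching element, or to the slot where the key
   would have to be inserted; *exactmatch reports which case occurred. */
static int *int_binary_search( int key, int *base, size_t member_count, int *exactmatch ) {
  size_t lo = 0, hi = member_count;
  *exactmatch = 0;
  while( lo < hi ) {
    size_t mid = lo + ( hi - lo ) / 2;
    if( base[mid] == key ) { *exactmatch = 1; return base + mid; }
    if( base[mid] < key )  lo = mid + 1;
    else                   hi = mid;
  }
  return base + lo;
}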
