
* opentracker now drops permissions in the correct order and really chroot()s if run as root

* lock passing between add_peer_to_torrent and return_peers_for_torrent is now avoided by providing a more general add_peer_to_torrent_and_return_peers function that can be used with NULL parameters to not return any peers (in the sync case)
* in order to keep a fast overview of how many torrents opentracker maintains, every mutex_bucket_unlock operation now expects an additional integer parameter that tells ot_mutex.c how many torrents have been added or removed. A function mutex_get_torrent_count has been introduced. (A usage sketch of both interfaces follows below.)
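
For orientation, here is a minimal usage sketch of the two reworked interfaces, written against the signatures introduced in this commit. It is illustrative only: the announce_example and bucket_example helpers, the outbuf buffer and the bucket index 0 are made up for the example, and the snippet assumes it is compiled inside the opentracker source tree.

/* Sketch only, not part of the commit; assumes the opentracker headers
   from this tree and a sufficiently large outbuf supplied by the caller. */
#include <stdio.h>
#include "trackerlogic.h"
#include "ot_mutex.h"

static size_t announce_example( ot_hash *hash, ot_peer *peer, char *outbuf ) {
  /* HTTP/UDP announce path: add the peer and write up to 50 peers into
     outbuf while the bucket is locked exactly once. */
  size_t len = add_peer_to_torrent_and_return_peers( hash, peer, FLAG_TCP, 50, outbuf );

  /* Live sync path: the add_peer_to_torrent() macro passes amount=0 and
     reply=NULL, so no peer list is built and no lock has to be passed on. */
  add_peer_to_torrent( hash, peer, FLAG_MCA );

  return len;
}

static void bucket_example( void ) {
  /* Every unlock now reports how many torrents were added (+1 per insert)
     or removed (-1 per delete) while the bucket was held... */
  ot_vector *torrents_list = mutex_bucket_lock( 0 );
  (void)torrents_list;          /* insert or clean torrents here */
  mutex_bucket_unlock( 0, 0 );  /* nothing was added or removed */

  /* ...so the total is available without walking all buckets: */
  printf( "%zu torrents\n", mutex_get_torrent_count( ) );
}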
Branch: dynamic-accesslists
Author: erdgeist (16 years ago)
Commit: 2df09905f5
12 changed files (lines changed per file):
  1. opentracker.c (58)
  2. ot_clean.c (4)
  3. ot_fullscrape.c (6)
  4. ot_http.c (14)
  5. ot_livesync.c (396)
  6. ot_livesync.h (22)
  7. ot_mutex.c (16)
  8. ot_mutex.h (6)
  9. ot_stats.c (32)
  10. ot_udp.c (14)
  11. trackerlogic.c (73)
  12. trackerlogic.h (7)

opentracker.c (58)

@@ -350,8 +350,47 @@ int parse_configfile( char * config_filename ) {
return bound;
}
int main( int argc, char **argv ) {
int drop_privileges (const char * const serverdir) {
struct passwd *pws = NULL;
/* Grab pws entry before chrooting */
pws = getpwnam( "nobody" );
endpwent();
if( geteuid() == 0 ) {
/* Running as root: chroot and drop privileges */
if(chroot( serverdir )) {
fprintf( stderr, "Could not chroot to %s, because: %s\n", serverdir, strerror(errno) );
return -1;
}
if(chdir("/"))
panic("chdir() failed after chrooting: ");
if( !pws ) {
setegid( (gid_t)-2 ); setgid( (gid_t)-2 );
setuid( (uid_t)-2 ); seteuid( (uid_t)-2 );
}
else {
setegid( pws->pw_gid ); setgid( pws->pw_gid );
setuid( pws->pw_uid ); seteuid( pws->pw_uid );
}
if( geteuid() == 0 || getegid() == 0 )
panic("Still running with root privileges?!");
}
else {
/* Normal user, just chdir() */
if(chdir( serverdir )) {
fprintf( stderr, "Could not chroot to %s, because: %s\n", serverdir, strerror(errno) );
return -1;
}
}
return 0;
}
int main( int argc, char **argv ) {
char serverip[4] = {0,0,0,0}, tmpip[4];
int bound = 0, scanon = 1;
uint16_t tmpport;
@@ -404,16 +443,8 @@ while( scanon ) {
ot_try_bind( serverip, 6969, FLAG_UDP );
}
/* Drop permissions */
pws = getpwnam( "nobody" );
if( !pws ) {
setegid( (gid_t)-2 ); setuid( (uid_t)-2 );
setgid( (gid_t)-2 ); seteuid( (uid_t)-2 );
} else {
setegid( pws->pw_gid ); setuid( pws->pw_uid );
setgid( pws->pw_gid ); seteuid( pws->pw_uid );
}
endpwent();
if( drop_privileges( g_serverdir ? g_serverdir : "." ) == -1 )
panic( "drop_privileges failed, exiting. Last error");
signal( SIGPIPE, SIG_IGN );
signal( SIGINT, signal_handler );
@@ -421,9 +452,10 @@ while( scanon ) {
g_now_seconds = time( NULL );
if( trackerlogic_init( g_serverdir ? g_serverdir : "." ) == -1 )
panic( "Logic not started" );
/* Init all sub systems. This call may fail with an exit() */
trackerlogic_init( );
/* Kick off our initial clock setting alarm */
alarm(5);
server_mainloop( );

ot_clean.c (4)

@@ -105,15 +105,17 @@ static void * clean_worker( void * args ) {
while( bucket-- ) {
ot_vector *torrents_list = mutex_bucket_lock( bucket );
size_t toffs;
int delta_torrentcount = 0;
for( toffs=0; toffs<torrents_list->size; ++toffs ) {
ot_torrent *torrent = ((ot_torrent*)(torrents_list->data)) + toffs;
if( clean_single_torrent( torrent ) ) {
vector_remove_torrent( torrents_list, torrent );
delta_torrentcount -= 1;
--toffs; continue;
}
}
mutex_bucket_unlock( bucket );
mutex_bucket_unlock( bucket, delta_torrentcount );
if( !g_opentracker_running )
return NULL;
usleep( OT_CLEAN_SLEEP );

ot_fullscrape.c (6)

@@ -199,13 +199,13 @@ static void fullscrape_make( int *iovec_entries, struct iovec **iovector, ot_tas
/* Check if there still is enough buffer left */
while( r >= re )
if( fullscrape_increase( iovec_entries, iovector, &r, &re WANT_COMPRESSION_GZIP_PARAM( &strm, mode, Z_NO_FLUSH ) ) )
return mutex_bucket_unlock( bucket );
return mutex_bucket_unlock( bucket, 0 );
IF_COMPRESSION( r = compress_buffer; )
}
/* All torrents done: release lock on current bucket */
mutex_bucket_unlock( bucket );
mutex_bucket_unlock( bucket, 0 );
/* Parent thread died? */
if( !g_opentracker_running )
@@ -225,7 +225,7 @@ static void fullscrape_make( int *iovec_entries, struct iovec **iovector, ot_tas
while( r >= re )
if( fullscrape_increase( iovec_entries, iovector, &r, &re WANT_COMPRESSION_GZIP_PARAM( &strm, mode, Z_FINISH ) ) )
return mutex_bucket_unlock( bucket );
return mutex_bucket_unlock( bucket, 0 );
deflateEnd(&strm);
}
#endif

ot_http.c (14)

@@ -385,7 +385,6 @@ static ssize_t http_handle_announce( const int64 client_socket, char *data ) {
char *c = data;
int numwant, tmp, scanon;
ot_peer peer;
ot_torrent *torrent;
ot_hash *hash = NULL;
unsigned short port = htons(6881);
ssize_t len;
@@ -403,6 +402,10 @@ static ssize_t http_handle_announce( const int64 client_socket, char *data ) {
numwant = 50;
scanon = 1;
#ifdef _DEBUG_PEERID
g_this_peerid_data = NULL;
#endif
while( scanon ) {
switch( scan_urlencoded_query( &c, data = c, SCAN_SEARCHPATH_PARAM ) ) {
case -2: scanon = 0; break; /* TERMINATOR */
@@ -483,10 +486,11 @@ static ssize_t http_handle_announce( const int64 client_socket, char *data ) {
if( OT_PEERFLAG( &peer ) & PEER_FLAG_STOPPED )
len = remove_peer_from_torrent( hash, &peer, SUCCESS_HTTP_HEADER_LENGTH + static_outbuf, FLAG_TCP );
else {
torrent = add_peer_to_torrent( hash, &peer WANT_SYNC_PARAM( 0 ) );
if( !torrent || !( len = return_peers_for_torrent( torrent, numwant, SUCCESS_HTTP_HEADER_LENGTH + static_outbuf, FLAG_TCP ) ) ) HTTPERROR_500;
}
else
len = add_peer_to_torrent_and_return_peers(hash, &peer, FLAG_TCP, numwant, SUCCESS_HTTP_HEADER_LENGTH + static_outbuf);
if( !len ) HTTPERROR_500;
stats_issue_event( EVENT_ANNOUNCE, FLAG_TCP, len);
return len;
}

ot_livesync.c (396)

@@ -9,59 +9,109 @@
#include <string.h>
#include <pthread.h>
#include <unistd.h>
#include <stdlib.h>
/* Libowfat */
#include "socket.h"
#include "ndelay.h"
#include "byte.h"
/* Opentracker */
#include "trackerlogic.h"
#include "ot_livesync.h"
#include "ot_accesslist.h"
#include "ot_stats.h"
#include "ot_mutex.h"
#ifdef WANT_SYNC_LIVE
char groupip_1[4] = { 224,0,23,42 };
char groupip_1[4] = { 224,0,23,5 };
#define LIVESYNC_BUFFINSIZE (256*256)
#define LIVESYNC_BUFFSIZE 1504
#define LIVESYNC_BUFFWATER (sizeof(ot_peer)+sizeof(ot_hash))
#define LIVESYNC_INCOMING_BUFFSIZE (256*256)
#define LIVESYNC_MAXDELAY 15
#define LIVESYNC_OUTGOING_BUFFSIZE_PEERS 1504
#define LIVESYNC_OUTGOING_WATERMARK_PEERS (sizeof(ot_peer)+sizeof(ot_hash))
#ifdef WANT_SYNC_SCRAPE
#define LIVESYNC_OUTGOING_BUFFSIZE_SCRAPE 1504
#define LIVESYNC_OUTGOING_WATERMARK_SCRAPE (sizeof(ot_hash)+sizeof(uint64_t)+sizeof(uint32_t))
#define LIVESYNC_OUTGOING_MAXPACKETS_SCRAPE 100
#define LIVESYNC_FIRST_BEACON_DELAY (30*60) /* seconds */
#define LIVESYNC_BEACON_INTERVAL 60 /* seconds */
#define LIVESYNC_INQUIRE_THRESH 0.75
#endif /* WANT_SYNC_SCRAPE */
#define LIVESYNC_MAXDELAY 15 /* seconds */
enum { OT_SYNC_PEER
#ifdef WANT_SYNC_SCRAPE
, OT_SYNC_SCRAPE_BEACON, OT_SYNC_SCRAPE_INQUIRE, OT_SYNC_SCRAPE_TELL
#endif
};
/* Forward declaration */
static void * livesync_worker( void * args );
/* For outgoing packets */
static int64 g_livesync_socket_in = -1;
static int64 g_socket_in = -1;
/* For incoming packets */
static int64 g_livesync_socket_out = -1;
static int64 g_socket_out = -1;
static uint8_t g_inbuffer[LIVESYNC_INCOMING_BUFFSIZE];
static uint8_t g_peerbuffer_start[LIVESYNC_OUTGOING_BUFFSIZE_PEERS];
static uint8_t *g_peerbuffer_pos;
static uint8_t *g_peerbuffer_highwater = g_peerbuffer_start + LIVESYNC_OUTGOING_BUFFSIZE_PEERS - LIVESYNC_OUTGOING_WATERMARK_PEERS;
static ot_time g_next_packet_time;
static uint8_t livesync_inbuffer[LIVESYNC_BUFFINSIZE];
static uint8_t livesync_outbuffer_start[ LIVESYNC_BUFFSIZE ];
static uint8_t *livesync_outbuffer_pos;
static uint8_t *livesync_outbuffer_highwater = livesync_outbuffer_start + LIVESYNC_BUFFSIZE - LIVESYNC_BUFFWATER;
static ot_time livesync_lastpacket_time;
#ifdef WANT_SYNC_SCRAPE
/* Live sync scrape buffers, states and timers */
static ot_time g_next_beacon_time;
static ot_time g_next_inquire_time;
static uint8_t g_scrapebuffer_start[LIVESYNC_OUTGOING_BUFFSIZE_SCRAPE];
static uint8_t *g_scrapebuffer_pos;
static uint8_t *g_scrapebuffer_highwater = g_scrapebuffer_start + LIVESYNC_OUTGOING_BUFFSIZE_SCRAPE - LIVESYNC_OUTGOING_WATERMARK_SCRAPE;
static size_t g_inquire_remote_count;
static uint32_t g_inquire_remote_host;
static int g_inquire_inprogress;
static int g_inquire_bucket;
#endif /* WANT_SYNC_SCRAPE */
static pthread_t thread_id;
void livesync_init( ) {
if( g_livesync_socket_in == -1 )
if( g_socket_in == -1 )
exerr( "No socket address for live sync specified." );
livesync_outbuffer_pos = livesync_outbuffer_start;
memmove( livesync_outbuffer_pos, &g_tracker_id, sizeof( g_tracker_id ) );
livesync_outbuffer_pos += sizeof( g_tracker_id );
livesync_lastpacket_time = g_now_seconds;
/* Prepare outgoing peers buffer */
g_peerbuffer_pos = g_peerbuffer_start;
memmove( g_peerbuffer_pos, &g_tracker_id, sizeof( g_tracker_id ) );
uint32_pack_big( (char*)g_peerbuffer_pos + sizeof( g_tracker_id ), OT_SYNC_PEER);
g_peerbuffer_pos += sizeof( g_tracker_id ) + sizeof( uint32_t);
#ifdef WANT_SYNC_SCRAPE
/* Prepare outgoing scrape buffer */
g_scrapebuffer_pos = g_scrapebuffer_start;
memmove( g_scrapebuffer_pos, &g_tracker_id, sizeof( g_tracker_id ) );
uint32_pack_big( (char*)g_scrapebuffer_pos + sizeof( g_tracker_id ), OT_SYNC_SCRAPE_TELL);
g_scrapebuffer_pos += sizeof( g_tracker_id ) + sizeof( uint32_t);
/* Wind up timers for inquires */
g_next_beacon_time = g_now_seconds + LIVESYNC_FIRST_BEACON_DELAY;
#endif /* WANT_SYNC_SCRAPE */
g_next_packet_time = g_now_seconds + LIVESYNC_MAXDELAY;
pthread_create( &thread_id, NULL, livesync_worker, NULL );
}
void livesync_deinit() {
if( g_livesync_socket_in != -1 )
close( g_livesync_socket_in );
if( g_livesync_socket_out != -1 )
close( g_livesync_socket_out );
if( g_socket_in != -1 )
close( g_socket_in );
if( g_socket_out != -1 )
close( g_socket_out );
pthread_cancel( thread_id );
}
@@ -69,104 +119,292 @@ void livesync_deinit() {
void livesync_bind_mcast( char *ip, uint16_t port) {
char tmpip[4] = {0,0,0,0};
if( g_livesync_socket_in != -1 )
if( g_socket_in != -1 )
exerr("Error: Livesync listen ip specified twice.");
if( ( g_livesync_socket_in = socket_udp4( )) < 0)
if( ( g_socket_in = socket_udp4( )) < 0)
exerr("Error: Cant create live sync incoming socket." );
ndelay_off(g_livesync_socket_in);
ndelay_off(g_socket_in);
if( socket_bind4_reuse( g_livesync_socket_in, tmpip, port ) == -1 )
if( socket_bind4_reuse( g_socket_in, tmpip, port ) == -1 )
exerr("Error: Cant bind live sync incoming socket." );
if( socket_mcjoin4( g_livesync_socket_in, groupip_1, ip ) )
if( socket_mcjoin4( g_socket_in, groupip_1, ip ) )
exerr("Error: Cant make live sync incoming socket join mcast group.");
if( ( g_livesync_socket_out = socket_udp4()) < 0)
if( ( g_socket_out = socket_udp4()) < 0)
exerr("Error: Cant create live sync outgoing socket." );
if( socket_bind4_reuse( g_livesync_socket_out, ip, port ) == -1 )
if( socket_bind4_reuse( g_socket_out, ip, port ) == -1 )
exerr("Error: Cant bind live sync outgoing socket." );
socket_mcttl4(g_livesync_socket_out, 1);
socket_mcloop4(g_livesync_socket_out, 0);
socket_mcttl4(g_socket_out, 1);
socket_mcloop4(g_socket_out, 0);
}
static void livesync_issuepacket( ) {
socket_send4(g_livesync_socket_out, (char*)livesync_outbuffer_start, livesync_outbuffer_pos - livesync_outbuffer_start,
static void livesync_issue_peersync( ) {
socket_send4(g_socket_out, (char*)g_peerbuffer_start, g_peerbuffer_pos - g_peerbuffer_start,
groupip_1, LIVESYNC_PORT);
livesync_outbuffer_pos = livesync_outbuffer_start + sizeof( g_tracker_id );
livesync_lastpacket_time = g_now_seconds;
g_peerbuffer_pos = g_peerbuffer_start + sizeof( g_tracker_id ) + sizeof( uint32_t );
g_next_packet_time = g_now_seconds + LIVESYNC_MAXDELAY;
}
/* Inform live sync about whats going on. */
void livesync_tell( ot_hash * const info_hash, const ot_peer * const peer ) {
int i;
for(i=0;i<20;i+=4) WRITE32(livesync_outbuffer_pos,i,READ32(info_hash,i));
WRITE32(livesync_outbuffer_pos,20,READ32(peer,0));
WRITE32(livesync_outbuffer_pos,24,READ32(peer,4));
livesync_outbuffer_pos += 28;
if( livesync_outbuffer_pos >= livesync_outbuffer_highwater )
livesync_issuepacket();
static void livesync_handle_peersync( ssize_t datalen ) {
int off = sizeof( g_tracker_id ) + sizeof( uint32_t );
/* Now basic sanity checks have been done on the live sync packet
We might add more testing and logging. */
while( off + (ssize_t)sizeof( ot_hash ) + (ssize_t)sizeof( ot_peer ) <= datalen ) {
ot_peer *peer = (ot_peer*)(g_inbuffer + off + sizeof(ot_hash));
ot_hash *hash = (ot_hash*)(g_inbuffer + off);
if( !g_opentracker_running ) return;
if( OT_PEERFLAG(peer) & PEER_FLAG_STOPPED )
remove_peer_from_torrent(hash, peer, NULL, FLAG_MCA );
else
add_peer_to_torrent( hash, peer, FLAG_MCA );
off += sizeof( ot_hash ) + sizeof( ot_peer );
}
stats_issue_event(EVENT_SYNC, 0, datalen / ((ssize_t)sizeof( ot_hash ) + (ssize_t)sizeof( ot_peer )));
}
#ifdef WANT_SYNC_SCRAPE
void livesync_issue_beacon( ) {
size_t torrent_count = mutex_get_torrent_count();
uint8_t beacon[ sizeof(g_tracker_id) + sizeof(uint32_t) + sizeof( uint64_t ) ];
memmove( beacon, &g_tracker_id, sizeof( g_tracker_id ) );
uint32_pack_big( (char*)beacon + sizeof( g_tracker_id ), OT_SYNC_SCRAPE_BEACON);
uint32_pack_big( (char*)beacon + sizeof( g_tracker_id ) + sizeof(uint32_t), (uint32_t)((uint64_t)(torrent_count)>>32) );
uint32_pack_big( (char*)beacon + sizeof( g_tracker_id ) + 2 * sizeof(uint32_t), (uint32_t)torrent_count );
socket_send4(g_socket_out, (char*)beacon, sizeof(beacon), groupip_1, LIVESYNC_PORT);
}
void livesync_handle_beacon( ssize_t datalen ) {
size_t torrent_count_local, torrent_count_remote;
if( datalen != sizeof(g_tracker_id) + sizeof(uint32_t) + sizeof( uint64_t ) )
return;
torrent_count_local = mutex_get_torrent_count();
torrent_count_remote = (size_t)(((uint64_t)uint32_read_big((char*)g_inbuffer+sizeof( g_tracker_id ) + sizeof(uint32_t))) << 32);
torrent_count_remote |= (size_t)uint32_read_big((char*)g_inbuffer+sizeof( g_tracker_id ) + 2 * sizeof(uint32_t));
/* Empty tracker is useless */
if( !torrent_count_remote ) return;
if( ((double)torrent_count_local ) / ((double)torrent_count_remote) < LIVESYNC_INQUIRE_THRESH) {
if( !g_next_inquire_time ) {
g_next_inquire_time = g_now_seconds + 2 * LIVESYNC_BEACON_INTERVAL;
g_inquire_remote_count = 0;
}
if( torrent_count_remote > g_inquire_remote_count ) {
g_inquire_remote_count = torrent_count_remote;
memmove( &g_inquire_remote_host, g_inbuffer, sizeof( g_tracker_id ) );
}
}
}
void livesync_issue_inquire( ) {
uint8_t inquire[ sizeof(g_tracker_id) + sizeof(uint32_t) + sizeof(g_tracker_id)];
memmove( inquire, &g_tracker_id, sizeof( g_tracker_id ) );
uint32_pack_big( (char*)inquire + sizeof( g_tracker_id ), OT_SYNC_SCRAPE_INQUIRE);
memmove( inquire + sizeof(g_tracker_id) + sizeof(uint32_t), &g_inquire_remote_host, sizeof( g_tracker_id ) );
socket_send4(g_socket_out, (char*)inquire, sizeof(inquire), groupip_1, LIVESYNC_PORT);
}
void livesync_handle_inquire( ssize_t datalen ) {
if( datalen != sizeof(g_tracker_id) + sizeof(uint32_t) + sizeof(g_tracker_id) )
return;
/* If it isn't us, they're inquiring, ignore inquiry */
if( memcmp( &g_tracker_id, g_inbuffer, sizeof( g_tracker_id ) ) )
return;
/* Start scrape tell on next ticker */
if( !g_inquire_inprogress ) {
g_inquire_inprogress = 1;
g_inquire_bucket = 0;
}
}
void livesync_issue_tell( ) {
int packets_to_send = LIVESYNC_OUTGOING_MAXPACKETS_SCRAPE;
while( packets_to_send > 0 && g_inquire_bucket < OT_BUCKET_COUNT ) {
ot_vector *torrents_list = mutex_bucket_lock( g_inquire_bucket );
unsigned int j;
for( j=0; j<torrents_list->size; ++j ) {
ot_torrent *torrent = (ot_torrent*)(torrents_list->data) + j;
memmove(g_scrapebuffer_pos, torrent->hash, sizeof(ot_hash));
g_scrapebuffer_pos += sizeof(ot_hash);
uint32_pack_big( (char*)g_scrapebuffer_pos , (uint32_t)(g_now_minutes - torrent->peer_list->base ));
uint32_pack_big( (char*)g_scrapebuffer_pos + 4, (uint32_t)((uint64_t)(torrent->peer_list->down_count)>>32) );
uint32_pack_big( (char*)g_scrapebuffer_pos + 8, (uint32_t)torrent->peer_list->down_count );
g_scrapebuffer_pos += 12;
if( g_scrapebuffer_pos >= g_scrapebuffer_highwater ) {
socket_send4(g_socket_out, (char*)g_scrapebuffer_start, g_scrapebuffer_pos - g_scrapebuffer_start, groupip_1, LIVESYNC_PORT);
g_scrapebuffer_pos = g_scrapebuffer_start + sizeof( g_tracker_id ) + sizeof( uint32_t);
--packets_to_send;
}
}
mutex_bucket_unlock( g_inquire_bucket++, 0 );
if( !g_opentracker_running )
return;
}
if( g_inquire_bucket == OT_BUCKET_COUNT ) {
socket_send4(g_socket_out, (char*)g_scrapebuffer_start, g_scrapebuffer_pos - g_scrapebuffer_start, groupip_1, LIVESYNC_PORT);
g_inquire_inprogress = 0;
}
}
void livesync_handle_tell( ssize_t datalen ) {
int off = sizeof( g_tracker_id ) + sizeof( uint32_t );
/* Some instance is in progress of telling. Our inquiry was successful.
Don't ask again until we see next beacon. */
g_next_inquire_time = 0;
/* Don't cause any new inquiries during another tracker's tell */
if( g_next_beacon_time - g_now_seconds < LIVESYNC_BEACON_INTERVAL )
g_next_beacon_time = g_now_seconds + LIVESYNC_BEACON_INTERVAL;
while( off + sizeof(ot_hash) + 12 <= (size_t)datalen ) {
ot_hash *hash = (ot_hash*)(g_inbuffer+off);
ot_vector *torrents_list = mutex_bucket_lock_by_hash(hash);
size_t down_count_remote;
int exactmatch;
ot_torrent * torrent = vector_find_or_insert(torrents_list, hash, sizeof(ot_hash), OT_HASH_COMPARE_SIZE, &exactmatch);
if( !torrent ) {
mutex_bucket_unlock_by_hash( hash, 0 );
continue;
}
if( !exactmatch ) {
/* Create a new torrent entry, then */
int i; for(i=0;i<20;i+=4) WRITE32(&torrent->hash,i,READ32(hash,i));
if( !( torrent->peer_list = malloc( sizeof (ot_peerlist) ) ) ) {
vector_remove_torrent( torrents_list, torrent );
mutex_bucket_unlock_by_hash( hash, 0 );
continue;
}
byte_zero( torrent->peer_list, sizeof( ot_peerlist ) );
torrent->peer_list->base = g_now_minutes - uint32_read_big((char*)g_inbuffer+off+sizeof(ot_hash));
}
down_count_remote = (size_t)(((uint64_t)uint32_read_big((char*)g_inbuffer+off+sizeof( ot_hash ) + sizeof(uint32_t))) << 32);
down_count_remote |= (size_t) uint32_read_big((char*)g_inbuffer+off+sizeof( ot_hash ) + 2 * sizeof(uint32_t));
if( down_count_remote > torrent->peer_list->down_count )
torrent->peer_list->down_count = down_count_remote;
/* else
We might think of sending a tell packet, if we have a much larger downloaded count
*/
mutex_bucket_unlock( g_inquire_bucket++, exactmatch?0:1 );
if( !g_opentracker_running )
return;
off += sizeof(ot_hash) + 12;
}
}
#endif /* WANT_SYNC_SCRAPE */
/* Tickle the live sync module from time to time, so no events get
stuck when there's not enough traffic to fill udp packets fast
enough */
void livesync_ticker( ) {
if( ( g_now_seconds - livesync_lastpacket_time > LIVESYNC_MAXDELAY) &&
( livesync_outbuffer_pos > livesync_outbuffer_start + sizeof( g_tracker_id ) ) )
livesync_issuepacket();
/* livesync_issue_peersync sets g_next_packet_time */
if( g_now_seconds > g_next_packet_time &&
g_peerbuffer_pos > g_peerbuffer_start + sizeof( g_tracker_id ) )
livesync_issue_peersync();
#ifdef WANT_SYNC_SCRAPE
/* Send first beacon after running at least LIVESYNC_FIRST_BEACON_DELAY
seconds and not more often than every LIVESYNC_BEACON_INTERVAL seconds */
if( g_now_seconds > g_next_beacon_time ) {
livesync_issue_beacon( );
g_next_beacon_time = g_now_seconds + LIVESYNC_BEACON_INTERVAL;
}
/* If we're interested in an inquiry and waited long enough to see all
tracker's beacons, go ahead and inquire */
if( g_next_inquire_time && g_now_seconds > g_next_inquire_time ) {
livesync_issue_inquire();
/* If packet gets lost, ask again after LIVESYNC_BEACON_INTERVAL */
g_next_inquire_time = g_now_seconds + LIVESYNC_BEACON_INTERVAL;
}
/* If we're in process of telling, let's tell. */
if( g_inquire_inprogress )
livesync_issue_tell( );
#endif /* WANT_SYNC_SCRAPE */
}
/* Inform live sync about whats going on. */
void livesync_tell( ot_hash * const info_hash, const ot_peer * const peer ) {
unsigned int i;
for(i=0;i<sizeof(ot_hash)/4;i+=4) WRITE32(g_peerbuffer_pos,i,READ32(info_hash,i));
WRITE32(g_peerbuffer_pos,sizeof(ot_hash) ,READ32(peer,0));
WRITE32(g_peerbuffer_pos,sizeof(ot_hash)+4,READ32(peer,4));
g_peerbuffer_pos += sizeof(ot_hash)+8;
if( g_peerbuffer_pos >= g_peerbuffer_highwater )
livesync_issue_peersync();
}
static void * livesync_worker( void * args ) {
uint8_t in_ip[4]; uint16_t in_port;
ssize_t datalen;
int off;
args = args;
(void)args;
while( 1 ) {
datalen = socket_recv4(g_livesync_socket_in, (char*)livesync_inbuffer, LIVESYNC_BUFFINSIZE, (char*)in_ip, &in_port);
off = 4;
datalen = socket_recv4(g_socket_in, (char*)g_inbuffer, LIVESYNC_INCOMING_BUFFSIZE, (char*)in_ip, &in_port);
if( datalen <= 0 )
/* Expect at least tracker id and packet type */
if( datalen <= (ssize_t)(sizeof( g_tracker_id ) + sizeof( uint32_t )) )
continue;
if( datalen < (ssize_t)(sizeof( g_tracker_id ) + sizeof( ot_hash ) + sizeof( ot_peer ) ) ) {
/* TODO: log invalid sync packet */
if( !accesslist_isblessed((char*)in_ip, OT_PERMISSION_MAY_LIVESYNC))
continue;
}
if( !accesslist_isblessed((char*)in_ip, OT_PERMISSION_MAY_LIVESYNC)) {
/* TODO: log invalid sync packet */
continue;
}
if( !memcmp( livesync_inbuffer, &g_tracker_id, sizeof( g_tracker_id ) ) ) {
if( !memcmp( g_inbuffer, &g_tracker_id, sizeof( g_tracker_id ) ) ) {
/* TODO: log packet coming from ourselves */
continue;
}
/* Now basic sanity checks have been done on the live sync packet
We might add more testing and logging. */
while( off + (ssize_t)sizeof( ot_hash ) + (ssize_t)sizeof( ot_peer ) <= datalen ) {
ot_peer *peer = (ot_peer*)(livesync_inbuffer + off + sizeof(ot_hash));
ot_hash *hash = (ot_hash*)(livesync_inbuffer + off);
if( !g_opentracker_running )
return NULL;
if( OT_PEERFLAG(peer) & PEER_FLAG_STOPPED )
remove_peer_from_torrent(hash, peer, NULL, FLAG_MCA);
else
add_peer_to_torrent( hash, peer WANT_SYNC_PARAM(1));
off += sizeof( ot_hash ) + sizeof( ot_peer );
switch( uint32_read_big( (char*)g_inbuffer ) ) {
case OT_SYNC_PEER:
livesync_handle_peersync( datalen );
break;
#ifdef WANT_SYNC_SCRAPE
case OT_SYNC_SCRAPE_BEACON:
livesync_handle_beacon( datalen );
break;
case OT_SYNC_SCRAPE_INQUIRE:
livesync_handle_inquire( datalen );
break;
case OT_SYNC_SCRAPE_TELL:
livesync_handle_tell( datalen );
break;
#endif /* WANT_SYNC_SCRAPE */
default:
break;
}
stats_issue_event(EVENT_SYNC, 0, datalen / ((ssize_t)sizeof( ot_hash ) + (ssize_t)sizeof( ot_peer )));
/* Handle outstanding requests */
livesync_ticker( );
}
/* Never returns. */
return NULL;
}

ot_livesync.h (22)

@@ -39,24 +39,21 @@
######## SCRAPE SYNC PROTOCOL ########
########
Each tracker instance SHOULD broadcast a beacon once in every 5 minutes after
running at least 30 minutes:
Each tracker instance SHOULD broadcast a beacon every LIVESYNC_BEACON_INTERVAL
seconds after running at least LIVESYNC_FIRST_BEACON_DELAY seconds:
packet type SYNC_SCRAPE_BEACON
[ 0x0008 0x08 amount of torrents served
]
If a tracker instance receives a beacon from another instance that has more than
twice its torrent count, it asks for a scrape. It must wait for at least 5 + 1
minutes in order to inspect beacons from all tracker instances and chose the one
with most torrents.
its torrent count plus a threshold, it inquires for a scrape. It must wait for at
least 2 * LIVESYNC_BEACON_INTERVAL seconds in order to inspect beacons from all
tracker instances and inquire only the one with most torrents.
If it sees a SYNC_SCRAPE_TELL within that time frame, it's likely, that another
scrape sync is going on. So one tracker instance MUST NOT react to beacons within
5 minutes of last seeing a SYNC_SCRAPE_TELL packet. After a scrape sync all
tracker instances have updated their torrents, so an instance in a "want inquire"
state should wait for the next round of beacons to chose the tracker with most
data again.
scrape sync is going on. It should reset its state to needs no inquiry. It should
be reenabled on the next beacon, if still needed.
packet type SYNC_SCRAPE_INQUIRE
[ 0x0008 0x04 id of tracker instance to inquire
@@ -71,9 +68,10 @@
0x0020 0x08 downloaded count
]*
Each tracker instance that receives a scrape tell, looks up each torrent and
Each tracker instance that receives a SYNC_SCRAPE_TELL, looks up each torrent and
compares downloaded count with its own counter. It can send out its own scrape
tell packets, if it knows more.
tell packets, if it knows more. However to not interrupt a scrape tell, a tracker
should wait LIVESYNC_BEACON_INTERVAL after receiving a scrape tell.
*/
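
To make the beacon layout described in the protocol comment above concrete, here is a hedged decoding sketch. parse_beacon is a made-up helper, not part of the tracker; it assumes the 4 byte tracker id plus 4 byte packet type header used throughout this commit and libowfat's uint32_read_big().

/* Sketch only: decodes a SYNC_SCRAPE_BEACON as described above.
   Assumed layout:
     offset 0x00, 4 bytes: sender tracker id
     offset 0x04, 4 bytes: packet type (OT_SYNC_SCRAPE_BEACON)
     offset 0x08, 8 bytes: amount of torrents served, big endian */
#include <stddef.h>
#include <stdint.h>
#include "uint32.h"   /* libowfat: uint32_read_big() */

static int parse_beacon( uint8_t *pkt, size_t len,
                         uint32_t *sender_id, uint64_t *torrent_count ) {
  if( len != 4 + 4 + 8 )
    return -1;  /* wrong size, not a beacon */
  *sender_id     = uint32_read_big( (char*)pkt );
  *torrent_count = ( (uint64_t)uint32_read_big( (char*)pkt + 8 ) << 32 ) |
                     (uint64_t)uint32_read_big( (char*)pkt + 12 );
  return 0;
}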

ot_mutex.c (16)

@@ -24,6 +24,7 @@
/* Our global all torrents list */
static ot_vector all_torrents[OT_BUCKET_COUNT];
static size_t g_torrent_count;
/* Bucket Magic */
static int bucket_locklist[ OT_MAX_THREADS ];
@@ -87,15 +88,24 @@ ot_vector *mutex_bucket_lock_by_hash( ot_hash *hash ) {
return all_torrents + bucket;
}
void mutex_bucket_unlock( int bucket ) {
void mutex_bucket_unlock( int bucket, int delta_torrentcount ) {
pthread_mutex_lock( &bucket_mutex );
bucket_remove( bucket );
g_torrent_count += delta_torrentcount;
pthread_cond_broadcast( &bucket_being_unlocked );
pthread_mutex_unlock( &bucket_mutex );
}
void mutex_bucket_unlock_by_hash( ot_hash *hash ) {
mutex_bucket_unlock( uint32_read_big( (char*)*hash ) >> OT_BUCKET_COUNT_SHIFT );
void mutex_bucket_unlock_by_hash( ot_hash *hash, int delta_torrentcount ) {
mutex_bucket_unlock( uint32_read_big( (char*)*hash ) >> OT_BUCKET_COUNT_SHIFT, delta_torrentcount );
}
size_t mutex_get_torrent_count( ) {
size_t torrent_count;
pthread_mutex_lock( &bucket_mutex );
torrent_count = g_torrent_count;
pthread_mutex_unlock( &bucket_mutex );
return torrent_count;
}
/* TaskQueue Magic */

ot_mutex.h (6)

@@ -14,8 +14,10 @@ void mutex_deinit( );
ot_vector *mutex_bucket_lock( int bucket );
ot_vector *mutex_bucket_lock_by_hash( ot_hash *hash );
void mutex_bucket_unlock( int bucket );
void mutex_bucket_unlock_by_hash( ot_hash *hash );
void mutex_bucket_unlock( int bucket, int delta_torrentcount );
void mutex_bucket_unlock_by_hash( ot_hash *hash, int delta_torrentcount );
size_t mutex_get_torrent_count();
typedef enum {
TASK_STATS_CONNS = 0x0001,

ot_stats.c (32)

@@ -187,7 +187,7 @@ size_t stats_top10_txt( char * reply ) {
top10s[idx].torrent = (ot_torrent*)(torrents_list->data) + j;
}
}
mutex_bucket_unlock( bucket );
mutex_bucket_unlock( bucket, 0 );
if( !g_opentracker_running )
return 0;
}
@@ -241,7 +241,7 @@ static size_t stats_slash24s_txt( char * reply, size_t amount, uint32_t thresh )
if( !count ) {
count = malloc( sizeof(uint32_t) * NUM_S24S );
if( !count ) {
mutex_bucket_unlock( bucket );
mutex_bucket_unlock( bucket, 0 );
goto bailout_cleanup;
}
byte_zero( count, sizeof( uint32_t ) * NUM_S24S );
@@ -251,7 +251,7 @@ static size_t stats_slash24s_txt( char * reply, size_t amount, uint32_t thresh )
}
}
}
mutex_bucket_unlock( bucket );
mutex_bucket_unlock( bucket, 0 );
if( !g_opentracker_running )
goto bailout_cleanup;
}
@@ -384,7 +384,7 @@ static size_t stats_peers_mrtg( char * reply ) {
ot_peerlist *peer_list = ( ((ot_torrent*)(torrents_list->data))[j] ).peer_list;
peer_count += peer_list->peer_count; seed_count += peer_list->seed_count;
}
mutex_bucket_unlock( bucket );
mutex_bucket_unlock( bucket, 0 );
if( !g_opentracker_running )
return 0;
}
@@ -397,17 +397,7 @@ static size_t stats_peers_mrtg( char * reply ) {
static size_t stats_startstop_mrtg( char * reply )
{
size_t torrent_count = 0;
int bucket;
for( bucket=0; bucket<OT_BUCKET_COUNT; ++bucket )
{
ot_vector *torrents_list = mutex_bucket_lock( bucket );
torrent_count += torrents_list->size;
mutex_bucket_unlock( bucket );
if( !g_opentracker_running )
return 0;
}
size_t torrent_count = mutex_get_torrent_count();
return sprintf( reply, "%zd\n%zd\nopentracker handling %zd torrents\nopentracker",
(size_t)0,
@@ -429,7 +419,7 @@ static size_t stats_toraddrem_mrtg( char * reply )
ot_peerlist *peer_list = ( ((ot_torrent*)(torrents_list->data))[j] ).peer_list;
peer_count += peer_list->peer_count;
}
mutex_bucket_unlock( bucket );
mutex_bucket_unlock( bucket, 0 );
if( !g_opentracker_running )
return 0;
}
@@ -443,15 +433,7 @@ static size_t stats_toraddrem_mrtg( char * reply )
static size_t stats_torrents_mrtg( char * reply )
{
size_t torrent_count = 0;
int bucket;
for( bucket=0; bucket<OT_BUCKET_COUNT; ++bucket )
{
ot_vector *torrents_list = mutex_bucket_lock( bucket );
torrent_count += torrents_list->size;
mutex_bucket_unlock( bucket );
}
size_t torrent_count = mutex_get_torrent_count();
return sprintf( reply, "%zd\n%zd\nopentracker serving %zd torrents\nopentracker",
torrent_count,

ot_udp.c (14)

@@ -41,7 +41,6 @@ static int udp_test_connectionid( const uint32_t * const connid, const char * re
/* UDP implementation according to http://xbtt.sourceforge.net/udp_tracker_protocol.html */
void handle_udp4( int64 serversocket ) {
ot_peer peer;
ot_torrent *torrent;
ot_hash *hash = NULL;
char remoteip[4];
uint32_t *inpacket = (uint32_t*)static_inbuf;
@@ -110,13 +109,8 @@ void handle_udp4( int64 serversocket ) {
if( OT_PEERFLAG( &peer ) & PEER_FLAG_STOPPED ) /* Peer is gone. */
r = remove_peer_from_torrent( hash, &peer, static_outbuf, FLAG_UDP );
else {
torrent = add_peer_to_torrent( hash, &peer WANT_SYNC_PARAM( 0 ) );
if( !torrent )
return; /* XXX maybe send error */
r = 8 + return_peers_for_torrent( torrent, numwant, static_outbuf + 8, FLAG_UDP );
}
else
r = 8 + add_peer_to_torrent_and_return_peers( hash, &peer, FLAG_UDP, numwant, static_outbuf + 8 );
socket_send4( serversocket, static_outbuf, r, remoteip, remoteport );
stats_issue_event( EVENT_ANNOUNCE, FLAG_UDP, r );
@@ -138,4 +132,8 @@ void handle_udp4( int64 serversocket ) {
}
}
void udp_init( ) {
}
const char *g_version_udp_c = "$Source$: $Revision$\n";

trackerlogic.c (73)

@@ -25,6 +25,9 @@
#include "ot_fullscrape.h"
#include "ot_livesync.h"
/* Forward declaration */
size_t return_peers_for_torrent( ot_torrent *torrent, size_t amount, char *reply, PROTO_FLAG proto );
void free_peerlist( ot_peerlist *peer_list ) {
if( peer_list->peers.data ) {
if( OT_PEERLIST_HASBUCKETS( peer_list ) ) {
@@ -43,21 +46,22 @@ extern size_t g_this_peerid_len;
extern char *g_this_peerid_data;
#endif
ot_torrent *add_peer_to_torrent( ot_hash *hash, ot_peer *peer WANT_SYNC_PARAM( int from_sync ) ) {
int exactmatch;
size_t add_peer_to_torrent_and_return_peers( ot_hash *hash, ot_peer *peer, PROTO_FLAG proto, size_t amount, char * reply ) {
int exactmatch, delta_torrentcount = 0;
size_t reply_size;
ot_torrent *torrent;
ot_peer *peer_dest;
ot_vector *torrents_list = mutex_bucket_lock_by_hash( hash );
if( !accesslist_hashisvalid( hash ) ) {
mutex_bucket_unlock_by_hash( hash );
return NULL;
mutex_bucket_unlock_by_hash( hash, 0 );
return 0;
}
torrent = vector_find_or_insert( torrents_list, (void*)hash, sizeof( ot_torrent ), OT_HASH_COMPARE_SIZE, &exactmatch );
if( !torrent ) {
mutex_bucket_unlock_by_hash( hash );
return NULL;
mutex_bucket_unlock_by_hash( hash, 0 );
return 0;
}
if( !exactmatch ) {
@@ -66,11 +70,12 @@ ot_torrent *add_peer_to_torrent( ot_hash *hash, ot_peer *peer WANT_SYNC_PARAM(
if( !( torrent->peer_list = malloc( sizeof (ot_peerlist) ) ) ) {
vector_remove_torrent( torrents_list, torrent );
mutex_bucket_unlock_by_hash( hash );
return NULL;
mutex_bucket_unlock_by_hash( hash, 0 );
return 0;
}
byte_zero( torrent->peer_list, sizeof( ot_peerlist ) );
delta_torrentcount = 1;
} else
clean_single_torrent( torrent );
@@ -79,8 +84,8 @@ ot_torrent *add_peer_to_torrent( ot_hash *hash, ot_peer *peer WANT_SYNC_PARAM(
/* Check for peer in torrent */
peer_dest = vector_find_or_insert_peer( &(torrent->peer_list->peers), peer, &exactmatch );
if( !peer_dest ) {
mutex_bucket_unlock_by_hash( hash );
return NULL;
mutex_bucket_unlock_by_hash( hash, delta_torrentcount );
return 0;
}
/* Tell peer that it's fresh */
@@ -94,7 +99,7 @@ ot_torrent *add_peer_to_torrent( ot_hash *hash, ot_peer *peer WANT_SYNC_PARAM(
if( !exactmatch ) {
#ifdef WANT_SYNC_LIVE
if( !from_sync )
if( proto == FLAG_MCA )
livesync_tell( hash, peer );
else
OT_PEERFLAG( peer ) |= PEER_FLAG_FROM_SYNC;
@@ -122,7 +127,7 @@ ot_torrent *add_peer_to_torrent( ot_hash *hash, ot_peer *peer WANT_SYNC_PARAM(
#ifdef WANT_SYNC_LIVE
/* Won't live sync peers that come back too fast. Only exception:
fresh "completed" reports */
if( !from_sync ) {
if( proto != FLAG_MCA ) {
if( OT_PEERTIME( peer_dest ) > OT_CLIENT_SYNC_RENEW_BOUNDARY ||
( !(OT_PEERFLAG(peer_dest) & PEER_FLAG_COMPLETED ) && (OT_PEERFLAG(peer) & PEER_FLAG_COMPLETED ) ) )
livesync_tell( hash, peer );
@@ -141,14 +146,15 @@ ot_torrent *add_peer_to_torrent( ot_hash *hash, ot_peer *peer WANT_SYNC_PARAM(
*(uint64_t*)(peer_dest) = *(uint64_t*)(peer);
#ifdef WANT_SYNC
/* In order to avoid an unlock/lock between add_peers and return_peers,
we only unlock the bucket if return_peers won't do the job: either
if we return NULL or if no reply is expected, i.e. when called
from livesync code. */
if( from_sync )
mutex_bucket_unlock_by_hash( hash );
if( proto == FLAG_MCA ) {
mutex_bucket_unlock_by_hash( hash, delta_torrentcount );
return 0;
}
#endif
return torrent;
reply_size = return_peers_for_torrent( torrent, amount, reply, proto );
mutex_bucket_unlock_by_hash( &torrent->hash, delta_torrentcount );
return reply_size;
}
static size_t return_peers_all( ot_peerlist *peer_list, char *reply ) {
@@ -220,9 +226,6 @@ static size_t return_peers_selection( ot_peerlist *peer_list, size_t amount, cha
/* Compiles a list of random peers for a torrent
* reply must have enough space to hold 92+6*amount bytes
* does not yet check not to return self
* the bucket, torrent resides in has been locked by the
add_peer call, the ot_torrent * was gathered from, so we
have to unlock it here.
*/
size_t return_peers_for_torrent( ot_torrent *torrent, size_t amount, char *reply, PROTO_FLAG proto ) {
ot_peerlist *peer_list = torrent->peer_list;
@@ -251,13 +254,12 @@ size_t return_peers_for_torrent( ot_torrent *torrent, size_t amount, char *reply
if( proto == FLAG_TCP )
*r++ = 'e';
mutex_bucket_unlock_by_hash( &torrent->hash );
return r - reply;
}
/* Fetches scrape info for a specific torrent */
size_t return_udp_scrape_for_torrent( ot_hash *hash, char *reply ) {
int exactmatch;
int exactmatch, delta_torrentcount = 0;
ot_vector *torrents_list = mutex_bucket_lock_by_hash( hash );
ot_torrent *torrent = binary_search( hash, torrents_list->data, torrents_list->size, sizeof( ot_torrent ), OT_HASH_COMPARE_SIZE, &exactmatch );
@@ -269,13 +271,14 @@ size_t return_udp_scrape_for_torrent( ot_hash *hash, char *reply ) {
if( clean_single_torrent( torrent ) ) {
vector_remove_torrent( torrents_list, torrent );
memset( reply, 0, 12);
delta_torrentcount = -1;
} else {
r[0] = htonl( torrent->peer_list->seed_count );
r[1] = htonl( torrent->peer_list->down_count );
r[2] = htonl( torrent->peer_list->peer_count-torrent->peer_list->seed_count );
}
}
mutex_bucket_unlock_by_hash( hash );
mutex_bucket_unlock_by_hash( hash, 0 );
return 12;
}
@@ -283,6 +286,7 @@ size_t return_udp_scrape_for_torrent( ot_hash *hash, char *reply ) {
size_t return_tcp_scrape_for_torrent( ot_hash *hash_list, int amount, char *reply ) {
char *r = reply;
int exactmatch, i;
int delta_torrentcount = 0;
r += sprintf( r, "d5:filesd" );
@@ -294,6 +298,7 @@ size_t return_tcp_scrape_for_torrent( ot_hash *hash_list, int amount, char *repl
if( exactmatch ) {
if( clean_single_torrent( torrent ) ) {
vector_remove_torrent( torrents_list, torrent );
delta_torrentcount = -1;
} else {
int j;
*r++='2';*r++='0';*r++=':';
@@ -302,7 +307,7 @@ size_t return_tcp_scrape_for_torrent( ot_hash *hash_list, int amount, char *repl
torrent->peer_list->seed_count, torrent->peer_list->down_count, torrent->peer_list->peer_count-torrent->peer_list->seed_count );
}
}
mutex_bucket_unlock_by_hash( hash );
mutex_bucket_unlock_by_hash( hash, delta_torrentcount );
}
*r++ = 'e'; *r++ = 'e';
@@ -346,7 +351,7 @@ size_t remove_peer_from_torrent( ot_hash *hash, ot_peer *peer, char *reply, PROT
reply_size = 20;
}
mutex_bucket_unlock_by_hash( hash );
mutex_bucket_unlock_by_hash( hash, 0 );
return reply_size;
}
@@ -355,12 +360,7 @@ void exerr( char * message ) {
exit( 111 );
}
int trackerlogic_init( const char * const serverdir ) {
if( serverdir && chdir( serverdir ) ) {
fprintf( stderr, "Could not chdir() to %s, because %s\n", serverdir, strerror(errno) );
return -1;
}
void trackerlogic_init( ) {
srandom( time(NULL) );
g_tracker_id = random();
@@ -371,12 +371,10 @@ int trackerlogic_init( const char * const serverdir ) {
accesslist_init( );
livesync_init( );
stats_init( );
return 0;
}
void trackerlogic_deinit( void ) {
int bucket;
int bucket, delta_torrentcount = 0;
size_t j;
/* Free all torrents... */
@@ -386,10 +384,11 @@ void trackerlogic_deinit( void ) {
for( j=0; j<torrents_list->size; ++j ) {
ot_torrent *torrent = ((ot_torrent*)(torrents_list->data)) + j;
free_peerlist( torrent->peer_list );
delta_torrentcount -= 1;
}
free( torrents_list->data );
}
mutex_bucket_unlock( bucket );
mutex_bucket_unlock( bucket, delta_torrentcount );
}
/* Deinitialise background worker threads */

trackerlogic.h (7)

@@ -73,7 +73,6 @@ static const uint8_t PEER_FLAG_LEECHING = 0x00;
#define OT_PEERFLAG(peer) (((uint8_t*)(peer))[6])
#define OT_PEERTIME(peer) (((uint8_t*)(peer))[7])
#define OT_PEER_COMPARE_SIZE ((size_t)6)
#define OT_HASH_COMPARE_SIZE (sizeof(ot_hash))
struct ot_peerlist;
@@ -111,14 +110,14 @@ struct ot_peerlist {
#define WANT_SYNC_PARAM( param )
#endif
int trackerlogic_init( const char * const serverdir );
void trackerlogic_init( );
void trackerlogic_deinit( void );
void exerr( char * message );
/* add_peer_to_torrent does only release the torrent bucket if from_sync is set,
otherwise it is released in return_peers_for_torrent */
size_t return_peers_for_torrent( ot_torrent *torrent, size_t amount, char *reply, PROTO_FLAG proto );
ot_torrent *add_peer_to_torrent( ot_hash *hash, ot_peer *peer WANT_SYNC_PARAM( int from_sync ) );
#define add_peer_to_torrent(hash,peer,proto) add_peer_to_torrent_and_return_peers(hash,peer,proto,0,NULL)
size_t add_peer_to_torrent_and_return_peers( ot_hash *hash, ot_peer *peer, PROTO_FLAG proto, size_t amount, char * reply );
size_t remove_peer_from_torrent( ot_hash *hash, ot_peer *peer, char *reply, PROTO_FLAG proto );
size_t return_tcp_scrape_for_torrent( ot_hash *hash, int amount, char *reply );
size_t return_udp_scrape_for_torrent( ot_hash *hash, char *reply );
