\d+)')
- for match in section_re.finditer(dat):
- txt = match.group(0)
- m = torrent_re.search(txt)
- if m:
- torrent_infos = m.groupdict()
- torrent_infos['name'] = re.sub('?font.*?>', '', torrent_infos['name'])
- torrent_infos['engine_url'] = self.url
- torrent_infos['size'] = anySizeToBytes(torrent_infos['size'])
- torrent_infos['link'] = self.url+torrent_infos['link']
- prettyPrinter(torrent_infos)
-
-class MegaNova(object):
- url = 'http://www.meganova.org'
-
- def search(self, what):
- dat = urllib.urlopen(self.url+'/find/%s/4/1.html'%what).read().decode('utf8', 'replace')
- print 'url is ' + self.url+'/find/%s/4/1.html'%what
- # I know it's not very readable, but the SGML parser feels in pain
-
- section_re = re.compile('(?s)/torrent/.*?)".*?'
- '(?P.*?).*?'
- '>(?P[0-9.]+\s+.B).*?'
- '>(?P\d+)<.*?'
- '>(?P\d+)<')
-
- for match in section_re.finditer(dat):
- txt = match.group(0)
- m = torrent_re.search(txt)
- if m:
- torrent_infos = m.groupdict()
- torrent_infos['engine_url'] = self.url
- torrent_infos['size'] = anySizeToBytes(torrent_infos['size'])
- torrent_infos['link'] = self.url+torrent_infos['link']
- prettyPrinter(torrent_infos)
-
-class Reactor(object):
- url = 'http://www.torrentreactor.net'
-
- class SimpleSGMLParser(sgmllib.SGMLParser):
- def __init__(self, results, url, *args):
- sgmllib.SGMLParser.__init__(self)
- self.td_counter = None
- self.current_item = None
- self.results = results
- self.id = None
- self.url = url
-
- def start_a(self, attr):
- params = dict(attr)
- if params['href'].startswith('http://dl.torrentreactor.net/download.php'):
- self.current_item = {}
- self.td_counter = 0
- equal = params['href'].find("=")
- amp = params['href'].find("&", equal+1)
- self.id = str(int(params['href'][equal+1:amp]))
-
- def handle_data(self, data):
- if self.td_counter == 0:
- if not self.current_item.has_key('name'):
- self.current_item['name'] = ''
- self.current_item['name']+= data.strip()
- if self.td_counter == 1:
- if not self.current_item.has_key('size'):
- self.current_item['size'] = ''
- self.current_item['size']+= data.strip()
- elif self.td_counter == 2:
- if not self.current_item.has_key('seeds'):
- self.current_item['seeds'] = ''
- self.current_item['seeds']+= data.strip()
- elif self.td_counter == 3:
- if not self.current_item.has_key('leech'):
- self.current_item['leech'] = ''
- self.current_item['leech']+= data.strip()
-
- def start_td(self,attr):
- if isinstance(self.td_counter,int):
- self.td_counter += 1
- if self.td_counter > 7:
- self.td_counter = None
- # add item to results
- if self.current_item:
- self.current_item['link']='http://download.torrentreactor.net/download.php?id=%s&name=%s'%(self.id, urllib.quote(self.current_item['name']))
- self.current_item['engine_url'] = self.url
- self.current_item['size']= anySizeToBytes(self.current_item['size'])
- if not self.current_item['seeds'].isdigit():
- self.current_item['seeds'] = 0
- if not self.current_item['leech'].isdigit():
- self.current_item['leech'] = 0
- prettyPrinter(self.current_item)
- self.has_results = True
- self.results.append('a')
-
- def __init__(self):
- self.results = []
- self.parser = self.SimpleSGMLParser(self.results, self.url)
-
- def search(self, what):
- i = 0
- while True:
- results = []
- parser = self.SimpleSGMLParser(results, self.url)
- dat = urllib.urlopen(self.url+'/search.php?search=&words=%s&cid=&sid=&type=2&orderby=a.seeds&asc=0&skip=%s'%(what,(i*35))).read().decode('utf-8', 'replace')
- parser.feed(dat)
- parser.close()
- if len(results) <= 0:
- break
- i += 1
-
-class Isohunt(object):
- url = 'http://isohunt.com'
-
- class SimpleSGMLParser(sgmllib.SGMLParser):
- def __init__(self, results, url, *args):
- sgmllib.SGMLParser.__init__(self)
- self.td_counter = None
- self.current_item = None
- self.results = results
- self.url = url
-
- def start_tr(self, attr):
- params = dict(attr)
- if 'onclick' in params:
- Durl='http://isohunt.com/download'
- self.current_item = {}
- self.td_counter = 0
- try:
- self.current_item['link'] = '%s/%s'%(Durl, params['onclick'].split('/')[2])
- except IndexError:
- self.current_item['link'] = None
-
- def handle_data(self, data):
- if self.td_counter == 3:
- if not self.current_item.has_key('name'):
- self.current_item['name'] = ''
- self.current_item['name']+= data.strip()
- if self.td_counter == 4:
- if not self.current_item.has_key('size'):
- self.current_item['size'] = ''
- self.current_item['size']+= data.strip()
- if self.td_counter == 5:
- if not self.current_item.has_key('seeds'):
- self.current_item['seeds'] = ''
- self.current_item['seeds']+= data.strip()
- if self.td_counter == 6:
- if not self.current_item.has_key('leech'):
- self.current_item['leech'] = ''
- self.current_item['leech']+= data.strip()
-
- def start_td(self,attr):
- if isinstance(self.td_counter,int):
- self.td_counter += 1
- if self.td_counter > 7:
- self.td_counter = None
- # add item to results
- if self.current_item:
- self.current_item['engine_url'] = self.url
- self.current_item['size']= anySizeToBytes(self.current_item['size'])
- if not self.current_item.has_key('seeds') or not self.current_item['seeds'].isdigit():
- self.current_item['seeds'] = 0
- if not self.current_item.has_key('leech') or not self.current_item['leech'].isdigit():
- self.current_item['leech'] = 0
- if self.current_item['link'] is not None:
- prettyPrinter(self.current_item)
- self.results.append('a')
-
- def __init__(self):
- self.results = []
- self.parser = self.SimpleSGMLParser(self.results, self.url)
-
- def search(self, what):
- i = 1
- while True:
- results = []
- parser = self.SimpleSGMLParser(results, self.url)
- dat = urllib.urlopen(self.url+'/torrents.php?ihq=%s&ihp=%s'%(what,i)).read().decode('utf-8', 'replace')
- parser.feed(dat)
- parser.close()
- if len(results) <= 0:
- break
- i += 1
-
-class EngineLauncher(threading.Thread):
- def __init__(self, engine, what):
- threading.Thread.__init__(self)
- self.engine = engine
- self.what = what
- def run(self):
- self.engine.search(self.what)
-
-if __name__ == '__main__':
- available_engines_list = BtJunkie, MegaNova, Mininova, PirateBay, Reactor, Isohunt
-
- if len(sys.argv) < 2:
- raise SystemExit('./nova.py [all|engine1[,engine2]*] \navailable engines: %s'%
- (','.join(e.__name__ for e in available_engines_list)))
-
- engines_list = [e.lower() for e in sys.argv[1].strip().split(',')]
-
- if 'all' in engines_list:
- engines_list = [e.__name__.lower() for e in available_engines_list]
-
- selected_engines = set(e for e in available_engines_list if e.__name__.lower() in engines_list)
-
- if not selected_engines:
- selected_engines = [BtJunkie]
- what = '+'.join(sys.argv[1:])
- else:
- what = '+'.join(sys.argv[2:])
-
- threads = []
- for engine in selected_engines:
- try:
- if THREADED:
- l = EngineLauncher( engine(), what )
- threads.append(l)
- l.start()
- else:
- engine().search(what)
- except:
- if STANDALONE:
- traceback.print_exc()
- if THREADED:
- for t in threads:
- t.join()
-
- best_ratios.sort(lambda a,b : cmp(a['seeds']-a['leech'], b['seeds']-b['leech']))
-
- max_results = 10
-
- print "########## TOP %d RATIOS ##########"%max_results
-
- for br in best_ratios:
- if br['seeds'] > 1: # avoid those with 0 leech to be max rated
- prettyPrinter(br)
- max_results -= 1
- if not max_results:
- break
diff --git a/src/search_engine/nova2.py b/src/search_engine/nova2.py
new file mode 100755
index 000000000..d0bdcb4e8
--- /dev/null
+++ b/src/search_engine/nova2.py
@@ -0,0 +1,90 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+#VERSION: 1.00
+
+# Author:
+# Fabien Devaux
+# Contributors:
+# Christophe Dumez (qbittorrent integration)
+# Thanks to gab #gcu @ irc.freenode.net (multipage support on PirateBay)
+# Thanks to Elias (torrentreactor and isohunt search engines)
+#
+# Licence: BSD
+
+import sys
+import threading
+import os
+import glob
+
+THREADED = True
+
+################################################################################
+# Every engine should have a "search" method taking
+# a space-free string as parameter (ex. "family+guy")
+# it should call prettyPrinter() with a dict as parameter.
+# The keys in the dict must be: link,name,size,seeds,leech,engine_url
+# As a convention, try to list results by decreasing number of seeds or similar
+################################################################################
+
+supported_engines = []
+
+engines = glob.glob(os.path.join(os.path.dirname(__file__), 'engines','*.py'))
+for engine in engines:
+ e = engine.split(os.sep)[-1][:-3]
+ if len(e.strip()) == 0: continue
+ if e.startswith('_'): continue
+ try:
+ exec "from engines.%s import %s"%(e,e)
+ supported_engines.append(e)
+ except:
+ pass
+
+class EngineLauncher(threading.Thread):
+ def __init__(self, engine, what):
+ threading.Thread.__init__(self)
+ self.engine = engine
+ self.what = what
+ def run(self):
+ self.engine.search(self.what)
+
+if __name__ == '__main__':
+ if len(sys.argv) < 2:
+ raise SystemExit('./nova.py [all|engine1[,engine2]*] \navailable engines: %s'%
+ (','.join(supported_engines)))
+
+ if len(sys.argv) == 2:
+ if sys.argv[1] == "--supported_engines":
+ print ','.join(supported_engines)
+ sys.exit(0)
+ elif sys.argv[1] == "--supported_engines_infos":
+ res = []
+ for e in supported_engines:
+ exec "res.append(%s().name+'|'+%s().url)"%(e,e)
+ print ','.join(res)
+ sys.exit(0)
+ else:
+ raise SystemExit('./nova.py [all|engine1[,engine2]*] \navailable engines: %s'%
+ (','.join(supported_engines)))
+
+ engines_list = [e.lower() for e in sys.argv[1].strip().split(',')]
+
+ if 'all' in engines_list:
+ engines_list = supported_engines
+
+ what = '+'.join(sys.argv[2:])
+
+ threads = []
+ for engine in engines_list:
+ try:
+ if THREADED:
+ exec "l = EngineLauncher(%s(), what)" % engine
+ threads.append(l)
+ l.start()
+ else:
+ engine().search(what)
+ except:
+ pass
+ if THREADED:
+ for t in threads:
+ t.join()
diff --git a/src/search_engine/novaprinter.py b/src/search_engine/novaprinter.py
new file mode 100644
index 000000000..47e6e7561
--- /dev/null
+++ b/src/search_engine/novaprinter.py
@@ -0,0 +1,27 @@
+def prettyPrinter(dictionnary):
+ dictionnary['size'] = anySizeToBytes(dictionnary['size'])
+ print "%(link)s|%(name)s|%(size)s|%(seeds)s|%(leech)s|%(engine_url)s" % dictionnary
+
+def anySizeToBytes(size_string):
+ """
+ Convert a string like '1 KB' to '1024' (bytes)
+ """
+ # separate integer from unit
+ try:
+ size, unit = size_string.split()
+ except (ValueError, TypeError):
+ try:
+ size = size_string.strip()
+ unit = ''.join([c for c in size if c.isalpha()])
+ size = size[:-len(unit)]
+ except(ValueError, TypeError):
+ return -1
+
+ size = float(size)
+ short_unit = unit.upper()[0]
+
+ # convert
+ units_dict = { 'T': 40, 'G': 30, 'M': 20, 'K': 10 }
+ if units_dict.has_key( short_unit ):
+ size = size * 2**units_dict[short_unit]
+ return int(size)
\ No newline at end of file
diff --git a/src/src.pro b/src/src.pro
index 28f3d4365..2fbd07f0d 100644
--- a/src/src.pro
+++ b/src/src.pro
@@ -150,12 +150,13 @@ HEADERS += GUI.h misc.h options_imp.h about_imp.h \
bittorrent.h searchEngine.h \
rss.h rss_imp.h FinishedTorrents.h \
allocationDlg.h FinishedListDelegate.h \
- qtorrenthandle.h downloadingTorrents.h
+ qtorrenthandle.h downloadingTorrents.h \
+ engineSelectDlg.h
FORMS += MainWindow.ui options.ui about.ui \
properties.ui createtorrent.ui preview.ui \
login.ui downloadFromURL.ui addTorrentDialog.ui \
search.ui rss.ui seeding.ui bandwidth_limit.ui \
- download.ui
+ download.ui engineSelect.ui
SOURCES += GUI.cpp \
main.cpp \
options_imp.cpp \
@@ -166,5 +167,6 @@ SOURCES += GUI.cpp \
rss_imp.cpp \
FinishedTorrents.cpp \
qtorrenthandle.cpp \
- downloadingTorrents.cpp
+ downloadingTorrents.cpp \
+ engineSelectDlg.cpp
diff --git a/src/update_qrc_files.py b/src/update_qrc_files.py
index 03e6dcc2c..e93a9dce1 100755
--- a/src/update_qrc_files.py
+++ b/src/update_qrc_files.py
@@ -20,6 +20,27 @@ lang_file = open('lang.qrc', 'w')
lang_file.write(output)
lang_file.close()
+# update search_engine directory
+search_list = []
+for root, dirs, files in os.walk('search_engine'):
+ for file in files:
+ if file.startswith("__"):
+ continue
+ if splitext(file)[-1] in ('.py', '.png'):
+ search_list.append(join(root, file))
+
+output = '''
+
+'''
+for file in search_list:
+ output += ' %s'%(file)
+ output += os.linesep
+output += '''
+'''
+search_file = open('search.qrc', 'w')
+search_file.write(output)
+search_file.close()
+
# update icons files directory
icons_list = []
for root, dirs, files in os.walk('Icons'):
|