
Merge pull request #5058 from ngosang/remove_filters

[search engine] Remove filters from Torrentreactor, BTDigg and Demonoid
sledgehammer999 authored 8 years ago, committed by GitHub
commit ce22d031f6
8 changed files:

  1. src/searchengine/nova/engines/btdigg.py (6 changes)
  2. src/searchengine/nova/engines/demonoid.py (4 changes)
  3. src/searchengine/nova/engines/torrentreactor.py (11 changes)
  4. src/searchengine/nova/engines/versions.txt (6 changes)
  5. src/searchengine/nova3/engines/btdigg.py (6 changes)
  6. src/searchengine/nova3/engines/demonoid.py (4 changes)
  7. src/searchengine/nova3/engines/torrentreactor.py (10 changes)
  8. src/searchengine/nova3/engines/versions.txt (6 changes)
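
For context: the filter this PR deletes works the same way in btdigg.py and torrentreactor.py (both the nova and nova3 variants). The query string is split into words, and any result whose name does not contain every word is dropped. A minimal sketch of that pattern, reconstructed from the removed lines in the diffs below (Python 3 / nova3 style; the function name is illustrative):

    import urllib.parse

    def keeps_result(name, what):
        # 'what' is the URL-encoded search string, 'name' is a result title.
        what_list = urllib.parse.unquote(what).split()
        # Keep the result only if every query word appears in the name
        # (the name is lowercased, the query words are not).
        return all(word in name.lower() for word in what_list)

Results failing this check were skipped before being reported; with the filter removed, every result the site returns is passed on to qBittorrent as-is.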

src/searchengine/nova/engines/btdigg.py (6 changes)

@@ -1,4 +1,4 @@
-#VERSION: 1.31
+#VERSION: 1.32
 #AUTHORS: BTDigg team (research@btdigg.org)
 # Contributors: Diego de las Heras (ngosang@hotmail.es)
@@ -33,7 +33,6 @@ class btdigg(object):
     def search(self, what, cat='all'):
         req = urllib.unquote(what)
-        what_list = req.decode('utf8').split()
         i = 0
         results = 0
         while i < 3:
@@ -44,9 +43,6 @@ class btdigg(object):
                 info_hash, name, files, size, dl, seen = line.strip().split('\t')[:6]
                 name = name.replace('|', '')
-                # BTDigg returns unrelated results, we need to filter
-                if not all(word in name.lower() for word in what_list):
-                    continue
                 res = dict(link = 'magnet:?xt=urn:btih:%s&dn=%s' % (info_hash, urllib.quote(name.encode('utf8'))),
                            name = name,

src/searchengine/nova/engines/demonoid.py (4 changes)

@@ -1,4 +1,4 @@
-#VERSION: 1.2
+#VERSION: 1.21
 #AUTHORS: Douman (custparasite@gmx.se)
 #CONTRIBUTORS: Diego de las Heras (ngosang@hotmail.es)
@@ -121,7 +121,7 @@ class demonoid(object):
         """ Performs search """
         #prepare query
         cat = self.supported_categories[cat.lower()]
-        query = "".join((self.url, "/files/?category=", cat, "&subcategory=All&quality=All&seeded=2&external=2&query=", what, "&to=1&uid=0&sort=S"))
+        query = "".join((self.url, "/files/?category=", cat, "&subcategory=All&quality=All&seeded=2&external=2&query=", what, "&uid=0&sort=S"))
         data = retrieve_url(query)

src/searchengine/nova/engines/torrentreactor.py (11 changes)

@@ -1,4 +1,4 @@
-#VERSION: 1.41
+#VERSION: 1.42
 #AUTHORS: Gekko Dam Beer (gekko04@users.sourceforge.net)
 #CONTRIBUTORS: Christophe Dumez (chris@qbittorrent.org)
 #              Bruno Barbieri (brunorex@gmail.com)
@@ -42,14 +42,13 @@ class torrentreactor(object):
         print(download_file(info))

     class SimpleHTMLParser(HTMLParser):
-        def __init__(self, results, url, what):
+        def __init__(self, results, url):
             HTMLParser.__init__(self)
             self.td_counter = None
             self.current_item = None
             self.results = results
             self.id = None
             self.url = url
-            self.what_list = urllib.unquote(what).decode('utf8').split()
             self.torrents_matcher = re_compile("/torrents/\d+.*")
             self.dispatcher = { 'a' : self.start_a, 'td' : self.start_td }
@@ -88,10 +87,6 @@ class torrentreactor(object):
                 self.td_counter = None
                 # add item to results
                 if self.current_item:
-                    self.current_item['name'] = self.current_item['name'].decode('utf8')
-                    # TorrentReactor returns unrelated results, we need to filter
-                    if not all(word in self.current_item['name'].lower() for word in self.what_list):
-                        return
                     self.current_item['engine_url'] = self.url
                     if not self.current_item['seeds'].isdigit():
                         self.current_item['seeds'] = 0
@@ -105,7 +100,7 @@ class torrentreactor(object):
         i = 0
         dat = ''
         results = []
-        parser = self.SimpleHTMLParser(results, self.url, what)
+        parser = self.SimpleHTMLParser(results, self.url)
         while i < 9:
             dat = retrieve_url('%s/torrents-search/%s/%s?sort=seeders.desc&type=all&period=none&categories=%s'%(self.url, what, (i*35), self.supported_categories[cat]))
             parser.feed(dat)

src/searchengine/nova/engines/versions.txt (6 changes)

@@ -1,10 +1,10 @@
-btdigg: 1.31
-demonoid: 1.2
+btdigg: 1.32
+demonoid: 1.21
 extratorrent: 2.04
 kickasstorrents: 1.28
 legittorrents: 2.01
 mininova: 2.02
 piratebay: 2.15
 torlock: 2.0
-torrentreactor: 1.41
+torrentreactor: 1.42
 torrentz: 2.20

src/searchengine/nova3/engines/btdigg.py (6 changes)

@@ -1,4 +1,4 @@
-#VERSION: 1.31
+#VERSION: 1.32
 #AUTHORS: BTDigg team (research@btdigg.org)
 # Contributors: Diego de las Heras (ngosang@hotmail.es)
@@ -33,7 +33,6 @@ class btdigg(object):
     def search(self, what, cat='all'):
         req = urllib.parse.unquote(what)
-        what_list = req.split()
         i = 0
         results = 0
         while i < 3:
@@ -44,9 +43,6 @@ class btdigg(object):
                 info_hash, name, files, size, dl, seen = line.strip().split('\t')[:6]
                 name = name.replace('|', '')
-                # BTDigg returns unrelated results, we need to filter
-                if not all(word in name.lower() for word in what_list):
-                    continue
                 res = dict(link = 'magnet:?xt=urn:btih:%s&dn=%s' % (info_hash, urllib.parse.quote(name)),
                            name = name,

src/searchengine/nova3/engines/demonoid.py (4 changes)

@@ -1,4 +1,4 @@
-#VERSION: 1.2
+#VERSION: 1.21
 #AUTHORS: Douman (custparasite@gmx.se)
 #CONTRIBUTORS: Diego de las Heras (ngosang@hotmail.es)
@@ -121,7 +121,7 @@ class demonoid(object):
         """ Performs search """
         #prepare query
         cat = self.supported_categories[cat.lower()]
-        query = "".join((self.url, "/files/?category=", cat, "&subcategory=All&quality=All&seeded=2&external=2&query=", what, "&to=1&uid=0&sort=S"))
+        query = "".join((self.url, "/files/?category=", cat, "&subcategory=All&quality=All&seeded=2&external=2&query=", what, "&uid=0&sort=S"))
         data = retrieve_url(query)

src/searchengine/nova3/engines/torrentreactor.py (10 changes)

@@ -1,4 +1,4 @@
-#VERSION: 1.41
+#VERSION: 1.42
 #AUTHORS: Gekko Dam Beer (gekko04@users.sourceforge.net)
 #CONTRIBUTORS: Christophe Dumez (chris@qbittorrent.org)
 #              Bruno Barbieri (brunorex@gmail.com)
@@ -42,14 +42,13 @@ class torrentreactor(object):
         print(download_file(info))

     class SimpleHTMLParser(HTMLParser):
-        def __init__(self, results, url, what):
+        def __init__(self, results, url):
             HTMLParser.__init__(self)
             self.td_counter = None
             self.current_item = None
             self.results = results
             self.id = None
             self.url = url
-            self.what_list = parse.unquote(what).split()
             self.torrents_matcher = re_compile("/torrents/\d+.*")
             self.dispatcher = { 'a' : self.start_a, 'td' : self.start_td }
@@ -88,9 +87,6 @@ class torrentreactor(object):
                 self.td_counter = None
                 # add item to results
                 if self.current_item:
-                    # TorrentReactor returns unrelated results, we need to filter
-                    if not all(word in self.current_item['name'].lower() for word in self.what_list):
-                        return
                     self.current_item['engine_url'] = self.url
                     if not self.current_item['seeds'].isdigit():
                         self.current_item['seeds'] = 0
@@ -104,7 +100,7 @@ class torrentreactor(object):
         i = 0
         dat = ''
         results = []
-        parser = self.SimpleHTMLParser(results, self.url, what)
+        parser = self.SimpleHTMLParser(results, self.url)
         while i < 9:
             dat = retrieve_url('%s/torrents-search/%s/%s?sort=seeders.desc&type=all&period=none&categories=%s'%(self.url, what, (i*35), self.supported_categories[cat]))
             parser.feed(dat)

src/searchengine/nova3/engines/versions.txt (6 changes)

@@ -1,10 +1,10 @@
-btdigg: 1.31
-demonoid: 1.2
+btdigg: 1.32
+demonoid: 1.21
 extratorrent: 2.04
 kickasstorrents: 1.28
 legittorrents: 2.01
 mininova: 2.02
 piratebay: 2.15
 torlock: 2.0
-torrentreactor: 1.41
+torrentreactor: 1.42
 torrentz: 2.20
