
Merge pull request #3311 from ngosang/btdiggimp

[search engine] Update BTDigg
adaptive-webui-19844
sledgehammer999, 10 years ago
commit 4854a1b2ee
4 changed files:
  1. src/searchengine/nova/engines/btdigg.py (31 changed lines)
  2. src/searchengine/nova/engines/versions.txt (2 changed lines)
  3. src/searchengine/nova3/engines/btdigg.py (29 changed lines)
  4. src/searchengine/nova3/engines/versions.txt (2 changed lines)

src/searchengine/nova/engines/btdigg.py (31 changed lines)

@@ -1,4 +1,4 @@
-#VERSION: 1.25
+#VERSION: 1.30
 #AUTHORS: BTDigg team (research@btdigg.org)
 # GNU GENERAL PUBLIC LICENSE
@@ -33,26 +33,39 @@ class btdigg(object):
     def search(self, what, cat='all'):
         req = urllib.unquote(what)
-        u = urllib2.urlopen('https://api.btdigg.org/api/public-8e9a50f8335b964f/s01?%s' % (urllib.urlencode(dict(q = req)),))
-        try:
-            for line in u:
-                if line.startswith('#'):
-                    continue
-                info_hash, name, files, size, dl, seen = line.strip().split('\t')[:6]
-                name = name.translate(None, '|')
-                res = dict(link = 'magnet:?xt=urn:btih:%s&dn=%s' % (info_hash, urllib.quote(name)),
-                           name = name,
-                           size = size,
-                           seeds = int(dl),
-                           leech = int(dl),
-                           engine_url = self.url,
-                           desc_link = '%s/search?%s' % (self.url, urllib.urlencode(dict(info_hash = info_hash, q = req)),))
-                prettyPrinter(res)
-        finally:
-            u.close()
+        what_list = req.decode('utf8').split()
+        i = 0
+        results = 0
+        while i < 3:
+            u = urllib2.urlopen('https://api.btdigg.org/api/public-8e9a50f8335b964f/s01?%s' % urllib.urlencode(dict(q = req, p = i)))
+            for line in u:
+                try:
+                    line = line.decode('utf8')
+                    if line.startswith('#'):
+                        continue
+                    info_hash, name, files, size, dl, seen = line.strip().split('\t')[:6]
+                    name = name.replace('|', '')
+                    # BTDigg returns unrelated results, we need to filter
+                    if not all(word in name.lower() for word in what_list):
+                        continue
+                    res = dict(link = 'magnet:?xt=urn:btih:%s&dn=%s' % (info_hash, urllib.quote(name.encode('utf8'))),
+                               name = name,
+                               size = size,
+                               seeds = int(dl),
+                               leech = int(dl),
+                               engine_url = self.url,
+                               desc_link = '%s/search?%s' % (self.url, urllib.urlencode(dict(info_hash = info_hash, q = req))))
+                    prettyPrinter(res)
+                    results += 1
+                except:
+                    pass
+            if results == 0:
+                break
+            i += 1
 if __name__ == "__main__":
     s = btdigg()
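The substantive change in the nova (Python 2) engine is twofold: results are now fetched page by page (up to three pages through the new p parameter), and every torrent name is checked against the query words because, as the new comment notes, BTDigg returns unrelated results. The move from name.translate(None, '|') to name.replace('|', '') goes hand in hand with the added decode('utf8'): the two-argument translate form only exists for byte strings. A minimal standalone sketch of the relevance check (Python 3 syntax; the is_relevant name is illustrative, the plugin inlines the expression):

def is_relevant(name, query):
    # Case-insensitive "every query word must appear in the name" test,
    # equivalent in spirit to the plugin's
    # all(word in name.lower() for word in what_list).
    what_list = query.lower().split()
    return all(word in name.lower() for word in what_list)

print(is_relevant("Ubuntu 14.04 desktop amd64", "ubuntu 14.04"))  # True
print(is_relevant("Some unrelated torrent", "ubuntu 14.04"))      # False

The plugin itself compares the raw query words against name.lower(); the sketch lowercases both sides, which behaves the same whenever the query is already lowercase.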

src/searchengine/nova/engines/versions.txt (2 changed lines)

@@ -1,4 +1,4 @@
-btdigg: 1.25
+btdigg: 1.30
 demonoid: 1.1
 extratorrent: 2.0
 kickasstorrents: 1.28
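Both versions.txt files (this one and the nova3 copy further down) carry the same bump from 1.25 to 1.30, matching the #VERSION header inside btdigg.py, so the two declarations are expected to stay in sync. A hypothetical consistency check, not part of the repository, could look like this (file paths are placeholders):

import re

def header_version(engine_path):
    # Read the "#VERSION: x.yz" header from an engine file.
    with open(engine_path) as f:
        for line in f:
            m = re.match(r'#VERSION:\s*([\d.]+)', line)
            if m:
                return m.group(1)
    return None

def listed_version(versions_path, engine):
    # Read the "engine: x.yz" entry from versions.txt.
    with open(versions_path) as f:
        for line in f:
            if line.startswith(engine + ':'):
                return line.split(':', 1)[1].strip()
    return None

assert header_version('btdigg.py') == listed_version('versions.txt', 'btdigg')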

src/searchengine/nova3/engines/btdigg.py (29 changed lines)

@@ -1,4 +1,4 @@
-#VERSION: 1.25
+#VERSION: 1.30
 #AUTHORS: BTDigg team (research@btdigg.org)
 # GNU GENERAL PUBLIC LICENSE
@@ -33,30 +33,39 @@ class btdigg(object):
     def search(self, what, cat='all'):
         req = urllib.parse.unquote(what)
-        u = urllib.request.urlopen('https://api.btdigg.org/api/public-8e9a50f8335b964f/s01?%s' % (urllib.parse.urlencode(dict(q = req)),))
-        try:
-            for line in u:
-                line = line.decode('utf-8')
-                if line.startswith('#'):
-                    continue
-                info_hash, name, files, size, dl, seen = line.strip().split('\t')[:6]
-                name = name.replace('|', '')
-                res = dict(link = 'magnet:?xt=urn:btih:%s&dn=%s' % (info_hash, urllib.parse.quote(name)),
-                           name = name,
-                           size = size,
-                           seeds = int(dl),
-                           leech = int(dl),
-                           engine_url = self.url,
-                           desc_link = '%s/search?%s' % (self.url, urllib.parse.urlencode(dict(info_hash = info_hash, q = req)),))
-                prettyPrinter(res)
-        finally:
-            u.close()
+        what_list = req.split()
+        i = 0
+        results = 0
+        while i < 3:
+            u = urllib.request.urlopen('https://api.btdigg.org/api/public-8e9a50f8335b964f/s01?%s' % urllib.parse.urlencode(dict(q = req, p = i)))
+            for line in u:
+                try:
+                    line = line.decode('utf8')
+                    if line.startswith('#'):
+                        continue
+                    info_hash, name, files, size, dl, seen = line.strip().split('\t')[:6]
+                    name = name.replace('|', '')
+                    # BTDigg returns unrelated results, we need to filter
+                    if not all(word in name.lower() for word in what_list):
+                        continue
+                    res = dict(link = 'magnet:?xt=urn:btih:%s&dn=%s' % (info_hash, urllib.parse.quote(name)),
+                               name = name,
+                               size = size,
+                               seeds = int(dl),
+                               leech = int(dl),
+                               engine_url = self.url,
+                               desc_link = '%s/search?%s' % (self.url, urllib.parse.urlencode(dict(info_hash = info_hash, q = req))))
+                    prettyPrinter(res)
+                    results += 1
+                except:
+                    pass
+            if results == 0:
+                break
+            i += 1
 if __name__ == "__main__":
     s = btdigg()
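The nova3 (Python 3) engine gets the same treatment, minus the encode/decode juggling that Python 2 needed. The sketch below mirrors the updated flow as a self-contained function: it pages through the API with the p parameter (three pages at most), skips comment rows, strips '|' from names (presumably because the result printer uses that character as a field separator; the diff itself does not say), filters out results that do not contain every query word, and stops paging early only while nothing at all has matched. The fetch_btdigg name, the ValueError-only handler, and returning a list instead of calling prettyPrinter are illustrative choices, not the plugin's code; the endpoint, the tab-separated fields, and the three-page cap come from the diff above.

import urllib.parse
import urllib.request

def fetch_btdigg(query, max_pages=3):
    """Collect result dicts from the BTDigg API, mirroring the updated
    nova3 search(): up to max_pages pages via the 'p' parameter,
    stopping early while no result at all has matched."""
    what_list = query.lower().split()
    results = []
    for page in range(max_pages):
        params = urllib.parse.urlencode(dict(q=query, p=page))
        url = 'https://api.btdigg.org/api/public-8e9a50f8335b964f/s01?%s' % params
        with urllib.request.urlopen(url) as u:
            for raw in u:
                try:
                    line = raw.decode('utf8')
                    if line.startswith('#'):
                        continue  # header/comment rows from the API
                    info_hash, name, files, size, dl, seen = line.strip().split('\t')[:6]
                    name = name.replace('|', '')
                    if not all(word in name.lower() for word in what_list):
                        continue  # BTDigg returns unrelated results; drop them
                    results.append(dict(
                        link='magnet:?xt=urn:btih:%s&dn=%s' % (info_hash, urllib.parse.quote(name)),
                        name=name, size=size, seeds=int(dl), leech=int(dl)))
                except ValueError:
                    pass  # malformed row; the plugin uses a bare except here
        if not results:
            break  # like the plugin's 'if results == 0' check
    return results

Note that, as in the diff, both seeds and leech are filled from the same dl column.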

src/searchengine/nova3/engines/versions.txt (2 changed lines)

@@ -1,4 +1,4 @@
-btdigg: 1.25
+btdigg: 1.30
 demonoid: 1.1
 extratorrent: 2.0
 kickasstorrents: 1.28
