
[Search engine] Change URL getting mechanism in BTDigg

adaptive-webui-19844
ngosang, 9 years ago
parent commit e557634feb
  1. src/searchengine/nova/engines/btdigg.py (48 changed lines)
  2. src/searchengine/nova/engines/versions.txt (2 changed lines)
  3. src/searchengine/nova3/engines/btdigg.py (56 changed lines)
  4. src/searchengine/nova3/engines/versions.txt (2 changed lines)
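
The functional change is the same in both engine modules: the direct urllib2.urlopen / urllib.request.urlopen call is replaced by the search plugins' retrieve_url helper, and the broad per-line try/except is dropped in favour of iterating over data.splitlines(). A minimal sketch of the new fetch-and-parse pattern, written in Python 3; fetch_records and the API constant are names invented for this sketch, and retrieve_url is assumed only to return the response body as text, as the diffs below imply:

    from helpers import retrieve_url   # qBittorrent search-plugin helper, assumed available
    import urllib.parse

    API = 'https://api.btdigg.org/api/public-8e9a50f8335b964f/s01'

    def fetch_records(query, page):
        # Build the query string the same way the engine does: q = search terms, p = page index.
        url = '%s?%s' % (API, urllib.parse.urlencode(dict(q=query, p=page)))
        data = retrieve_url(url)          # body comes back as text, no manual decode needed
        for line in data.splitlines():
            if line.startswith('#'):      # skip comment/annotation lines, as the engine does
                continue
            # Each record is tab-separated: info_hash, name, files, size, dl, seen
            yield line.strip().split('\t')[:6]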

src/searchengine/nova/engines/btdigg.py (48 changed lines)

@@ -1,5 +1,6 @@
-#VERSION: 1.30
+#VERSION: 1.31
 #AUTHORS: BTDigg team (research@btdigg.org)
+# Contributors: Diego de las Heras (ngosang@hotmail.es)

 # GNU GENERAL PUBLIC LICENSE
 # Version 3, 29 June 2007
@@ -16,12 +17,11 @@
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.

+from novaprinter import prettyPrinter
+from helpers import retrieve_url, download_file
 import urllib
-import urllib2
 import sys
-from novaprinter import prettyPrinter

 class btdigg(object):
     url = 'https://btdigg.org'
     name = 'BTDigg'
@@ -37,31 +37,27 @@ class btdigg(object):
         i = 0
         results = 0
         while i < 3:
-            u = urllib2.urlopen('https://api.btdigg.org/api/public-8e9a50f8335b964f/s01?%s' % urllib.urlencode(dict(q = req, p = i)))
-            for line in u:
-                try:
-                    line = line.decode('utf8')
-                    if line.startswith('#'):
-                        continue
+            data = retrieve_url('https://api.btdigg.org/api/public-8e9a50f8335b964f/s01?%s' % urllib.urlencode(dict(q = req, p = i)))
+            for line in data.splitlines():
+                if line.startswith('#'):
+                    continue

                 info_hash, name, files, size, dl, seen = line.strip().split('\t')[:6]
                 name = name.replace('|', '')
                 # BTDigg returns unrelated results, we need to filter
                 if not all(word in name.lower() for word in what_list):
                     continue

                 res = dict(link = 'magnet:?xt=urn:btih:%s&dn=%s' % (info_hash, urllib.quote(name.encode('utf8'))),
                            name = name,
                            size = size,
                            seeds = int(dl),
                            leech = int(dl),
                            engine_url = self.url,
                            desc_link = '%s/search?%s' % (self.url, urllib.urlencode(dict(info_hash = info_hash, q = req))))

                 prettyPrinter(res)
                 results += 1
-                except:
-                    pass

             if results == 0:
                 break
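
For reference, this is how a single record from the API flows through the parsing and filtering shown above. The sample line and the hash in it are invented for illustration, and urllib.parse is used because the sketch targets Python 3:

    import urllib.parse

    # Hypothetical tab-separated record: info_hash, name, files, size (bytes), dl, seen
    sample = '0123456789abcdef0123456789abcdef01234567\tUbuntu 15.04 ISO\t1\t1100000000\t42\t7'

    info_hash, name, files, size, dl, seen = sample.strip().split('\t')[:6]
    name = name.replace('|', '')   # drop '|' from the name, since the nova output format is pipe-delimited

    # Keep the result only if every search term occurs in the name (BTDigg can return unrelated hits).
    what_list = 'ubuntu iso'.split()
    if all(word in name.lower() for word in what_list):
        res = dict(link = 'magnet:?xt=urn:btih:%s&dn=%s' % (info_hash, urllib.parse.quote(name)),
                   name = name, size = size, seeds = int(dl), leech = int(dl))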

src/searchengine/nova/engines/versions.txt (2 changed lines)

@@ -1,4 +1,4 @@
-btdigg: 1.30
+btdigg: 1.31
 demonoid: 1.2
 extratorrent: 2.04
 kickasstorrents: 1.28

src/searchengine/nova3/engines/btdigg.py (56 changed lines)

@@ -1,5 +1,6 @@
-#VERSION: 1.30
+#VERSION: 1.31
 #AUTHORS: BTDigg team (research@btdigg.org)
+# Contributors: Diego de las Heras (ngosang@hotmail.es)

 # GNU GENERAL PUBLIC LICENSE
 # Version 3, 29 June 2007
@@ -16,52 +17,47 @@
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.

-import urllib.request, urllib.parse, urllib.error
-import urllib.request, urllib.error, urllib.parse
-import sys
 from novaprinter import prettyPrinter
+from helpers import retrieve_url, download_file
+import urllib
+import sys

 class btdigg(object):
     url = 'https://btdigg.org'
     name = 'BTDigg'
     supported_categories = {'all': ''}

     def __init__(self):
         pass

     def search(self, what, cat='all'):
         req = urllib.parse.unquote(what)
         what_list = req.split()
         i = 0
         results = 0
         while i < 3:
-            u = urllib.request.urlopen('https://api.btdigg.org/api/public-8e9a50f8335b964f/s01?%s' % urllib.parse.urlencode(dict(q = req, p = i)))
-            for line in u:
-                try:
-                    line = line.decode('utf8')
-                    if line.startswith('#'):
-                        continue
+            data = retrieve_url('https://api.btdigg.org/api/public-8e9a50f8335b964f/s01?%s' % urllib.parse.urlencode(dict(q = req, p = i)))
+            for line in data.splitlines():
+                if line.startswith('#'):
+                    continue

                 info_hash, name, files, size, dl, seen = line.strip().split('\t')[:6]
                 name = name.replace('|', '')
                 # BTDigg returns unrelated results, we need to filter
                 if not all(word in name.lower() for word in what_list):
                     continue

                 res = dict(link = 'magnet:?xt=urn:btih:%s&dn=%s' % (info_hash, urllib.parse.quote(name)),
                            name = name,
                            size = size,
                            seeds = int(dl),
                            leech = int(dl),
                            engine_url = self.url,
                            desc_link = '%s/search?%s' % (self.url, urllib.parse.urlencode(dict(info_hash = info_hash, q = req))))

                 prettyPrinter(res)
                 results += 1
-                except:
-                    pass

             if results == 0:
                 break
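
One small divergence between the two ports: the Python 2 engine percent-encodes the display name as urllib.quote(name.encode('utf8')), while the Python 3 engine passes the str directly, since urllib.parse.quote encodes str input as UTF-8 by default. A quick illustration, with an arbitrary example name and a placeholder hash:

    import urllib.parse

    name = 'Ubuntu 15.04 español'
    # Python 3: quote() accepts str and applies UTF-8 percent-encoding itself.
    dn = urllib.parse.quote(name)
    magnet = 'magnet:?xt=urn:btih:%s&dn=%s' % ('0' * 40, dn)   # '0' * 40 stands in for a real info hash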

src/searchengine/nova3/engines/versions.txt (2 changed lines)

@@ -1,4 +1,4 @@
-btdigg: 1.30
+btdigg: 1.31
 demonoid: 1.2
 extratorrent: 2.04
 kickasstorrents: 1.28
