
- Fix Mininova search engine plugin

adaptive-webui-19844
Christophe Dumez committed 15 years ago, commit 67d60766c1

Changed files:
  1. src/search_engine/engines/mininova.py (159 lines changed)
  2. src/search_engine/engines/versions.txt (2 lines changed)

src/search_engine/engines/mininova.py (159 lines changed)

@@ -1,5 +1,5 @@
-#VERSION: 1.32
-#AUTHORS: Fabien Devaux (fab@gnux.info)
+#VERSION: 1.40
+#AUTHORS: Christophe Dumez (chris@qbittorrent.org)
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are met:
@@ -27,88 +27,85 @@
 from novaprinter import prettyPrinter
 from helpers import retrieve_url, download_file
-from xml.dom import minidom
+import sgmllib
 import re
 
 class mininova(object):
     # Mandatory properties
     url = 'http://www.mininova.org'
     name = 'Mininova'
     supported_categories = {'all': '0', 'movies': '4', 'tv': '8', 'music': '5', 'games': '3', 'anime': '1', 'software': '7', 'pictures': '6', 'books': '2'}
 
-    def download_torrent(self, info):
-        print download_file(info)
-
-    def search(self, what, cat='all'):
-        def get_link(lnk):
-            lnks = lnk.getElementsByTagName('a')
-            i = 0
-            try:
-                while not lnks.item(i).attributes.get('href').value.startswith('/get'):
-                    i += 1
-            except:
-                return None
-            return (self.url+lnks.item(i).attributes.get('href').value).strip()
-
-        def get_name(lnk):
-            lnks = lnk.getElementsByTagName('a')
-            i = 0
-            try:
-                while not lnks.item(i).attributes.get('href').value.startswith('/tor'):
-                    i += 1
-            except:
-                return None
-            name = ""
-            for node in lnks[i].childNodes:
-                if node.hasChildNodes():
-                    name += node.firstChild.toxml()
-                else:
-                    name += node.toxml()
-            return re.sub('<[a-zA-Z\/][^>]*>', '', name)
-
-        def get_text(txt):
-            if txt.nodeType == txt.TEXT_NODE:
-                return txt.toxml()
-            else:
-                return ''.join([ get_text(n) for n in txt.childNodes])
-
-        if cat == 'all':
-            self.table_items = 'added cat name size seeds leech'.split()
-        else:
-            self.table_items = 'added name size seeds leech'.split()
-        page = 1
-        while True and page<11:
-            res = 0
-            dat = retrieve_url(self.url+'/search/%s/%s/seeds/%d'%(what, self.supported_categories[cat], page))
-            dat = re.sub("<a href=\"http://www.boardreader.com/index.php.*\"", "<a href=\"plop\"", dat)
-            dat = re.sub("<=", "&lt;=", dat)
-            dat = re.sub("&\s", "&amp; ", dat)
-            dat = re.sub("&(?!amp)", "&amp;", dat)
-            x = minidom.parseString(dat)
-            table = x.getElementsByTagName('table').item(0)
-            if not table: return
-            for tr in table.getElementsByTagName('tr'):
-                tds = tr.getElementsByTagName('td')
-                if tds:
-                    i = 0
-                    vals = {}
-                    for td in tds:
-                        if self.table_items[i] == 'name':
-                            vals['link'] = get_link(td)
-                            vals['name'] = get_name(td)
-                        else:
-                            vals[self.table_items[i]] = get_text(td).strip()
-                        i += 1
-                    vals['engine_url'] = self.url
-                    if not vals['seeds'].isdigit():
-                        vals['seeds'] = 0
-                    if not vals['leech'].isdigit():
-                        vals['leech'] = 0
-                    if vals['link'] is None:
-                        continue
-                    prettyPrinter(vals)
-                    res = res + 1
-            if res == 0:
-                break
-            page = page +1
+    def __init__(self):
+        self.results = []
+        self.parser = self.SimpleSGMLParser(self.results, self.url)
+
+    def download_torrent(self, info):
+        print download_file(info)
+
+    class SimpleSGMLParser(sgmllib.SGMLParser):
+        def __init__(self, results, url, *args):
+            sgmllib.SGMLParser.__init__(self)
+            self.url = url
+            self.td_counter = None
+            self.current_item = None
+            self.results = results
+
+        def start_a(self, attr):
+            params = dict(attr)
+            #print params
+            if params.has_key('href') and params['href'].startswith("/get/"):
+                self.current_item = {}
+                self.td_counter = 0
+                self.current_item['link']=self.url+params['href'].strip()
+
+        def handle_data(self, data):
+            if self.td_counter == 0:
+                if not self.current_item.has_key('name'):
+                    self.current_item['name'] = ''
+                self.current_item['name']+= data
+            elif self.td_counter == 1:
+                if not self.current_item.has_key('size'):
+                    self.current_item['size'] = ''
+                self.current_item['size']+= data.strip()
+            elif self.td_counter == 2:
+                if not self.current_item.has_key('seeds'):
+                    self.current_item['seeds'] = ''
+                self.current_item['seeds']+= data.strip()
+            elif self.td_counter == 3:
+                if not self.current_item.has_key('leech'):
+                    self.current_item['leech'] = ''
+                self.current_item['leech']+= data.strip()
+
+        def start_td(self,attr):
+            if isinstance(self.td_counter,int):
+                self.td_counter += 1
+                if self.td_counter > 4:
+                    self.td_counter = None
+                    # Display item
+                    if self.current_item:
+                        self.current_item['engine_url'] = self.url
+                        if not self.current_item['seeds'].isdigit():
+                            self.current_item['seeds'] = 0
+                        if not self.current_item['leech'].isdigit():
+                            self.current_item['leech'] = 0
+                        prettyPrinter(self.current_item)
+                        self.results.append('a')
+
+    def search(self, what, cat='all'):
+        ret = []
+        i = 1
+        while True and i<11:
+            results = []
+            parser = self.SimpleSGMLParser(results, self.url)
+            dat = retrieve_url(self.url+'/search/%s/%s/seeds/%d'%(what, self.supported_categories[cat], i))
+            results_re = re.compile('(?s)<h1>Search results for.*')
+            for match in results_re.finditer(dat):
+                res_tab = match.group(0)
+                parser.feed(res_tab)
+                parser.close()
+                break
+            if len(results) <= 0:
+                break
+            i += 1
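
The rewrite drops the strict XML route: instead of escaping Mininova's imperfect HTML with re.sub hacks until minidom could swallow it, the new plugin lets sgmllib's event callbacks walk the markup as-is. start_a opens a result item when it sees a /get/ download link, each start_td advances a cell counter that routes the following text into name, size, seeds and leech, and handle_data accumulates that text. A minimal tracer, hypothetical and not part of the commit (Python 2 only, since sgmllib was removed in Python 3), makes the callback order visible on a synthetic result row:

    import sgmllib

    class CallbackTracer(sgmllib.SGMLParser):
        # Print every event so the dispatch order is visible.
        def start_a(self, attrs):
            print 'start_a    ', dict(attrs)

        def start_td(self, attrs):
            print 'start_td'

        def handle_data(self, data):
            if data.strip():
                print 'handle_data', repr(data.strip())

    # Synthetic row shaped like the markup the plugin keys on: a /get/
    # download link, then cells carrying size, seeds and leechers.
    row = ('<td><a href="/get/123/x">get</a> '
           '<a href="/tor/123/x">Some.Torrent</a></td>'
           '<td>700.1 MB</td><td>42</td><td>7</td>')

    tracer = CallbackTracer()
    tracer.feed(row)   # events fire in document order during feed()
    tracer.close()

Driven this way, mininova().search('ubuntu', cat='software') would print one prettyPrinter line per parsed row; the results.append('a') bookkeeping exists only so search() can tell when a page yields no results and stop paginating.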

src/search_engine/engines/versions.txt (2 lines changed)

@@ -1,5 +1,5 @@
 isohunt: 1.30
 torrentreactor: 1.20
 btjunkie: 2.21
-mininova: 1.32
+mininova: 1.40
 piratebay: 1.30
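
versions.txt mirrors each plugin's #VERSION header, so the registry entry moves from 1.32 to 1.40 in step with mininova.py. The file is a plain name: version list; a consumer could read it along these lines (an illustrative sketch only, not the project's actual updater code):

    # Illustrative only: parse the "engine: version" lines of versions.txt.
    def parse_versions(text):
        versions = {}
        for line in text.splitlines():
            if ':' in line:
                name, ver = line.split(':', 1)
                versions[name.strip()] = ver.strip()
        return versions

    print parse_versions(open('versions.txt').read())
    # {'mininova': '1.40', 'isohunt': '1.30', ...}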
