Browse Source

Merge pull request #1997 from BrunoReX/torrentreactor_fix

Fix TorrentReactor search plugin
adaptive-webui-19844
sledgehammer999 10 years ago
parent
commit
91e53971b2
  1. 40
      src/searchengine/nova/engines/torrentreactor.py
  2. 2
      src/searchengine/nova/engines/versions.txt
  3. 39
      src/searchengine/nova3/engines/torrentreactor.py
  4. 2
      src/searchengine/nova3/engines/versions.txt

40
src/searchengine/nova/engines/torrentreactor.py

@@ -1,6 +1,7 @@
#VERSION: 1.32 #VERSION: 1.33
#AUTHORS: Gekko Dam Beer (gekko04@users.sourceforge.net) #AUTHORS: Gekko Dam Beer (gekko04@users.sourceforge.net)
#CONTRIBUTORS: Christophe Dumez (chris@qbittorrent.org) #CONTRIBUTORS: Christophe Dumez (chris@qbittorrent.org)
# Bruno Barbieri (brunorex@gmail.com)
# Redistribution and use in source and binary forms, with or without # Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met: # modification, are permitted provided that the following conditions are met:
@@ -27,8 +28,11 @@
# POSSIBILITY OF SUCH DAMAGE. # POSSIBILITY OF SUCH DAMAGE.
from novaprinter import prettyPrinter from novaprinter import prettyPrinter
import sgmllib
from helpers import retrieve_url, download_file from helpers import retrieve_url, download_file
from urllib2 import HTTPError
from HTMLParser import HTMLParser
import urllib
import re
class torrentreactor(object): class torrentreactor(object):
url = 'http://www.torrentreactor.net' url = 'http://www.torrentreactor.net'
@@ -38,29 +42,31 @@ class torrentreactor(object):
def download_torrent(self, info): def download_torrent(self, info):
print download_file(info) print download_file(info)
class SimpleSGMLParser(sgmllib.SGMLParser): class SimpleHTMLParser(HTMLParser):
def __init__(self, results, url, *args): def __init__(self, results, url, *args):
sgmllib.SGMLParser.__init__(self) HTMLParser.__init__(self)
self.td_counter = None self.td_counter = None
self.current_item = None self.current_item = None
self.results = results self.results = results
self.id = None self.id = None
self.url = url self.url = url
self.dispatcher = { 'a' : self.start_a, 'td' : self.start_td }
def handle_starttag(self, tag, attrs):
if tag in self.dispatcher:
self.dispatcher[tag](attrs)
def start_a(self, attr): def start_a(self, attr):
params = dict(attr) params = dict(attr)
if 'torrentreactor.net/download.php' in params['href']: if re.match("/torrents/\d+.*", params['href']):
self.current_item = {} self.current_item = {}
self.current_item['desc_link'] = self.url+params['href'].strip()
elif 'torrentreactor.net/download.php' in params['href']:
self.td_counter = 0 self.td_counter = 0
self.current_item['link'] = params['href'].strip() self.current_item['link'] = params['href'].strip()
elif params['href'].startswith('/torrents/'): self.current_item['name'] = urllib.unquote_plus(params['href'].split('&')[1].split('name=')[1])
self.current_item['desc_link'] = 'http://www.torrentreactor.net'+params['href'].strip()
def handle_data(self, data): def handle_data(self, data):
if self.td_counter == 0:
if not self.current_item.has_key('name'):
self.current_item['name'] = ''
self.current_item['name']+= data.strip()
if self.td_counter == 1: if self.td_counter == 1:
if not self.current_item.has_key('size'): if not self.current_item.has_key('size'):
self.current_item['size'] = '' self.current_item['size'] = ''
@@ -92,14 +98,20 @@ class torrentreactor(object):
def __init__(self): def __init__(self):
self.results = [] self.results = []
self.parser = self.SimpleSGMLParser(self.results, self.url) self.parser = self.SimpleHTMLParser(self.results, self.url)
def search(self, what, cat='all'): def search(self, what, cat='all'):
i = 0 i = 0
dat = ''
while True and i<11: while True and i<11:
results = [] results = []
parser = self.SimpleSGMLParser(results, self.url) parser = self.SimpleHTMLParser(results, self.url)
dat = retrieve_url(self.url+'/ts.php?search=&words=%s&cid=%s&sid=&type=1&orderby=a.seeds&asc=0&skip=%s'%(what, self.supported_categories[cat], (i*35)))
try:
dat = retrieve_url(self.url+'/torrent-search/%s/%s?sort=seeders.desc&type=all&period=none&categories=%s'%(what, (i*35), self.supported_categories[cat]))
except HTTPError:
break
parser.feed(dat) parser.feed(dat)
parser.close() parser.close()
if len(results) <= 0: if len(results) <= 0:

2
src/searchengine/nova/engines/versions.txt

@@ -1,4 +1,4 @@
torrentreactor: 1.32 torrentreactor: 1.33
mininova: 1.50 mininova: 1.50
piratebay: 1.53 piratebay: 1.53
vertor: 1.3 vertor: 1.3

39
src/searchengine/nova3/engines/torrentreactor.py

@@ -1,6 +1,7 @@
#VERSION: 1.32 #VERSION: 1.33
#AUTHORS: Gekko Dam Beer (gekko04@users.sourceforge.net) #AUTHORS: Gekko Dam Beer (gekko04@users.sourceforge.net)
#CONTRIBUTORS: Christophe Dumez (chris@qbittorrent.org) #CONTRIBUTORS: Christophe Dumez (chris@qbittorrent.org)
# Bruno Barbieri (brunorex@gmail.com)
# Redistribution and use in source and binary forms, with or without # Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met: # modification, are permitted provided that the following conditions are met:
@@ -27,8 +28,10 @@
# POSSIBILITY OF SUCH DAMAGE. # POSSIBILITY OF SUCH DAMAGE.
from novaprinter import prettyPrinter from novaprinter import prettyPrinter
import sgmllib3
from helpers import retrieve_url, download_file from helpers import retrieve_url, download_file
from urllib import error, parse
from html.parser import HTMLParser
import re
class torrentreactor(object): class torrentreactor(object):
url = 'http://www.torrentreactor.net' url = 'http://www.torrentreactor.net'
@@ -38,29 +41,31 @@ class torrentreactor(object):
def download_torrent(self, info): def download_torrent(self, info):
print(download_file(info)) print(download_file(info))
class SimpleSGMLParser(sgmllib3.SGMLParser): class SimpleHTMLParser(HTMLParser):
def __init__(self, results, url, *args): def __init__(self, results, url, *args):
sgmllib3.SGMLParser.__init__(self) HTMLParser.__init__(self)
self.td_counter = None self.td_counter = None
self.current_item = None self.current_item = None
self.results = results self.results = results
self.id = None self.id = None
self.url = url self.url = url
self.dispatcher = { 'a' : self.start_a, 'td' : self.start_td }
def handle_starttag(self, tag, attrs):
if tag in self.dispatcher:
self.dispatcher[tag](attrs)
def start_a(self, attr): def start_a(self, attr):
params = dict(attr) params = dict(attr)
if 'torrentreactor.net/download.php' in params['href']: if re.match("/torrents/\d+.*", params['href']):
self.current_item = {} self.current_item = {}
self.current_item['desc_link'] = self.url+params['href'].strip()
elif 'torrentreactor.net/download.php' in params['href']:
self.td_counter = 0 self.td_counter = 0
self.current_item['link'] = params['href'].strip() self.current_item['link'] = params['href'].strip()
elif params['href'].startswith('/torrents/'): self.current_item['name'] = parse.unquote_plus(params['href'].split('&')[1].split('name=')[1])
self.current_item['desc_link'] = 'http://www.torrentreactor.net'+params['href'].strip()
def handle_data(self, data): def handle_data(self, data):
if self.td_counter == 0:
if 'name' not in self.current_item:
self.current_item['name'] = ''
self.current_item['name']+= data.strip()
if self.td_counter == 1: if self.td_counter == 1:
if 'size' not in self.current_item: if 'size' not in self.current_item:
self.current_item['size'] = '' self.current_item['size'] = ''
@@ -92,14 +97,20 @@ class torrentreactor(object):
def __init__(self): def __init__(self):
self.results = [] self.results = []
self.parser = self.SimpleSGMLParser(self.results, self.url) self.parser = self.SimpleHTMLParser(self.results, self.url)
def search(self, what, cat='all'): def search(self, what, cat='all'):
i = 0 i = 0
dat = ''
while True and i<11: while True and i<11:
results = [] results = []
parser = self.SimpleSGMLParser(results, self.url) parser = self.SimpleHTMLParser(results, self.url)
dat = retrieve_url(self.url+'/ts.php?search=&words=%s&cid=%s&sid=&type=1&orderby=a.seeds&asc=0&skip=%s'%(what, self.supported_categories[cat], (i*35)))
try:
dat = retrieve_url(self.url+'/torrent-search/%s/%s?sort=seeders.desc&type=all&period=none&categories=%s'%(what, (i*35), self.supported_categories[cat]))
except error.HTTPError:
break
parser.feed(dat) parser.feed(dat)
parser.close() parser.close()
if len(results) <= 0: if len(results) <= 0:

2
src/searchengine/nova3/engines/versions.txt

@@ -1,4 +1,4 @@
torrentreactor: 1.32 torrentreactor: 1.33
mininova: 1.50 mininova: 1.50
piratebay: 1.53 piratebay: 1.53
vertor: 1.3 vertor: 1.3

Loading…
Cancel
Save