Remove all search plugins from repo.

@@ -5,35 +5,11 @@
     <file>searchengine/nova/nova2.py</file>
     <file>searchengine/nova/novaprinter.py</file>
     <file>searchengine/nova/socks.py</file>
-    <file>searchengine/nova/engines/btdb.png</file>
-    <file>searchengine/nova/engines/btdb.py</file>
-    <file>searchengine/nova/engines/demonoid.png</file>
-    <file>searchengine/nova/engines/demonoid.py</file>
-    <file>searchengine/nova/engines/legittorrents.png</file>
-    <file>searchengine/nova/engines/legittorrents.py</file>
-    <file>searchengine/nova/engines/piratebay.png</file>
-    <file>searchengine/nova/engines/piratebay.py</file>
-    <file>searchengine/nova/engines/torlock.png</file>
-    <file>searchengine/nova/engines/torlock.py</file>
-    <file>searchengine/nova/engines/torrentz.png</file>
-    <file>searchengine/nova/engines/torrentz.py</file>
     <file>searchengine/nova3/helpers.py</file>
     <file>searchengine/nova3/nova2.py</file>
     <file>searchengine/nova3/novaprinter.py</file>
     <file>searchengine/nova3/sgmllib3.py</file>
     <file>searchengine/nova3/socks.py</file>
-    <file>searchengine/nova3/engines/btdb.png</file>
-    <file>searchengine/nova3/engines/btdb.py</file>
-    <file>searchengine/nova3/engines/demonoid.png</file>
-    <file>searchengine/nova3/engines/demonoid.py</file>
-    <file>searchengine/nova3/engines/legittorrents.png</file>
-    <file>searchengine/nova3/engines/legittorrents.py</file>
-    <file>searchengine/nova3/engines/piratebay.png</file>
-    <file>searchengine/nova3/engines/piratebay.py</file>
-    <file>searchengine/nova3/engines/torlock.png</file>
-    <file>searchengine/nova3/engines/torlock.py</file>
-    <file>searchengine/nova3/engines/torrentz.png</file>
-    <file>searchengine/nova3/engines/torrentz.py</file>
     <file>searchengine/nova/nova2dl.py</file>
     <file>searchengine/nova3/nova2dl.py</file>
 </qresource>

searchengine/nova/engines/btdb.png (deleted image, 562 B)

searchengine/nova/engines/btdb.py (deleted)
@@ -1,147 +0,0 @@
#VERSION: 1.01
#AUTHORS: Charles Worthing
#CONTRIBUTORS: Diego de las Heras (ngosang@hotmail.es)

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#    * Redistributions of source code must retain the above copyright notice,
#      this list of conditions and the following disclaimer.
#    * Redistributions in binary form must reproduce the above copyright
#      notice, this list of conditions and the following disclaimer in the
#      documentation and/or other materials provided with the distribution.
#    * Neither the name of the author nor the names of its contributors may be
#      used to endorse or promote products derived from this software without
#      specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

from HTMLParser import HTMLParser
#qBt
from novaprinter import prettyPrinter
from helpers import download_file, retrieve_url


class btdb(object):
    """ Search engine class """
    url = 'https://btdb.in'
    name = 'BTDB'

    def download_torrent(self, info):
        """ Downloader """
        print(download_file(info))

    class MyHtmlParser(HTMLParser):
        """ Parser class """
        def __init__(self, results, url):
            HTMLParser.__init__(self)
            self.results = results
            self.url = url
            self.current_item = {} # One torrent result
            self.add_query = True
            self.torrent_info_index = 0 # Count of the meta data encountered
            self.torrent_info_array = []
            self.meta_data_grabbing = 0
            self.meta_data_array = []
            self.torrent_no_files = 0
            self.torrent_date_added = 0
            self.torrent_popularity = 0
            self.mangnet_link = ""
            self.desc_link = ""
            self.torrent_name = ""

        def handle_starttag(self, tag, attrs):
            if tag == "span":
                span_dict = dict(attrs)
                if "class" in span_dict:
                    the_class = span_dict["class"]
                    if the_class == "item-meta-info-value":
                        self.meta_data_grabbing += 1
                    else:
                        self.meta_data_grabbing = 0
            if tag == "script":
                return
            if tag == "li":
                for attr in attrs:
                    if attr[1] == "search-ret-item":
                        self.torrent_info_index = 1
            if tag == "a":
                if self.torrent_info_index > 0:
                    params = dict(attrs)
                    if "href" in params:
                        link = params["href"]
                        if link.startswith("/torrent"):
                            self.desc_link = "".join((self.url, link))
                            self.torrent_name = params["title"]
                        if link.startswith("magnet:"):
                            self.mangnet_link = link

        def handle_endtag(self, tag):
            if tag == "script":
                return
            if tag == "div":
                if self.meta_data_grabbing > 0:

                    self.torrent_no_files = self.meta_data_array[2] # Not used
                    self.torrent_date_added = self.meta_data_array[4] # Not used
                    self.torrent_popularity = self.meta_data_array[6] # Not used

                    self.current_item["size"] = self.meta_data_array[0]
                    self.current_item["name"] = self.torrent_name
                    self.current_item["engine_url"] = self.url
                    self.current_item["link"] = self.mangnet_link
                    self.current_item["desc_link"] = self.desc_link
                    self.current_item["seeds"] = -1
                    self.current_item["leech"] = -1

                    prettyPrinter(self.current_item)
                    self.results.append('a')
                    self.current_item = {}

                    self.meta_data_grabbing = 0
                    self.meta_data_array = []
                    self.mangnet_link = ""
                    self.desc_link = ""
                    self.torrent_name = ""

        def handle_data(self, data):
            if self.torrent_info_index > 0:
                self.torrent_info_array.append(data)
                self.torrent_info_index += 1
            if self.meta_data_grabbing > 0:
                self.meta_data_array.append(data)
                self.meta_data_grabbing += 1

        def handle_entityref(self, name):
            c = unichr(name2codepoint[name])

        def handle_charref(self, name):
            if name.startswith('x'):
                c = unichr(int(name[1:], 16))
            else:
                c = unichr(int(name))

    def search(self, what, cat='all'):
        """ Performs search """
        results_list = []
        parser = self.MyHtmlParser(results_list, self.url)
        i = 1
        while i < 31:
            # "what" is already urlencoded
            html = retrieve_url(self.url + '/q/%s/%d?sort=popular' % (what, i))
            parser.feed(html)
            if len(results_list) < 1:
                break
            del results_list[:]
            i += 1
        parser.close()

searchengine/nova/engines/demonoid.png (deleted image, 675 B)

searchengine/nova/engines/demonoid.py (deleted)
@@ -1,144 +0,0 @@
#VERSION: 1.23
#AUTHORS: Douman (custparasite@gmx.se)
#CONTRIBUTORS: Diego de las Heras (ngosang@hotmail.es)

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#    * Redistributions of source code must retain the above copyright notice,
#      this list of conditions and the following disclaimer.
#    * Redistributions in binary form must reproduce the above copyright
#      notice, this list of conditions and the following disclaimer in the
#      documentation and/or other materials provided with the distribution.
#    * Neither the name of the author nor the names of its contributors may be
#      used to endorse or promote products derived from this software without
#      specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

from HTMLParser import HTMLParser
from re import compile as re_compile
from re import DOTALL
from itertools import islice
#qBt
from novaprinter import prettyPrinter
from helpers import download_file, retrieve_url


class demonoid(object):
    """ Search engine class """
    url = "https://www.demonoid.pw"
    name = "Demonoid"
    supported_categories = {'all': '0',
                            'music': '2',
                            'movies': '1',
                            'games': '4',
                            'software': '5',
                            'books': '11',
                            'anime': '9',
                            'tv': '3'}

    def download_torrent(self, info):
        """ Downloader """
        print(download_file(info))

    class MyHtmlParseWithBlackJack(HTMLParser):
        """ Parser class """
        def __init__(self, url):
            HTMLParser.__init__(self)
            self.url = url
            self.current_item = None
            self.save_data = None
            self.seeds_leech = False
            self.size_repl = re_compile(",")

        def handle_starttag(self, tag, attrs):
            """ Parser's start tag handler """
            if tag == "a":
                params = dict(attrs)
                if "href" in params:
                    link = params["href"]
                    if link.startswith("/files/details"):
                        self.current_item = dict()
                        self.current_item["desc_link"] = "".join((self.url, link))
                        self.current_item["engine_url"] = self.url
                        self.save_data = "name"
                    elif link.startswith("/files/download"):
                        self.current_item["link"] = "".join((self.url, link))

            elif self.current_item:
                if tag == "td":
                    params = dict(attrs)
                    if "class" in params and "align" in params:
                        if params["class"].startswith("tone"):
                            if params["align"] == "right":
                                self.save_data = "size"
                            elif params["align"] == "center":
                                self.seeds_leech = True

                elif self.seeds_leech and tag == "font":
                    for attr in attrs:
                        if "class" in attr:
                            if attr[1] == "green":
                                self.save_data = "seeds"
                            elif attr[1] == "red":
                                self.save_data = "leech"

                    self.seeds_leech = False

        def handle_data(self, data):
            """ Parser's data handler """
            if self.save_data:
                if self.save_data == "name":
                    # names with special characters like '&' are splitted in several pieces
                    if 'name' not in self.current_item:
                        self.current_item['name'] = ''
                    self.current_item['name'] += data
                else:
                    self.current_item[self.save_data] = data
                    self.save_data = None
                    if self.current_item.__len__() == 7:
                        self.current_item["size"] = self.size_repl.sub("", self.current_item["size"])
                        prettyPrinter(self.current_item)
                        self.current_item = None

        def handle_endtag(self, tag):
            """ Parser's end tag handler """
            if self.save_data == "name":
                self.save_data = None

    def search(self, what, cat='all'):
        """ Performs search """
        #prepare query
        cat = self.supported_categories[cat.lower()]
        query = "".join((self.url, "/files/?category=", cat, "&subcategory=All&quality=All&seeded=2&external=2&query=", what, "&uid=0&sort=S"))

        data = retrieve_url(query)

        add_res_list = re_compile("/files.*page=[0-9]+")
        torrent_list = re_compile("start torrent list -->(.*)<!-- end torrent", DOTALL)
        data = torrent_list.search(data).group(0)
        list_results = add_res_list.findall(data)

        parser = self.MyHtmlParseWithBlackJack(self.url)
        parser.feed(data)

        del data

        if list_results:
            for search_query in islice((add_res_list.search(result).group(0) for result in list_results[1].split(" | ")), 0, 5):
                response = retrieve_url(self.url + search_query)
                parser.feed(torrent_list.search(response).group(0))
                parser.close()

        return

searchengine/nova/engines/legittorrents.png (deleted image, 532 B)

searchengine/nova/engines/legittorrents.py (deleted)
@@ -1,101 +0,0 @@
#VERSION: 2.02
#AUTHORS: Christophe Dumez (chris@qbittorrent.org)
#         Douman (custparasite@gmx.se)

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#    * Redistributions of source code must retain the above copyright notice,
#      this list of conditions and the following disclaimer.
#    * Redistributions in binary form must reproduce the above copyright
#      notice, this list of conditions and the following disclaimer in the
#      documentation and/or other materials provided with the distribution.
#    * Neither the name of the author nor the names of its contributors may be
#      used to endorse or promote products derived from this software without
#      specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.


from novaprinter import prettyPrinter
from helpers import retrieve_url, download_file
from HTMLParser import HTMLParser
from re import compile as re_compile

class legittorrents(object):
    url = 'http://www.legittorrents.info'
    name = 'Legit Torrents'
    supported_categories = {'all': '0', 'movies': '1', 'tv': '13', 'music': '2', 'games': '3', 'anime': '5', 'books': '6'}

    def download_torrent(self, info):
        print(download_file(info))

    class MyHtmlParseWithBlackJack(HTMLParser):
        """ Parser class """
        def __init__(self, url):
            HTMLParser.__init__(self)
            self.url = url
            self.current_item = None
            self.save_item_key = None

        def handle_starttag(self, tag, attrs):
            """ Parser's start tag handler """
            if self.current_item:
                params = dict(attrs)
                if tag == "a":
                    link = params["href"]
                    if link.startswith("index") and "title" in params:
                        #description link
                        self.current_item["name"] = params["title"][14:]
                        self.current_item["desc_link"] = "/".join((self.url, link))
                    elif link.startswith("download"):
                        self.current_item["link"] = "/".join((self.url, link))
                elif tag == "td":
                    if "width" in params and params["width"] == "30" and not "leech" in self.current_item:
                        self.save_item_key = "leech" if "seeds" in self.current_item else "seeds"

            elif tag == "tr":
                self.current_item = {}
                self.current_item["size"] = ""
                self.current_item["engine_url"] = self.url

        def handle_endtag(self, tag):
            """ Parser's end tag handler """
            if self.current_item and tag == "tr":
                if len(self.current_item) > 4:
                    prettyPrinter(self.current_item)
                self.current_item = None

        def handle_data(self, data):
            """ Parser's data handler """
            if self.save_item_key:
                self.current_item[self.save_item_key] = data.strip()
                self.save_item_key = None

    def search(self, what, cat='all'):
        """ Performs search """
        query = "".join((self.url, "/index.php?page=torrents&search=", what, "&category=", self.supported_categories.get(cat, '0'), "&active=1"))

        get_table = re_compile('(?s)<table\sclass="lista".*>(.*)</table>')
        data = get_table.search(retrieve_url(query)).group(0)
        #extract first ten pages of next results
        next_pages = re_compile('(?m)<option value="(.*)">[0-9]+</option>')
        next_pages = ["".join((self.url, page)) for page in next_pages.findall(data)[:10]]

        parser = self.MyHtmlParseWithBlackJack(self.url)
        parser.feed(data)
        parser.close()

        for page in next_pages:
            parser.feed(get_table.search(retrieve_url(page)).group(0))
            parser.close()

searchengine/nova/engines/piratebay.png (deleted image, 278 B)

searchengine/nova/engines/piratebay.py (deleted)
@@ -1,176 +0,0 @@
#VERSION: 2.15
#AUTHORS: Fabien Devaux (fab@gnux.info)
#CONTRIBUTORS: Christophe Dumez (chris@qbittorrent.org)
#              Arthur (custparasite@gmx.se)
#              Diego de las Heras (ngosang@hotmail.es)

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#    * Redistributions of source code must retain the above copyright notice,
#      this list of conditions and the following disclaimer.
#    * Redistributions in binary form must reproduce the above copyright
#      notice, this list of conditions and the following disclaimer in the
#      documentation and/or other materials provided with the distribution.
#    * Neither the name of the author nor the names of its contributors may be
#      used to endorse or promote products derived from this software without
#      specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

from HTMLParser import HTMLParser
#qBt
from novaprinter import prettyPrinter
from helpers import download_file, retrieve_url


class piratebay(object):
    """ Search engine class """
    url = 'https://thepiratebay.org'
    name = 'The Pirate Bay'
    supported_categories = {'all': '0', 'music': '100', 'movies': '200', 'games': '400', 'software': '300'}

    def download_torrent(self, info):
        """ Downloader """
        print(download_file(info))

    class MyHtmlParseWithBlackJack(HTMLParser):
        """ Parser class """
        def __init__(self, list_searches, url):
            HTMLParser.__init__(self)
            self.list_searches = list_searches
            self.url = url
            self.current_item = None
            self.save_item = None
            self.result_table = False #table with results is found
            self.result_tbody = False
            self.add_query = True
            self.result_query = False

        def handle_start_tag_default(self, attrs):
            """ Default handler for start tag dispatcher """
            pass

        def handle_start_tag_a(self, attrs):
            """ Handler for start tag a """
            params = dict(attrs)
            link = params["href"]
            if link.startswith("/torrent"):
                self.current_item["desc_link"] = "".join((self.url, link))
                self.save_item = "name"
            elif link.startswith("magnet"):
                self.current_item["link"] = link
                # end of the 'name' item
                self.current_item['name'] = self.current_item['name'].strip()
                self.save_item = None

        def handle_start_tag_font(self, attrs):
            """ Handler for start tag font """
            for attr in attrs:
                if attr[1] == "detDesc":
                    self.save_item = "size"
                    break

        def handle_start_tag_td(self, attrs):
            """ Handler for start tag td """
            for attr in attrs:
                if attr[1] == "right":
                    if "seeds" in self.current_item.keys():
                        self.save_item = "leech"
                    else:
                        self.save_item = "seeds"
                    break

        def handle_starttag(self, tag, attrs):
            """ Parser's start tag handler """
            if self.current_item:
                dispatcher = getattr(self, "_".join(("handle_start_tag", tag)), self.handle_start_tag_default)
                dispatcher(attrs)

            elif self.result_tbody:
                if tag == "tr":
                    self.current_item = {"engine_url" : self.url}

            elif tag == "table":
                self.result_table = "searchResult" == attrs[0][1]

            elif self.add_query:
                if self.result_query and tag == "a":
                    if len(self.list_searches) < 10:
                        self.list_searches.append(attrs[0][1])
                    else:
                        self.add_query = False
                        self.result_query = False
                elif tag == "div":
                    self.result_query = "center" == attrs[0][1]

        def handle_endtag(self, tag):
            """ Parser's end tag handler """
            if self.result_tbody:
                if tag == "tr":
                    prettyPrinter(self.current_item)
                    self.current_item = None
                elif tag == "font":
                    self.save_item = None
                elif tag == "table":
                    self.result_table = self.result_tbody = False

            elif self.result_table:
                if tag == "thead":
                    self.result_tbody = True
                elif tag == "table":
                    self.result_table = self.result_tbody = False

            elif self.add_query and self.result_query:
                if tag == "div":
                    self.add_query = self.result_query = False

        def handle_data(self, data):
            """ Parser's data handler """
            if self.save_item:
                if self.save_item == "size":
                    temp_data = data.split()
                    if "Size" in temp_data:
                        indx = temp_data.index("Size")
                        self.current_item[self.save_item] = temp_data[indx + 1] + " " + temp_data[indx + 2]

                elif self.save_item == "name":
                    # names with special characters like '&' are splitted in several pieces
                    if 'name' not in self.current_item:
                        self.current_item['name'] = ''
                    self.current_item['name'] += data

                else:
                    self.current_item[self.save_item] = data
                    self.save_item = None

    def search(self, what, cat='all'):
        """ Performs search """
        #prepare query. 7 is filtering by seeders
        cat = cat.lower()
        query = "/".join((self.url, "search", what, "0", "7", self.supported_categories[cat]))

        response = retrieve_url(query)

        list_searches = []
        parser = self.MyHtmlParseWithBlackJack(list_searches, self.url)
        parser.feed(response)
        parser.close()

        parser.add_query = False
        for search_query in list_searches:
            response = retrieve_url(self.url + search_query)
            parser.feed(response)
            parser.close()

        return

searchengine/nova/engines/torlock.png (deleted image, 561 B)

searchengine/nova/engines/torlock.py (deleted)
@@ -1,97 +0,0 @@
#VERSION: 2.0
#AUTHORS: Douman (custparasite@gmx.se)
#CONTRIBUTORS: Diego de las Heras (ngosang@hotmail.es)

from novaprinter import prettyPrinter
from helpers import retrieve_url, download_file
from re import compile as re_compile
from HTMLParser import HTMLParser

class torlock(object):
    url = "https://www.torlock.com"
    name = "TorLock"
    supported_categories = {'all' : 'all',
                            'anime' : 'anime',
                            'software' : 'software',
                            'games' : 'game',
                            'movies' : 'movie',
                            'music' : 'music',
                            'tv' : 'television',
                            'books' : 'ebooks'}

    def download_torrent(self, info):
        print(download_file(info))

    class MyHtmlParser(HTMLParser):
        """ Sub-class for parsing results """
        def __init__(self, url):
            HTMLParser.__init__(self)
            self.url = url
            self.article_found = False #true when <article> with results is found
            self.item_found = False
            self.item_bad = False #set to True for malicious links
            self.current_item = None #dict for found item
            self.item_name = None #key's name in current_item dict
            self.parser_class = {"ts" : "size",
                                 "tul" : "seeds",
                                 "tdl" : "leech"}

        def handle_starttag(self, tag, attrs):
            params = dict(attrs)
            if self.item_found:
                if tag == "td":
                    if "class" in params:
                        self.item_name = self.parser_class.get(params["class"], None)
                        if self.item_name:
                            self.current_item[self.item_name] = ""

            elif self.article_found and tag == "a":
                if "href" in params:
                    link = params["href"]
                    if link.startswith("/torrent"):
                        self.current_item["desc_link"] = "".join((self.url, link))
                        self.current_item["link"] = "".join((self.url, "/tor/", link.split('/')[2], ".torrent"))
                        self.current_item["engine_url"] = self.url
                        self.item_found = True
                        self.item_name = "name"
                        self.current_item["name"] = ""
                        self.item_bad = "rel" in params and params["rel"] == "nofollow"

            elif tag == "article":
                self.article_found = True
                self.current_item = {}

        def handle_data(self, data):
            if self.item_name:
                self.current_item[self.item_name] += data

        def handle_endtag(self, tag):
            if tag == "article":
                self.article_found = False
            elif self.item_name and (tag == "a" or tag == "td"):
                self.item_name = None
            elif self.item_found and tag == "tr":
                self.item_found = False
                if not self.item_bad:
                    prettyPrinter(self.current_item)
                self.current_item = {}

    def search(self, query, cat='all'):
        """ Performs search """
        query = query.replace("%20", "-")

        parser = self.MyHtmlParser(self.url)
        page = "".join((self.url, "/", self.supported_categories[cat], "/torrents/", query, ".html?sort=seeds&page=1"))
        html = retrieve_url(page)
        parser.feed(html)

        counter = 1
        additional_pages = re_compile("/{0}/torrents/{1}.html\?sort=seeds&page=[0-9]+".format(self.supported_categories[cat], query))
        list_searches = additional_pages.findall(html)[:-1] #last link is next(i.e. second)
        for page in map(lambda link: "".join((self.url, link)), list_searches):
            html = retrieve_url(page)
            parser.feed(html)
            counter += 1
            if counter > 3:
                break
        parser.close()

searchengine/nova/engines/torrentz.png (deleted image, 122 B)

searchengine/nova/engines/torrentz.py (deleted)
@@ -1,119 +0,0 @@
#VERSION: 2.22
#AUTHORS: Diego de las Heras (ngosang@hotmail.es)

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#    * Redistributions of source code must retain the above copyright notice,
#      this list of conditions and the following disclaimer.
#    * Redistributions in binary form must reproduce the above copyright
#      notice, this list of conditions and the following disclaimer in the
#      documentation and/or other materials provided with the distribution.
#    * Neither the name of the author nor the names of its contributors may be
#      used to endorse or promote products derived from this software without
#      specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

from novaprinter import prettyPrinter
from helpers import retrieve_url, download_file
from HTMLParser import HTMLParser
from urllib import urlencode


class torrentz(object):
    # mandatory properties
    url = 'https://torrentz2.eu'
    name = 'Torrentz2'
    supported_categories = {'all': ''}

    trackers_list = ['udp://tracker.coppersurfer.tk:6969/announce',
                     'udp://tracker.opentrackr.org:1337/announce',
                     'udp://tracker.zer0day.to:1337/announce',
                     'udp://tracker.leechers-paradise.org:6969/announce',
                     'udp://9.rarbg.com:2710/announce',
                     'udp://explodie.org:6969/announce']

    class MyHtmlParser(HTMLParser):
        def __init__(self, results, url, trackers):
            HTMLParser.__init__(self)
            self.results = results
            self.url = url
            self.trackers = trackers
            self.td_counter = None
            self.current_item = None

        def handle_starttag(self, tag, attrs):
            params = dict(attrs)
            if tag == 'a':
                if 'href' in params:
                    self.current_item = {}
                    self.td_counter = 0
                    self.current_item['link'] = 'magnet:?xt=urn:btih:' + \
                                                params['href'].strip(' /') + self.trackers
                    self.current_item['desc_link'] = self.url + params['href'].strip()
            elif tag == 'span':
                if self.td_counter is not None:
                    self.td_counter += 1
                    if self.td_counter > 5: # safety
                        self.td_counter = None

        def handle_data(self, data):
            if self.td_counter == 0:
                if 'name' not in self.current_item:
                    self.current_item['name'] = ''
                self.current_item['name'] += data
            elif self.td_counter == 3:
                if 'size' not in self.current_item:
                    self.current_item['size'] = data.strip()
                    if self.current_item['size'] == 'Pending':
                        self.current_item['size'] = ''
            elif self.td_counter == 4:
                if 'seeds' not in self.current_item:
                    self.current_item['seeds'] = data.strip().replace(',', '')
                    if not self.current_item['seeds'].isdigit():
                        self.current_item['seeds'] = 0
            elif self.td_counter == 5:
                if 'leech' not in self.current_item:
                    self.current_item['leech'] = data.strip().replace(',', '')
                    if not self.current_item['leech'].isdigit():
                        self.current_item['leech'] = 0

                    # display item
                    self.td_counter = None
                    self.current_item['engine_url'] = self.url
                    if self.current_item['name'].find(u' \xbb'):
                        self.current_item['name'] = self.current_item['name'].split(u' \xbb')[0]
                    self.current_item['link'] += '&' + urlencode({'dn' : self.current_item['name'].encode('utf-8')})

                    prettyPrinter(self.current_item)
                    self.results.append('a')

    def download_torrent(self, info):
        print(download_file(info))

    def search(self, what, cat='all'):
        # initialize trackers for magnet links
        trackers = '&' + '&'.join(urlencode({'tr' : tracker}) for tracker in self.trackers_list)

        results_list = []
        parser = self.MyHtmlParser(results_list, self.url, trackers)
        i = 0
        while i < 6:
            # "what" is already urlencoded
            html = retrieve_url(self.url + '/search?f=%s&p=%d' % (what, i))
            parser.feed(html)
            if len(results_list) < 1:
                break
            del results_list[:]
            i += 1
        parser.close()

searchengine/nova/engines/versions.txt (deleted)
@@ -1,6 +0,0 @@
btdb: 1.01
demonoid: 1.23
legittorrents: 2.02
piratebay: 2.15
torlock: 2.0
torrentz: 2.22

searchengine/nova3/engines/btdb.png (deleted image, 562 B)

searchengine/nova3/engines/btdb.py (deleted)
@@ -1,147 +0,0 @@
#VERSION: 1.01
#AUTHORS: Charles Worthing
#CONTRIBUTORS: Diego de las Heras (ngosang@hotmail.es)

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#    * Redistributions of source code must retain the above copyright notice,
#      this list of conditions and the following disclaimer.
#    * Redistributions in binary form must reproduce the above copyright
#      notice, this list of conditions and the following disclaimer in the
#      documentation and/or other materials provided with the distribution.
#    * Neither the name of the author nor the names of its contributors may be
#      used to endorse or promote products derived from this software without
#      specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

from html.parser import HTMLParser
#qBt
from novaprinter import prettyPrinter
from helpers import download_file, retrieve_url


class btdb(object):
    """ Search engine class """
    url = 'https://btdb.in'
    name = 'BTDB'

    def download_torrent(self, info):
        """ Downloader """
        print(download_file(info))

    class MyHtmlParser(HTMLParser):
        """ Parser class """
        def __init__(self, results, url):
            HTMLParser.__init__(self)
            self.results = results
            self.url = url
            self.current_item = {} # One torrent result
            self.add_query = True
            self.torrent_info_index = 0 # Count of the meta data encountered
            self.torrent_info_array = []
            self.meta_data_grabbing = 0
            self.meta_data_array = []
            self.torrent_no_files = 0
            self.torrent_date_added = 0
            self.torrent_popularity = 0
            self.mangnet_link = ""
            self.desc_link = ""
            self.torrent_name = ""

        def handle_starttag(self, tag, attrs):
            if tag == "span":
                span_dict = dict(attrs)
                if "class" in span_dict:
                    the_class = span_dict["class"]
                    if the_class == "item-meta-info-value":
                        self.meta_data_grabbing += 1
                    else:
                        self.meta_data_grabbing = 0
            if tag == "script":
                return
            if tag == "li":
                for attr in attrs:
                    if attr[1] == "search-ret-item":
                        self.torrent_info_index = 1
            if tag == "a":
                if self.torrent_info_index > 0:
                    params = dict(attrs)
                    if "href" in params:
                        link = params["href"]
                        if link.startswith("/torrent"):
                            self.desc_link = "".join((self.url, link))
                            self.torrent_name = params["title"]
                        if link.startswith("magnet:"):
                            self.mangnet_link = link

        def handle_endtag(self, tag):
            if tag == "script":
                return
            if tag == "div":
                if self.meta_data_grabbing > 0:

                    self.torrent_no_files = self.meta_data_array[2] # Not used
                    self.torrent_date_added = self.meta_data_array[4] # Not used
                    self.torrent_popularity = self.meta_data_array[6] # Not used

                    self.current_item["size"] = self.meta_data_array[0]
                    self.current_item["name"] = self.torrent_name
                    self.current_item["engine_url"] = self.url
                    self.current_item["link"] = self.mangnet_link
                    self.current_item["desc_link"] = self.desc_link
                    self.current_item["seeds"] = -1
                    self.current_item["leech"] = -1

                    prettyPrinter(self.current_item)
                    self.results.append('a')
                    self.current_item = {}

                    self.meta_data_grabbing = 0
                    self.meta_data_array = []
                    self.mangnet_link = ""
                    self.desc_link = ""
                    self.torrent_name = ""

        def handle_data(self, data):
            if self.torrent_info_index > 0:
                self.torrent_info_array.append(data)
                self.torrent_info_index += 1
            if self.meta_data_grabbing > 0:
                self.meta_data_array.append(data)
                self.meta_data_grabbing += 1

        def handle_entityref(self, name):
            c = unichr(name2codepoint[name])

        def handle_charref(self, name):
            if name.startswith('x'):
                c = unichr(int(name[1:], 16))
            else:
                c = unichr(int(name))

    def search(self, what, cat='all'):
        """ Performs search """
        results_list = []
        parser = self.MyHtmlParser(results_list, self.url)
        i = 1
        while i < 31:
            # "what" is already urlencoded
            html = retrieve_url(self.url + '/q/%s/%d?sort=popular' % (what, i))
            parser.feed(html)
            if len(results_list) < 1:
                break
            del results_list[:]
            i += 1
        parser.close()

searchengine/nova3/engines/demonoid.png (deleted image, 675 B)

searchengine/nova3/engines/demonoid.py (deleted)
@@ -1,144 +0,0 @@
#VERSION: 1.23
#AUTHORS: Douman (custparasite@gmx.se)
#CONTRIBUTORS: Diego de las Heras (ngosang@hotmail.es)

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#    * Redistributions of source code must retain the above copyright notice,
#      this list of conditions and the following disclaimer.
#    * Redistributions in binary form must reproduce the above copyright
#      notice, this list of conditions and the following disclaimer in the
#      documentation and/or other materials provided with the distribution.
#    * Neither the name of the author nor the names of its contributors may be
#      used to endorse or promote products derived from this software without
#      specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

from html.parser import HTMLParser
from re import compile as re_compile
from re import DOTALL
from itertools import islice
#qBt
from novaprinter import prettyPrinter
from helpers import download_file, retrieve_url


class demonoid(object):
    """ Search engine class """
    url = "https://www.demonoid.pw"
    name = "Demonoid"
    supported_categories = {'all': '0',
                            'music': '2',
                            'movies': '1',
                            'games': '4',
                            'software': '5',
                            'books': '11',
                            'anime': '9',
                            'tv': '3'}

    def download_torrent(self, info):
        """ Downloader """
        print(download_file(info))

    class MyHtmlParseWithBlackJack(HTMLParser):
        """ Parser class """
        def __init__(self, url):
            HTMLParser.__init__(self)
            self.url = url
            self.current_item = None
            self.save_data = None
            self.seeds_leech = False
            self.size_repl = re_compile(",")

        def handle_starttag(self, tag, attrs):
            """ Parser's start tag handler """
            if tag == "a":
                params = dict(attrs)
                if "href" in params:
                    link = params["href"]
                    if link.startswith("/files/details"):
                        self.current_item = dict()
                        self.current_item["desc_link"] = "".join((self.url, link))
                        self.current_item["engine_url"] = self.url
                        self.save_data = "name"
                    elif link.startswith("/files/download"):
                        self.current_item["link"] = "".join((self.url, link))

            elif self.current_item:
                if tag == "td":
                    params = dict(attrs)
                    if "class" in params and "align" in params:
                        if params["class"].startswith("tone"):
                            if params["align"] == "right":
                                self.save_data = "size"
                            elif params["align"] == "center":
                                self.seeds_leech = True

                elif self.seeds_leech and tag == "font":
                    for attr in attrs:
                        if "class" in attr:
                            if attr[1] == "green":
                                self.save_data = "seeds"
                            elif attr[1] == "red":
                                self.save_data = "leech"

                    self.seeds_leech = False

        def handle_data(self, data):
            """ Parser's data handler """
            if self.save_data:
                if self.save_data == "name":
                    # names with special characters like '&' are splitted in several pieces
                    if 'name' not in self.current_item:
                        self.current_item['name'] = ''
                    self.current_item['name'] += data
                else:
                    self.current_item[self.save_data] = data
                    self.save_data = None
                    if self.current_item.__len__() == 7:
                        self.current_item["size"] = self.size_repl.sub("", self.current_item["size"])
                        prettyPrinter(self.current_item)
                        self.current_item = None

        def handle_endtag(self, tag):
            """ Parser's end tag handler """
            if self.save_data == "name":
                self.save_data = None

    def search(self, what, cat='all'):
        """ Performs search """
        #prepare query
        cat = self.supported_categories[cat.lower()]
        query = "".join((self.url, "/files/?category=", cat, "&subcategory=All&quality=All&seeded=2&external=2&query=", what, "&uid=0&sort=S"))

        data = retrieve_url(query)

        add_res_list = re_compile("/files.*page=[0-9]+")
        torrent_list = re_compile("start torrent list -->(.*)<!-- end torrent", DOTALL)
        data = torrent_list.search(data).group(0)
        list_results = add_res_list.findall(data)

        parser = self.MyHtmlParseWithBlackJack(self.url)
        parser.feed(data)

        del data

        if list_results:
            for search_query in islice((add_res_list.search(result).group(0) for result in list_results[1].split(" | ")), 0, 5):
                response = retrieve_url(self.url + search_query)
                parser.feed(torrent_list.search(response).group(0))
                parser.close()

        return

searchengine/nova3/engines/legittorrents.png (deleted image, 532 B)

searchengine/nova3/engines/legittorrents.py (deleted)
@@ -1,101 +0,0 @@
#VERSION: 2.02
#AUTHORS: Christophe Dumez (chris@qbittorrent.org)
#         Douman (custparasite@gmx.se)

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#    * Redistributions of source code must retain the above copyright notice,
#      this list of conditions and the following disclaimer.
#    * Redistributions in binary form must reproduce the above copyright
#      notice, this list of conditions and the following disclaimer in the
#      documentation and/or other materials provided with the distribution.
#    * Neither the name of the author nor the names of its contributors may be
#      used to endorse or promote products derived from this software without
#      specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.


from novaprinter import prettyPrinter
from helpers import retrieve_url, download_file
from html.parser import HTMLParser
from re import compile as re_compile

class legittorrents(object):
    url = 'http://www.legittorrents.info'
    name = 'Legit Torrents'
    supported_categories = {'all': '0', 'movies': '1', 'tv': '13', 'music': '2', 'games': '3', 'anime': '5', 'books': '6'}

    def download_torrent(self, info):
        print(download_file(info))

    class MyHtmlParseWithBlackJack(HTMLParser):
        """ Parser class """
        def __init__(self, url):
            HTMLParser.__init__(self)
            self.url = url
            self.current_item = None
            self.save_item_key = None

        def handle_starttag(self, tag, attrs):
            """ Parser's start tag handler """
            if self.current_item:
                params = dict(attrs)
                if tag == "a":
                    link = params["href"]
                    if link.startswith("index") and "title" in params:
                        #description link
                        self.current_item["name"] = params["title"][14:]
                        self.current_item["desc_link"] = "/".join((self.url, link))
                    elif link.startswith("download"):
                        self.current_item["link"] = "/".join((self.url, link))
                elif tag == "td":
                    if "width" in params and params["width"] == "30" and not "leech" in self.current_item:
                        self.save_item_key = "leech" if "seeds" in self.current_item else "seeds"

            elif tag == "tr":
                self.current_item = {}
                self.current_item["size"] = ""
                self.current_item["engine_url"] = self.url

        def handle_endtag(self, tag):
            """ Parser's end tag handler """
            if self.current_item and tag == "tr":
                if len(self.current_item) > 4:
                    prettyPrinter(self.current_item)
                self.current_item = None

        def handle_data(self, data):
            """ Parser's data handler """
            if self.save_item_key:
                self.current_item[self.save_item_key] = data.strip()
                self.save_item_key = None

    def search(self, what, cat='all'):
        """ Performs search """
        query = "".join((self.url, "/index.php?page=torrents&search=", what, "&category=", self.supported_categories.get(cat, '0'), "&active=1"))

        get_table = re_compile('(?s)<table\sclass="lista".*>(.*)</table>')
        data = get_table.search(retrieve_url(query)).group(0)
        #extract first ten pages of next results
        next_pages = re_compile('(?m)<option value="(.*)">[0-9]+</option>')
        next_pages = ["".join((self.url, page)) for page in next_pages.findall(data)[:10]]

        parser = self.MyHtmlParseWithBlackJack(self.url)
        parser.feed(data)
        parser.close()

        for page in next_pages:
            parser.feed(get_table.search(retrieve_url(page)).group(0))
            parser.close()

searchengine/nova3/engines/piratebay.png (deleted image, 278 B)
@ -1,176 +0,0 @@
|
|||||||
#VERSION: 2.15
#AUTHORS: Fabien Devaux (fab@gnux.info)
#CONTRIBUTORS: Christophe Dumez (chris@qbittorrent.org)
#              Arthur (custparasite@gmx.se)
#              Diego de las Heras (ngosang@hotmail.es)

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
#   this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
#   notice, this list of conditions and the following disclaimer in the
#   documentation and/or other materials provided with the distribution.
# * Neither the name of the author nor the names of its contributors may be
#   used to endorse or promote products derived from this software without
#   specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

from html.parser import HTMLParser
#qBt
from novaprinter import prettyPrinter
from helpers import download_file, retrieve_url

class piratebay(object):
    """ Search engine class """
    url = 'https://thepiratebay.org'
    name = 'The Pirate Bay'
    supported_categories = {'all': '0', 'music': '100', 'movies': '200', 'games': '400', 'software': '300'}

    def download_torrent(self, info):
        """ Downloader """
        print(download_file(info))

    class MyHtmlParseWithBlackJack(HTMLParser):
        """ Parser class """
        def __init__(self, list_searches, url):
            HTMLParser.__init__(self)
            self.list_searches = list_searches
            self.url = url
            self.current_item = None
            self.save_item = None
            self.result_table = False  # True when the table with results has been found
            self.result_tbody = False
            self.add_query = True
            self.result_query = False

        def handle_start_tag_default(self, attrs):
            """ Default handler for start tag dispatcher """
            pass

        def handle_start_tag_a(self, attrs):
            """ Handler for start tag a """
            params = dict(attrs)
            link = params["href"]
            if link.startswith("/torrent"):
                self.current_item["desc_link"] = "".join((self.url, link))
                self.save_item = "name"
            elif link.startswith("magnet"):
                self.current_item["link"] = link
                # end of the 'name' item
                self.current_item['name'] = self.current_item['name'].strip()
                self.save_item = None

        def handle_start_tag_font(self, attrs):
            """ Handler for start tag font """
            for attr in attrs:
                if attr[1] == "detDesc":
                    self.save_item = "size"
                    break

        def handle_start_tag_td(self, attrs):
            """ Handler for start tag td """
            for attr in attrs:
                if attr[1] == "right":
                    if "seeds" in self.current_item.keys():
                        self.save_item = "leech"
                    else:
                        self.save_item = "seeds"
                    break

        def handle_starttag(self, tag, attrs):
            """ Parser's start tag handler """
            if self.current_item:
                dispatcher = getattr(self, "_".join(("handle_start_tag", tag)), self.handle_start_tag_default)
                dispatcher(attrs)

            elif self.result_tbody:
                if tag == "tr":
                    self.current_item = {"engine_url" : self.url}

            elif tag == "table":
                self.result_table = "searchResult" == attrs[0][1]

            elif self.add_query:
                if self.result_query and tag == "a":
                    if len(self.list_searches) < 10:
                        self.list_searches.append(attrs[0][1])
                    else:
                        self.add_query = False
                        self.result_query = False
                elif tag == "div":
                    self.result_query = "center" == attrs[0][1]

        def handle_endtag(self, tag):
            """ Parser's end tag handler """
            if self.result_tbody:
                if tag == "tr":
                    prettyPrinter(self.current_item)
                    self.current_item = None
                elif tag == "font":
                    self.save_item = None
                elif tag == "table":
                    self.result_table = self.result_tbody = False

            elif self.result_table:
                if tag == "thead":
                    self.result_tbody = True
                elif tag == "table":
                    self.result_table = self.result_tbody = False

            elif self.add_query and self.result_query:
                if tag == "div":
                    self.add_query = self.result_query = False

        def handle_data(self, data):
            """ Parser's data handler """
            if self.save_item:
                if self.save_item == "size":
                    temp_data = data.split()
                    if "Size" in temp_data:
                        indx = temp_data.index("Size")
                        self.current_item[self.save_item] = temp_data[indx + 1] + " " + temp_data[indx + 2]

                elif self.save_item == "name":
                    # names with special characters like '&' are split into several pieces
                    if 'name' not in self.current_item:
                        self.current_item['name'] = ''
                    self.current_item['name'] += data

                else:
                    self.current_item[self.save_item] = data
                    self.save_item = None


    def search(self, what, cat='all'):
        """ Performs search """
        # prepare query; the '7' orders results by seeders
        cat = cat.lower()
        query = "/".join((self.url, "search", what, "0", "7", self.supported_categories[cat]))

        response = retrieve_url(query)

        list_searches = []
        parser = self.MyHtmlParseWithBlackJack(list_searches, self.url)
        parser.feed(response)
        parser.close()

        parser.add_query = False
        for search_query in list_searches:
            response = retrieve_url(self.url + search_query)
            parser.feed(response)
            parser.close()

        return
Before Width: | Height: | Size: 561 B |
@ -1,97 +0,0 @@
#VERSION: 2.0
#AUTHORS: Douman (custparasite@gmx.se)
#CONTRIBUTORS: Diego de las Heras (ngosang@hotmail.es)

from novaprinter import prettyPrinter
from helpers import retrieve_url, download_file
from re import compile as re_compile
from html.parser import HTMLParser

class torlock(object):
    url = "https://www.torlock.com"
    name = "TorLock"
    supported_categories = {'all' : 'all',
                            'anime' : 'anime',
                            'software' : 'software',
                            'games' : 'game',
                            'movies' : 'movie',
                            'music' : 'music',
                            'tv' : 'television',
                            'books' : 'ebooks'}

    def download_torrent(self, info):
        print(download_file(info))

    class MyHtmlParser(HTMLParser):
        """ Sub-class for parsing results """
        def __init__(self, url):
            HTMLParser.__init__(self)
            self.url = url
            self.article_found = False  # True when the <article> with results has been found
            self.item_found = False
            self.item_bad = False  # set to True for malicious links
            self.current_item = None  # dict for the found item
            self.item_name = None  # key name in the current_item dict
            self.parser_class = {"ts" : "size",
                                 "tul" : "seeds",
                                 "tdl" : "leech"}

        def handle_starttag(self, tag, attrs):
            params = dict(attrs)
            if self.item_found:
                if tag == "td":
                    if "class" in params:
                        self.item_name = self.parser_class.get(params["class"], None)
                        if self.item_name:
                            self.current_item[self.item_name] = ""

            elif self.article_found and tag == "a":
                if "href" in params:
                    link = params["href"]
                    if link.startswith("/torrent"):
                        self.current_item["desc_link"] = "".join((self.url, link))
                        self.current_item["link"] = "".join((self.url, "/tor/", link.split('/')[2], ".torrent"))
                        self.current_item["engine_url"] = self.url
                        self.item_found = True
                        self.item_name = "name"
                        self.current_item["name"] = ""
                        self.item_bad = "rel" in params and params["rel"] == "nofollow"

            elif tag == "article":
                self.article_found = True
                self.current_item = {}

        def handle_data(self, data):
            if self.item_name:
                self.current_item[self.item_name] += data

        def handle_endtag(self, tag):
            if tag == "article":
                self.article_found = False
            elif self.item_name and (tag == "a" or tag == "td"):
                self.item_name = None
            elif self.item_found and tag == "tr":
                self.item_found = False
                if not self.item_bad:
                    prettyPrinter(self.current_item)
                self.current_item = {}

    def search(self, query, cat='all'):
        """ Performs search """
        query = query.replace("%20", "-")

        parser = self.MyHtmlParser(self.url)
        page = "".join((self.url, "/", self.supported_categories[cat], "/torrents/", query, ".html?sort=seeds&page=1"))
        html = retrieve_url(page)
        parser.feed(html)

        counter = 1
        additional_pages = re_compile(r"/{0}/torrents/{1}.html\?sort=seeds&page=[0-9]+".format(self.supported_categories[cat], query))
        list_searches = additional_pages.findall(html)[:-1]  # the last link is 'next' (i.e. the second page)
        for page in map(lambda link: "".join((self.url, link)), list_searches):
            html = retrieve_url(page)
            parser.feed(html)
            counter += 1
            if counter > 3:
                break
        parser.close()
Before Width: | Height: | Size: 122 B |
@ -1,119 +0,0 @@
#VERSION: 2.22
#AUTHORS: Diego de las Heras (ngosang@hotmail.es)

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
#   this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
#   notice, this list of conditions and the following disclaimer in the
#   documentation and/or other materials provided with the distribution.
# * Neither the name of the author nor the names of its contributors may be
#   used to endorse or promote products derived from this software without
#   specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

from novaprinter import prettyPrinter
from helpers import retrieve_url, download_file
from html.parser import HTMLParser
from urllib.parse import urlencode

class torrentz(object):
    # mandatory properties
    url = 'https://torrentz2.eu'
    name = 'Torrentz2'
    supported_categories = {'all': ''}

    trackers_list = ['udp://tracker.coppersurfer.tk:6969/announce',
                     'udp://tracker.opentrackr.org:1337/announce',
                     'udp://tracker.zer0day.to:1337/announce',
                     'udp://tracker.leechers-paradise.org:6969/announce',
                     'udp://9.rarbg.com:2710/announce',
                     'udp://explodie.org:6969/announce']

    class MyHtmlParser(HTMLParser):
        def __init__(self, results, url, trackers):
            HTMLParser.__init__(self)
            self.results = results
            self.url = url
            self.trackers = trackers
            self.td_counter = None
            self.current_item = None

        def handle_starttag(self, tag, attrs):
            params = dict(attrs)
            if tag == 'a':
                if 'href' in params:
                    self.current_item = {}
                    self.td_counter = 0
                    self.current_item['link'] = 'magnet:?xt=urn:btih:' + \
                                                params['href'].strip(' /') + self.trackers
                    self.current_item['desc_link'] = self.url + params['href'].strip()
            elif tag == 'span':
                if self.td_counter is not None:
                    self.td_counter += 1
                    if self.td_counter > 5:  # safety
                        self.td_counter = None

        def handle_data(self, data):
            if self.td_counter == 0:
                if 'name' not in self.current_item:
                    self.current_item['name'] = ''
                self.current_item['name'] += data
            elif self.td_counter == 3:
                if 'size' not in self.current_item:
                    self.current_item['size'] = data.strip()
                    if self.current_item['size'] == 'Pending':
                        self.current_item['size'] = ''
            elif self.td_counter == 4:
                if 'seeds' not in self.current_item:
                    self.current_item['seeds'] = data.strip().replace(',', '')
                    if not self.current_item['seeds'].isdigit():
                        self.current_item['seeds'] = 0
            elif self.td_counter == 5:
                if 'leech' not in self.current_item:
                    self.current_item['leech'] = data.strip().replace(',', '')
                    if not self.current_item['leech'].isdigit():
                        self.current_item['leech'] = 0

                # display item
                self.td_counter = None
                self.current_item['engine_url'] = self.url
                if self.current_item['name'].find(' »') != -1:
                    self.current_item['name'] = self.current_item['name'].split(' »')[0]
                self.current_item['link'] += '&' + urlencode({'dn' : self.current_item['name']})

                prettyPrinter(self.current_item)
                self.results.append('a')

    def download_torrent(self, info):
        print(download_file(info))

    def search(self, what, cat='all'):
        # initialize trackers for magnet links
        trackers = '&' + '&'.join(urlencode({'tr' : tracker}) for tracker in self.trackers_list)

        results_list = []
        parser = self.MyHtmlParser(results_list, self.url, trackers)
        i = 0
        while i < 6:
            # "what" is already urlencoded
            html = retrieve_url(self.url + '/search?f=%s&p=%d' % (what, i))
            parser.feed(html)
            if len(results_list) < 1:
                break
            del results_list[:]
            i += 1
        parser.close()
@ -1,6 +0,0 @@
btdb: 1.01
demonoid: 1.23
legittorrents: 2.02
piratebay: 2.15
torlock: 2.0
torrentz: 2.22