Browse Source

Reformat Python code to be compliant with PEP 8

The following command is used:
`pycodestyle --ignore=E265,E722 --max-line-length=100 <py files>`
adaptive-webui-19844
Chocobo1 6 years ago
parent
commit
bbe76231cf
No known key found for this signature in database
GPG Key ID: 210D9C873253A68C
  1. 35
      src/searchengine/nova/helpers.py
  2. 37
      src/searchengine/nova/nova2.py
  3. 16
      src/searchengine/nova/nova2dl.py
  4. 13
      src/searchengine/nova/novaprinter.py
  5. 37
      src/searchengine/nova3/helpers.py
  6. 39
      src/searchengine/nova3/nova2.py
  7. 16
      src/searchengine/nova3/nova2dl.py
  8. 8
      src/searchengine/nova3/novaprinter.py

35
src/searchengine/nova/helpers.py

@@ -1,4 +1,4 @@
#VERSION: 1.41
#VERSION: 1.42
# Author:
# Christophe DUMEZ (chris@qbittorrent.org)
@@ -29,25 +29,30 @@
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re, htmlentitydefs
import tempfile
import gzip
import htmlentitydefs
import os
import StringIO, gzip, urllib2
import re
import socket
import socks
import re
import StringIO
import tempfile
import urllib2
# Some sites blocks default python User-agent
user_agent = 'Mozilla/5.0 (X11; Linux i686; rv:38.0) Gecko/20100101 Firefox/38.0'
headers = {'User-Agent': user_agent}
headers = {'User-Agent': user_agent}
# SOCKS5 Proxy support
if os.environ.has_key("sock_proxy") and len(os.environ["sock_proxy"].strip()) > 0:
if ("sock_proxy" in os.environ) and (len(os.environ["sock_proxy"].strip()) > 0):
proxy_str = os.environ["sock_proxy"].strip()
m=re.match(r"^(?:(?P<username>[^:]+):(?P<password>[^@]+)@)?(?P<host>[^:]+):(?P<port>\w+)$", proxy_str)
m = re.match(r"^(?:(?P<username>[^:]+):(?P<password>[^@]+)@)?(?P<host>[^:]+):(?P<port>\w+)$",
proxy_str)
if m is not None:
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, m.group('host'), int(m.group('port')), True, m.group('username'), m.group('password'))
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, m.group('host'),
int(m.group('port')), True, m.group('username'), m.group('password'))
socket.socket = socks.socksocket
def htmlentitydecode(s):
# First convert alpha entities (such as &eacute;)
# (Inspired from http://mail.python.org/pipermail/python-list/2007-June/443813.html)
@@ -59,14 +64,15 @@ def htmlentitydecode(s):
t = re.sub(u'&(%s);' % u'|'.join(htmlentitydefs.name2codepoint), entity2char, s)
# Then convert numerical entities (such as &#233;)
t = re.sub(u'&#(\d+);', lambda x: unichr(int(x.group(1))), t)
t = re.sub(r'&#(\d+);', lambda x: unichr(int(x.group(1))), t)
# Then convert hexa entities (such as &#x00E9;)
return re.sub(u'&#x(\w+);', lambda x: unichr(int(x.group(1),16)), t)
return re.sub(r'&#x(\w+);', lambda x: unichr(int(x.group(1), 16)), t)
def retrieve_url(url):
""" Return the content of the url page as a string """
req = urllib2.Request(url, headers = headers)
req = urllib2.Request(url, headers=headers)
try:
response = urllib2.urlopen(req)
except urllib2.URLError as errno:
@@ -90,12 +96,13 @@ def retrieve_url(url):
dat = htmlentitydecode(dat)
return dat
def download_file(url, referer=None):
""" Download file at url and write it to a file, return the path to the file and the url """
file, path = tempfile.mkstemp()
file = os.fdopen(file, "w")
# Download url
req = urllib2.Request(url, headers = headers)
req = urllib2.Request(url, headers=headers)
if referer is not None:
req.add_header('referer', referer)
response = urllib2.urlopen(req)
@@ -112,4 +119,4 @@ def download_file(url, referer=None):
file.write(dat)
file.close()
# return file path
return path+" "+url
return (path + " " + url)

37
src/searchengine/nova/nova2.py

@@ -1,4 +1,4 @@
#VERSION: 1.41
#VERSION: 1.42
# Author:
# Fabien Devaux <fab AT gnux DOT info>
@@ -56,6 +56,7 @@ CATEGORIES = {'all', 'movies', 'tv', 'music', 'games', 'anime', 'software', 'pic
# As a convention, try to list results by decreasing number of seeds or similar
################################################################################
def initialize_engines():
""" Import available engines
@@ -69,11 +70,11 @@ def initialize_engines():
if len(engi) == 0 or engi.startswith('_'):
continue
try:
#import engines.[engine]
# import engines.[engine]
engine_module = __import__(".".join(("engines", engi)))
#get low-level module
# get low-level module
engine_module = getattr(engine_module, engi)
#bind class name
# bind class name
globals()[engi] = getattr(engine_module, engi)
supported_engines.append(engi)
except:
@@ -81,6 +82,7 @@ def initialize_engines():
return supported_engines
def engines_to_xml(supported_engines):
""" Generates xml for supported engines """
tab = " " * 4
@@ -90,14 +92,16 @@ def engines_to_xml(supported_engines):
supported_categories = ""
if hasattr(search_engine, "supported_categories"):
supported_categories = " ".join((key for key in search_engine.supported_categories.keys()
supported_categories = " ".join((key
for key in search_engine.supported_categories.keys()
if key is not "all"))
yield "".join((tab, "<", short_name, ">\n",
tab, tab, "<name>", search_engine.name, "</name>\n",
tab, tab, "<url>", search_engine.url, "</url>\n",
tab, tab, "<categories>", supported_categories, "</categories>\n",
tab, "</", short_name, ">\n"))
yield "".join((tab, "<", short_name, ">\n",
tab, tab, "<name>", search_engine.name, "</name>\n",
tab, tab, "<url>", search_engine.url, "</url>\n",
tab, tab, "<categories>", supported_categories, "</categories>\n",
tab, "</", short_name, ">\n"))
def displayCapabilities(supported_engines):
"""
@@ -115,6 +119,7 @@ def displayCapabilities(supported_engines):
"</capabilities>"))
print(xml)
def run_search(engine_list):
""" Run search in engine
@@ -126,7 +131,7 @@ def run_search(engine_list):
engine, what, cat = engine_list
try:
engine = engine()
#avoid exceptions due to invalid category
# avoid exceptions due to invalid category
if hasattr(engine, 'supported_categories'):
if cat in engine.supported_categories:
engine.search(what, cat)
@@ -136,6 +141,7 @@ def run_search(engine_list):
except:
return False
def main(args):
fix_encoding()
supported_engines = initialize_engines()
@@ -152,18 +158,18 @@ def main(args):
raise SystemExit("./nova2.py [all|engine1[,engine2]*] <category> <keywords>\n"
"available engines: %s" % (','.join(supported_engines)))
#get only unique engines with set
# get only unique engines with set
engines_list = set(e.lower() for e in args[0].strip().split(','))
if 'all' in engines_list:
engines_list = supported_engines
else:
#discard un-supported engines
# discard un-supported engines
engines_list = [engine for engine in engines_list
if engine in supported_engines]
if not engines_list:
#engine list is empty. Nothing to do here
# engine list is empty. Nothing to do here
return
cat = args[1].lower()
@@ -174,11 +180,12 @@ def main(args):
what = urllib.quote(' '.join(args[2:]))
if THREADED:
#child process spawning is controlled min(number of searches, number of cpu)
# child process spawning is controlled min(number of searches, number of cpu)
pool = Pool(min(len(engines_list), MAX_THREADS))
pool.map(run_search, ([globals()[engine], what, cat] for engine in engines_list))
else:
map(run_search, ([globals()[engine], what, cat] for engine in engines_list))
if __name__ == "__main__":
main(argv[1:])

16
src/searchengine/nova/nova2dl.py

@@ -1,4 +1,4 @@
#VERSION: 1.20
#VERSION: 1.21
# Author:
# Christophe DUMEZ (chris@qbittorrent.org)
@@ -34,14 +34,16 @@ from helpers import download_file
supported_engines = dict()
engines = glob.glob(os.path.join(os.path.dirname(__file__), 'engines','*.py'))
engines = glob.glob(os.path.join(os.path.dirname(__file__), 'engines', '*.py'))
for engine in engines:
e = engine.split(os.sep)[-1][:-3]
if len(e.strip()) == 0: continue
if e.startswith('_'): continue
if len(e.strip()) == 0:
continue
if e.startswith('_'):
continue
try:
exec("from engines.%s import %s"%(e,e))
exec("engine_url = %s.url"%e)
exec("from engines.%s import %s" % (e, e))
exec("engine_url = %s.url" % e)
supported_engines[engine_url] = e
except:
pass
@@ -53,7 +55,7 @@ if __name__ == '__main__':
download_param = sys.argv[2].strip()
if engine_url not in list(supported_engines.keys()):
raise SystemExit('./nova2dl.py: this engine_url was not recognized')
exec("engine = %s()"%supported_engines[engine_url])
exec("engine = %s()" % supported_engines[engine_url])
if hasattr(engine, 'download_torrent'):
engine.download_torrent(download_param)
else:

13
src/searchengine/nova/novaprinter.py

@@ -1,4 +1,4 @@
#VERSION: 1.45
#VERSION: 1.46
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
@@ -24,21 +24,26 @@
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys, codecs
import codecs
import sys
from io import open
# Force UTF-8 printing
sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
def prettyPrinter(dictionary):
dictionary['size'] = anySizeToBytes(dictionary['size'])
outtext = "|".join((dictionary["link"], dictionary["name"].replace("|", " "), str(dictionary["size"]), str(dictionary["seeds"]), str(dictionary["leech"]), dictionary["engine_url"]))
outtext = "|".join((dictionary["link"], dictionary["name"].replace("|", " "),
str(dictionary["size"]), str(dictionary["seeds"]),
str(dictionary["leech"]), dictionary["engine_url"]))
if 'desc_link' in dictionary:
outtext = "|".join((outtext, dictionary["desc_link"]))
with open(1, 'w', encoding='utf-8', closefd=False) as utf8_stdout:
utf8_stdout.write(unicode("".join((outtext, "\n"))))
def anySizeToBytes(size_string):
"""
Convert a string like '1 KB' to '1024' (bytes)
@@ -63,6 +68,6 @@ def anySizeToBytes(size_string):
# convert
units_dict = {'T': 40, 'G': 30, 'M': 20, 'K': 10}
if units_dict.has_key(short_unit):
if short_unit in units_dict:
size = size * 2**units_dict[short_unit]
return int(size)

37
src/searchengine/nova3/helpers.py

@@ -1,4 +1,4 @@
#VERSION: 1.40
#VERSION: 1.41
# Author:
# Christophe DUMEZ (chris@qbittorrent.org)
@@ -27,25 +27,32 @@
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re, html.entities
import tempfile
import gzip
import html.entities
import io
import os
import io, gzip, urllib.request, urllib.error, urllib.parse
import re
import socket
import socks
import re
import tempfile
import urllib.error
import urllib.parse
import urllib.request
# Some sites blocks default python User-agent
user_agent = 'Mozilla/5.0 (X11; Linux i686; rv:38.0) Gecko/20100101 Firefox/38.0'
headers = {'User-Agent': user_agent}
headers = {'User-Agent': user_agent}
# SOCKS5 Proxy support
if "sock_proxy" in os.environ and len(os.environ["sock_proxy"].strip()) > 0:
proxy_str = os.environ["sock_proxy"].strip()
m=re.match(r"^(?:(?P<username>[^:]+):(?P<password>[^@]+)@)?(?P<host>[^:]+):(?P<port>\w+)$", proxy_str)
m = re.match(r"^(?:(?P<username>[^:]+):(?P<password>[^@]+)@)?(?P<host>[^:]+):(?P<port>\w+)$",
proxy_str)
if m is not None:
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, m.group('host'), int(m.group('port')), True, m.group('username'), m.group('password'))
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, m.group('host'),
int(m.group('port')), True, m.group('username'), m.group('password'))
socket.socket = socks.socksocket
def htmlentitydecode(s):
# First convert alpha entities (such as &eacute;)
# (Inspired from http://mail.python.org/pipermail/python-list/2007-June/443813.html)
@@ -57,14 +64,15 @@ def htmlentitydecode(s):
t = re.sub('&(%s);' % '|'.join(html.entities.name2codepoint), entity2char, s)
# Then convert numerical entities (such as &#233;)
t = re.sub('&#(\d+);', lambda x: chr(int(x.group(1))), t)
t = re.sub(r'&#(\d+);', lambda x: chr(int(x.group(1))), t)
# Then convert hexa entities (such as &#x00E9;)
return re.sub('&#x(\w+);', lambda x: chr(int(x.group(1),16)), t)
return re.sub(r'&#x(\w+);', lambda x: chr(int(x.group(1), 16)), t)
def retrieve_url(url):
""" Return the content of the url page as a string """
req = urllib.request.Request(url, headers = headers)
req = urllib.request.Request(url, headers=headers)
try:
response = urllib.request.urlopen(req)
except urllib.error.URLError as errno:
@@ -86,15 +94,16 @@ def retrieve_url(url):
pass
dat = dat.decode(charset, 'replace')
dat = htmlentitydecode(dat)
#return dat.encode('utf-8', 'replace')
# return dat.encode('utf-8', 'replace')
return dat
def download_file(url, referer=None):
""" Download file at url and write it to a file, return the path to the file and the url """
file, path = tempfile.mkstemp()
file = os.fdopen(file, "wb")
# Download url
req = urllib.request.Request(url, headers = headers)
req = urllib.request.Request(url, headers=headers)
if referer is not None:
req.add_header('referer', referer)
response = urllib.request.urlopen(req)
@@ -111,4 +120,4 @@ def download_file(url, referer=None):
file.write(dat)
file.close()
# return file path
return path+" "+url
return (path + " " + url)

39
src/searchengine/nova3/nova2.py

@@ -1,4 +1,4 @@
#VERSION: 1.41
#VERSION: 1.42
# Author:
# Fabien Devaux <fab AT gnux DOT info>
@@ -55,6 +55,7 @@ CATEGORIES = {'all', 'movies', 'tv', 'music', 'games', 'anime', 'software', 'pic
# As a convention, try to list results by decreasing number of seeds or similar
################################################################################
def initialize_engines():
""" Import available engines
@@ -68,11 +69,11 @@ def initialize_engines():
if len(engi) == 0 or engi.startswith('_'):
continue
try:
#import engines.[engine]
# import engines.[engine]
engine_module = __import__(".".join(("engines", engi)))
#get low-level module
# get low-level module
engine_module = getattr(engine_module, engi)
#bind class name
# bind class name
globals()[engi] = getattr(engine_module, engi)
supported_engines.append(engi)
except:
@@ -80,6 +81,7 @@ def initialize_engines():
return supported_engines
def engines_to_xml(supported_engines):
""" Generates xml for supported engines """
tab = " " * 4
@@ -89,14 +91,16 @@ def engines_to_xml(supported_engines):
supported_categories = ""
if hasattr(search_engine, "supported_categories"):
supported_categories = " ".join((key for key in search_engine.supported_categories.keys()
supported_categories = " ".join((key
for key in search_engine.supported_categories.keys()
if key is not "all"))
yield "".join((tab, "<", short_name, ">\n",
tab, tab, "<name>", search_engine.name, "</name>\n",
tab, tab, "<url>", search_engine.url, "</url>\n",
tab, tab, "<categories>", supported_categories, "</categories>\n",
tab, "</", short_name, ">\n"))
yield "".join((tab, "<", short_name, ">\n",
tab, tab, "<name>", search_engine.name, "</name>\n",
tab, tab, "<url>", search_engine.url, "</url>\n",
tab, tab, "<categories>", supported_categories, "</categories>\n",
tab, "</", short_name, ">\n"))
def displayCapabilities(supported_engines):
"""
@@ -114,6 +118,7 @@ def displayCapabilities(supported_engines):
"</capabilities>"))
print(xml)
def run_search(engine_list):
""" Run search in engine
@@ -125,7 +130,7 @@ def run_search(engine_list):
engine, what, cat = engine_list
try:
engine = engine()
#avoid exceptions due to invalid category
# avoid exceptions due to invalid category
if hasattr(engine, 'supported_categories'):
if cat in engine.supported_categories:
engine.search(what, cat)
@@ -136,6 +141,7 @@ def run_search(engine_list):
except:
return False
def main(args):
supported_engines = initialize_engines()
@@ -151,18 +157,18 @@ def main(args):
raise SystemExit("./nova2.py [all|engine1[,engine2]*] <category> <keywords>\n"
"available engines: %s" % (','.join(supported_engines)))
#get only unique engines with set
# get only unique engines with set
engines_list = set(e.lower() for e in args[0].strip().split(','))
if 'all' in engines_list:
engines_list = supported_engines
else:
#discard un-supported engines
# discard un-supported engines
engines_list = [engine for engine in engines_list
if engine in supported_engines]
if not engines_list:
#engine list is empty. Nothing to do here
# engine list is empty. Nothing to do here
return
cat = args[1].lower()
@@ -172,12 +178,13 @@ def main(args):
what = urllib.parse.quote(' '.join(args[2:]))
if THREADED:
#child process spawning is controlled min(number of searches, number of cpu)
# child process spawning is controlled min(number of searches, number of cpu)
with Pool(min(len(engines_list), MAX_THREADS)) as pool:
pool.map(run_search, ([globals()[engine], what, cat] for engine in engines_list))
else:
#py3 note: map is needed to be evaluated for content to be executed
# py3 note: map is needed to be evaluated for content to be executed
all(map(run_search, ([globals()[engine], what, cat] for engine in engines_list)))
if __name__ == "__main__":
main(argv[1:])

16
src/searchengine/nova3/nova2dl.py

@@ -1,4 +1,4 @@
#VERSION: 1.20
#VERSION: 1.21
# Author:
# Christophe DUMEZ (chris@qbittorrent.org)
@@ -34,14 +34,16 @@ from helpers import download_file
supported_engines = dict()
engines = glob.glob(os.path.join(os.path.dirname(__file__), 'engines','*.py'))
engines = glob.glob(os.path.join(os.path.dirname(__file__), 'engines', '*.py'))
for engine in engines:
e = engine.split(os.sep)[-1][:-3]
if len(e.strip()) == 0: continue
if e.startswith('_'): continue
if len(e.strip()) == 0:
continue
if e.startswith('_'):
continue
try:
exec("from engines.%s import %s"%(e,e))
exec("engine_url = %s.url"%e)
exec("from engines.%s import %s" % (e, e))
exec("engine_url = %s.url" % e)
supported_engines[engine_url] = e
except:
pass
@@ -53,7 +55,7 @@ if __name__ == '__main__':
download_param = sys.argv[2].strip()
if engine_url not in list(supported_engines.keys()):
raise SystemExit('./nova2dl.py: this engine_url was not recognized')
exec("engine = %s()"%supported_engines[engine_url])
exec("engine = %s()" % supported_engines[engine_url])
if hasattr(engine, 'download_torrent'):
engine.download_torrent(download_param)
else:

8
src/searchengine/nova3/novaprinter.py

@@ -1,4 +1,4 @@
#VERSION: 1.45
#VERSION: 1.46
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
@@ -24,9 +24,12 @@
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
def prettyPrinter(dictionary):
dictionary['size'] = anySizeToBytes(dictionary['size'])
outtext = "|".join((dictionary["link"], dictionary["name"].replace("|", " "), str(dictionary["size"]), str(dictionary["seeds"]), str(dictionary["leech"]), dictionary["engine_url"]))
outtext = "|".join((dictionary["link"], dictionary["name"].replace("|", " "),
str(dictionary["size"]), str(dictionary["seeds"]),
str(dictionary["leech"]), dictionary["engine_url"]))
if 'desc_link' in dictionary:
outtext = "|".join((outtext, dictionary["desc_link"]))
@@ -34,6 +37,7 @@ def prettyPrinter(dictionary):
with open(1, 'w', encoding='utf-8', closefd=False) as utf8stdout:
print(outtext, file=utf8stdout)
def anySizeToBytes(size_string):
"""
Convert a string like '1 KB' to '1024' (bytes)

Loading…
Cancel
Save