diff --git a/src/search_engine/engines/btjunkie.py b/src/search_engine/engines/btjunkie.py
index 67e2da817..11b5f63fc 100644
--- a/src/search_engine/engines/btjunkie.py
+++ b/src/search_engine/engines/btjunkie.py
@@ -1,4 +1,4 @@
-#VERSION: 1.10
+#VERSION: 1.11
 #AUTHORS: Fabien Devaux (fab@gnux.info)
 from novaprinter import prettyPrinter
 import urllib
@@ -10,7 +10,7 @@ class btjunkie(object):
 
     def search(self, what):
         i = 1
-        while True:
+        while True and i<11:
             res = 0
             dat = urllib.urlopen(self.url+'/search?q=%s&o=52&p=%d'%(what,i)).read().decode('utf8', 'replace')
             # I know it's not very readable, but the SGML parser feels in pain
@@ -32,4 +32,4 @@ class btjunkie(object):
                 res = res + 1
             if res == 0:
                 break
-            i = i + 1
\ No newline at end of file
+            i = i + 1
diff --git a/src/search_engine/engines/isohunt.py b/src/search_engine/engines/isohunt.py
index 12f1b2619..8548f0808 100644
--- a/src/search_engine/engines/isohunt.py
+++ b/src/search_engine/engines/isohunt.py
@@ -1,4 +1,4 @@
-#VERSION: 1.00
+#VERSION: 1.01
 #AUTHORS: Gekko Dam Beer (gekko04@users.sourceforge.net)
 from novaprinter import prettyPrinter
 import sgmllib
@@ -67,7 +67,7 @@ class isohunt(object):
 
     def search(self, what):
         i = 1
-        while True:
+        while True and i<11:
             results = []
             parser = self.SimpleSGMLParser(results, self.url)
             dat = urllib.urlopen(self.url+'/torrents.php?ihq=%s&ihp=%s'%(what,i)).read().decode('utf-8', 'replace')
@@ -75,4 +75,4 @@ class isohunt(object):
             parser.close()
             if len(results) <= 0:
                 break
-            i += 1
\ No newline at end of file
+            i += 1
diff --git a/src/search_engine/engines/mininova.py b/src/search_engine/engines/mininova.py
index dca2b4d31..1f5e6f143 100644
--- a/src/search_engine/engines/mininova.py
+++ b/src/search_engine/engines/mininova.py
@@ -1,4 +1,4 @@
-#VERSION: 1.11
+#VERSION: 1.12
 #AUTHORS: Fabien Devaux (fab@gnux.info)
 from novaprinter import prettyPrinter
 import urllib
@@ -29,7 +29,7 @@ class mininova(object):
             else:
                 return ''.join([ get_text(n) for n in txt.childNodes])
         page = 1
-        while True:
+        while True and page<11:
             res = 0
             dat = urllib.urlopen(self.url+'/search/%s/seeds/%d'%(what, page)).read().decode('utf-8', 'replace')
             dat = re.sub("