# -*- coding: utf-8 -*-
from __future__ import print_function

import re
import sys
import urlparse

import requests
from lxml import html

try:
    # gevent's monkey patching must happen before the threading module is
    # used anywhere; drop a pre-loaded threading module so the patch is clean.
    if 'threading' in sys.modules:
        del sys.modules['threading']
        print('threading module loaded before patching!')
        print('threading module deleted from sys.modules!\n')
    from gevent import monkey, pool
    monkey.patch_all()
    gevent_installed = True
except ImportError:
    print("Gevent is not installed. Parsing will run sequentially and will be slower.")
    gevent_installed = False


class Crawler:
    def __init__(self, url, outputfile='sitemap.xml', logfile='error.log', oformat='xml'):
        self.url = url
        self.logfile = open(logfile, 'a')
        self.oformat = oformat
        self.outputfile = outputfile

        # Sets for the URLs still queued and the URLs already seen.
        self.urls = set([url])
        self.visited = set([url])
        self.exts = ['htm', 'php']
        # Reject URLs whose extension is anything other than .htm/.php.
        self.allowed_regex = r'\.((?!htm)(?!php)\w+)$'
        self.errors = {'404': []}

    def set_exts(self, exts):
        self.exts = exts

    def allow_regex(self, regex=None):
        if regex is not None:
            self.allowed_regex = regex
        else:
            allowed_regex = ''
            for ext in self.exts:
                # Build a negative lookahead for each allowed extension.
                allowed_regex += '(?!{})'.format(ext)
            self.allowed_regex = r'\.({}\w+)$'.format(allowed_regex)

    def crawl(self, echo=False, pool_size=1):
        self.echo = echo
        self.regex = re.compile(self.allowed_regex)
        if gevent_installed and pool_size > 1:
            self.pool = pool.Pool(pool_size)
            self.pool.spawn(self.parse_gevent)
            self.pool.join()
        else:
            while len(self.urls) > 0:
                self.parse()
        if self.oformat == 'xml':
            self.write_xml()
        elif self.oformat == 'txt':
            self.write_txt()
        with open('errors.txt', 'w') as err_file:
            for key, val in self.errors.items():
                err_file.write(u'\n\nError {}\n\n'.format(key))
                err_file.write(u'\n'.join(set(val)))

    def parse_gevent(self):
        self.parse()
        while len(self.urls) > 0 and not self.pool.full():
            self.pool.spawn(self.parse_gevent)

    def parse(self):
        if self.echo:
            if not gevent_installed:
                print('{} pages parsed :: {} pages in the queue'.format(len(self.visited), len(self.urls)))
            else:
                print('{} pages parsed :: {} parsing processes :: {} pages in the queue'.format(len(self.visited), len(self.pool), len(self.urls)))

        if not self.urls:
            return
        url = self.urls.pop()
        try:
            response = requests.get(url)
            # Any non-200 status is recorded in self.errors, keyed by status code.
            if response.status_code != 200:
                if self.errors.get(str(response.status_code), False):
                    self.errors[str(response.status_code)].extend([url])
                else:
                    self.errors.update({str(response.status_code): [url]})
                self.errlog("Error {} at url {}".format(response.status_code, url))
                return

            # Extract every <a href> from the page and queue the valid ones.
            tree = html.fromstring(response.text)
            for link_tag in tree.findall('.//a'):
                link = link_tag.attrib.get('href', '')
                newurl = urlparse.urljoin(self.url, link)
                if self.is_valid(newurl):
                    self.visited.update([newurl])
                    self.urls.update([newurl])
        except Exception as e:
            self.errlog(str(e))

    def is_valid(self, url):
        if '#' in url:
            url = url[:url.find('#')]
        if url in self.visited:
            return False
        if self.url not in url:
            return False
        if re.search(self.regex, url):
            return False
        return True

    def errlog(self, msg):
        self.logfile.write(msg)
        self.logfile.write('\n')

    def write_xml(self):
        of = open(self.outputfile, 'w')
        of.write('<?xml version="1.0" encoding="utf-8"?>\n')
        of.write('<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">\n')
        url_str = '<url><loc>{}</loc></url>\n'
        while self.visited:
            of.write(url_str.format(self.visited.pop()))
        of.write('</urlset>')
        of.close()

    def write_txt(self):
        of = open(self.outputfile, 'w')
        url_str = '{}\n'
        while self.visited:
            of.write(url_str.format(self.visited.pop()))
        of.close()
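

# Example usage: a minimal sketch, not part of the original module. The URL
# below is a hypothetical placeholder; point it at the site you want to map.
# With gevent installed, pool_size > 1 enables concurrent fetching.
if __name__ == '__main__':
    crawler = Crawler('http://www.example.com/', outputfile='sitemap.xml', oformat='xml')
    crawler.crawl(echo=True, pool_size=5)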