import urllib
from bs4 import BeautifulSoup
import urlparse
import mechanize
import pickle
import re


class Crawler:

    def __init__(self, url, outputfile='sitemap.xml', logfile='error.log', oformat='xml'):
        # Set the starting point for the spider and initialize
        # the mechanize browser object
        self.url = url
        self.br = mechanize.Browser()
        self.logfile = open(logfile, 'a')
        self.oformat = oformat
        self.outputfile = outputfile

        # Create sets for the URLs in the queue and the visited URLs
        self.urls = set([url])
        self.visited = set([url])
        self.exts = ['htm', 'php']
        self.allowed_regex = r'(\w+)\.((?!htm)(?!rar)\w+)$'

    def set_exts(self, exts):
        self.exts = exts

    def allow_regex(self, regex=None):
        if regex is not None:
            self.allowed_regex = regex
        else:
            allowed_regex = ''
            for ext in self.exts:
                # Build a negative lookahead for every allowed extension
                allowed_regex += '(?!{})'.format(ext)
            self.allowed_regex = r'(\w+)\.({}\w+)$'.format(allowed_regex)

    def crawl(self):
        self.regex = re.compile(self.allowed_regex)
        while len(self.urls) > 0:
            try:
                url = self.urls.pop()
                self.br.open(url)
                for link in self.br.links():
                    newurl = urlparse.urljoin(link.base_url, link.url)
                    if self.is_valid(newurl):
                        self.visited.update([newurl])
                        self.urls.update([newurl])
            except Exception as e:
                self.errlog(str(e))

        if self.oformat == 'xml':
            self.write_xml()

    def is_valid(self, url):
        # Skip URLs already seen, URLs outside the start domain,
        # and URLs whose extension is filtered out by the regex
        if url in self.visited:
            return False
        if self.url not in url:
            return False
        if re.search(self.regex, url):
            return False
        return True

    def errlog(self, msg):
        self.logfile.write(msg)
        self.logfile.write('\n')

    def write_xml(self):
        of = open(self.outputfile, 'w')
        of.write('<?xml version="1.0" encoding="utf-8"?>\n')
        of.write('<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">\n')
        url_str = '<url><loc>{}</loc></url>\n'
        while self.visited:
            of.write(url_str.format(self.visited.pop()))
        of.write('</urlset>')
        of.close()
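

# A minimal usage sketch, not part of the original listing: the target URL,
# the extension list, and the file names below are placeholders, so adjust
# them for the site being mapped.
if __name__ == '__main__':
    crawler = Crawler('http://www.example.com/', outputfile='sitemap.xml',
                      logfile='error.log', oformat='xml')
    crawler.set_exts(['htm', 'html', 'php'])  # extensions to treat as crawlable pages
    crawler.allow_regex()                     # rebuild the URL filter from the extension list
    crawler.crawl()                           # writes sitemap.xml once the queue is empty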