@@ -18,9 +18,8 @@ class Crawler:
         self.outputfile = outputfile
 
         # create lists for the urls in queue and visited urls
-        self.urls = [url]
-        self.visited = [url]
-        self.excepted = []
+        self.urls = set([url])
+        self.visited = set([url])
         self.exts = ['htm', 'php']
         self.allowed_regex = '(\w+)\.((?!htm)(?!rar)\w+)$'
 
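
Why the switch from lists to sets matters, sketched outside the patch (URLs invented for the example): a list stores duplicates and answers "url in urls" with a linear scan, while a set deduplicates on insert and tests membership in constant time on average.

    urls = ['http://example.com/a']
    urls.append('http://example.com/a')    # a list keeps the duplicate
    assert len(urls) == 2

    urls = set(['http://example.com/a'])
    urls.update(['http://example.com/a'])  # a set silently drops the duplicate
    assert len(urls) == 1                  # and membership tests are O(1) on average

The self.excepted list goes away at the same time; as far as the patch shows, nothing was ever added to it, so the guard that read it in is_valid (removed below) was dead code.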
@@ -40,25 +39,24 @@ class Crawler:
         self.regex = re.compile(self.allowed_regex)
         while len(self.urls)>0:
             try:
-                self.br.open(self.urls[0])
+                url = self.urls.pop()
+                self.br.open(url)
                 for link in self.br.links():
                     newurl = urlparse.urljoin(link.base_url,link.url)
                     #print newurl
                     if self.is_valid(newurl):
-                        self.visited.append(newurl)
-                        self.urls.append(newurl)
+                        self.visited.update([newurl])
+                        self.urls.update([newurl])
             except Exception, e:
                 self.errlog(e.message)
 
-            self.urls.pop(0)
-
         if self.oformat == 'xml':
             self.write_xml()
 
 
     def is_valid(self, url):
         valid = False
-        if url in self.visited and not url in self.excepted:
+        if url in self.visited:
             return False
         if not self.url in url:
             return False
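
The crawl loop is now the standard set-frontier pattern. A minimal self-contained sketch of the same logic, with a hypothetical extract_links() standing in for mechanize's br.open()/br.links() (names invented for the example):

    def crawl(start_url, extract_links):
        frontier = set([start_url])   # URLs still to fetch
        visited = set([start_url])    # every URL ever enqueued, so none is fetched twice
        while frontier:
            url = frontier.pop()      # set.pop() removes an arbitrary element
            for newurl in extract_links(url):
                if newurl not in visited:
                    visited.update([newurl])
                    frontier.update([newurl])
        return visited

    # toy usage against a canned link graph
    graph = {'http://example.com/': ['http://example.com/a'],
             'http://example.com/a': []}
    print(crawl('http://example.com/', lambda u: graph.get(u, [])))

One behavioural change worth noting: set.pop() returns an arbitrary element, so the crawl is no longer first-in-first-out the way the old self.urls[0] / self.urls.pop(0) walk was. For a sitemap generator that is harmless, since only the final visited set is written out.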
@@ -75,8 +73,8 @@ class Crawler:
         of.write('<?xml version="1.0" encoding="utf-8"?><!--Generated by Screaming Frog SEO Spider 2,55-->\n')
         of.write('<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9 http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd">\n')
         url_str = '<url><loc>{}</loc></url>\n'
-        for url in self.visited:
-            of.write(url_str.format(url))
+        while self.visited:
+            of.write(url_str.format(self.visited.pop()))
 
         of.write('</urlset>')
         of.close()
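
Note that the rewritten loop drains self.visited as it writes, so write_xml can only run once per crawl; if the set is needed afterwards, the old "for url in self.visited" iteration produced the same file without emptying it. A standalone sketch of the drain-and-write pattern (file name and URLs invented for the example):

    def write_sitemap(urls, path='sitemap.xml'):
        of = open(path, 'w')
        of.write('<?xml version="1.0" encoding="utf-8"?>\n')
        of.write('<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">\n')
        url_str = '<url><loc>{}</loc></url>\n'
        while urls:                               # consumes the set as it writes
            of.write(url_str.format(urls.pop()))
        of.write('</urlset>')
        of.close()

    write_sitemap(set(['http://example.com/', 'http://example.com/about.htm']))

URLs containing characters such as & would also need escaping (e.g. xml.sax.saxutils.escape) before being written into the XML.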