Sitemap generator

import re
import urlparse

import mechanize

try:
    import gevent
    from gevent import monkey, pool
    # monkey.patch_all() makes the standard socket library cooperative,
    # so several parse() greenlets can fetch pages concurrently.
    monkey.patch_all()
    gevent_installed = True
except ImportError:
    print("Gevent is not installed. Parsing will be slower.")
    gevent_installed = False


class Crawler:
    def __init__(self, url, outputfile='sitemap.xml', logfile='error.log', oformat='xml'):
        self.url = url
        self.logfile = open(logfile, 'a')
        self.oformat = oformat
        self.outputfile = outputfile
        # Sets of URLs queued for crawling and URLs already seen.
        self.urls = set([url])
        self.visited = set([url])
        self.exts = ['htm', 'php']
        self.allowed_regex = r'\.((?!htm)(?!php)\w+)$'

    def set_exts(self, exts):
        self.exts = exts

    def allow_regex(self, regex=None):
        if regex is not None:
            self.allowed_regex = regex
        else:
            allowed_regex = ''
            for ext in self.exts:
                allowed_regex += '(?!{})'.format(ext)
            self.allowed_regex = r'\.({}\w+)$'.format(allowed_regex)
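    # Example: with the default exts ['htm', 'php'] this builds
    # r'\.((?!htm)(?!php)\w+)$'; is_valid() rejects any URL that
    # pattern matches, i.e. links ending in any other file extension.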

    def crawl(self, echo=False, pool_size=1):
        self.echo = echo
        self.regex = re.compile(self.allowed_regex)
        if gevent_installed and pool_size > 1:
            self.pool = pool.Pool(pool_size)
            self.pool.spawn(self.parse_gevent)
            self.pool.join()
        else:
            while len(self.urls) > 0:
                self.parse()
        if self.oformat == 'xml':
            self.write_xml()

    def parse_gevent(self):
        self.parse()
        while len(self.urls) > 0 and not self.pool.full():
            self.pool.spawn(self.parse_gevent)

    def parse(self):
        if self.echo:
            if not gevent_installed:
                print('{} pages parsed :: {} pages in the queue'.format(len(self.visited), len(self.urls)))
            else:
                print('{} pages parsed :: {} parsing processes :: {} pages in the queue'.format(len(self.visited), len(self.pool), len(self.urls)))
        # Take the next URL from the queue and fetch it with a
        # mechanize browser object.
        if not self.urls:
            return
        else:
            url = self.urls.pop()
            br = mechanize.Browser()
        try:
            response = br.open(url)
            if response.code >= 400:
                self.errlog("Error {} at url {}".format(response.code, url))
                return
            for link in br.links():
                newurl = urlparse.urljoin(link.base_url, link.url)
                if self.is_valid(newurl):
                    self.visited.update([newurl])
                    self.urls.update([newurl])
        except Exception as e:
            self.errlog(str(e))
        finally:
            br.close()
            del br

    def is_valid(self, url):
        if url in self.visited:
            return False
        if self.url not in url:
            return False
        if re.search(self.regex, url):
            return False
        return True

    def errlog(self, msg):
        self.logfile.write(msg)
        self.logfile.write('\n')

    def write_xml(self):
        of = open(self.outputfile, 'w')
        of.write('<?xml version="1.0" encoding="utf-8"?>\n')
        of.write('<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9 http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd">\n')
        url_str = '<url><loc>{}</loc></url>\n'
        while self.visited:
            of.write(url_str.format(self.visited.pop()))
        of.write('</urlset>')
        of.close()
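

# A minimal usage sketch; 'http://example.com/' and pool_size=10 are
# placeholders. mechanize is required; gevent is optional, and
# pool_size > 1 only takes effect when gevent is installed.
if __name__ == '__main__':
    crawler = Crawler('http://example.com/', outputfile='sitemap.xml')
    crawler.crawl(echo=True, pool_size=10)  # writes sitemap.xml when done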