Sitemap generator
import asyncio
import logging
import re
import urllib.parse

import aiohttp

from pysitemap.format_processors.text import TextWriter
from pysitemap.format_processors.xml import XMLWriter


class Crawler:

    format_processors = {
        'xml': XMLWriter,
        'txt': TextWriter,
    }
    def __init__(self, rooturl, out_file, out_format='xml', maxtasks=100):
        """
        Crawler constructor.

        :param rooturl: root URL of the site to crawl
        :type rooturl: str
        :param out_file: file the sitemap is written to
        :type out_file: str
        :param out_format: sitemap format, 'xml' or 'txt'; defaults to 'xml'
        :type out_format: str
        :param maxtasks: maximum number of concurrent tasks; defaults to 100
        :type maxtasks: int
        """
        self.rooturl = rooturl
        self.todo = set()    # URLs queued for processing
        self.busy = set()    # URLs currently being processed
        self.done = {}       # URL -> True (parsed) or False (error)
        self.tasks = set()   # pending asyncio tasks
        self.sem = asyncio.Semaphore(maxtasks)
        # The session keeps cookies between requests and reuses a connection pool.
        self.session = aiohttp.ClientSession()
        self.writer = self.format_processors.get(out_format)(out_file)
    async def run(self):
        """
        Main entry point: crawl the whole site, then write the sitemap.
        """
        t = asyncio.ensure_future(self.addurls([(self.rooturl, '')]))
        await asyncio.sleep(1)
        # Poll until no URL is being processed any more
        while self.busy:
            await asyncio.sleep(1)
        await t
        await self.session.close()
        # Write out only the URLs that were fetched successfully
        await self.writer.write([key for key, value in self.done.items() if value])
    async def addurls(self, urls):
        """
        Resolve, filter, and enqueue URLs, spawning a processing task for each.

        :param urls: iterable of (url, parenturl) pairs
        """
        for url, parenturl in urls:
            url = urllib.parse.urljoin(parenturl, url)
            url, frag = urllib.parse.urldefrag(url)
            if (url.startswith(self.rooturl) and
                    url not in self.busy and
                    url not in self.done and
                    url not in self.todo):
                self.todo.add(url)
                # Acquire the semaphore to bound concurrency
                await self.sem.acquire()
                # Create an async task for this URL
                task = asyncio.ensure_future(self.process(url))
                # Add a callback to the task to release the semaphore
                task.add_done_callback(lambda t: self.sem.release())
                # Add a callback to remove the finished task from the task set
                task.add_done_callback(self.tasks.remove)
                # Track the task
                self.tasks.add(task)
    async def process(self, url):
        """
        Fetch and parse a single URL.

        :param url: absolute URL to process
        """
        print('processing:', url)

        # Move the URL from the todo queue to the busy set
        self.todo.remove(url)
        self.busy.add(url)

        try:
            resp = await self.session.get(url)  # await the response
        except Exception as exc:
            # On any exception, mark the URL as bad
            print('...', url, 'has error', repr(str(exc)))
            self.done[url] = False
        else:
            # Only responses with status 200 and content type 'text/html' are
            # parsed; the header may be absent, hence the `or ''` guard.
            if (resp.status == 200 and
                    'text/html' in (resp.headers.get('content-type') or '')):
                data = (await resp.read()).decode('utf-8', 'replace')
                urls = re.findall(r'(?i)href=["\']?([^\s"\'<>]+)', data)
                asyncio.ensure_future(self.addurls([(u, url) for u in urls]))
            resp.close()
            # No exception was raised, so mark the URL as good
            self.done[url] = True

        self.busy.remove(url)
        logging.info('%d completed tasks, %d still pending, todo %d',
                     len(self.done), len(self.tasks), len(self.todo))
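A minimal usage sketch follows, assuming the module is run directly. The root URL and output file name are illustrative assumptions, and the pre-`asyncio.run` loop style matches what the class expects, since its `aiohttp.ClientSession` is created in `__init__` before the loop runs.

if __name__ == '__main__':
    # Usage sketch: the URL and file name below are illustrative assumptions.
    logging.basicConfig(level=logging.INFO)
    crawler = Crawler('https://example.com/', out_file='sitemap.xml', out_format='xml')
    loop = asyncio.get_event_loop()
    loop.run_until_complete(crawler.run())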