middlewares.py

# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
import logging
import time

import fake_useragent

from scrapy import signals
from scrapy.conf import settings  # legacy global settings object (deprecated; newer code uses crawler.settings)
from scrapy.downloadermiddlewares.retry import RetryMiddleware
from scrapy.downloadermiddlewares.useragent import UserAgentMiddleware
from scrapy.utils.response import response_status_message


class ElabspiderSpiderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        # from_crawler() connects this handler to the spider_opened signal,
        # so it has to exist on the class.
        spider.logger.info('Spider opened: %s' % spider.name)
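
# The spider middleware above is the unmodified Scrapy project template. If it
# needed to be activated, settings.py would carry an entry along these lines
# (a sketch; the module path "elabspider.middlewares" is inferred from the class name):
#
# SPIDER_MIDDLEWARES = {
#     'elabspider.middlewares.ElabspiderSpiderMiddleware': 543,
# }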


class ElabspiderDownloaderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.
        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called

        # proxy_user_pass = settings['PROXY_IDENTIFY'] + ':' + settings['PROXY_SECRETKEY']
        # encoded_proxy_pass = base64.encodebytes(bytes(proxy_user_pass.encode(encoding='utf-8')))
        # TODO: toggle the proxy on/off
        # logging.info(msg='process request url: ' + request.url)
        # request.meta['proxy'] = settings['PROXY_HOST'] + ':' + settings['PROXY_PORT']
        # request.headers['Proxy-Authorization'] = 'Basic ' + 'SDVQMDI5OU44MzBBQzlDRDo1MTZGOTVEMDNFQjFGMDI2'
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        # Must either:
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        logging.info(msg='receive response status url: ' + response.url + ' status: ' + str(response.status))
        # if response.status != 200:
        #     logging.debug('retry url: ' + response.url)
        #     # proxy = self.get_random_proxy()
        #     # request.meta['proxy'] = proxy
        #     return request
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)

    # def get_random_proxy(self):
    #     while 1:
    #         with open('proxies.txt', 'r') as f:
    #             proxies = f.readlines()
    #         if proxies:
    #             break
    #         else:
    #             time.sleep(1)
    #     proxy = random.choice(proxies).strip()
    #     return proxy
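
# A minimal sketch of the proxy wiring described by the commented-out code in
# ElabspiderDownloaderMiddleware.process_request above. The setting names
# (PROXY_HOST, PROXY_PORT, PROXY_IDENTIFY, PROXY_SECRETKEY) come from those
# comments; the helper itself is illustrative and is not used by the original code.
def _build_proxy_auth(identify, secretkey):
    """Build a Proxy-Authorization header value using HTTP Basic auth."""
    import base64
    # Basic auth is base64("user:password") prefixed with "Basic ".
    token = base64.b64encode(('%s:%s' % (identify, secretkey)).encode('utf-8'))
    return b'Basic ' + token

# Inside process_request the pieces would be combined roughly like this:
#     request.meta['proxy'] = settings['PROXY_HOST'] + ':' + settings['PROXY_PORT']
#     request.headers['Proxy-Authorization'] = _build_proxy_auth(
#         settings['PROXY_IDENTIFY'], settings['PROXY_SECRETKEY'])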


class TooManyRequestsRetryMiddleware(RetryMiddleware):
    # When the server answers 429 (Too Many Requests), pause the whole engine,
    # wait, then retry the request; other retryable status codes go through the
    # normal RetryMiddleware flow.

    def __init__(self, crawler):
        super(TooManyRequestsRetryMiddleware, self).__init__(crawler.settings)
        self.crawler = crawler

    @classmethod
    def from_crawler(cls, crawler):
        return cls(crawler)

    def process_response(self, request, response, spider):
        if request.meta.get('dont_retry', False):
            return response
        elif response.status == 429:
            self.crawler.engine.pause()
            time.sleep(10)  # If the rate limit is renewed in a minute, put 60 seconds, and so on.
            self.crawler.engine.unpause()
            reason = response_status_message(response.status)
            return self._retry(request, reason, spider) or response
        elif response.status in self.retry_http_codes:
            reason = response_status_message(response.status)
            return self._retry(request, reason, spider) or response
        return response
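
# Example of how this retry middleware could be enabled in settings.py (a sketch;
# the module path "elabspider.middlewares" and the priority number are assumptions;
# the built-in RetryMiddleware is disabled so the same response is not retried twice):
#
# DOWNLOADER_MIDDLEWARES = {
#     'scrapy.downloadermiddlewares.retry.RetryMiddleware': None,
#     'elabspider.middlewares.TooManyRequestsRetryMiddleware': 543,
# }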


class UserAgent(UserAgentMiddleware):
    # Rotates the User-Agent header by drawing a random value from a local
    # fake_useragent data file on every request.

    def __init__(self, user_agent=''):
        self.user_agent = user_agent

    def process_request(self, request, spider):
        # agent = random.choice(self.user_agent_list)
        agent = fake_useragent.UserAgent(path=settings['USER_AGENT_PATH']).random
        if agent:
            # print("********Current UserAgent:%s************" % agent)
            # log(level=logging.DEBUG, msg='Current UserAgent: ' + agent)
            request.headers.setdefault('User-Agent', agent)
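
# Example settings.py wiring for the UserAgent middleware (a sketch; the module
# path, the priority and the cache path are assumptions; USER_AGENT_PATH is the
# setting read above and should point to a local fake_useragent database file):
#
# USER_AGENT_PATH = '/path/to/fake_useragent.json'
# DOWNLOADER_MIDDLEWARES = {
#     'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
#     'elabspider.middlewares.UserAgent': 400,
# }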