# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
import logging
import time

import fake_useragent
from scrapy import signals
from scrapy.downloadermiddlewares.retry import RetryMiddleware
from scrapy.downloadermiddlewares.useragent import UserAgentMiddleware
from scrapy.utils.response import response_status_message

# Note: "from scrapy.conf import settings" was removed in modern Scrapy;
# settings are read through crawler.settings in from_crawler() below.
class ElabspiderSpiderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        # from_crawler() connects this handler, so the method must exist
        # (it was missing in the original, which raised AttributeError).
        spider.logger.info('Spider opened: %s' % spider.name)
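# Usage sketch (assumption: the project package is named "elabspider"):
# spider middlewares are activated through the SPIDER_MIDDLEWARES setting in
# settings.py, where the number controls ordering among the built-ins.
#
# SPIDER_MIDDLEWARES = {
#     'elabspider.middlewares.ElabspiderSpiderMiddleware': 543,
# }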
class ElabspiderDownloaderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.
        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        #
        # TODO: toggle the proxy on and off here.
        # proxy_user_pass = settings['PROXY_IDENTIFY'] + ':' + settings['PROXY_SECRETKEY']
        # encoded_proxy_pass = base64.encodebytes(proxy_user_pass.encode('utf-8'))
        # logging.info('process request url: ' + request.url)
        # request.meta['proxy'] = settings['PROXY_HOST'] + ':' + settings['PROXY_PORT']
        # request.headers['Proxy-Authorization'] = 'Basic ' + 'SDVQMDI5OU44MzBBQzlDRDo1MTZGOTVEMDNFQjFGMDI2'
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        # Must either:
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        # Use the public response.url, not the private response._url.
        logging.info('receive response url: %s status: %s' % (response.url, response.status))
        # if response.status != 200:
        #     logging.debug('retry url: ' + response.url)
        #     # proxy = self.get_random_proxy()
        #     # request.meta['proxy'] = proxy
        #     return request
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)

    # def get_random_proxy(self):
    #     # Block until proxies.txt is non-empty, then pick one at random.
    #     while 1:
    #         with open('proxies.txt', 'r') as f:
    #             proxies = f.readlines()
    #         if proxies:
    #             break
    #         else:
    #             time.sleep(1)
    #     proxy = random.choice(proxies).strip()
    #     return proxy
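# Illustrative sketch, not part of the original project: the commented-out
# get_random_proxy() idea above, wired into a minimal downloader middleware.
# The middleware name and the proxies.txt layout (one "host:port" per line,
# reachable over plain HTTP) are assumptions.
import random  # needed only by the sketch below


class RandomProxyMiddleware(object):
    def process_request(self, request, spider):
        # Re-read the file on every request so the proxy list can be
        # refreshed while the crawl is running.
        try:
            with open('proxies.txt', 'r') as f:
                proxies = [line.strip() for line in f if line.strip()]
        except IOError:
            return None
        if proxies:
            request.meta['proxy'] = 'http://' + random.choice(proxies)
        return None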
class TooManyRequestsRetryMiddleware(RetryMiddleware):
    """Retry middleware that pauses the whole crawl when the server answers 429."""

    def __init__(self, crawler):
        super(TooManyRequestsRetryMiddleware, self).__init__(crawler.settings)
        self.crawler = crawler

    @classmethod
    def from_crawler(cls, crawler):
        return cls(crawler)

    def process_response(self, request, response, spider):
        if request.meta.get('dont_retry', False):
            return response
        elif response.status == 429:
            # Pause the engine so no new requests are scheduled, back off,
            # then retry. If the rate limit resets every minute, sleep 60
            # seconds instead, and so on. time.sleep() deliberately blocks
            # the whole crawler here.
            self.crawler.engine.pause()
            time.sleep(10)
            self.crawler.engine.unpause()
            reason = response_status_message(response.status)
            return self._retry(request, reason, spider) or response
        elif response.status in self.retry_http_codes:
            reason = response_status_message(response.status)
            return self._retry(request, reason, spider) or response
        return response
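# Usage sketch (assumption: the project package is named "elabspider"): enable
# this middleware in settings.py in place of the built-in RetryMiddleware so the
# two do not retry the same responses twice. RETRY_TIMES and RETRY_HTTP_CODES
# are the standard settings read by the RetryMiddleware base class; the values
# below are examples.
#
# DOWNLOADER_MIDDLEWARES = {
#     'scrapy.downloadermiddlewares.retry.RetryMiddleware': None,
#     'elabspider.middlewares.TooManyRequestsRetryMiddleware': 543,
# }
# RETRY_TIMES = 5
# RETRY_HTTP_CODES = [500, 502, 503, 504, 522, 524, 408]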
class UserAgent(UserAgentMiddleware):
    """Downloader middleware that sets a random User-Agent on every request."""

    def __init__(self, user_agent_path=None):
        # Build the fake_useragent pool once instead of on every request;
        # path= points at the local cache file used by older fake_useragent
        # releases, as in the original settings['USER_AGENT_PATH'] usage.
        self.ua = fake_useragent.UserAgent(path=user_agent_path)

    @classmethod
    def from_crawler(cls, crawler):
        # scrapy.conf is gone in modern Scrapy; read the setting from the crawler.
        return cls(crawler.settings.get('USER_AGENT_PATH'))

    def process_request(self, request, spider):
        # agent = random.choice(self.user_agent_list)
        agent = self.ua.random
        if agent:
            # spider.logger.debug('Current UserAgent: %s' % agent)
            request.headers.setdefault('User-Agent', agent)
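# Usage sketch (assumption: the project package is named "elabspider"): enable
# the random User-Agent middleware and disable Scrapy's default
# UserAgentMiddleware so the randomized header is not overwritten.
# USER_AGENT_PATH must point at a fake_useragent cache file; the path below
# is illustrative.
#
# DOWNLOADER_MIDDLEWARES = {
#     'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
#     'elabspider.middlewares.UserAgent': 400,
# }
# USER_AGENT_PATH = '/tmp/fake_useragent.json'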