
[Python Crawler] Scrapy Middlewares: Configuring Exceptions, Proxies, and Retries


ProxyMiddleware(object):

import logging


class ProxyMiddleware(object):
    logger = logging.getLogger(__name__)

    # Before the request goes out, attach the proxy
    def process_request(self, request, spider):
        self.logger.debug('Using Proxy')
        request.meta['proxy'] = 'http://127.0.0.1:9743'

    # Operate on the response
    def process_response(self, request, response, spider):
        # Rewrite the response status; response.status seen in the
        # spider's parse callback will now show 201
        response.status = 201
        return response

    # Catch the exception, then attach the proxy and reschedule the request
    def process_exception(self, request, exception, spider):
        self.logger.debug('Got Exception.')
        self.logger.debug('Trying a second time.')
        request.meta['proxy'] = 'http://127.0.0.1:9743'
        return request
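To activate this middleware, register it in DOWNLOADER_MIDDLEWARES. A minimal sketch, assuming the class lives in myproject/middlewares.py (the module path and priority value are placeholders, not from the original post):

# settings.py -- 'myproject' is a placeholder for your actual package
DOWNLOADER_MIDDLEWARES = {
    'myproject.middlewares.ProxyMiddleware': 543,
}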

ProcessAllExceptionMiddleware(object):

from twisted.internet import defer
from twisted.internet.error import TimeoutError, DNSLookupError, \
    ConnectionRefusedError, ConnectionDone, ConnectError, \
    ConnectionLost, TCPTimedOutError
from scrapy.http import HtmlResponse
from twisted.web.client import ResponseFailed
from scrapy.core.downloader.handlers.http11 import TunnelError


class ProcessAllExceptionMiddleware(object):
    ALL_EXCEPTIONS = (defer.TimeoutError, TimeoutError, DNSLookupError,
                      ConnectionRefusedError, ConnectionDone, ConnectError,
                      ConnectionLost, TCPTimedOutError, ResponseFailed,
                      IOError, TunnelError)

    def process_response(self, request, response, spider):
        # Catch responses with 40x/50x status codes
        if str(response.status).startswith('4') or str(response.status).startswith('5'):
            # Wrap up an arbitrary response (url='') and return it; the
            # spider code then checks url == '' to handle this case
            response = HtmlResponse(url='')
            return response
        # Leave other status codes untouched
        return response

    def process_exception(self, request, exception, spider):
        # Catch almost every exception
        if isinstance(exception, self.ALL_EXCEPTIONS):
            # Log the exception type
            print('Got exception: %s' % (exception))
            # Wrap up an arbitrary response and return it to the spider
            response = HtmlResponse(url='exception')
            return response
        # Log any exception that was not caught above
        print('not contained exception: %s' % exception)
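One caveat: HtmlResponse(url='') discards which request actually failed. A possible variant (my sketch, not from the original post) keeps the original URL and request object so the callback can still read response.url and response.meta; the meta key download_exception is made up for illustration:

# Hypothetical drop-in replacement for process_exception above:
def process_exception(self, request, exception, spider):
    if isinstance(exception, self.ALL_EXCEPTIONS):
        spider.logger.warning('Got exception %r for %s', exception, request.url)
        # Record the failure in meta; passing request= makes
        # response.meta accessible in the spider callback
        request.meta['download_exception'] = repr(exception)
        return HtmlResponse(url=request.url, request=request)
    spider.logger.warning('Uncaught exception type: %r', exception)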
class XXXXSpider(scrapy.Spider):
    name = 'XXXX'
    allowed_domains = ['XXXX.com']
    start_urls = ['http://www.XXXX.com/']
    custom_settings = {
        'DOWNLOADER_MIDDLEWARES': {
            'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
            'TESTSpider.middlewares.ProcessAllExceptionMiddleware': 120,
        },  # enable the downloader middlewares
        'DOWNLOAD_DELAY': 1,  # base download delay, in seconds
        'AUTOTHROTTLE_ENABLED': True,  # enable AutoThrottle
        'AUTOTHROTTLE_DEBUG': True,  # enable AutoThrottle debug output
        'AUTOTHROTTLE_MAX_DELAY': 10,  # maximum download delay
        'DOWNLOAD_TIMEOUT': 15,
        'CONCURRENT_REQUESTS_PER_DOMAIN': 4  # cap concurrent requests to this site
    }

    def parse(self, response):
        if not response.url:  # received a response with url == ''
            print('500')
            yield TESTItem(key=response.meta['key'], _str=500, alias='')
        elif 'exception' in response.url:  # received a response with url == 'exception'
            print('exception')
            yield TESTItem(key=response.meta['key'], _str='EXCEPTION', alias='')
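The parse callback above reads response.meta['key'], which has to be set on the outgoing request. A minimal sketch of a start_requests that does so (using the URL as the key is just an illustrative choice):

    def start_requests(self):
        for url in self.start_urls:
            # 'key' is whatever identifier you want echoed back in the callback
            yield scrapy.Request(url, callback=self.parse, meta={'key': url})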


RetryMiddleware(object):

· Part of the original source code

from scrapy.exceptions import NotConfigured
from scrapy.utils.response import response_status_message
# (twisted imports as in the previous snippet)


class RetryMiddleware(object):
    # Retry when one of the following exceptions occurs
    EXCEPTIONS_TO_RETRY = (defer.TimeoutError, TimeoutError, DNSLookupError,
                           ConnectionRefusedError, ConnectionDone, ConnectError,
                           ConnectionLost, TCPTimedOutError, ResponseFailed,
                           IOError, TunnelError)

    def __init__(self, settings):
        '''
        Several values from settings.py come into play here:
        RETRY_ENABLED: enables this middleware, default True
        RETRY_TIMES: number of retries, default 2
        RETRY_HTTP_CODES: list of response status codes that trigger a retry,
                          default [500, 503, 504, 400, 408]
        RETRY_PRIORITY_ADJUST: priority of the retried request relative to
                               the original request, default -1
        '''
        if not settings.getbool('RETRY_ENABLED'):
            raise NotConfigured
        self.max_retry_times = settings.getint('RETRY_TIMES')
        self.retry_http_codes = set(int(x) for x in settings.getlist('RETRY_HTTP_CODES'))
        self.priority_adjust = settings.getint('RETRY_PRIORITY_ADJUST')

    def process_response(self, request, response, spider):
        # A request built earlier can opt out of retrying via the
        # dont_retry meta key
        if request.meta.get('dont_retry', False):
            return response

        # If the status code is in the retry list, call _retry() to retry
        if response.status in self.retry_http_codes:
            reason = response_status_message(response.status)
            # Hook in your own logic here, e.g. drop a dead proxy or log
            return self._retry(request, reason, spider) or response
        return response

    def process_exception(self, request, exception, spider):
        # Retry when one of the listed exceptions occurred
        if isinstance(exception, self.EXCEPTIONS_TO_RETRY) \
                and not request.meta.get('dont_retry', False):
            # Hook in your own logic here, e.g. drop a dead proxy or log
            return self._retry(request, exception, spider)
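The corresponding knobs in settings.py look like this (the values shown follow the defaults described in the docstring above; exact defaults vary across Scrapy versions):

# settings.py
RETRY_ENABLED = True
RETRY_TIMES = 2
RETRY_HTTP_CODES = [500, 503, 504, 400, 408]
RETRY_PRIORITY_ADJUST = -1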


In practice, overriding these methods with your own code is enough to customize the retry behavior:

import logging
import random
import time


class MyRetryMiddleware(RetryMiddleware):
    logger = logging.getLogger(__name__)

    def delete_proxy(self, proxy):
        if proxy:
            # Delete the dead proxy from the proxy pool: connect to the
            # database and remove it there
            pass

    # Handle the response when retrying
    def process_response(self, request, response, spider):
        if request.meta.get('dont_retry', False):
            return response
        if response.status in self.retry_http_codes:
            reason = response_status_message(response.status)
            # Call delete_proxy() to drop the proxy that produced the response
            self.delete_proxy(request.meta.get('proxy', False))
            time.sleep(random.randint(3, 5))
            self.logger.warning('Unexpected status code, retrying...')
            # Call _retry() to reschedule the request
            return self._retry(request, reason, spider) or response
        return response

    # Catch exceptions
    def process_exception(self, request, exception, spider):
        if isinstance(exception, self.EXCEPTIONS_TO_RETRY) \
                and not request.meta.get('dont_retry', False):
            # Drop the proxy that caused the exception
            self.delete_proxy(request.meta.get('proxy', False))
            time.sleep(random.randint(3, 5))
            self.logger.warning('Connection error, retrying...')

            return self._retry(request, exception, spider)
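To use MyRetryMiddleware, disable the built-in retry middleware and register the subclass in its place. A sketch, assuming the class lives in myproject/middlewares.py (the module path is a placeholder):

# settings.py -- 'myproject' is a placeholder for your actual package
DOWNLOADER_MIDDLEWARES = {
    'scrapy.downloadermiddlewares.retry.RetryMiddleware': None,  # disable the built-in one
    'myproject.middlewares.MyRetryMiddleware': 550,  # 550 is the default retry priority
}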

Reference: https://blog.csdn.net/sc_lilei/article/details/80702449
